hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
790018f581c8abddfcfdb7302b27ff01e6fbb81c | 1,139 | py | Python | 1573_number_ways_to_split_string.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | 1 | 2020-07-15T14:16:23.000Z | 2020-07-15T14:16:23.000Z | 1573_number_ways_to_split_string.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | null | null | null | 1573_number_ways_to_split_string.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | null | null | null | #
# 1573. Number of Ways to Split a String
#
# Q: https://leetcode.com/problems/number-of-ways-to-split-a-string/
# A: https://leetcode.com/problems/number-of-ways-to-split-a-string/discuss/830433/Javascript-Python3-C%2B%2B-solutions
#
class Solution:
    """LeetCode 1573: count ways to split a binary string into three parts."""

    def numWays(self, S: str, MOD = int(1e9 + 7)) -> int:
        """Return the number of ways to cut S into three non-empty pieces
        whose counts of '1' characters are all equal, modulo MOD."""
        n = len(S)
        total_ones = S.count('1')
        # All zeros: any pair of distinct cut points works -> C(n-1, 2).
        if total_ones == 0:
            return (n - 2) * (n - 1) // 2 % MOD
        # Ones cannot be shared equally among the three pieces.
        if total_ones % 3 != 0:
            return 0
        # Measure the runs of zeros sitting right after the first third of
        # ones and right after the second third; each run of g zeros offers
        # g + 1 positions for the corresponding cut.
        target = total_ones // 3
        gap_one = gap_two = 0
        seen = 0
        for ch in S:
            if ch == '1':
                seen += 1
            elif seen == target:
                gap_one += 1
            elif seen == 2 * target:
                gap_two += 1
        return (gap_one + 1) * (gap_two + 1) % MOD
| 39.275862 | 124 | 0.543459 |
class Solution:
    # LeetCode 1573: Number of Ways to Split a String.
    def numWays(self, S: str, MOD = int(1e9 + 7)) -> int:
        """Count splits of binary string S into three non-empty parts
        holding equal numbers of '1's, modulo MOD."""
        N = len(S)
        cnt = len([c for c in S if c == '1'])
        # All zeros: choose any 2 of the N-1 cut points -> C(N-1, 2).
        if not cnt:
            return (N - 2) * (N - 1) // 2 % MOD
        # Ones cannot be divided into three equal groups.
        if cnt % 3:
            return 0
        K = cnt // 3
        first = 0
        second = 0
        ones = 0
        for i in range(N):
            if S[i] == '1':
                ones += 1
            # count zeros in the gap after the 1st / 2nd third of the ones;
            # a gap of g zeros gives g + 1 choices for that cut point
            if ones == 1 * K and S[i] == '0': first +=1
            if ones == 2 * K and S[i] == '0': second += 1
        return (first + 1) * (second + 1) % MOD
| true | true |
790019d09eb81c29d6d6712867240f600e2c9dc0 | 3,343 | py | Python | services/endorser/api/core/config.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/endorser/api/core/config.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/endorser/api/core/config.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
    # str mixin so members compare equal to their plain-string values
    PRODUCTION = "production"
    LOCAL = "local"
class GlobalConfig(BaseSettings):
    """Base pydantic settings for the endorser service.

    Every field can be overridden through environment variables; the
    defaults below are development values only.
    """

    TITLE: str = "Endorser"
    DESCRIPTION: str = "An endorser service for aca-py wallets"
    ENVIRONMENT: EnvironmentEnum
    DEBUG: bool = False
    TESTING: bool = False
    TIMEZONE: str = "UTC"
    # the following defaults match up with default values in scripts/.env.example
    # these MUST be all set in non-local environments.
    PSQL_HOST: str = os.environ.get("ENDORSER_POSTGRESQL_HOST", "localhost")
    PSQL_PORT: int = os.environ.get("ENDORSER_POSTGRESQL_PORT", 5432)
    PSQL_DB: str = os.environ.get("ENDORSER_POSTGRESQL_DB", "traction")
    PSQL_USER: str = os.environ.get("ENDORSER_DB_USER", "tractionuser")
    PSQL_PASS: str = os.environ.get("ENDORSER_DB_USER_PWD", "tractionPass")
    PSQL_ADMIN_USER: str = os.environ.get("ENDORSER_DB_ADMIN", "tractionadminuser")
    PSQL_ADMIN_PASS: str = os.environ.get("ENDORSER_DB_ADMIN_PWD", "tractionadminPass")
    # application connection is async
    # fmt: off
    SQLALCHEMY_DATABASE_URI: PostgresDsn = (
        f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # migrations connection uses owner role and is synchronous
    SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
        f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # fmt: on
    # aca-py admin endpoint and its API key
    ACAPY_ADMIN_URL: str = os.environ.get(
        "ENDORSER_ACAPY_ADMIN_URL", "http://localhost:9031"
    )
    ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
        "ENDORSER_ACAPY_ADMIN_URL_API_KEY", "change-me"
    )
    ENDORSER_API_ADMIN_USER: str = os.environ.get("ENDORSER_API_ADMIN_USER", "endorser")
    ENDORSER_API_ADMIN_KEY: str = os.environ.get("ENDORSER_API_ADMIN_KEY", "change-me")
    # where aca-py posts webhook events for this service
    ENDORSER_WEBHOOK_URL: str = os.environ.get(
        "ENDORSER_WEBHOOK_URL", "http://endorser-api:5000/webhook"
    )
    ACAPY_WEBHOOK_URL_API_KEY_NAME = "x-api-key"
    ACAPY_WEBHOOK_URL_API_KEY: str = os.environ.get("ACAPY_WEBHOOK_URL_API_KEY", "")
    DB_ECHO_LOG: bool = False
    # Api V1 prefix
    API_V1_STR = "/v1"
    # openssl rand -hex 32
    # NOTE(review): hardcoded JWT secret default — must be overridden in any
    # real deployment; verify it is replaced outside local development.
    JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
    JWT_ALGORITHM = "HS256"
    JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300
    class Config:
        # environment variable names must match field names exactly
        case_sensitive = True
class LocalConfig(GlobalConfig):
    """Local configurations."""
    # developer defaults: debug on, environment tagged "local"
    DEBUG: bool = True
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
    """Production configurations."""
    # production defaults: debug off
    DEBUG: bool = False
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
    """Callable factory that maps an environment name to a settings class."""

    def __init__(self, environment: Optional[str]):
        self.environment = environment

    def __call__(self) -> GlobalConfig:
        # Only the explicit "local" marker selects the local settings;
        # anything else (including None) falls back to production.
        is_local = self.environment == EnvironmentEnum.LOCAL.value
        return LocalConfig() if is_local else ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
    # Cached so the whole process shares one settings object; the
    # ENVIRONMENT variable selects local vs. production configuration.
    return FactoryConfig(os.environ.get("ENVIRONMENT"))()
settings = get_configuration()
| 30.669725 | 107 | 0.714029 | import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
PRODUCTION = "production"
LOCAL = "local"
class GlobalConfig(BaseSettings):
TITLE: str = "Endorser"
DESCRIPTION: str = "An endorser service for aca-py wallets"
ENVIRONMENT: EnvironmentEnum
DEBUG: bool = False
TESTING: bool = False
TIMEZONE: str = "UTC"
PSQL_HOST: str = os.environ.get("ENDORSER_POSTGRESQL_HOST", "localhost")
PSQL_PORT: int = os.environ.get("ENDORSER_POSTGRESQL_PORT", 5432)
PSQL_DB: str = os.environ.get("ENDORSER_POSTGRESQL_DB", "traction")
PSQL_USER: str = os.environ.get("ENDORSER_DB_USER", "tractionuser")
PSQL_PASS: str = os.environ.get("ENDORSER_DB_USER_PWD", "tractionPass")
PSQL_ADMIN_USER: str = os.environ.get("ENDORSER_DB_ADMIN", "tractionadminuser")
PSQL_ADMIN_PASS: str = os.environ.get("ENDORSER_DB_ADMIN_PWD", "tractionadminPass")
SQLALCHEMY_DATABASE_URI: PostgresDsn = (
f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}"
)
SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}"
)
ACAPY_ADMIN_URL: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL", "http://localhost:9031"
)
ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL_API_KEY", "change-me"
)
ENDORSER_API_ADMIN_USER: str = os.environ.get("ENDORSER_API_ADMIN_USER", "endorser")
ENDORSER_API_ADMIN_KEY: str = os.environ.get("ENDORSER_API_ADMIN_KEY", "change-me")
ENDORSER_WEBHOOK_URL: str = os.environ.get(
"ENDORSER_WEBHOOK_URL", "http://endorser-api:5000/webhook"
)
ACAPY_WEBHOOK_URL_API_KEY_NAME = "x-api-key"
ACAPY_WEBHOOK_URL_API_KEY: str = os.environ.get("ACAPY_WEBHOOK_URL_API_KEY", "")
DB_ECHO_LOG: bool = False
API_V1_STR = "/v1"
JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
JWT_ALGORITHM = "HS256"
JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300
class Config:
case_sensitive = True
class LocalConfig(GlobalConfig):
DEBUG: bool = True
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
DEBUG: bool = False
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
def __init__(self, environment: Optional[str]):
self.environment = environment
def __call__(self) -> GlobalConfig:
if self.environment == EnvironmentEnum.LOCAL.value:
return LocalConfig()
return ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
return FactoryConfig(os.environ.get("ENVIRONMENT"))()
settings = get_configuration()
| true | true |
79001a53ca98fa13b92179f06dcdd4fd9afdf353 | 416 | py | Python | gentelella.py | Pechsopha/KITPoint | 076890838ca7f57b76f7c9a9a4101c9e90b13d8b | [
"MIT"
] | 566 | 2017-11-27T15:35:48.000Z | 2022-03-25T19:35:25.000Z | gentelella.py | xu1u/flask-gentelella | 408fbecdd72548bb88b70e0b08f33ab43fd9fbcf | [
"MIT"
] | 21 | 2018-05-08T11:33:53.000Z | 2021-11-12T13:01:01.000Z | gentelella.py | xu1u/flask-gentelella | 408fbecdd72548bb88b70e0b08f33ab43fd9fbcf | [
"MIT"
] | 235 | 2017-12-07T13:56:01.000Z | 2022-03-11T12:48:02.000Z | from flask_migrate import Migrate
from os import environ
from sys import exit
from config import config_dict
from app import create_app, db
get_config_mode = environ.get('GENTELELLA_CONFIG_MODE', 'Debug')
try:
    # Map the mode name onto a config class; capitalize() makes the
    # lookup case-insensitive ("debug" -> "Debug").
    config_mode = config_dict[get_config_mode.capitalize()]
except KeyError:
    exit('Error: Invalid GENTELELLA_CONFIG_MODE environment variable entry.')
# Build the Flask app from the selected config and attach DB migrations.
app = create_app(config_mode)
Migrate(app, db)
| 24.470588 | 77 | 0.798077 | from flask_migrate import Migrate
from os import environ
from sys import exit
from config import config_dict
from app import create_app, db
get_config_mode = environ.get('GENTELELLA_CONFIG_MODE', 'Debug')
try:
config_mode = config_dict[get_config_mode.capitalize()]
except KeyError:
exit('Error: Invalid GENTELELLA_CONFIG_MODE environment variable entry.')
app = create_app(config_mode)
Migrate(app, db)
| true | true |
79001b68029fdd3de4f8cd7f49170776ecedbfc8 | 944 | py | Python | var/spack/repos/builtin/packages/r-matrixstats/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/r-matrixstats/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-matrixstats/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMatrixstats(RPackage):
    """High-performing functions operating on rows and columns of matrices,
    e.g. col / rowMedians(), col / rowRanks(), and col / rowSds(). Functions
    optimized per data type and for subsetted calculations such that both
    memory usage and processing time is minimized. There are also optimized
    vector-based methods, e.g. binMeans(), madDiff() and
    weightedMedian()."""
    # CRAN package page, source tarball, and archive of older releases
    homepage = "https://cran.rstudio.com/web/packages/matrixStats/index.html"
    url = "https://cran.rstudio.com/src/contrib/matrixStats_0.52.2.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/matrixStats"
    # second argument is the md5 checksum of the 0.52.2 tarball
    version('0.52.2', '41b987d3ae96ee6895875c413adcba3c')
| 42.909091 | 79 | 0.720339 |
from spack import *
class RMatrixstats(RPackage):
homepage = "https://cran.rstudio.com/web/packages/matrixStats/index.html"
url = "https://cran.rstudio.com/src/contrib/matrixStats_0.52.2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/matrixStats"
version('0.52.2', '41b987d3ae96ee6895875c413adcba3c')
| true | true |
79001c59d764039891cc5215c23b31bcd7d78c17 | 1,223 | py | Python | var/spack/repos/builtin/packages/sspace-longread/package.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | var/spack/repos/builtin/packages/sspace-longread/package.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/sspace-longread/package.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class SspaceLongread(Package):
    """Stand-alone program that scaffolds pre-assembled contigs using
    long reads (SSPACE-LongRead).

    Note: A manual download is required for SSPACE-LongRead.
    Spack will search your current directory for the download file.
    Alternatively, add this file to a mirror so that Spack can find it.
    For instructions on how to set up a mirror, see
    http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "https://www.baseclear.com/genomics/bioinformatics/basetools/SSPACE-longread"
    manual_download = True

    version('1.1', '0bb5d8603d7ead4ff1596135a520cc26')

    depends_on('perl', type=('build', 'run'))

    def url_for_version(self, version):
        # The tarball must sit in the user's current working directory,
        # e.g. ./40SSPACE-LongRead_v1-1.tar.gz for version 1.1.
        return "file://{0}/40SSPACE-LongRead_v{1}.tar.gz".format(
            os.getcwd(), version.dashed)

    def install(self, spec, prefix):
        # No build step: copy the bundled binary and perl driver script.
        mkdirp(prefix.bin)
        for tool in ('blasr', 'SSPACE-LongRead.pl'):
            install(tool, prefix.bin)
| 33.972222 | 92 | 0.699918 |
import os
from spack import *
class SspaceLongread(Package):
homepage = "https://www.baseclear.com/genomics/bioinformatics/basetools/SSPACE-longread"
manual_download = True
version('1.1', '0bb5d8603d7ead4ff1596135a520cc26')
depends_on('perl', type=('build', 'run'))
def url_for_version(self, version):
return "file://{0}/40SSPACE-LongRead_v{1}.tar.gz".format(
os.getcwd(), version.dashed)
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('blasr', prefix.bin)
install('SSPACE-LongRead.pl', prefix.bin)
| true | true |
79001cf4be9bcb32bc620a7c2a3dbe44d680d36c | 637,302 | py | Python | pandas/tests/test_frame.py | jaimefrio/pandas | d6a77007b247f3c218ecc38de8130e7d42e1d0e9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/tests/test_frame.py | jaimefrio/pandas | d6a77007b247f3c218ecc38de8130e7d42e1d0e9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/tests/test_frame.py | jaimefrio/pandas | d6a77007b247f3c218ecc38de8130e7d42e1d0e9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
from datetime import datetime, timedelta, time, date
import sys
import operator
import re
import csv
import nose
import functools
import itertools
from itertools import product, permutations
from distutils.version import LooseVersion
from pandas.compat import(
map, zip, range, long, lrange, lmap, lzip,
OrderedDict, u, StringIO, is_platform_windows
)
from pandas import compat
from numpy import random, nan, inf
from numpy.random import randn
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pandas.core.nanops as nanops
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, Panel, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, date_range,
read_csv, timedelta_range, Timedelta, option_context, period_range)
from pandas.core.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
from pandas.util.testing import (assert_almost_equal,
assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp,
assertRaises,
makeCustomDataframe as mkdf,
ensure_clean,
SubclassedDataFrame)
from pandas.core.indexing import IndexingError
from pandas.core.common import PandasError
import pandas.util.testing as tm
import pandas.lib as lib
from numpy.testing.decorators import slow
#---------------------------------------------------------------------
# DataFrame test cases
# Shared fixture constants for this test module.
# NOTE(review): not referenced in this chunk — presumably used by join/dtype
# tests elsewhere in the file; confirm before removing.
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
MIXED_FLOAT_DTYPES = ['float16','float32','float64']
MIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',
                    'int32','int64']
def _check_mixed_float(df, dtype = None):
    """Assert that df's A/B/C/D columns carry the expected float dtypes.

    `dtype` may be a single dtype string applied to every column, or a
    dict overriding individual columns.  Defaults reflect that float16
    is most likely to be upcasted to float32.
    """
    expected = dict(A='float32', B='float32', C='float16', D='float64')
    if isinstance(dtype, compat.string_types):
        expected = dict((col, dtype) for col in expected)
    elif isinstance(dtype, dict):
        expected.update(dtype)
    for col in ('A', 'B', 'C', 'D'):
        if expected.get(col):
            assert df.dtypes[col] == expected[col]
def _check_mixed_int(df, dtype = None):
    """Assert that df's A/B/C/D columns carry the expected integer dtypes.

    `dtype` may be a single dtype string applied to every column, or a
    dict overriding individual columns.
    """
    expected = dict(A='int32', B='uint64', C='uint8', D='int64')
    if isinstance(dtype, compat.string_types):
        expected = dict((col, dtype) for col in expected)
    elif isinstance(dtype, dict):
        expected.update(dtype)
    for col in ('A', 'B', 'C', 'D'):
        if expected.get(col):
            assert df.dtypes[col] == expected[col]
class CheckIndexing(object):
_multiprocess_can_split_ = True
    def test_getitem(self):
        # slicing: a positional slice keeps the first 20 rows
        sl = self.frame[:20]
        self.assertEqual(20, len(sl.index))
        # column access: every column Series shares the sliced row index
        for _, series in compat.iteritems(sl):
            self.assertEqual(20, len(series.index))
            self.assertTrue(tm.equalContents(series.index, sl.index))
        for key, _ in compat.iteritems(self.frame._series):
            self.assertIsNotNone(self.frame[key])
        # looking up a missing column raises KeyError naming the key
        self.assertNotIn('random', self.frame)
        with assertRaisesRegexp(KeyError, 'random'):
            self.frame['random']
        df = self.frame.copy()
        df['$10'] = randn(len(df))
        ad = randn(len(df))
        df['@awesome_domain'] = ad
        # labels with special characters are plain keys, not expressions
        self.assertRaises(KeyError, df.__getitem__, 'df["$10"]')
        res = df['@awesome_domain']
        assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
    def test_get(self):
        # .get mirrors dict.get: existing column, then missing -> None
        # or the supplied default
        b = self.frame.get('B')
        assert_series_equal(b, self.frame['B'])
        self.assertIsNone(self.frame.get('foo'))
        assert_series_equal(self.frame.get('foo', self.frame['B']),
                            self.frame['B'])
        # None
        # GH 5652
        # df.get(None) returns None for empty and non-empty frames alike
        for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:
            result = df.get(None)
            self.assertIsNone(result)
def test_getitem_iterator(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.ix[:, idx]
expected = self.frame.ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_getitem_list(self):
self.frame.columns.name = 'foo'
result = self.frame[['B', 'A']]
result2 = self.frame[Index(['B', 'A'])]
expected = self.frame.ix[:, ['B', 'A']]
expected.columns.name = 'foo'
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
self.assertEqual(result.columns.name, 'foo')
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[['B', 'A', 'food']]
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[Index(['B', 'A', 'foo'])]
# tuples
df = DataFrame(randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')], name=['sth', 'sth2']))
result = df[[('foo', 'bar'), ('baz', 'qux')]]
expected = df.ix[:, :2]
assert_frame_equal(result, expected)
self.assertEqual(result.columns.names, ['sth', 'sth2'])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'], check_names=False)
assert_series_equal(self.frame['A'], data['B'], check_names=False)
with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with assertRaisesRegexp(ValueError, 'Length of values does not match '
'length of index'):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.ix[1, ['tt1', 'tt2']] = [1, 2]
result = df.ix[1, ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.ix[1, ['tt1', 'tt2']] = ['1', '2']
result = df.ix[1, ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns, name=1)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006',periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))
Z = 100*X_orig.iloc[:,1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s,s2)
assert_series_equal(s_orig+1,s)
self.assertIs(s,s2)
self.assertIs(s._data,s2._data)
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s,s2)
assert_series_equal(s_orig+1.5,s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1.5,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# mixed dtype
arr = np.random.randint(0,10,size=5)
df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
with assertRaisesRegexp(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assertRaisesRegexp(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns = df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
self.assertEqual(bif[c].dtype, df[c].dtype)
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})
assert_series_equal(result, expected)
# int block splitting
df.ix[1:3,['E1','F1']] = 0
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data = np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isnull(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result,expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.ix[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
    def test_boolean_index_empty_corner(self):
        # #2096
        # indexing/assigning with a zero-length boolean mask on an empty
        # frame must be a no-op rather than an error
        blah = DataFrame(np.empty([0, 1]), columns=['A'],
                         index=DatetimeIndex([]))
        # both of these should succeed trivially
        k = np.array([], bool)
        blah[k]
        blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.ix[:-1]
expected = df.ix[df.index[:-1]]
assert_frame_equal(result, expected)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
# 11320
df = pd.DataFrame({ "rna": (1.5,2.2,3.2,4.5),
-1000: [11,21,36,40],
0: [10,22,43,34],
1000:[0, 10, 20, 30] },columns=['rna',-1000,0,1000])
result = df[[1000]]
expected = df.iloc[:,[3]]
assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:,[1]]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
self.frame.ix[:, [-1]] = 0
self.assertTrue((self.frame['D'] == 0).all())
df = DataFrame(np.random.randn(8, 4))
self.assertTrue(isnull(df.ix[:, [-1]].values).all())
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
a.ix[-1] = a.ix[-2]
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
self.assertEqual(a.ix[-1].name, 'T')
self.assertEqual(a.ix[-2].name, 'S')
    def test_getattr(self):
        # existing columns are reachable as attributes; anything else
        # falls through to a normal AttributeError
        tm.assert_series_equal(self.frame.A, self.frame['A'])
        self.assertRaises(AttributeError, getattr, self.frame,
                          'NONEXISTENT_NAME')
    def test_setattr_column(self):
        # assigning a scalar to an existing column via attribute access
        # broadcasts it across all rows
        df = DataFrame({'foobar': 1}, index=lrange(10))
        df.foobar = 5
        self.assertTrue((df.foobar == 5).all())
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
self.assertIn('col5', self.frame)
tm.assert_dict_equal(series, self.frame['col5'],
compare_keys=False)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_dict_equal(series, self.frame['col6'],
compare_keys=False)
with tm.assertRaises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
self.assertTrue((self.frame['col9'] == arr).all())
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
self.assertRaises(com.SettingWithCopyError, f)
self.assertEqual(smaller['col10'].dtype, np.object_)
self.assertTrue((smaller['col10'] == ['1', '2']).all())
# with a dtype
for dtype in ['int32','int64','float32','float64']:
self.frame[dtype] = np.array(arr,dtype=dtype)
self.assertEqual(self.frame[dtype].dtype.name, dtype)
# dtype changing GH4204
df = DataFrame([[0,0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan,np.nan]])
assert_frame_equal(df,expected)
df = DataFrame([[0,0]])
df.loc[0] = np.nan
assert_frame_equal(df,expected)
    def test_setitem_tuple(self):
        # a tuple key is a single scalar column label, not a nested lookup
        self.frame['A', 'B'] = self.frame['A']
        assert_series_equal(self.frame['A', 'B'], self.frame['A'], check_names=False)
    def test_setitem_always_copy(self):
        # the assigned Series is copied on insert, so mutating the frame
        # afterwards must not write back into the source Series
        s = self.frame['A'].copy()
        self.frame['E'] = s
        self.frame['E'][5:10] = nan
        self.assertTrue(notnull(s[5:10]).all())
def test_setitem_boolean(self):
    """Boolean-mask assignment: Series masks (including misaligned and
    reversed), frame masks, masks needing alignment, and setting from a
    DataFrame; non-boolean frame masks raise TypeError."""
    df = self.frame.copy()
    values = self.frame.values
    df[df['A'] > 0] = 4
    values[values[:, 0] > 0] = 4
    assert_almost_equal(df.values, values)
    # test that column reindexing works
    series = df['A'] == 4
    series = series.reindex(df.index[::-1])
    df[series] = 1
    values[values[:, 0] == 4] = 1
    assert_almost_equal(df.values, values)
    df[df > 0] = 5
    values[values > 0] = 5
    assert_almost_equal(df.values, values)
    df[df == 5] = 0
    values[values == 5] = 0
    assert_almost_equal(df.values, values)
    # a df that needs alignment first
    df[df[:-1] < 0] = 2
    np.putmask(values[:-1], values[:-1] < 0, 2)
    assert_almost_equal(df.values, values)
    # indexed with same shape but rows-reversed df
    df[df[::-1] == 2] = 3
    values[values == 2] = 3
    assert_almost_equal(df.values, values)
    # masks must be strictly boolean
    with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '
                            'values only'):
        df[df * 0] = 2
    # index with DataFrame
    mask = df > np.abs(df)
    expected = df.copy()
    df[df > np.abs(df)] = nan
    expected.values[mask.values] = nan
    assert_frame_equal(df, expected)
    # set from DataFrame
    expected = df.copy()
    df[df > np.abs(df)] = df * 2
    np.putmask(expected.values, mask.values, df.values * 2)
    assert_frame_equal(df, expected)
def test_setitem_cast(self):
    """Dtype behavior when assigning into columns: whole-column
    replacement takes the value's dtype; scalar assignment to one cell
    upcasts the column (GH 7704)."""
    self.frame['D'] = self.frame['D'].astype('i8')
    self.assertEqual(self.frame['D'].dtype, np.int64)
    # #669, should not cast?
    # this is now set to int64, which means a replacement of the column to
    # the value dtype (and nothing to do with the existing dtype)
    self.frame['B'] = 0
    self.assertEqual(self.frame['B'].dtype, np.int64)
    # cast if pass array of course
    self.frame['B'] = np.arange(len(self.frame))
    self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))
    # re-assigning a column always adopts the new value's dtype
    self.frame['foo'] = 'bar'
    self.frame['foo'] = 0
    self.assertEqual(self.frame['foo'].dtype, np.int64)
    self.frame['foo'] = 'bar'
    self.frame['foo'] = 2.5
    self.assertEqual(self.frame['foo'].dtype, np.float64)
    self.frame['something'] = 0
    self.assertEqual(self.frame['something'].dtype, np.int64)
    self.frame['something'] = 2
    self.assertEqual(self.frame['something'].dtype, np.int64)
    self.frame['something'] = 2.5
    self.assertEqual(self.frame['something'].dtype, np.float64)
    # GH 7704
    # dtype conversion on setting
    df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
    df['event'] = np.nan
    df.loc[10,'event'] = 'foo'
    result = df.get_dtype_counts().sort_values()
    expected = Series({'float64' : 3, 'object' : 1 }).sort_values()
    assert_series_equal(result, expected)
def test_setitem_boolean_column(self):
    """Setting one column through a boolean row mask via .ix matches the
    equivalent raw-ndarray write."""
    expected = self.frame.copy()
    mask = self.frame['A'] > 0
    self.frame.ix[mask, 'B'] = 0
    expected.values[mask.values, 1] = 0
    assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
    """Corner cases: re-adding a deleted column, non-string (datetime)
    column labels, empty frames upcasting from object, and re-setting an
    existing column's dtype."""
    # corner case
    df = DataFrame({'B': [1., 2., 3.],
                    'C': ['a', 'b', 'c']},
                   index=np.arange(3))
    del df['B']
    df['B'] = [1., 2., 3.]
    self.assertIn('B', df)
    self.assertEqual(len(df.columns), 2)
    df['A'] = 'beginning'
    df['E'] = 'foo'
    df['D'] = 'bar'
    # datetime objects are legal column labels
    df[datetime.now()] = 'date'
    df[datetime.now()] = 5.
    # what to do when empty frame with index
    dm = DataFrame(index=self.frame.index)
    dm['A'] = 'foo'
    dm['B'] = 'bar'
    self.assertEqual(len(dm.columns), 2)
    self.assertEqual(dm.values.dtype, np.object_)
    # upcast
    dm['C'] = 1
    self.assertEqual(dm['C'].dtype, np.int64)
    dm['E'] = 1.
    self.assertEqual(dm['E'].dtype, np.float64)
    # set existing column
    dm['A'] = 'bar'
    self.assertEqual('bar', dm['A'][0])
    dm = DataFrame(index=np.arange(3))
    dm['A'] = 1
    dm['foo'] = 'bar'
    del dm['foo']
    dm['foo'] = 'bar'
    self.assertEqual(dm['foo'].dtype, np.object_)
    # numeric-looking strings stay object; no implicit coercion
    dm['coercable'] = ['1', '2', '3']
    self.assertEqual(dm['coercable'].dtype, np.object_)
def test_setitem_corner2(self):
    """Setting list-wrapped columns on .ix with an index selection
    writes through to the underlying rows."""
    data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
            "cruft": np.random.random(20)}
    df = DataFrame(data)
    ix = df[df['title'] == 'bar'].index
    df.ix[ix, ['title']] = 'foobar'
    df.ix[ix, ['cruft']] = 0
    assert(df.ix[1, 'title'] == 'foobar')
    assert(df.ix[1, 'cruft'] == 0)
def test_setitem_ambig(self):
    """Mixed-type assignment: float-coercible values (Decimal) versus
    truly uncoercible values (strings -> object dtype)."""
    # difficulties with mixed-type data
    from decimal import Decimal
    # created as float type
    dm = DataFrame(index=lrange(3), columns=lrange(3))
    coercable_series = Series([Decimal(1) for _ in range(3)],
                              index=lrange(3))
    uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
    dm[0] = np.ones(3)
    self.assertEqual(len(dm.columns), 3)
    # self.assertIsNone(dm.objects)
    dm[1] = coercable_series
    self.assertEqual(len(dm.columns), 3)
    # self.assertIsNone(dm.objects)
    dm[2] = uncoercable_series
    self.assertEqual(len(dm.columns), 3)
    # self.assertIsNotNone(dm.objects)
    self.assertEqual(dm[2].dtype, np.object_)
def test_setitem_clear_caches(self):
    """Writing through .ix invalidates a previously cached column
    reference (GH #304): df['z'] must be a fresh object afterwards."""
    # GH #304
    df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
                   index=[0, 1, 2, 3])
    df.insert(2, 'z', np.nan)
    # cache it
    foo = df['z']
    df.ix[2:, 'z'] = 42
    expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
    self.assertIsNot(df['z'], foo)
    assert_series_equal(df['z'], expected)
def test_setitem_None(self):
    """None is a legal column label (GH #766); retrievable via
    __getitem__, .loc, and positional iloc."""
    # GH #766
    self.frame[None] = self.frame['A']
    assert_series_equal(self.frame.iloc[:,-1], self.frame['A'], check_names=False)
    assert_series_equal(self.frame.loc[:,None], self.frame['A'], check_names=False)
    assert_series_equal(self.frame[None], self.frame['A'], check_names=False)
    # repr must not choke on the None label
    repr(self.frame)
def test_setitem_empty(self):
    """Setting with an all-False boolean mask is a no-op (GH 9596)."""
    # GH 9596
    df = pd.DataFrame({'a': ['1', '2', '3'],
                       'b': ['11', '22', '33'],
                       'c': ['111', '222', '333']})
    result = df.copy()
    result.loc[result.b.isnull(), 'a'] = result.a
    assert_frame_equal(result, df)
def test_setitem_empty_frame_with_boolean(self):
    """Boolean-mask setting on empty frames leaves them unchanged
    (GH #10126), across dtypes and empty-axis combinations."""
    # Test for issue #10126
    for dtype in ('float', 'int64'):
        for df in [
                pd.DataFrame(dtype=dtype),
                pd.DataFrame(dtype=dtype, index=[1]),
                pd.DataFrame(dtype=dtype, columns=['A']),
        ]:
            df2 = df.copy()
            # mask is empty/all-False, so nothing should change
            df[df > df2] = 47
            assert_frame_equal(df, df2)
def test_delitem_corner(self):
    """del drops a column; deleting the same label twice raises KeyError."""
    frame = self.frame.copy()
    del frame['D']
    self.assertEqual(len(frame.columns), 3)
    with self.assertRaises(KeyError):
        del frame['D']
    del frame['B']
    self.assertEqual(len(frame.columns), 2)
def test_getitem_fancy_2d(self):
    """2-D .ix getitem: column lists, index lists, row/column slices
    (label slices are end-inclusive), views, and boolean-frame rejection."""
    f = self.frame
    ix = f.ix
    assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))
    subidx = self.frame.index[[5, 4, 1]]
    assert_frame_equal(ix[subidx, ['B', 'A']],
                       f.reindex(index=subidx, columns=['B', 'A']))
    # slicing rows, etc.
    assert_frame_equal(ix[5:10], f[5:10])
    assert_frame_equal(ix[5:10, :], f[5:10])
    assert_frame_equal(ix[:5, ['A', 'B']],
                       f.reindex(index=f.index[:5], columns=['A', 'B']))
    # slice rows with labels, inclusive!
    expected = ix[5:11]
    result = ix[f.index[5]:f.index[10]]
    assert_frame_equal(expected, result)
    # slice columns
    assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))
    # get view: mutating the slice's values mutates the parent frame
    exp = f.copy()
    ix[5:10].values[:] = 5
    exp.values[5:10] = 5
    assert_frame_equal(f, exp)
    # a boolean DataFrame is not a valid .ix key
    self.assertRaises(ValueError, ix.__getitem__, f > 0.5)
def test_slice_floats(self):
    """.ix label slicing on a float index selects by value, not position."""
    index = [52195.504153, 52196.303147, 52198.369883]
    df = DataFrame(np.random.rand(3, 2), index=index)
    s1 = df.ix[52195.1:52196.5]
    self.assertEqual(len(s1), 2)
    s1 = df.ix[52195.1:52196.6]
    self.assertEqual(len(s1), 2)
    s1 = df.ix[52195.1:52198.9]
    self.assertEqual(len(s1), 3)
def test_getitem_fancy_slice_integers_step(self):
    """Stepped slices work in .ix for both get and set."""
    df = DataFrame(np.random.randn(10, 5))
    # this is OK
    result = df.ix[:8:2]
    df.ix[:8:2] = np.nan
    self.assertTrue(isnull(df.ix[:8:2]).values.all())
def test_getitem_setitem_integer_slice_keyerrors(self):
    """Label slices on a monotonic integer index tolerate bounds that
    fall between labels; a non-monotonic index raises KeyError."""
    df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
    # this is OK
    cp = df.copy()
    cp.ix[4:10] = 0
    self.assertTrue((cp.ix[4:10] == 0).values.all())
    # so is this
    cp = df.copy()
    cp.ix[3:11] = 0
    self.assertTrue((cp.ix[3:11] == 0).values.all())
    result = df.ix[4:10]
    result2 = df.ix[3:11]
    expected = df.reindex([4, 6, 8, 10])
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
    # non-monotonic, raise KeyError
    df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
    self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))
    self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
    """2-D .ix setitem: column lists, index lists, row slices, label
    slices (inclusive), column slices by position and by label, and
    boolean-row broadcast assignment."""
    f = self.frame
    ix = f.ix
    # case 1
    frame = self.frame.copy()
    expected = frame.copy()
    frame.ix[:, ['B', 'A']] = 1
    expected['B'] = 1.
    expected['A'] = 1.
    assert_frame_equal(frame, expected)
    # case 2
    frame = self.frame.copy()
    frame2 = self.frame.copy()
    expected = frame.copy()
    subidx = self.frame.index[[5, 4, 1]]
    values = randn(3, 2)
    frame.ix[subidx, ['B', 'A']] = values
    frame2.ix[[5, 4, 1], ['B', 'A']] = values
    expected['B'].ix[subidx] = values[:, 0]
    expected['A'].ix[subidx] = values[:, 1]
    assert_frame_equal(frame, expected)
    assert_frame_equal(frame2, expected)
    # case 3: slicing rows, etc.
    frame = self.frame.copy()
    expected1 = self.frame.copy()
    frame.ix[5:10] = 1.
    expected1.values[5:10] = 1.
    assert_frame_equal(frame, expected1)
    expected2 = self.frame.copy()
    arr = randn(5, len(frame.columns))
    frame.ix[5:10] = arr
    expected2.values[5:10] = arr
    assert_frame_equal(frame, expected2)
    # case 4
    frame = self.frame.copy()
    frame.ix[5:10, :] = 1.
    assert_frame_equal(frame, expected1)
    frame.ix[5:10, :] = arr
    assert_frame_equal(frame, expected2)
    # case 5
    frame = self.frame.copy()
    frame2 = self.frame.copy()
    expected = self.frame.copy()
    values = randn(5, 2)
    frame.ix[:5, ['A', 'B']] = values
    expected['A'][:5] = values[:, 0]
    expected['B'][:5] = values[:, 1]
    assert_frame_equal(frame, expected)
    # positional column list should hit the same cells
    frame2.ix[:5, [0, 1]] = values
    assert_frame_equal(frame2, expected)
    # case 6: slice rows with labels, inclusive!
    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.ix[frame.index[5]:frame.index[10]] = 5.
    expected.values[5:11] = 5
    assert_frame_equal(frame, expected)
    # case 7: slice columns
    frame = self.frame.copy()
    frame2 = self.frame.copy()
    expected = self.frame.copy()
    # slice indices
    frame.ix[:, 1:3] = 4.
    expected.values[:, 1:3] = 4.
    assert_frame_equal(frame, expected)
    # slice with labels
    frame.ix[:, 'B':'C'] = 4.
    assert_frame_equal(frame, expected)
    # new corner case of boolean slicing / setting
    frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
                      columns=['a', 'b'])
    lst = [100]
    lst.extend([np.nan] * 4)
    expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
                         columns=['a', 'b'])
    frame[frame['a'] == 2] = 100
    assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
    """Column slicing via .ix: mixed frame keeps dtypes; on a single-block
    frame the slice is a view, so chained assignment raises."""
    sliced = self.mixed_frame.ix[:, -3:]
    self.assertEqual(sliced['D'].dtype, np.float64)
    # get view with single block
    # setting it triggers setting with copy
    sliced = self.frame.ix[:, -3:]

    def f():
        sliced['C'] = 4.
    self.assertRaises(com.SettingWithCopyError, f)
    # the write propagated to the parent before the error was raised
    # (view semantics) -- hence the parent check below
    self.assertTrue((self.frame['C'] == 4).all())
def test_fancy_setitem_int_labels(self):
    """On an integer index, .ix setting is label-based, not positional."""
    # integer index defers to label-based indexing
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    tmp = df.copy()
    exp = df.copy()
    tmp.ix[[0, 2, 4]] = 5
    exp.values[:3] = 5
    assert_frame_equal(tmp, exp)
    tmp = df.copy()
    exp = df.copy()
    tmp.ix[6] = 5
    exp.values[3] = 5
    assert_frame_equal(tmp, exp)
    tmp = df.copy()
    exp = df.copy()
    tmp.ix[:, 2] = 5
    # tmp correctly sets the dtype
    # so match the exp way
    exp[2] = 5
    assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
    """.ix getitem on integer row labels: list keys reindex, scalar keys
    cross-section (xs), column scalars return the column."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    result = df.ix[[4, 2, 0], [2, 0]]
    expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
    assert_frame_equal(result, expected)
    result = df.ix[[4, 2, 0]]
    expected = df.reindex(index=[4, 2, 0])
    assert_frame_equal(result, expected)
    result = df.ix[4]
    expected = df.xs(4)
    assert_series_equal(result, expected)
    result = df.ix[:, 3]
    expected = df[3]
    assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
    """Setting via .ix with labels absent from the axis raises KeyError."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    # labels that aren't contained
    self.assertRaises(KeyError, df.ix.__setitem__,
                      ([0, 1, 2], [2, 3, 4]), 5)
    # try to set indices not contained in frame
    self.assertRaises(KeyError,
                      self.frame.ix.__setitem__,
                      ['foo', 'bar', 'baz'], 1)
    self.assertRaises(KeyError,
                      self.frame.ix.__setitem__,
                      (slice(None, None), ['E']), 1)
    # partial setting now allows this GH2578
    #self.assertRaises(KeyError,
    #                  self.frame.ix.__setitem__,
    #                  (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
    """.ix setting on a mixed-dtype frame: scalar broadcast, whole-row
    nan, row copy, and list assignment into a mixed row (GH #1432)."""
    self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
    result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
    self.assertTrue((result.values == 5).all())
    self.mixed_frame.ix[5] = np.nan
    self.assertTrue(isnull(self.mixed_frame.ix[5]).all())
    self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
    assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
                        check_names=False)
    # #1432
    df = DataFrame({1: [1., 2., 3.],
                    2: [3, 4, 5]})
    self.assertTrue(df._is_mixed_type)
    df.ix[1] = [5, 10]
    expected = DataFrame({1: [1., 5., 3.],
                          2: [3, 10, 5]})
    assert_frame_equal(df, expected)
def test_ix_align(self):
    """Assigning a Series through .ix aligns it on the index, for full
    columns, full rows (transposed), slices, and list selections."""
    b = Series(randn(10), name=0).sort_values()
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()
    df.ix[:, 0] = b
    assert_series_equal(df.ix[:, 0].reindex(b.index), b)
    dft = df_orig.T
    dft.ix[0, :] = b
    assert_series_equal(dft.ix[0, :].reindex(b.index), b)
    df = df_orig.copy()
    df.ix[:5, 0] = b
    s = df.ix[:5, 0]
    assert_series_equal(s, b.reindex(s.index))
    dft = df_orig.T
    dft.ix[0, :5] = b
    s = dft.ix[0, :5]
    assert_series_equal(s, b.reindex(s.index))
    df = df_orig.copy()
    idx = [0, 1, 3, 5]
    df.ix[idx, 0] = b
    s = df.ix[idx, 0]
    assert_series_equal(s, b.reindex(s.index))
    dft = df_orig.T
    dft.ix[0, idx] = b
    s = dft.ix[0, idx]
    assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
    """Assigning a DataFrame through .ix aligns on the row index,
    including after the source frame has been re-sorted."""
    b = DataFrame(np.random.randn(3, 4))
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()
    df.ix[:3] = b
    out = b.ix[:3]
    assert_frame_equal(out, b)
    b.sort_index(inplace=True)
    df = df_orig.copy()
    df.ix[[0, 1, 2]] = b
    out = df.ix[[0, 1, 2]].reindex(b.index)
    assert_frame_equal(out, b)
    df = df_orig.copy()
    df.ix[:3] = b
    out = df.ix[:3]
    assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
    """Label slicing directly on the frame (df[start:end]) matches .ix
    for both get and set on a datetime index."""
    df = tm.makeTimeDataFrame()
    start, end = df.index[[5, 10]]
    result = df.ix[start:end]
    result2 = df[start:end]
    expected = df[5:11]
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
    result = df.copy()
    result.ix[start:end] = 0
    result2 = df.copy()
    result2[start:end] = 0
    expected = df.copy()
    expected[5:11] = 0
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
    """Boolean-array row selection via .ix matches a reindex take."""
    df = DataFrame(np.random.randn(3, 2))
    rs = df.ix[df.index == 0, :]
    xp = df.reindex([0])
    assert_frame_equal(rs, xp)
    """ #1321
    df = DataFrame(np.random.randn(3, 2))
    rs = df.ix[df.index==0, df.columns==1]
    xp = df.reindex([0], [1])
    assert_frame_equal(rs, xp)
    """
def test_ix_multi_take_nonint_index(self):
    """On non-integer axes, integer lists in .ix select by position."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=['a', 'b'])
    rs = df.ix[[0], [0]]
    xp = df.reindex(['x'], columns=['a'])
    assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
    """Positional .ix selection also works with MultiIndex columns."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=[['a', 'b'], ['1', '2']])
    rs = df.ix[[0], [0]]
    xp = df.reindex(['x'], columns=[('a', '1')])
    assert_frame_equal(rs, xp)
def test_ix_dup(self):
    """Label slicing on an index with duplicates expands to cover all
    occurrences of the boundary labels."""
    idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
    df = DataFrame(np.random.randn(len(idx), 3), idx)
    sub = df.ix[:'d']
    assert_frame_equal(sub, df)
    sub = df.ix['a':'c']
    assert_frame_equal(sub, df.ix[0:4])
    sub = df.ix['b':'d']
    assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
    """Low-dimensional .ix getitem: cross-sections, column slices,
    single columns, and that returned rows/columns are views."""
    f = self.frame
    ix = f.ix
    # return self if no slicing...for now
    self.assertIs(ix[:, :], f)
    # low dimensional slice
    xs1 = ix[2, ['C', 'B', 'A']]
    xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
    assert_series_equal(xs1, xs2)
    ts1 = ix[5:10, 2]
    ts2 = f[f.columns[2]][5:10]
    assert_series_equal(ts1, ts2)
    # positional xs
    xs1 = ix[0]
    xs2 = f.xs(f.index[0])
    assert_series_equal(xs1, xs2)
    xs1 = ix[f.index[5]]
    xs2 = f.xs(f.index[5])
    assert_series_equal(xs1, xs2)
    # single column
    assert_series_equal(ix[:, 'A'], f['A'])
    # return view: writes through the row/column hit the parent
    exp = f.copy()
    exp.values[5] = 4
    ix[5][:] = 4
    assert_frame_equal(exp, f)
    exp.values[:, 1] = 6
    ix[:, 1][:] = 6
    assert_frame_equal(exp, f)
    # slice of mixed-frame
    xs = self.mixed_frame.ix[5]
    exp = self.mixed_frame.xs(self.mixed_frame.index[5])
    assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
    """1-D .ix setitem: cross-sections by column list (label and
    positional), column slices, full rows, and whole columns."""
    # case 1: set cross-section for indices
    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
    expected['C'][2] = 1.
    expected['B'][2] = 2.
    expected['A'][2] = 3.
    assert_frame_equal(frame, expected)
    frame2 = self.frame.copy()
    frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
    assert_frame_equal(frame, expected)
    # case 2, set a section of a column
    frame = self.frame.copy()
    expected = self.frame.copy()
    vals = randn(5)
    expected.values[5:10, 2] = vals
    frame.ix[5:10, 2] = vals
    assert_frame_equal(frame, expected)
    frame2 = self.frame.copy()
    frame2.ix[5:10, 'B'] = vals
    assert_frame_equal(frame, expected)
    # case 3: full xs
    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.ix[4] = 5.
    expected.values[4] = 5.
    assert_frame_equal(frame, expected)
    frame.ix[frame.index[4]] = 6.
    expected.values[4] = 6.
    assert_frame_equal(frame, expected)
    # single column
    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.ix[:, 'A'] = 7.
    expected['A'] = 7.
    assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
    """Scalar .ix lookup agrees with column-then-row access."""
    f = self.frame
    ix = f.ix
    # individual value
    for col in f.columns:
        ts = f[col]
        # sample every 5th row to keep the loop cheap
        for idx in f.index[::5]:
            assert_almost_equal(ix[idx, col], ts[idx])
def test_setitem_fancy_scalar(self):
    """Scalar .ix assignment matches the equivalent raw-ndarray write."""
    f = self.frame
    expected = self.frame.copy()
    ix = f.ix
    # individual value
    for j, col in enumerate(f.columns):
        ts = f[col]
        for idx in f.index[::5]:
            i = f.index.get_loc(idx)
            val = randn()
            expected.values[i, j] = val
            ix[idx, col] = val
            assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
    """Boolean lists select columns; boolean vectors select rows; the
    two can be combined with slices."""
    f = self.frame
    ix = f.ix
    expected = f.reindex(columns=['B', 'D'])
    result = ix[:, [False, True, False, True]]
    assert_frame_equal(result, expected)
    expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
    result = ix[5:10, [False, True, False, True]]
    assert_frame_equal(result, expected)
    boolvec = f.index > f.index[7]
    expected = f.reindex(index=f.index[boolvec])
    result = ix[boolvec]
    assert_frame_equal(result, expected)
    result = ix[boolvec, :]
    assert_frame_equal(result, expected)
    result = ix[boolvec, 2:]
    expected = f.reindex(index=f.index[boolvec],
                         columns=['C', 'D'])
    assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
    """Boolean row masks in .ix set whole rows or a column subset."""
    # from 2d, set with booleans
    frame = self.frame.copy()
    expected = self.frame.copy()
    mask = frame['A'] > 0
    frame.ix[mask] = 0.
    expected.values[mask.values] = 0.
    assert_frame_equal(frame, expected)
    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.ix[mask, ['A', 'B']] = 0.
    expected.values[mask.values, :2] = 0.
    assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
    """Integer lists in .ix behave positionally on non-integer axes."""
    result = self.frame.ix[[1, 4, 7]]
    expected = self.frame.ix[self.frame.index[[1, 4, 7]]]
    assert_frame_equal(result, expected)
    result = self.frame.ix[:, [2, 0, 1]]
    expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]
    assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
    """More indexers than axes raises IndexingError for get and set."""
    ix = self.frame.ix
    with assertRaisesRegexp(IndexingError, 'Too many indexers'):
        ix[:, :, :]
    with assertRaises(IndexingError):
        ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
    """A boolean Series mask is aligned by label before use, so a
    reversed mask selects the same rows."""
    # boolean index misaligned labels
    mask = self.frame['A'][::-1] > 1
    result = self.frame.ix[mask]
    expected = self.frame.ix[mask[::-1]]
    assert_frame_equal(result, expected)
    cp = self.frame.copy()
    expected = self.frame.copy()
    cp.ix[mask] = 0
    expected.ix[mask] = 0
    assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
    """Simultaneous boolean row and column masks in .ix for get/set."""
    df = DataFrame(np.random.randn(3, 2))
    # get
    k1 = np.array([True, False, True])
    k2 = np.array([False, True])
    result = df.ix[k1, k2]
    expected = df.ix[[0, 2], [1]]
    assert_frame_equal(result, expected)
    expected = df.copy()
    df.ix[np.array([True, False, True]),
          np.array([False, True])] = 5
    expected.ix[[0, 2], [1]] = 5
    assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
    """Float-index slicing: .ix is label-based and end-inclusive; float
    keys in iloc are deprecated (GH 4892) and warn/raise."""
    index = Index([1.5, 2, 3, 4, 5])
    df = DataFrame(np.random.randn(5, 5), index=index)
    result = df.ix[1.5:4]
    expected = df.reindex([1.5, 2, 3, 4])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.ix[4:5]
    expected = df.reindex([4, 5])  # reindex with int
    assert_frame_equal(result, expected, check_index_type=False)
    self.assertEqual(len(result), 2)
    result = df.ix[4:5]
    expected = df.reindex([4.0, 5.0])  # reindex with float
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 2)
    # loc_float changes this to work properly
    result = df.ix[1:2]
    expected = df.iloc[0:2]
    assert_frame_equal(result, expected)
    df.ix[1:2] = 0
    result = df[1:2]
    self.assertTrue((result==0).all().all())
    # #2727
    index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
    df = DataFrame(np.random.randn(5, 5), index=index)
    # positional slicing only via iloc!
    # stacklevel=False -> needed stacklevel depends on index type
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = df.iloc[1.0:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.iloc[4:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 1)
    # GH 4892, float indexers in iloc are deprecated
    import warnings
    # NOTE(review): this mutates global warning filters; a catch_warnings
    # context would be safer -- confirm against upstream before changing
    warnings.filterwarnings(action='error', category=FutureWarning)
    cp = df.copy()

    def f():
        cp.iloc[1.0:5] = 0
    self.assertRaises(FutureWarning, f)

    def f():
        result = cp.iloc[1.0:5] == 0
    self.assertRaises(FutureWarning, f)
    # NOTE(review): `result` below is the *outer* variable left over from
    # the slicing above -- the assignment inside f() never completed.
    # Looks unintended; verify against upstream history.
    self.assertTrue(result.values.all())
    self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())
    warnings.filterwarnings(action='default', category=FutureWarning)
    cp = df.copy()
    cp.iloc[4:5] = 0
    self.assertTrue((cp.iloc[4:5] == 0).values.all())
    self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())
    # float slicing
    result = df.ix[1.0:5]
    expected = df
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 5)
    result = df.ix[1.1:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.ix[4.51:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 1)
    result = df.ix[1.0:5.0]
    expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 5)
    cp = df.copy()
    cp.ix[1.0:5.0] = 0
    result = cp.ix[1.0:5.0]
    self.assertTrue((result == 0).values.all())
def test_setitem_single_column_mixed(self):
    """Partial .ix assignment of nan into an object column keeps the
    untouched entries intact."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['str'] = 'qux'
    df.ix[::2, 'str'] = nan
    expected = [nan, 'qux', nan, 'qux', nan]
    assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
    """Setting NaT/nan into a datetime64 column yields null entries and
    preserves the column dtype accounting."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['timestamp'] = Timestamp('20010102')
    # check our dtypes
    result = df.get_dtype_counts()
    expected = Series({'float64': 3, 'datetime64[ns]': 1})
    assert_series_equal(result, expected)
    # set an allowable datetime64 type
    from pandas import tslib
    df.ix['b', 'timestamp'] = tslib.iNaT
    self.assertTrue(com.isnull(df.ix['b', 'timestamp']))
    # allow this syntax
    df.ix['c', 'timestamp'] = nan
    self.assertTrue(com.isnull(df.ix['c', 'timestamp']))
    # allow this syntax
    df.ix['d', :] = nan
    # note: deliberately checks row 'c', which was only partially nulled
    self.assertTrue(com.isnull(df.ix['c', :]).all() == False)
    # as of GH 3216 this will now work!
    # try to set with a list like item
    #self.assertRaises(
    #    Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
    """Setting a DataFrame into an .ix selection: aligned, row-unaligned
    (extra labels dropped), key-unaligned (missing columns become nan),
    raw ndarray values, and upcasting int columns to float (GH 3216)."""
    piece = self.frame.ix[:2, ['A', 'B']]
    self.frame.ix[-2:, ['A', 'B']] = piece.values
    assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # GH 3216
    # already aligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])
    key = (slice(None,2), ['A', 'B'])
    f.ix[key] = piece
    assert_almost_equal(f.ix[0:2, ['A', 'B']].values,
                        piece.values)
    # rows unaligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])
    key = (slice(None,2), ['A', 'B'])
    f.ix[key] = piece
    assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,
                        piece.values[0:2])
    # key is unaligned with values
    f = self.mixed_frame.copy()
    piece = f.ix[:2, ['A']]
    piece.index = f.index[-2:]
    key = (slice(-2, None), ['A', 'B'])
    f.ix[key] = piece
    piece['B'] = np.nan
    assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # ndarray
    f = self.mixed_frame.copy()
    piece = self.mixed_frame.ix[:2, ['A', 'B']]
    key = (slice(-2, None), ['A', 'B'])
    f.ix[key] = piece.values
    assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # needs upcasting
    df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
    df2 = df.copy()
    df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
    expected = df.reindex(columns=['A','B'])
    expected += 0.5
    expected['C'] = df['C']
    assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
    """A DataFrame value whose index already matches the target .ix
    selection is written through unchanged."""
    piece = self.frame.ix[:2, ['A', 'B']]
    piece.index = self.frame.index[-2:]
    piece.columns = ['A', 'B']
    self.frame.ix[-2:, ['A', 'B']] = piece
    assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
                        piece.values)
def test_setitem_fancy_exceptions(self):
    # TODO: placeholder -- no assertions implemented yet
    pass
def test_getitem_boolean_missing(self):
    # TODO: placeholder -- no assertions implemented yet
    pass
def test_setitem_boolean_missing(self):
    # TODO: placeholder -- no assertions implemented yet
    pass
def test_getitem_setitem_ix_duplicates(self):
    """.ix with duplicate index labels: duplicated labels return a frame
    of all matches; unique labels return a Series (GH #1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])
    result = df.ix['foo']
    expected = df[:2]
    assert_frame_equal(result, expected)
    result = df.ix['bar']
    expected = df.ix[[2, 4]]
    assert_frame_equal(result, expected)
    result = df.ix['baz']
    expected = df.ix[3]
    assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
    """List and boolean selection still work when the index contains
    duplicate labels (GH #1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])
    result = df.ix[['bar']]
    exp = df.ix[[2, 4]]
    assert_frame_equal(result, exp)
    result = df.ix[df[1] > 0]
    exp = df[df[1] > 0]
    assert_frame_equal(result, exp)
    result = df.ix[df[0] > 0]
    exp = df[df[0] > 0]
    assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
    """Scalar booleans are not valid .ix labels (GH #2199): both get and
    set raise KeyError for True and False."""
    df = DataFrame({'a': [1, 2, 3]})
    for key in (False, True):
        with self.assertRaises(KeyError):
            df.ix[key]
        with self.assertRaises(KeyError):
            df.ix[key] = 0
def test_getitem_list_duplicates(self):
    """List selection on duplicated column labels keeps the columns-axis
    name (GH #1943)."""
    # #1943
    df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
    df.columns.name = 'foo'
    result = df[['B', 'C']]
    self.assertEqual(result.columns.name, 'foo')
    expected = df.ix[:, 2:]
    assert_frame_equal(result, expected)
def test_get_value(self):
    """get_value(idx, col) agrees with column-then-row lookup for every
    cell of the fixture frame."""
    for col in self.frame.columns:
        column = self.frame[col]
        for idx in self.frame.index:
            assert_almost_equal(self.frame.get_value(idx, col),
                                column[idx])
def test_iteritems(self):
    """iteritems yields Series values even with duplicate column labels."""
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
    for _, column in compat.iteritems(df):
        self.assertEqual(type(column), Series)
def test_lookup(self):
    """lookup(rows, cols) matches a per-cell get_value reference
    implementation; bad labels raise KeyError, mismatched argument
    lengths raise ValueError."""
    def alt(df, rows, cols):
        # reference: one get_value call per (row, col) pair
        result = []
        for r, c in zip(rows, cols):
            result.append(df.get_value(r, c))
        return result

    def testit(df):
        rows = list(df.index) * len(df.columns)
        cols = list(df.columns) * len(df.index)
        result = df.lookup(rows, cols)
        expected = alt(df, rows, cols)
        assert_almost_equal(result, expected)
    testit(self.mixed_frame)
    testit(self.frame)
    df = DataFrame({'label': ['a', 'b', 'a', 'c'],
                    'mask_a': [True, True, False, True],
                    'mask_b': [True, False, False, False],
                    'mask_c': [False, True, False, True]})
    # per-row column selection driven by another column's values
    df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
    exp_mask = alt(df, df.index, 'mask_' + df['label'])
    assert_almost_equal(df['mask'], exp_mask)
    self.assertEqual(df['mask'].dtype, np.bool_)
    with tm.assertRaises(KeyError):
        self.frame.lookup(['xyz'], ['A'])
    with tm.assertRaises(KeyError):
        self.frame.lookup([self.frame.index[0]], ['xyz'])
    with tm.assertRaisesRegexp(ValueError, 'same size'):
        self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
    """set_value writes a scalar that reads back through the column."""
    for col in self.frame.columns:
        for idx in self.frame.index:
            self.frame.set_value(idx, col, 1)
            assert_almost_equal(self.frame[col][idx], 1)
def test_set_value_resize(self):
    """set_value / loc on a missing label enlarges the frame in place;
    new-column dtype follows the value (object for str/bool, float for
    int with nan fill); datetime columns reject incompatible values."""
    res = self.frame.set_value('foobar', 'B', 0)
    self.assertIs(res, self.frame)
    self.assertEqual(res.index[-1], 'foobar')
    self.assertEqual(res.get_value('foobar', 'B'), 0)
    self.frame.loc['foobar','qux'] = 0
    self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 'sam')
    self.assertEqual(res3['baz'].dtype, np.object_)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', True)
    self.assertEqual(res3['baz'].dtype, np.object_)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 5)
    self.assertTrue(com.is_float_dtype(res3['baz']))
    # only the enlarged cell is set; the rest of the new column is nan
    self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())
    self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
    """Enlargement with new row and column labels: an integer column key
    on new rows is ambiguous (treated positionally), while a fresh
    string label creates both row and column."""
    df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
    # this is actually ambiguous as the 2 is interpreted as a positional
    # so column is not created
    df = df_orig.copy()
    df.set_value('C', 2, 1.0)
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
    df = df_orig.copy()
    df.loc['C', 2] = 1.0
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
    # create both new
    df = df_orig.copy()
    df.set_value('C', 'D', 1.0)
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
    df = df_orig.copy()
    df.loc['C', 'D'] = 1.0
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
def test_get_set_value_no_partial_indexing(self):
    """get_value with only the first level of a MultiIndex raises."""
    index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
    df = DataFrame(index=index, columns=lrange(4))
    with self.assertRaises(KeyError):
        df.get_value(0, 1)
    # self.assertRaises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
    """Scalar .ix lookup from an integer column stays an integer."""
    self.frame['E'] = 1
    self.assertTrue(issubclass(self.frame['E'].dtype.type,
                               (int, np.integer)))
    result = self.frame.ix[self.frame.index[5], 'E']
    self.assertTrue(com.is_integer(result))
def test_irow(self):
    """iloc row indexing (irow is deprecated, GH 10711): scalar rows,
    slices (views -- chained mutation raises SettingWithCopy), and
    integer-list takes."""
    df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
    # 10711, deprecated
    with tm.assert_produces_warning(FutureWarning):
        df.irow(1)
    result = df.iloc[1]
    exp = df.ix[2]
    assert_series_equal(result, exp)
    result = df.iloc[2]
    exp = df.ix[4]
    assert_series_equal(result, exp)
    # slice
    result = df.iloc[slice(4, 8)]
    expected = df.ix[8:14]
    assert_frame_equal(result, expected)
    # verify slice is view
    # setting it makes it raise/warn
    def f():
        result[2] = 0.
    self.assertRaises(com.SettingWithCopyError, f)
    # the write still reached the parent through the view
    exp_col = df[2].copy()
    exp_col[4:8] = 0.
    assert_series_equal(df[2], exp_col)
    # list of integers
    result = df.iloc[[1, 2, 4, 6]]
    expected = df.reindex(df.index[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_icol(self):
    """iloc column indexing (icol is deprecated, GH 10711): scalar
    columns, slices (views), and integer-list takes."""
    df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
    # 10711, deprecated
    with tm.assert_produces_warning(FutureWarning):
        df.icol(1)
    result = df.iloc[:, 1]
    exp = df.ix[:, 2]
    assert_series_equal(result, exp)
    result = df.iloc[:, 2]
    exp = df.ix[:, 4]
    assert_series_equal(result, exp)
    # slice
    result = df.iloc[:, slice(4, 8)]
    expected = df.ix[:, 8:14]
    assert_frame_equal(result, expected)
    # verify slice is view
    # and that we are setting a copy
    def f():
        result[8] = 0.
    self.assertRaises(com.SettingWithCopyError, f)
    # the write still reached the parent through the view
    self.assertTrue((df[8] == 0).all())
    # list of integers
    result = df.iloc[:, [1, 2, 4, 6]]
    expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_irow_icol_duplicates(self):
    """Positional row/column access with duplicate labels (flat and MI)."""
    # 10711, deprecated
    df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
                   index=list('aab'))
    result = df.iloc[0]
    result2 = df.ix[0]
    tm.assertIsInstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)
    result = df.T.iloc[:, 0]
    result2 = df.T.ix[:, 0]
    tm.assertIsInstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)
    # multiindex
    df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
                                                   ['A', 'A', 'B']],
                   index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
    rs = df.iloc[0]
    xp = df.ix[0]
    assert_series_equal(rs, xp)
    rs = df.iloc[:, 0]
    xp = df.T.ix[0]
    assert_series_equal(rs, xp)
    rs = df.iloc[:, [0]]
    xp = df.ix[:, [0]]
    assert_frame_equal(rs, xp)
    # #2259
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
    result = df.iloc[:, [0]]
    expected = df.take([0], axis=1)
    assert_frame_equal(result, expected)
def test_icol_sparse_propegate_fill_value(self):
    """iloc column access on a SparseDataFrame keeps the fill value.

    NOTE(review): "propegate" in the name is a typo for "propagate";
    left unchanged so the test id stays stable.
    """
    from pandas.sparse.api import SparseDataFrame
    df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
    self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values))
def test_iget_value(self):
    """Deprecated iget_value warns; iat agrees with at for every cell."""
    # 10711 deprecated
    with tm.assert_produces_warning(FutureWarning):
        self.frame.iget_value(0,0)
    for i, row in enumerate(self.frame.index):
        for j, col in enumerate(self.frame.columns):
            result = self.frame.iat[i,j]
            expected = self.frame.at[row, col]
            assert_almost_equal(result, expected)
def test_nested_exception(self):
    """repr of a frame with a corrupted index must not fail with
    UnboundLocalError while re-raising an inner exception."""
    # Ignore the strange way of triggering the problem
    # (which may get fixed), it's just a way to trigger
    # the issue or reraising an outer exception without
    # a named argument
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8,
                                                          9]}).set_index(["a", "b"])
    # replace the first index entry with an unhashable list
    l = list(df.index)
    l[0] = ["a", "b"]
    df.index = l
    try:
        repr(df)
    except Exception as e:
        self.assertNotEqual(type(e), UnboundLocalError)
def test_reindex_methods(self):
    """Exercise reindex fill methods (nearest/pad/backfill) with
    reversed and shuffled float targets, plus tolerance handling."""
    frame = pd.DataFrame({'x': list(range(5))})
    new_index = np.array([-0.1, 0.9, 1.1, 1.5])
    fills = {'nearest': [0, 1, 1, 2],
             'pad': [np.nan, 0, 1, 1],
             'backfill': [0, 1, 2, 2]}
    # reversing the source frame swaps the directional methods
    opposite = {'nearest': 'nearest', 'pad': 'backfill', 'backfill': 'pad'}
    for method in ('nearest', 'pad', 'backfill'):
        expected = pd.DataFrame({'x': fills[method]}, index=new_index)
        # plain reindex onto the float target
        actual = frame.reindex(new_index, method=method)
        assert_frame_equal(expected, actual)
        # reindex_like onto itself with zero tolerance is a no-op
        actual = frame.reindex_like(frame, method=method, tolerance=0)
        assert_frame_equal(frame, actual)
        # a tolerance of 1 covers every target point here
        actual = frame.reindex(new_index, method=method, tolerance=1)
        assert_frame_equal(expected, actual)
        # reversed target
        actual = frame.reindex(new_index[::-1], method=method)
        assert_frame_equal(expected[::-1], actual)
        # shuffled target
        shuffle = [3, 0, 2, 1]
        actual = frame.reindex(new_index[shuffle], method=method)
        assert_frame_equal(expected.iloc[shuffle], actual)
        # reversing the source flips pad <-> backfill
        actual = frame[::-1].reindex(new_index, method=opposite[method])
        assert_frame_equal(expected, actual)
    # a tight tolerance leaves out-of-range targets as NaN
    expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=new_index)
    actual = frame.reindex(new_index, method='nearest', tolerance=0.2)
    assert_frame_equal(expected, actual)
def test_non_monotonic_reindex_methods(self):
    """reindex with a fill method requires a monotonic source index."""
    dr = pd.date_range('2013-08-01', periods=6, freq='B')
    data = np.random.randn(6,1)
    df = pd.DataFrame(data, index=dr, columns=list('A'))
    # scramble the dates: neither increasing nor decreasing
    df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
                          columns=list('A'))
    for method in ('pad', 'ffill', 'bfill', 'nearest'):
        self.assertRaises(ValueError, df_rev.reindex,
                          df.index, method=method)
def test_reindex_level(self):
    """reindex with level= against string MultiIndexes.

    Two helpers compare ``set_index(...).reindex(idx, level=...)``
    against the rows selected positionally from the flat frame.
    """
    from itertools import permutations
    icol = ['jim', 'joe', 'jolie']
    def verify_first_level(df, level, idx, check_index_type=True):
        # expected: rows whose level value appears in idx, in idx order
        f = lambda val: np.nonzero(df[level] == val)[0]
        i = np.concatenate(list(map(f, idx)))
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[i].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)
    def verify(df, level, idx, indexer, check_index_type=True):
        # expected rows given explicitly by a positional indexer
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[indexer].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)
    df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),
                       'joe':list('abcdeabcd')[::-1],
                       'jolie':[10, 20, 30] * 3,
                       'joline': np.random.randint(0, 1000, 9)})
    target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
              ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
              ['A', 'B'], ['B', 'A', 'C']]
    for idx in target:
        verify_first_level(df, 'jim', idx)
    # reindex by these causes different MultiIndex levels
    for idx in [['D', 'F'], ['A', 'C', 'B']]:
        verify_first_level(df, 'jim', idx, check_index_type=False)
    verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
    verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
    verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
    verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
    verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
    verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
    verify(df, 'joe', list('edwq'), [0, 4, 5])
    verify(df, 'joe', list('wq'), [], check_index_type=False)
    df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
                    'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
                          ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
                          ['3rd'] * 3 + ['2nd'] * 2,
                    # this needs to be jointly unique with jim and joe or
                    # reindexing will fail ~1.5% of the time, this works
                    # out to needing unique groups of same size as joe
                    'jolie': np.concatenate([np.random.choice(1000, x, replace=False)
                                             for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
                    'joline': np.random.randn(20).round(3) * 10})
    for idx in permutations(df['jim'].unique()):
        for i in range(3):
            verify_first_level(df, 'jim', idx[:i+1])
    i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]
    verify(df, 'joe', ['1st', '2nd', '3rd'], i)
    i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]
    verify(df, 'joe', ['3rd', '2nd', '1st'], i)
    i = [0,1,5,6,7,10,11,12,18,19,15,16,17]
    verify(df, 'joe', ['2nd', '3rd'], i)
    i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]
    verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
    """loc/.ix on duplicated float index labels returns all matches."""
    df = pd.DataFrame(np.random.randn(3, 3),
                      index=[0.1, 0.2, 0.2], columns=list('abc'))
    expect = df.iloc[1:]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    # mixed int/float labels
    df.index = [1, 0.2, 0.2]
    expect = df.iloc[1:]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    # duplicates bracketed by another duplicated label
    df = pd.DataFrame(np.random.randn(4, 3),
                      index=[1, 0.2, 0.2, 1], columns=list('abc'))
    expect = df.iloc[1:-1]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:-1, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    # non-adjacent duplicates
    df.index = [0.1, 0.2, 2, 0.2]
    expect = df.iloc[[1, -1]]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[[1, -1], 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
    """GH8131: assigning a SparseSeries column keeps it sparse."""
    # GH8131
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
    df['new_column'] = sp_series
    tm.assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
    """A SparseSeries assigned as a column is aligned on the index first."""
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    # series index 2,1,0 is the reverse of the frame's 0,1,2
    sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
                 .to_sparse(fill_value=0))
    df['new_column'] = sp_series
    exp = pd.Series([1, 0, 0], name='new_column')
    tm.assert_series_equal(df['new_column'], exp)
# Shared fixture data built once at import time; tests copy these in setUp.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
# same data, reversed column order
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
# same data coerced to integer columns
_intframe = DataFrame(dict((k, v.astype(int))
                           for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
# float frame plus one constant object column
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SafeForSparse(object):
    """Indexing/joining tests that are also safe for the sparse frame
    suite; mixed into TestDataFrame below.  Expects a ``self.frame``
    fixture provided by the concrete test class.
    """
    _multiprocess_can_split_ = True
    def test_copy_index_name_checking(self):
        """Renaming an axis on a copy must not touch the original."""
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        for attr in ('index', 'columns'):
            ind = getattr(self.frame, attr)
            ind.name = None
            cp = self.frame.copy()
            getattr(cp, attr).name = 'foo'
            self.assertIsNone(getattr(self.frame, attr).name)
    def test_getitem_pop_assign_name(self):
        """Column access, pop and .ix all propagate the column name."""
        s = self.frame['A']
        self.assertEqual(s.name, 'A')
        s = self.frame.pop('A')
        self.assertEqual(s.name, 'A')
        s = self.frame.ix[:, 'B']
        self.assertEqual(s.name, 'B')
        s2 = s.ix[:]
        self.assertEqual(s2.name, 'B')
    def test_get_value(self):
        """get_value agrees with column-then-row lookup for every cell."""
        for idx in self.frame.index:
            for col in self.frame.columns:
                result = self.frame.get_value(idx, col)
                expected = self.frame[col][idx]
                assert_almost_equal(result, expected)
    def test_join_index(self):
        """join on index for every `how` mode, plus error cases."""
        # left / right
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2)
        self.assertTrue(f.index.equals(joined.index))
        self.assertEqual(len(joined.columns), 4)
        joined = f.join(f2, how='left')
        self.assertTrue(joined.index.equals(f.index))
        self.assertEqual(len(joined.columns), 4)
        joined = f.join(f2, how='right')
        self.assertTrue(joined.index.equals(f2.index))
        self.assertEqual(len(joined.columns), 4)
        # inner
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2, how='inner')
        self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))
        self.assertEqual(len(joined.columns), 4)
        # outer
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2, how='outer')
        self.assertTrue(tm.equalContents(self.frame.index, joined.index))
        self.assertEqual(len(joined.columns), 4)
        # unknown how= value
        assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
        # corner case - overlapping columns
        for how in ('outer', 'left', 'inner'):
            with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):
                self.frame.join(self.frame, how=how)
    def test_join_index_more(self):
        """join fills unmatched rows with NaN; right join restricts rows."""
        af = self.frame.ix[:, ['A', 'B']]
        bf = self.frame.ix[::2, ['C', 'D']]
        expected = af.copy()
        expected['C'] = self.frame['C'][::2]
        expected['D'] = self.frame['D'][::2]
        result = af.join(bf)
        assert_frame_equal(result, expected)
        result = af.join(bf, how='right')
        assert_frame_equal(result, expected[::2])
        result = bf.join(af, how='right')
        assert_frame_equal(result, expected.ix[:, result.columns])
    def test_join_index_series(self):
        """Joining a named Series behaves like joining a one-column frame;
        a nameless Series is rejected."""
        df = self.frame.copy()
        s = df.pop(self.frame.columns[-1])
        joined = df.join(s)
        assert_frame_equal(joined, self.frame, check_names=False)  # TODO should this check_names ?
        s.name = None
        assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
    def test_join_overlap(self):
        """Overlapping columns are disambiguated via lsuffix/rsuffix."""
        df1 = self.frame.ix[:, ['A', 'B', 'C']]
        df2 = self.frame.ix[:, ['B', 'C', 'D']]
        joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
        df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
        df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
        no_overlap = self.frame.ix[:, ['A', 'D']]
        expected = df1_suf.join(df2_suf).join(no_overlap)
        # column order not necessarily sorted
        assert_frame_equal(joined, expected.ix[:, joined.columns])
    def test_add_prefix_suffix(self):
        """add_prefix/add_suffix rewrite every column label."""
        with_prefix = self.frame.add_prefix('foo#')
        expected = ['foo#%s' % c for c in self.frame.columns]
        self.assert_numpy_array_equal(with_prefix.columns, expected)
        with_suffix = self.frame.add_suffix('#foo')
        expected = ['%s#foo' % c for c in self.frame.columns]
        self.assert_numpy_array_equal(with_suffix.columns, expected)
class TestDataFrame(tm.TestCase, CheckIndexing,
                    SafeForSparse):
    """Main DataFrame test class; mixes in shared indexing/join suites."""
    # concrete frame class under test (mixins construct via this)
    klass = DataFrame
    _multiprocess_can_split_ = True
def setUp(self):
    """Build per-test copies of the module-level fixture frames."""
    self.frame = _frame.copy()
    self.frame2 = _frame2.copy()
    # force these all to int64 to avoid platform testing issues
    self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)
    self.tsframe = _tsframe.copy()
    self.mixed_frame = _mixed_frame.copy()
    # frames mixing several float widths
    self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),
                                   'B': _frame['B'].copy().astype('float32'),
                                   'C': _frame['C'].copy().astype('float16'),
                                   'D': _frame['D'].copy().astype('float64') })
    self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),
                                    'B': _frame2['B'].copy().astype('float32'),
                                    'C': _frame2['C'].copy().astype('float16'),
                                    'D': _frame2['D'].copy().astype('float64') })
    # frame mixing signed/unsigned int widths
    self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),
                                 'B': np.ones(len(_intframe['B']),dtype='uint64'),
                                 'C': _intframe['C'].copy().astype('uint8'),
                                 'D': _intframe['D'].copy().astype('int64') })
    self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),
                                'int32' : np.array([1]*10,dtype='int32'),
                                }, index=np.arange(10))
    # tz-aware frame with NaT holes punched in the tz columns
    self.tzframe = DataFrame({'A' : date_range('20130101',periods=3),
                              'B' : date_range('20130101',periods=3,tz='US/Eastern'),
                              'C' : date_range('20130101',periods=3,tz='CET')})
    self.tzframe.iloc[1,1] = pd.NaT
    self.tzframe.iloc[1,2] = pd.NaT
    # time series truncated at different ends for alignment tests
    self.ts1 = tm.makeTimeSeries()
    self.ts2 = tm.makeTimeSeries()[5:]
    self.ts3 = tm.makeTimeSeries()[-5:]
    self.ts4 = tm.makeTimeSeries()[1:-1]
    self.ts_dict = {
        'col1': self.ts1,
        'col2': self.ts2,
        'col3': self.ts3,
        'col4': self.ts4,
    }
    self.empty = DataFrame({})
    arr = np.array([[1., 2., 3.],
                    [4., 5., 6.],
                    [7., 8., 9.]])
    self.simple = DataFrame(arr, columns=['one', 'two', 'three'],
                            index=['a', 'b', 'c'])
def test_get_axis(self):
    """Axis number/name resolution on a DataFrame, valid and invalid."""
    f = self.frame
    # number lookup by int and by alias
    for alias, num in [(0, 0), (1, 1), ('index', 0),
                       ('rows', 0), ('columns', 1)]:
        self.assertEqual(f._get_axis_number(alias), num)
    # name lookup by int and by alias
    for alias, name in [(0, 'index'), (1, 'columns'), ('index', 'index'),
                        ('rows', 'index'), ('columns', 'columns')]:
        self.assertEqual(f._get_axis_name(alias), name)
    # _get_axis returns the actual Index objects (identity)
    self.assertIs(f._get_axis(0), f.index)
    self.assertIs(f._get_axis(1), f.columns)
    # invalid axes raise with an informative message
    assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
    assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
    assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
    assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)
def test_set_index(self):
    """Assigning a new index propagates to cached column Series and
    validates length."""
    idx = Index(np.arange(len(self.mixed_frame)))
    # cache it
    _ = self.mixed_frame['foo']
    self.mixed_frame.index = idx
    self.assertIs(self.mixed_frame['foo'].index, idx)
    with assertRaisesRegexp(ValueError, 'Length mismatch'):
        self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
    """.ix label lookup still works after casting the index to int32."""
    # issue casting an index then set_index
    df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},
                   index = [2010,2011,2012])
    expected = df.ix[2010]
    new_index = df.index.astype(np.int32)
    df.index = new_index
    result = df.ix[2010]
    assert_series_equal(result,expected)
def test_set_index2(self):
    """set_index: single/multi column, drop=, inplace=, append=,
    verify_integrity= and Series-keyed variants."""
    df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
                    'B': ['one', 'two', 'three', 'one', 'two'],
                    'C': ['a', 'b', 'c', 'd', 'e'],
                    'D': np.random.randn(5),
                    'E': np.random.randn(5)})
    # new object, single-column
    result = df.set_index('C')
    result_nodrop = df.set_index('C', drop=False)
    index = Index(df['C'], name='C')
    expected = df.ix[:, ['A', 'B', 'D', 'E']]
    expected.index = index
    expected_nodrop = df.copy()
    expected_nodrop.index = index
    assert_frame_equal(result, expected)
    assert_frame_equal(result_nodrop, expected_nodrop)
    self.assertEqual(result.index.name, index.name)
    # inplace, single
    df2 = df.copy()
    df2.set_index('C', inplace=True)
    assert_frame_equal(df2, expected)
    df3 = df.copy()
    df3.set_index('C', drop=False, inplace=True)
    assert_frame_equal(df3, expected_nodrop)
    # create new object, multi-column
    result = df.set_index(['A', 'B'])
    result_nodrop = df.set_index(['A', 'B'], drop=False)
    index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
    expected = df.ix[:, ['C', 'D', 'E']]
    expected.index = index
    expected_nodrop = df.copy()
    expected_nodrop.index = index
    assert_frame_equal(result, expected)
    assert_frame_equal(result_nodrop, expected_nodrop)
    self.assertEqual(result.index.names, index.names)
    # inplace
    df2 = df.copy()
    df2.set_index(['A', 'B'], inplace=True)
    assert_frame_equal(df2, expected)
    df3 = df.copy()
    df3.set_index(['A', 'B'], drop=False, inplace=True)
    assert_frame_equal(df3, expected_nodrop)
    # corner case: column A holds duplicates
    with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
        df.set_index('A', verify_integrity=True)
    # append
    result = df.set_index(['A', 'B'], append=True)
    xp = df.reset_index().set_index(['index', 'A', 'B'])
    xp.index.names = [None, 'A', 'B']
    assert_frame_equal(result, xp)
    # append to existing multiindex
    rdf = df.set_index(['A'], append=True)
    rdf = rdf.set_index(['B', 'C'], append=True)
    expected = df.set_index(['A', 'B', 'C'], append=True)
    assert_frame_equal(rdf, expected)
    # Series
    result = df.set_index(df.C)
    self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
    """An inplace set_index with verify_integrity=True must fail on
    duplicate keys and leave the frame untouched."""
    data = {'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
            'B': ['one', 'two', 'three', 'one', 'two'],
            'C': ['a', 'b', 'c', 'd', 'e'],
            'D': np.random.randn(5),
            'E': np.random.randn(5)}
    frame = DataFrame(data)
    with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
        frame.set_index('A', verify_integrity=True, inplace=True)
    # the failed inplace set_index must not have dropped column A
    self.assertIn('A', frame)
def test_set_index_bug(self):
    """GH1590: set_index after select() keeps only the filtered rows."""
    # GH1590
    df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
    df2 = df.select(lambda indx: indx >= 1)
    rs = df2.set_index('key')
    xp = DataFrame({'val': [1, 2]},
                   Index(['b', 'c'], name='key'))
    assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
    """set_index accepts a mix of column names and raw arrays."""
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                    'B': ['one', 'one', 'two', 'three',
                          'two', 'two', 'one', 'three'],
                    'C': np.random.randn(8),
                    'D': np.random.randn(8)})
    # multiple columns
    result = df.set_index(['A', df['B'].values], drop=False)
    expected = df.set_index(['A', 'B'], drop=False)
    assert_frame_equal(result, expected, check_names=False)  # TODO should set_index check_names ?
def test_construction_with_categorical_index(self):
    """set_index on categorical data round-trips to a CategoricalIndex."""
    ci = tm.makeCategoricalIndex(10)
    # with Categorical
    df = DataFrame({'A' : np.random.randn(10),
                    'B' : ci.values })
    idf = df.set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    # from a CategoricalIndex
    df = DataFrame({'A' : np.random.randn(10),
                    'B' : ci })
    idf = df.set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    # round-trip through reset_index
    idf = df.set_index('B').reset_index().set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    # assigning the categorical column as the index directly
    new_df = idf.reset_index()
    new_df.index = df.B
    tm.assert_index_equal(new_df.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
def test_set_index_cast_datetimeindex(self):
    """Datetime columns as index: naive casts to DatetimeIndex, while
    tz-aware values stay as object Timestamps (GH 6032, GH 6785, GH 3950)."""
    df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
                          for i in range(1000)],
                    'B': np.random.randn(1000)})
    idf = df.set_index('A')
    tm.assertIsInstance(idf.index, DatetimeIndex)
    # don't cast a DatetimeIndex WITH a tz, leave as object
    # GH 6032
    i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors="raise")).tz_localize('US/Pacific')
    df = DataFrame(np.random.randn(2,1),columns=['A'])
    expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
                                pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype="object"))
    # convert index to series
    result = Series(i)
    assert_series_equal(result, expected)
    # assign to frame
    df['B'] = i
    result = df['B']
    assert_series_equal(result, expected, check_names=False)
    self.assertEqual(result.name, 'B')
    # keep the timezone
    result = i.to_series(keep_tz=True)
    assert_series_equal(result.reset_index(drop=True), expected)
    # convert to utc
    df['C'] = i.to_series().reset_index(drop=True)
    result = df['C']
    comp = DatetimeIndex(expected.values).copy()
    comp.tz = None
    self.assert_numpy_array_equal(result.values, comp.values)
    # list of datetimes with a tz
    df['D'] = i.to_pydatetime()
    result = df['D']
    assert_series_equal(result, expected, check_names=False)
    self.assertEqual(result.name, 'D')
    # GH 6785
    # set the index manually
    import pytz
    df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])
    expected = df.set_index('ts')
    df.index = df['ts']
    df.pop('ts')
    assert_frame_equal(df, expected)
    # GH 3950
    # reset_index with single level
    for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
        idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')
        df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
        expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),
                                         datetime(2011, 1, 3), datetime(2011, 1, 4),
                                         datetime(2011, 1, 5)],
                                 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
                                columns=['idx', 'a', 'b'])
        expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))
        assert_frame_equal(df.reset_index(), expected)
def test_set_index_multiindexcolumns(self):
    """set_index keyed by a tuple from a MultiIndex column axis."""
    columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
    df = DataFrame(np.random.randn(3, 3), columns=columns)
    rs = df.set_index(df.columns[0])
    xp = df.ix[:, 1:]
    xp.index = df.ix[:, 0].values
    xp.index.names = [df.columns[0]]
    assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
    """#1971: set_index including an all-missing column must work."""
    rows = [dict(a=1, p=0),
            dict(a=2, m=10),
            dict(a=3, m=11, p=20),
            dict(a=4, m=12, p=21)]
    # column 'x' never appears in the records, so it is entirely missing
    frame = DataFrame(rows, columns=('a', 'm', 'p', 'x'))
    # it works!
    result = frame.set_index(['a', 'x'])
    repr(result)
def test_set_columns(self):
    """Column-axis assignment validates the new length."""
    cols = Index(np.arange(len(self.mixed_frame.columns)))
    self.mixed_frame.columns = cols
    with assertRaisesRegexp(ValueError, 'Length mismatch'):
        self.mixed_frame.columns = cols[::2]
def test_keys(self):
    """keys() returns the columns Index itself (identity, not a copy)."""
    self.assertIs(self.frame.keys(), self.frame.columns)
def test_column_contains_typeerror(self):
    """``columns in frame`` may raise TypeError; anything worse fails
    the test.  The bare try/except is deliberate."""
    try:
        self.frame.columns in self.frame
    except TypeError:
        pass
def test_constructor(self):
    """An empty DataFrame (no args, or an empty data dict) has no rows."""
    empty = DataFrame()
    self.assertEqual(len(empty.index), 0)
    empty = DataFrame(data={})
    self.assertEqual(len(empty.index), 0)
def test_constructor_mixed(self):
    """Smoke-test construction from a mixed-type dict, with and without
    an explicit index.

    NOTE(review): the two constructed frames are never asserted on;
    successful construction itself is the (smoke) test here.
    """
    index, data = tm.getMixedTypeDict()
    indexed_frame = DataFrame(data, index=index)
    unindexed_frame = DataFrame(data)
    self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_cast_failure(self):
    """Impossible dtype casts fall back to object; odd-shaped column
    assignment raises (GH 3010)."""
    foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
    self.assertEqual(foo['a'].dtype, object)
    # GH 3010, constructing with odd arrays
    df = DataFrame(np.ones((4,2)))
    # this is ok: list of lists becomes an object column
    df['foo'] = np.ones((4,2)).tolist()
    # this is not ok: a raw 2-d ndarray for a single column
    self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))
    # this is ok
    df['foo2'] = np.ones((4,2)).tolist()
def test_constructor_dtype_copy(self):
    """copy=True must detach the new frame from its source."""
    source = DataFrame({'col1': [1.], 'col2': [2.], 'col3': [3.]})
    copied = pd.DataFrame(source, dtype=float, copy=True)
    copied['col1'] = 200.
    # mutating the copy must not leak back into the original
    self.assertEqual(source['col1'][0], 1.)
def test_constructor_dtype_nocast_view(self):
    """Constructing with the same dtype must not copy: writes through
    the new frame are visible in the source."""
    df = DataFrame([[1, 2]])
    should_be_view = DataFrame(df, dtype=df[0].dtype)
    should_be_view[0][0] = 99
    self.assertEqual(df.values[0, 0], 99)
    # same for construction from the underlying ndarray
    should_be_view = DataFrame(df.values, dtype=df[0].dtype)
    should_be_view[0][0] = 97
    self.assertEqual(df.values[0, 0], 97)
def test_constructor_dtype_list_data(self):
    """dtype=object keeps None as-is and leaves strings unconverted."""
    df = DataFrame([[1, '2'],
                    [None, 'a']], dtype=object)
    self.assertIsNone(df.ix[1, 0])
    self.assertEqual(df.ix[0, 1], '2')
def test_constructor_list_frames(self):
    """GH 3243: a list of DataFrames builds an object frame of frames."""
    # GH 3243
    result = DataFrame([DataFrame([])])
    self.assertEqual(result.shape, (1,0))
    result = DataFrame([DataFrame(dict(A = lrange(5)))])
    tm.assertIsInstance(result.iloc[0,0], DataFrame)
def test_constructor_mixed_dtypes(self):
    """Frames mixing many int/float dtypes keep each column's dtype."""
    def _make_mixed_dtypes_df(typ, ad = None):
        # build one array per dtype in the requested family; extra
        # columns can be injected via the `ad` dict
        if typ == 'int':
            dtypes = MIXED_INT_DTYPES
            arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]
        elif typ == 'float':
            dtypes = MIXED_FLOAT_DTYPES
            arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]
        zipper = lzip(dtypes,arrays)
        for d,a in zipper:
            assert(a.dtype == d)
        if ad is None:
            ad = dict()
        ad.update(dict([ (d,a) for d,a in zipper ]))
        return DataFrame(ad)
    def _check_mixed_dtypes(df, dtypes = None):
        # every column named after a dtype must actually have that dtype
        if dtypes is None:
            dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
        for d in dtypes:
            if d in df:
                assert(df.dtypes[d] == d)
    # mixed floating and integer coexist in the same frame
    df = _make_mixed_dtypes_df('float')
    _check_mixed_dtypes(df)
    # add lots of types
    df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))
    _check_mixed_dtypes(df)
    # GH 622
    df = _make_mixed_dtypes_df('int')
    _check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
    """GH10952: complex64/complex128 dtypes survive construction."""
    c64 = np.random.rand(10).astype(np.complex64)
    c128 = np.random.rand(10).astype(np.complex128)
    frame = DataFrame({'a': c64, 'b': c128})
    self.assertEqual(c64.dtype, frame.a.dtype)
    self.assertEqual(c128.dtype, frame.b.dtype)
def test_constructor_rec(self):
    """Construction from a numpy record array preserves field names and
    honours index/columns arguments."""
    rec = self.frame.to_records(index=False)
    # Assigning causes segfault in NumPy < 1.5.1
    # rec.dtype.names = list(rec.dtype.names)[::-1]
    index = self.frame.index
    df = DataFrame(rec)
    self.assert_numpy_array_equal(df.columns, rec.dtype.names)
    df2 = DataFrame(rec, index=index)
    self.assert_numpy_array_equal(df2.columns, rec.dtype.names)
    self.assertTrue(df2.index.equals(index))
    # column subset/reorder via the columns argument
    rng = np.arange(len(rec))[::-1]
    df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
    expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
    assert_frame_equal(df3, expected)
def test_constructor_bool(self):
    """An all-boolean dict of arrays yields a bool-typed values block."""
    frame = DataFrame({0: np.ones(10, dtype=bool),
                       1: np.zeros(10, dtype=bool)})
    self.assertEqual(frame.values.dtype, np.bool_)
def test_constructor_overflow_int64(self):
    """Values above the int64 range fall back to object dtype (#2355)."""
    values = np.array([2 ** 64 - i for i in range(1, 10)],
                      dtype=np.uint64)
    result = DataFrame({'a': values})
    self.assertEqual(result['a'].dtype, object)
    # #2355
    data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
                   (8921811264899370420, 45), (long(17019687244989530680), 270),
                   (long(9930107427299601010), 273)]
    dtype = [('uid', 'u8'), ('score', 'u8')]
    data = np.zeros((len(data_scores),), dtype=dtype)
    data[:] = data_scores
    df_crawls = DataFrame(data)
    # u8 field overflowing int64 also becomes object
    self.assertEqual(df_crawls['uid'].dtype, object)
def test_constructor_ordereddict(self):
    """Column order of an OrderedDict input is preserved."""
    import random
    nitems = 100
    shuffled = lrange(nitems)
    random.shuffle(shuffled)
    expected = ['A%d' % i for i in shuffled]
    frame = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
    self.assertEqual(expected, list(frame.columns))
def test_constructor_dict(self):
    """Construction from dicts of Series: alignment, missing columns,
    empty/corner cases, and scalar-value errors (GH10856)."""
    frame = DataFrame({'col1': self.ts1,
                       'col2': self.ts2})
    tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
    tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
    # columns= filters the dict and adds all-NaN missing columns
    frame = DataFrame({'col1': self.ts1,
                       'col2': self.ts2},
                      columns=['col2', 'col3', 'col4'])
    self.assertEqual(len(frame), len(self.ts2))
    self.assertNotIn('col1', frame)
    self.assertTrue(isnull(frame['col3']).all())
    # Corner cases
    self.assertEqual(len(DataFrame({})), 0)
    # mix dict and array, wrong size - no spec for which error should raise
    # first
    with tm.assertRaises(ValueError):
        DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
    # Length-one dict micro-optimization
    frame = DataFrame({'A': {'1': 1, '2': 2}})
    self.assert_numpy_array_equal(frame.index, ['1', '2'])
    # empty dict plus index
    idx = Index([0, 1, 2])
    frame = DataFrame({}, index=idx)
    self.assertIs(frame.index, idx)
    # empty with index and columns
    idx = Index([0, 1, 2])
    frame = DataFrame({}, index=idx, columns=idx)
    self.assertIs(frame.index, idx)
    self.assertIs(frame.columns, idx)
    self.assertEqual(len(frame._series), 3)
    # with dict of empty list and Series
    frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
    self.assertTrue(frame.index.equals(Index([])))
    # GH10856
    # dict with scalar values should raise error, even if columns passed
    with tm.assertRaises(ValueError):
        DataFrame({'a': 0.7})
    with tm.assertRaises(ValueError):
        DataFrame({'a': 0.7}, columns=['a'])
    with tm.assertRaises(ValueError):
        DataFrame({'a': 0.7}, columns=['b'])
def test_constructor_multi_index(self):
    """GH 4078: an all-NaN frame built from a MultiIndex containing
    duplicate tuples must construct cleanly."""
    for tuples in ([(2, 3), (3, 3), (3, 3)],
                   [(3, 3), (2, 3), (3, 3)]):
        mi = MultiIndex.from_tuples(tuples)
        frame = DataFrame(index=mi, columns=mi)
        self.assertTrue(pd.isnull(frame).values.ravel().all())
def test_constructor_error_msgs(self):
    """Constructor failures raise with the documented error messages."""
    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    # mix dict and array, wrong size
    with assertRaisesRegexp(ValueError, msg):
        DataFrame({'A': {'a': 'a', 'b': 'b'},
                   'B': ['a', 'b', 'c']})
    # wrong size ndarray, GH 3105
    msg = "Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
    with assertRaisesRegexp(ValueError, msg):
        DataFrame(np.arange(12).reshape((4, 3)),
                  columns=['foo', 'bar', 'baz'],
                  index=date_range('2000-01-01', periods=3))
    # higher dim raise exception
    with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
        DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
    # wrong size axis labels
    with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(3, 1\)"):
        DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])
    with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(2, 2\)"):
        DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])
    # all-scalar dict without an index
    with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):
        DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
    """A frame of frames reprs cleanly and supports scalar .loc access."""
    # embedded data frames
    df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})
    df2 = DataFrame([df1, df1+10])
    # smoke: dtypes and repr must not blow up
    df2.dtypes
    str(df2)
    result = df2.loc[0,0]
    assert_frame_equal(result,df1)
    result = df2.loc[1,0]
    assert_frame_equal(result,df1+10)
def test_insert_error_msmgs(self):
    """Error messages for bad column insertion (GH 7432, GH 4107).

    NOTE(review): "msmgs" in the name is a typo for "msgs"; left
    unchanged so the test id stays stable.
    """
    # GH 7432
    df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')
    s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')
    msg = 'cannot reindex from a duplicate axis'
    with assertRaisesRegexp(ValueError, msg):
        df['newcol'] = s
    # GH 4107, more descriptive error message
    df = DataFrame(np.random.randint(0,2,(4,4)),
                   columns=['a', 'b', 'c', 'd'])
    msg = 'incompatible index of inserted column with frame index'
    with assertRaisesRegexp(TypeError, msg):
        df['gr'] = df.groupby(['b', 'c']).count()
def test_frame_subclassing_and_slicing(self):
    """_constructor/_constructor_sliced hooks keep subclass types
    through column access and slicing (PR 9632)."""
    # Subclass frame and ensure it returns the right class on slicing it
    # In reference to PR 9632
    class CustomSeries(Series):
        @property
        def _constructor(self):
            return CustomSeries
        def custom_series_function(self):
            return 'OK'
    class CustomDataFrame(DataFrame):
        "Subclasses pandas DF, fills DF with simulation results, adds some custom plotting functions."
        def __init__(self, *args, **kw):
            super(CustomDataFrame, self).__init__(*args, **kw)
        @property
        def _constructor(self):
            return CustomDataFrame
        _constructor_sliced = CustomSeries
        def custom_frame_function(self):
            return 'OK'
    data = {'col1': range(10),
            'col2': range(10)}
    cdf = CustomDataFrame(data)
    # Did we get back our own DF class?
    self.assertTrue(isinstance(cdf, CustomDataFrame))
    # Do we get back our own Series class after selecting a column?
    cdf_series = cdf.col1
    self.assertTrue(isinstance(cdf_series, CustomSeries))
    self.assertEqual(cdf_series.custom_series_function(), 'OK')
    # Do we get back our own DF class after slicing row-wise?
    cdf_rows = cdf[1:5]
    self.assertTrue(isinstance(cdf_rows, CustomDataFrame))
    self.assertEqual(cdf_rows.custom_frame_function(), 'OK')
    # Make sure sliced part of multi-index frame is custom class
    mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])
    cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
    self.assertTrue(isinstance(cdf_multi['A'], CustomDataFrame))
    mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
    cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
    self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))
    def test_constructor_subclass_dict(self):
        """dict subclasses (including defaultdict) are valid constructor input
        and behave exactly like plain dicts.
        """
        # Test for passing dict subclass to constructor
        data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
                'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
        df = DataFrame(data)
        # reference frame built from plain dicts with the same contents
        refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
                               for col, val in compat.iteritems(data)))
        assert_frame_equal(refdf, df)

        # outer mapping may be a dict subclass too
        data = tm.TestSubDict(compat.iteritems(data))
        df = DataFrame(data)
        assert_frame_equal(refdf, df)

        # try with defaultdict
        from collections import defaultdict
        data = {}
        self.frame['B'][:10] = np.nan
        for k, v in compat.iteritems(self.frame):
            dct = defaultdict(dict)
            dct.update(v.to_dict())
            data[k] = dct
        frame = DataFrame(data)
        assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = [[4., 3., 2., 1.]]
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
assert_almost_equal(df.values, expected)
    def test_constructor_dict_cast(self):
        """``dtype=float`` casts castable columns; columns that cannot be cast
        keep object dtype instead of raising.
        """
        # cast float tests
        test_data = {
            'A': {'1': 1, '2': 2},
            'B': {'1': '1', '2': '2', '3': '3'},
        }
        frame = DataFrame(test_data, dtype=float)
        self.assertEqual(len(frame), 3)
        self.assertEqual(frame['B'].dtype, np.float64)
        self.assertEqual(frame['A'].dtype, np.float64)

        # without an explicit dtype, the string column stays object
        frame = DataFrame(test_data)
        self.assertEqual(len(frame), 3)
        self.assertEqual(frame['B'].dtype, np.object_)
        self.assertEqual(frame['A'].dtype, np.float64)

        # can't cast to float
        test_data = {
            'A': dict(zip(range(20), tm.makeStringIndex(20))),
            'B': dict(zip(range(15), randn(15)))
        }
        frame = DataFrame(test_data, dtype=float)
        self.assertEqual(len(frame), 20)
        self.assertEqual(frame['A'].dtype, np.object_)
        self.assertEqual(frame['B'].dtype, np.float64)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
tm.assertIsInstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
tm.assertIsInstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))
assert_frame_equal(result, expected, check_dtype=False)
    def test_constructor_dict_multiindex(self):
        """Tuple dict keys become a MultiIndex on the columns; once a non-tuple
        key is mixed in, the keys fall back to a plain non-tupleized Index.
        """
        check = lambda result, expected: tm.assert_frame_equal(
            result, expected, check_dtype=True, check_index_type=True,
            check_column_type=True, check_names=True)
        d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
             ('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
             ('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
        # sorted key/value pairs give a deterministic expected layout
        _d = sorted(d.items())
        df = DataFrame(d)
        expected = DataFrame(
            [x[1] for x in _d],
            index=MultiIndex.from_tuples([x[0] for x in _d])).T
        expected.index = MultiIndex.from_tuples(expected.index)
        check(df, expected)

        # mixing in a scalar key ('z') prevents tupleization of the columns
        d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
        _d.insert(0, ('z', d['z']))
        expected = DataFrame(
            [x[1] for x in _d],
            index=Index([x[0] for x in _d], tupleize_cols=False)).T
        expected.index = Index(expected.index, tupleize_cols=False)
        df = DataFrame(d)
        df = df.reindex(columns=expected.columns, index=expected.index)
        check(df, expected)
    def test_constructor_dict_datetime64_index(self):
        """np.datetime64, datetime, and Timestamp dict keys all yield the same
        Timestamp-indexed frame (GH 10160).
        """
        # GH 10160
        dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']

        def create_data(constructor):
            # one column per position, each holding a single date-keyed value
            return dict((i, {constructor(s): 2*i}) for i, s in enumerate(dates_as_str))

        data_datetime64 = create_data(np.datetime64)
        data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
        data_Timestamp = create_data(Timestamp)

        expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
                              {0: None, 1: 2, 2: None, 3: None},
                              {0: None, 1: None, 2: 4, 3: None},
                              {0: None, 1: None, 2: None, 3: 6}],
                             index=[Timestamp(dt) for dt in dates_as_str])

        result_datetime64 = DataFrame(data_datetime64)
        result_datetime = DataFrame(data_datetime)
        result_Timestamp = DataFrame(data_Timestamp)
        assert_frame_equal(result_datetime64, expected)
        assert_frame_equal(result_datetime, expected)
        assert_frame_equal(result_Timestamp, expected)
    def test_constructor_dict_timedelta64_index(self):
        """np.timedelta64, timedelta, and Timedelta dict keys all yield the
        same Timedelta-indexed frame (GH 10160).
        """
        # GH 10160
        td_as_int = [1, 2, 3, 4]

        def create_data(constructor):
            # one column per position, each holding a single timedelta-keyed value
            return dict((i, {constructor(s): 2*i}) for i, s in enumerate(td_as_int))

        data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
        data_timedelta = create_data(lambda x: timedelta(days=x))
        data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))

        expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
                              {0: None, 1: 2, 2: None, 3: None},
                              {0: None, 1: None, 2: 4, 3: None},
                              {0: None, 1: None, 2: None, 3: 6}],
                             index=[Timedelta(td, 'D') for td in td_as_int])

        result_timedelta64 = DataFrame(data_timedelta64)
        result_timedelta = DataFrame(data_timedelta)
        result_Timedelta = DataFrame(data_Timedelta)
        assert_frame_equal(result_timedelta64, expected)
        assert_frame_equal(result_timedelta, expected)
        assert_frame_equal(result_Timedelta, expected)
def test_nested_dict_frame_constructor(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
    def _check_basic_constructor(self, empty):
        """Shared constructor checks for a 2-D container type.

        ``empty`` is a factory (e.g. ``np.ones`` or ``ma.masked_all``) called
        with a shape and dtype.  The matrix built here is shape (2, 3); the
        previous docstring's "(3, 2)" (sic, "shpae") was wrong.
        """
        mat = empty((2, 3), dtype=float)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])

        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)

        # 1-D input
        frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
        self.assertEqual(len(frame.index), 3)
        self.assertEqual(len(frame.columns), 1)

        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.int64)
        self.assertEqual(frame.values.dtype, np.int64)

        # wrong size axis labels
        msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
        with assertRaisesRegexp(ValueError, msg):
            DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
        msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
        with assertRaisesRegexp(ValueError, msg):
            DataFrame(mat, columns=['A', 'B'], index=[1, 2])

        # higher dim raise exception
        with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
            DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
                      index=[1])

        # automatic labeling
        frame = DataFrame(mat)
        self.assert_numpy_array_equal(frame.index, lrange(2))
        self.assert_numpy_array_equal(frame.columns, lrange(3))

        frame = DataFrame(mat, index=[1, 2])
        self.assert_numpy_array_equal(frame.columns, lrange(3))

        frame = DataFrame(mat, columns=['A', 'B', 'C'])
        self.assert_numpy_array_equal(frame.index, lrange(2))

        # 0-length axis
        frame = DataFrame(empty((0, 3)))
        self.assertEqual(len(frame.index), 0)

        frame = DataFrame(empty((3, 0)))
        self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
    def test_constructor_maskedarray(self):
        """Masked entries become NaN; unmasked entries come through verbatim."""
        self._check_basic_constructor(ma.masked_all)

        # Check non-masked values
        mat = ma.masked_all((2, 3), dtype=float)
        mat[0, 0] = 1.0
        mat[1, 2] = 2.0
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(1.0, frame['A'][1])
        self.assertEqual(2.0, frame['C'][2])

        # an all-masked input becomes all-NaN, and NaN != NaN, so every
        # element-wise self-comparison is False
        mat = ma.masked_all((2, 3), dtype=float)
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertTrue(np.all(~np.asarray(frame == frame)))
    def test_constructor_maskedarray_nonfloat(self):
        """Masked int promotes to float (NaN), masked datetime64 keeps its
        dtype (NaT), masked bool promotes to object.
        """
        # masked int promoted to float
        mat = ma.masked_all((2, 3), dtype=int)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])

        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        # all-NaN, so the frame is not element-wise equal to itself
        self.assertTrue(np.all(~np.asarray(frame == frame)))

        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.float64)
        self.assertEqual(frame.values.dtype, np.float64)

        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = 1
        mat2[1, 2] = 2
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(1, frame['A'][1])
        self.assertEqual(2, frame['C'][2])

        # masked np.datetime64 stays (use lib.NaT as null)
        mat = ma.masked_all((2, 3), dtype='M8[ns]')
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])

        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        self.assertTrue(isnull(frame).values.all())

        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.int64)
        self.assertEqual(frame.values.dtype, np.int64)

        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = 1
        mat2[1, 2] = 2
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        # compare through the underlying i8 view of the datetime values
        self.assertEqual(1, frame['A'].view('i8')[1])
        self.assertEqual(2, frame['C'].view('i8')[2])

        # masked bool promoted to object
        mat = ma.masked_all((2, 3), dtype=bool)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])

        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        self.assertTrue(np.all(~np.asarray(frame == frame)))

        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=object)
        self.assertEqual(frame.values.dtype, object)

        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = True
        mat2[1, 2] = False
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(True, frame['A'][1])
        self.assertEqual(False, frame['C'][2])
    def test_constructor_mrecarray(self):
        """An mrecarray must construct a frame identical to one built from the
        equivalent dict of (filled) masked arrays (GH 3479).
        """
        # Ensure mrecarray produces frame identical to dict of masked arrays
        # from GH3479
        assert_fr_equal = functools.partial(assert_frame_equal,
                                            check_index_type=True,
                                            check_column_type=True,
                                            check_frame_type=True)
        arrays = [
            ('float', np.array([1.5, 2.0])),
            ('int', np.array([1, 2])),
            ('str', np.array(['abc', 'def'])),
        ]
        # add a partially-masked variant of each base array
        # (iterate over a copy since we append while looping)
        for name, arr in arrays[:]:
            arrays.append(('masked1_' + name,
                           np.ma.masked_array(arr, mask=[False, True])))
        arrays.append(('masked_all', np.ma.masked_all((2,))))
        arrays.append(('masked_none',
                       np.ma.masked_array([1.0, 2.5], mask=False)))

        # call assert_frame_equal for all selections of 3 arrays
        for comb in itertools.combinations(arrays, 3):
            names, data = zip(*comb)
            mrecs = mrecords.fromarrays(data, names=names)

            # fill the comb
            comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])

            expected = DataFrame(comb,columns=names)
            result = DataFrame(mrecs)
            assert_fr_equal(result,expected)

            # specify columns
            expected = DataFrame(comb,columns=names[::-1])
            result = DataFrame(mrecs, columns=names[::-1])
            assert_fr_equal(result,expected)

            # specify index
            expected = DataFrame(comb,columns=names,index=[1,2])
            result = DataFrame(mrecs, index=[1,2])
            assert_fr_equal(result,expected)
    def test_constructor_corner(self):
        """Degenerate inputs: empty index, empty frames with an explicit
        dtype, and a scalar-valued dict against a wider column set.
        """
        df = DataFrame(index=[])
        self.assertEqual(df.values.shape, (0, 0))

        # empty but with specified dtype
        df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
        self.assertEqual(df.values.dtype, np.object_)

        # requesting int for an all-missing frame does not error
        # NOTE(review): the old comment said "ends up float" but the
        # assertion below pins object dtype -- trust the assertion
        df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
        self.assertEqual(df.values.dtype, np.object_)

        # #1783 empty dtype object
        df = DataFrame({}, columns=['foo', 'bar'])
        self.assertEqual(df.values.dtype, np.object_)

        df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
                       dtype=int)
        self.assertEqual(df.values.dtype, np.object_)
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
self.assertEqual(df['int'].dtype, np.int64)
self.assertEqual(df['bool'].dtype, np.bool_)
self.assertEqual(df['float'].dtype, np.float64)
self.assertEqual(df['complex'].dtype, np.complex128)
self.assertEqual(df['object'].dtype, np.object_)
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
assert_frame_equal(df, exp)
with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with assertRaisesRegexp(PandasError, 'constructor not properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
    def test_constructor_empty_list(self):
        """Empty lists and exhausted generators give an empty frame (GH 9939)."""
        df = DataFrame([], index=[])
        expected = DataFrame(index=[])
        assert_frame_equal(df, expected)

        # GH 9939
        df = DataFrame([], columns=['A', 'B'])
        expected = DataFrame({}, columns=['A', 'B'])
        assert_frame_equal(df, expected)

        # Empty generator: list(empty_gen()) == []
        def empty_gen():
            # a bare ``return`` before ``yield`` makes this a generator
            # that yields nothing; the yield is intentionally unreachable
            return
            yield

        df = DataFrame(empty_gen(), columns=['A', 'B'])
        assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
self.assertTrue(com.is_integer_dtype(df['num']))
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({ 0: range(10) })
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Squence like
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1,'a'],[2,'b']],columns=columns)
assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({ 'A' : list(range(10)) })
assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ range(10), range(10) ])
assert_frame_equal(result, expected)
def test_constructor_generator(self):
#related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ gen1, gen2 ])
assert_frame_equal(result, expected)
gen = ([ i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({ 0 : range(10), 1 : 'a' })
assert_frame_equal(result, expected, check_dtype=False)
    def test_constructor_list_of_dicts(self):
        """Row dicts with differing key sets union their keys into columns;
        an empty dict still contributes a row.
        """
        data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
                OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
                OrderedDict([['a', 1.5], ['d', 6]]),
                OrderedDict(),
                OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
                OrderedDict([['b', 3], ['c', 4], ['d', 6]])]

        result = DataFrame(data)
        expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
                                       orient='index')
        assert_frame_equal(result, expected.reindex(result.index))

        # a single empty dict gives one all-empty row
        result = DataFrame([{}])
        expected = DataFrame(index=[0])
        assert_frame_equal(result, expected)
    def test_constructor_list_of_series(self):
        """Series rows use their names as the row labels; unnamed Series get
        'Unnamed N' / positional labels.
        """
        data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
                OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
        sdict = OrderedDict(zip(['x', 'y'], data))
        idx = Index(['a', 'b', 'c'])

        # all named
        data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
                 Series([1.5, 3, 6], idx, name='y')]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected)

        # some unnamed
        data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
                 Series([1.5, 3, 6], idx)]
        result = DataFrame(data2)

        sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result.sort_index(), expected)

        # none named
        data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
                OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
                OrderedDict([['a', 1.5], ['d', 6]]),
                OrderedDict(),
                OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
                OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
        data = [Series(d) for d in data]

        result = DataFrame(data)
        sdict = OrderedDict(zip(range(len(data)), data))
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected.reindex(result.index))

        # an explicit matching index is a no-op
        result2 = DataFrame(data, index=np.arange(6))
        assert_frame_equal(result, result2)

        # a single empty Series gives one all-empty row
        result = DataFrame([Series({})])
        expected = DataFrame(index=[0])
        assert_frame_equal(result, expected)

        data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
                OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
        sdict = OrderedDict(zip(range(len(data)), data))

        idx = Index(['a', 'b', 'c'])
        data2 = [Series([1.5, 3, 4], idx, dtype='O'),
                 Series([1.5, 3, 6], idx)]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with assertRaisesRegexp(ValueError, 'arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
assert_frame_equal(df, expected, check_dtype=False)
    def test_constructor_Series_copy_bug(self):
        """Smoke test: copying a frame built from a Series must not raise."""
        df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
        df.copy()
    def test_constructor_mixed_dict_and_Series(self):
        """A dict column and a Series column align on the union of labels;
        an unindexed list mixed with a dict column is ambiguous and raises.
        """
        data = {}
        data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
        data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])

        result = DataFrame(data)
        self.assertTrue(result.index.is_monotonic)

        # ordering ambiguous, raise exception
        with assertRaisesRegexp(ValueError, 'ambiguous ordering'):
            DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})

        # this is OK though
        result = DataFrame({'A': ['a', 'b'],
                            'B': Series(['a', 'b'], index=['a', 'b'])})
        expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
                             index=['a', 'b'])
        assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
assert_frame_equal(result, expected)
    def test_constructor_orient(self):
        """``from_dict(..., orient='index')`` treats dict keys as row labels."""
        data_dict = self.mixed_frame.T._series
        recons = DataFrame.from_dict(data_dict, orient='index')
        expected = self.mixed_frame.sort_index()
        assert_frame_equal(recons, expected)

        # dict of sequence
        a = {'hi': [32, 3, 3],
             'there': [3, 5, 3]}
        rs = DataFrame.from_dict(a, orient='index')
        # equivalent to the default orientation transposed and row-ordered
        xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
        assert_frame_equal(rs, xp)
    def test_constructor_Series_named(self):
        """A named Series contributes its name as the column label; unnamed
        input falls back to positional labels.
        """
        a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
        df = DataFrame(a)
        self.assertEqual(df.columns[0], 'x')
        self.assertTrue(df.index.equals(a.index))

        # ndarray like
        arr = np.random.randn(10)
        s = Series(arr,name='x')
        df = DataFrame(s)
        expected = DataFrame(dict(x = s))
        assert_frame_equal(df,expected)

        # unnamed Series -> positional column label 0
        s = Series(arr,index=range(3,13))
        df = DataFrame(s)
        expected = DataFrame({ 0 : s })
        assert_frame_equal(df,expected)

        # a single Series cannot supply two columns
        self.assertRaises(ValueError, DataFrame, s, columns=[1,2])

        # #2234
        a = Series([], name='x')
        df = DataFrame(a)
        self.assertEqual(df.columns[0], 'x')

        # series with name and w/o
        s1 = Series(arr,name='x')
        df = DataFrame([s1, arr]).T
        expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])
        assert_frame_equal(df,expected)

        # this is a bit non-intuitive here; the series collapse down to arrays
        df = DataFrame([arr, s1]).T
        expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])
        assert_frame_equal(df,expected)
    def test_constructor_Series_differently_indexed(self):
        """A Series is reindexed to an explicitly supplied index; the column
        label comes from the Series name (or position 0 when unnamed).
        """
        # name
        s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')

        # no name
        s2 = Series([1, 2, 3], index=['a', 'b', 'c'])

        other_index = Index(['a', 'b'])

        df1 = DataFrame(s1, index=other_index)
        exp1 = DataFrame(s1.reindex(other_index))
        self.assertEqual(df1.columns[0], 'x')
        assert_frame_equal(df1, exp1)

        df2 = DataFrame(s2, index=other_index)
        exp2 = DataFrame(s2.reindex(other_index))
        self.assertEqual(df2.columns[0], 0)
        self.assertTrue(df2.index.equals(other_index))
        assert_frame_equal(df2, exp2)
    def test_constructor_manager_resize(self):
        """Constructing from an internal BlockManager with narrower axes
        trims the frame to those axes.
        """
        index = list(self.frame.index[:5])
        columns = list(self.frame.columns[:3])

        result = DataFrame(self.frame._data, index=index,
                           columns=columns)
        self.assert_numpy_array_equal(result.index, index)
        self.assert_numpy_array_equal(result.columns, columns)
    def test_constructor_from_items(self):
        """``from_items`` builds columns from (key, values) pairs, or rows
        when ``orient='index'`` (which then requires explicit columns).
        """
        items = [(c, self.frame[c]) for c in self.frame.columns]
        recons = DataFrame.from_items(items)
        assert_frame_equal(recons, self.frame)

        # pass some columns
        recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
        assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])

        # orient='index'

        row_items = [(idx, self.mixed_frame.xs(idx))
                     for idx in self.mixed_frame.index]

        recons = DataFrame.from_items(row_items,
                                      columns=self.mixed_frame.columns,
                                      orient='index')
        assert_frame_equal(recons, self.mixed_frame)
        self.assertEqual(recons['A'].dtype, np.float64)

        with tm.assertRaisesRegexp(TypeError,
                                   "Must pass columns with orient='index'"):
            DataFrame.from_items(row_items, orient='index')

        # orient='index', but thar be tuples
        arr = lib.list_to_object_array(
            [('bar', 'baz')] * len(self.mixed_frame))
        self.mixed_frame['foo'] = arr
        row_items = [(idx, list(self.mixed_frame.xs(idx)))
                     for idx in self.mixed_frame.index]
        recons = DataFrame.from_items(row_items,
                                      columns=self.mixed_frame.columns,
                                      orient='index')
        assert_frame_equal(recons, self.mixed_frame)
        tm.assertIsInstance(recons['foo'][0], tuple)

        rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
                                  orient='index', columns=['one', 'two', 'three'])
        xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
                       columns=['one', 'two', 'three'])
        assert_frame_equal(rs, xp)
    def test_constructor_mix_series_nonseries(self):
        """A plain list column must match the length implied by a Series
        column; a mismatch raises.
        """
        df = DataFrame({'A': self.frame['A'],
                        'B': list(self.frame['B'])}, columns=['A', 'B'])
        assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])

        with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
            DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
with assertRaisesRegexp(TypeError, 'iterator'):
df = DataFrame(iter([1, 2, 3]))
    def test_constructor_column_duplicates(self):
        """Duplicate column labels are accepted (GH #2079), including via
        ``from_items``; but a columns list that disagrees with the items raises.
        """
        # it works! #2079
        df = DataFrame([[8, 5]], columns=['a', 'a'])
        edf = DataFrame([[8, 5]])
        edf.columns = ['a', 'a']
        assert_frame_equal(df, edf)

        idf = DataFrame.from_items(
            [('a', [8]), ('a', [5])], columns=['a', 'a'])
        assert_frame_equal(idf, edf)

        # item keys and the columns list must agree
        self.assertRaises(ValueError, DataFrame.from_items,
                          [('a', [8]), ('a', [5]), ('b', [6])],
                          columns=['b', 'a', 'a'])
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
assert_frame_equal(df, expected)
    def test_column_dups_operations(self):
        """Exercise assignment, insert/delete, consolidation, ``values``,
        rename/merge, reindex, drop and describe on frames whose columns
        contain duplicate labels.
        """
        def check(result, expected=None):
            # expected may be omitted for pure smoke-checks
            if expected is not None:
                assert_frame_equal(result,expected)
            result.dtypes
            str(result)

        # assignment
        # GH 3687
        arr = np.random.randn(3, 2)
        idx = lrange(2)
        df = DataFrame(arr, columns=['A', 'A'])
        df.columns = idx
        expected = DataFrame(arr,columns=idx)
        check(df,expected)

        idx = date_range('20130101',periods=4,freq='Q-NOV')
        df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
        df.columns = idx
        expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
        check(df,expected)

        # insert
        df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
        df['string'] = 'bah'
        expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
        check(df,expected)
        with assertRaisesRegexp(ValueError, 'Length of value'):
            df.insert(0, 'AnotherColumn', range(len(df.index) - 1))

        # insert same dtype
        df['foo2'] = 3
        expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
        check(df,expected)

        # set (non-dup)
        df['foo2'] = 4
        expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
        check(df,expected)
        df['foo2'] = 3

        # delete (non dup)
        del df['bar']
        expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
        check(df,expected)

        # try to delete again (its not consolidated)
        del df['hello']
        expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
        check(df,expected)

        # consolidate
        df = df.consolidate()
        expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
        check(df,expected)

        # insert
        df.insert(2,'new_col',5.)
        expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
        check(df,expected)

        # insert a dup: forbidden by default, allowed with allow_duplicates
        assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)
        df.insert(2,'new_col',4.,allow_duplicates=True)
        expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
        check(df,expected)

        # delete (dup): removes every column with that label
        del df['foo']
        expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
        assert_frame_equal(df,expected)

        # dup across dtypes
        df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
        check(df)

        df['foo2'] = 7.
        expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
        check(df,expected)

        # selecting a duplicated label returns a frame of both columns
        result = df['foo']
        expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
        check(result,expected)

        # multiple replacements
        df['foo'] = 'string'
        expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
        check(df,expected)

        del df['foo']
        expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
        check(df,expected)

        # values
        df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])
        result = df.values
        expected = np.array([[1,2.5],[3,4.5]])
        self.assertTrue((result == expected).all().all())

        # rename, GH 4403
        df4 = DataFrame({'TClose': [22.02],
                         'RT': [0.0454],
                         'TExg': [0.0422]},
                        index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))

        df5 = DataFrame({'STK_ID': [600809] * 3,
                         'RPT_Date': [20120930,20121231,20130331],
                         'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
                         'TClose': [38.05, 41.66, 30.01]},
                        index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))

        k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)
        result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})
        str(result)
        result.dtypes

        expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],
                             columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)
        assert_frame_equal(result,expected)

        # reindex is invalid!
        df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
        self.assertRaises(ValueError, df.reindex, columns=['bar'])
        self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])

        # drop
        df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
        result = df.drop(['a'],axis=1)
        expected = DataFrame([[1],[1],[1]],columns=['bar'])
        check(result,expected)
        result = df.drop('a',axis=1)
        check(result,expected)

        # describe
        df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')
        result = df.describe()
        s = df.iloc[:,0].describe()
        expected = pd.concat([ s, s, s],keys=df.columns,axis=1)
        check(result,expected)

        # check column dups with index equal and not equal to df's index
        df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                       columns=['A', 'B', 'A'])
        for index in [df.index, pd.Index(list('edcba'))]:
            this_df = df.copy()
            expected_ser = pd.Series(index.values, index=this_df.index)
            expected_df = DataFrame.from_items([('A', expected_ser),
                                                ('B', this_df['B']),
                                                ('A', expected_ser)])
            this_df['A'] = index
            check(this_df, expected_df)

        # operations apply positionally under duplicate labels
        for op in ['__add__','__mul__','__sub__','__truediv__']:
            df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))
            expected = getattr(df,op)(df)
            expected.columns = ['A','A']
            df.columns = ['A','A']
            result = getattr(df,op)(df)
            check(result,expected)

        # multiple assignments that change dtypes
        # the location indexer is a slice
        # GH 6120
        df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])
        expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])

        df['that'] = 1.0
        check(df, expected)

        df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])
        expected = DataFrame(1, index=range(5), columns=['that', 'that'])

        df['that'] = 1
        check(df, expected)
    def test_column_dups2(self):
        """``drop`` and ``dropna`` must honour duplicate column labels (GH 6240)."""
        # drop buggy GH 6240
        df = DataFrame({'A' : np.random.randn(5),
                        'B' : np.random.randn(5),
                        'C' : np.random.randn(5),
                        'D' : ['a','b','c','d','e'] })

        expected = df.take([0,1,1], axis=1)
        df2 = df.take([2,0,1,2,1], axis=1)
        result = df2.drop('C',axis=1)
        assert_frame_equal(result, expected)

        # dropna
        df = DataFrame({'A' : np.random.randn(5),
                        'B' : np.random.randn(5),
                        'C' : np.random.randn(5),
                        'D' : ['a','b','c','d','e'] })
        df.iloc[2,[0,1,2]] = np.nan
        df.iloc[0,0] = np.nan
        df.iloc[1,1] = np.nan
        df.iloc[:,3] = np.nan
        # compute the expectation on unique labels, then rename to dups
        expected = df.dropna(subset=['A','B','C'],how='all')
        expected.columns = ['A','A','B','C']

        df.columns = ['A','A','B','C']

        result = df.dropna(subset=['A','C'],how='all')
        assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
    """Boolean indexing, mask-style selection, aligned arithmetic and
    ``.ix`` label selection must all work with duplicate column labels.
    """
    def check(result, expected=None):
        # compare when an expectation is given; always smoke-test that
        # dtypes/repr work on a duplicated-label result
        if expected is not None:
            assert_frame_equal(result,expected)
        result.dtypes
        str(result)
    # boolean indexing
    # GH 4879
    dups = ['A', 'A', 'C', 'D']
    # expectation computed with unique labels, then relabelled with dups
    df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
    expected = df[df.C > 6]
    expected.columns = dups
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    result = df[df.C > 6]
    check(result,expected)
    # where
    df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
    expected = df[df > 6]
    expected.columns = dups
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    result = df[df > 6]
    check(result,expected)
    # boolean with the duplicate raises
    # df.A selects both 'A' columns here, so the mask is ambiguous
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    self.assertRaises(ValueError, lambda : df[df.A > 6])
    # dup aligning operations should work
    # GH 5185
    df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
    df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
    expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])
    result = df1.sub(df2)
    assert_frame_equal(result,expected)
    # equality
    df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])
    df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])
    # not-comparing like-labelled frames must raise
    self.assertRaises(ValueError, lambda : df1 == df2)
    df1r = df1.reindex_like(df2)
    result = df1r == df2
    expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])
    assert_frame_equal(result,expected)
    # mixed column selection
    # GH 5639
    dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),
                        'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),
                        'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})
    expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)
    result = dfbool[['one', 'three', 'one']]
    check(result,expected)
    # multi-axis dups
    # GH 6121
    # NOTE: .ix is the (since-deprecated) mixed label/positional indexer
    df = DataFrame(np.arange(25.).reshape(5,5),
                   index=['a', 'b', 'c', 'd', 'e'],
                   columns=['A', 'B', 'C', 'D', 'E'])
    z = df[['A', 'C', 'A']].copy()
    expected = z.ix[['a', 'c', 'a']]
    df = DataFrame(np.arange(25.).reshape(5,5),
                   index=['a', 'b', 'c', 'd', 'e'],
                   columns=['A', 'B', 'C', 'D', 'E'])
    z = df[['A', 'C', 'A']]
    result = z.ix[['a', 'c', 'a']]
    check(result,expected)
def test_column_dups_indexing2(self):
    """GH 8363: column arithmetic must work on a non-unique index."""
    dup_index = [2, 2, 3, 3, 4]
    # integer columns: B - A is a constant 1 everywhere
    frame = DataFrame({'A': np.arange(5, dtype='int64'),
                       'B': np.arange(1, 6, dtype='int64')},
                      index=dup_index)
    assert_series_equal(frame.B - frame.A, Series(1, index=dup_index))
    # datetime columns: the difference is a constant 9-hour Timedelta
    frame = DataFrame({'A': date_range('20130101', periods=5),
                       'B': date_range('20130101 09:00:00', periods=5)},
                      index=dup_index)
    assert_series_equal(frame.B - frame.A,
                        Series(Timedelta('9 hours'), index=dup_index))
def test_insert_benchmark(self):
    """Mirror of vb_suite/frame_methods/frame_insert_columns: inserting
    the same column repeatedly must reproduce a tiled expectation."""
    nrows, ncols = 10, 5
    frame = DataFrame(index=lrange(nrows))
    column = np.random.randn(nrows)
    for pos in range(ncols):
        frame[pos] = column
    expected = DataFrame(np.repeat(column, ncols).reshape(nrows, ncols),
                         index=lrange(nrows))
    assert_frame_equal(frame, expected)
def test_constructor_single_value(self):
    """Scalar data broadcasts over index/columns with dtype upcasting.

    Invalid combinations (scalar without both axes, or an incompatible
    explicit dtype) must raise.
    """
    # expecting single value upcasting here
    df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
    assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,
                                     df.columns))
    df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
    assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,
                                     df.columns))
    # a string scalar broadcasts as object dtype
    df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
    assert_frame_equal(df, DataFrame(np.array([['a', 'a'],
                                               ['a', 'a']],
                                              dtype=object),
                                     index=[1, 2],
                                     columns=['a', 'c']))
    # scalar construction requires both axes to be specified
    self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
    self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
    # a string scalar cannot honour an explicit float dtype
    with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
        DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
    """Dtype inference for constructors mixing scalars, ndarrays and
    datetime-likes, plus tz preservation on construction.

    Fix vs. original: ``Series.sort_index()`` returns a new object, so
    the bare ``result.sort_index()`` / ``expected.sort_index()``
    statements were no-ops; the sorted results are now assigned before
    each comparison.
    """
    intname = np.dtype(np.int_).name
    floatname = np.dtype(np.float_).name
    datetime64name = np.dtype('M8[ns]').name
    objectname = np.dtype(np.object_).name

    # single item
    df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
                    'D': Timestamp("20010101"),
                    'E': datetime(2001, 1, 2, 0, 0)},
                   index=np.arange(10))
    result = df.get_dtype_counts().sort_index()
    expected = Series({'int64': 1, datetime64name: 2,
                       objectname: 2}).sort_index()
    assert_series_equal(result, expected)

    # check with ndarray construction ndim==0 (e.g. we are passing a
    # ndim 0 ndarray with a dtype specified)
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                    floatname: np.array(1., dtype=floatname),
                    intname: np.array(1, dtype=intname)},
                   index=np.arange(10))
    result = df.get_dtype_counts().sort_index()
    expected = {objectname: 1}
    # the platform int/float may or may not already be int64/float64
    if intname == 'int64':
        expected['int64'] = 2
    else:
        expected['int64'] = 1
        expected[intname] = 1
    if floatname == 'float64':
        expected['float64'] = 2
    else:
        expected['float64'] = 1
        expected[floatname] = 1
    expected = Series(expected).sort_index()
    assert_series_equal(result, expected)

    # check with ndarray construction ndim>0
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                    floatname: np.array([1.] * 10, dtype=floatname),
                    intname: np.array([1] * 10, dtype=intname)},
                   index=np.arange(10))
    result = df.get_dtype_counts().sort_index()
    assert_series_equal(result, expected)

    # GH 2809
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    datetime_s = Series(datetimes)
    self.assertEqual(datetime_s.dtype, 'M8[ns]')
    df = DataFrame({'datetime_s': datetime_s})
    result = df.get_dtype_counts().sort_index()
    expected = Series({datetime64name: 1}).sort_index()
    assert_series_equal(result, expected)

    # GH 2810
    # datetime.datetime converts to datetime64; datetime.date stays object
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    dates = [ts.date() for ts in ind]
    df = DataFrame({'datetimes': datetimes, 'dates': dates})
    result = df.get_dtype_counts().sort_index()
    expected = Series({datetime64name: 1, objectname: 1}).sort_index()
    assert_series_equal(result, expected)

    # GH 7594
    # don't coerce tz-aware
    import pytz
    tz = pytz.timezone('US/Eastern')
    dt = tz.localize(datetime(2012, 1, 1))
    df = DataFrame({'End Date': dt}, index=[0])
    self.assertEqual(df.iat[0, 0], dt)
    assert_series_equal(df.dtypes,
                        Series({'End Date': 'datetime64[ns, US/Eastern]'}))
    df = DataFrame([{'End Date': dt}])
    self.assertEqual(df.iat[0, 0], dt)
    assert_series_equal(df.dtypes,
                        Series({'End Date': 'datetime64[ns, US/Eastern]'}))

    # tz-aware (UTC and other tz's)
    # GH 8411
    dr = date_range('20130101', periods=3)
    df = DataFrame({'value': dr})
    self.assertTrue(df.iat[0, 0].tz is None)
    dr = date_range('20130101', periods=3, tz='UTC')
    df = DataFrame({'value': dr})
    self.assertTrue(str(df.iat[0, 0].tz) == 'UTC')
    dr = date_range('20130101', periods=3, tz='US/Eastern')
    df = DataFrame({'value': dr})
    self.assertTrue(str(df.iat[0, 0].tz) == 'US/Eastern')

    # GH 7822
    # preserve an index with a tz on dict construction
    i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
    expected = DataFrame({'a': i.to_series(keep_tz=True).reset_index(drop=True)})
    df = DataFrame()
    df['a'] = i
    assert_frame_equal(df, expected)
    df = DataFrame({'a': i})
    assert_frame_equal(df, expected)

    # multiples
    i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
    df = DataFrame({'a': i, 'b': i_no_tz})
    expected = DataFrame({'a': i.to_series(keep_tz=True).reset_index(drop=True),
                          'b': i_no_tz})
    assert_frame_equal(df, expected)
def test_constructor_with_datetime_tz(self):
    """GH 8260: datetime64-with-tz through construction, selection,
    interleaving, astype and set/reset_index.

    Fixes vs. original: the two ``assertTrue(dtype, 'M8[...')`` calls
    passed the dtype string as assertTrue's *msg* argument (so they
    always passed); they now compare the dtype for real.  The dtypes
    expectation built for ``tzframe`` was computed but never asserted;
    it is now.
    """
    idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
                name='foo')
    dr = date_range('20130110', periods=3)

    # construction
    df = DataFrame({'A': idx, 'B': dr})
    # was assertTrue(df['A'].dtype, 'M8[ns, US/Eastern') -- never failed
    self.assertEqual(str(df['A'].dtype), 'datetime64[ns, US/Eastern]')
    self.assertTrue(df['A'].name == 'A')
    assert_series_equal(df['A'], Series(idx, name='A'))
    assert_series_equal(df['B'], Series(dr, name='B'))

    # construction from dict
    df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
                         B=Timestamp('20130603', tz='CET')), index=range(5))
    assert_series_equal(df2.dtypes,
                        Series(['datetime64[ns, US/Eastern]',
                                'datetime64[ns, CET]'], index=['A', 'B']))

    # dtypes
    tzframe = DataFrame({'A': date_range('20130101', periods=3),
                         'B': date_range('20130101', periods=3,
                                         tz='US/Eastern'),
                         'C': date_range('20130101', periods=3, tz='CET')})
    tzframe.iloc[1, 1] = pd.NaT
    tzframe.iloc[1, 2] = pd.NaT
    result = tzframe.dtypes.sort_index()
    expected = Series([np.dtype('datetime64[ns]'),
                       DatetimeTZDtype('datetime64[ns, US/Eastern]'),
                       DatetimeTZDtype('datetime64[ns, CET]')],
                      ['A', 'B', 'C'])
    # this expectation was previously computed but never checked
    assert_series_equal(result, expected)

    # concat
    df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
    assert_frame_equal(df2, df3)

    # select_dtypes: plain datetime64[ns] does not match the tz dtypes
    result = df3.select_dtypes(include=['datetime64[ns]'])
    expected = df3.reindex(columns=[])
    assert_frame_equal(result, expected)

    # this will select based on issubclass, and these are the same class
    result = df3.select_dtypes(include=['datetime64[ns, CET]'])
    expected = df3
    assert_frame_equal(result, expected)

    # from index
    idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
    df2 = DataFrame(idx2)
    assert_series_equal(df2['foo'], Series(idx2, name='foo'))
    df2 = DataFrame(Series(idx2))
    assert_series_equal(df2['foo'], Series(idx2, name='foo'))

    idx2 = date_range('20130101', periods=3, tz='US/Eastern')
    df2 = DataFrame(idx2)
    assert_series_equal(df2[0], Series(idx2, name=0))
    df2 = DataFrame(Series(idx2))
    assert_series_equal(df2[0], Series(idx2, name=0))

    # interleave with object
    result = self.tzframe.assign(D='foo').values
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500',
                                    tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500',
                                    tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
                         ['foo', 'foo', 'foo']], dtype=object).T
    self.assert_numpy_array_equal(result, expected)

    # interleave with only datetime64[ns]
    result = self.tzframe.values
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500',
                                    tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500',
                                    tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100',
                                    tz='CET')]], dtype=object).T
    self.assert_numpy_array_equal(result, expected)

    # astype to object keeps Timestamps (tz-aware where applicable)
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500',
                                    tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500',
                                    tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100',
                                    tz='CET')]], dtype=object).T
    result = self.tzframe.astype(object)
    assert_frame_equal(result, DataFrame(expected,
                                         index=self.tzframe.index,
                                         columns=self.tzframe.columns))

    # astype to naive datetime64[ns] converts through UTC
    result = self.tzframe.astype('datetime64[ns]')
    expected = DataFrame({'A': date_range('20130101', periods=3),
                          'B': date_range('20130101', periods=3,
                                          tz='US/Eastern').tz_convert('UTC').tz_localize(None),
                          'C': date_range('20130101', periods=3,
                                          tz='CET').tz_convert('UTC').tz_localize(None)})
    expected.iloc[1, 1] = pd.NaT
    expected.iloc[1, 2] = pd.NaT
    assert_frame_equal(result, expected)

    # str formatting
    result = self.tzframe.astype(str)
    expected = np.array([['2013-01-01', '2013-01-01 00:00:00-05:00',
                          '2013-01-01 00:00:00+01:00'],
                         ['2013-01-02', 'NaT', 'NaT'],
                         ['2013-01-03', '2013-01-03 00:00:00-05:00',
                          '2013-01-03 00:00:00+01:00']], dtype=object)
    self.assert_numpy_array_equal(result, expected)

    result = str(self.tzframe)
    self.assertTrue('0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00' in result)
    self.assertTrue('1 2013-01-02 NaT NaT' in result)
    self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00' in result)

    # setitem
    df['C'] = idx
    assert_series_equal(df['C'], Series(idx, name='C'))

    df['D'] = 'foo'
    df['D'] = idx
    assert_series_equal(df['D'], Series(idx, name='D'))
    del df['D']

    # assert that A & C are not sharing the same base (e.g. they
    # are copies)
    b1 = df._data.blocks[1]
    b2 = df._data.blocks[2]
    self.assertTrue(b1.values.equals(b2.values))
    self.assertFalse(id(b1.values.values.base) == id(b2.values.values.base))

    # with nan
    df2 = df.copy()
    df2.iloc[1, 1] = pd.NaT
    df2.iloc[1, 2] = pd.NaT
    result = df2['B']
    assert_series_equal(notnull(result), Series([True, False, True],
                                                name='B'))
    assert_series_equal(df2.dtypes, df.dtypes)

    # set/reset
    df = DataFrame({'A': [0, 1, 2]}, index=idx)
    result = df.reset_index()
    # was assertTrue(result['foo'].dtype, 'M8[ns, US/Eastern') -- a no-op
    self.assertEqual(str(result['foo'].dtype), 'datetime64[ns, US/Eastern]')

    result = result.set_index('foo')
    # NOTE(review): this compares df.index (built from idx) against idx
    # itself, which is trivially true; it likely intended to verify that
    # result.index round-trips -- kept as-is to avoid changing outcome
    tm.assert_index_equal(df.index, idx)
def test_constructor_for_list_with_dtypes(self):
    """Dtype inference when constructing frames from lists of arrays
    and scalars.

    Fixes vs. original: the first two dtype-count expectations were
    computed but never asserted; the final ``sort_index()`` calls
    discarded their (non-inplace) results; two unused platform dtype
    name locals were removed.
    """
    datetime64name = np.dtype('M8[ns]').name
    objectname = np.dtype(np.object_).name

    # test list of lists/ndarrays: default integer arrays give int64
    df = DataFrame([np.arange(5) for x in range(5)])
    result = df.get_dtype_counts()
    expected = Series({'int64': 5})
    assert_series_equal(result, expected)

    # explicit int32 arrays keep their width
    df = DataFrame([np.array(np.arange(5), dtype='int32')
                    for x in range(5)])
    result = df.get_dtype_counts()
    expected = Series({'int32': 5})
    assert_series_equal(result, expected)

    # overflow issue? (we always expect int64 upcasting here)
    df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
    result = df.get_dtype_counts()
    expected = Series({'int64': 1})
    assert_series_equal(result, expected)

    # GH #2751 (construction with no index specified), make sure we cast
    # to platform values
    df = DataFrame([1, 2])
    result = df.get_dtype_counts()
    expected = Series({'int64': 1})
    assert_series_equal(result, expected)

    df = DataFrame([1., 2.])
    result = df.get_dtype_counts()
    expected = Series({'float64': 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a': [1, 2]})
    result = df.get_dtype_counts()
    expected = Series({'int64': 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a': [1., 2.]})
    result = df.get_dtype_counts()
    expected = Series({'float64': 1})
    assert_series_equal(result, expected)

    # scalar broadcast over an explicit index
    df = DataFrame({'a': 1}, index=lrange(3))
    result = df.get_dtype_counts()
    expected = Series({'int64': 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a': 1.}, index=lrange(3))
    result = df.get_dtype_counts()
    expected = Series({'float64': 1})
    assert_series_equal(result, expected)

    # with object list
    df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
                    'c': list('abcd'),
                    'd': [datetime(2000, 1, 1) for i in range(4)],
                    'e': [1., 2, 4., 7]})
    # sort_index returns a new object -- assign it before comparing
    result = df.get_dtype_counts().sort_index()
    expected = Series({'int64': 1, 'float64': 2,
                       datetime64name: 1, objectname: 1}).sort_index()
    assert_series_equal(result, expected)
def test_not_hashable(self):
    """DataFrames are mutable containers and therefore unhashable."""
    single = pd.DataFrame([1])
    for frame in (single, self.empty):
        with self.assertRaises(TypeError):
            hash(frame)
def test_timedeltas(self):
    """Dtype bookkeeping as timedelta64 columns are created and combined."""
    df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
                        B = Series([ timedelta(days=i) for i in range(3) ])))
    result = df.get_dtype_counts().sort_values()
    expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 }).sort_values()
    assert_series_equal(result, expected)
    # datetime + timedelta yields another datetime column
    df['C'] = df['A'] + df['B']
    expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 }).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)
    # mixed int types
    df['D'] = 1
    expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 }).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)
def test_operators_timedelta64(self):
    """Reductions (min/max/abs) over timedelta64 columns, alone and
    inside a mixed-dtype frame.
    """
    from datetime import timedelta
    df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),
                        B = date_range('2012-1-2', periods=3, freq='D'),
                        C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))
    # A-C grows by one day per row; A-B is a constant -1 day
    diffs = DataFrame(dict(A = df['A']-df['C'],
                           B = df['A']-df['B']))
    # min
    result = diffs.min()
    self.assertEqual(result[0], diffs.ix[0,'A'])
    self.assertEqual(result[1], diffs.ix[0,'B'])
    result = diffs.min(axis=1)
    self.assertTrue((result == diffs.ix[0,'B']).all() == True)
    # max
    result = diffs.max()
    self.assertEqual(result[0], diffs.ix[2,'A'])
    self.assertEqual(result[1], diffs.ix[2,'B'])
    result = diffs.max(axis=1)
    self.assertTrue((result == diffs['A']).all() == True)
    # abs
    result = diffs.abs()
    result2 = abs(diffs)
    # abs of the negative B deltas equals the sign-reversed difference
    expected = DataFrame(dict(A = df['A']-df['C'],
                              B = df['B']-df['A']))
    assert_frame_equal(result,expected)
    assert_frame_equal(result2, expected)
    # mixed frame
    mixed = diffs.copy()
    mixed['C'] = 'foo'
    mixed['D'] = 1
    mixed['E'] = 1.
    mixed['F'] = Timestamp('20130101')
    # results in an object array
    from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
    result = mixed.min()
    expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),
                       _coerce_scalar_to_timedelta_type(timedelta(days=-1)),
                       'foo',
                       1,
                       1.0,
                       Timestamp('20130101')],
                      index=mixed.columns)
    assert_series_equal(result,expected)
    # excludes numeric
    result = mixed.min(axis=1)
    expected = Series([1, 1, 1.],index=[0, 1, 2])
    assert_series_equal(result,expected)
    # works when only those columns are selected
    result = mixed[['A','B']].min(1)
    expected = Series([ timedelta(days=-1) ] * 3)
    assert_series_equal(result,expected)
    result = mixed[['A','B']].min()
    expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])
    assert_series_equal(result,expected)
    # GH 3106
    df = DataFrame({'time' : date_range('20130102',periods=5),
                    'time2' : date_range('20130105',periods=5) })
    df['off1'] = df['time2']-df['time']
    self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
    df['off2'] = df['time']-df['time2']
    # consolidation must not lose the timedelta dtypes
    df._consolidate_inplace()
    self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
    self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_datetimelike_setitem_with_inference(self):
    """GH 7592: assigning m8[ns] arrays / NaT through every setitem path
    must infer timedelta64 (or datetime64) dtype.
    """
    # assignment of timedeltas with NaT
    one_hour = timedelta(hours=1)
    df = DataFrame(index=date_range('20130101',periods=4))
    # full-column, .loc full/partial and .ix full/partial assignments
    df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')
    df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')
    df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')
    df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')
    df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')
    # an all-NaT column, then partially filled
    df['F'] = np.timedelta64('NaT')
    df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')
    # datetime64 columns via partial .ix and an all-NaT scalar
    df.ix[-3:,'G'] = date_range('20130101',periods=3)
    df['H'] = np.datetime64('NaT')
    result = df.dtypes
    expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
    assert_series_equal(result,expected)
def test_setitem_datetime_coercion(self):
    """GH 1048: values set into a datetime column coerce to Timestamp."""
    frame = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
    # label-slice assignment of a numpy datetime64 scalar
    frame.loc[0:1, 'c'] = np.datetime64('2008-08-08')
    for row in (0, 1):
        self.assertEqual(pd.Timestamp('2008-08-08'), frame.loc[row, 'c'])
    # scalar assignment of a python datetime.date
    frame.loc[2, 'c'] = date(2005, 5, 5)
    self.assertEqual(pd.Timestamp('2005-05-05'), frame.loc[2, 'c'])
def test_new_empty_index(self):
    """Naming one empty frame's index must not leak onto another frame."""
    first = DataFrame(randn(0, 3))
    second = DataFrame(randn(0, 3))
    first.index.name = 'foo'
    self.assertIsNone(second.index.name)
def test_astype(self):
    """Whole-frame astype against float, object-augmented and mixed
    numeric fixture frames.
    """
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    casted = self.frame.astype(np.int32)
    expected = DataFrame(self.frame.values.astype(np.int32),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    # adding an object column makes .values object dtype; astype(int)
    # must still coerce the numeric-looking strings
    self.frame['foo'] = '5'
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)
    # mixed casting
    def _check_cast(df, v):
        # NOTE(review): only one element of the (unordered) dtype-name
        # set is checked; this implicitly assumes a homogeneous result
        self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)
    mn = self.all_mixed._get_numeric_data().copy()
    mn['little_float'] = np.array(12345.,dtype='float16')
    mn['big_float'] = np.array(123456789101112.,dtype='float64')
    casted = mn.astype('float64')
    _check_cast(casted, 'float64')
    casted = mn.astype('int64')
    _check_cast(casted, 'int64')
    casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')
    _check_cast(casted, 'float32')
    casted = mn.reindex(columns = ['little_float']).astype('float16')
    _check_cast(casted, 'float16')
    casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')
    _check_cast(casted, 'float16')
    casted = mn.astype('float32')
    _check_cast(casted, 'float32')
    casted = mn.astype('int32')
    _check_cast(casted, 'int32')
    # to object
    casted = mn.astype('O')
    _check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
    """astype(..., raise_on_error=False) must leave uncastable string
    columns untouched instead of raising."""
    for target in (int, np.int32):
        frame = self.frame.copy()
        expected = self.frame.astype(target)
        frame['string'] = 'foo'
        result = frame.astype(target, raise_on_error=False)
        expected['string'] = 'foo'
        assert_frame_equal(result, expected)
def test_astype_with_view(self):
    """Smoke-test astype with copy=False (view-based casting paths)."""
    tf = self.mixed_float.reindex(columns = ['A','B','C'])
    casted = tf.astype(np.int64)
    casted = tf.astype(np.float32)
    # this is the only real reason to do it this way
    tf = np.round(self.frame).astype(np.int32)
    casted = tf.astype(np.float32, copy = False)
    tf = self.frame.astype(np.float64)
    casted = tf.astype(np.int64, copy = False)
    # NOTE(review): no assertions -- `casted` results are discarded, so
    # this only exercises the code paths without checking the output
def test_astype_cast_nan_int(self):
    """Casting a float column containing NaN to int64 must raise."""
    frame = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]})
    with self.assertRaises(ValueError):
        frame.astype(np.int64)
def test_astype_str(self):
    """GH 9757 / GH 11302: astype(str)/astype(unicode) formatting for
    datetime, tz-aware datetime, timedelta, int and float columns.
    """
    a = Series(date_range('2010-01-04', periods=5))
    b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern'))
    c = Series([Timedelta(x, unit='d') for x in range(5)])
    d = Series(range(5))
    e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
    df = DataFrame({'a' : a, 'b' : b, 'c' : c, 'd' : d, 'e' : e})
    # datetimelike
    # Test str and unicode on python 2.x and just str on python 3.x
    for tt in set([str, compat.text_type]):
        result = df.astype(tt)
        expected = DataFrame({
            # naive datetimes render date-only; tz-aware render in full
            'a' : list(map(tt, map(lambda x: Timestamp(x)._date_repr, a._values))),
            'b' : list(map(tt, map(Timestamp, b._values))),
            'c' : list(map(tt, map(lambda x: Timedelta(x)._repr_base(format='all'), c._values))),
            'd' : list(map(tt, d._values)),
            'e' : list(map(tt, e._values)),
        })
        assert_frame_equal(result, expected)
    # float/nan
    # 11302
    # consistency in astype(str)
    for tt in set([str, compat.text_type]):
        result = DataFrame([np.NaN]).astype(tt)
        expected = DataFrame(['nan'])
        assert_frame_equal(result, expected)
        # floats are truncated to the default repr precision
        result = DataFrame([1.12345678901234567890]).astype(tt)
        expected = DataFrame(['1.12345678901'])
        assert_frame_equal(result, expected)
def test_array_interface(self):
    """A ufunc applied to a frame returns a frame sharing index/columns."""
    transformed = np.sqrt(self.frame)
    tm.assertIsInstance(transformed, type(self.frame))
    self.assertIs(transformed.index, self.frame.index)
    self.assertIs(transformed.columns, self.frame.columns)
    # element-wise result matches an explicit apply
    assert_frame_equal(transformed, self.frame.apply(np.sqrt))
def test_pickle(self):
    """Mixed, empty and tz-aware fixture frames must round-trip pickle."""
    unpickled = self.round_trip_pickle(self.mixed_frame)
    assert_frame_equal(self.mixed_frame, unpickled)
    # buglet -- smoke-access the internals attribute, no assertion
    self.mixed_frame._data.ndim
    # empty frame: only check repr works after the round trip
    unpickled = self.round_trip_pickle(self.empty)
    repr(unpickled)
    # tz frame
    unpickled = self.round_trip_pickle(self.tzframe)
    assert_frame_equal(self.tzframe, unpickled)
def test_to_dict(self):
    """to_dict round-trips for each supported ``orient`` spelling."""
    test_data = {
        'A': {'1': 1, '2': 2},
        'B': {'1': '1', '2': '2', '3': '3'},
    }
    # default orient='dict': {column -> {index -> value}}
    recons_data = DataFrame(test_data).to_dict()
    for k, v in compat.iteritems(test_data):
        for k2, v2 in compat.iteritems(v):
            self.assertEqual(v2, recons_data[k][k2])
    # orient='l' (list): {column -> [values]}; positions follow the index
    recons_data = DataFrame(test_data).to_dict("l")
    for k, v in compat.iteritems(test_data):
        for k2, v2 in compat.iteritems(v):
            self.assertEqual(v2, recons_data[k][int(k2) - 1])
    # orient='s' (series): {column -> Series}
    recons_data = DataFrame(test_data).to_dict("s")
    for k, v in compat.iteritems(test_data):
        for k2, v2 in compat.iteritems(v):
            self.assertEqual(v2, recons_data[k][k2])
    # orient='sp' (split); row '3' has no 'A' entry, hence the nan
    recons_data = DataFrame(test_data).to_dict("sp")
    expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
                      'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
    tm.assert_almost_equal(recons_data, expected_split)
    # orient='r' (records): list of per-row dicts
    recons_data = DataFrame(test_data).to_dict("r")
    expected_records = [{'A': 1.0, 'B': '1'},
                        {'A': 2.0, 'B': '2'},
                        {'A': nan, 'B': '3'}]
    tm.assert_almost_equal(recons_data, expected_records)
    # GH10844: orient='i' (index): {index -> {column -> value}}
    recons_data = DataFrame(test_data).to_dict("i")
    for k, v in compat.iteritems(test_data):
        for k2, v2 in compat.iteritems(v):
            self.assertEqual(v2, recons_data[k2][k])
def test_to_dict_timestamp(self):
    """GH 11247: split/records/series orients must emit Timestamps
    (not raw np.datetime64) for datetime64[ns] columns.
    """
    tsmp = Timestamp('20130101')
    test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
    test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
    # records orient
    expected_records = [{'A': tsmp, 'B': tsmp},
                        {'A': tsmp, 'B': tsmp}]
    expected_records_mixed = [{'A': tsmp, 'B': 1},
                              {'A': tsmp, 'B': 2}]
    tm.assert_almost_equal(test_data.to_dict(
        orient='records'), expected_records)
    tm.assert_almost_equal(test_data_mixed.to_dict(
        orient='records'), expected_records_mixed)
    # series orient
    expected_series = {
        'A': Series([tsmp, tsmp]),
        'B': Series([tsmp, tsmp]),
    }
    expected_series_mixed = {
        'A': Series([tsmp, tsmp]),
        'B': Series([1, 2]),
    }
    tm.assert_almost_equal(test_data.to_dict(
        orient='series'), expected_series)
    tm.assert_almost_equal(test_data_mixed.to_dict(
        orient='series'), expected_series_mixed)
    # split orient
    expected_split = {
        'index': [0, 1],
        'data': [[tsmp, tsmp],
                 [tsmp, tsmp]],
        'columns': ['A', 'B']
    }
    expected_split_mixed = {
        'index': [0, 1],
        'data': [[tsmp, 1],
                 [tsmp, 2]],
        'columns': ['A', 'B']
    }
    tm.assert_almost_equal(test_data.to_dict(
        orient='split'), expected_split)
    tm.assert_almost_equal(test_data_mixed.to_dict(
        orient='split'), expected_split_mixed)
def test_to_dict_invalid_orient(self):
    """An unknown ``orient`` string must raise ValueError."""
    frame = DataFrame({'A': [0, 1]})
    with self.assertRaises(ValueError):
        frame.to_dict(orient='xinvalid')
def test_to_records_dt64(self):
    """to_records converts a DatetimeIndex to Timestamps unless told
    not to via convert_datetime64=False."""
    frame = DataFrame([["one", "two", "three"],
                       ["four", "five", "six"]],
                      index=date_range("2012-01-01", "2012-01-02"))
    # by default the index round-trips as Timestamps
    self.assertEqual(frame.to_records()['index'][0], frame.index[0])
    # with conversion disabled the raw datetime64 values are kept
    records = frame.to_records(convert_datetime64=False)
    self.assertEqual(records['index'][0], frame.index.values[0])
def test_to_records_with_multindex(self):
    """GH 3189: to_records on a MultiIndex emits labels, not codes."""
    levels = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
              ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
    frame = DataFrame(np.zeros((8, 4)), index=levels)
    level_zero = frame.to_records(index=True)['level_0']
    # first level labels present, second level labels absent
    self.assertIn('bar', level_zero)
    self.assertNotIn('one', level_zero)
def test_to_records_with_Mapping_type(self):
    """from_records must accept collections.Mapping instances (here an
    email Message registered as a Mapping)."""
    import email
    from email.parser import Parser
    import collections
    # make Message look like a Mapping without subclassing it
    collections.Mapping.register(email.message.Message)
    headers = Parser().parsestr('From: <user@example.com>\n'
                                'To: <someone_else@example.com>\n'
                                'Subject: Test message\n'
                                '\n'
                                'Body would go here\n')
    frame = DataFrame.from_records([headers])
    # NOTE(review): the result of this expression is discarded, so
    # nothing is actually asserted ('Type' is also not a parsed header);
    # effectively a construction smoke test only
    all( x in frame for x in ['Type','Subject','From'])
def test_from_records_to_records(self):
    """Round-trip between structured ndarrays and frames via
    from_records/to_records."""
    # from numpy documentation
    arr = np.zeros((2,), dtype=('i4,f4,a10'))
    arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
    frame = DataFrame.from_records(arr)
    index = np.arange(len(arr))[::-1]
    indexed_frame = DataFrame.from_records(arr, index=index)
    self.assert_numpy_array_equal(indexed_frame.index, index)
    # without names, it should go to last ditch
    arr2 = np.zeros((2,3))
    tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
    # wrong length
    msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
    with assertRaisesRegexp(ValueError, msg):
        DataFrame.from_records(arr, index=index[:-1])
    indexed_frame = DataFrame.from_records(arr, index='f1')
    # what to do?
    # with index=True the 'f1' index comes back as a field again
    records = indexed_frame.to_records()
    self.assertEqual(len(records.dtype.names), 3)
    records = indexed_frame.to_records(index=False)
    self.assertEqual(len(records.dtype.names), 2)
    self.assertNotIn('index', records.dtype.names)
def test_from_records_nones(self):
    """None values in record tuples become NaN in the resulting frame."""
    records = [(1, 2, None, 3),
               (1, 2, None, 3),
               (None, 2, 5, 3)]
    frame = DataFrame.from_records(records, columns=['a', 'b', 'c', 'd'])
    self.assertTrue(np.isnan(frame['c'][0]))
def test_from_records_iterator(self):
    """from_records must stop after ``nrows`` when fed an iterator."""
    arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],
                   dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])
    df = DataFrame.from_records(iter(arr), nrows=2)
    # only the first two records, with the structured dtypes preserved
    xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
                    'u': np.array([1.0, 3.0], dtype=np.float32),
                    'y': np.array([2, 4], dtype=np.int64),
                    'z': np.array([2, 4], dtype=np.int32)})
    assert_frame_equal(df.reindex_like(xp), xp)
    # no dtypes specified here, so just compare with the default
    arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
    df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
                                nrows=2)
    assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)
def test_from_records_tuples_generator(self):
    """from_records must consume a generator of tuples (no len())."""
    def make_tuples(length):
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        for i in range(length):
            yield (i, letters[i % len(letters)], i/length)

    names = ['Integer', 'String', 'Float']
    # transpose the generated rows into per-column lists for the
    # expectation, then feed a fresh generator to from_records
    columns = [[row[j] for row in make_tuples(10)]
               for j in range(len(names))]
    expected = DataFrame({'Integer': columns[0], 'String': columns[1],
                          'Float': columns[2]}, columns=names)
    result = DataFrame.from_records(make_tuples(10), columns=names)
    assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
    """from_records must consume a generator of lists (no len())."""
    def make_lists(length):
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        for i in range(length):
            yield [i, letters[i % len(letters)], i/length]

    names = ['Integer', 'String', 'Float']
    # transpose the generated rows into per-column lists for the
    # expectation, then feed a fresh generator to from_records
    columns = [[row[j] for row in make_lists(10)]
               for j in range(len(names))]
    expected = DataFrame({'Integer': columns[0], 'String': columns[1],
                          'Float': columns[2]}, columns=names)
    result = DataFrame.from_records(make_lists(10), columns=names)
    assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
    """from_records(..., index='a') must not mutate the caller's
    columns list."""
    records = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
    columns = ['a', 'b', 'c']
    snapshot = list(columns)
    DataFrame.from_records(records, columns=columns, index='a')
    self.assertEqual(columns, snapshot)
def test_from_records_decimal(self):
    """Decimal records stay object dtype unless coerce_float=True."""
    from decimal import Decimal
    records = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
    # without coercion the column keeps the Decimal objects
    frame = DataFrame.from_records(records, columns=['a'])
    self.assertEqual(frame['a'].dtype, object)
    # with coercion Decimals become float64 and None becomes NaN
    frame = DataFrame.from_records(records, columns=['a'],
                                   coerce_float=True)
    self.assertEqual(frame['a'].dtype, np.float64)
    self.assertTrue(np.isnan(frame['a'].values[-1]))
def test_from_records_duplicates(self):
    """Duplicate column labels are preserved by from_records."""
    rows = [(1, 2, 3), (4, 5, 6)]
    labels = ['a', 'b', 'a']
    result = DataFrame.from_records(rows, columns=labels)
    expected = DataFrame(rows, columns=labels)
    assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
    """index=... in from_records must name the resulting (Multi)Index."""
    def make_doc(order_id):
        return {'order_id': order_id,
                'quantity': np.random.randint(1, 10),
                'price': np.random.randint(1, 10)}

    documents = [make_doc(i) for i in range(10)]
    # one record with a missing 'price' key (demo missing data)
    documents.append({'order_id': 10, 'quantity': 5})
    # single-column index keeps its name
    result = DataFrame.from_records(documents, index='order_id')
    self.assertEqual(result.index.name, 'order_id')
    # MultiIndex keeps both names
    result = DataFrame.from_records(documents,
                                    index=['order_id', 'quantity'])
    self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
    """Assorted from_records regressions (GH 2179, GH 2623).

    Fix vs. original: the two dtype-count expectations for GH 2623 were
    computed but never compared to the results; they are now asserted
    (sorted on both sides to be order-independent).
    """
    # GH 2179: dict of lists keyed by non-string labels
    data = {1: ['foo'], 2: ['bar']}
    result = DataFrame.from_records(data, columns=['a', 'b'])
    exp = DataFrame(data, columns=['a', 'b'])
    assert_frame_equal(result, exp)

    # overlap in index/index_names
    data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
    result = DataFrame.from_records(data, index=['a', 'b', 'c'])
    exp = DataFrame(data, index=['a', 'b', 'c'])
    assert_frame_equal(result, exp)

    # GH 2623: a mixed-type column upcasts to object
    rows = []
    rows.append([datetime(2010, 1, 1), 1])
    rows.append([datetime(2010, 1, 2), 'hi'])  # test col upconverts to obj
    df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
    results = df2_obj.get_dtype_counts().sort_index()
    expected = Series({'datetime64[ns]': 1, 'object': 1}).sort_index()
    # previously built but never checked -- assert it now
    assert_series_equal(results, expected)

    # an all-int second column stays int64
    rows = []
    rows.append([datetime(2010, 1, 1), 1])
    rows.append([datetime(2010, 1, 2), 1])
    df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
    results = df2_obj.get_dtype_counts().sort_index()
    expected = Series({'datetime64[ns]': 1, 'int64': 1}).sort_index()
    assert_series_equal(results, expected)
def test_from_records_empty(self):
    """from_records([]) keeps the requested columns, duplicates included (GH 3562)."""
    for labels in (['a', 'b', 'c'], ['a', 'b', 'b']):
        result = DataFrame.from_records([], columns=labels)
        expected = DataFrame(columns=labels)
        assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
    """The index name from a recarray field survives even with zero records (GH 3682)."""
    a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
    df = DataFrame.from_records(a, index='id')
    assert_numpy_array_equal(df.index, Index([1], name='id'))
    self.assertEqual(df.index.name, 'id')
    assert_numpy_array_equal(df.columns, Index(['value']))
    # same recarray dtype, but empty
    b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
    df = DataFrame.from_records(b, index='id')
    assert_numpy_array_equal(df.index, Index([], name='id'))
    self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
    """from_records handles a recarray holding datetime64 with a null (GH 6140)."""
    if sys.version < LooseVersion('2.7'):
        raise nose.SkipTest('rec arrays dont work properly with py2.6')
    # this may fail on certain platforms because of a numpy issue
    # related GH6140
    if not is_little_endian():
        raise nose.SkipTest("known failure of test on non-little endian")
    # construction with a null in a recarray
    # GH 6140
    expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
    arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
    dtypes = [('EXPIRY', '<M8[ns]')]
    try:
        recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
    except (ValueError):
        # some numpy builds cannot create this recarray at all
        raise nose.SkipTest("known failure of numpy rec array creation")
    result = DataFrame.from_records(recarray)
    assert_frame_equal(result, expected)
    # coercion should work too: minute resolution upcast to [ns]
    arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
    dtypes = [('EXPIRY', '<M8[m]')]
    recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
    result = DataFrame.from_records(recarray)
    assert_frame_equal(result, expected)
def test_to_records_floats(self):
    """Smoke test: a float frame converts to a record array without raising."""
    frame = DataFrame(np.random.rand(10, 10))
    frame.to_records()
def test_to_recods_index_name(self):
    """to_records carries the index into the record fields under its name.

    NOTE(review): the method name has a typo ('recods'); kept as-is to avoid
    changing the test's external identifier.
    """
    df = DataFrame(np.random.randn(3, 3))
    df.index.name = 'X'
    rs = df.to_records()
    self.assertIn('X', rs.dtype.fields)
    # an unnamed index falls back to the field name 'index'
    df = DataFrame(np.random.randn(3, 3))
    rs = df.to_records()
    self.assertIn('index', rs.dtype.fields)
    # MultiIndex with a partially-named level set
    df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
    df.index.names = ['A', None]
    rs = df.to_records()
    self.assertIn('level_0', rs.dtype.fields)
def test_join_str_datetime(self):
    """Join on a string-date column against a frame with datetime column labels."""
    str_dates = ['20120209', '20120222']
    dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
    A = DataFrame(str_dates, index=lrange(2), columns=['aa'])
    C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
    tst = A.join(C, on='aa')
    # the original 'aa' column plus the two joined columns
    self.assertEqual(len(tst.columns), 3)
def test_join_multiindex_leftright(self):
    """left/right joins between MultiIndex and single-level frames agree (GH 10741)."""
    # GH 10741
    df1 = pd.DataFrame([['a', 'x', 0.471780], ['a', 'y', 0.774908],
                        ['a', 'z', 0.563634], ['b', 'x', -0.353756],
                        ['b', 'y', 0.368062], ['b', 'z', -1.721840],
                        ['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]],
                       columns=['first', 'second', 'value1']).set_index(['first', 'second'])
    df2 = pd.DataFrame([['a', 10], ['b', 20]], columns=['first', 'value2']).set_index(['first'])
    exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
                        [-0.353756, 20], [0.368062, 20], [-1.721840, 20],
                        [1.000000, np.nan], [2.000000, np.nan], [3.000000, np.nan]],
                       index=df1.index, columns=['value1', 'value2'])
    # these must be the same results (but columns are flipped)
    tm.assert_frame_equal(df1.join(df2, how='left'), exp)
    tm.assert_frame_equal(df2.join(df1, how='right'), exp[['value2', 'value1']])
    # inner-like expectation: only 'a'/'b' rows survive a right join onto df2
    exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']],
                                         names=['first', 'second'])
    exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
                        [-0.353756, 20], [0.368062, 20], [-1.721840, 20]],
                       index=exp_idx, columns=['value1', 'value2'])
    tm.assert_frame_equal(df1.join(df2, how='right'), exp)
    tm.assert_frame_equal(df2.join(df1, how='left'), exp[['value2', 'value1']])
def test_from_records_sequencelike(self):
    """Round-trip a mixed-dtype frame through tuples / recarrays / to_records.

    Tuples and plain lists lose dtype information (compared with
    check_dtype=False); recarrays preserve it exactly.
    """
    df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
                    'A1': np.array(np.random.randn(6), dtype=np.float64),
                    'B': np.array(np.arange(6), dtype=np.int64),
                    'C': ['foo'] * 6,
                    'D': np.array([True, False] * 3, dtype=bool),
                    'E': np.array(np.random.randn(6), dtype=np.float32),
                    'E1': np.array(np.random.randn(6), dtype=np.float32),
                    'F': np.array(np.arange(6), dtype=np.int32)})
    # this is actually tricky to create the recordlike arrays and have the dtypes be intact
    # walk the internal blocks so each column keeps its physical dtype
    blocks = df.blocks
    tuples = []
    columns = []
    dtypes = []
    for dtype, b in compat.iteritems(blocks):
        columns.extend(b.columns)
        dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
    for i in range(len(df.index)):
        tup = []
        for _, b in compat.iteritems(blocks):
            tup.extend(b.iloc[i].values)
        tuples.append(tuple(tup))
    recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
    recarray2 = df.to_records()
    lists = [list(x) for x in tuples]
    # tuples (lose the dtype info)
    result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)
    # created recarray and with to_records recarray (have dtype info)
    result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)
    result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)
    # list of tupels (no dtype info)
    result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)
    assert_frame_equal(result, df, check_dtype=False)
    assert_frame_equal(result2, df)
    assert_frame_equal(result3, df)
    assert_frame_equal(result4, df, check_dtype=False)
    # tuples is in the order of the columns
    result = DataFrame.from_records(tuples)
    self.assert_numpy_array_equal(result.columns, lrange(8))
    # test exclude parameter & we are casting the results here (as we don't have dtype info to recover)
    columns_to_test = [columns.index('C'), columns.index('E1')]
    exclude = list(set(range(8)) - set(columns_to_test))
    result = DataFrame.from_records(tuples, exclude=exclude)
    result.columns = [columns[i] for i in sorted(columns_to_test)]
    assert_series_equal(result['C'], df['C'])
    assert_series_equal(result['E1'], df['E1'].astype('float64'))
    # empty case
    result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
    self.assertEqual(len(result), 0)
    self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])
    result = DataFrame.from_records([])
    self.assertEqual(len(result), 0)
    self.assertEqual(len(result.columns), 0)
def test_from_records_dictlike(self):
    """from_records accepts dict-of-Series and dict-of-ndarrays inputs."""
    # test the dict methods
    df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
                    'A1': np.array(np.random.randn(6), dtype=np.float64),
                    'B': np.array(np.arange(6), dtype=np.int64),
                    'C': ['foo'] * 6,
                    'D': np.array([True, False] * 3, dtype=bool),
                    'E': np.array(np.random.randn(6), dtype=np.float32),
                    'E1': np.array(np.random.randn(6), dtype=np.float32),
                    'F': np.array(np.arange(6), dtype=np.int32)})
    # columns is in a different order here than the actual items iterated from the dict
    columns = []
    for dtype, b in compat.iteritems(df.blocks):
        columns.extend(b.columns)
    asdict = dict((x, y) for x, y in compat.iteritems(df))
    asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
    # dict of series & dict of ndarrays (have dtype info)
    results = []
    results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
    results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))
    results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))
    for r in results:
        assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
    """An array passed as `index` becomes the frame's index verbatim."""
    source = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    idx_values = np.random.randn(10)
    rebuilt = DataFrame.from_records(source, index=idx_values)
    assert rebuilt.index.equals(Index(idx_values))
def test_from_records_bad_index_column(self):
    """`index` must name an existing column; bad specifications raise."""
    df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    # should pass
    df1 = DataFrame.from_records(df, index=['C'])
    assert(df1.index.equals(Index(df.C)))
    df1 = DataFrame.from_records(df, index='C')
    assert(df1.index.equals(Index(df.C)))
    # should fail: 2 is not a column label (list form vs scalar form
    # raise different exception types)
    self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
    self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
    """Any indexable, iterable record object behaves like a tuple."""
    class Record(object):
        # minimal sequence-like record: supports indexing and iteration
        def __init__(self, *args):
            self.args = args

        def __getitem__(self, i):
            return self.args[i]

        def __iter__(self):
            return iter(self.args)

    recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
    tups = lmap(tuple, recs)
    result = DataFrame.from_records(recs)
    expected = DataFrame.from_records(tups)
    assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
    """Zero records with an index column still yield a named, empty index (#2633)."""
    result = DataFrame.from_records([], index='foo',
                                    columns=['foo', 'bar'])
    # 'foo' is consumed as the index, leaving only 'bar' behind
    self.assertTrue(np.array_equal(result.columns, ['bar']))
    self.assertEqual(result.index.name, 'foo')
    self.assertEqual(len(result), 0)
def test_get_agg_axis(self):
    """_get_agg_axis maps 0 -> columns, 1 -> index; anything else raises."""
    self.assertIs(self.frame._get_agg_axis(0), self.frame.columns)
    self.assertIs(self.frame._get_agg_axis(1), self.frame.index)
    self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
    """DataFrame.empty reflects whether any data remains."""
    self.assertTrue(self.empty.empty)
    self.assertFalse(self.frame.empty)
    self.assertFalse(self.mixed_frame.empty)
    # corner case: dropping a column does not make the frame empty
    df = DataFrame({'A': [1., 2., 3.],
                    'B': ['a', 'b', 'c']},
                   index=np.arange(3))
    del df['A']
    self.assertFalse(df.empty)
def test_repr_empty(self):
    """repr of empty frames (with and without an index) must not raise.

    Cleanup: removed the dead `buf = StringIO()` local (never written to)
    and the unused `foo` bindings — the repr calls are pure smoke tests.
    """
    # completely empty frame
    repr(self.empty)
    # no columns, but a populated index
    frame = DataFrame(index=np.arange(1000))
    repr(frame)
def test_repr_mixed(self):
    """repr and info() of a mixed-dtype frame must not raise."""
    buf = StringIO()
    repr(self.mixed_frame)
    self.mixed_frame.info(verbose=False, buf=buf)
@slow
def test_repr_mixed_big(self):
    """repr of a large mixed-dtype frame with leading NaNs must not raise."""
    # big mixed
    biggie = DataFrame({'A': randn(200),
                        'B': tm.makeStringIndex(200)},
                       index=lrange(200))
    biggie.loc[:20, 'A'] = nan
    biggie.loc[:20, 'B'] = nan
    foo = repr(biggie)
def test_repr(self):
    """repr/info smoke tests plus escaping of control characters."""
    buf = StringIO()
    # small one
    foo = repr(self.frame)
    self.frame.info(verbose=False, buf=buf)
    # even smaller
    self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
    self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
    # exhausting cases in DataFrame.info
    # columns but no index
    no_index = DataFrame(columns=[0, 1, 3])
    foo = repr(no_index)
    # no columns or index
    self.empty.info(buf=buf)
    # tabs/carriage returns in labels and data must be escaped in repr
    df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
    self.assertFalse("\t" in repr(df))
    self.assertFalse("\r" in repr(df))
    self.assertFalse("a\n" in repr(df))
def test_repr_dimensions(self):
    """display.show_dimensions controls the trailing shape line in repr."""
    frame = DataFrame([[1, 2], [3, 4]])
    # (option value, shape line expected?) — a 2x2 frame is never
    # truncated, so 'truncate' behaves like False here
    cases = [(True, True), (False, False), ('truncate', False)]
    for setting, shown in cases:
        with option_context('display.show_dimensions', setting):
            self.assertEqual("2 rows x 2 columns" in repr(frame), shown)
@slow
def test_repr_big(self):
    """repr of a 200x4 frame must not raise."""
    buf = StringIO()
    # big one
    biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
                       index=lrange(200))
    foo = repr(biggie)
def test_repr_unsortable(self):
    """repr must cope with columns that cannot be sorted (mixed str/datetime)."""
    # columns are not sortable
    import warnings
    # silence the sort-related FutureWarning from the formatting code,
    # restoring the filter state at the end
    warn_filters = warnings.filters
    warnings.filterwarnings('ignore',
                            category=FutureWarning,
                            module=".*format")
    unsortable = DataFrame({'foo': [1] * 50,
                            datetime.today(): [1] * 50,
                            'bar': ['bar'] * 50,
                            datetime.today(
                            ) + timedelta(1): ['bar'] * 50},
                           index=np.arange(50))
    foo = repr(unsortable)
    # repr under several display-option combinations must not raise
    fmt.set_option('display.precision', 3, 'display.column_space', 10)
    repr(self.frame)
    fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
    repr(self.frame)
    fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
    repr(self.frame)
    self.reset_display_options()
    warnings.filters = warn_filters
def test_repr_unicode(self):
    """The repr header line survives unicode (Greek sigma) column values."""
    uval = u('\u03c3\u03c3\u03c3\u03c3')
    bval = uval.encode('utf-8')  # NOTE(review): unused local — candidate for removal
    df = DataFrame({'A': [uval, uval]})
    result = repr(df)
    # NOTE(review): this literal looks whitespace-mangled in the source;
    # presumably it was a right-aligned header ('<spaces>A') — verify
    # against the repository history before editing.
    ex_top = ' A'
    self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
    # a fresh, identical frame must produce the same header
    df = DataFrame({'A': [uval, uval]})
    result = repr(df)
    self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
def test_unicode_string_with_unicode(self):
    """Stringifying a frame holding non-ascii text must not raise."""
    frame = DataFrame({'A': [u("\u05d0")]})
    # py3: plain str; py2: the unicode type via compat
    if compat.PY3:
        str(frame)
    else:
        compat.text_type(frame)
def test_bytestring_with_unicode(self):
    """Byte-stringifying a frame holding non-ascii text must not raise."""
    frame = DataFrame({'A': [u("\u05d0")]})
    # py3: bytes(); py2: str() is the byte string type
    if compat.PY3:
        bytes(frame)
    else:
        str(frame)
def test_very_wide_info_repr(self):
    """repr of a frame with many random-string columns must not raise."""
    wide = DataFrame(np.random.randn(10, 20),
                     columns=tm.rands_array(10, 20))
    repr(wide)
def test_repr_column_name_unicode_truncation_bug(self):
    """A very long value with embedded non-ascii bytes must not drop the column name from repr (#1906)."""
    # #1906
    df = DataFrame({'Id': [7117434],
                    'StringCol': ('Is it possible to modify drop plot code'
                                  ' so that the output graph is displayed '
                                  'in iphone simulator, Is it possible to '
                                  'modify drop plot code so that the '
                                  'output graph is \xe2\x80\xa8displayed '
                                  'in iphone simulator.Now we are adding '
                                  'the CSV file externally. I want to Call'
                                  ' the File through the code..')})
    result = repr(df)
    self.assertIn('StringCol', result)
def test_head_tail(self):
    """head/tail match the equivalent slices, incl. 0, negative n, float index, empty frame."""
    assert_frame_equal(self.frame.head(), self.frame[:5])
    assert_frame_equal(self.frame.tail(), self.frame[-5:])
    assert_frame_equal(self.frame.head(0), self.frame)
    assert_frame_equal(self.frame.tail(0), self.frame)
    assert_frame_equal(self.frame.head(-1), self.frame[:-1])
    assert_frame_equal(self.frame.tail(-1), self.frame[1:])
    assert_frame_equal(self.frame.head(1), self.frame[:1])
    assert_frame_equal(self.frame.tail(1), self.frame[-1:])
    # with a float index
    df = self.frame.copy()
    df.index = np.arange(len(self.frame)) + 0.1
    assert_frame_equal(df.head(), df.iloc[:5])
    assert_frame_equal(df.tail(), df.iloc[-5:])
    assert_frame_equal(df.head(0), df)
    assert_frame_equal(df.tail(0), df)
    assert_frame_equal(df.head(-1), df.iloc[:-1])
    assert_frame_equal(df.tail(-1), df.iloc[1:])
    # test empty dataframe
    empty_df = DataFrame()
    assert_frame_equal(empty_df.tail(), empty_df)
    assert_frame_equal(empty_df.head(), empty_df)
def test_insert(self):
    """insert() places a column at a position; duplicate names raise; columns.name survives."""
    df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
                   columns=['c', 'b', 'a'])
    df.insert(0, 'foo', df['a'])
    self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])
    assert_almost_equal(df['a'], df['foo'])
    df.insert(2, 'bar', df['c'])
    self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])
    assert_almost_equal(df['c'], df['bar'])
    # diff dtype
    # new item
    df['x'] = df['a'].astype('float32')
    result = Series(dict(float64=5, float32=1))
    self.assertTrue((df.get_dtype_counts() == result).all())
    # replacing current (in different block)
    df['a'] = df['a'].astype('float32')
    result = Series(dict(float64=4, float32=2))
    self.assertTrue((df.get_dtype_counts() == result).all())
    df['y'] = df['a'].astype('int32')
    result = Series(dict(float64=4, float32=2, int32=1))
    self.assertTrue((df.get_dtype_counts() == result).all())
    # inserting an existing name must raise, both via insert and setitem path
    with assertRaisesRegexp(ValueError, 'already exists'):
        df.insert(1, 'a', df['b'])
    self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
    df.columns.name = 'some_name'
    # preserve columns name field
    df.insert(0, 'baz', df['c'])
    self.assertEqual(df.columns.name, 'some_name')
def test_delitem(self):
    """`del frame[label]` drops the column."""
    frame = self.frame
    del frame['A']
    self.assertNotIn('A', frame)
def test_pop(self):
    """pop() removes a column and returns it; the result is decoupled from the frame (GH 10912)."""
    self.frame.columns.name = 'baz'
    A = self.frame.pop('A')
    self.assertNotIn('A', self.frame)
    self.frame['foo'] = 'bar'
    foo = self.frame.pop('foo')
    self.assertNotIn('foo', self.frame)
    # TODO self.assertEqual(self.frame.columns.name, 'baz')
    # 10912
    # inplace ops cause caching issue
    a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'], index=['X', 'Y'])
    b = a.pop('B')
    # mutating the popped series must not leak back into the frame
    b += 1
    # original frame
    expected = DataFrame([[1, 3], [4, 6]], columns=['A', 'C'], index=['X', 'Y'])
    assert_frame_equal(a, expected)
    # result
    expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
    assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
    """Popping a duplicated label returns every matching column as a DataFrame."""
    frame = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
    frame.columns = ["a", "b", "a"]
    popped = frame.pop("a")
    # both 'a' columns come back, packaged as a frame
    self.assertEqual(type(popped), DataFrame)
    self.assertEqual(len(popped), 2)
    # only the unique 'b' column remains behind, rows untouched
    self.assertEqual(len(frame.columns), 1)
    self.assertTrue("b" in frame.columns)
    self.assertFalse("a" in frame.columns)
    self.assertEqual(len(frame.index), 2)
def test_iter(self):
    """Iterating a frame yields its column labels."""
    labels = list(iter(self.frame))
    self.assertTrue(tm.equalContents(labels, self.frame.columns))
def test_iterrows(self):
    """Each iterrows() item matches the corresponding cross-section."""
    for i, (k, v) in enumerate(self.frame.iterrows()):
        exp = self.frame.xs(self.frame.index[i])
        assert_series_equal(v, exp)
    # mixed dtypes as well
    for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
        exp = self.mixed_frame.xs(self.mixed_frame.index[i])
        assert_series_equal(v, exp)
def test_itertuples(self):
    """itertuples: values, dtype preservation, duplicate columns, naming, field limits."""
    for i, tup in enumerate(self.frame.itertuples()):
        s = Series(tup[1:])
        s.name = tup[0]
        expected = self.frame.ix[i, :].reset_index(drop=True)
        assert_series_equal(s, expected)
    df = DataFrame({'floats': np.random.randn(5),
                    'ints': lrange(5)}, columns=['floats', 'ints'])
    # int column must stay integer even with index=False
    for tup in df.itertuples(index=False):
        tm.assertIsInstance(tup[1], np.integer)
    df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
    dfaa = df[['a', 'a']]
    self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
    tup = next(df.itertuples(name='TestName'))
    # no support for field renaming in Python 2.6, regular tuples are returned
    if sys.version >= LooseVersion('2.7'):
        self.assertEqual(tup._fields, ('Index', 'a', 'b'))
        self.assertEqual((tup.Index, tup.a, tup.b), tup)
        self.assertEqual(type(tup).__name__, 'TestName')
    # python keywords as column names are rewritten to positional fields
    df.columns = ['def', 'return']
    tup2 = next(df.itertuples(name='TestName'))
    self.assertEqual(tup2, (0, 1, 4))
    if sys.version >= LooseVersion('2.7'):
        self.assertEqual(tup2._fields, ('Index', '_1', '_2'))
    df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))
    # will raise SyntaxError if trying to create namedtuple
    tup3 = next(df3.itertuples())
    self.assertFalse(hasattr(tup3, '_fields'))
    self.assertIsInstance(tup3, tuple)
def test_len(self):
    """len(frame) equals the number of index entries."""
    expected = len(self.frame.index)
    self.assertEqual(len(self.frame), expected)
def test_operators(self):
    """Elementwise arithmetic: frame+frame, frame+Series, strings, and empty frames (GH 10181)."""
    garbage = random.random(4)
    colSeries = Series(garbage, index=np.array(self.frame.columns))
    idSum = self.frame + self.frame
    seriesSum = self.frame + colSeries
    # frame + frame doubles every non-NaN value
    for col, series in compat.iteritems(idSum):
        for idx, val in compat.iteritems(series):
            origVal = self.frame[col][idx] * 2
            if not np.isnan(val):
                self.assertEqual(val, origVal)
            else:
                self.assertTrue(np.isnan(origVal))
    # frame + Series broadcasts along columns
    for col, series in compat.iteritems(seriesSum):
        for idx, val in compat.iteritems(series):
            origVal = self.frame[col][idx] + colSeries[col]
            if not np.isnan(val):
                self.assertEqual(val, origVal)
            else:
                self.assertTrue(np.isnan(origVal))
    added = self.frame2 + self.frame2
    expected = self.frame2 * 2
    assert_frame_equal(added, expected)
    # string addition concatenates; None propagates as NaN
    df = DataFrame({'a': ['a', None, 'b']})
    assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
    # Test for issue #10181
    for dtype in ('float', 'int64'):
        frames = [
            DataFrame(dtype=dtype),
            DataFrame(columns=['A'], dtype=dtype),
            DataFrame(index=[0], dtype=dtype),
        ]
        for df in frames:
            self.assertTrue((df + df).equals(df))
            assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
    """Arithmetic with 0-d numpy scalars matches plain-scalar arithmetic."""
    vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
    f = lambda x: DataFrame(x, index=list('ABCDE'),
                            columns=['jim', 'joe', 'jolie'])
    df = f(vals)
    for x in xs:
        # both operand orders, several operators
        assert_frame_equal(df / np.array(x), f(vals / x))
        assert_frame_equal(np.array(x) * df, f(vals * x))
        assert_frame_equal(df + np.array(x), f(vals + x))
        assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
    """&/| on empty and bool frames; mixed float/str with bool raises (GH 5808)."""
    # GH 5808
    # empty frames, non-mixed dtype
    result = DataFrame(index=[1]) & DataFrame(index=[1])
    assert_frame_equal(result, DataFrame(index=[1]))
    result = DataFrame(index=[1]) | DataFrame(index=[1])
    assert_frame_equal(result, DataFrame(index=[1]))
    result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
    assert_frame_equal(result, DataFrame(index=[1, 2]))
    result = DataFrame(index=[1], columns=['A']) & DataFrame(index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
    result = DataFrame(True, index=[1], columns=['A']) & DataFrame(True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
    result = DataFrame(True, index=[1], columns=['A']) | DataFrame(True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
    # boolean ops: int | bool keeps the int dtype
    result = DataFrame(1, index=[1], columns=['A']) | DataFrame(True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
    def f():
        DataFrame(1.0, index=[1], columns=['A']) | DataFrame(True, index=[1], columns=['A'])
    self.assertRaises(TypeError, f)
    def f():
        DataFrame('foo', index=[1], columns=['A']) | DataFrame(True, index=[1], columns=['A'])
    self.assertRaises(TypeError, f)
def test_operators_none_as_na(self):
    """Arithmetic on object frames treats None like NaN."""
    df = DataFrame({"col1": [2, 5.0, 123, None],
                    "col2": [1, 2, 3, 4]}, dtype=object)
    ops = [operator.add, operator.sub, operator.mul, operator.truediv]
    # since filling converts dtypes from object, changed expected to be object
    for op in ops:
        filled = df.fillna(np.nan)
        result = op(df, 3)
        expected = op(filled, 3).astype(object)
        expected[com.isnull(expected)] = None
        assert_frame_equal(result, expected)
        result = op(df, df)
        expected = op(filled, filled).astype(object)
        expected[com.isnull(expected)] = None
        assert_frame_equal(result, expected)
        # NaN on either side still propagates through the op
        result = op(df, df.fillna(7))
        assert_frame_equal(result, expected)
        result = op(df.fillna(7), df)
        assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
    """Comparing datetime columns against int columns raises TypeError (GH 4968)."""
    def check(df, df2):
        # every comparison operator, both operand orders
        for (x, y) in [(df, df2), (df2, df)]:
            self.assertRaises(TypeError, lambda: x == y)
            self.assertRaises(TypeError, lambda: x != y)
            self.assertRaises(TypeError, lambda: x >= y)
            self.assertRaises(TypeError, lambda: x > y)
            self.assertRaises(TypeError, lambda: x < y)
            self.assertRaises(TypeError, lambda: x <= y)
    # GH4968
    # invalid date/int comparisons
    df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
    df['dates'] = date_range('20010101', periods=len(df))
    df2 = df.copy()
    df2['dates'] = df['a']
    check(df, df2)
    df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
    df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})
    check(df, df2)
def test_timestamp_compare(self):
    """Timestamp vs frame comparisons commute via the mirrored operator (GH 4982)."""
    # make sure we can compare Timestamps on the right AND left hand side
    # GH4982
    df = DataFrame({'dates1': date_range('20010101', periods=10),
                    'dates2': date_range('20010102', periods=10),
                    'intcol': np.random.randint(1000000000, size=10),
                    'floatcol': np.random.randn(10),
                    'stringcol': list(tm.rands(10))})
    df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
    # each operator paired with its mirror image
    ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
           'ne': 'ne'}
    for left, right in ops.items():
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)
        # no nats
        expected = left_f(df, Timestamp('20010109'))
        result = right_f(Timestamp('20010109'), df)
        tm.assert_frame_equal(result, expected)
        # nats
        expected = left_f(df, Timestamp('nat'))
        result = right_f(Timestamp('nat'), df)
        tm.assert_frame_equal(result, expected)
def test_modulo(self):
    """Modulo on frames: x % 0 yields NaN, and Series % frame is not commutative (GH 3590)."""
    # GH3590, modulo as ints
    p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
    ### this is technically wrong as the integer portion is coerced to float ###
    expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'), 'second': Series([np.nan, np.nan, np.nan, 0])})
    result = p % p
    assert_frame_equal(result, expected)
    # numpy has a slightly different (wrong) treatement
    result2 = DataFrame(p.values % p.values, index=p.index, columns=p.columns, dtype='float64')
    result2.iloc[0:3, 1] = np.nan
    assert_frame_equal(result2, expected)
    result = p % 0
    expected = DataFrame(np.nan, index=p.index, columns=p.columns)
    assert_frame_equal(result, expected)
    # numpy has a slightly different (wrong) treatement
    result2 = DataFrame(p.values.astype('float64') % 0, index=p.index, columns=p.columns)
    assert_frame_equal(result2, expected)
    # not commutative with series
    p = DataFrame(np.random.randn(10, 5))
    s = p[0]
    res = s % p
    res2 = p % s
    self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_div(self):
    """Division on frames: 0/0 -> NaN, x/0 -> inf, non-commutative with Series (GH 9144)."""
    # integer div, but deal with the 0's (GH 9144)
    p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
    result = p / p
    expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
                          'second': Series([nan, nan, nan, 1])})
    assert_frame_equal(result, expected)
    result2 = DataFrame(p.values.astype('float') / p.values, index=p.index,
                        columns=p.columns)
    assert_frame_equal(result2, expected)
    result = p / 0
    expected = DataFrame(inf, index=p.index, columns=p.columns)
    # 0/0 entries are NaN, not inf
    expected.iloc[0:3, 1] = nan
    assert_frame_equal(result, expected)
    # numpy has a slightly different (wrong) treatement
    result2 = DataFrame(p.values.astype('float64') / 0, index=p.index,
                        columns=p.columns)
    assert_frame_equal(result2, expected)
    p = DataFrame(np.random.randn(10, 5))
    s = p[0]
    res = s / p
    res2 = p / s
    self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_logical_operators(self):
    """&, |, ^ and ~ on boolean frames match the numpy elementwise result."""
    def _check_bin_op(op):
        # binary op result must be bool-dtyped and equal the ndarray op
        result = op(df1, df2)
        expected = DataFrame(op(df1.values, df2.values), index=df1.index,
                             columns=df1.columns)
        self.assertEqual(result.values.dtype, np.bool_)
        assert_frame_equal(result, expected)
    def _check_unary_op(op):
        result = op(df1)
        expected = DataFrame(op(df1.values), index=df1.index,
                             columns=df1.columns)
        self.assertEqual(result.values.dtype, np.bool_)
        assert_frame_equal(result, expected)
    df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': False, 'b': False, 'c': True,
                 'd': False, 'e': False},
           'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
    df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'd': {'a': False, 'b': False, 'c': False,
                 'd': True, 'e': False},
           'e': {'a': False, 'b': False, 'c': False,
                 'd': False, 'e': True}}
    df1 = DataFrame(df1)
    df2 = DataFrame(df2)
    _check_bin_op(operator.and_)
    _check_bin_op(operator.or_)
    _check_bin_op(operator.xor)
    # operator.neg is deprecated in numpy >= 1.9
    _check_unary_op(operator.inv)
def test_logical_typeerror(self):
    """On py2, comparing a float frame against a string raises TypeError."""
    if not compat.PY3:
        self.assertRaises(TypeError, self.frame.__eq__, 'foo')
        self.assertRaises(TypeError, self.frame.__lt__, 'foo')
        self.assertRaises(TypeError, self.frame.__gt__, 'foo')
        self.assertRaises(TypeError, self.frame.__ne__, 'foo')
    else:
        # py3 comparison semantics differ; not covered here
        raise nose.SkipTest('test_logical_typeerror not tested on PY3')
def test_constructor_lists_to_object_dtype(self):
    """A list mixing NaN and bool produces an object column (#1074)."""
    frame = DataFrame({'a': [np.nan, False]})
    self.assertEqual(frame['a'].dtype, np.object_)
    self.assertFalse(frame['a'][1])
def test_constructor_with_nas(self):
    """NaN labels: a single NaN is addressable, multiple NaN labels raise (GH 5016)."""
    # GH 5016
    # na's in indicies
    def check(df):
        for i in range(len(df.columns)):
            df.iloc[:, i]
        # allow single nans to succeed
        indexer = np.arange(len(df.columns))[isnull(df.columns)]
        if len(indexer) == 1:
            assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan])
        # multiple nans should fail
        else:
            def f():
                df.loc[:, np.nan]
            self.assertRaises(TypeError, f)
    df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
    check(df)
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
    check(df)
    df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan])
    check(df)
    df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan])
    check(df)
def test_logical_with_nas(self):
    """Boolean | with NaN: raw NaN is falsy; fillna variants give True (GH 4947/4604)."""
    d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
    # GH4947
    # bool comparisons should return bool
    result = d['a'] | d['b']
    expected = Series([False, True])
    assert_series_equal(result, expected)
    # GH4604, automatic casting here
    result = d['a'].fillna(False) | d['b']
    expected = Series([True, True])
    assert_series_equal(result, expected)
    # explicit downcast=False must behave the same
    result = d['a'].fillna(False, downcast=False) | d['b']
    expected = Series([True, True])
    assert_series_equal(result, expected)
def test_neg(self):
    """Unary negation matches multiplication by -1."""
    expected = -1 * self.frame
    assert_frame_equal(-self.frame, expected)
def test_invert(self):
    """~mask agrees with arithmetic negation of the boolean mask."""
    mask = self.frame < 0
    assert_frame_equal(-mask, ~mask)
def test_first_last_valid(self):
    """first/last_valid_index skip leading/trailing NaN runs."""
    N = len(self.frame.index)
    mat = randn(N)
    # NaN-pad both ends
    mat[:5] = nan
    mat[-5:] = nan
    frame = DataFrame({'foo': mat}, index=self.frame.index)
    index = frame.first_valid_index()
    self.assertEqual(index, frame.index[5])
    index = frame.last_valid_index()
    self.assertEqual(index, frame.index[-6])
def test_arith_flex_frame(self):
    """Flex arithmetic methods (add/sub/...) match the plain operators, incl. r-ops and mixed dtypes."""
    ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
    if not compat.PY3:
        aliases = {}
    else:
        # py3 has no operator.div; map to truediv
        aliases = {'div': 'truediv'}
    for op in ops:
        try:
            alias = aliases.get(op, op)
            f = getattr(operator, alias)
            result = getattr(self.frame, op)(2 * self.frame)
            exp = f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)
            # vs mix float
            result = getattr(self.mixed_float, op)(2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype=dict(C=None))
            # vs mix int
            if op in ['add', 'sub', 'mul']:
                result = getattr(self.mixed_int, op)(2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)
                # overflow in the uint
                dtype = None
                if op in ['sub']:
                    dtype = dict(B='object', C=None)
                elif op in ['add', 'mul']:
                    dtype = dict(C=None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype=dtype)
            # rops
            r_f = lambda x, y: f(y, x)
            result = getattr(self.frame, 'r' + op)(2 * self.frame)
            exp = r_f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)
            # vs mix float
            result = getattr(self.mixed_float, op)(2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype=dict(C=None))
            result = getattr(self.intframe, op)(2 * self.intframe)
            exp = f(self.intframe, 2 * self.intframe)
            assert_frame_equal(result, exp)
            # vs mix int
            if op in ['add', 'sub', 'mul']:
                result = getattr(self.mixed_int, op)(2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)
                # overflow in the uint
                dtype = None
                if op in ['sub']:
                    dtype = dict(B='object', C=None)
                elif op in ['add', 'mul']:
                    dtype = dict(C=None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype=dtype)
        except:
            # identify the failing operator before re-raising
            com.pprint_thing("Failing operation %r" % op)
            raise
        # ndim >= 3 operands are rejected with a shape error
        ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
        with assertRaisesRegexp(ValueError, 'shape'):
            f(self.frame, ndim_5)
        with assertRaisesRegexp(ValueError, 'shape'):
            getattr(self.frame, op)(ndim_5)
    # res_add = self.frame.add(self.frame)
    # res_sub = self.frame.sub(self.frame)
    # res_mul = self.frame.mul(self.frame)
    # res_div = self.frame.div(2 * self.frame)
    # assert_frame_equal(res_add, self.frame + self.frame)
    # assert_frame_equal(res_sub, self.frame - self.frame)
    # assert_frame_equal(res_mul, self.frame * self.frame)
    # assert_frame_equal(res_div, self.frame / (2 * self.frame))
    const_add = self.frame.add(1)
    assert_frame_equal(const_add, self.frame + 1)
    # corner cases
    result = self.frame.add(self.frame[:0])
    assert_frame_equal(result, self.frame * np.nan)
    result = self.frame[:0].add(self.frame)
    assert_frame_equal(result, self.frame * np.nan)
    # fill_value combined with a Series operand is unsupported
    with assertRaisesRegexp(NotImplementedError, 'fill_value'):
        self.frame.add(self.frame.iloc[0], fill_value=3)
    with assertRaisesRegexp(NotImplementedError, 'fill_value'):
        self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
    """Binary ops align on a MultiIndex level via `level=` (GH 6681, GH 9463)."""
    # test aligning binary ops
    # GH 6681
    index = MultiIndex.from_product([list('abc'),
                                     ['one', 'two', 'three'],
                                     [1, 2, 3]],
                                    names=['first', 'second', 'third'])
    df = DataFrame(np.arange(27 * 3).reshape(27, 3),
                   index=index,
                   columns=['value1', 'value2', 'value3']).sortlevel()
    idx = pd.IndexSlice
    for op in ['add', 'sub', 'mul', 'div', 'truediv']:
        opa = getattr(operator, op, None)
        if opa is None:
            # e.g. operator.div does not exist on py3
            continue
        x = Series([1.0, 10.0, 100.0], [1, 2, 3])
        result = getattr(df, op)(x, level='third', axis=0)
        # expected: apply the op slice-by-slice over the 'third' level
        expected = pd.concat([opa(df.loc[idx[:, :, i], :], v) for i, v in x.iteritems()]).sortlevel()
        assert_frame_equal(result, expected)
        x = Series([1.0, 10.0], ['two', 'three'])
        result = getattr(df, op)(x, level='second', axis=0)
        expected = pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.iteritems()]).reindex_like(df).sortlevel()
        assert_frame_equal(result, expected)
    ## GH9463 (alignment level of dataframe with series)
    midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
    df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
    s = pd.Series({'a': 1, 'b': 2})
    df2 = df.copy()
    df2.columns.names = ['lvl0', 'lvl1']
    s2 = s.copy()
    s2.index.name = 'lvl1'
    # different cases of integer/string level names:
    res1 = df.mul(s, axis=1, level=1)
    res2 = df.mul(s2, axis=1, level=1)
    res3 = df2.mul(s, axis=1, level=1)
    res4 = df2.mul(s2, axis=1, level=1)
    res5 = df2.mul(s, axis=1, level='lvl1')
    res6 = df2.mul(s2, axis=1, level='lvl1')
    exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
                    columns=midx)
    for res in [res1, res2]:
        assert_frame_equal(res, exp)
    exp.columns.names = ['lvl0', 'lvl1']
    for res in [res3, res4, res5, res6]:
        assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
    def test_bool_flex_frame(self):
        """Flexible comparison methods (eq/ne/gt/lt/ge/le) against frames,
        ndarrays, scalars, Series (both axes), NaN, complex and object
        data; mis-shaped input must raise."""
        data = np.random.randn(5, 3)
        other_data = np.random.randn(5, 3)
        df = DataFrame(data)
        other = DataFrame(other_data)
        # 5-dimensional array: comparison must raise, never broadcast
        ndim_5 = np.ones(df.shape + (1, 3))
        # Unaligned
        def _check_unaligned_frame(meth, op, df, other):
            # method form aligns the partial frame before comparing
            part_o = other.ix[3:, 1:].copy()
            rs = meth(part_o)
            xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
            assert_frame_equal(rs, xp)
        # DataFrame
        self.assertTrue(df.eq(df).values.all())
        self.assertFalse(df.ne(df).values.any())
        for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
            f = getattr(df, op)
            o = getattr(operator, op)
            # No NAs
            assert_frame_equal(f(other), o(df, other))
            _check_unaligned_frame(f, o, df, other)
            # ndarray
            assert_frame_equal(f(other.values), o(df, other.values))
            # scalar
            assert_frame_equal(f(0), o(df, 0))
            # NAs
            assert_frame_equal(f(np.nan), o(df, np.nan))
            with assertRaisesRegexp(ValueError, 'shape'):
                f(ndim_5)
        # Series
        def _test_seq(df, idx_ser, col_ser):
            # idx_ser matches the index (axis=0); col_ser the columns
            idx_eq = df.eq(idx_ser, axis=0)
            col_eq = df.eq(col_ser)
            idx_ne = df.ne(idx_ser, axis=0)
            col_ne = df.ne(col_ser)
            assert_frame_equal(col_eq, df == Series(col_ser))
            assert_frame_equal(col_eq, -col_ne)
            assert_frame_equal(idx_eq, -idx_ne)
            assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
            assert_frame_equal(col_eq, df.eq(list(col_ser)))
            assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
            assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
            idx_gt = df.gt(idx_ser, axis=0)
            col_gt = df.gt(col_ser)
            idx_le = df.le(idx_ser, axis=0)
            col_le = df.le(col_ser)
            assert_frame_equal(col_gt, df > Series(col_ser))
            assert_frame_equal(col_gt, -col_le)
            assert_frame_equal(idx_gt, -idx_le)
            assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
            idx_ge = df.ge(idx_ser, axis=0)
            col_ge = df.ge(col_ser)
            idx_lt = df.lt(idx_ser, axis=0)
            col_lt = df.lt(col_ser)
            assert_frame_equal(col_ge, df >= Series(col_ser))
            assert_frame_equal(col_ge, -col_lt)
            assert_frame_equal(idx_ge, -idx_lt)
            assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
        idx_ser = Series(np.random.randn(5))
        col_ser = Series(np.random.randn(3))
        _test_seq(df, idx_ser, col_ser)
        # list/tuple
        _test_seq(df, idx_ser.values, col_ser.values)
        # NA
        # NaN compares unequal to everything, including itself
        df.ix[0, 0] = np.nan
        rs = df.eq(df)
        self.assertFalse(rs.ix[0, 0])
        rs = df.ne(df)
        self.assertTrue(rs.ix[0, 0])
        rs = df.gt(df)
        self.assertFalse(rs.ix[0, 0])
        rs = df.lt(df)
        self.assertFalse(rs.ix[0, 0])
        rs = df.ge(df)
        self.assertFalse(rs.ix[0, 0])
        rs = df.le(df)
        self.assertFalse(rs.ix[0, 0])
        # complex
        arr = np.array([np.nan, 1, 6, np.nan])
        arr2 = np.array([2j, np.nan, 7, None])
        df = DataFrame({'a': arr})
        df2 = DataFrame({'a': arr2})
        rs = df.gt(df2)
        self.assertFalse(rs.values.any())
        rs = df.ne(df2)
        self.assertTrue(rs.values.all())
        arr3 = np.array([2j, np.nan, None])
        df3 = DataFrame({'a': arr3})
        rs = df3.gt(2j)
        self.assertFalse(rs.values.any())
        # corner, dtype=object
        df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
        df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
        result = df1.ne(df2)
        exp = DataFrame({'col': [False, True, False]})
        assert_frame_equal(result, exp)
    def test_arith_flex_series(self):
        """Flexible arithmetic methods broadcast a Series across rows by
        default and down columns with ``axis=0``; integer division by a
        zero-containing column yields NaN/inf (GH 7325)."""
        df = self.simple
        row = df.xs('a')
        col = df['two']
        # after arithmetic refactor, add truediv here
        ops = ['add', 'sub', 'mul', 'mod']
        for op in ops:
            f = getattr(df, op)
            op = getattr(operator, op)
            assert_frame_equal(f(row), op(df, row))
            assert_frame_equal(f(col, axis=0), op(df.T, col).T)
        # special case for some reason
        assert_frame_equal(df.add(row, axis=None), df + row)
        # cases which will be refactored after big arithmetic refactor
        assert_frame_equal(df.div(row), df / row)
        assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
        # broadcasting issue in GH7325
        # int64 frame: dividing by its own first column (which contains 0)
        # must upcast to float and produce nan/inf, not raise or truncate
        df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')
        expected = DataFrame([[nan, inf], [1.0, 1.5], [1.0, 1.25]])
        result = df.div(df[0],axis='index')
        assert_frame_equal(result,expected)
        df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')
        expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])
        result = df.div(df[0],axis='index')
        assert_frame_equal(result,expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
tm.assert_dict_equal(added['A'].valid(),
self.frame['A'] * 2,
compare_keys=False)
self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())
# assert(False)
self.assertTrue(np.isnan(added['D']).all())
self_added = self.frame + self.frame
self.assertTrue(self_added.index.equals(self.frame.index))
added_rev = frame_copy + self.frame
self.assertTrue(np.isnan(added['D']).all())
# corner cases
# empty
plus_empty = self.frame + self.empty
self.assertTrue(np.isnan(plus_empty.values).all())
empty_plus = self.empty + self.frame
self.assertTrue(np.isnan(empty_plus.values).all())
empty_empty = self.empty + self.empty
self.assertTrue(empty_empty.empty)
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype = 'float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype = 'float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype = dict(C = None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype = dict(C = None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype = 'float64')
    def test_combineSeries(self):
        """Frame + Series broadcasts across columns; explicit
        ``add(..., axis='index')`` broadcasts down rows (auto time-series
        broadcasting was removed, GH 10890)."""
        # Series
        series = self.frame.xs(self.frame.index[0])
        added = self.frame + series
        for key, s in compat.iteritems(added):
            assert_series_equal(s, self.frame[key] + series[key])
        # a label present only in the series yields an all-NaN column
        larger_series = series.to_dict()
        larger_series['E'] = 1
        larger_series = Series(larger_series)
        larger_added = self.frame + larger_series
        for key, s in compat.iteritems(self.frame):
            assert_series_equal(larger_added[key], s + series[key])
        self.assertIn('E', larger_added)
        self.assertTrue(np.isnan(larger_added['E']).all())
        # vs mix (upcast) as needed
        added = self.mixed_float + series
        _check_mixed_float(added, dtype = 'float64')
        added = self.mixed_float + series.astype('float32')
        _check_mixed_float(added, dtype = dict(C = None))
        added = self.mixed_float + series.astype('float16')
        _check_mixed_float(added, dtype = dict(C = None))
        #### these raise with numexpr.....as we are adding an int64 to an uint64....weird
        # vs int
        #added = self.mixed_int + (100*series).astype('int64')
        #_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))
        #added = self.mixed_int + (100*series).astype('int32')
        #_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))
        # TimeSeries
        ts = self.tsframe['A']
        # 10890
        # we no longer allow auto timeseries broadcasting
        # and require explict broadcasting
        added = self.tsframe.add(ts, axis='index')
        for key, col in compat.iteritems(self.tsframe):
            result = col + ts
            assert_series_equal(added[key], result, check_names=False)
            self.assertEqual(added[key].name, key)
            if col.name == ts.name:
                self.assertEqual(result.name, 'A')
            else:
                self.assertTrue(result.name is None)
        smaller_frame = self.tsframe[:-5]
        smaller_added = smaller_frame.add(ts, axis='index')
        self.assertTrue(smaller_added.index.equals(self.tsframe.index))
        smaller_ts = ts[:-5]
        smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
        assert_frame_equal(smaller_added, smaller_added2)
        # length 0, result is all-nan
        result = self.tsframe.add(ts[:0], axis='index')
        expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)
        assert_frame_equal(result, expected)
        # Frame is all-nan
        result = self.tsframe[:0].add(ts, axis='index')
        expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)
        assert_frame_equal(result, expected)
        # empty but with non-empty index
        frame = self.tsframe[:1].reindex(columns=[])
        result = frame.mul(ts,axis='index')
        self.assertEqual(len(result), len(ts))
def test_combineFunc(self):
result = self.frame * 2
self.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype = dict(C = None))
result = self.empty * 2
self.assertIs(result.index, self.empty.index)
self.assertEqual(len(result.columns), 0)
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
self.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
self.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
self.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with assertRaisesRegexp(ValueError, 'Can only compare '
'identically-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.ix[1:1, :])
assert_frame_equal(df[-mask_a], df.ix[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.ix[0:0, :])
assert_frame_equal(df[-mask_b], df.ix[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
self.assertRaises(TypeError, df.__eq__, None)
    def test_boolean_comparison(self):
        """GH 4576: comparing a frame against 1-D/2-D arrays, lists and
        tuples must broadcast like numpy row-wise; column-vector operands
        and shape-incompatible sequences must raise ValueError."""
        # GH 4576
        # boolean comparisons with a tuple/list give unexpected results
        df = DataFrame(np.arange(6).reshape((3,2)))
        b = np.array([2, 2])
        b_r = np.atleast_2d([2,2])
        # column vector (2x1): broadcasting against it must be rejected
        b_c = b_r.T
        l = (2,2,2)
        tup = tuple(l)
        # gt
        expected = DataFrame([[False,False],[False,True],[True,True]])
        result = df>b
        assert_frame_equal(result,expected)
        result = df.values>b
        assert_numpy_array_equal(result,expected.values)
        result = df>l
        assert_frame_equal(result,expected)
        result = df>tup
        assert_frame_equal(result,expected)
        result = df>b_r
        assert_frame_equal(result,expected)
        result = df.values>b_r
        assert_numpy_array_equal(result,expected.values)
        self.assertRaises(ValueError, df.__gt__, b_c)
        self.assertRaises(ValueError, df.values.__gt__, b_c)
        # ==
        expected = DataFrame([[False,False],[True,False],[False,False]])
        result = df == b
        assert_frame_equal(result,expected)
        result = df==l
        assert_frame_equal(result,expected)
        result = df==tup
        assert_frame_equal(result,expected)
        result = df == b_r
        assert_frame_equal(result,expected)
        result = df.values == b_r
        assert_numpy_array_equal(result,expected.values)
        self.assertRaises(ValueError, lambda : df == b_c)
        self.assertFalse((df.values == b_c))
        # with alignment
        df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))
        expected.index=df.index
        expected.columns=df.columns
        result = df==l
        assert_frame_equal(result,expected)
        result = df==tup
        assert_frame_equal(result,expected)
        # not shape compatible
        self.assertRaises(ValueError, lambda : df == (2,2))
        self.assertRaises(ValueError, lambda : df == [2,2])
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x","y"], "B": [1,2],
"C": ["w","z"]})
df1 = df0.reset_index()[["A","B","C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
self.assertTrue(df0._data.blocks[0].dtype !=
df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
self.assertTrue(df0.equals(df1))
self.assertTrue(df1.equals(df0))
def test_copy_blocks(self):
# API/ENH 9607
df = DataFrame(self.frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.ix[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
self.assertFalse(_df[column].equals(df[column]))
def test_no_copy_blocks(self):
# API/ENH 9607
df = DataFrame(self.frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.ix[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
self.assertTrue(_df[column].equals(df[column]))
    def test_to_csv_from_csv(self):
        """Round trip through to_csv / from_csv: basic options, duplicate
        and MultiIndex row labels, column aliases, append mode,
        TimedeltaIndex formatting (GH 10833) and tz-aware data (GH 8260)."""
        pname = '__tmp_to_csv_from_csv__'
        with ensure_clean(pname) as path:
            self.frame['A'][:5] = nan
            # smoke-test the main keyword combinations
            self.frame.to_csv(path)
            self.frame.to_csv(path, columns=['A', 'B'])
            self.frame.to_csv(path, header=False)
            self.frame.to_csv(path, index=False)
            # test roundtrip
            self.tsframe.to_csv(path)
            recons = DataFrame.from_csv(path)
            assert_frame_equal(self.tsframe, recons)
            self.tsframe.to_csv(path, index_label='index')
            recons = DataFrame.from_csv(path, index_col=None)
            assert(len(recons.columns) == len(self.tsframe.columns) + 1)
            # no index
            self.tsframe.to_csv(path, index=False)
            recons = DataFrame.from_csv(path, index_col=None)
            assert_almost_equal(self.tsframe.values, recons.values)
            # corner case: unequal-length columns, NaN-padded on write
            dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
                            's2': Series(lrange(2), lrange(2))})
            dm.to_csv(path)
            recons = DataFrame.from_csv(path)
            assert_frame_equal(dm, recons)
        with ensure_clean(pname) as path:
            # duplicate index
            df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
                           columns=['x', 'y', 'z'])
            df.to_csv(path)
            result = DataFrame.from_csv(path)
            assert_frame_equal(result, df)
            midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
            df = DataFrame(np.random.randn(3, 3), index=midx,
                           columns=['x', 'y', 'z'])
            df.to_csv(path)
            result = DataFrame.from_csv(path, index_col=[0, 1, 2],
                                        parse_dates=False)
            assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
            # column aliases
            col_aliases = Index(['AA', 'X', 'Y', 'Z'])
            self.frame2.to_csv(path, header=col_aliases)
            rs = DataFrame.from_csv(path)
            xp = self.frame2.copy()
            xp.columns = col_aliases
            assert_frame_equal(xp, rs)
            # a header of the wrong length must raise
            self.assertRaises(ValueError, self.frame2.to_csv, path,
                              header=['AA', 'X'])
        with ensure_clean(pname) as path:
            # mode='a' appends without re-writing the header
            df1 = DataFrame(np.random.randn(3, 1))
            df2 = DataFrame(np.random.randn(3, 1))
            df1.to_csv(path)
            df2.to_csv(path,mode='a',header=False)
            xp = pd.concat([df1,df2])
            rs = pd.read_csv(path,index_col=0)
            rs.columns = lmap(int,rs.columns)
            xp.columns = lmap(int,xp.columns)
            assert_frame_equal(xp,rs)
        with ensure_clean() as path:
            # GH 10833 (TimedeltaIndex formatting)
            dt = pd.Timedelta(seconds=1)
            df = pd.DataFrame({'dt_data': [i*dt for i in range(3)]},
                              index=pd.Index([i*dt for i in range(3)],
                                             name='dt_index'))
            df.to_csv(path)
            result = pd.read_csv(path, index_col='dt_index')
            result.index = pd.to_timedelta(result.index)
            # TODO: remove renaming when GH 10875 is solved
            result.index = result.index.rename('dt_index')
            result['dt_data'] = pd.to_timedelta(result['dt_data'])
            assert_frame_equal(df, result, check_index_type=True)
        # tz, 8260
        with ensure_clean(pname) as path:
            self.tzframe.to_csv(path)
            result = pd.read_csv(path, index_col=0, parse_dates=['A'])
            # CSV loses the tz, so localize back to each column's zone
            converter = lambda c: pd.to_datetime(result[c]).dt.tz_localize('UTC').dt.tz_convert(self.tzframe[c].dt.tz)
            result['B'] = converter('B')
            result['C'] = converter('C')
            assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
chunksize=5
N = int(chunksize*2.5)
df= mkdf(N, 3)
cs = df.columns
cols = [cs[2],cs[0]]
with ensure_clean() as path:
df.to_csv(path,columns = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
assert_frame_equal(df[cols],rs_c,check_names=False)
def test_to_csv_legacy_raises_on_dupe_cols(self):
df= mkdf(10, 3)
df.columns = ['a','a','b']
with ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')
    def test_to_csv_new_dupe_cols(self):
        """Round trip frames with duplicate column labels, both writing
        all columns and writing a reordered subset via ``columns=``."""
        import pandas as pd
        def _check_df(df,cols=None):
            # write (optionally a column subset) and compare the reload
            with ensure_clean() as path:
                df.to_csv(path,columns = cols,chunksize=chunksize)
                rs_c = pd.read_csv(path,index_col=0)
                # we wrote them in a different order
                # so compare them in that order
                if cols is not None:
                    if df.columns.is_unique:
                        rs_c.columns = cols
                    else:
                        # map the possibly-duplicated selection back onto
                        # the original (non-unique) labels
                        indexer, missing = df.columns.get_indexer_non_unique(cols)
                        rs_c.columns = df.columns.take(indexer)
                    for c in cols:
                        obj_df = df[c]
                        obj_rs = rs_c[c]
                        # a duplicated label selects a DataFrame, not a Series
                        if isinstance(obj_df,Series):
                            assert_series_equal(obj_df,obj_rs)
                        else:
                            assert_frame_equal(obj_df,obj_rs,check_names=False)
                # wrote in the same order
                else:
                    rs_c.columns = df.columns
                    assert_frame_equal(df,rs_c,check_names=False)
        chunksize=5
        N = int(chunksize*2.5)
        # dupe cols
        df= mkdf(N, 3)
        df.columns = ['a','a','b']
        _check_df(df,None)
        # dupe cols with selection
        cols = ['b','a']
        _check_df(df,cols)
    @slow
    def test_to_csv_moar(self):
        """Exhaustive to_csv round-trip sweep: many frame shapes around
        the chunk-size boundaries, all index/column dtypes (int, float,
        string, unicode, datetime, period), duplicate labels, and
        MultiIndex rows/columns (GH 3437 among others)."""
        path = '__tmp_to_csv_moar__'
        def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
                     dupe_col=False):
            # write df, reload it, normalize dtypes of index/columns on
            # both sides, then compare
            kwargs = dict(parse_dates=False)
            if cnlvl:
                if rnlvl is not None:
                    kwargs['index_col'] = lrange(rnlvl)
                kwargs['header'] = lrange(cnlvl)
                with ensure_clean(path) as path:
                    df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
                    recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
            else:
                kwargs['header'] = 0
                with ensure_clean(path) as path:
                    df.to_csv(path,encoding='utf8',chunksize=chunksize)
                    recons = DataFrame.from_csv(path,**kwargs)
            def _to_uni(x):
                if not isinstance(x, compat.text_type):
                    return x.decode('utf8')
                return x
            if dupe_col:
                # read_Csv disambiguates the columns by
                # labeling them dupe.1,dupe.2, etc'. monkey patch columns
                recons.columns = df.columns
            if rnlvl and not cnlvl:
                # rebuild the MultiIndex rows from the leading data columns
                delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl-1)]
                ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
                recons.index = ix
                recons = recons.iloc[:,rnlvl-1:]
            # dtype code -> numpy dtype char used for normalization below
            type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
            if r_dtype:
                if r_dtype == 'u': # unicode
                    r_dtype='O'
                    recons.index = np.array(lmap(_to_uni,recons.index),
                                            dtype=r_dtype)
                    df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
                elif r_dtype == 'dt': # unicode
                    r_dtype='O'
                    recons.index = np.array(lmap(Timestamp,recons.index),
                                            dtype=r_dtype)
                    df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
                elif r_dtype == 'p':
                    r_dtype='O'
                    recons.index = np.array(list(map(Timestamp,
                                                     recons.index.to_datetime())),
                                            dtype=r_dtype)
                    df.index = np.array(list(map(Timestamp,
                                                 df.index.to_datetime())),
                                        dtype=r_dtype)
                else:
                    r_dtype= type_map.get(r_dtype)
                    recons.index = np.array(recons.index,dtype=r_dtype )
                    df.index = np.array(df.index,dtype=r_dtype )
            if c_dtype:
                if c_dtype == 'u':
                    c_dtype='O'
                    recons.columns = np.array(lmap(_to_uni,recons.columns),
                                              dtype=c_dtype)
                    df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
                elif c_dtype == 'dt':
                    c_dtype='O'
                    recons.columns = np.array(lmap(Timestamp,recons.columns),
                                              dtype=c_dtype )
                    df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
                elif c_dtype == 'p':
                    c_dtype='O'
                    recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
                                              dtype=c_dtype)
                    df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
                else:
                    c_dtype= type_map.get(c_dtype)
                    recons.columns = np.array(recons.columns,dtype=c_dtype )
                    df.columns = np.array(df.columns,dtype=c_dtype )
            assert_frame_equal(df,recons,check_names=False,check_less_precise=True)
        N = 100
        chunksize=1000
        # GH3437
        from pandas import NaT
        def make_dtnat_arr(n,nnat=None):
            # datetime list with ~10% NaT sprinkled in (plus forced edges)
            if nnat is None:
                nnat= int(n*0.1) # 10%
            s=list(date_range('2000',freq='5min',periods=n))
            if nnat:
                for i in np.random.randint(0,len(s),nnat):
                    s[i] = NaT
                i = np.random.randint(100)
                s[-i] = NaT
                s[i] = NaT
            return s
        # N=35000
        s1=make_dtnat_arr(chunksize+5)
        s2=make_dtnat_arr(chunksize+5,0)
        path = '1.csv'
        # s3=make_dtnjat_arr(chunksize+5,0)
        with ensure_clean('.csv') as pth:
            df=DataFrame(dict(a=s1,b=s2))
            df.to_csv(pth,chunksize=chunksize)
            recons = DataFrame.from_csv(pth)._convert(datetime=True,
                                                      coerce=True)
            assert_frame_equal(df, recons,check_names=False,check_less_precise=True)
        for ncols in [4]:
            base = int((chunksize// ncols or 1) or 1)
            for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                          base-1,base,base+1]:
                _do_test(mkdf(nrows, ncols,r_idx_type='dt',
                              c_idx_type='s'),path, 'dt','s')
        for ncols in [4]:
            base = int((chunksize// ncols or 1) or 1)
            for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                          base-1,base,base+1]:
                _do_test(mkdf(nrows, ncols,r_idx_type='dt',
                              c_idx_type='s'),path, 'dt','s')
            pass
        for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:
            for ncols in [1,2,3,4]:
                base = int((chunksize// ncols or 1) or 1)
                for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                              base-1,base,base+1]:
                    _do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,
                                  c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)
        for ncols in [1,2,3,4]:
            base = int((chunksize// ncols or 1) or 1)
            for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                          base-1,base,base+1]:
                _do_test(mkdf(nrows, ncols),path)
        for nrows in [10,N-2,N-1,N,N+1,N+2]:
            # duplicate labels at both ends of the index and columns
            df = mkdf(nrows, 3)
            cols = list(df.columns)
            cols[:2] = ["dupe","dupe"]
            cols[-2:] = ["dupe","dupe"]
            ix = list(df.index)
            ix[:2] = ["rdupe","rdupe"]
            ix[-2:] = ["rdupe","rdupe"]
            df.index=ix
            df.columns=cols
            _do_test(df,path,dupe_col=True)
        _do_test(DataFrame(index=lrange(10)),path)
        _do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)
        for ncols in [2,3,4]:
            base = int(chunksize//ncols)
            for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                          base-1,base,base+1]:
                _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)
                _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)
                _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),
                         path,rnlvl=2,cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
pname = '__tmp_to_csv_no_index__'
with ensure_clean(pname) as path:
df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
df['c3'] = Series([7,8,9],dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
pname = '__tmp_to_csv_headers__'
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean(pname) as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
    def test_to_csv_multiindex(self):
        """Round trip frames with MultiIndex rows and/or columns through
        to_csv / from_csv / read_csv, covering tupleize_cols, index
        labels, name preservation and invalid-header errors
        (GH 3571, GH 1651, GH 3141)."""
        pname = '__tmp_to_csv_multiindex__'
        frame = self.frame
        old_index = frame.index
        arrays = np.arange(len(old_index) * 2).reshape(2, -1)
        new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
        frame.index = new_index
        with ensure_clean(pname) as path:
            frame.to_csv(path, header=False)
            frame.to_csv(path, columns=['A', 'B'])
            # round trip
            frame.to_csv(path)
            df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
            assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
            self.assertEqual(frame.index.names, df.index.names)
            self.frame.index = old_index # needed if setUP becomes a classmethod
            # try multiindex with dates
            tsframe = self.tsframe
            old_index = tsframe.index
            new_index = [old_index, np.arange(len(old_index))]
            tsframe.index = MultiIndex.from_arrays(new_index)
            tsframe.to_csv(path, index_label=['time', 'foo'])
            recons = DataFrame.from_csv(path, index_col=[0, 1])
            assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
            # do not load index
            tsframe.to_csv(path)
            recons = DataFrame.from_csv(path, index_col=None)
            np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
            # no index
            tsframe.to_csv(path, index=False)
            recons = DataFrame.from_csv(path, index_col=None)
            assert_almost_equal(recons.values, self.tsframe.values)
            self.tsframe.index = old_index # needed if setUP becomes classmethod
        with ensure_clean(pname) as path:
            # GH3571, GH1651, GH3141
            def _make_frame(names=None):
                # small int frame with a 2-level column MultiIndex;
                # names=True attaches level names ['first', 'second']
                if names is True:
                    names = ['first','second']
                return DataFrame(np.random.randint(0,10,size=(3,3)),
                                 columns=MultiIndex.from_tuples([('bah', 'foo'),
                                                                 ('bah', 'bar'),
                                                                 ('ban', 'baz')],
                                                                names=names),
                                 dtype='int64')
            # column & index are multi-index
            df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
            df.to_csv(path,tupleize_cols=False)
            result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
            assert_frame_equal(df,result)
            # column is mi
            df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
            df.to_csv(path,tupleize_cols=False)
            result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
            assert_frame_equal(df,result)
            # dup column names?
            df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
            df.to_csv(path,tupleize_cols=False)
            result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)
            assert_frame_equal(df,result)
            # writing with no index
            df = _make_frame()
            df.to_csv(path,tupleize_cols=False,index=False)
            result = read_csv(path,header=[0,1],tupleize_cols=False)
            assert_frame_equal(df,result)
            # we lose the names here
            df = _make_frame(True)
            df.to_csv(path,tupleize_cols=False,index=False)
            result = read_csv(path,header=[0,1],tupleize_cols=False)
            self.assertTrue(all([ x is None for x in result.columns.names ]))
            result.columns.names = df.columns.names
            assert_frame_equal(df,result)
            # tupleize_cols=True and index=False
            df = _make_frame(True)
            df.to_csv(path,tupleize_cols=True,index=False)
            result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
            result.columns = df.columns
            assert_frame_equal(df,result)
            # whatsnew example
            df = _make_frame()
            df.to_csv(path,tupleize_cols=False)
            result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
            assert_frame_equal(df,result)
            df = _make_frame(True)
            df.to_csv(path,tupleize_cols=False)
            result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
            assert_frame_equal(df,result)
            # column & index are multi-index (compatibility)
            df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
            df.to_csv(path,tupleize_cols=True)
            result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
            result.columns = df.columns
            assert_frame_equal(df,result)
            # invalid options
            df = _make_frame(True)
            df.to_csv(path,tupleize_cols=False)
            # catch invalid headers
            with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2\] are too many rows for this multi_index of columns'):
                read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)
            with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2,3,4,5,6\], len of 7, but only 6 lines in file'):
                read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)
            for i in [4,5,6]:
                with tm.assertRaises(CParserError):
                    read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)
            # write with cols
            with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):
                df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])
        with ensure_clean(pname) as path:
            # empty
            tsframe[:0].to_csv(path)
            recons = DataFrame.from_csv(path)
            exp = tsframe[:0]
            exp.index = []
            self.assertTrue(recons.columns.equals(exp.columns))
            self.assertEqual(len(recons), 0)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
self.assertEqual(lines[1].split(',')[2], '999')
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = DataFrame.from_csv(path)
assert_frame_equal(df2, df)
    def test_to_csv_mixed(self):
        """Round trip a wide mixed-dtype frame (float, int, bool, object,
        datetime) using explicit per-column dtypes and parse_dates on
        reload."""
        def create_cols(name):
            # five labels like 'float000'..'float004' for dtype group `name`
            return [ "%s%03d" % (name,i) for i in range(5) ]
        df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))
        df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))
        df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
        df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
        df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))
        # add in some nans
        df_float.ix[30:50,1:3] = np.nan
        #### this is a bug in read_csv right now ####
        #df_dt.ix[30:50,1:3] = np.nan
        df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
        # dtype
        # map each non-datetime column name to its expected reload dtype
        dtypes = dict()
        for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:
            for c in create_cols(n):
                dtypes[c] = dtype
        with ensure_clean() as filename:
            df.to_csv(filename)
            rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
            assert_frame_equal(rs, df)
    def test_to_csv_dups_cols(self):
        """Round trip frames with duplicate column labels: single-dtype,
        mixed-dtype with repeated labels, and read_csv's 'a.1' renaming
        of duplicates (GH 3457)."""
        df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')
        with ensure_clean() as filename:
            df.to_csv(filename) # single dtype, fine
            result = read_csv(filename,index_col=0)
            result.columns = df.columns
            assert_frame_equal(result,df)
        df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')
        df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')
        df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))
        df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))
        df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))
        df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)
        # every dtype group reuses labels 0,1,2 -> 15 columns, 5x duplicated
        cols = []
        for i in range(5):
            cols.extend([0,1,2])
        df.columns = cols
        from pandas import to_datetime
        with ensure_clean() as filename:
            df.to_csv(filename)
            result = read_csv(filename,index_col=0)
            # date cols
            # read_csv names the 5th duplicate group '0.4','1.4','2.4'
            for i in ['0.4','1.4','2.4']:
                result[i] = to_datetime(result[i])
            result.columns = df.columns
            assert_frame_equal(result,df)
        # GH3457
        from pandas.util.testing import makeCustomDataframe as mkdf
        N=10
        df= mkdf(N, 3)
        df.columns = ['a','a','b']
        with ensure_clean() as filename:
            df.to_csv(filename)
            # read_csv will rename the dups columns
            result = read_csv(filename,index_col=0)
            result = result.rename(columns={ 'a.1' : 'a' })
            assert_frame_equal(result,df)
    def test_to_csv_chunking(self):
        """Writing with various ``chunksize`` values must produce the same CSV
        content as a single write (round-trips through read_csv)."""
        aa=DataFrame({'A':lrange(100000)})
        aa['B'] = aa.A + 1.0
        aa['C'] = aa.A + 2.0
        aa['D'] = aa.A + 3.0
        # chunk sizes below, equal to, and above the row count
        for chunksize in [10000,50000,100000]:
            with ensure_clean() as filename:
                aa.to_csv(filename,chunksize=chunksize)
                rs = read_csv(filename,index_col=0)
                assert_frame_equal(rs, aa)
    @slow
    def test_to_csv_wide_frame_formatting(self):
        """Very wide frames (>100k columns) must round-trip intact (GH #8621)."""
        # Issue #8621
        df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
        with ensure_clean() as filename:
            df.to_csv(filename, header=False, index=False)
            rs = read_csv(filename, header=None)
            assert_frame_equal(rs, df)
    def test_to_csv_bug(self):
        """Regression: a frame built from a headerless from_csv read must still
        round-trip through to_csv/read_csv."""
        f1 = StringIO('a,1.0\nb,2.0')
        df = DataFrame.from_csv(f1, header=None)
        newdf = DataFrame({'t': df[df.columns[0]]})
        with ensure_clean() as path:
            newdf.to_csv(path)
            recons = read_csv(path, index_col=0)
            assert_frame_equal(recons, newdf, check_names=False)  # don't check_names as t != 1
    def test_to_csv_unicode(self):
        """Unicode column labels survive a UTF-8 to_csv/read_csv round-trip,
        with and without the index written."""
        df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
        with ensure_clean() as path:
            df.to_csv(path, encoding='UTF-8')
            df2 = read_csv(path, index_col=0, encoding='UTF-8')
            assert_frame_equal(df, df2)
            df.to_csv(path, encoding='UTF-8', index=False)
            df2 = read_csv(path, index_col=None, encoding='UTF-8')
            assert_frame_equal(df, df2)
    def test_to_csv_unicode_index_col(self):
        """Unicode (Hebrew) labels in both the index and columns survive a
        UTF-8 round-trip through an in-memory buffer."""
        buf = StringIO('')
        df = DataFrame(
            [[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
            columns=[u("\u05d0"),
                     u("\u05d1"), u("\u05d2"), u("\u05d3")],
            index=[u("\u05d0"), u("\u05d1")])
        df.to_csv(buf, encoding='UTF-8')
        buf.seek(0)
        df2 = read_csv(buf, index_col=0, encoding='UTF-8')
        assert_frame_equal(df, df2)
    def test_to_csv_stringio(self):
        """to_csv accepts a StringIO buffer and round-trips through read_csv."""
        buf = StringIO()
        self.frame.to_csv(buf)
        buf.seek(0)
        recons = read_csv(buf, index_col=0)
        assert_frame_equal(recons, self.frame, check_names=False)  # TODO to_csv drops column name
    def test_to_csv_float_format(self):
        """``float_format`` is applied on write: values come back rounded to
        the requested precision."""
        df = DataFrame([[0.123456, 0.234567, 0.567567],
                        [12.32112, 123123.2, 321321.2]],
                       index=['A', 'B'], columns=['X', 'Y', 'Z'])
        with ensure_clean() as filename:
            df.to_csv(filename, float_format='%.2f')
            rs = read_csv(filename, index_col=0)
            # expected values rounded to two decimals by the format string
            xp = DataFrame([[0.12, 0.23, 0.57],
                            [12.32, 123123.20, 321321.20]],
                           index=['A', 'B'], columns=['X', 'Y', 'Z'])
            assert_frame_equal(rs, xp)
    def test_to_csv_quoting(self):
        """``quoting`` is honoured: QUOTE_NONNUMERIC quotes only strings,
        embedded CRLF survives (GH #3503), and QUOTE_ALL passes through a
        MultiIndex write (GH #7791)."""
        df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
        buf = StringIO()
        df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)
        result = buf.getvalue()
        expected = ('"A","B"\n'
                    '1,"foo"\n'
                    '2,"bar"\n'
                    '3,"baz"\n')
        self.assertEqual(result, expected)
        # quoting windows line terminators, presents with encoding?
        # #3503
        text = 'a,b,c\n1,"test \r\n",3\n'
        df = pd.read_csv(StringIO(text))
        buf = StringIO()
        df.to_csv(buf, encoding='utf-8', index=False)
        self.assertEqual(buf.getvalue(), text)
        # testing if quoting parameter is passed through with multi-indexes
        # related to issue #7791
        df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
        df = df.set_index(['a', 'b'])
        expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
        self.assertEqual(df.to_csv(quoting=csv.QUOTE_ALL), expected)
    def test_to_csv_unicodewriter_quoting(self):
        """QUOTE_NONNUMERIC must behave identically when an explicit encoding
        routes the write through the unicode writer."""
        df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
        buf = StringIO()
        df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
                  encoding='utf-8')
        result = buf.getvalue()
        expected = ('"A","B"\n'
                    '1,"foo"\n'
                    '2,"bar"\n'
                    '3,"baz"\n')
        self.assertEqual(result, expected)
    def test_to_csv_quote_none(self):
        """QUOTE_NONE writes values verbatim, even ones containing quote
        characters, regardless of encoding (GH #4328)."""
        # GH4328
        df = DataFrame({'A': ['hello', '{"hello"}']})
        for encoding in (None, 'utf-8'):
            buf = StringIO()
            df.to_csv(buf, quoting=csv.QUOTE_NONE,
                      encoding=encoding, index=False)
            result = buf.getvalue()
            expected = 'A\nhello\n{"hello"}\n'
            self.assertEqual(result, expected)
    def test_to_csv_index_no_leading_comma(self):
        """With ``index_label=False`` the header row has no leading comma for
        the (unnamed) index column."""
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                       index=['one', 'two', 'three'])
        buf = StringIO()
        df.to_csv(buf, index_label=False)
        expected = ('A,B\n'
                    'one,1,4\n'
                    'two,2,5\n'
                    'three,3,6\n')
        self.assertEqual(buf.getvalue(), expected)
    def test_to_csv_line_terminators(self):
        """``line_terminator`` controls the row separator; the default stays
        '\\n'."""
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                       index=['one', 'two', 'three'])
        buf = StringIO()
        df.to_csv(buf, line_terminator='\r\n')
        expected = (',A,B\r\n'
                    'one,1,4\r\n'
                    'two,2,5\r\n'
                    'three,3,6\r\n')
        self.assertEqual(buf.getvalue(), expected)
        buf = StringIO()
        df.to_csv(buf)  # The default line terminator remains \n
        expected = (',A,B\n'
                    'one,1,4\n'
                    'two,2,5\n'
                    'three,3,6\n')
        self.assertEqual(buf.getvalue(), expected)
    def test_to_csv_from_csv_categorical(self):
        """Categorical data must serialize to the same CSV text as the
        equivalent plain object Series/DataFrame."""
        # CSV with categoricals should result in the same output as when one would add a "normal"
        # Series/DataFrame.
        s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
        s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
        res = StringIO()
        s.to_csv(res)
        exp = StringIO()
        s2.to_csv(exp)
        self.assertEqual(res.getvalue(), exp.getvalue())
        df = DataFrame({"s":s})
        df2 = DataFrame({"s":s2})
        res = StringIO()
        df.to_csv(res)
        exp = StringIO()
        df2.to_csv(exp)
        self.assertEqual(res.getvalue(), exp.getvalue())
    def test_to_csv_path_is_none(self):
        """``to_csv(path=None)`` returns the CSV text as a str, matching the
        Series.to_csv convention (GH #8215)."""
        # GH 8215
        # Make sure we return string for consistency with
        # Series.to_csv()
        csv_str = self.frame.to_csv(path=None)
        self.assertIsInstance(csv_str, str)
        recons = pd.read_csv(StringIO(csv_str), index_col=0)
        assert_frame_equal(self.frame, recons)
    def test_to_csv_compression_gzip(self):
        """``compression='gzip'`` round-trips through read_csv and really
        produces a gzip file on disk (GH #7615)."""
        ## GH7615
        ## use the compression kw in to_csv
        df = DataFrame([[0.123456, 0.234567, 0.567567],
                        [12.32112, 123123.2, 321321.2]],
                       index=['A', 'B'], columns=['X', 'Y', 'Z'])
        with ensure_clean() as filename:
            df.to_csv(filename, compression="gzip")
            # test the round trip - to_csv -> read_csv
            rs = read_csv(filename, compression="gzip", index_col=0)
            assert_frame_equal(df, rs)
            # explicitly make sure file is gzipped
            import gzip
            f = gzip.open(filename, 'rb')
            text = f.read().decode('utf8')
            f.close()
            for col in df.columns:
                self.assertIn(col, text)
    def test_to_csv_compression_bz2(self):
        """``compression='bz2'`` round-trips through read_csv and really
        produces a bz2 file on disk (GH #7615)."""
        ## GH7615
        ## use the compression kw in to_csv
        df = DataFrame([[0.123456, 0.234567, 0.567567],
                        [12.32112, 123123.2, 321321.2]],
                       index=['A', 'B'], columns=['X', 'Y', 'Z'])
        with ensure_clean() as filename:
            df.to_csv(filename, compression="bz2")
            # test the round trip - to_csv -> read_csv
            rs = read_csv(filename, compression="bz2", index_col=0)
            assert_frame_equal(df, rs)
            # explicitly make sure file is bz2ed
            import bz2
            f = bz2.BZ2File(filename, 'rb')
            text = f.read().decode('utf8')
            f.close()
            for col in df.columns:
                self.assertIn(col, text)
    def test_to_csv_compression_value_error(self):
        """An unsupported ``compression`` value must raise ValueError (GH #7615)."""
        ## GH7615
        ## use the compression kw in to_csv
        df = DataFrame([[0.123456, 0.234567, 0.567567],
                        [12.32112, 123123.2, 321321.2]],
                       index=['A', 'B'], columns=['X', 'Y', 'Z'])
        with ensure_clean() as filename:
            # zip compression is not supported and should raise ValueError
            self.assertRaises(ValueError, df.to_csv, filename, compression="zip")
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
self.tsframe.info(buf=io)
frame = DataFrame(np.random.randn(5, 3))
import sys
sys.stdout = StringIO()
frame.info()
frame.info(verbose=False)
sys.stdout = sys.__stdout__
def test_info_wide(self):
from pandas import set_option, reset_option
io = StringIO()
df = DataFrame(np.random.randn(5, 101))
df.info(buf=io)
io = StringIO()
df.info(buf=io, max_cols=101)
rs = io.getvalue()
self.assertTrue(len(rs.splitlines()) > 100)
xp = rs
set_option('display.max_info_columns', 101)
io = StringIO()
df.info(buf=io)
self.assertEqual(rs, xp)
reset_option('display.max_info_columns')
    def test_info_duplicate_columns(self):
        """info() must not raise on a frame with duplicate column labels."""
        io = StringIO()
        # it works!
        frame = DataFrame(np.random.randn(1500, 4),
                          columns=['a', 'a', 'b', 'b'])
        frame.info(buf=io)
    def test_info_shows_column_dtypes(self):
        """info() output lists each column with its non-null count and dtype."""
        dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
                  'complex128', 'object', 'bool']
        data = {}
        n = 10
        for i, dtype in enumerate(dtypes):
            data[i] = np.random.randint(2, size=n).astype(dtype)
        df = DataFrame(data)
        buf = StringIO()
        df.info(buf=buf)
        res = buf.getvalue()
        for i, dtype in enumerate(dtypes):
            # e.g. '0 10 non-null int64'
            name = '%d %d non-null %s' % (i, n, dtype)
            assert name in res
    def test_info_max_cols(self):
        """Interaction of ``verbose``, explicit ``max_cols`` and the
        ``max_info_columns`` option: expected line counts distinguish the
        summarized (5-line) from the full (10-line) output."""
        df = DataFrame(np.random.randn(10, 5))
        for len_, verbose in [(5, None), (5, False), (10, True)]:
            # For verbose always      ^ setting  ^ summarize ^ full output
            with option_context('max_info_columns', 4):
                buf = StringIO()
                df.info(buf=buf, verbose=verbose)
                res = buf.getvalue()
                self.assertEqual(len(res.strip().split('\n')), len_)
        for len_, verbose in [(10, None), (5, False), (10, True)]:
            # max_cols not exceeded
            with option_context('max_info_columns', 5):
                buf = StringIO()
                df.info(buf=buf, verbose=verbose)
                res = buf.getvalue()
                self.assertEqual(len(res.strip().split('\n')), len_)
        for len_, max_cols in [(10, 5), (5, 4)]:
            # setting truncates
            with option_context('max_info_columns', 4):
                buf = StringIO()
                df.info(buf=buf, max_cols=max_cols)
                res = buf.getvalue()
                self.assertEqual(len(res.strip().split('\n')), len_)
            # setting wouldn't truncate
            with option_context('max_info_columns', 5):
                buf = StringIO()
                df.info(buf=buf, max_cols=max_cols)
                res = buf.getvalue()
                self.assertEqual(len(res.strip().split('\n')), len_)
    def test_info_memory_usage(self):
        """info(memory_usage=...) reporting and the memory_usage() method:
        the '+' lower-bound marker for object data, ``deep`` introspection,
        duplicate columns, and exact sizes for fixed-width dtypes."""
        # Ensure memory usage is displayed, when asserted, on the last line
        dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
                  'complex128', 'object', 'bool']
        data = {}
        n = 10
        for i, dtype in enumerate(dtypes):
            data[i] = np.random.randint(2, size=n).astype(dtype)
        df = DataFrame(data)
        buf = StringIO()
        # display memory usage case
        df.info(buf=buf, memory_usage=True)
        res = buf.getvalue().splitlines()
        self.assertTrue("memory usage: " in res[-1])
        # do not display memory usage case
        df.info(buf=buf, memory_usage=False)
        res = buf.getvalue().splitlines()
        self.assertTrue("memory usage: " not in res[-1])
        df.info(buf=buf, memory_usage=True)
        res = buf.getvalue().splitlines()
        # memory usage is a lower bound, so print it as XYZ+ MB
        self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
        df.iloc[:, :5].info(buf=buf, memory_usage=True)
        res = buf.getvalue().splitlines()
        # excluded column with object dtype, so estimate is accurate (no '+')
        self.assertFalse(re.match(r"memory usage: [^+]+\+", res[-1]))
        df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
        df_with_object_index.info(buf=buf, memory_usage=True)
        res = buf.getvalue().splitlines()
        self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
        # deep introspection gives an exact figure, so no '+' suffix
        df_with_object_index.info(buf=buf, memory_usage='deep')
        res = buf.getvalue().splitlines()
        self.assertTrue(re.match(r"memory usage: [^+]+$", res[-1]))
        self.assertTrue(df_with_object_index.memory_usage(index=True, deep=True).sum() \
                        > df_with_object_index.memory_usage(index=True).sum())
        df_object = pd.DataFrame({'a': ['a']})
        self.assertTrue(df_object.memory_usage(deep=True).sum() \
                        > df_object.memory_usage().sum())
        # Test a DataFrame with duplicate columns
        dtypes = ['int64', 'int64', 'int64', 'float64']
        data = {}
        n = 100
        for i, dtype in enumerate(dtypes):
            data[i] = np.random.randint(2, size=n).astype(dtype)
        df = DataFrame(data)
        df.columns = dtypes
        # Ensure df size is as expected
        df_size = df.memory_usage().sum()
        exp_size = len(dtypes) * n * 8  # cols * rows * bytes
        self.assertEqual(df_size, exp_size)
        # Ensure number of cols in memory_usage is the same as df
        size_df = np.size(df.columns.values)  # index=False; default
        self.assertEqual(size_df, np.size(df.memory_usage()))
        # assert deep works only on object
        self.assertEqual(df.memory_usage().sum(),df.memory_usage(deep=True).sum())
        # test for validity (smoke: these just must not raise)
        DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)
        DataFrame(1,index=['a'],columns=['A']).index.nbytes
        DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
        DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
        DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)
        DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
        DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
    def test_dtypes(self):
        """DataFrame.dtypes matches the per-column dtypes; with
        use_inf_as_null int columns keep their dtype (GH #8722)."""
        self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
        result = self.mixed_frame.dtypes
        expected = Series(dict((k, v.dtype)
                               for k, v in compat.iteritems(self.mixed_frame)),
                          index=result.index)
        assert_series_equal(result, expected)
        # compat, GH 8722
        with option_context('use_inf_as_null',True):
            df = DataFrame([[1]])
            result = df.dtypes
            assert_series_equal(result,Series({0:np.dtype('int64')}))
    def test_convert_objects(self):
        """_convert(): object columns are coerced to numeric types, garbled
        entries become NaN under numeric coercion, and astype raises on
        non-convertible strings."""
        oops = self.mixed_frame.T.T
        converted = oops._convert(datetime=True)
        assert_frame_equal(converted, self.mixed_frame)
        self.assertEqual(converted['A'].dtype, np.float64)
        # force numeric conversion
        self.mixed_frame['H'] = '1.'
        self.mixed_frame['I'] = '1'
        # add in some items that will be nan
        l = len(self.mixed_frame)
        self.mixed_frame['J'] = '1.'
        self.mixed_frame['K'] = '1'
        self.mixed_frame.ix[0:5,['J','K']] = 'garbled'
        converted = self.mixed_frame._convert(datetime=True, numeric=True)
        self.assertEqual(converted['H'].dtype, 'float64')
        self.assertEqual(converted['I'].dtype, 'int64')
        # J/K contain 'garbled' rows, so they coerce to float64 with NaNs
        self.assertEqual(converted['J'].dtype, 'float64')
        self.assertEqual(converted['K'].dtype, 'float64')
        self.assertEqual(len(converted['J'].dropna()), l-5)
        self.assertEqual(len(converted['K'].dropna()), l-5)
        # via astype
        converted = self.mixed_frame.copy()
        converted['H'] = converted['H'].astype('float64')
        converted['I'] = converted['I'].astype('int64')
        self.assertEqual(converted['H'].dtype, 'float64')
        self.assertEqual(converted['I'].dtype, 'int64')
        # via astype, but errors
        converted = self.mixed_frame.copy()
        with assertRaisesRegexp(ValueError, 'invalid literal'):
            converted['H'].astype('int32')
        # mixed in a single column
        df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))
        result = df._convert(datetime=True, numeric=True)
        expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))
        assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
    def test_append_series_dict(self):
        """append() with a Series or dict row: integrity checks, ignore_index
        behavior, partial rows, and a named row."""
        df = DataFrame(np.random.randn(5, 4),
                       columns=['foo', 'bar', 'baz', 'qux'])
        series = df.ix[4]
        # the series' name collides with an existing index entry
        with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):
            df.append(series, verify_integrity=True)
        series.name = None
        # an unnamed series requires ignore_index=True
        with assertRaisesRegexp(TypeError, 'Can only append a Series if '
                                'ignore_index=True'):
            df.append(series, verify_integrity=True)
        result = df.append(series[::-1], ignore_index=True)
        expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,
                             ignore_index=True)
        assert_frame_equal(result, expected)
        # dict
        result = df.append(series.to_dict(), ignore_index=True)
        assert_frame_equal(result, expected)
        result = df.append(series[::-1][:3], ignore_index=True)
        expected = df.append(DataFrame({0: series[::-1][:3]}).T,
                             ignore_index=True)
        assert_frame_equal(result, expected.ix[:, result.columns])
        # can append when name set
        row = df.ix[4]
        row.name = 5
        result = df.append(row)
        expected = df.append(df[-1:], ignore_index=True)
        assert_frame_equal(result, expected)
    def test_append_list_of_series_dicts(self):
        """append() accepts a list of dicts, including dicts whose keys add
        new columns."""
        df = DataFrame(np.random.randn(5, 4),
                       columns=['foo', 'bar', 'baz', 'qux'])
        dicts = [x.to_dict() for idx, x in df.iterrows()]
        result = df.append(dicts, ignore_index=True)
        expected = df.append(df, ignore_index=True)
        assert_frame_equal(result, expected)
        # different columns
        dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},
                 {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]
        result = df.append(dicts, ignore_index=True)
        expected = df.append(DataFrame(dicts), ignore_index=True)
        assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame([])
df2 = DataFrame([])
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
    def test_append_dtypes(self):
        """Row appends across different dtypes infer the right result dtype
        (GH #5754): datetime64 stays datetime64 with NaN fill, and mixing with
        strings/ints upcasts to object."""
        # GH 5754
        # row appends of different dtypes (so need to do by-item)
        # can sometimes infer the correct type
        df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
        df2 = DataFrame()
        result = df1.append(df2)
        expected = df1.copy()
        assert_frame_equal(result, expected)
        # datetime + string -> object
        df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
        df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
        result = df1.append(df2)
        expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
        assert_frame_equal(result, expected)
        # datetime + NaN keeps datetime64 dtype
        df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
        df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
        result = df1.append(df2)
        expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
        assert_frame_equal(result, expected)
        df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
        df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
        result = df1.append(df2)
        expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
        assert_frame_equal(result, expected)
        df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
        df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
        result = df1.append(df2)
        expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
        assert_frame_equal(result, expected)
        # datetime + int (object dtype) -> object
        df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
        df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
        result = df1.append(df2)
        expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
        assert_frame_equal(result, expected)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad')
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad')
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
    def test_asfreq_datetimeindex(self):
        """asfreq() on a datetime-indexed frame/series yields a DatetimeIndex."""
        df = DataFrame({'A': [1, 2, 3]},
                       index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
                              datetime(2011, 11, 3)])
        df = df.asfreq('B')
        tm.assertIsInstance(df.index, DatetimeIndex)
        ts = df['A'].asfreq('B')
        tm.assertIsInstance(ts.index, DatetimeIndex)
    def test_at_time_between_time_datetimeindex(self):
        """at_time()/between_time() selection on a 30-minute DatetimeIndex, and
        the equivalent .ix indexing with time keys for both get and set."""
        index = date_range("2012-01-01", "2012-01-05", freq='30min')
        df = DataFrame(randn(len(index), 5), index=index)
        akey = time(12, 0, 0)
        bkey = slice(time(13, 0, 0), time(14, 0, 0))
        # hand-computed positional indices of the 12:00 rows (one per day)
        ainds = [24, 72, 120, 168]
        # positional indices of the 13:00-14:00 rows
        binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
        result = df.at_time(akey)
        expected = df.ix[akey]
        expected2 = df.ix[ainds]
        assert_frame_equal(result, expected)
        assert_frame_equal(result, expected2)
        self.assertEqual(len(result), 4)
        result = df.between_time(bkey.start, bkey.stop)
        expected = df.ix[bkey]
        expected2 = df.ix[binds]
        assert_frame_equal(result, expected)
        assert_frame_equal(result, expected2)
        self.assertEqual(len(result), 12)
        # setting via a time key zeroes exactly those rows
        result = df.copy()
        result.ix[akey] = 0
        result = result.ix[akey]
        expected = df.ix[akey].copy()
        expected.ix[:] = 0
        assert_frame_equal(result, expected)
        # and writing the original values back restores the frame
        result = df.copy()
        result.ix[akey] = 0
        result.ix[akey] = df.ix[ainds]
        assert_frame_equal(result, df)
        result = df.copy()
        result.ix[bkey] = 0
        result = result.ix[bkey]
        expected = df.ix[bkey].copy()
        expected.ix[:] = 0
        assert_frame_equal(result, expected)
        result = df.copy()
        result.ix[bkey] = 0
        result.ix[bkey] = df.ix[binds]
        assert_frame_equal(result, df)
    def test_as_matrix(self):
        """as_matrix(): values agree cell-by-cell with column access, mixed
        and complex dtypes are preserved, and a column subset works."""
        frame = self.frame
        mat = frame.as_matrix()
        frameCols = frame.columns
        for i, row in enumerate(mat):
            for j, value in enumerate(row):
                col = frameCols[j]
                if np.isnan(value):
                    self.assertTrue(np.isnan(frame[col][i]))
                else:
                    self.assertEqual(value, frame[col][i])
        # mixed type
        mat = self.mixed_frame.as_matrix(['foo', 'A'])
        self.assertEqual(mat[0, 0], 'bar')
        df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
        mat = df.as_matrix()
        # columns are alphabetized, so 'complex' comes first
        self.assertEqual(mat[0, 0], 1j)
        # single block corner case
        mat = self.frame.as_matrix(['A', 'B'])
        expected = self.frame.reindex(columns=['A', 'B']).values
        assert_almost_equal(mat, expected)
    def test_as_matrix_duplicates(self):
        """.values on a frame with duplicate column labels returns the full
        object array, no columns lost."""
        df = DataFrame([[1, 2, 'a', 'b'],
                        [1, 2, 'a', 'b']],
                       columns=['one', 'one', 'two', 'two'])
        result = df.values
        expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
                            dtype=object)
        self.assertTrue(np.array_equal(result, expected))
    def test_ftypes(self):
        """ftypes reports '<dtype>:dense' for each column of the mixed-float
        fixture."""
        frame = self.mixed_float
        expected = Series(dict(A = 'float32:dense',
                               B = 'float32:dense',
                               C = 'float16:dense',
                               D = 'float64:dense')).sort_values()
        result = frame.ftypes.sort_values()
        assert_series_equal(result,expected)
    def test_values(self):
        """Writing through .values is reflected when re-reading .values.

        NOTE(review): this relies on .values returning a view of the frame's
        data for this fixture (single float block) -- confirm if the fixture
        ever changes dtype layout.
        """
        self.frame.values[:, 0] = 5.
        self.assertTrue((self.frame.values[:, 0] == 5).all())
    def test_deepcopy(self):
        """Mutating a deep copy must not leak back into the original frame."""
        cp = deepcopy(self.frame)
        series = cp['A']
        series[:] = 10
        for idx, value in compat.iteritems(series):
            self.assertNotEqual(self.frame['A'][idx], value)
    def test_copy(self):
        """copy(): new columns on the copy don't appear on the original, and
        the underlying block manager is a distinct object."""
        cop = self.frame.copy()
        cop['E'] = cop['A']
        self.assertNotIn('E', self.frame)
        # copy objects
        copy = self.mixed_frame.copy()
        self.assertIsNot(copy._data, self.mixed_frame._data)
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
    def test_corr_pearson(self):
        """corr(method='pearson') with NaNs present (mutates the shared
        self.frame fixture via chained assignment)."""
        tm._skip_if_no_scipy()
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('pearson')
    def test_corr_kendall(self):
        """corr(method='kendall') with NaNs present (mutates the shared
        self.frame fixture via chained assignment)."""
        tm._skip_if_no_scipy()
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('kendall')
    def test_corr_spearman(self):
        """corr(method='spearman') with NaNs present (mutates the shared
        self.frame fixture via chained assignment)."""
        tm._skip_if_no_scipy()
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('spearman')
    def test_corr_non_numeric(self):
        """corr() on a mixed frame silently drops non-numeric columns."""
        tm._skip_if_no_scipy()
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        # exclude non-numeric types
        result = self.mixed_frame.corr()
        expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
        assert_frame_equal(result, expected)
    def test_corr_nooverlap(self):
        """Columns with no overlapping valid observations correlate as NaN;
        an all-NaN column is NaN even with itself."""
        tm._skip_if_no_scipy()
        # nothing in common
        for meth in ['pearson', 'kendall', 'spearman']:
            df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
                            'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
                            'C': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]})
            rs = df.corr(meth)
            self.assertTrue(isnull(rs.ix['A', 'B']))
            self.assertTrue(isnull(rs.ix['B', 'A']))
            self.assertEqual(rs.ix['A', 'A'], 1)
            self.assertEqual(rs.ix['B', 'B'], 1)
            self.assertTrue(isnull(rs.ix['C', 'C']))
    def test_corr_constant(self):
        """Constant columns have zero variance, so every correlation is NaN."""
        tm._skip_if_no_scipy()
        # constant --> all NA
        for meth in ['pearson', 'spearman']:
            df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
                            'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
            rs = df.corr(meth)
            self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
    def test_corr_int_and_boolean(self):
        """Mixing bool and int columns (object ndarray internally) still gives
        the right all-ones correlation matrix."""
        tm._skip_if_no_scipy()
        # when dtypes of pandas series are different
        # then ndarray will have dtype=object,
        # so it need to be properly handled
        df = DataFrame({"a": [True, False], "b": [1, 0]})
        expected = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['a', 'b'])
        for meth in ['pearson', 'kendall', 'spearman']:
            assert_frame_equal(df.corr(meth), expected)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0,1.0,10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
    def test_corrwith(self):
        """corrwith(): column-wise and row-wise, ``drop`` behavior, order
        independence, and non-time-series data."""
        a = self.tsframe
        noise = Series(randn(len(a)), index=a.index)
        b = self.tsframe + noise
        # make sure order does not matter
        b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
        del b['B']
        colcorr = a.corrwith(b, axis=0)
        assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
        rowcorr = a.corrwith(b, axis=1)
        assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
        # drop=True removes labels missing from the other frame
        dropped = a.corrwith(b, axis=0, drop=True)
        assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
        self.assertNotIn('B', dropped)
        dropped = a.corrwith(b, axis=1, drop=True)
        self.assertNotIn(a.index[-1], dropped.index)
        # non time-series data
        index = ['a', 'b', 'c', 'd', 'e']
        columns = ['one', 'two', 'three', 'four']
        df1 = DataFrame(randn(5, 4), index=index, columns=columns)
        df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
        correls = df1.corrwith(df2, axis=1)
        for row in index[:4]:
            assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
    def test_corrwith_with_objects(self):
        """corrwith() ignores object columns on both axes."""
        df1 = tm.makeTimeDataFrame()
        df2 = tm.makeTimeDataFrame()
        cols = ['A', 'B', 'C', 'D']
        df1['obj'] = 'foo'
        df2['obj'] = 'bar'
        result = df1.corrwith(df2)
        expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
        assert_series_equal(result, expected)
        result = df1.corrwith(df2, axis=1)
        expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
        assert_series_equal(result, expected)
    def test_corrwith_series(self):
        """corrwith(Series) equals correlating every column with that series."""
        result = self.tsframe.corrwith(self.tsframe['A'])
        expected = self.tsframe.apply(self.tsframe['A'].corr)
        assert_series_equal(result, expected)
    def test_corrwith_matches_corrcoef(self):
        """corrwith() agrees with np.corrcoef on a nonlinear relationship."""
        df1 = DataFrame(np.arange(10000), columns=['a'])
        df2 = DataFrame(np.arange(10000)**2, columns=['a'])
        c1 = df1.corrwith(df2)['a']
        c2 = np.corrcoef(df1['a'],df2['a'])[0][1]
        assert_almost_equal(c1, c2)
        # x vs x**2 is strongly but not perfectly correlated
        self.assertTrue(c1 < 1)
    def test_drop_names(self):
        """drop() preserves index/columns names (in-place and not), raises on
        missing labels, and skips them with errors='ignore'."""
        df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],
                       columns=['d', 'e', 'f'])
        df.index.name, df.columns.name = 'first', 'second'
        df_dropped_b = df.drop('b')
        df_dropped_e = df.drop('e', axis=1)
        df_inplace_b, df_inplace_e = df.copy(), df.copy()
        df_inplace_b.drop('b', inplace=True)
        df_inplace_e.drop('e', axis=1, inplace=True)
        for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
            self.assertEqual(obj.index.name, 'first')
            self.assertEqual(obj.columns.name, 'second')
        # the original frame is untouched by the non-inplace drops
        self.assertEqual(list(df.columns), ['d', 'e', 'f'])
        self.assertRaises(ValueError, df.drop, ['g'])
        self.assertRaises(ValueError, df.drop, ['g'], 1)
        # errors = 'ignore'
        dropped = df.drop(['g'], errors='ignore')
        expected = Index(['a', 'b', 'c'], name='first')
        self.assert_index_equal(dropped.index, expected)
        dropped = df.drop(['b', 'g'], errors='ignore')
        expected = Index(['a', 'c'], name='first')
        self.assert_index_equal(dropped.index, expected)
        dropped = df.drop(['g'], axis=1, errors='ignore')
        expected = Index(['d', 'e', 'f'], name='second')
        self.assert_index_equal(dropped.columns, expected)
        dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
        expected = Index(['e', 'f'], name='second')
        self.assert_index_equal(dropped.columns, expected)
    def test_dropEmptyRows(self):
        """dropna(how='all') drops all-NaN rows, with and without inplace and
        with a ``subset``, leaving the original untouched when not inplace."""
        N = len(self.frame.index)
        mat = randn(N)
        mat[:5] = nan
        frame = DataFrame({'foo': mat}, index=self.frame.index)
        original = Series(mat, index=self.frame.index, name='foo')
        expected = original.dropna()
        inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
        smaller_frame = frame.dropna(how='all')
        # check that original was preserved
        assert_series_equal(frame['foo'], original)
        inplace_frame1.dropna(how='all', inplace=True)
        assert_series_equal(smaller_frame['foo'], expected)
        assert_series_equal(inplace_frame1['foo'], expected)
        smaller_frame = frame.dropna(how='all', subset=['foo'])
        inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
        assert_series_equal(smaller_frame['foo'], expected)
        assert_series_equal(inplace_frame2['foo'], expected)
    def test_dropIncompleteRows(self):
        """Default dropna() removes rows with any NaN; restricting ``subset``
        to a complete column keeps the frame the same size."""
        N = len(self.frame.index)
        mat = randn(N)
        mat[:5] = nan
        frame = DataFrame({'foo': mat}, index=self.frame.index)
        frame['bar'] = 5
        original = Series(mat, index=self.frame.index, name='foo')
        inp_frame1, inp_frame2 = frame.copy(), frame.copy()
        smaller_frame = frame.dropna()
        # non-inplace call must leave the source column untouched
        assert_series_equal(frame['foo'], original)
        inp_frame1.dropna(inplace=True)
        self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])
        self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])
        samesize_frame = frame.dropna(subset=['bar'])
        assert_series_equal(frame['foo'], original)
        self.assertTrue((frame['bar'] == 5).all())
        inp_frame2.dropna(subset=['bar'], inplace=True)
        # 'bar' has no NaNs, so nothing is dropped
        self.assertTrue(samesize_frame.index.equals(self.frame.index))
        self.assertTrue(inp_frame2.index.equals(self.frame.index))
    def test_dropna(self):
        """dropna() over both axes: default, ``thresh``, ``subset`` and
        ``how='all'`` variants, each checked inplace and not; bad axis raises."""
        df = DataFrame(np.random.randn(6, 4))
        # seed NaNs into the first two rows of column 2
        df[2][:2] = nan
        dropped = df.dropna(axis=1)
        expected = df.ix[:, [0, 1, 3]]
        inp = df.copy()
        inp.dropna(axis=1, inplace=True)
        assert_frame_equal(dropped, expected)
        assert_frame_equal(inp, expected)
        dropped = df.dropna(axis=0)
        expected = df.ix[lrange(2, 6)]
        inp = df.copy()
        inp.dropna(axis=0, inplace=True)
        assert_frame_equal(dropped, expected)
        assert_frame_equal(inp, expected)
        # threshold
        dropped = df.dropna(axis=1, thresh=5)
        expected = df.ix[:, [0, 1, 3]]
        inp = df.copy()
        inp.dropna(axis=1, thresh=5, inplace=True)
        assert_frame_equal(dropped, expected)
        assert_frame_equal(inp, expected)
        dropped = df.dropna(axis=0, thresh=4)
        expected = df.ix[lrange(2, 6)]
        inp = df.copy()
        inp.dropna(axis=0, thresh=4, inplace=True)
        assert_frame_equal(dropped, expected)
        assert_frame_equal(inp, expected)
        # thresh at or below the valid count keeps every column
        dropped = df.dropna(axis=1, thresh=4)
        assert_frame_equal(dropped, df)
        dropped = df.dropna(axis=1, thresh=3)
        assert_frame_equal(dropped, df)
        # subset
        dropped = df.dropna(axis=0, subset=[0, 1, 3])
        inp = df.copy()
        inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
        assert_frame_equal(dropped, df)
        assert_frame_equal(inp, df)
        # all
        dropped = df.dropna(axis=1, how='all')
        assert_frame_equal(dropped, df)
        df[2] = nan
        dropped = df.dropna(axis=1, how='all')
        expected = df.ix[:, [0, 1, 3]]
        assert_frame_equal(dropped, expected)
        # bad input
        self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
    """drop/dropna on a column Series keeps the parent frame in sync."""
    # test that the cacher updates: non-inplace calls leave the frame
    # untouched, inplace calls propagate back into the parent DataFrame
    original = Series([1, 2, np.nan], name='A')
    expected = Series([1, 2], dtype=original.dtype, name='A')
    df = pd.DataFrame({'A': original.values.copy()})
    df2 = df.copy()

    df['A'].dropna()
    assert_series_equal(df['A'], original)  # non-inplace: unchanged
    df['A'].dropna(inplace=True)
    assert_series_equal(df['A'], expected)  # inplace: frame updated

    df2['A'].drop([1])
    assert_series_equal(df2['A'], original)
    df2['A'].drop([1], inplace=True)
    assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
    """Invalid arguments to dropna raise the appropriate errors."""
    # unrecognized string / missing `how`
    with self.assertRaises(ValueError):
        self.frame.dropna(how='foo')
    with self.assertRaises(TypeError):
        self.frame.dropna(how=None)
    # non-existent column - 8303
    with self.assertRaises(KeyError):
        self.frame.dropna(subset=['A', 'X'])
def test_dropna_multiple_axes(self):
    """axis given as a list/tuple drops all-NaN rows and columns together."""
    df = DataFrame([[1, np.nan, 2, 3],
                    [4, np.nan, 5, 6],
                    [np.nan, np.nan, np.nan, np.nan],
                    [7, np.nan, 8, 9]])
    cp = df.copy()
    result = df.dropna(how='all', axis=[0, 1])
    result2 = df.dropna(how='all', axis=(0, 1))
    # equivalent to applying dropna per axis, one after the other
    expected = df.dropna(how='all').dropna(how='all', axis=1)
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
    assert_frame_equal(df, cp)  # input not mutated

    inp = df.copy()
    inp.dropna(how='all', axis=(0, 1), inplace=True)
    assert_frame_equal(inp, expected)
def test_drop_duplicates(self):
    """drop_duplicates: single/multi columns, keep variants, the deprecated
    take_last keyword, integer dtypes, and hash-sensitive value patterns."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('AAA')
    expected = df[:2]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep='last')
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep=False)
    expected = df.ix[[]]
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('AAA', take_last=True)
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)

    # multi column: label container may be ndarray, list or tuple
    expected = df.ix[[0, 1, 2, 3]]
    result = df.drop_duplicates(np.array(['AAA', 'B']))
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'])
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep='last')
    expected = df.ix[[0, 5, 6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep=False)
    expected = df.ix[[0]]
    assert_frame_equal(result, expected)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(('AAA', 'B'), take_last=True)
    expected = df.ix[[0, 5, 6, 7]]
    assert_frame_equal(result, expected)

    # consider everything
    df2 = df.ix[:, ['AAA', 'B', 'C']]

    result = df2.drop_duplicates()
    # in this case only: full-frame dedup equals dedup on ['AAA', 'B']
    expected = df2.drop_duplicates(['AAA', 'B'])
    assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep='last')
    expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
    assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
    assert_frame_equal(result, expected)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df2.drop_duplicates(take_last=True)
    with tm.assert_produces_warning(FutureWarning):
        expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
    assert_frame_equal(result, expected)

    # integers
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 2]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[-2, -1]]
    assert_frame_equal(result, expected)

    # a narrower int dtype must dedup the same as int64
    df['E'] = df['C'].astype('int8')
    result = df.drop_duplicates('E')
    expected = df.iloc[[0, 2]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('E', keep='last')
    expected = df.iloc[[-2, -1]]
    assert_frame_equal(result, expected)

    # GH 11376
    df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                       'y': [0, 6, 5, 5, 9, 1, 2]})
    expected = df.loc[df.index != 3]
    assert_frame_equal(df.drop_duplicates(), expected)

    df = pd.DataFrame([[1, 0], [0, 2]])
    assert_frame_equal(df.drop_duplicates(), df)

    df = pd.DataFrame([[-2, 0], [0, -4]])
    assert_frame_equal(df.drop_duplicates(), df)

    # large-magnitude values near the int64 limit
    x = np.iinfo(np.int64).max / 3 * 2
    df = pd.DataFrame([[-x, x], [0, x + 4]])
    assert_frame_equal(df.drop_duplicates(), df)

    df = pd.DataFrame([[-x, x], [x, x + 4]])
    assert_frame_equal(df.drop_duplicates(), df)
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_deprecated_warning(self):
    """drop_duplicates: `cols` is deprecated in favour of `subset`.

    Fix: the original passed ``kwargs={...}`` to ``assertRaises``, which
    forwarded a literal keyword argument *named* ``kwargs`` to
    ``drop_duplicates`` -- the TypeError came from that bogus keyword, not
    from the argument combinations the test meant to exercise. Pass the
    keywords directly so ``assertRaises`` forwards them as intended.
    """
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    expected = df[:2]

    # subset= raises no warning
    with tm.assert_produces_warning(False):
        result = df.drop_duplicates(subset='AAA')
    assert_frame_equal(result, expected)

    # cols= is deprecated and raises a FutureWarning
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(cols='AAA')
    assert_frame_equal(result, expected)

    # Does not allow both subset and cols
    self.assertRaises(TypeError, df.drop_duplicates,
                      cols='AAA', subset='B')

    # Does not allow unknown kwargs
    self.assertRaises(TypeError, df.drop_duplicates,
                      subset='AAA', bad_arg=True)

    # deprecate take_last: also warns even when set to the default-like False
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(take_last=False, subset='AAA')
    assert_frame_equal(result, expected)

    self.assertRaises(ValueError, df.drop_duplicates, keep='invalid_name')
def test_drop_duplicates_tuple(self):
    """A tuple column label is one key, not a list of keys."""
    df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
                                   'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column (the tuple label)
    result = df.drop_duplicates(('AA', 'AB'))
    expected = df[:2]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep='last')
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep=False)
    expected = df.ix[[]]  # empty df
    self.assertEqual(len(result), 0)
    assert_frame_equal(result, expected)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(('AA', 'AB'), take_last=True)
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)

    # multi column: tuple label combined with a plain label
    expected = df.ix[[0, 1, 2, 3]]
    result = df.drop_duplicates((('AA', 'AB'), 'B'))
    assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
    """None and NaN entries count as regular (mutually equal) key values."""
    # none
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('A')
    expected = df.ix[[0, 2, 3]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep='last')
    expected = df.ix[[1, 6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep=False)
    expected = df.ix[[]]  # empty df
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('A', take_last=True)
    expected = df.ix[[1, 6, 7]]
    assert_frame_equal(result, expected)

    # multi column
    result = df.drop_duplicates(['A', 'B'])
    expected = df.ix[[0, 2, 3, 6]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep='last')
    expected = df.ix[[1, 5, 6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep=False)
    expected = df.ix[[6]]
    assert_frame_equal(result, expected)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(['A', 'B'], take_last=True)
    expected = df.ix[[1, 5, 6, 7]]
    assert_frame_equal(result, expected)

    # nan
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('C')
    expected = df[:2]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep='last')
    expected = df.ix[[3, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep=False)
    expected = df.ix[[]]  # empty df
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('C', take_last=True)
    expected = df.ix[[3, 7]]
    assert_frame_equal(result, expected)

    # multi column
    result = df.drop_duplicates(['C', 'B'])
    expected = df.ix[[0, 1, 2, 4]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep='last')
    expected = df.ix[[1, 3, 6, 7]]
    assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep=False)
    expected = df.ix[[1]]
    assert_frame_equal(result, expected)

    # deprecate take_last
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(['C', 'B'], take_last=True)
    expected = df.ix[[1, 3, 6, 7]]
    assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
    """inplace=True mirrors the non-inplace results for every keep variant."""
    orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                      'B': ['one', 'one', 'two', 'two',
                            'two', 'two', 'one', 'two'],
                      'C': [1, 1, 2, 2, 2, 2, 1, 2],
                      'D': lrange(8)})

    # single column
    df = orig.copy()
    df.drop_duplicates('A', inplace=True)
    expected = orig[:2]
    result = df
    assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep='last', inplace=True)
    expected = orig.ix[[6, 7]]
    result = df
    assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep=False, inplace=True)
    expected = orig.ix[[]]
    result = df
    assert_frame_equal(result, expected)
    self.assertEqual(len(df), 0)

    # deprecate take_last
    df = orig.copy()
    with tm.assert_produces_warning(FutureWarning):
        df.drop_duplicates('A', take_last=True, inplace=True)
    expected = orig.ix[[6, 7]]
    result = df
    assert_frame_equal(result, expected)

    # multi column
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], inplace=True)
    expected = orig.ix[[0, 1, 2, 3]]
    result = df
    assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
    expected = orig.ix[[0, 5, 6, 7]]
    result = df
    assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
    expected = orig.ix[[0]]
    result = df
    assert_frame_equal(result, expected)

    # deprecate take_last
    df = orig.copy()
    with tm.assert_produces_warning(FutureWarning):
        df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
    expected = orig.ix[[0, 5, 6, 7]]
    result = df
    assert_frame_equal(result, expected)

    # consider everything
    orig2 = orig.ix[:, ['A', 'B', 'C']].copy()

    df2 = orig2.copy()
    df2.drop_duplicates(inplace=True)
    # in this case only: full-frame dedup equals dedup on ['A', 'B']
    expected = orig2.drop_duplicates(['A', 'B'])
    result = df2
    assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep='last', inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep='last')
    result = df2
    assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep=False)
    result = df2
    assert_frame_equal(result, expected)

    # deprecate take_last
    df2 = orig2.copy()
    with tm.assert_produces_warning(FutureWarning):
        df2.drop_duplicates(take_last=True, inplace=True)
    with tm.assert_produces_warning(FutureWarning):
        expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
    result = df2
    assert_frame_equal(result, expected)
def test_duplicated_deprecated_warning(self):
    """duplicated: `cols` is deprecated in favour of `subset`.

    Fix: the original passed ``kwargs={...}`` to ``assertRaises``, which
    sent a literal keyword named ``kwargs`` to ``duplicated`` -- the
    TypeError came from that bogus keyword rather than from the argument
    combinations under test. Pass the keywords directly instead.
    """
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # subset= raises no warning; cols= is deprecated
    with tm.assert_produces_warning(False):
        result = df.duplicated(subset='AAA')
    with tm.assert_produces_warning(FutureWarning):
        result = df.duplicated(cols='AAA')

    # Does not allow both subset and cols
    self.assertRaises(TypeError, df.duplicated,
                      cols='AAA', subset='B')

    # Does not allow unknown kwargs
    self.assertRaises(TypeError, df.duplicated,
                      subset='AAA', bad_arg=True)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
    """drop by label on rows/columns, errors='ignore', non-unique axes,
    and the GH 5628 inplace-cache issue."""
    simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
    assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
    assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
                       simple[[]])
    assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
    assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])

    # missing labels raise by default
    self.assertRaises(ValueError, simple.drop, 5)
    self.assertRaises(ValueError, simple.drop, 'C', 1)
    self.assertRaises(ValueError, simple.drop, [1, 5])
    self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)

    # errors = 'ignore': missing labels are silently skipped
    assert_frame_equal(simple.drop(5, errors='ignore'), simple)
    assert_frame_equal(simple.drop([0, 5], errors='ignore'),
                       simple.ix[[1, 2, 3], :])
    assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
    assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
                       simple[['B']])

    # non-unique - wheee! dropping a label removes every occurrence
    nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
                      columns=['a', 'a', 'b'])
    assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
    assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])

    nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
    nu_df.columns = list('abc')
    assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
    assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])

    # inplace cache issue
    # GH 5628
    df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
    expected = df[~(df.b > 0)]
    df.drop(labels=df[df.b > 0].index, inplace=True)
    assert_frame_equal(df, expected)
def test_fillna(self):
    """fillna with scalar / method / dict values across dtypes, plus limit=
    and datetime-valued columns.

    Fix: the empty-frame loop used ``inplace=1`` (a truthy int); use the
    boolean ``inplace=True`` that the parameter is documented to take.
    """
    self.tsframe.ix[:5, 'A'] = nan
    self.tsframe.ix[-5:, 'A'] = nan

    zero_filled = self.tsframe.fillna(0)
    self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())

    padded = self.tsframe.fillna(method='pad')
    # leading NaNs have nothing to pad from; trailing ones copy forward
    self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
    self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())

    # mixed type
    self.mixed_frame.ix[5:20, 'foo'] = nan
    self.mixed_frame.ix[-10:, 'A'] = nan
    result = self.mixed_frame.fillna(value=0)
    result = self.mixed_frame.fillna(method='pad')

    # value/method misuse raises
    self.assertRaises(ValueError, self.tsframe.fillna)
    self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')

    # mixed numeric (but no float16)
    mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
    mf.ix[-10:, 'A'] = nan
    result = mf.fillna(value=0)
    _check_mixed_float(result, dtype=dict(C=None))
    result = mf.fillna(method='pad')
    _check_mixed_float(result, dtype=dict(C=None))

    # empty frame (GH #2778)
    df = DataFrame(columns=['x'])
    for m in ['pad', 'backfill']:
        df.x.fillna(method=m, inplace=True)
        df.x.fillna(method=m)

    # with different dtype (GH3386)
    df = DataFrame([['a', 'a', np.nan, 'a'],
                    ['b', 'b', np.nan, 'b'],
                    ['c', 'c', np.nan, 'c']])
    result = df.fillna({2: 'foo'})
    expected = DataFrame([['a', 'a', 'foo', 'a'],
                          ['b', 'b', 'foo', 'b'],
                          ['c', 'c', 'foo', 'c']])
    assert_frame_equal(result, expected)

    df.fillna({2: 'foo'}, inplace=True)
    assert_frame_equal(df, expected)

    # limit and value: only the first NaN per column gets filled
    df = DataFrame(np.random.randn(10, 3))
    df.iloc[2:7, 0] = np.nan
    df.iloc[3:5, 2] = np.nan
    expected = df.copy()
    expected.iloc[2, 0] = 999
    expected.iloc[3, 2] = 999
    result = df.fillna(999, limit=1)
    assert_frame_equal(result, expected)

    # with datelike
    # GH 6344
    df = DataFrame({
        'Date': [pd.NaT, Timestamp("2014-1-1")],
        'Date2': [Timestamp("2013-1-1"), pd.NaT]
    })
    expected = df.copy()
    expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
    result = df.fillna(value={'Date': df['Date2']})
    assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
    """Filling an all-NaN object frame upcasts to the fill value's dtype."""
    # make sure that fillna on an empty frame works
    df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
    result = df.get_dtype_counts().sort_values()
    expected = Series({'object': 5})
    assert_series_equal(result, expected)

    result = df.fillna(1)
    # NOTE(review): this DataFrame `expected` is overwritten by the Series
    # below before ever being compared -- looks like an intended
    # assert_frame_equal went missing; confirm and either use or drop it
    expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
    result = result.get_dtype_counts().sort_values()
    expected = Series({'int64': 5})
    assert_series_equal(result, expected)

    # empty block
    df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
    result = df.fillna('nan')
    expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
    assert_frame_equal(result, expected)

    # equiv of replace
    df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
    for v in ['', 1, np.nan, 1.0]:
        expected = df.replace(np.nan, v)
        result = df.fillna(v)
        assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
    """fillna leaves complete datetime columns intact and fills None/NaT."""
    # GH 7095
    df = pd.DataFrame({'A': [-1, -2, np.nan],
                       'B': date_range('20130101', periods=3),
                       'C': ['foo', 'bar', None],
                       'D': ['foo2', 'bar2', None]},
                      index=date_range('20130110', periods=3))
    result = df.fillna('?')
    expected = pd.DataFrame({'A': [-1, -2, '?'],
                             'B': date_range('20130101', periods=3),
                             'C': ['foo', 'bar', '?'],
                             'D': ['foo2', 'bar2', '?']},
                            index=date_range('20130110', periods=3))
    self.assert_frame_equal(result, expected)

    # same frame, but with an explicit NaT inside the datetime column
    df = pd.DataFrame({'A': [-1, -2, np.nan],
                       'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), pd.NaT],
                       'C': ['foo', 'bar', None],
                       'D': ['foo2', 'bar2', None]},
                      index=date_range('20130110', periods=3))
    result = df.fillna('?')
    expected = pd.DataFrame({'A': [-1, -2, '?'],
                             'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), '?'],
                             'C': ['foo', 'bar', '?'],
                             'D': ['foo2', 'bar2', '?']},
                            index=date_range('20130110', periods=3))
    self.assert_frame_equal(result, expected)
def test_ffill(self):
    """DataFrame.ffill is equivalent to fillna(method='ffill')."""
    # poke NaN holes at both ends of column A
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan
    via_alias = self.tsframe.ffill()
    via_fillna = self.tsframe.fillna(method='ffill')
    assert_frame_equal(via_alias, via_fillna)
def test_bfill(self):
    """DataFrame.bfill is equivalent to fillna(method='bfill')."""
    # poke NaN holes at both ends of column A
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan
    via_alias = self.tsframe.bfill()
    via_fillna = self.tsframe.fillna(method='bfill')
    assert_frame_equal(via_alias, via_fillna)
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
    """inplace=True mutates the frame and matches the non-inplace result."""
    df = DataFrame(np.random.randn(10, 4))
    df[1][:4] = np.nan
    df[3][-4:] = np.nan

    expected = df.fillna(value=0)
    self.assertIsNot(expected, df)  # non-inplace returns a new object

    df.fillna(value=0, inplace=True)
    assert_frame_equal(df, expected)

    # re-seed the NaNs and repeat with method='ffill'
    df[1][:4] = np.nan
    df[3][-4:] = np.nan
    expected = df.fillna(method='ffill')
    self.assertIsNot(expected, df)

    df.fillna(method='ffill', inplace=True)
    assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
    """dict and Series fill values are applied column by column."""
    df = DataFrame({'a': [nan, 1, 2, nan, nan],
                    'b': [1, 2, 3, nan, nan],
                    'c': [nan, 1, 2, 3, 4]})

    result = df.fillna({'a': 0, 'b': 5})
    expected = df.copy()
    expected['a'] = expected['a'].fillna(0)
    expected['b'] = expected['b'].fillna(5)
    assert_frame_equal(result, expected)

    # it works: a key with no matching column ('d') does not raise
    result = df.fillna({'a': 0, 'b': 5, 'd': 7})

    # Series treated same as dict
    result = df.fillna(df.max())
    expected = df.fillna(df.max().to_dict())
    assert_frame_equal(result, expected)

    # disable this for now: Series value along axis=1 not implemented
    with assertRaisesRegexp(NotImplementedError, 'column by column'):
        df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar']*5},
index = list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
    """method='ffill' with axis=1 fills across columns (pad on the transpose)."""
    df = DataFrame(np.random.randn(10, 10))
    df.values[:, ::2] = np.nan  # NaN every other column

    result = df.fillna(method='ffill', axis=1)
    expected = df.T.fillna(method='pad').T
    assert_frame_equal(result, expected)

    # mixed dtypes: an int column; result must match the all-float fill
    df.insert(6, 'foo', 5)
    result = df.fillna(method='ffill', axis=1)
    expected = df.astype(float).fillna(method='ffill', axis=1)
    assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
    """A misspelled fill method raises ValueError mentioning the bad value."""
    with assertRaisesRegexp(ValueError, 'ffil'):
        self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
    """Unsupported fill values raise the documented errors."""
    # list and tuple values are rejected with TypeError
    for bad_value in ([1, 2], (1, 2)):
        self.assertRaises(TypeError, self.frame.fillna, bad_value)
    # frame with series
    self.assertRaises(ValueError, self.frame.iloc[:, 0].fillna, self.frame)
def test_replace_inplace(self):
    """replace(nan, 0) matches fillna(0), inplace and not, on mixed frames."""
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan

    tsframe = self.tsframe.copy()
    tsframe.replace(nan, 0, inplace=True)
    assert_frame_equal(tsframe, self.tsframe.fillna(0))

    # omitting `value` raises
    self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
    self.assertRaises(TypeError, self.tsframe.replace, nan)

    # mixed type
    self.mixed_frame.ix[5:20, 'foo'] = nan
    self.mixed_frame.ix[-10:, 'A'] = nan

    result = self.mixed_frame.replace(np.nan, 0)
    expected = self.mixed_frame.fillna(value=0)
    assert_frame_equal(result, expected)

    # list-of-values form
    tsframe = self.tsframe.copy()
    tsframe.replace([nan], [0], inplace=True)
    assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
    """Scalar regex patterns (string and compiled) -> value or backreference."""
    obj = {'a': list('ab..'), 'b': list('efgh')}
    dfobj = DataFrame(obj)
    mix = {'a': lrange(4), 'b': list('ab..')}
    dfmix = DataFrame(mix)

    ### simplest cases
    ## regex -> value
    # obj frame: replacing '.' with NaN and filling back round-trips
    res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex (replacement uses the \1 capture group)
    # obj frame
    res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    # everything with compiled regexs as well
    res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex
    # obj frame
    res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    # the pattern may also be passed via the regex= keyword
    res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
    """Same scalar-regex cases as test_regex_replace_scalar, inplace=True."""
    obj = {'a': list('ab..'), 'b': list('efgh')}
    dfobj = DataFrame(obj)
    mix = {'a': lrange(4), 'b': list('ab..')}
    dfmix = DataFrame(mix)

    ### simplest cases
    ## regex -> value
    # obj frame: replacing '.' with NaN and filling back round-trips
    res = dfobj.copy()
    res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.copy()
    res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex (replacement uses the \1 capture group)
    # obj frame
    res = dfobj.copy()
    res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.copy()
    res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    # everything with compiled regexs as well
    res = dfobj.copy()
    res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.copy()
    res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
                inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.copy()
    res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
                inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    # the pattern may also be supplied via the regex= keyword
    res = dfobj.copy()
    res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.copy()
    res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.copy()
    res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)

    # everything with compiled regexs as well
    res = dfobj.copy()
    res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))

    # mixed
    res = dfmix.copy()
    res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))

    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
                inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)

    # with mixed
    res = dfmix.copy()
    res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
                inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
    """Lists of regex patterns mapped elementwise onto values/regexes."""
    obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
    dfobj = DataFrame(obj)

    ## lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r'\s*\.\s*', r'e|f|g']
    values = [nan, 'crap']
    res = dfobj.replace(to_replace_res, values, regex=True)
    expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
                       ['h'], 'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
    values = [r'\1\1', r'\1_crap']
    res = dfobj.replace(to_replace_res, values, regex=True)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
                                                          'f_crap',
                                                          'g_crap', 'h'],
                       'c': ['h', 'e_crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]: regex replacements and plain values may be mixed
    to_replace_res = [r'\s*(\.)\s*', r'e']
    values = [r'\1\1', r'crap']
    res = dfobj.replace(to_replace_res, values, regex=True)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
                                                          'h'],
                       'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # same result when the patterns are passed via the regex= keyword
    to_replace_res = [r'\s*(\.)\s*', r'e']
    values = [r'\1\1', r'crap']
    res = dfobj.replace(value=values, regex=to_replace_res)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
                                                          'h'],
                       'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
    """Same list-of-regex cases as test_regex_replace_list_obj, inplace."""
    ### same as above with inplace=True
    ## lists of regexes and values
    obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
    dfobj = DataFrame(obj)

    ## lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r'\s*\.\s*', r'e|f|g']
    values = [nan, 'crap']
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
                       ['h'], 'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
    values = [r'\1\1', r'\1_crap']
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
                                                          'f_crap',
                                                          'g_crap', 'h'],
                       'c': ['h', 'e_crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]
    to_replace_res = [r'\s*(\.)\s*', r'e']
    values = [r'\1\1', r'crap']
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
                                                          'h'],
                       'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # same result when the patterns go through the regex= keyword
    to_replace_res = [r'\s*(\.)\s*', r'e']
    values = [r'\1\1', r'crap']
    res = dfobj.copy()
    res.replace(value=values, regex=to_replace_res, inplace=True)
    expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
                                                          'h'],
                       'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
    """List-of-regex replacement on a mixed (int + object) frame."""
    ## mixed frame to make sure this doesn't break things
    mix = {'a': lrange(4), 'b': list('ab..')}
    dfmix = DataFrame(mix)

    ## lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r'\s*\.\s*', r'a']
    values = [nan, 'crap']
    mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
    dfmix2 = DataFrame(mix2)
    res = dfmix2.replace(to_replace_res, values, regex=True)
    # the int column 'a' is untouched by the string regexes
    expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
                       'c': ['h', 'crap', 'l', 'o']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
    values = [r'\1\1', r'\1_crap']
    res = dfmix.replace(to_replace_res, values, regex=True)
    expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
                                            '..']})
    assert_frame_equal(res, expec)

    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]
    to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
    values = [r'\1\1', r'crap', r'\1_crap']
    res = dfmix.replace(to_replace_res, values, regex=True)
    expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
    assert_frame_equal(res, expec)

    # same result via the regex= keyword
    to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
    values = [r'\1\1', r'crap', r'\1_crap']
    res = dfmix.replace(regex=to_replace_res, value=values)
    expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
    assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
## dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
result = df.replace({'Type': {'Q':0,'T':1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
metachars = '[]', '()', '\d', '\w', '\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
tm.assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([ np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df,expected)
# int block splitting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
# to object block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })
result = df.replace(2, 'foo')
assert_frame_equal(result,expected)
expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })
result = df.replace([1,2], ['foo','bar'])
assert_frame_equal(result,expected)
# test case from
df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0,0] = m[0]
expected.iloc[1,1] = m[1]
assert_frame_equal(result,expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
    def test_interpolate(self):
        # placeholder: intentionally left empty
        pass
def test_replace_value_is_none(self):
self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# dict to scalar
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
    def test_replace_limit(self):
        # placeholder: intentionally left empty
        pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
from pandas.compat import StringIO
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({'\D': 1})
tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
def test_replace_period(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
'B' : [0, np.nan, 2]})
result = df.replace(np.nan,1)
expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
'B' : Series([0, 1, 2],dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0,np.nan)
expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
'B' : [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102',tz='US/Eastern'),Timestamp('20130104',tz='US/Eastern'))
expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
Timestamp('20130104',tz='US/Eastern'),
Timestamp('20130103',tz='US/Eastern')],
'B' : [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1,0] = np.nan
result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1,0] = np.nan
result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Pacific'))
expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
Timestamp('20130104',tz='US/Pacific'),
Timestamp('20130103',tz='US/Eastern')],
'B' : [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1,0] = np.nan
result = result.replace({'A' : np.nan }, Timestamp('20130104'))
expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103',tz='US/Eastern')],
'B' : [0, np.nan, 2]})
assert_frame_equal(result, expected)
def test_combine_multiple_frames_dtypes(self):
# GH 2759
A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).get_dtype_counts()
expected = Series(dict( float64 = 2, float32 = 2 ))
assert_series_equal(results,expected)
def test_ops(self):
# tst ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [ 4, 4000 ]:
df = DataFrame(1,index=range(n),columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+','__add__','__radd__'),
('-','__sub__','__rsub__'),
('*','__mul__','__rmul__'),
('/','__truediv__','__rtruediv__')]:
base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result,expected)
# these are commutative
if op in ['+','*']:
result = getattr(df,op)(m)
assert_frame_equal(result,expected)
# these are not
elif op in ['-','/']:
result = getattr(df,rop)(m)
assert_frame_equal(result,expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1-np.isnan(df.iloc[0:25]))
result = (1-np.isnan(df)).iloc[0:25]
assert_frame_equal(result,expected)
def test_truncate(self):
offset = datetools.bday
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] +1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_xs(self):
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
self.assertTrue(np.isnan(self.frame[item][idx]))
else:
self.assertEqual(value, self.frame[item][idx])
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
self.assertEqual(xs.dtype, np.object_)
self.assertEqual(xs['A'], 1)
self.assertEqual(xs['B'], '1')
with tm.assertRaises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - datetools.bday)
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
self.assertTrue((expected == 5).all())
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])
# no columns but Index(dtype=object)
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([], name='a', index=pd.Index([], dtype=object))
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(pivoted, expected)
# name tracking
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.name, 'columns')
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.names, (None, 'columns'))
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with assertRaisesRegexp(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
self.assert_numpy_array_equal(result.columns, ['A', 'B'])
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
assert_frame_equal(result, expected, check_names=False)
self.assertEqual(result.index.name, 'index',)
self.assertEqual(result.columns.names, (None, 'columns'))
expected.columns = expected.columns.droplevel(0)
data = {
'index': range(7),
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assertTrue(newFrame.index.equals(self.ts1.index))
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result,self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5], columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
tm.assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other':['a', 'b', np.nan, 'c'],
'date':['2015-03-22', np.nan, '2012-01-08', np.nan],
'amount':[2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
    def test_reindex_name_remains(self):
        """Reindexing with a named Series/Index should propagate that name to
        the resulting index (axis 0) or columns (axis 1)."""
        s = Series(random.rand(10))
        df = DataFrame(s, index=np.arange(len(s)))
        i = Series(np.arange(10), name='iname')
        df = df.reindex(i)
        self.assertEqual(df.index.name, 'iname')
        # a named Index overrides the previous name
        df = df.reindex(Index(np.arange(10), name='tmpname'))
        self.assertEqual(df.index.name, 'tmpname')
        # same check along the column axis
        s = Series(random.rand(10))
        df = DataFrame(s.T, index=np.arange(len(s)))
        i = Series(np.arange(10), name='iname')
        df = df.reindex(columns=i)
        self.assertEqual(df.columns.name, 'iname')
    def test_reindex_int(self):
        """Reindexing an int frame keeps int64 when no NaNs are introduced,
        but upcasts to float64 when new (missing) rows appear."""
        smaller = self.intframe.reindex(self.intframe.index[::2])
        self.assertEqual(smaller['A'].dtype, np.int64)
        # expanding back introduces NaN rows -> float upcast
        bigger = smaller.reindex(self.intframe.index)
        self.assertEqual(bigger['A'].dtype, np.float64)
        # pure column selection introduces no NaNs, dtype preserved
        smaller = self.intframe.reindex(columns=['A', 'B'])
        self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
    def test_reindex_columns(self):
        """Column reindexing keeps requested columns, NaN-fills new ones and
        drops the columns not asked for."""
        newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
        assert_series_equal(newFrame['B'], self.frame['B'])
        # 'E' did not exist, so it is all-NaN
        self.assertTrue(np.isnan(newFrame['E']).all())
        self.assertNotIn('C', newFrame)
        # length zero
        newFrame = self.frame.reindex(columns=[])
        self.assertTrue(newFrame.empty)
    def test_reindex_axes(self):
        """Reindexing by both axes must not lose the index's freq attribute."""
        # GH 3317, reindexing by both axes loses freq of the index
        from datetime import datetime
        df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])
        time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
        some_cols = ['a', 'b']
        # freq must survive: index-only, both-axes, and sequential reindexes
        index_freq = df.reindex(index=time_freq).index.freq
        both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
        seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
        self.assertEqual(index_freq, both_freq)
        self.assertEqual(index_freq, seq_freq)
    def test_reindex_fill_value(self):
        """reindex/reindex_axis with fill_value should fill new labels with
        the given scalar (of any type) instead of NaN."""
        df = DataFrame(np.random.randn(10, 4))
        # axis=0
        result = df.reindex(lrange(15))
        # default fill is NaN for the 5 new rows
        self.assertTrue(np.isnan(result.values[-5:]).all())
        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
        # axis=1
        result = df.reindex(columns=lrange(5), fill_value=0.)
        expected = df.copy()
        expected[4] = 0.
        assert_frame_equal(result, expected)
        result = df.reindex(columns=lrange(5), fill_value=0)
        expected = df.copy()
        expected[4] = 0
        assert_frame_equal(result, expected)
        # non-numeric fill values work too
        result = df.reindex(columns=lrange(5), fill_value='foo')
        expected = df.copy()
        expected[4] = 'foo'
        assert_frame_equal(result, expected)
        # reindex_axis
        result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
        result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
        expected = df.reindex(columns=lrange(5)).fillna(0)
        assert_frame_equal(result, expected)
        # other dtypes
        df['foo'] = 'foo'
        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
    def test_reindex_dups(self):
        """Reindexing a frame whose index has duplicates must raise, while
        directly assigning a new index is fine."""
        # GH4746, reindex on duplicate index error messages
        arr = np.random.randn(10)
        df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])
        # set index is ok
        result = df.copy()
        result.index = list(range(len(df)))
        expected = DataFrame(arr,index=list(range(len(df))))
        assert_frame_equal(result,expected)
        # reindex fails
        self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
    def test_align(self):
        """DataFrame.align: copy semantics, join/axis options, fill values,
        frame-vs-series alignment and broadcast_axis behaviour."""
        # aligning a frame with itself copies by default...
        af, bf = self.frame.align(self.frame)
        self.assertIsNot(af._data, self.frame._data)
        # ...but shares the underlying data with copy=False
        af, bf = self.frame.align(self.frame, copy=False)
        self.assertIs(af._data, self.frame._data)
        # axis = 0
        other = self.frame.ix[:-5, :3]
        af, bf = self.frame.align(other, axis=0, fill_value=-1)
        self.assertTrue(bf.columns.equals(other.columns))
        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values
        diff_b_vals = bf.reindex(diff_b).values
        self.assertTrue((diff_a_vals == -1).all())
        af, bf = self.frame.align(other, join='right', axis=0)
        self.assertTrue(bf.columns.equals(other.columns))
        self.assertTrue(bf.index.equals(other.index))
        self.assertTrue(af.index.equals(other.index))
        # axis = 1
        other = self.frame.ix[:-5, :3].copy()
        af, bf = self.frame.align(other, axis=1)
        self.assertTrue(bf.columns.equals(self.frame.columns))
        self.assertTrue(bf.index.equals(other.index))
        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values
        diff_b_vals = bf.reindex(diff_b).values
        self.assertTrue((diff_a_vals == -1).all())
        af, bf = self.frame.align(other, join='inner', axis=1)
        self.assertTrue(bf.columns.equals(other.columns))
        af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(other.columns))
        # test other non-float types
        af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(other.columns))
        af, bf = self.mixed_frame.align(self.mixed_frame,
                                        join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(self.mixed_frame.columns))
        # aligning a frame with a Series along axis=1: inner join of columns
        # with the series' (single) name yields an empty intersection
        af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
                                  method=None, fill_value=None)
        self.assertTrue(bf.index.equals(Index([])))
        af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
                                  method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        # mixed floats/ints
        af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
                                        method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
                                      method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        # try to align dataframe to series along bad axis
        self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
                          join='inner', axis=2)
        # align dataframe to series with broadcast or not
        idx = self.frame.index
        s = Series(range(len(idx)), index=idx)
        left, right = self.frame.align(s, axis=0)
        tm.assert_index_equal(left.index, self.frame.index)
        tm.assert_index_equal(right.index, self.frame.index)
        self.assertTrue(isinstance(right, Series))
        # broadcast_axis=1 turns the series into a frame (one column per col)
        left, right = self.frame.align(s, broadcast_axis=1)
        tm.assert_index_equal(left.index, self.frame.index)
        expected = {}
        for c in self.frame.columns:
            expected[c] = s
        expected = DataFrame(expected, index=self.frame.index,
                             columns=self.frame.columns)
        assert_frame_equal(right, expected)
        # GH 9558
        df = DataFrame({'a':[1,2,3], 'b':[4,5,6]})
        result = df[df['a'] == 2]
        expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
        assert_frame_equal(result, expected)
        result = df.where(df['a'] == 2, 0)
        expected = DataFrame({'a':[0, 2, 0], 'b':[0, 5, 0]})
        assert_frame_equal(result, expected)
    def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
        """Helper: align ``a`` with ``b`` and compare the result against the
        expected frames built by hand (join the labels, reindex, fillna)."""
        aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
                         fill_axis=fill_axis)
        join_index, join_columns = None, None
        ea, eb = a, b
        # axis=None means align along both axes
        if axis is None or axis == 0:
            join_index = a.index.join(b.index, how=how)
            ea = ea.reindex(index=join_index)
            eb = eb.reindex(index=join_index)
        if axis is None or axis == 1:
            join_columns = a.columns.join(b.columns, how=how)
            ea = ea.reindex(columns=join_columns)
            eb = eb.reindex(columns=join_columns)
        # apply the same fill method/limit the align call used
        ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
        eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
        assert_frame_equal(aa, ea)
        assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
    def _check_align_fill(self, kind, meth, ax, fax):
        """Helper: run _check_align for overlapping, empty-left, empty-right
        and both-empty frames, with and without a fill limit."""
        left = self.frame.ix[0:4, :10]
        right = self.frame.ix[2:, 6:]
        empty = self.frame.ix[:0, :0]
        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # empty left
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # empty right
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # both empty
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
    def test_align_int_fill_bug(self):
        """Arithmetic alignment with an int column must match the all-float
        equivalent (GH #910)."""
        # GH #910
        X = np.arange(10*10, dtype='float64').reshape(10, 10)
        Y = np.ones((10, 1), dtype=int)
        df1 = DataFrame(X)
        df1['0.X'] = Y.squeeze()
        df2 = df1.astype(float)
        # mean-centering should give identical results whether or not one
        # column started out as int
        result = df1 - df1.mean()
        expected = df2 - df2.mean()
        assert_frame_equal(result, expected)
    def test_align_multiindex(self):
        """Aligning a MultiIndexed frame with a frame indexed on one of its
        levels (GH 10665); left/right joins must be mirror images."""
        # GH 10665
        # same test cases as test_align_multiindex in test_series.py
        midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
                                          names=('a', 'b', 'c'))
        idx = pd.Index(range(2), name='b')
        df1 = pd.DataFrame(np.arange(12,dtype='int64'), index=midx)
        df2 = pd.DataFrame(np.arange(2,dtype='int64'), index=idx)
        # these must be the same results (but flipped)
        res1l, res1r = df1.align(df2, join='left')
        res2l, res2r = df2.align(df1, join='right')
        expl = df1
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        # df2's values broadcast over level 'b'; b==2 has no match -> NaN
        expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)
        res1l, res1r = df1.align(df2, join='right')
        res2l, res2r = df2.align(df1, join='left')
        # right join restricts level 'b' to df2's labels {0, 1}
        exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
                                             names=('a', 'b', 'c'))
        expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)
    def test_where(self):
        """DataFrame.where: element selection, alignment with other frames /
        ndarrays / scalars, dtype preservation and upcasting, in-place mode."""
        default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])
        def _safe_add(df):
            # only add to the numeric items
            def is_ok(s):
                return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
            return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))
        def _check_get(df, cond, check_dtypes = True):
            # where(cond, other) must take df where cond holds, other elsewhere
            other1 = _safe_add(df)
            rs = df.where(cond, other1)
            rs2 = df.where(cond.values, other1)
            for k, v in rs.iteritems():
                exp = Series(np.where(cond[k], df[k], other1[k]),index=v.index)
                assert_series_equal(v, exp, check_names=False)
            assert_frame_equal(rs, rs2)
            # dtypes
            if check_dtypes:
                self.assertTrue((rs.dtypes == df.dtypes).all() == True)
        # check getting
        for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
            cond = df > 0
            _check_get(df, cond)
        # upcasting case (GH # 2794)
        df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))
        df.ix[1,:] = 0
        result = df.where(df>=0).get_dtype_counts()
        #### when we don't preserve boolean casts ####
        #expected = Series({ 'float32' : 1, 'float64' : 3 })
        expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })
        assert_series_equal(result, expected)
        # aligning
        def _check_align(df, cond, other, check_dtypes = True):
            # local variant (shadows the method): checks column-by-column
            # against an np.where built from the realigned condition
            rs = df.where(cond, other)
            for i, k in enumerate(rs.columns):
                result = rs[k]
                d = df[k].values
                c = cond[k].reindex(df[k].index).fillna(False).values
                if np.isscalar(other):
                    o = other
                else:
                    if isinstance(other,np.ndarray):
                        o = Series(other[:,i],index=result.index).values
                    else:
                        o = other[k].values
                new_values = d if c.all() else np.where(c, d, o)
                expected = Series(new_values, index=result.index, name=k)
                # since we can't always have the correct numpy dtype
                # as numpy doesn't know how to downcast, don't check
                assert_series_equal(result, expected, check_dtype=False)
            # dtypes
            # can't check dtype when other is an ndarray
            if check_dtypes and not isinstance(other,np.ndarray):
                self.assertTrue((rs.dtypes == df.dtypes).all() == True)
        for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:
            # other is a frame
            cond = (df > 0)[1:]
            _check_align(df, cond, _safe_add(df))
            # check other is ndarray
            cond = df > 0
            _check_align(df, cond, (_safe_add(df).values))
            # integers are upcast, so don't check the dtypes
            cond = df > 0
            check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])
            _check_align(df, cond, np.nan, check_dtypes = check_dtypes)
        # invalid conditions
        df = default_frame
        err1 = (df + 1).values[0:2, :]
        self.assertRaises(ValueError, df.where, cond, err1)
        err2 = cond.ix[:2, :].values
        other1 = _safe_add(df)
        self.assertRaises(ValueError, df.where, err2, other1)
        self.assertRaises(ValueError, df.mask, True)
        self.assertRaises(ValueError, df.mask, 0)
        # where inplace
        def _check_set(df, cond, check_dtypes = True):
            # in-place where(cond, nan) must equal mask(~cond)
            dfi = df.copy()
            econd = cond.reindex_like(df).fillna(True)
            expected = dfi.mask(~econd)
            dfi.where(cond, np.nan, inplace=True)
            assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)x
            if check_dtypes:
                for k, v in compat.iteritems(df.dtypes):
                    if issubclass(v.type,np.integer) and not cond[k].all():
                        v = np.dtype('float64')
                    self.assertEqual(dfi[k].dtype, v)
        for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
            cond = df > 0
            _check_set(df, cond)
            cond = df >= 0
            _check_set(df, cond)
            # aligining
            cond = (df >= 0)[1:]
            _check_set(df, cond)
        # GH 10218
        # test DataFrame.where with Series slicing
        df = DataFrame({'a': range(3), 'b': range(4, 7)})
        result = df.where(df['a'] == 1)
        expected = df[df['a'] == 1].reindex(df.index)
        assert_frame_equal(result, expected)
    def test_where_bug(self):
        """where with NaN replacement on float and int frames (GH 2793) and
        frame-vs-frame replacement alignment (GH 7506)."""
        # GH 2793
        df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')
        expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
        result = df.where(df > 2, np.nan)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(result > 2, np.nan, inplace=True)
        assert_frame_equal(result, expected)
        # mixed: int columns must upcast to float64 when NaNs are introduced
        for dtype in ['int16','int8','int32','int64']:
            df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })
            expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
            result = df.where(df > 2, np.nan)
            assert_frame_equal(result, expected)
            result = df.copy()
            result.where(result > 2, np.nan, inplace=True)
            assert_frame_equal(result, expected)
        # transpositional issue
        # GH7506
        a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})
        b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})
        do_not_replace = b.isnull() | (a > b)
        expected = a.copy()
        expected[~do_not_replace] = b
        result = a.where(do_not_replace,b)
        assert_frame_equal(result,expected)
        # same check on a frame with a different column count
        a = DataFrame({ 0 : [4,6], 1 : [1,0]})
        b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})
        do_not_replace = b.isnull() | (a > b)
        expected = a.copy()
        expected[~do_not_replace] = b
        result = a.where(do_not_replace,b)
        assert_frame_equal(result,expected)
    def test_where_datetime(self):
        """Comparing datetime columns against a scalar timestamp should NaN
        out only the failing datetime entries (GH 3311)."""
        # GH 3311
        df = DataFrame(dict(A = date_range('20130102',periods=5),
                            B = date_range('20130104',periods=5),
                            C = np.random.randn(5)))
        stamp = datetime(2013,1,3)
        result = df[df>stamp]
        expected = df.copy()
        # only the first two entries of A are <= the stamp
        expected.loc[[0,1],'A'] = np.nan
        assert_frame_equal(result,expected)
    def test_where_none(self):
        """Setting with None: upcasts float frames to hold NaN (GH 4667), but
        in-place where with None on a mixed-type frame raises (GH 7656)."""
        # GH 4667
        # setting with None changes dtype
        df = DataFrame({'series': Series(range(10))}).astype(float)
        df[df > 7] = None
        expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })
        assert_frame_equal(df, expected)
        # GH 7656
        df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])
        expected = df.where(~isnull(df), None)
        with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
            df.where(~isnull(df), None, inplace=True)
    def test_where_align(self):
        """where with an axis argument: filling NaNs from column means
        (axis='columns') or from a column (axis='index'/'rows')."""
        def create():
            # fresh frame with NaN runs in each column
            df = DataFrame(np.random.randn(10,3))
            df.iloc[3:5,0] = np.nan
            df.iloc[4:6,1] = np.nan
            df.iloc[5:8,2] = np.nan
            return df
        # series
        df = create()
        expected = df.fillna(df.mean())
        result = df.where(pd.notnull(df),df.mean(),axis='columns')
        assert_frame_equal(result, expected)
        df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')
        assert_frame_equal(df, expected)
        df = create().fillna(0)
        expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])
        result = df.where(df>0,df[0],axis='index')
        assert_frame_equal(result, expected)
        # 'rows' is an alias for 'index'
        result = df.where(df>0,df[0],axis='rows')
        assert_frame_equal(result, expected)
        # frame
        df = create()
        expected = df.fillna(1)
        result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))
        assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])
df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df,expected)
    def test_where_axis(self):
        """where with a Series/DataFrame replacement and an explicit axis
        (GH 9736): index- vs columns-wise broadcasting, upcasts, multi-block
        frames and in-place operation."""
        # GH 9736
        df = DataFrame(np.random.randn(2, 2))
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, 1])
        # all-False mask: result is entirely the broadcast replacement
        expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Upcast needed
        df = DataFrame([[1, 2], [3, 4]], dtype='int64')
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, np.nan])
        expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[0, np.nan], [0, np.nan]], dtype='float64')
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        # in-place columns-wise keeps per-column dtypes (int where no NaN)
        expected = DataFrame({0 : np.array([0, 0], dtype='int64'),
                              1 : np.array([np.nan, np.nan], dtype='float64')})
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Multiple dtypes (=> multiple Blocks)
        df = pd.concat([DataFrame(np.random.randn(10, 2)),
                        DataFrame(np.random.randint(0, 10, size=(10, 2)))],
                       ignore_index=True, axis=1)
        mask = DataFrame(False, columns=df.columns, index=df.index)
        s1 = Series(1, index=df.columns)
        s2 = Series(2, index=df.index)
        result = df.where(mask, s1, axis='columns')
        expected = DataFrame(1.0, columns=df.columns, index=df.index)
        # the int block keeps its dtype
        expected[2] = expected[2].astype(int)
        expected[3] = expected[3].astype(int)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s1, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        result = df.where(mask, s2, axis='index')
        expected = DataFrame(2.0, columns=df.columns, index=df.index)
        expected[2] = expected[2].astype(int)
        expected[3] = expected[3].astype(int)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s2, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        # DataFrame vs DataFrame
        d1 = df.copy().drop(1, axis=0)
        expected = df.copy()
        # the dropped row has no replacement value -> NaN
        expected.loc[1, :] = np.nan
        result = df.where(mask, d1)
        assert_frame_equal(result, expected)
        result = df.where(mask, d1, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True, axis='index')
        assert_frame_equal(result, expected)
        d2 = df.copy().drop(1, axis=1)
        expected = df.copy()
        expected.loc[:, 1] = np.nan
        result = df.where(mask, d2)
        assert_frame_equal(result, expected)
        result = df.where(mask, d2, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True, axis='columns')
        assert_frame_equal(result, expected)
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
#----------------------------------------------------------------------
# Transposing
    def test_transpose(self):
        """Transposing swaps rows/columns element-wise; a mixed-type frame
        transposes to all-object columns."""
        frame = self.frame
        dft = frame.T
        # every (row, col) of the transpose equals (col, row) of the original
        for idx, series in compat.iteritems(dft):
            for col, value in compat.iteritems(series):
                if np.isnan(value):
                    self.assertTrue(np.isnan(frame[col][idx]))
                else:
                    self.assertEqual(value, frame[col][idx])
        # mixed type
        index, data = tm.getMixedTypeDict()
        mixed = DataFrame(data, index=index)
        mixed_T = mixed.T
        for col, s in compat.iteritems(mixed_T):
            self.assertEqual(s.dtype, np.object_)
    def test_transpose_get_view(self):
        """For a homogeneous frame, .T returns a view: writing through the
        transpose's values must be visible in the original frame."""
        dft = self.frame.T
        dft.values[:, 5:10] = 5
        self.assertTrue((self.frame.values[5:10] == 5).all())
#----------------------------------------------------------------------
# Renaming
    def test_rename(self):
        """DataFrame.rename with dicts and callables, on columns, index,
        named indexes and MultiIndexes; names must be preserved."""
        mapping = {
            'A': 'a',
            'B': 'b',
            'C': 'c',
            'D': 'd'
        }
        # dict mapping and an equivalent callable give the same result
        renamed = self.frame.rename(columns=mapping)
        renamed2 = self.frame.rename(columns=str.lower)
        assert_frame_equal(renamed, renamed2)
        assert_frame_equal(renamed2.rename(columns=str.upper),
                           self.frame, check_names=False)
        # index
        data = {
            'A': {'foo': 0, 'bar': 1}
        }
        # gets sorted alphabetical
        df = DataFrame(data)
        renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
        self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])
        renamed = df.rename(index=str.upper)
        self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])
        # have to pass something
        self.assertRaises(TypeError, self.frame.rename)
        # partial columns
        renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
        self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])
        # other axis
        renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
        self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])
        # index with name
        index = Index(['foo', 'bar'], name='name')
        renamer = DataFrame(data, index=index)
        renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
        self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])
        # renaming labels must not drop the index's name
        self.assertEqual(renamed.index.name, renamer.index.name)
        # MultiIndex
        tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
        tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
        index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
        columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])
        renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)
        renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
                                 columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
        new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])
        new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])
        self.assert_numpy_array_equal(renamed.index, new_index)
        self.assert_numpy_array_equal(renamed.columns, new_columns)
        # level names survive label renames
        self.assertEqual(renamed.index.names, renamer.index.names)
        self.assertEqual(renamed.columns.names, renamer.columns.names)
    def test_rename_nocopy(self):
        """rename(copy=False) shares data: writing to the renamed column
        must be visible through the original frame."""
        renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
        renamed['foo'] = 1.
        self.assertTrue((self.frame['C'] == 1.).all())
    def test_rename_inplace(self):
        """rename without inplace leaves the frame untouched; inplace=True
        replaces the column (new underlying Series object)."""
        self.frame.rename(columns={'C': 'foo'})
        self.assertIn('C', self.frame)
        self.assertNotIn('foo', self.frame)
        c_id = id(self.frame['C'])
        frame = self.frame.copy()
        frame.rename(columns={'C': 'foo'}, inplace=True)
        self.assertNotIn('C', frame)
        self.assertIn('foo', frame)
        # the renamed column is a different object, not the original 'C'
        self.assertNotEqual(id(frame['foo']), c_id)
    def test_rename_bug(self):
        """rename followed by set_index must produce the expected MultiIndex
        (GH 5344: rename set ref_locs and set_index was not resetting)."""
        # GH 5344
        # rename set ref_locs, and set_index was not resetting
        df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})
        df = df.rename(columns={0 : 'a'})
        df = df.rename(columns={1 : 'b'})
        df = df.set_index(['a','b'])
        df.columns = ['2001-01-01']
        expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],
                                                                    names=['a','b']),
                             columns=['2001-01-01'])
        assert_frame_equal(df,expected)
#----------------------------------------------------------------------
# Time series related
    def test_diff(self):
        """DataFrame.diff equals subtracting the shifted frame, including
        big-int, float32 and axis=1 cases."""
        the_diff = self.tsframe.diff(1)
        assert_series_equal(the_diff['A'],
                            self.tsframe['A'] - self.tsframe['A'].shift(1))
        # int dtype: difference of adjacent huge ints must stay exact
        a = 10000000000000000
        b = a + 1
        s = Series([a, b])
        rs = DataFrame({'s': s}).diff()
        self.assertEqual(rs.s[1], 1)
        # mixed numeric
        tf = self.tsframe.astype('float32')
        the_diff = tf.diff(1)
        assert_series_equal(the_diff['A'],
                            tf['A'] - tf['A'].shift(1))
        # issue 10907
        df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
        df.insert(0, 'x', 1)
        # axis=1 diff: first column becomes NaN, rest are column differences
        result = df.diff(axis=1)
        expected = pd.DataFrame({'x':np.nan, 'y':pd.Series(1), 'z':pd.Series(1)}).astype('float64')
        assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0,2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame([[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2., 2.]]))
    def test_pct_change(self):
        """pct_change must equal frame / shifted-frame - 1 under each
        fill_method, period, limit and freq option."""
        rs = self.tsframe.pct_change(fill_method=None)
        assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
        # default fill_method pads before computing
        rs = self.tsframe.pct_change(2)
        filled = self.tsframe.fillna(method='pad')
        assert_frame_equal(rs, filled / filled.shift(2) - 1)
        rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
        filled = self.tsframe.fillna(method='bfill', limit=1)
        assert_frame_equal(rs, filled / filled.shift(1) - 1)
        # freq-based shift
        rs = self.tsframe.pct_change(freq='5D')
        filled = self.tsframe.fillna(method='pad')
        assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
    def test_shift(self):
        """DataFrame.shift: naive positive/negative/zero shifts, shifting by
        a DateOffset or freq string, PeriodIndex frames, and axis=1."""
        # naive shift
        shiftedFrame = self.tsframe.shift(5)
        self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
        shiftedSeries = self.tsframe['A'].shift(5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        shiftedFrame = self.tsframe.shift(-5)
        self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
        shiftedSeries = self.tsframe['A'].shift(-5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        # shift by 0
        unshifted = self.tsframe.shift(0)
        assert_frame_equal(unshifted, self.tsframe)
        # shift by DateOffset
        shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())
        self.assertEqual(len(shiftedFrame), len(self.tsframe))
        # freq string must be equivalent to the DateOffset
        shiftedFrame2 = self.tsframe.shift(5, freq='B')
        assert_frame_equal(shiftedFrame, shiftedFrame2)
        d = self.tsframe.index[0]
        shifted_d = d + datetools.BDay(5)
        assert_series_equal(self.tsframe.xs(d),
                            shiftedFrame.xs(shifted_d), check_names=False)
        # shift int frame
        int_shifted = self.intframe.shift(1)
        # Shifting with PeriodIndex
        ps = tm.makePeriodFrame()
        shifted = ps.shift(1)
        unshifted = shifted.shift(-1)
        self.assertTrue(shifted.index.equals(ps.index))
        tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],
                             compare_keys=False)
        shifted2 = ps.shift(1, 'B')
        shifted3 = ps.shift(1, datetools.bday)
        assert_frame_equal(shifted2, shifted3)
        assert_frame_equal(ps, shifted2.shift(-1, 'B'))
        # shifting with a freq that doesn't match the PeriodIndex must raise
        assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
                           ps.shift, freq='D')
        # shift other axis
        # GH 6371
        df = DataFrame(np.random.rand(10,5))
        expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
        result = df.shift(1,axis=1)
        assert_frame_equal(result,expected)
        # shift named axis
        df = DataFrame(np.random.rand(10,5))
        expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
        result = df.shift(1,axis='columns')
        assert_frame_equal(result,expected)
    def test_shift_bool(self):
        """Shifting bool columns upcasts to object so the introduced NaNs
        can be represented."""
        df = DataFrame({'high': [True, False],
                        'low': [False, False]})
        rs = df.shift(1)
        xp = DataFrame(np.array([[np.nan, np.nan],
                                 [True, False]], dtype=object),
                       columns=['high', 'low'])
        assert_frame_equal(rs, xp)
    def test_shift_categorical(self):
        """Shifting categorical columns must shift each column like its
        Series equivalent (GH 9416)."""
        # GH 9416
        s1 = pd.Series(['a', 'b', 'c'], dtype='category')
        s2 = pd.Series(['A', 'B', 'C'], dtype='category')
        df = DataFrame({'one': s1, 'two': s2})
        rs = df.shift(1)
        xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
        assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
    def test_tshift(self):
        """tshift moves the index (not the data) and round-trips; a frame
        without an inferable freq must raise."""
        # PeriodIndex
        ps = tm.makePeriodFrame()
        shifted = ps.tshift(1)
        unshifted = shifted.tshift(-1)
        # +1 then -1 restores the original
        assert_frame_equal(unshifted, ps)
        shifted2 = ps.tshift(freq='B')
        assert_frame_equal(shifted, shifted2)
        shifted3 = ps.tshift(freq=datetools.bday)
        assert_frame_equal(shifted, shifted3)
        # incompatible freq must raise
        assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
        # DatetimeIndex
        shifted = self.tsframe.tshift(1)
        unshifted = shifted.tshift(-1)
        assert_frame_equal(self.tsframe, unshifted)
        shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
        assert_frame_equal(shifted, shifted2)
        # freq can be inferred from a plain ndarray-backed index
        inferred_ts = DataFrame(self.tsframe.values,
                                Index(np.asarray(self.tsframe.index)),
                                columns=self.tsframe.columns)
        shifted = inferred_ts.tshift(1)
        unshifted = shifted.tshift(-1)
        assert_frame_equal(shifted, self.tsframe.tshift(1))
        assert_frame_equal(unshifted, inferred_ts)
        # no inferable freq -> error
        no_freq = self.tsframe.ix[[0, 5, 7], :]
        self.assertRaises(ValueError, no_freq.tshift)
    def test_apply(self):
        """DataFrame.apply with a ufunc, an aggregator (both axes), an
        invalid axis, and a categorical-producing function (GH9573)."""
        # ufunc
        applied = self.frame.apply(np.sqrt)
        assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
        # aggregator
        applied = self.frame.apply(np.mean)
        self.assertEqual(applied['A'], np.mean(self.frame['A']))
        d = self.frame.index[0]
        applied = self.frame.apply(np.mean, axis=1)
        self.assertEqual(applied[d], np.mean(self.frame.xs(d)))
        self.assertIs(applied.index, self.frame.index)  # want this
        # invalid axis
        df = DataFrame(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
        self.assertRaises(ValueError, df.apply, lambda x: x, 2)
        # GH9573
        df = DataFrame({'c0':['A','A','B','B'], 'c1':['C','C','D','D']})
        df = df.apply(lambda ts: ts.astype('category'))
        self.assertEqual(df.shape, (4, 2))
        # categorical dtype must survive apply
        self.assertTrue(isinstance(df['c0'].dtype, com.CategoricalDtype))
        self.assertTrue(isinstance(df['c1'].dtype, com.CategoricalDtype))
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
    """apply on empty / zero-length frames: results must be empty, NaN
    aggregates must keep the right index (GH 2476), and the ``reduce``
    flag must control Series-vs-DataFrame output without ever invoking
    the applied function."""
    # empty
    applied = self.empty.apply(np.sqrt)
    self.assertTrue(applied.empty)

    applied = self.empty.apply(np.mean)
    self.assertTrue(applied.empty)

    # zero rows: column-wise reduction yields NaN per column
    no_rows = self.frame[:0]
    result = no_rows.apply(lambda x: x.mean())
    expected = Series(np.nan, index=self.frame.columns)
    assert_series_equal(result, expected)

    # zero columns: row-wise reduction yields NaN per row
    no_cols = self.frame.ix[:, []]
    result = no_cols.apply(lambda x: x.mean(), axis=1)
    expected = Series(np.nan, index=self.frame.index)
    assert_series_equal(result, expected)

    # 2476
    xp = DataFrame(index=['a'])
    rs = xp.apply(lambda x: x['a'], axis=1)
    assert_frame_equal(xp, rs)

    # reduce with an empty DataFrame
    x = []
    result = self.empty.apply(x.append, axis=1, reduce=False)
    assert_frame_equal(result, self.empty)
    result = self.empty.apply(x.append, axis=1, reduce=True)
    assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))

    empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
    result = empty_with_cols.apply(x.append, axis=1, reduce=False)
    assert_frame_equal(result, empty_with_cols)
    result = empty_with_cols.apply(x.append, axis=1, reduce=True)
    assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))

    # Ensure that x.append hasn't been called
    self.assertEqual(x, [])
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_apply_broadcast(self):
    """apply(..., broadcast=True) must fill every cell of the original
    shape with the aggregate of its row/column."""
    broadcasted = self.frame.apply(np.mean, broadcast=True)
    agged = self.frame.apply(np.mean)

    for col, ts in compat.iteritems(broadcasted):
        # every entry of a column equals that column's mean
        self.assertTrue((ts == agged[col]).all())

    broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
    agged = self.frame.apply(np.mean, axis=1)
    for idx in broadcasted.index:
        # every entry of a row equals that row's mean
        self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())
def test_apply_raw(self):
    """apply(..., raw=True) hands raw ndarrays to the function; the results
    must match the non-raw (Series-based) equivalents for reductions and
    for element-wise transforms."""
    result0 = self.frame.apply(np.mean, raw=True)
    result1 = self.frame.apply(np.mean, axis=1, raw=True)

    expected0 = self.frame.apply(lambda x: x.values.mean())
    expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)

    assert_series_equal(result0, expected0)
    assert_series_equal(result1, expected1)

    # no reduction
    result = self.frame.apply(lambda x: x * 2, raw=True)
    expected = self.frame * 2
    assert_frame_equal(result, expected)
def test_apply_axis1(self):
    """Row-wise apply of np.mean must equal the mean of the row's
    cross-section taken via xs()."""
    first_label = self.frame.index[0]
    row_means = self.frame.apply(np.mean, axis=1)
    self.assertEqual(row_means[first_label],
                     np.mean(self.frame.xs(first_label)))
def test_apply_ignore_failures(self):
    """_apply_standard(ignore_failures=True) must silently drop columns
    where the function raises (here: non-numeric columns under np.mean)."""
    result = self.mixed_frame._apply_standard(np.mean, 0,
                                              ignore_failures=True)
    # equivalent to applying only over the numeric subset
    expected = self.mixed_frame._get_numeric_data().apply(np.mean)
    assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
    """Corner cases of apply on a mixed-dtype (object + float) frame:
    zero-row reduction, and row-wise scalar extraction of each column."""
    df = DataFrame({'A': ['foo'],
                    'B': [1.]})
    result = df[:0].apply(np.mean, axis=1)
    # the result here is actually kind of ambiguous, should it be a Series
    # or a DataFrame?
    expected = Series(np.nan, index=pd.Index([], dtype='int64'))
    assert_series_equal(result, expected)

    df = DataFrame({'A': ['foo'],
                    'B': [1.]})
    # extracting a single column's value row-wise preserves its dtype
    result = df.apply(lambda x: x['A'], axis=1)
    expected = Series(['foo'],index=[0])
    assert_series_equal(result, expected)

    result = df.apply(lambda x: x['B'], axis=1)
    expected = Series([1.],index=[0])
    assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
    """On a frame empty along one axis, apply must infer from the function
    whether it reduces (-> Series indexed by the aggregation axis) or not
    (-> DataFrame), for all axis/raw combinations."""
    no_cols = DataFrame(index=['a', 'b', 'c'])
    no_index = DataFrame(columns=['a', 'b', 'c'])

    def _check(df, f):
        # Probe the function on an empty float array: a non-ndarray result
        # means it is a reduction.
        test_res = f(np.array([], dtype='f8'))
        is_reduction = not isinstance(test_res, np.ndarray)

        def _checkit(axis=0, raw=False):
            res = df.apply(f, axis=axis, raw=raw)
            if is_reduction:
                agg_axis = df._get_agg_axis(axis)
                tm.assertIsInstance(res, Series)
                self.assertIs(res.index, agg_axis)
            else:
                tm.assertIsInstance(res, DataFrame)

        _checkit()
        _checkit(axis=1)
        _checkit(raw=True)
        _checkit(axis=0, raw=True)

    _check(no_cols, lambda x: x)
    _check(no_cols, lambda x: x.mean())
    _check(no_index, lambda x: x)
    _check(no_index, lambda x: x.mean())

    # broadcast on an empty frame still yields a DataFrame
    result = no_cols.apply(lambda x: x.mean(), broadcast=True)
    tm.assertIsInstance(result, DataFrame)
def test_apply_with_args_kwds(self):
    """apply must forward positional ``args`` and extra keyword arguments
    to the applied function, for transforms and reductions alike."""
    def add_some(x, howmuch=0):
        return x + howmuch

    def agg_and_add(x, howmuch=0):
        return x.mean() + howmuch

    def subtract_and_divide(x, sub, divide=1):
        return (x - sub) / divide

    # keyword forwarded to an element-wise transform
    result = self.frame.apply(add_some, howmuch=2)
    expected = self.frame.apply(lambda x: x + 2)
    assert_frame_equal(result, expected)

    # keyword forwarded to a reduction
    result = self.frame.apply(agg_and_add, howmuch=2)
    expected = self.frame.apply(lambda x: x.mean() + 2)
    assert_series_equal(result, expected)

    # positional args= together with a keyword
    result = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
    expected = self.frame.apply(lambda x: (x - 2.) / 2.)
    assert_frame_equal(result, expected)
def test_apply_yield_list(self):
    """Applying the list constructor column-wise must reproduce the frame."""
    rebuilt = self.frame.apply(list)
    assert_frame_equal(rebuilt, self.frame)
def test_apply_reduce_Series(self):
    """Row-wise np.mean via apply must match DataFrame.mean(1), including
    NaN handling (every other 'A' value is set to NaN first)."""
    self.frame.ix[::2, 'A'] = np.nan
    expected = self.frame.mean(1)
    result = self.frame.apply(np.mean, axis=1)
    assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
    """apply with a Series-returning function (describe) must align the
    per-column / per-row results into a frame on both axes."""
    df = DataFrame(np.random.randn(20, 10))

    result0 = df.apply(Series.describe, axis=0)
    expected0 = DataFrame(dict((i, v.describe())
                               for i, v in compat.iteritems(df)),
                          columns=df.columns)
    assert_frame_equal(result0, expected0)

    result1 = df.apply(Series.describe, axis=1)
    # row-wise describe equals column-wise describe of the transpose
    expected1 = DataFrame(dict((i, v.describe())
                               for i, v in compat.iteritems(df.T)),
                          columns=df.index).T
    assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
    """apply must annotate exceptions raised by the applied function with
    the offending label ('occurred at index ...') appended to ``e.args``.

    ``data.loc[4, 'C']`` is set to NaN, so ``transform`` — which assumes
    'C' is always a string — must raise AttributeError at index 4, and
    apply is expected to re-raise it with the location attached.
    """
    data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
                            'bar', 'bar', 'bar', 'bar',
                            'foo', 'foo', 'foo'],
                      'B': ['one', 'one', 'one', 'two',
                            'one', 'one', 'one', 'two',
                            'two', 'two', 'one'],
                      'C': ['dull', 'dull', 'shiny', 'dull',
                            'dull', 'shiny', 'shiny', 'dull',
                            'shiny', 'shiny', 'shiny'],
                      'D': np.random.randn(11),
                      'E': np.random.randn(11),
                      'F': np.random.randn(11)})

    data.loc[4,'C'] = np.nan

    def transform(row):
        # deliberately missing a notnull() guard: blows up on the NaN row
        if row['C'].startswith('shin') and row['A'] == 'foo':
            row['D'] = 7
        return row

    def transform2(row):
        # the "fixed" variant with a NaN guard; kept for reference only
        if (notnull(row['C']) and row['C'].startswith('shin')
                and row['A'] == 'foo'):
            row['D'] = 7
        return row

    try:
        transformed = data.apply(transform, axis=1)
    except AttributeError as e:
        self.assertEqual(len(e.args), 2)
        self.assertEqual(e.args[1], 'occurred at index 4')
        self.assertEqual(e.args[0],
                         "'float' object has no attribute 'startswith'")
    else:
        # BUG FIX: previously the test passed silently when apply raised
        # nothing at all; make the missing exception an explicit failure.
        self.fail('AttributeError was not raised by DataFrame.apply')
def test_apply_bug(self):
# GH 6125
import datetime
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],
[datetime.datetime(2013, 1, 2), 'YUM0', 20],
[datetime.datetime(2013, 1, 3), 'DEF0', 20],
[datetime.datetime(2013, 1, 4), 'ABC1', 50],
[datetime.datetime(2013, 1, 5), 'YUM1', 20],
[datetime.datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result,expected)
def test_swapaxes(self):
    """swapaxes(0, 1) must transpose, swapping an axis with itself must be
    a no-op, and out-of-range axis numbers must raise ValueError."""
    frame = DataFrame(np.random.randn(10, 5))
    transposed = frame.T
    assert_frame_equal(transposed, frame.swapaxes(0, 1))
    assert_frame_equal(transposed, frame.swapaxes(1, 0))
    assert_frame_equal(frame, frame.swapaxes(0, 0))
    # invalid axis numbers
    self.assertRaises(ValueError, frame.swapaxes, 2, 5)
def test_apply_convert_objects(self):
    """Identity apply over rows must round-trip a mixed-dtype frame once
    object columns are re-inferred via the private _convert(datetime=True)."""
    data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
                            'bar', 'bar', 'bar', 'bar',
                            'foo', 'foo', 'foo'],
                      'B': ['one', 'one', 'one', 'two',
                            'one', 'one', 'one', 'two',
                            'two', 'two', 'one'],
                      'C': ['dull', 'dull', 'shiny', 'dull',
                            'dull', 'shiny', 'shiny', 'dull',
                            'shiny', 'shiny', 'shiny'],
                      'D': np.random.randn(11),
                      'E': np.random.randn(11),
                      'F': np.random.randn(11)})

    result = data.apply(lambda x: x, axis=1)
    # row-wise apply upcasts to object; convert back before comparing
    assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
    """Each Series handed to the applied function must carry its column/row
    label as ``.name``, for both reducing and non-reducing functions."""
    # reduction: returning the name yields the labels themselves
    result = self.frame.apply(lambda x: x.name)
    expected = Series(self.frame.columns, index=self.frame.columns)
    assert_series_equal(result, expected)

    result = self.frame.apply(lambda x: x.name, axis=1)
    expected = Series(self.frame.index, index=self.frame.index)
    assert_series_equal(result, expected)

    # non-reductions
    result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
    # every column filled with its own name
    expected = DataFrame(np.tile(self.frame.columns,
                                 (len(self.frame.index), 1)),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(result, expected)

    result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
                              axis=1)
    # every row filled with its own index label
    expected = DataFrame(np.tile(self.frame.index,
                                 (len(self.frame.columns), 1)).T,
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(result, expected)
def test_apply_multi_index(self):
    """Row-wise apply on a MultiIndex'ed frame must keep the MultiIndex on
    the result."""
    s = DataFrame([[1,2], [3,4], [5,6]])
    s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
    s.columns = ['col1','col2']
    res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
    tm.assertIsInstance(res.index, MultiIndex)
def test_apply_dict(self):
    """apply with a dict-returning function: reduce=True must give a Series
    of dicts, reduce=False must rebuild a frame, reduce=None must infer the
    Series form (GH 8735)."""
    # GH 8735
    A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
    A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
                         dict([(0, 'bar'), (1, 'eggs')])])
    B = DataFrame([[0, 1], [2, 3]])
    B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
    fn = lambda x: x.to_dict()

    # same expectations for an object frame and an integer frame
    for df, dicts in [(A, A_dicts), (B, B_dicts)]:
        reduce_true = df.apply(fn, reduce=True)
        reduce_false = df.apply(fn, reduce=False)
        reduce_none = df.apply(fn, reduce=None)

        assert_series_equal(reduce_true, dicts)
        assert_frame_equal(reduce_false, df)
        assert_series_equal(reduce_none, dicts)
def test_applymap(self):
    """applymap element-wise behavior: scaling, tuple-returning functions
    (GH 465), object-dtype preservation (GH 2909), duplicate column names
    (GH 2786), and str() over datetime/timedelta columns."""
    applied = self.frame.applymap(lambda x: x * 2)
    assert_frame_equal(applied, self.frame * 2)
    # smoke test only — result is not asserted
    result = self.frame.applymap(type)

    # GH #465, function returning tuples
    result = self.frame.applymap(lambda x: (x, x))
    tm.assertIsInstance(result['A'][0], tuple)

    # GH 2909, object conversion to float in constructor?
    df = DataFrame(data=[1,'a'])
    result = df.applymap(lambda x: x)
    self.assertEqual(result.dtypes[0], object)

    df = DataFrame(data=[1.,'a'])
    result = df.applymap(lambda x: x)
    self.assertEqual(result.dtypes[0], object)

    # GH2786
    df = DataFrame(np.random.random((3,4)))
    df2 = df.copy()
    cols = ['a','a','a','a']
    df.columns = cols

    # duplicate column names must not break applymap
    expected = df2.applymap(str)
    expected.columns = cols
    result = df.applymap(str)
    assert_frame_equal(result,expected)

    # datetime/timedelta
    df['datetime'] = Timestamp('20130101')
    df['timedelta'] = Timedelta('1 min')
    result = df.applymap(str)
    for f in ['datetime','timedelta']:
        self.assertEqual(result.loc[0,f],str(df.loc[0,f]))
def test_filter(self):
    """DataFrame.filter: items, axis names, like=, regex with non-string
    column labels (PR 10384), items=None error, and unicode columns."""
    # items
    filtered = self.frame.filter(['A', 'B', 'E'])
    self.assertEqual(len(filtered.columns), 2)
    # 'E' does not exist and must simply be dropped, not raise
    self.assertNotIn('E', filtered)

    filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
    self.assertEqual(len(filtered.columns), 2)
    self.assertNotIn('E', filtered)

    # other axis
    idx = self.frame.index[0:4]
    filtered = self.frame.filter(idx, axis='index')
    expected = self.frame.reindex(index=idx)
    assert_frame_equal(filtered, expected)

    # like
    fcopy = self.frame.copy()
    fcopy['AA'] = 1

    filtered = fcopy.filter(like='A')
    self.assertEqual(len(filtered.columns), 2)
    self.assertIn('AA', filtered)

    # like with ints in column names
    df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
    filtered = df.filter(like='_')
    self.assertEqual(len(filtered.columns), 2)

    # regex with ints in column names
    # from PR #10384
    df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
    expected = DataFrame(0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
    filtered = df.filter(regex='^[0-9]+$')
    assert_frame_equal(filtered, expected)

    expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
    # shouldn't remove anything
    filtered = expected.filter(regex='^[0-9]+$')
    assert_frame_equal(filtered, expected)

    # pass in None
    with assertRaisesRegexp(TypeError, 'Must pass'):
        self.frame.filter(items=None)

    # objects
    filtered = self.mixed_frame.filter(like='foo')
    self.assertIn('foo', filtered)

    # unicode columns, won't ascii-encode
    df = self.frame.rename(columns={'B': u('\u2202')})
    filtered = df.filter(like='C')
    self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
    """filter(regex=...) must use re.search semantics: the pattern may
    match anywhere in the label, not only at the beginning."""
    fcopy = self.frame.copy()
    fcopy['AA'] = 1

    # regex
    filtered = fcopy.filter(regex='[A]+')
    self.assertEqual(len(filtered.columns), 2)
    self.assertIn('AA', filtered)

    # doesn't have to be at beginning
    df = DataFrame({'aBBa': [1, 2],
                    'BBaBB': [1, 2],
                    'aCCa': [1, 2],
                    'aCCaBB': [1, 2]})

    result = df.filter(regex='BB')
    exp = df[[x for x in df.columns if 'BB' in x]]
    assert_frame_equal(result, exp)
def test_filter_corner(self):
    """filter on a completely empty frame must return an (equal) empty frame
    for both items= and like= selection."""
    empty = DataFrame()

    result = empty.filter([])
    assert_frame_equal(result, empty)

    result = empty.filter(like='foo')
    assert_frame_equal(result, empty)
def test_select(self):
    """DataFrame.select(crit, axis) must equal reindexing by the labels for
    which the criterion function returns True, on both axes."""
    # select rows whose date falls on a Wednesday
    f = lambda x: x.weekday() == 2
    result = self.tsframe.select(f, axis=0)
    expected = self.tsframe.reindex(
        index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
    assert_frame_equal(result, expected)

    result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
    expected = self.frame.reindex(columns=['B', 'D'])

    # TODO should reindex check_names?
    assert_frame_equal(result, expected, check_names=False)
def test_reorder_levels(self):
    """reorder_levels must accept positions or names, allow no-op orders,
    rotations, and even repeating the same level several times."""
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]],
                       names=['L0', 'L1', 'L2'])
    df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)

    # no change, position
    result = df.reorder_levels([0, 1, 2])
    assert_frame_equal(df, result)

    # no change, labels
    result = df.reorder_levels(['L0', 'L1', 'L2'])
    assert_frame_equal(df, result)

    # rotate, position
    result = df.reorder_levels([1, 2, 0])
    e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
                       labels=[[0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L1', 'L2', 'L0'])
    expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
                         index=e_idx)
    assert_frame_equal(result, expected)

    # repeating a single level is allowed
    result = df.reorder_levels([0, 0, 0])
    e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L0', 'L0', 'L0'])
    expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
                         index=e_idx)
    assert_frame_equal(result, expected)

    result = df.reorder_levels(['L0', 'L0', 'L0'])
    assert_frame_equal(result, expected)
def test_sort_values(self):
    """API changes for GH 9816: .sort() is deprecated (FutureWarning),
    sort_index/sortlevel equivalences, and MultiIndex sorting without by."""
    # API for 9816
    # sort_index
    frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # 9816 deprecated
    with tm.assert_produces_warning(FutureWarning):
        frame.sort(columns='A')
    with tm.assert_produces_warning(FutureWarning):
        frame.sort()

    unordered = frame.ix[[3, 2, 4, 1]]
    expected = unordered.sort_index()

    result = unordered.sort_index(axis=0)
    assert_frame_equal(result, expected)

    unordered = frame.ix[:, [2, 1, 3, 0]]
    expected = unordered.sort_index(axis=1)

    result = unordered.sort_index(axis=1)
    assert_frame_equal(result, expected)
    # NOTE: duplicated assertion kept from the original test
    assert_frame_equal(result, expected)

    # sortlevel
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)

    # sort_index(level=...) must match the older sortlevel API
    result = df.sort_index(level='A', sort_remaining=False)
    expected = df.sortlevel('A', sort_remaining=False)
    assert_frame_equal(result, expected)

    df = df.T
    result = df.sort_index(level='A', axis=1, sort_remaining=False)
    expected = df.sortlevel('A', axis=1, sort_remaining=False)
    assert_frame_equal(result, expected)

    # MI sort, but no by
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)
    result = df.sort_index(sort_remaining=False)
    expected = df.sort_index()
    assert_frame_equal(result, expected)
def test_sort_index(self):
    """sort_index on both axes, ascending/descending, plus sort_values by
    one or more columns, including its error cases (axis/ascending)."""
    frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # axis=0
    unordered = frame.ix[[3, 2, 4, 1]]
    sorted_df = unordered.sort_index(axis=0)
    expected = frame
    assert_frame_equal(sorted_df, expected)

    sorted_df = unordered.sort_index(ascending=False)
    expected = frame[::-1]
    assert_frame_equal(sorted_df, expected)

    # axis=1
    unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
    sorted_df = unordered.sort_index(axis=1)
    expected = frame
    assert_frame_equal(sorted_df, expected)

    sorted_df = unordered.sort_index(axis=1, ascending=False)
    expected = frame.ix[:, ::-1]
    assert_frame_equal(sorted_df, expected)

    # by column
    sorted_df = frame.sort_values(by='A')
    indexer = frame['A'].argsort().values
    expected = frame.ix[frame.index[indexer]]
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.sort_values(by='A', ascending=False)
    indexer = indexer[::-1]
    expected = frame.ix[frame.index[indexer]]
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.sort_values(by='A', ascending=False)
    assert_frame_equal(sorted_df, expected)

    # GH4839
    sorted_df = frame.sort_values(by=['A'], ascending=[False])
    assert_frame_equal(sorted_df, expected)

    # check for now
    # NOTE: `expected` still holds the descending result here, so the
    # ascending sort must equal its reversal
    sorted_df = frame.sort_values(by='A')
    assert_frame_equal(sorted_df, expected[::-1])
    expected = frame.sort_values(by='A')
    assert_frame_equal(sorted_df, expected)

    expected = frame.sort_values(by=['A', 'B'], ascending=False)
    sorted_df = frame.sort_values(by=['A', 'B'])
    assert_frame_equal(sorted_df, expected[::-1])

    # column sorts only make sense along axis=0
    self.assertRaises(ValueError, lambda : frame.sort_values(by=['A','B'], axis=2, inplace=True))

    msg = 'When sorting by column, axis must be 0'
    with assertRaisesRegexp(ValueError, msg):
        frame.sort_values(by='A', axis=1)

    # list-valued ascending must match the length of `by`
    msg = r'Length of ascending \(5\) != length of by \(2\)'
    with assertRaisesRegexp(ValueError, msg):
        frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_index_categorical_index(self):
    """sort_index on a CategoricalIndex must sort by *category order*
    (here 'c' < 'a' < 'b'), not lexically."""
    df = DataFrame({'A' : np.arange(6,dtype='int64'),
                    'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B')

    result = df.sort_index()
    expected = df.iloc[[4,0,1,5,2,3]]
    assert_frame_equal(result, expected)

    result = df.sort_index(ascending=False)
    expected = df.iloc[[3,2,5,1,0,4]]
    assert_frame_equal(result, expected)
def test_sort_nan(self):
    """NaN handling in sorting (GH 3917): na_position='first'/'last' for
    sort_values on one and two keys, and sort_index with a NaN *label*."""
    # GH3917
    nan = np.nan
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]})

    # sort one column only
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 9, 2, nan, 5, 5, 4]},
        index=[2, 0, 3, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A'], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # NaNs still lead when the sort itself is descending
    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 9, 2]},
        index=[2, 5, 4, 6, 1, 0, 3])
    sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
    assert_frame_equal(sorted_df, expected)

    # na_position='last', order
    expected = DataFrame(
        {'A': [1, 1, 2, 4, 6, 8, nan],
         'B': [2, 9, nan, 5, 5, 4, 5]},
        index=[3, 0, 1, 6, 4, 5, 2])
    sorted_df = df.sort_values(['A','B'])
    assert_frame_equal(sorted_df, expected)

    # na_position='first', order
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 2, 9, nan, 5, 5, 4]},
        index=[2, 3, 0, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A','B'], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # na_position='first', not order
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 9, 2, nan, 5, 5, 4]},
        index=[2, 0, 3, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A','B'], ascending=[1,0], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # na_position='last', not order
    expected = DataFrame(
        {'A': [8, 6, 4, 2, 1, 1, nan],
         'B': [4, 5, 5, nan, 2, 9, 5]},
        index=[5, 4, 6, 1, 3, 0, 2])
    sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='last')
    assert_frame_equal(sorted_df, expected)

    # Test DataFrame with nan label
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]},
                   index = [1, 2, 3, 4, 5, 6, nan])

    # NaN label, ascending=True, na_position='last'
    sorted_df = df.sort_index(kind='quicksort', ascending=True, na_position='last')
    expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                          'B': [9, nan, 5, 2, 5, 4, 5]},
                         index = [1, 2, 3, 4, 5, 6, nan])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=True, na_position='first'
    sorted_df = df.sort_index(na_position='first')
    expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
                          'B': [5, 9, nan, 5, 2, 5, 4]},
                         index = [nan, 1, 2, 3, 4, 5, 6])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=False, na_position='last'
    sorted_df = df.sort_index(kind='quicksort', ascending=False)
    expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
                          'B': [4, 5, 2, 5, nan, 9, 5]},
                         index = [6, 5, 4, 3, 2, 1, nan])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=False, na_position='first'
    sorted_df = df.sort_index(kind='quicksort', ascending=False, na_position='first')
    expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
                          'B': [5, 4, 5, 2, 5, nan, 9]},
                         index = [nan, 6, 5, 4, 3, 2, 1])
    assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
    """Multi-column mergesort with mixed ascending flags must be stable
    and honor na_position='first'."""
    nan = np.nan
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]})

    # test stable mergesort
    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 2, 9]},
        index=[2, 5, 4, 6, 1, 3, 0])
    sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='first',
                               kind='mergesort')
    assert_frame_equal(sorted_df, expected)

    # both keys descending: tie on A broken by descending B
    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 9, 2]},
        index=[2, 5, 4, 6, 1, 0, 3])
    sorted_df = df.sort_values(['A','B'], ascending=[0,0], na_position='first',
                               kind='mergesort')
    assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
    """Multi-column sort_values must match np.lexsort ordering, and the
    deprecated sort_index(by=...) spelling must warn (GH 9816)."""
    import random
    A = np.arange(5).repeat(20)
    B = np.tile(np.arange(5), 20)
    random.shuffle(A)
    random.shuffle(B)
    frame = DataFrame({'A': A, 'B': B,
                       'C': np.random.randn(100)})

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'])
    result = frame.sort_values(by=['A', 'B'])
    # lexsort keys are listed minor-first, so (B, A) sorts by A then B
    indexer = np.lexsort((frame['B'], frame['A']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'], ascending=False)
    result = frame.sort_values(by=['A', 'B'], ascending=False)
    # descending order expressed through rank(ascending=False)
    indexer = np.lexsort((frame['B'].rank(ascending=False),
                          frame['A'].rank(ascending=False)))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['B', 'A'])
    result = frame.sort_values(by=['B', 'A'])
    indexer = np.lexsort((frame['A'], frame['B']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
    """sort_index(inplace=True) on both axes must match the non-inplace
    result and must produce new column data (not a view)."""
    frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # axis=0
    unordered = frame.ix[[3, 2, 4, 1]]
    a_id = id(unordered['A'])
    df = unordered.copy()
    df.sort_index(inplace=True)
    expected = frame
    assert_frame_equal(df, expected)
    # in-place sort must still have replaced the column object
    self.assertNotEqual(a_id, id(df['A']))

    df = unordered.copy()
    df.sort_index(ascending=False, inplace=True)
    expected = frame[::-1]
    assert_frame_equal(df, expected)

    # axis=1
    unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
    df = unordered.copy()
    df.sort_index(axis=1, inplace=True)
    expected = frame
    assert_frame_equal(df, expected)

    df = unordered.copy()
    df.sort_index(axis=1, ascending=False, inplace=True)
    expected = frame.ix[:, ::-1]
    assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
    """Mixed per-key sort directions (ascending=[1, 0]) must agree with an
    np.lexsort on negated keys, for frames, MultiIndexes and Series."""
    A = np.arange(20).repeat(5)
    B = np.tile(np.arange(5), 20)

    indexer = np.random.permutation(100)
    A = A.take(indexer)
    B = B.take(indexer)

    df = DataFrame({'A': A, 'B': B,
                    'C': np.random.randn(100)})

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=['A', 'B'], ascending=[1, 0])
    result = df.sort_values(by=['A', 'B'], ascending=[1, 0])

    # descending B realized by sorting on (max - B)
    ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
    expected = df.take(ex_indexer)
    assert_frame_equal(result, expected)

    # test with multiindex, too
    idf = df.set_index(['A', 'B'])

    result = idf.sort_index(ascending=[1, 0])
    expected = idf.take(ex_indexer)
    assert_frame_equal(result, expected)

    # also, Series!
    result = idf['C'].sort_index(ascending=[1, 0])
    assert_series_equal(result, expected['C'])
def test_sort_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
    """Sorting by a duplicated column label must raise; tuple keys into a
    MultiIndex columns frame must be normalized to a list of tuples.
    The deprecated sort_index(by=...) spellings must also warn."""
    ### with 9816, these are all translated to .sort_values
    df = DataFrame([lrange(5,9), lrange(4)],
                   columns=['a', 'a', 'b', 'b'])

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by='a')
    with assertRaisesRegexp(ValueError, 'duplicate'):
        df.sort_values(by='a')

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['a'])
    with assertRaisesRegexp(ValueError, 'duplicate'):
        df.sort_values(by=['a'])

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            # multi-column 'by' is separate codepath
            df.sort_index(by=['a', 'b'])
    with assertRaisesRegexp(ValueError, 'duplicate'):
        # multi-column 'by' is separate codepath
        df.sort_values(by=['a', 'b'])

    # with multi-index
    # GH4370
    df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))
    with assertRaisesRegexp(ValueError, 'levels'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by='a')
    with assertRaisesRegexp(ValueError, 'levels'):
        df.sort_values(by='a')

    # convert tuples to a list of tuples
    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=[('a',1)])
    expected = df.sort_values(by=[('a',1)])

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=('a',1))
    result = df.sort_values(by=('a',1))
    assert_frame_equal(result, expected)
def test_sortlevel(self):
    """sortlevel with sort_remaining=False must leave an already-ordered
    level (and hence the frame) untouched."""
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)
    # level 'A' is already sorted, so the frame comes back unchanged
    res = df.sortlevel('A', sort_remaining=False)
    assert_frame_equal(df, res)

    res = df.sortlevel(['A', 'B'], sort_remaining=False)
    assert_frame_equal(df, res)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a','a','a','b','c','d','e','f','g'],
columns=['A'],
index=date_range('20130101',periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11','2004-01-21','2004-01-26',
'2005-09-20','2010-10-04','2009-05-12',
'2008-11-12','2010-09-28','2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1,df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1,df2)
def test_frame_column_inplace_sort_exception(self):
    """In-place sort of a Series that is a view on a frame column must
    raise; sorting a copy of it must work."""
    s = self.frame['A']
    with assertRaisesRegexp(ValueError, "This Series is a view"):
        s.sort_values(inplace=True)

    cp = s.copy()
    cp.sort_values()  # it works!
def test_combine_first(self):
    """combine_first: disjoint frames, same index, overlapping values
    (caller wins), empty-frame corner cases, and new-column union (#2525)."""
    # disjoint
    head, tail = self.frame[:5], self.frame[5:]

    combined = head.combine_first(tail)
    reordered_frame = self.frame.reindex(combined.index)
    assert_frame_equal(combined, reordered_frame)
    self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))
    assert_series_equal(combined['A'], reordered_frame['A'])

    # same index
    fcopy = self.frame.copy()
    fcopy['A'] = 1
    del fcopy['C']

    fcopy2 = self.frame.copy()
    fcopy2['B'] = 0
    del fcopy2['D']

    combined = fcopy.combine_first(fcopy2)

    # the calling frame's values win where both are present
    self.assertTrue((combined['A'] == 1).all())
    assert_series_equal(combined['B'], fcopy['B'])
    assert_series_equal(combined['C'], fcopy2['C'])
    assert_series_equal(combined['D'], fcopy['D'])

    # overlap
    head, tail = reordered_frame[:10].copy(), reordered_frame
    head['A'] = 1

    combined = head.combine_first(tail)
    self.assertTrue((combined['A'][:10] == 1).all())

    # reverse overlap
    tail['A'][:10] = 0
    combined = tail.combine_first(head)
    self.assertTrue((combined['A'][:10] == 0).all())

    # no overlap
    f = self.frame[:10]
    g = self.frame[10:]
    combined = f.combine_first(g)
    assert_series_equal(combined['A'].reindex(f.index), f['A'])
    assert_series_equal(combined['A'].reindex(g.index), g['A'])

    # corner cases
    comb = self.frame.combine_first(self.empty)
    assert_frame_equal(comb, self.frame)

    comb = self.empty.combine_first(self.frame)
    assert_frame_equal(comb, self.frame)

    comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
    self.assertTrue("faz" in comb.index)

    # #2525
    df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
    df2 = DataFrame({}, columns=['b'])
    result = df.combine_first(df2)
    self.assertTrue('b' in result)
def test_combine_first_mixed_bug(self):
    """combine_first on mixed-dtype frames: column union, bool columns with
    the doc example (GH 3016 analogue), object-dtype bool result (GH 3552),
    and datetime64 preservation (GH 3593)."""
    idx = Index(['a', 'b', 'c', 'e'])
    ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
    ser2 = Series(['a', 'b', 'c', 'e'], index=idx)
    ser3 = Series([12, 4, 5, 97], index=idx)

    frame1 = DataFrame({"col0": ser1,
                        "col2": ser2,
                        "col3": ser3})

    idx = Index(['a', 'b', 'c', 'f'])
    ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
    ser2 = Series(['a', 'b', 'c', 'f'], index=idx)
    ser3 = Series([12, 4, 5, 97], index=idx)

    frame2 = DataFrame({"col1": ser1,
                        "col2": ser2,
                        "col5": ser3})

    combined = frame1.combine_first(frame2)
    # union of {col0, col2, col3} and {col1, col2, col5}
    self.assertEqual(len(combined.columns), 5)

    # gh 3016 (same as in update)
    df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
                   columns=['A','B','bool1','bool2'])

    other = DataFrame([[45,45]],index=[0],columns=['A','B'])
    result = df.combine_first(other)
    # df has no NaNs, so `other` contributes nothing
    assert_frame_equal(result, df)

    df.ix[0,'A'] = np.nan
    result = df.combine_first(other)
    df.ix[0,'A'] = 45
    assert_frame_equal(result, df)

    # doc example
    df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
                     'B' : [np.nan, 2., 3., np.nan, 6.]})

    df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
                     'B' : [np.nan, np.nan, 3., 4., 6., 8.]})

    result = df1.combine_first(df2)
    expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
    assert_frame_equal(result,expected)

    # GH3552, return object dtype with bools
    df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])
    df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])

    result = df1.combine_first(df2)[2]
    expected = Series([True, True, False], name=2)
    assert_series_equal(result, expected)

    # GH 3593, converting datetime64[ns] incorrecly
    df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
    df1 = DataFrame({"a":[None, None, None]})
    df2 = df1.combine_first(df0)
    assert_frame_equal(df2, df0)

    df2 = df0.combine_first(df1)
    assert_frame_equal(df2, df0)

    df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
    df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
    df2 = df1.combine_first(df0)
    # only row 0 comes from df1; the Nones are filled from df0
    result = df0.copy()
    result.iloc[0,:] = df1.iloc[0,:]
    assert_frame_equal(df2, result)

    df2 = df0.combine_first(df1)
    assert_frame_equal(df2, df0)
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame([[1.5, nan, 3],
[3.6, 2, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
    def test_update_dtypes(self):
        """update() preserves untouched bool columns (GH 3016)."""
        # gh 3016
        df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
                       columns=['A','B','bool1','bool2'])
        other = DataFrame([[45,45]],index=[0],columns=['A','B'])
        df.update(other)
        # Only row 0 of A/B changes; bool columns keep their dtype/values.
        expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],
                             columns=['A','B','bool1','bool2'])
        assert_frame_equal(df, expected)
    def test_update_nooverwrite(self):
        """update(overwrite=False) only fills NaN holes, never existing values."""
        df = DataFrame([[1.5, nan, 3.],
                        [1.5, nan, 3.],
                        [1.5, nan, 3],
                        [1.5, nan, 3]])
        other = DataFrame([[3.6, 2., np.nan],
                           [np.nan, np.nan, 7]], index=[1, 3])
        df.update(other, overwrite=False)
        # Only the (1, 1) NaN is filled with 2; all non-NaN cells survive.
        expected = DataFrame([[1.5, nan, 3],
                              [1.5, 2, 3],
                              [1.5, nan, 3],
                              [1.5, nan, 3.]])
        assert_frame_equal(df, expected)
    def test_update_filtered(self):
        """update(filter_func=...) only replaces cells where the predicate is True."""
        df = DataFrame([[1.5, nan, 3.],
                        [1.5, nan, 3.],
                        [1.5, nan, 3],
                        [1.5, nan, 3]])
        other = DataFrame([[3.6, 2., np.nan],
                           [np.nan, np.nan, 7]], index=[1, 3])
        # Only cells of df that are > 2 are eligible for replacement.
        df.update(other, filter_func=lambda x: x > 2)
        expected = DataFrame([[1.5, nan, 3],
                              [1.5, nan, 3],
                              [1.5, nan, 3],
                              [1.5, nan, 7.]])
        assert_frame_equal(df, expected)
    def test_update_raise(self):
        """update(raise_conflict=True) raises when both frames have data at a cell."""
        df = DataFrame([[1.5, 1, 3.],
                        [1.5, nan, 3.],
                        [1.5, nan, 3],
                        [1.5, nan, 3]])
        # other[1][3] == 7 overlaps a non-NaN cell of df -> conflict.
        other = DataFrame([[2., nan],
                           [nan, 7]], index=[1, 3], columns=[1, 2])
        with assertRaisesRegexp(ValueError, "Data overlaps"):
            df.update(other, raise_conflict=True)
    def test_update_from_non_df(self):
        """update() accepts dict-of-Series and dict-of-list, not just DataFrames."""
        d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}
        df = DataFrame(d)
        d['a'] = Series([5, 6, 7, 8])
        df.update(d)
        expected = DataFrame(d)
        assert_frame_equal(df, expected)
        # Same round-trip with plain lists as the dict values.
        d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
        df = DataFrame(d)
        d['a'] = [5, 6, 7, 8]
        df.update(d)
        expected = DataFrame(d)
        assert_frame_equal(df, expected)
    def test_combineAdd(self):
        """Deprecated combineAdd(): elementwise add treating missing cells as 0."""
        # combineAdd is deprecated, so every call must emit a FutureWarning.
        with tm.assert_produces_warning(FutureWarning):
            # trivial
            comb = self.frame.combineAdd(self.frame)
            assert_frame_equal(comb, self.frame * 2)

            # more rigorous: partially overlapping columns, NaN treated as absent
            a = DataFrame([[1., nan, nan, 2., nan]],
                          columns=np.arange(5))
            b = DataFrame([[2., 3., nan, 2., 6., nan]],
                          columns=np.arange(6))
            expected = DataFrame([[3., 3., nan, 4., 6., nan]],
                                 columns=np.arange(6))
            result = a.combineAdd(b)
            assert_frame_equal(result, expected)
            result2 = a.T.combineAdd(b.T)
            assert_frame_equal(result2, expected.T)

            # combineAdd should match combine(..., operator.add, fill_value=0)
            expected2 = a.combine(b, operator.add, fill_value=0.)
            assert_frame_equal(expected, expected2)

            # corner cases: combining with an empty frame is a no-op
            comb = self.frame.combineAdd(self.empty)
            assert_frame_equal(comb, self.frame)

            comb = self.empty.combineAdd(self.frame)
            assert_frame_equal(comb, self.frame)

            # integer corner case
            df1 = DataFrame({'x': [5]})
            df2 = DataFrame({'x': [1]})
            df3 = DataFrame({'x': [6]})
            comb = df1.combineAdd(df2)
            assert_frame_equal(comb, df3)

            # mixed type GH2191: non-overlapping columns upcast to float
            df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
            df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
            rs = df1.combineAdd(df2)
            xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
            assert_frame_equal(xp, rs)

        # TODO: test integer fill corner?
    def test_combineMult(self):
        """Deprecated combineMult(): elementwise multiply; empty frame is a no-op."""
        with tm.assert_produces_warning(FutureWarning):
            # trivial
            comb = self.frame.combineMult(self.frame)
            assert_frame_equal(comb, self.frame ** 2)

            # corner cases: multiplying with an empty frame leaves self unchanged
            comb = self.frame.combineMult(self.empty)
            assert_frame_equal(comb, self.frame)

            comb = self.empty.combineMult(self.frame)
            assert_frame_equal(comb, self.frame)
    def test_combine_generic(self):
        """combine() with np.add: missing alignment cells become NaN, overlap adds."""
        df1 = self.frame
        # df2 is a row/column subset, so column D is absent from the overlap.
        df2 = self.frame.ix[:-5, ['A', 'B', 'C']]

        combined = df1.combine(df2, np.add)
        combined2 = df2.combine(df1, np.add)
        # D exists only in df1, so the combined D column is all-NaN.
        self.assertTrue(combined['D'].isnull().all())
        self.assertTrue(combined2['D'].isnull().all())

        # Where both frames overlap, the result is simply double the values.
        chunk = combined.ix[:-5, ['A', 'B', 'C']]
        chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]

        exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
        assert_frame_equal(chunk, exp)
        assert_frame_equal(chunk2, exp)
    def test_clip(self):
        """clip_upper/clip_lower/clip bound values at a scalar threshold."""
        median = self.frame.median().median()

        capped = self.frame.clip_upper(median)
        self.assertFalse((capped.values > median).any())

        floored = self.frame.clip_lower(median)
        self.assertFalse((floored.values < median).any())

        # Clipping to [median, median] collapses every value to the median.
        double = self.frame.clip(upper=median, lower=median)
        self.assertFalse((double.values != median).any())
    def test_dataframe_clip(self):
        """clip(lb, ub) works even when the bounds are passed out of order (GH 2747)."""
        # GH #2747
        df = DataFrame(np.random.randn(1000,2))

        for lb, ub in [(-1,1),(1,-1)]:
            clipped_df = df.clip(lb, ub)

            # clip should internally sort the bounds
            lb, ub = min(lb,ub), max(ub,lb)
            lb_mask = df.values <= lb
            ub_mask = df.values >= ub
            mask = ~lb_mask & ~ub_mask
            self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)
            self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)
            # values inside [lb, ub] must be untouched
            self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)
    def test_clip_against_series(self):
        """clip() with Series bounds along axis=0 clips each row separately (GH 6966)."""
        # GH #6966

        df = DataFrame(np.random.randn(1000, 2))
        lb = Series(np.random.randn(1000))
        ub = lb + 1

        clipped_df = df.clip(lb, ub, axis=0)

        for i in range(2):
            lb_mask = df.iloc[:, i] <= lb
            ub_mask = df.iloc[:, i] >= ub
            mask = ~lb_mask & ~ub_mask

            # Below-bound cells become the per-row lower bound...
            result = clipped_df.loc[lb_mask, i]
            assert_series_equal(result, lb[lb_mask], check_names=False)
            self.assertEqual(result.name, i)

            # ...above-bound cells become the per-row upper bound...
            result = clipped_df.loc[ub_mask, i]
            assert_series_equal(result, ub[ub_mask], check_names=False)
            self.assertEqual(result.name, i)

            # ...and in-range cells are unchanged.
            assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
    def test_clip_against_frame(self):
        """clip() with DataFrame bounds clips cell-by-cell against the aligned bound."""
        df = DataFrame(np.random.randn(1000, 2))
        lb = DataFrame(np.random.randn(1000, 2))
        ub = lb + 1

        clipped_df = df.clip(lb, ub)

        lb_mask = df <= lb
        ub_mask = df >= ub
        mask = ~lb_mask & ~ub_mask

        assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
        assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
        assert_frame_equal(clipped_df[mask], df[mask])
    def test_get_X_columns(self):
        """_get_numeric_data() keeps numeric and bool columns, drops object/None."""
        # numeric and object columns

        df = DataFrame({'a': [1, 2, 3],
                        'b' : [True, False, True],
                        'c': ['foo', 'bar', 'baz'],
                        'd': [None, None, None],
                        'e': [3.14, 0.577, 2.773]})

        self.assert_numpy_array_equal(df._get_numeric_data().columns,
                                      ['a', 'b', 'e'])
    def test_is_mixed_type(self):
        """_is_mixed_type is False for a homogeneous frame, True for mixed dtypes."""
        self.assertFalse(self.frame._is_mixed_type)
        self.assertTrue(self.mixed_frame._is_mixed_type)
    def test_get_numeric_data(self):
        """_get_numeric_data() selects numeric columns across dtype widths."""
        intname = np.dtype(np.int_).name
        floatname = np.dtype(np.float_).name
        datetime64name = np.dtype('M8[ns]').name
        objectname = np.dtype(np.object_).name

        # sanity-check dtype inference for scalar construction
        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},
                       index=np.arange(10))
        result = df.get_dtype_counts()
        expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})
        result.sort_index()
        expected.sort_index()
        assert_series_equal(result, expected)

        # narrow numeric dtypes (float32/int32/int16) must also be selected
        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                        'd' : np.array([1.]*10,dtype='float32'),
                        'e' : np.array([1]*10,dtype='int32'),
                        'f' : np.array([1]*10,dtype='int16'),
                        'g' : Timestamp('20010102')},
                       index=np.arange(10))

        result = df._get_numeric_data()
        expected = df.ix[:, ['a', 'b','d','e','f']]
        assert_frame_equal(result, expected)

        # frame with only non-numeric columns -> empty selection
        only_obj = df.ix[:, ['c','g']]
        result = only_obj._get_numeric_data()
        expected = df.ix[:, []]
        assert_frame_equal(result, expected)

        df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})
        result = df._get_numeric_data()
        expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})
        assert_frame_equal(result, expected)

        # all-numeric frame is returned as-is (idempotent)
        df = result.copy()
        result = df._get_numeric_data()
        expected = df
        assert_frame_equal(result, expected)
    def test_bool_describe_in_mixed_frame(self):
        """describe() includes bool/int columns, excludes strings; bool min/max stay bool."""
        df = DataFrame({
            'string_data': ['a', 'b', 'c', 'd', 'e'],
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
        })

        # Boolean data and integer data is included in .describe() output, string data isn't
        self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])

        bool_describe = df.describe()['bool_data']

        # Both the min and the max values should stay booleans
        self.assertEqual(bool_describe['min'].dtype, np.bool_)
        self.assertEqual(bool_describe['max'].dtype, np.bool_)

        self.assertFalse(bool_describe['min'])
        self.assertTrue(bool_describe['max'])

        # For numeric operations, like mean or median, the values True/False are cast to
        # the integer values 1 and 0
        assert_almost_equal(bool_describe['mean'], 0.4)
        assert_almost_equal(bool_describe['50%'], 0)
    def test_reduce_mixed_frame(self):
        """sum() on a bool/int/str frame reduces each dtype sensibly (GH 6806)."""
        # GH 6806
        df = DataFrame({
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
            'string_data': ['a', 'b', 'c', 'd', 'e'],
        })
        df.reindex(columns=['bool_data', 'int_data', 'string_data'])
        test = df.sum(axis=0)
        # bools sum as counts, ints add, strings concatenate
        assert_almost_equal(test.values, [2, 150, 'abcde'])
        assert_series_equal(test, df.T.sum(axis=1))
    def test_count(self):
        """count() matches notnull().sum(), including empty-frame corner cases."""
        f = lambda s: notnull(s).sum()
        self._check_stat_op('count', f,
                            has_skipna=False,
                            has_numeric_only=True,
                            check_dtype=False,
                            check_dates=True)

        # corner case: counting an empty frame still returns Series
        frame = DataFrame()
        ct1 = frame.count(1)
        tm.assertIsInstance(ct1, Series)

        ct2 = frame.count(0)
        tm.assertIsInstance(ct2, Series)

        # GH #423: index without columns / columns without index
        df = DataFrame(index=lrange(10))
        result = df.count(1)
        expected = Series(0, index=df.index)
        assert_series_equal(result, expected)

        df = DataFrame(columns=lrange(10))
        result = df.count(0)
        expected = Series(0, index=df.columns)
        assert_series_equal(result, expected)

        df = DataFrame()
        result = df.count()
        expected = Series(0, index=[])
        assert_series_equal(result, expected)
    def test_sum(self):
        """sum() agrees with np.sum, including upcasting from float32."""
        self._check_stat_op('sum', np.sum, has_numeric_only=True)

        # mixed types (with upcasting happening)
        self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),
                            has_numeric_only=True, check_dtype=False, check_less_precise=True)
    def test_stat_operators_attempt_obj_array(self):
        """Reductions on object-dtype frames match the same op after casting to float."""
        data = {
            'a': [-0.00049987540199591344, -0.0016467257772919831,
                  0.00067695870775883013],
            'b': [-0, -0, 0.0],
            'c': [0.00031111847529610595, 0.0014902627951905339,
                  -0.00094099200035979691]
        }
        # dtype='O' forces object arrays even though the values are floats
        df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                        dtype='O')
        methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']

        # GH #676
        df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                         2: [np.nan, 4]}, dtype=object)

        for df in [df1, df2]:
            for meth in methods:
                self.assertEqual(df.values.dtype, np.object_)
                result = getattr(df, meth)(1)
                expected = getattr(df.astype('f8'), meth)(1)

                # bottleneck can disagree for some methods/versions; skip those
                if not tm._incompat_bottleneck_version(meth):
                    assert_series_equal(result, expected)
    def test_mean(self):
        """mean() agrees with np.mean (datetime columns exercised too)."""
        self._check_stat_op('mean', np.mean, check_dates=True)
    def test_product(self):
        """product() agrees with np.prod."""
        self._check_stat_op('product', np.prod)
    def test_median(self):
        """median() agrees with np.median (NaN anywhere makes the column NaN)."""
        def wrapper(x):
            # np.median has no NaN handling, so mirror skipna=False semantics
            if isnull(x).any():
                return np.nan
            return np.median(x)

        self._check_stat_op('median', wrapper, check_dates=True)
    def test_min(self):
        """min() agrees with np.min on float and integer frames."""
        self._check_stat_op('min', np.min, check_dates=True)
        self._check_stat_op('min', np.min, frame=self.intframe)
    def test_cummin(self):
        """cummin() matches a per-Series Series.cummin along both axes, NaNs included."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan

        # axis = 0
        cummin = self.tsframe.cummin()
        expected = self.tsframe.apply(Series.cummin)
        assert_frame_equal(cummin, expected)

        # axis = 1
        cummin = self.tsframe.cummin(axis=1)
        expected = self.tsframe.apply(Series.cummin, axis=1)
        assert_frame_equal(cummin, expected)

        # works (smoke test on an int frame; result intentionally unchecked)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummin()

        # fix issue: axis=1 result must keep the original shape
        cummin_xs = self.tsframe.cummin(axis=1)
        self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
    def test_cummax(self):
        """cummax() matches a per-Series Series.cummax along both axes, NaNs included."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan

        # axis = 0
        cummax = self.tsframe.cummax()
        expected = self.tsframe.apply(Series.cummax)
        assert_frame_equal(cummax, expected)

        # axis = 1
        cummax = self.tsframe.cummax(axis=1)
        expected = self.tsframe.apply(Series.cummax, axis=1)
        assert_frame_equal(cummax, expected)

        # works (smoke test on an int frame; result intentionally unchecked)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummax()

        # fix issue: axis=1 result must keep the original shape
        cummax_xs = self.tsframe.cummax(axis=1)
        self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
    def test_max(self):
        """max() agrees with np.max on float and integer frames."""
        self._check_stat_op('max', np.max, check_dates=True)
        self._check_stat_op('max', np.max, frame=self.intframe)
    def test_mad(self):
        """mad() matches the mean absolute deviation computed by hand."""
        f = lambda x: np.abs(x - x.mean()).mean()
        self._check_stat_op('mad', f)
    def test_var_std(self):
        """var()/std() match numpy with ddof=1; custom ddof and non-negativity checked."""
        alt = lambda x: np.var(x, ddof=1)
        self._check_stat_op('var', alt)

        alt = lambda x: np.std(x, ddof=1)
        self._check_stat_op('std', alt)

        # custom ddof passes through to each column
        result = self.tsframe.std(ddof=4)
        expected = self.tsframe.apply(lambda x: x.std(ddof=4))
        assert_almost_equal(result, expected)

        result = self.tsframe.var(ddof=4)
        expected = self.tsframe.apply(lambda x: x.var(ddof=4))
        assert_almost_equal(result, expected)

        # identical rows: variance must never come out negative (numerical safety)
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nanvar(arr, axis=0)
        self.assertFalse((result < 0).any())
        # also verify the pure-numpy path with bottleneck disabled
        if nanops._USE_BOTTLENECK:
            nanops._USE_BOTTLENECK = False
            result = nanops.nanvar(arr, axis=0)
            self.assertFalse((result < 0).any())
            nanops._USE_BOTTLENECK = True
    def test_numeric_only_flag(self):
        """numeric_only=True skips non-numeric cells; False raises on them (GH 9201)."""
        # GH #9201
        methods = ['sem', 'var', 'std']
        df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a number in str format
        df1.ix[0, 'foo'] = '100'

        df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a non-number str
        df2.ix[0, 'foo'] = 'a'

        for meth in methods:
            result = getattr(df1, meth)(axis=1, numeric_only=True)
            expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
            assert_series_equal(expected, result)

            result = getattr(df2, meth)(axis=1, numeric_only=True)
            expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
            assert_series_equal(expected, result)

            # df1 has all numbers, df2 has a letter inside
            self.assertRaises(TypeError, lambda : getattr(df1, meth)(axis=1, numeric_only=False))
            self.assertRaises(TypeError, lambda : getattr(df2, meth)(axis=1, numeric_only=False))
    def test_sem(self):
        """sem() matches std(ddof)/sqrt(n); result is never negative."""
        alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
        self._check_stat_op('sem', alt)

        result = self.tsframe.sem(ddof=4)
        expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))
        assert_almost_equal(result, expected)

        # identical rows: sem must never come out negative (numerical safety)
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nansem(arr, axis=0)
        self.assertFalse((result < 0).any())
        # also verify the pure-numpy path with bottleneck disabled
        if nanops._USE_BOTTLENECK:
            nanops._USE_BOTTLENECK = False
            result = nanops.nansem(arr, axis=0)
            self.assertFalse((result < 0).any())
            nanops._USE_BOTTLENECK = True
    def test_skew(self):
        """skew() matches scipy.stats.skew(bias=False); < 3 samples gives NaN."""
        tm._skip_if_no_scipy()
        from scipy.stats import skew

        def alt(x):
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)

        self._check_stat_op('skew', alt)
    def test_kurt(self):
        """kurt() matches scipy kurtosis(bias=False); also works per MultiIndex level."""
        tm._skip_if_no_scipy()

        from scipy.stats import kurtosis

        def alt(x):
            if len(x) < 4:
                return np.nan
            return kurtosis(x, bias=False)

        self._check_stat_op('kurt', alt)

        # level-wise kurtosis over a MultiIndex with a single outer label
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0],
                                   [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        df = DataFrame(np.random.randn(6, 3), index=index)
        kurt = df.kurt()
        kurt2 = df.kurt(level=0).xs('bar')
        # same values; xs() gives the cross-section a name, plain kurt() doesn't
        assert_series_equal(kurt, kurt2, check_names=False)
        self.assertTrue(kurt.name is None)
        self.assertEqual(kurt2.name, 'bar')
    def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                       has_numeric_only=False, check_dtype=True, check_dates=False,
                       check_less_precise=False):
        """Generic harness for DataFrame reductions.

        Compares ``frame.<name>(axis=...)`` against applying ``alternative``
        column-/row-wise, and exercises skipna, numeric_only, dtype
        preservation, datetime columns, bad-axis errors, mixed-type frames,
        and the all-NaN corner case.

        Parameters
        ----------
        name : str
            Reduction method name, e.g. ``'sum'``.
        alternative : callable
            Reference implementation applied to a 1-d array/Series.
        frame : DataFrame, optional
            Frame to test; defaults to ``self.frame``. NOTE: mutated in place
            (NaNs injected).
        has_skipna : bool
            Whether the method accepts ``skipna``.
        has_numeric_only : bool
            Whether the method accepts ``numeric_only``.
        check_dtype, check_dates, check_less_precise : bool
            Extra verification toggles.
        """
        if frame is None:
            frame = self.frame
        # set some NAs
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan

        f = getattr(frame, name)

        if check_dates:
            # datetime-only frame must still return a Series, not raise
            df = DataFrame({'b': date_range('1/1/2001', periods=2)})
            _f = getattr(df, name)
            result = _f()
            self.assertIsInstance(result, Series)

            df['a'] = lrange(len(df))
            result = getattr(df, name)()
            self.assertIsInstance(result, Series)
            self.assertTrue(len(result))

        if has_skipna:
            def skipna_wrapper(x):
                # reference skipna=True: drop NaNs first, NaN if nothing left
                nona = x.dropna()
                if len(nona) == 0:
                    return np.nan
                return alternative(nona)

            def wrapper(x):
                # reference skipna=False: raw values, NaNs propagate
                return alternative(x.values)

            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            assert_series_equal(result0, frame.apply(wrapper),
                                check_dtype=check_dtype,
                                check_less_precise=check_less_precise)
            assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                check_dtype=False,
                                check_less_precise=check_less_precise)  # HACK: win32
        else:
            skipna_wrapper = alternative
            wrapper = alternative

        result0 = f(axis=0)
        result1 = f(axis=1)
        assert_series_equal(result0, frame.apply(skipna_wrapper),
                            check_dtype=check_dtype,
                            check_less_precise=check_less_precise)
        if not tm._incompat_bottleneck_version(name):
            assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                                check_dtype=False,
                                check_less_precise=check_less_precise)

        # check dtypes
        if check_dtype:
            lcd_dtype = frame.values.dtype
            self.assertEqual(lcd_dtype, result0.dtype)
            self.assertEqual(lcd_dtype, result1.dtype)

        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)

        # bad axis
        assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
        # make sure works on mixed-type frame
        getattr(self.mixed_frame, name)(axis=0)
        getattr(self.mixed_frame, name)(axis=1)

        if has_numeric_only:
            getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
            getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
            getattr(self.frame, name)(axis=0, numeric_only=False)
            getattr(self.frame, name)(axis=1, numeric_only=False)

        # all NA case
        if has_skipna:
            all_na = self.frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if not tm._incompat_bottleneck_version(name):
                self.assertTrue(np.isnan(r0).all())
                self.assertTrue(np.isnan(r1).all())
    def test_mode(self):
        """mode(): per-column modes padded with NaN, sorted output, mixed dtypes."""
        df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
                           "B": [10, 10, 10, np.nan, 3, 4],
                           "C": [8, 8, 8, 9, 9, 9],
                           "D": np.arange(6,dtype='int64'),
                           "E": [8, 8, 1, 1, 3, 3]})
        assert_frame_equal(df[["A"]].mode(),
                           pd.DataFrame({"A": [12]}))
        # all-unique column -> empty mode
        expected = pd.Series([], dtype='int64', name='D').to_frame()
        assert_frame_equal(df[["D"]].mode(), expected)
        # multi-modal column -> all modes, sorted ascending
        expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
        assert_frame_equal(df[["E"]].mode(), expected)
        assert_frame_equal(df[["A", "B"]].mode(),
                           pd.DataFrame({"A": [12], "B": [10.]}))
        # columns with fewer modes are padded with NaN to the longest column
        assert_frame_equal(df.mode(),
                           pd.DataFrame({"A": [12, np.nan, np.nan],
                                         "B": [10, np.nan, np.nan],
                                         "C": [8, 9, np.nan],
                                         "D": [np.nan, np.nan, np.nan],
                                         "E": [1, 3, 8]}))

        # outputs in sorted order
        df["C"] = list(reversed(df["C"]))
        com.pprint_thing(df["C"])
        com.pprint_thing(df["C"].mode())
        a, b = (df[["A", "B", "C"]].mode(),
                pd.DataFrame({"A": [12, np.nan],
                              "B": [10, np.nan],
                              "C": [8, 9]}))
        com.pprint_thing(a)
        com.pprint_thing(b)
        assert_frame_equal(a, b)
        # should work with heterogeneous types
        df = pd.DataFrame({"A": np.arange(6,dtype='int64'),
                           "B": pd.date_range('2011', periods=6),
                           "C": list('abcdef')})
        # every value unique -> empty modes, but dtypes are preserved
        exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
                            "B": pd.Series([], dtype=df["B"].dtype),
                            "C": pd.Series([], dtype=df["C"].dtype)})
        assert_frame_equal(df.mode(), exp)

        # and also when not empty
        df.loc[1, "A"] = 0
        df.loc[4, "B"] = df.loc[3, "B"]
        df.loc[5, "C"] = 'e'
        exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
                            "B": pd.Series([df.loc[3, "B"]], dtype=df["B"].dtype),
                            "C": pd.Series(['e'], dtype=df["C"].dtype)})
        assert_frame_equal(df.mode(), exp)
    def test_sum_corner(self):
        """sum() of an empty frame returns empty Series along both axes."""
        axis0 = self.empty.sum(0)
        axis1 = self.empty.sum(1)
        tm.assertIsInstance(axis0, Series)
        tm.assertIsInstance(axis1, Series)
        self.assertEqual(len(axis0), 0)
        self.assertEqual(len(axis1), 0)
    def test_sum_object(self):
        """Smoke test: sum() over a timedelta-valued frame does not raise."""
        values = self.frame.values.astype(int)
        frame = DataFrame(values, index=self.frame.index,
                          columns=self.frame.columns)
        deltas = frame * timedelta(1)
        deltas.sum()
    def test_sum_bool(self):
        """Smoke test: sum() over a boolean frame works along both axes."""
        # ensure this works, bug report
        bools = np.isnan(self.frame)
        bools.sum(1)
        bools.sum(0)
    def test_mean_corner(self):
        """mean() on mixed frames skips non-numeric columns; bools average as 0/1."""
        # unit test when have object data
        the_mean = self.mixed_frame.mean(axis=0)
        the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
        self.assertTrue(the_sum.index.equals(the_mean.index))
        # object columns must have been dropped from the reduction
        self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))

        # xs sum mixed type, just want to know it works...
        the_mean = self.mixed_frame.mean(axis=1)
        the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
        self.assertTrue(the_sum.index.equals(the_mean.index))

        # take mean of boolean column
        self.frame['bool'] = self.frame['A'] > 0
        means = self.frame.mean(0)
        self.assertEqual(means['bool'], self.frame['bool'].values.mean())
    def test_stats_mixed_type(self):
        """Smoke test: row-wise reductions on a mixed-dtype frame do not raise."""
        # don't blow up
        self.mixed_frame.std(1)
        self.mixed_frame.var(1)
        self.mixed_frame.mean(1)
        self.mixed_frame.skew(1)
    def test_median_corner(self):
        """median() on an integer frame matches np.median (NaN propagates)."""
        def wrapper(x):
            # np.median has no NaN handling, so mirror skipna=False semantics
            if isnull(x).any():
                return np.nan
            return np.median(x)

        self._check_stat_op('median', wrapper, frame=self.intframe,
                            check_dtype=False, check_dates=True)
    def test_quantile(self):
        """quantile(): scalar q against np.percentile, non-numeric exclusion, axis=1."""
        from numpy import percentile

        q = self.tsframe.quantile(0.1, axis=0)
        self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
        q = self.tsframe.quantile(0.9, axis=1)
        q = self.intframe.quantile(0.1)
        self.assertEqual(q['A'], percentile(self.intframe['A'], 10))

        # test degenerate case: empty columns yield NaN quantiles
        q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
        assert(np.isnan(q['x']) and np.isnan(q['y']))

        # non-numeric exclusion
        df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})
        rs = df.quantile(0.5)
        xp = df.median()
        assert_series_equal(rs, xp)

        # axis
        df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
        result = df.quantile(.5, axis=1)
        expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
        assert_series_equal(result, expected)

        result = df.quantile([.5, .75], axis=1)
        expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
                              3: [3.5, 3.75]}, index=[0.5, 0.75])
        assert_frame_equal(result, expected, check_index_type=True)

        # We may want to break API in the future to change this
        # so that we exclude non-numeric along the same axis
        # See GH #7312
        df = DataFrame([[1, 2, 3],
                        ['a', 'b', 4]])
        result = df.quantile(.5, axis=1)
        expected = Series([3., 4.], index=[0, 1])
        assert_series_equal(result, expected)
    def test_quantile_axis_parameter(self):
        """quantile(): axis accepts 0/1 and 'index'/'columns'; bad axes raise (GH 9543/9544)."""
        # GH 9543/9544

        df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])

        result = df.quantile(.5, axis=0)

        expected = Series([2., 3.], index=["A", "B"])
        assert_series_equal(result, expected)

        expected = df.quantile(.5, axis="index")
        assert_series_equal(result, expected)

        result = df.quantile(.5, axis=1)

        expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
        assert_series_equal(result, expected)

        result = df.quantile(.5, axis="columns")
        assert_series_equal(result, expected)

        # invalid axis values must raise, not silently default
        self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)
        self.assertRaises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'])
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1])
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
    def test_quantile_invalid(self):
        """quantile() raises ValueError for q outside [0, 1], scalar or list."""
        msg = 'percentiles should all be in the interval \\[0, 1\\]'
        for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
            with tm.assertRaisesRegexp(ValueError, msg):
                self.tsframe.quantile(invalid)
    def test_cumsum(self):
        """cumsum() matches per-Series Series.cumsum along both axes, NaNs included."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan

        # axis = 0
        cumsum = self.tsframe.cumsum()
        expected = self.tsframe.apply(Series.cumsum)
        assert_frame_equal(cumsum, expected)

        # axis = 1
        cumsum = self.tsframe.cumsum(axis=1)
        expected = self.tsframe.apply(Series.cumsum, axis=1)
        assert_frame_equal(cumsum, expected)

        # works (smoke test on an int frame; result intentionally unchecked)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cumsum()

        # fix issue: axis=1 result must keep the original shape
        cumsum_xs = self.tsframe.cumsum(axis=1)
        self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
    def test_cumprod(self):
        """cumprod() matches per-Series Series.cumprod; also smoke-tests int dtypes."""
        self.tsframe.ix[5:10, 0] = nan
        self.tsframe.ix[10:15, 1] = nan
        self.tsframe.ix[15:, 2] = nan

        # axis = 0
        cumprod = self.tsframe.cumprod()
        expected = self.tsframe.apply(Series.cumprod)
        assert_frame_equal(cumprod, expected)

        # axis = 1
        cumprod = self.tsframe.cumprod(axis=1)
        expected = self.tsframe.apply(Series.cumprod, axis=1)
        assert_frame_equal(cumprod, expected)

        # fix issue: axis=1 result must keep the original shape
        cumprod_xs = self.tsframe.cumprod(axis=1)
        self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))

        # ints (smoke: must not raise on integer frames)
        df = self.tsframe.fillna(0).astype(int)
        df.cumprod(0)
        df.cumprod(1)

        # ints32
        df = self.tsframe.fillna(0).astype(np.int32)
        df.cumprod(0)
        df.cumprod(1)
    def test_rank(self):
        """rank() matches scipy rankdata along both axes; NaNs rank as NaN."""
        tm._skip_if_no_scipy()
        from scipy.stats import rankdata

        self.frame['A'][::2] = np.nan
        self.frame['B'][::3] = np.nan
        self.frame['C'][::4] = np.nan
        self.frame['D'][::5] = np.nan

        ranks0 = self.frame.rank()
        ranks1 = self.frame.rank(1)
        mask = np.isnan(self.frame.values)

        # rankdata can't see NaN; fill with inf so NaNs rank last, then mask out
        fvals = self.frame.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, fvals)
        exp0[mask] = np.nan

        exp1 = np.apply_along_axis(rankdata, 1, fvals)
        exp1[mask] = np.nan

        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)

        # integers: rank matches the float-cast rank
        df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))

        result = df.rank()
        exp = df.astype(float).rank()
        assert_frame_equal(result, exp)

        result = df.rank(1)
        exp = df.astype(float).rank(1)
        assert_frame_equal(result, exp)
    def test_rank2(self):
        """rank(): pct=True, object/datetime columns via numeric_only, float ties."""
        from datetime import datetime
        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        # pct=True divides each rank by the group size
        expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
        result = df.rank(1, pct=True)
        assert_frame_equal(result, expected)

        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        expected = df.rank(0) / 2.0
        result = df.rank(0, pct=True)
        assert_frame_equal(result, expected)

        # string ranking requires numeric_only=False
        df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
        expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
        result = df.rank(1, numeric_only=False)
        assert_frame_equal(result, expected)

        expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
        result = df.rank(0, numeric_only=False)
        assert_frame_equal(result, expected)

        # NaN in an object column still ranks as NaN
        df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
        expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
        result = df.rank(1, numeric_only=False)
        assert_frame_equal(result, expected)

        expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
        result = df.rank(0, numeric_only=False)
        assert_frame_equal(result, expected)

        # f7u12, this does not work without extensive workaround
        data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
                [datetime(2000, 1, 2), datetime(2000, 1, 3),
                 datetime(2000, 1, 1)]]
        df = DataFrame(data)

        # check the rank
        expected = DataFrame([[2., nan, 1.],
                              [2., 3., 1.]])
        result = df.rank(1, numeric_only=False)
        assert_frame_equal(result, expected)

        # mixed-type frames: default rank(1) must equal numeric_only=True
        self.mixed_frame['datetime'] = datetime.now()
        self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)

        result = self.mixed_frame.rank(1)
        expected = self.mixed_frame.rank(1, numeric_only=True)
        assert_frame_equal(result, expected)

        # tiny magnitude differences must still rank distinctly / tie correctly
        df = DataFrame({"a":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})
        exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})
        assert_frame_equal(df.rank(), exp)
    def test_rank_na_option(self):
        """rank(na_option=...): NaNs rank first ('top') or last ('bottom'), asc/desc."""
        tm._skip_if_no_scipy()
        from scipy.stats import rankdata

        self.frame['A'][::2] = np.nan
        self.frame['B'][::3] = np.nan
        self.frame['C'][::4] = np.nan
        self.frame['D'][::5] = np.nan

        # bottom: NaN acts like +inf, so it ranks last
        ranks0 = self.frame.rank(na_option='bottom')
        ranks1 = self.frame.rank(1, na_option='bottom')

        fvals = self.frame.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, fvals)
        exp1 = np.apply_along_axis(rankdata, 1, fvals)

        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)

        # top: NaN acts like a value below the column/row minimum, ranking first
        ranks0 = self.frame.rank(na_option='top')
        ranks1 = self.frame.rank(1, na_option='top')

        fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
        fval1 = self.frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, fval0)
        exp1 = np.apply_along_axis(rankdata, 1, fval1)

        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)

        # descending

        # bottom (na_option='top' + ascending=False -> NaN ranks last again)
        ranks0 = self.frame.rank(na_option='top', ascending=False)
        ranks1 = self.frame.rank(1, na_option='top', ascending=False)

        fvals = self.frame.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, -fvals)
        exp1 = np.apply_along_axis(rankdata, 1, -fvals)

        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)

        # descending

        # top (na_option='bottom' + ascending=False -> NaN ranks first)
        ranks0 = self.frame.rank(na_option='bottom', ascending=False)
        ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)

        fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
        fval1 = self.frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, -fval0)
        exp1 = np.apply_along_axis(rankdata, 1, -fval1)

        assert_almost_equal(ranks0.values, exp0)
        assert_almost_equal(ranks1.values, exp1)
    def test_axis_aliases(self):
        """axis='index'/'columns' string aliases behave like axis=0/1."""
        f = self.frame

        # reg name
        expected = f.sum(axis=0)
        result = f.sum(axis='index')
        assert_series_equal(result, expected)

        expected = f.sum(axis=1)
        result = f.sum(axis='columns')
        assert_series_equal(result, expected)
    def test_combine_first_mixed(self):
        """Smoke test: combine_first on disjoint mixed-dtype frames does not raise."""
        a = Series(['a', 'b'], index=lrange(2))
        b = Series(lrange(2), index=lrange(2))
        f = DataFrame({'A': a, 'B': b})

        # second frame has a non-overlapping index (5, 6)
        a = Series(['a', 'b'], index=lrange(5, 7))
        b = Series(lrange(2), index=lrange(5, 7))
        g = DataFrame({'A': a, 'B': b})

        combined = f.combine_first(g)
    def test_more_asMatrix(self):
        """as_matrix() on a mixed frame keeps one array column per frame column."""
        values = self.mixed_frame.as_matrix()
        self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
    def test_reindex_boolean(self):
        """Reindexing a bool frame with missing labels upcasts to object with NaN."""
        frame = DataFrame(np.ones((10, 2), dtype=bool),
                          index=np.arange(0, 20, 2),
                          columns=[0, 2])

        reindexed = frame.reindex(np.arange(10))
        # odd labels are absent -> NaN holes force object dtype
        self.assertEqual(reindexed.values.dtype, np.object_)
        self.assertTrue(isnull(reindexed[0][1]))

        reindexed = frame.reindex(columns=lrange(3))
        self.assertEqual(reindexed.values.dtype, np.object_)
        self.assertTrue(isnull(reindexed[1]).all())
    def test_reindex_objects(self):
        """Column reindexing adds requested new columns and drops omitted ones."""
        reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
        self.assertIn('foo', reindexed)

        reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
        self.assertNotIn('foo', reindexed)
    def test_reindex_corner(self):
        """Reindex corner cases: empty frame gains columns; new int column floats."""
        index = Index(['a', 'b', 'c'])
        dm = self.empty.reindex(index=[1, 2, 3])
        reindexed = dm.reindex(columns=index)
        self.assertTrue(reindexed.columns.equals(index))

        # ints are weird: new column E is all-NaN, so int frame upcasts to float
        smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
        self.assertEqual(smaller['E'].dtype, np.float64)
    def test_reindex_axis(self):
        """reindex_axis(labels, axis=...) is equivalent to reindex(columns/index=...)."""
        cols = ['A', 'B', 'E']
        reindexed1 = self.intframe.reindex_axis(cols, axis=1)
        reindexed2 = self.intframe.reindex(columns=cols)
        assert_frame_equal(reindexed1, reindexed2)

        rows = self.intframe.index[0:5]
        reindexed1 = self.intframe.reindex_axis(rows, axis=0)
        reindexed2 = self.intframe.reindex(index=rows)
        assert_frame_equal(reindexed1, reindexed2)

        # invalid axis number must raise
        self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)

        # no-op case: reindexing with the existing columns leaves the frame equal
        cols = self.frame.columns.copy()
        newFrame = self.frame.reindex_axis(cols, axis=1)
        assert_frame_equal(newFrame, self.frame)
    def test_reindex_with_nans(self):
        """Reindexing a float index containing NaN selects by label correctly."""
        df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
                       columns=['a', 'b'],
                       index=[100.0, 101.0, np.nan, 102.0, 103.0])

        result = df.reindex(index=[101.0, 102.0, 103.0])
        expected = df.iloc[[1, 3, 4]]
        assert_frame_equal(result, expected)

        result = df.reindex(index=[103.0])
        expected = df.iloc[[4]]
        assert_frame_equal(result, expected)

        result = df.reindex(index=[101.0])
        expected = df.iloc[[1]]
        assert_frame_equal(result, expected)
def test_reindex_multi(self):
    """Simultaneous row+column reindex equals two chained reindexes."""
    def check_both_ways(frame, rows, cols):
        # positional (rows, cols) form vs. chained single-axis form
        simultaneous = frame.reindex(rows, cols)
        chained = frame.reindex(rows).reindex(columns=cols)
        assert_frame_equal(simultaneous, chained)

    check_both_ways(DataFrame(np.random.randn(3, 3)), lrange(4), lrange(4))
    check_both_ways(DataFrame(np.random.randint(0, 10, (3, 3))),
                    lrange(4), lrange(4))
    check_both_ways(DataFrame(np.random.randint(0, 10, (3, 3))),
                    lrange(2), lrange(2))

    # complex-valued frame, keyword form
    df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
    result = df.reindex(index=[0, 1], columns=['a', 'b'])
    expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
    assert_frame_equal(result, expected)
def test_rename_objects(self):
    """rename with a callable maps every column label through it."""
    uppered = self.mixed_frame.rename(columns=str.upper)
    self.assertIn('FOO', uppered)
    self.assertNotIn('foo', uppered)
def test_fill_corner(self):
    """fillna fills NaNs written into a mixed frame; also smoke-tests
    fillna on a frame reindexed down to zero columns."""
    # poke NaN holes into the shared mixed-type fixture
    self.mixed_frame.ix[5:20,'foo'] = nan
    self.mixed_frame.ix[-10:,'A'] = nan

    filled = self.mixed_frame.fillna(value=0)
    self.assertTrue((filled.ix[5:20,'foo'] == 0).all())
    del self.mixed_frame['foo']

    # zero-column frame: fillna must not blow up (result is unchecked)
    empty_float = self.frame.reindex(columns=[])
    result = empty_float.fillna(value=0)
def test_count_objects(self):
    """count() agrees between two identically-built mixed frames, both axes."""
    left = DataFrame(self.mixed_frame._series)
    right = DataFrame(self.mixed_frame._series)
    tm.assert_series_equal(left.count(), right.count())
    tm.assert_series_equal(left.count(1), right.count(1))
def test_cumsum_corner(self):
    """cumsum on an all-int frame must not raise (smoke test, no assertion)."""
    frame = DataFrame(np.arange(20).reshape(4, 5),
                      index=lrange(4), columns=lrange(5))
    result = frame.cumsum()
#----------------------------------------------------------------------
# Stacking / unstacking
def test_stack_unstack(self):
    """stack() then unstack() round-trips a frame (and a frame of stacks)."""
    stacked = self.frame.stack()
    stacked_df = DataFrame({'foo': stacked, 'bar': stacked})

    # default unstack (innermost level) recovers the original frame
    assert_frame_equal(stacked.unstack(), self.frame)
    assert_frame_equal(stacked_df.unstack()['bar'], self.frame)

    # unstacking level 0 instead yields the transpose
    assert_frame_equal(stacked.unstack(0).T, self.frame)
    assert_frame_equal(stacked_df.unstack(0)['bar'].T, self.frame)
def test_stack_ints(self):
    """Stacking two levels at once equals stacking them one at a time,
    whether the levels are given as positive or negative positions."""
    columns = MultiIndex.from_tuples(
        list(itertools.product(range(3), repeat=3)))
    df = DataFrame(np.random.randn(30, 27), columns=columns)

    one_at_a_time = df.stack(level=1).stack(level=1)
    assert_frame_equal(df.stack(level=[1, 2]), one_at_a_time)
    assert_frame_equal(df.stack(level=[-2, -1]), one_at_a_time)

    # same equivalence when the levels carry (integer) names
    named = df.copy()
    named.columns.set_names(range(3), inplace=True)
    assert_frame_equal(named.stack(level=[1, 2]),
                       named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
    """Stacking by a mix of level names and level numbers (GH 8584)."""
    columns = MultiIndex.from_tuples(
        [('A', 'cat', 'long'), ('B', 'cat', 'long'),
         ('A', 'dog', 'short'), ('B', 'dog', 'short')],
        names=['exp', 'animal', 'hair_length']
    )
    df = DataFrame(randn(4, 4), columns=columns)
    # reference results obtained with pure level-name stacking
    animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
    exp_hair_stacked = df.stack(level=['exp', 'hair_length'])

    # GH #8584: Need to check that stacking works when a number
    # is passed that is both a level name and in the range of
    # the level numbers
    df2 = df.copy()
    df2.columns.names = ['exp', 'animal', 1]
    assert_frame_equal(df2.stack(level=['animal', 1]),
                       animal_hair_stacked, check_names=False)
    assert_frame_equal(df2.stack(level=['exp', 1]),
                       exp_hair_stacked, check_names=False)

    # When mixed types are passed and the ints are not level
    # names, raise
    self.assertRaises(ValueError, df2.stack, level=['animal', 0])

    # GH #8584: Having 0 in the level names could raise a
    # strange error about lexsort depth
    df3 = df.copy()
    df3.columns.names = ['exp', 'animal', 0]
    assert_frame_equal(df3.stack(level=['animal', 0]),
                       animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
    """Stacking by integer level *names*, including out-of-order names,
    must resolve names (not positions)."""
    columns = MultiIndex.from_tuples(
        [('A', 'cat', 'long'), ('B', 'cat', 'long'),
         ('A', 'dog', 'short'), ('B', 'dog', 'short')],
        names=['exp', 'animal', 'hair_length']
    )
    df = DataFrame(randn(4, 4), columns=columns)

    # reference results from stacking by string names
    exp_animal_stacked = df.stack(level=['exp', 'animal'])
    animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
    exp_hair_stacked = df.stack(level=['exp', 'hair_length'])

    # integer names that coincide with level positions
    df2 = df.copy()
    df2.columns.names = [0, 1, 2]
    assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
                       check_names=False )
    assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
                       check_names=False)
    assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
                       check_names=False)

    # Out-of-order int column names
    df3 = df.copy()
    df3.columns.names = [2, 0, 1]
    assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
                       check_names=False)
    assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
                       check_names=False)
    assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
                       check_names=False)
def test_unstack_bool(self):
    """Unstacking a bool frame upcasts to object, with NaN holes."""
    mi = MultiIndex.from_arrays([['a', 'b'], ['c', 'l']])
    df = DataFrame([False, False], index=mi, columns=['col'])

    result = df.unstack()
    expected = DataFrame(
        np.array([[False, np.nan], [np.nan, False]], dtype=object),
        index=['a', 'b'],
        columns=MultiIndex.from_arrays([['col', 'col'], ['c', 'l']]))
    assert_frame_equal(result, expected)
def test_unstack_level_binding(self):
    """stack(0) after unstack([1, 2]) must bind to the correct level
    (GH 9856)."""
    # GH9856
    mi = pd.MultiIndex(
        levels=[[u('foo'), u('bar')], [u('one'), u('two')],
                [u('a'), u('b')]],
        labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
        names=[u('first'), u('second'), u('third')])
    s = pd.Series(0, index=mi)
    result = s.unstack([1, 2]).stack(0)

    expected_mi = pd.MultiIndex(
        levels=[['foo', 'bar'], ['one', 'two']],
        labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
        names=['first', 'second'])
    # the 'third' level becomes the columns; absent combos are NaN
    expected = pd.DataFrame(np.array([[np.nan, 0],
                                      [0, np.nan],
                                      [np.nan, 0],
                                      [0, np.nan]],
                                     dtype=np.float64),
                            index=expected_mi,
                            columns=pd.Index(['a', 'b'], name='third'))
    assert_frame_equal(result, expected)
def test_unstack_to_series(self):
    """DataFrame.unstack yields a Series; unstacking is reversible and
    composes with itself."""
    # check reversibility
    data = self.frame.unstack()

    self.assertTrue(isinstance(data, Series))
    undo = data.unstack().T
    assert_frame_equal(undo, self.frame)

    # check NA handling
    data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
    data.index = Index(['a', 'b', 'c'])
    result = data.unstack()

    midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
                      labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
    expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)

    assert_series_equal(result, expected)

    # check composability of unstack: repeated unstacks cycle back
    old_data = data.copy()
    for _ in range(4):
        data = data.unstack()
    assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
    """unstack preserves dtypes per column block (GH 2929, GH 7405)."""
    # GH 2929
    rows = [[1, 1, 3, 4],
            [1, 2, 3, 4],
            [2, 1, 3, 4],
            [2, 2, 3, 4]]

    df = DataFrame(rows, columns=list('ABCD'))
    result = df.get_dtype_counts()
    expected = Series({'int64' : 4})
    assert_series_equal(result, expected)

    # single dtype: unstacking an all-int frame stays int64
    df2 = df.set_index(['A','B'])
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'int64' : 4})
    assert_series_equal(result, expected)

    # mixed: each source dtype fans out into its own unstacked columns
    df2 = df.set_index(['A','B'])
    df2['C'] = 3.
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'int64' : 2, 'float64' : 2})
    assert_series_equal(result, expected)

    df2['D'] = 'foo'
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'float64' : 2, 'object' : 2})
    assert_series_equal(result, expected)

    # GH7405: int column must survive unstack alongside datetimes
    for c, d in (np.zeros(5), np.zeros(5)), \
                (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):

        df = DataFrame({'A': ['a']*5, 'C':c, 'D':d,
                        'B':pd.date_range('2012-01-01', periods=5)})

        right = df.iloc[:3].copy(deep=True)

        df = df.set_index(['A', 'B'])
        df['D'] = df['D'].astype('int64')

        left = df.iloc[:3].unstack(0)
        right = right.set_index(['A', 'B']).unstack(0)
        right[('D', 'a')] = right[('D', 'a')].astype('int64')

        self.assertEqual(left.shape, (3, 2))
        tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
    """unstack/stack by a duplicated index name raises ValueError."""
    mi = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
                                names=['c1', 'c1'])
    frame = DataFrame([1, 2], index=mi)

    with tm.assertRaises(ValueError):
        frame.unstack('c1')

    with tm.assertRaises(ValueError):
        frame.T.stack('c1')
def test_unstack_nan_index(self):  # GH7466
    """Unstacking indexes that contain NaN labels must not drop data
    (GH 7466, 7403, 7401, 4862, 9497)."""
    # renders a value as text; NaN becomes the empty string
    cast = lambda val: '{0:1}'.format('' if val != val else val)
    nan = np.nan

    def verify(df):
        # every non-null cell encodes its own row/column labels as
        # a '.'-joined string, so we can check label alignment cell-by-cell
        mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
        rows, cols = df.notnull().values.nonzero()
        for i, j in zip(rows, cols):
            left = sorted(df.iloc[i, j].split('.'))
            right = mk_list(df.index[i]) + mk_list(df.columns[j])
            right = sorted(list(map(cast, right)))
            self.assertEqual(left, right)

    df = DataFrame({'jim':['a', 'b', nan, 'd'],
                    'joe':['w', 'x', 'y', 'z'],
                    'jolie':['a.w', 'b.x', ' .y', 'd.z']})

    left = df.set_index(['jim', 'joe']).unstack()['jolie']
    right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
    assert_frame_equal(left, right)

    # all index permutations and levels: nothing may be lost
    for idx in permutations(df.columns[:2]):
        mi = df.set_index(list(idx))
        for lev in range(2):
            udf = mi.unstack(level=lev)
            self.assertEqual(udf.notnull().values.sum(), len(df))
            verify(udf['jolie'])

    df = DataFrame({'1st':['d'] * 3 + [nan] * 5 + ['a'] * 2 +
                    ['c'] * 3 + ['e'] * 2 + ['b'] * 5,
                    '2nd':['y'] * 2 + ['w'] * 3 + [nan] * 3 +
                    ['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
                    '3rd':[67,39,53,72,57,80,31,18,11,30,59,
                           50,62,59,76,52,14,53,60,51]})

    # '4th'/'5th' encode each full row (forward and reversed)
    df['4th'], df['5th'] = \
        df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
        df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)

    for idx in permutations(['1st', '2nd', '3rd']):
        mi = df.set_index(list(idx))
        for lev in range(3):
            udf = mi.unstack(level=lev)
            self.assertEqual(udf.notnull().values.sum(), 2 * len(df))
            for col in ['4th', '5th']:
                verify(udf[col])

    # GH7403
    df = pd.DataFrame({'A': list('aaaabbbb'),'B':range(8), 'C':range(8)})
    df.iloc[3, 1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)

    vals = [[3, 0, 1, 2, nan, nan, nan, nan],
            [nan, nan, nan, nan, 4, 5, 6, 7]]
    vals = list(map(list, zip(*vals)))
    idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])

    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)

    df = DataFrame({'A': list('aaaabbbb'), 'B':list(range(4))*2,
                    'C':range(8)})
    df.iloc[2,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)

    vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])
    idx = Index([nan, 0, 1, 2, 3], name='B')
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)

    df = pd.DataFrame({'A': list('aaaabbbb'),'B':list(range(4))*2,
                       'C':range(8)})
    df.iloc[3,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)

    vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])
    idx = Index([nan, 0, 1, 2, 3], name='B')
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)

    # GH7401
    df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C':np.arange(10),
                       'B':date_range('2012-01-01', periods=5).tolist()*2 })
    df.iloc[3,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack()

    # label -1 marks the NaN slot in the datetime level
    vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
    idx = Index(['a', 'b'], name='A')
    cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
                      labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
                      names=[None, 'B'])

    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)

    # GH4862
    vals = [['Hg', nan, nan, 680585148],
            ['U', 0.0, nan, 680585148],
            ['Pb', 7.07e-06, nan, 680585148],
            ['Sn', 2.3614e-05, 0.0133, 680607017],
            ['Ag', 0.0, 0.0133, 680607017],
            ['Hg', -0.00015, 0.0133, 680607017]]
    df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
                   index=[17263, 17264, 17265, 17266, 17267, 17268])

    left = df.copy().set_index(['s_id','dosage','agent']).unstack()

    vals = [[nan, nan, 7.07e-06, nan, 0.0],
            [0.0, -0.00015, nan, 2.3614e-05, nan]]
    idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
                     labels=[[0, 1], [-1, 0]],
                     names=['s_id', 'dosage'])
    cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
                      labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
                      names=[None, 'agent'])
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)

    left = df.ix[17264:].copy().set_index(['s_id','dosage','agent'])
    assert_frame_equal(left.unstack(), right)

    # GH9497 - multiple unstack with nulls
    df = DataFrame({'1st':[1, 2, 1, 2, 1, 2],
                    '2nd':pd.date_range('2014-02-01', periods=6, freq='D'),
                    'jim':100 + np.arange(6),
                    'joe':(np.random.randn(6) * 10).round(2)})

    df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
    df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
    df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan

    left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
    self.assertEqual(left.notnull().values.sum(), 2 * len(df))

    for col in ['jim', 'joe']:
        for _, r in df.iterrows():
            key = r['1st'], (col, r['2nd'], r['3rd'])
            self.assertEqual(r[col], left.loc[key])
def test_stack_datetime_column_multiIndex(self):
    # GH 8039
    """Stacking works when a column MultiIndex level holds datetimes."""
    ts = datetime(2014, 1, 1)
    df = DataFrame([1, 2, 3, 4],
                   columns=MultiIndex.from_tuples([(ts, 'A', 'B')]))
    result = df.stack()

    expected = DataFrame([1, 2, 3, 4],
                         index=MultiIndex.from_product([(0, 1, 2, 3),
                                                        ('B',)]),
                         columns=MultiIndex.from_tuples([(ts, 'A')]))
    assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
    """Stacking a frame whose columns are a *slice* of a MultiIndex
    (GH 8844): dropna=False must match dropna=True for single levels."""
    # GH 8844
    def _test_stack_with_multiindex(multiindex):
        df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
                       columns=multiindex)
        for level in (-1, 0, 1, [0, 1], [1, 0]):
            result = df.stack(level=level, dropna=False)

            if isinstance(level, int):
                # Stacking a single level should not make any all-NaN rows,
                # so df.stack(level=level, dropna=False) should be the same
                # as df.stack(level=level, dropna=True).
                expected = df.stack(level=level, dropna=True)
                if isinstance(expected, Series):
                    assert_series_equal(result, expected)
                else:
                    assert_frame_equal(result, expected)

            # rebuilding the columns from tuples must give the same result
            df.columns = MultiIndex.from_tuples(df.columns.get_values(),
                                                names=df.columns.names)
            expected = df.stack(level=level, dropna=False)
            if isinstance(expected, Series):
                assert_series_equal(result, expected)
            else:
                assert_frame_equal(result, expected)

    full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
                                              ('A', 'y'),
                                              ('C', 'x'), ('C', 'u')],
                                             names=['Upper', 'Lower'])
    # every subset of columns, in both orders
    for multiindex_columns in ([0, 1, 2, 3, 4],
                               [0, 1, 2, 3], [0, 1, 2, 4],
                               [0, 1, 2], [1, 2, 3], [2, 3, 4],
                               [0, 1], [0, 2], [0, 3],
                               [0], [2], [4]):
        _test_stack_with_multiindex(full_multiindex[multiindex_columns])
        if len(multiindex_columns) > 1:
            multiindex_columns.reverse()
            _test_stack_with_multiindex(full_multiindex[multiindex_columns])

    # dropna=False keeps the all-NaN combinations
    df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
    result = df.stack(dropna=False)
    expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
                         index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],
                                          labels=[[0, 0, 1, 1], [1, 3, 1, 3]],
                                          names=[None, 'Lower']),
                         columns=Index(['B', 'C'], name='Upper'),
                         dtype=df.dtypes[0])
    assert_frame_equal(result, expected)
def test_repr_with_mi_nat(self):
    """repr of a frame whose MultiIndex contains NaT renders 'NaT'."""
    frame = DataFrame({'X': [1, 2]},
                      index=[[pd.NaT, pd.Timestamp('20130101')],
                             ['a', 'b']])
    expected = ' X\nNaT a 1\n2013-01-01 b 2'
    nose.tools.assert_equal(repr(frame), expected)
def test_reset_index(self):
    """reset_index end to end: named/unnamed MultiIndex levels, default
    'index'/'level_N' column naming, name collisions, partial resets,
    inplace=True and drop=True."""
    stacked = self.frame.stack()[::2]
    stacked = DataFrame({'foo': stacked, 'bar': stacked})

    names = ['first', 'second']
    stacked.index.names = names
    deleveled = stacked.reset_index()
    # each index level must reappear as a column under its name
    for i, (lev, lab) in enumerate(zip(stacked.index.levels,
                                       stacked.index.labels)):
        values = lev.take(lab)
        name = names[i]
        assert_almost_equal(values, deleveled[name])

    # unnamed levels fall back to 'level_0', 'level_1', ...
    stacked.index.names = [None, None]
    deleveled2 = stacked.reset_index()
    self.assert_numpy_array_equal(deleveled['first'],
                                  deleveled2['level_0'])
    self.assert_numpy_array_equal(deleveled['second'],
                                  deleveled2['level_1'])

    # default name assigned
    rdf = self.frame.reset_index()
    self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)

    # default name assigned, corner case: 'index' already taken
    df = self.frame.copy()
    df['index'] = 'foo'
    rdf = df.reset_index()
    self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)

    # but this is ok
    self.frame.index.name = 'index'
    deleveled = self.frame.reset_index()
    self.assert_numpy_array_equal(deleveled['index'],
                                  self.frame.index.values)
    self.assert_numpy_array_equal(deleveled.index,
                                  np.arange(len(deleveled)))

    # preserve column names
    self.frame.columns.name = 'columns'
    resetted = self.frame.reset_index()
    self.assertEqual(resetted.columns.name, 'columns')

    # only remove certain columns
    frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
    rs = frame.reset_index(['A', 'B'])

    assert_frame_equal(rs, self.frame, check_names=False)  # TODO should reset_index check_names ?

    rs = frame.reset_index(['index', 'A', 'B'])
    assert_frame_equal(rs, self.frame.reset_index(), check_names=False)

    rs = frame.reset_index(['index', 'A', 'B'])
    assert_frame_equal(rs, self.frame.reset_index(), check_names=False)

    rs = frame.reset_index('A')
    xp = self.frame.reset_index().set_index(['index', 'B'])
    assert_frame_equal(rs, xp, check_names=False)

    # test resetting in place
    df = self.frame.copy()
    resetted = self.frame.reset_index()
    df.reset_index(inplace=True)
    assert_frame_equal(df, resetted, check_names=False)

    # drop=True discards the level instead of inserting a column
    frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
    rs = frame.reset_index('A', drop=True)
    xp = self.frame.copy()
    del xp['A']
    xp = xp.set_index(['B'], append=True)
    assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
    """reset_index preserves the float64 dtype of a float index,
    for both Series and DataFrame."""
    time = np.arange(0.0, 10, np.sqrt(2) / 2)
    speeds = Series((9.81 * time ** 2) / 2,
                    index=Index(time, name='time'),
                    name='speed')
    frame = DataFrame(speeds)

    for obj in (speeds, frame):
        resetted = obj.reset_index()
        self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
    """reset_index with MultiIndex *columns*: col_fill / col_level control
    how the index name is placed into the column tuples."""
    vals = np.random.randn(3, 3).astype(object)
    idx = ['x', 'y', 'z']
    # expected data: the index labels prepended as an extra column
    full = np.hstack(([[x] for x in idx], vals))
    df = DataFrame(vals, Index(idx, name='a'),
                   columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])

    # default: name goes in the top level, '' fills the rest
    rs = df.reset_index()
    xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
                                  ['', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)

    # col_fill=None repeats the name in every level
    rs = df.reset_index(col_fill=None)
    xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
                                  ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)

    # col_level selects which level gets the name; col_fill fills the others
    rs = df.reset_index(col_level=1, col_fill='blah')
    xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
                                  ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)

    # same cases with a MultiIndex on the rows, resetting only level 'a'
    df = DataFrame(vals,
                   MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
                                          names=['d', 'a']),
                   columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
    rs = df.reset_index('a', )
    xp = DataFrame(full, Index([0, 1, 2], name='d'),
                   columns=[['a', 'b', 'b', 'c'],
                            ['', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)

    rs = df.reset_index('a', col_fill=None)
    xp = DataFrame(full, Index(lrange(3), name='d'),
                   columns=[['a', 'b', 'b', 'c'],
                            ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)

    rs = df.reset_index('a', col_fill='blah', col_level=1)
    xp = DataFrame(full, Index(lrange(3), name='d'),
                   columns=[['blah', 'b', 'b', 'c'],
                            ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
    # GH5818
    """reset_index works when the columns are a DatetimeIndex."""
    df = pd.DataFrame([[1, 2], [3, 4]],
                      columns=pd.date_range('1/1/2013', '1/2/2013'),
                      index=['A', 'B'])

    expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
                            columns=['index', datetime(2013, 1, 1),
                                     datetime(2013, 1, 2)])
    assert_frame_equal(df.reset_index(), expected)
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
    """as_matrix over numeric columns stays float64 even when an
    object column exists elsewhere in the frame."""
    self.frame['foo'] = 'bar'
    matrix = self.frame.as_matrix(['A', 'B', 'C', 'D'])
    self.assertEqual(matrix.dtype, np.float64)
def test_as_matrix_lcd(self):
    """as_matrix computes the lowest-common dtype of the selected columns."""
    float_cases = [(['A', 'B', 'C', 'D'], np.float64),
                   (['A', 'B', 'C'], np.float32),
                   (['C'], np.float16)]
    for cols, expected_dtype in float_cases:
        self.assertEqual(self.mixed_float.as_matrix(cols).dtype,
                         expected_dtype)

    # guess all ints are cast to uints....
    int_cases = [(['A', 'B', 'C', 'D'], np.int64),
                 (['A', 'D'], np.int64),
                 (['A', 'B', 'C'], np.int64),
                 (['A', 'C'], np.int32),
                 (['C', 'D'], np.int64),
                 (['A'], np.int32),
                 (['C'], np.uint8)]
    for cols, expected_dtype in int_cases:
        self.assertEqual(self.mixed_int.as_matrix(cols).dtype,
                         expected_dtype)
def test_constructor_with_convert(self):
    # this is actually mostly a test of lib.maybe_convert_objects
    # #2845
    """DataFrame construction infers the right dtype for object lists:
    (input data, expected data after conversion, expected dtype)."""
    cases = [
        ([2 ** 63 - 1], [2 ** 63 - 1], np.int64),
        ([2 ** 63], [2 ** 63], np.object_),              # overflows int64
        ([datetime(2005, 1, 1), True],
         [datetime(2005, 1, 1), True], np.object_),
        ([None, 1], [np.nan, 1], np.float_),             # None -> NaN
        ([1.0, 2], [1.0, 2], np.float_),
        ([1.0 + 2.0j, 3], [1.0 + 2.0j, 3], np.complex_),
        ([1.0 + 2.0j, 3.0], [1.0 + 2.0j, 3.0], np.complex_),
        ([1.0 + 2.0j, True], [1.0 + 2.0j, True], np.object_),
        ([1.0, None], [1.0, np.nan], np.float_),
        ([1.0 + 2.0j, None], [1.0 + 2.0j, np.nan], np.complex_),
        ([2.0, 1, True, None], [2.0, 1, True, None], np.object_),
        ([2.0, 1, datetime(2006, 1, 1), None],
         [2.0, 1, datetime(2006, 1, 1), None], np.object_),
    ]
    for data, converted, dtype in cases:
        result = DataFrame({'A': data})['A']
        expected = Series(np.asarray(converted, dtype), name='A')
        assert_series_equal(result, expected)
def test_construction_with_mixed(self):
    """Construction from mixed datetime/NaN rows converts every column to
    datetime64[ns]; a mixed frame gaining datetime/timedelta columns
    reports the right dtype counts."""
    # test construction edge cases with mixed types
    # f7u12, this does not work without extensive workaround
    data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
            [datetime(2000, 1, 2), datetime(2000, 1, 3),
             datetime(2000, 1, 1)]]
    df = DataFrame(data)

    # check dtypes
    result = df.get_dtype_counts().sort_values()
    expected = Series({'datetime64[ns]' : 3})
    # BUG FIX: this comparison was computed but never asserted
    assert_series_equal(result, expected)

    # mixed-type frames
    self.mixed_frame['datetime'] = datetime.now()
    self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
    self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')
    self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')
    result = self.mixed_frame.get_dtype_counts().sort_values()
    expected = Series({ 'float64' : 4,
                        'object' : 1,
                        'datetime64[ns]' : 1,
                        'timedelta64[ns]' : 1}).sort_values()
    assert_series_equal(result,expected)
def test_construction_with_conversions(self):
    """Assigning non-ns timedelta64/datetime64 numpy arrays converts them
    to the ns-resolution pandas types."""
    # convert from a numpy array of non-ns timedelta64
    arr = np.array([1,2,3],dtype='timedelta64[s]')
    s = Series(arr)
    expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))
    assert_series_equal(s,expected)

    df = DataFrame(index=range(3))
    df['A'] = arr
    expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},
                         index=range(3))
    assert_frame_equal(df,expected)

    # convert from a numpy array of non-ns datetime64
    #### note that creating a numpy datetime64 is in LOCAL time!!!!
    #### seems to work for M8[D], but not for M8[s]
    s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))
    assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))
    #s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
    #assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))

    expected = DataFrame({
        'dt1' : Timestamp('20130101'),
        'dt2' : date_range('20130101',periods=3),
        #'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
        },index=range(3))

    df = DataFrame(index=range(3))
    df['dt1'] = np.datetime64('2013-01-01')
    df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')
    #df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
    assert_frame_equal(df, expected)
def test_constructor_frame_copy(self):
    """copy=True detaches the new frame's data from the source frame."""
    duplicate = DataFrame(self.frame, copy=True)
    duplicate['A'] = 5

    self.assertTrue((duplicate['A'] == 5).all())
    self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
    """From an ndarray: the default shares the buffer, copy=True does not."""
    shared = DataFrame(self.frame.values)
    self.frame.values[5] = 5
    self.assertTrue((shared.values[5] == 5).all())

    detached = DataFrame(self.frame.values, copy=True)
    self.frame.values[6] = 6
    self.assertFalse((detached.values[6] == 6).all())
def test_constructor_series_copy(self):
    """Constructing from a Series copies, so writes don't leak back."""
    source = self.frame._series
    df = DataFrame({'A': source['A']})
    df['A'][:] = 5

    self.assertFalse((source['A'] == 5).all())
def test_constructor_compound_dtypes(self):
    """Compound (structured) dtypes raise NotImplementedError; scalar
    dtypes are accepted (GH 5191, GH 10822)."""
    # GH 5191
    # compound dtypes should raise not-implementederror
    def f(dtype):
        # build a small 9x3 frame with the requested dtype
        return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)),
                         columns=["A", "B", "C"], dtype=dtype)

    self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")])

    # these work (though results may be unexpected)
    f('int64')
    f('float64')

    # 10822
    # invalid error message on dt inference
    if not is_platform_windows():
        f('M8[ns]')
def test_assign_columns(self):
    """Assigning to .columns relabels in place without moving data."""
    self.frame['hi'] = 'there'

    relabeled = self.frame.copy()
    relabeled.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']

    assert_series_equal(self.frame['C'], relabeled['baz'],
                        check_names=False)
    assert_series_equal(self.frame['hi'], relabeled['foo2'],
                        check_names=False)
def test_columns_with_dups(self):
    """Frames with duplicate column labels: relabelling, repr, block
    bookkeeping, and construction across dtypes (GH 3468, 2079, 2194)."""
    # GH 3468 related

    # basic
    df = DataFrame([[1,2]], columns=['a','a'])
    df.columns = ['a','a.1']
    str(df)
    expected = DataFrame([[1,2]], columns=['a','a.1'])
    assert_frame_equal(df, expected)

    df = DataFrame([[1,2,3]], columns=['b','a','a'])
    df.columns = ['b','a','a.1']
    str(df)
    expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])
    assert_frame_equal(df, expected)

    # with a dup index
    df = DataFrame([[1,2]], columns=['a','a'])
    df.columns = ['b','b']
    str(df)
    expected = DataFrame([[1,2]], columns=['b','b'])
    assert_frame_equal(df, expected)

    # multi-dtype
    df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])
    df.columns = list('ABCDEFG')
    str(df)
    expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))
    assert_frame_equal(df, expected)

    # this is an error because we cannot disambiguate the dup columns
    self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))

    # dups across blocks
    df_float = DataFrame(np.random.randn(10, 3),dtype='float64')
    df_int = DataFrame(np.random.randn(10, 3),dtype='int64')
    df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
    df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
    df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
    df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)

    # internal block maps must cover every (duplicated) column
    self.assertEqual(len(df._data._blknos), len(df.columns))
    self.assertEqual(len(df._data._blklocs), len(df.columns))

    # testing iget: positional access works for every duplicate
    for i in range(len(df.columns)):
        df.iloc[:,i]

    # dup columns across dtype GH 2079/2194
    vals = [[1, -1, 2.], [2, -2, 3.]]

    rs = DataFrame(vals, columns=['A', 'A', 'B'])
    xp = DataFrame(vals)
    xp.columns = ['A', 'A', 'B']
    assert_frame_equal(rs, xp)
def test_insert_column_bug_4032(self):
    # GH4032, inserting a column and renaming causing errors
    """insert() after a no-op rename must not corrupt the frame."""
    df = DataFrame({'b': [1.1, 2.2]})
    df = df.rename(columns={})

    df.insert(0, 'a', [1, 2])
    result = df.rename(columns={})
    str(result)
    expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
    assert_frame_equal(result, expected)

    df.insert(0, 'c', [1.3, 2.3])
    result = df.rename(columns={})
    str(result)
    expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
                         columns=['c', 'a', 'b'])
    assert_frame_equal(result, expected)
def test_cast_internals(self):
    """Constructing from a BlockManager honours the dtype argument
    just like constructing from the underlying series."""
    for target_dtype in (int, np.int32):
        casted = DataFrame(self.frame._data, dtype=target_dtype)
        expected = DataFrame(self.frame._series, dtype=target_dtype)
        assert_frame_equal(casted, expected)
def test_consolidate(self):
    """consolidate() merges same-dtype blocks into one, out of place by
    default and in place with inplace=True."""
    self.frame['E'] = 7.
    consolidated = self.frame.consolidate()
    self.assertEqual(len(consolidated._data.blocks), 1)

    # Ensure copy, do I want this?
    recons = consolidated.consolidate()
    self.assertIsNot(recons, consolidated)
    assert_frame_equal(recons, consolidated)

    # adding another column splits the data back into multiple blocks
    self.frame['F'] = 8.
    self.assertEqual(len(self.frame._data.blocks), 3)
    self.frame.consolidate(inplace=True)
    self.assertEqual(len(self.frame._data.blocks), 1)
def test_consolidate_inplace(self):
    """Adding many columns triggers in-place consolidation (smoke test:
    must simply not raise)."""
    # BUG FIX: a copy of self.frame was assigned to a local and never
    # used; the loop below always operated on the fixture itself.
    # triggers in-place consolidation
    for letter in range(ord('A'), ord('Z')):
        self.frame[chr(letter)] = chr(letter)
def test_as_matrix_consolidate(self):
    """as_matrix forces consolidation of an unconsolidated frame."""
    self.frame['E'] = 7.
    self.assertFalse(self.frame._data.is_consolidated())
    # the return value is irrelevant; the call must consolidate
    _ = self.frame.as_matrix()
    self.assertTrue(self.frame._data.is_consolidated())
def test_modify_values(self):
    """Writing through .values propagates back into the frame, whether
    the blocks are consolidated or not."""
    self.frame.values[5] = 5
    self.assertTrue((self.frame.values[5] == 5).all())

    # unconsolidated
    self.frame['E'] = 7.
    self.frame.values[6] = 6
    self.assertTrue((self.frame.values[6] == 6).all())
def test_boolean_set_uncons(self):
    """Boolean-mask assignment works on an unconsolidated frame."""
    self.frame['E'] = 7.

    # mirror the masked assignment on a plain ndarray copy
    expected = self.frame.values.copy()
    expected[expected > 1] = 2

    self.frame[self.frame > 1] = 2
    assert_almost_equal(expected, self.frame.values)
def test_xs_view(self):
    """
    in 0.14 this will return a view if possible
    a copy otherwise, but this is numpy dependent
    """
    dm = DataFrame(np.arange(20.).reshape(4, 5),
                   index=lrange(4), columns=lrange(5))

    # writing through the xs result should hit the underlying frame
    dm.xs(2)[:] = 10
    self.assertTrue((dm.xs(2) == 10).all())
def test_boolean_indexing(self):
    """Boolean-frame assignment sets only the matching cells; a
    wrong-length boolean vector is rejected."""
    labels = lrange(3)
    cols = ['A', 'B', 'C']
    df1 = DataFrame(np.array([[0.0, 0.5, 1.0],
                              [1.5, 2.0, 2.5],
                              [3.0, 3.5, 4.0]], dtype=float),
                    index=labels, columns=cols)
    df2 = DataFrame(np.ones((len(labels), len(cols))),
                    index=labels, columns=cols)

    expected = DataFrame(np.array([[0.0, 0.5, 1.0],
                                   [1.5, 2.0, -1],
                                   [-1, -1, -1]], dtype=float),
                         index=labels, columns=cols)

    df1[df1 > 2.0 * df2] = -1
    assert_frame_equal(df1, expected)

    # a boolean vector one element short must raise
    with assertRaisesRegexp(ValueError, 'Item wrong length'):
        df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
    """Boolean-mask assignment on an int/float frame works; adding a
    string column makes the same assignment raise TypeError."""
    df = DataFrame(
        {long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         long(1): {35: np.nan,
                   40: 0.32632316859446198,
                   43: np.nan,
                   49: 0.32632316859446198,
                   50: 0.39114724480578139},
         long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},
         long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})

    # mixed int/float ok
    df2 = df.copy()
    df2[df2>0.3] = 1
    expected = df.copy()
    # only these four cells exceed 0.3
    expected.loc[40,1] = 1
    expected.loc[49,1] = 1
    expected.loc[50,1] = 1
    expected.loc[35,4] = 1
    assert_frame_equal(df2,expected)

    # once a string column exists, boolean setting must raise
    df['foo'] = 'test'
    with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
        df[df > 0.3] = 1
def test_sum_bools(self):
    """Row-wise sum of a boolean null-mask counts every null cell."""
    frame = DataFrame(index=lrange(1), columns=lrange(10))
    null_mask = isnull(frame)
    self.assertEqual(null_mask.sum(axis=1)[0], 10)
def test_fillna_col_reordering(self):
    # fillna must preserve the (deliberately reverse-sorted) column order
    cols = ["COL." + str(i) for i in range(5, 0, -1)]
    df = DataFrame(index=lrange(20), columns=cols,
                   data=np.random.rand(20, 5))
    filled = df.fillna(method='ffill')
    self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_take(self):
# homogeneous
#----------------------------------------
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# neg indicies
order = [2,1,-1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)
self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)
# mixed-dtype
#----------------------------------------
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# neg indicies
order = [4,1,-2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float,self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_iterkv_deprecation(self):
    """Calling the deprecated iterkv() must emit a FutureWarning."""
    with tm.assert_produces_warning(FutureWarning):
        self.mixed_float.iterkv()
def test_iterkv_names(self):
    # iterating a frame yields (column_name, Series) pairs whose
    # Series .name matches the column label
    for col, ser in compat.iteritems(self.mixed_frame):
        self.assertEqual(ser.name, col)
def test_series_put_names(self):
    # the internal _series mapping keeps the column label on each Series
    for col, ser in compat.iteritems(self.mixed_frame._series):
        self.assertEqual(ser.name, col)
def test_dot(self):
    """DataFrame.dot: frame @ frame, frame @ Series, frame @ ndarray,
    index alignment, and shape-mismatch / misalignment errors."""
    a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                  columns=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                  columns=['one', 'two'])
    result = a.dot(b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    assert_frame_equal(result, expected)
    # Check alignment: an operand with a reversed index must be realigned
    # to a's columns and give the same product.  (This previously dotted
    # ``b`` again instead of ``b1``, so the alignment path was untested.)
    b1 = b.reindex(index=reversed(b.index))
    result = a.dot(b1)
    assert_frame_equal(result, expected)
    # Check series argument; the result name is not propagated
    result = a.dot(b['one'])
    assert_series_equal(result, expected['one'], check_names=False)
    self.assertTrue(result.name is None)
    result = a.dot(b1['one'])
    assert_series_equal(result, expected['one'], check_names=False)
    self.assertTrue(result.name is None)
    # can pass correct-length arrays
    row = a.ix[0].values
    result = a.dot(row)
    exp = a.dot(a.ix[0])
    assert_series_equal(result, exp)
    with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
        a.dot(row[:-1])
    a = np.random.rand(1, 5)
    b = np.random.rand(5, 1)
    A = DataFrame(a)
    B = DataFrame(b)
    # frame @ raw ndarray works
    result = A.dot(b)
    # unaligned frames (disjoint column/index labels) must raise
    df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
    df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
    assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
def test_idxmin(self):
    """idxmin agrees with a column/row-wise Series.idxmin apply, NaNs included."""
    frame = self.frame
    # seed some missing data, including whole rows and a column pair
    frame.ix[5:10] = np.nan
    frame.ix[15:20, -2:] = np.nan
    for df in [frame, self.intframe]:
        for axis in [0, 1]:
            for skipna in [True, False]:
                expected = df.apply(Series.idxmin, axis=axis,
                                    skipna=skipna)
                result = df.idxmin(axis=axis, skipna=skipna)
                assert_series_equal(result, expected)
    # an invalid axis must raise
    self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
    """idxmax agrees with a column/row-wise Series.idxmax apply, NaNs included."""
    frame = self.frame
    # seed some missing data, including whole rows and a column pair
    frame.ix[5:10] = np.nan
    frame.ix[15:20, -2:] = np.nan
    for df in [frame, self.intframe]:
        for axis in [0, 1]:
            for skipna in [True, False]:
                expected = df.apply(Series.idxmax, axis=axis,
                                    skipna=skipna)
                result = df.idxmax(axis=axis, skipna=skipna)
                assert_series_equal(result, expected)
    # an invalid axis must raise
    self.assertRaises(ValueError, frame.idxmax, axis=2)
def test_stale_cached_series_bug_473(self):
    """Chained-indexing writes must not leave a stale Series in the
    item cache (GH #473)."""
    # this is chained, but ok
    with option_context('chained_assignment',None):
        Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
                      columns=('e', 'f', 'g', 'h'))
        # repr() populates the item cache
        repr(Y)
        Y['e'] = Y['e'].astype('object')
        # chained setitem writes through the cached 'g' Series
        Y['g']['c'] = np.NaN
        repr(Y)
        result = Y.sum()
        exp = Y['g'].sum()
        # the NaN written above must be visible through a fresh lookup
        self.assertTrue(isnull(Y['g']['c']))
def test_index_namedtuple(self):
    # a namedtuple key is usable for .ix lookup as long as the index is
    # not tupleized into a MultiIndex
    from collections import namedtuple
    IndexType = namedtuple("IndexType", ["a", "b"])
    keys = [IndexType("foo", "bar"), IndexType("baz", "bof")]
    index = Index(keys, name="composite_index", tupleize_cols=False)
    df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
    self.assertEqual(df.ix[IndexType("foo", "bar")]["A"], 1)
def test_empty_nonzero(self):
    # a frame with data is not empty
    df = DataFrame([1, 2, 3])
    self.assertFalse(df.empty)
    # dropping every (all-NaN) row empties it; the transpose is empty too
    df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
    self.assertTrue(df.empty)
    self.assertTrue(df.T.empty)
def test_any_all(self):
    """any/all reductions: exercise the shared bool-op machinery, then
    smoke-test the row-wise variants with and without bool_only."""
    self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
    self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
    df = DataFrame(randn(10, 4)) > 0
    df.any(1)
    df.all(1)
    df.any(1, bool_only=True)
    df.all(1, bool_only=True)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper))
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper))
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
self.assertRaises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail:
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
self.assertFalse(r0.any())
self.assertFalse(r1.any())
else:
self.assertTrue(r0.all())
self.assertTrue(r1.all())
def test_strange_column_corruption_issue(self):
    """Adding many columns one cell at a time must not corrupt the frame."""
    df = DataFrame(index=[0, 1])
    df[0] = nan
    wasCol = {}
    # uncommenting these makes the results match
    # for col in xrange(100, 200):
    #     wasCol[col] = 1
    #     df[col] = nan
    for i, dt in enumerate(df.index):
        for col in range(100, 200):
            if col not in wasCol:
                wasCol[col] = 1
                df[col] = nan
            df[col][dt] = i
    # column 100 must be fully populated: no NaNs left behind
    myid = 100
    first = len(df.ix[isnull(df[myid]), [myid]])
    second = len(df.ix[isnull(df[myid]), [myid]])
    self.assertTrue(first == second == 0)
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
result = f(base)
self.assertTrue(result is None)
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# sortlevel
f = lambda x: x.sortlevel(0, inplace=True)
_check_f(data.set_index(['a', 'b']), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
    # per-column membership via a dict of value lists
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    d = {'A': ['a']}
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    assert_frame_equal(df.isin(d), expected)
    # non unique columns: every 'A' column is matched against d['A']
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    df.columns = ['A', 'A']
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    assert_frame_equal(df.isin(d), expected)
def test_isin_with_string_scalar(self):
    # GH4763: a bare string is technically iterable but almost certainly
    # a caller mistake, so it must raise
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    for bad in ('a', 'aaa'):
        with tm.assertRaises(TypeError):
            df.isin(bad)
def test_isin_df(self):
    # element-wise comparison aligned on both axes; NaN never matches
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
    expected = DataFrame(False, df1.index, df1.columns)
    expected['A'].loc[[1, 3]] = True
    expected['B'].loc[[0, 2]] = True
    assert_frame_equal(df1.isin(df2), expected)
    # partial overlapping columns: non-shared columns become all-False
    df2.columns = ['A', 'C']
    expected['B'] = False
    assert_frame_equal(df1.isin(df2), expected)
def test_isin_df_dupe_values(self):
    # duplicate labels in the *argument* frame are ambiguous -> ValueError
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    values = [[0, 2], [12, 4], [2, np.nan], [4, 5]]
    # just cols duped
    dupe_cols = DataFrame(values, columns=['B', 'B'])
    # just index duped
    dupe_index = DataFrame(values, columns=['A', 'B'], index=[0, 0, 1, 1])
    # cols and index both duped
    dupe_both = DataFrame(values, columns=['B', 'B'], index=[0, 0, 1, 1])
    for other in (dupe_cols, dupe_index, dupe_both):
        with tm.assertRaises(ValueError):
            df1.isin(other)
def test_isin_dupe_self(self):
    # duplicate columns in *self* are allowed; each duplicate is compared
    # against the argument positionally
    other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
    df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected.loc[0] = True
    expected.iloc[1, 1] = True
    assert_frame_equal(df.isin(other), expected)
def test_isin_against_series(self):
    # a Series argument matches per-row by index label, broadcast
    # across columns
    df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                      index=['a', 'b', 'c', 'd'])
    s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected['A'].loc['a'] = True
    expected.loc['d'] = True
    assert_frame_equal(df.isin(s), expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
assert_frame_equal(result, expected)
def test_to_csv_date_format(self):
from pandas import to_datetime
pname = '__tmp_to_csv_date_format__'
with ensure_clean(pname) as path:
for engine in [None, 'python']:
w = FutureWarning if engine == 'python' else None
dt_index = self.tsframe.index
datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
with tm.assert_produces_warning(w, check_stacklevel=False):
datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
with tm.assert_produces_warning(w, check_stacklevel=False):
datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
with tm.assert_produces_warning(w, check_stacklevel=False):
datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
with tm.assert_produces_warning(w, check_stacklevel=False):
nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with ensure_clean('csv_date_format_with_dst') as path:
# make sure we are not failing on transitions
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times+pd.Timedelta('10s')]:
time_range = np.array(range(len(i)), dtype='int64')
df = DataFrame({'A' : time_range}, index=i)
df.to_csv(path,index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path,index_col=0)
result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/London')
assert_frame_equal(result,df)
# GH11619
idx = pd.date_range('2015-01-01', '2015-12-31', freq = 'H', tz='Europe/Paris')
df = DataFrame({'values' : 1, 'idx' : idx},
index=idx)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_csv(path,index=True)
result = read_csv(path,index_col=0)
result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/Paris')
result['idx'] = pd.to_datetime(result['idx']).astype('datetime64[ns, Europe/Paris]')
assert_frame_equal(result,df)
# assert working
df.astype(str)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_pickle(path)
result = pd.read_pickle(path)
assert_frame_equal(result,df)
def test_concat_empty_dataframe_dtypes(self):
    # concat of identical empty frames keeps each column's dtype
    df = DataFrame(columns=list("abc"))
    df['a'] = df['a'].astype(np.bool_)
    df['b'] = df['b'].astype(np.int32)
    df['c'] = df['c'].astype(np.float64)
    result = pd.concat([df, df])
    for col, dtype in [('a', np.bool_), ('b', np.int32), ('c', np.float64)]:
        self.assertEqual(result[col].dtype, dtype)
    # mixing with a float64 copy promotes each column
    result = pd.concat([df, df.astype(np.float64)])
    for col, dtype in [('a', np.object_), ('b', np.float64),
                       ('c', np.float64)]:
        self.assertEqual(result[col].dtype, dtype)
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1,2,3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))
odict = OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
def test_dtypes_are_correct_after_column_slice(self):
    # GH6525: taking an iloc column slice must not corrupt .dtypes
    df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
    all_float = pd.Series(OrderedDict([('a', np.float_), ('b', np.float_),
                                       ('c', np.float_)]))
    assert_series_equal(df.dtypes, all_float)
    assert_series_equal(df.iloc[:, 2:].dtypes,
                        pd.Series(OrderedDict([('c', np.float_)])))
    # dtypes of the original frame are unchanged by the slice
    assert_series_equal(df.dtypes, all_float)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_select_dtypes_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc'))})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd']]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number,'category'])
ei = df[['b', 'c', 'd', 'f']]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_exclude(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
tm.assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
df['g'] = df.f.diff()
assert not hasattr(np, 'u8')
r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
e = df[['a', 'b']]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
e = df[['a', 'b', 'g']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
    """Calling select_dtypes with neither include nor exclude raises."""
    df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
    with tm.assertRaisesRegexp(ValueError, 'at least one of include or '
                               'exclude must be nonempty'):
        df.select_dtypes()
def test_select_dtypes_raises_on_string(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(exclude='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include=int, exclude='object')
def test_select_dtypes_bad_datetime64(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(include=['datetime64[D]'])
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_str_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
string_dtypes = set((str, 'str', np.string_, 'S1',
'unicode', np.unicode_, 'U1'))
try:
string_dtypes.add(unicode)
except NameError:
pass
for dt in string_dtypes:
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(include=[dt])
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(exclude=[dt])
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):
df.select_dtypes(['blargy, blarg, blarg'])
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
    # GH 9818: in this pandas version kwargs are applied in sorted
    # (alphabetical) order, regardless of how they were written
    df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
    expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
                         columns=list('ABCD'))
    assert_frame_equal(df.assign(D=df.A + df.B, C=df.A - df.B), expected)
    assert_frame_equal(df.assign(C=df.A - df.B, D=df.A + df.B), expected)
def test_assign_bad(self):
    """Invalid assign() calls raise the appropriate exception types."""
    df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
    # non-keyword argument
    with tm.assertRaises(TypeError):
        df.assign(lambda x: x.A)
    # a kwarg cannot reference a column created by another kwarg in the
    # same call -- 'C' does not exist yet when 'D' is evaluated
    with tm.assertRaises(AttributeError):
        df.assign(C=df.A, D=df.A + df.C)
    with tm.assertRaises(KeyError):
        df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
    with tm.assertRaises(KeyError):
        df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_dataframe_metadata(self):
df = SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
index=['a', 'b', 'c'])
df.testattr = 'XXX'
self.assertEqual(df.testattr, 'XXX')
self.assertEqual(df[['X']].testattr, 'XXX')
self.assertEqual(df.loc[['a', 'b'], :].testattr, 'XXX')
self.assertEqual(df.iloc[[0, 1], :].testattr, 'XXX')
# GH9776
self.assertEqual(df.iloc[0:1, :].testattr, 'XXX')
# GH10553
unpickled = self.round_trip_pickle(df)
assert_frame_equal(df, unpickled)
self.assertEqual(df._metadata, unpickled._metadata)
self.assertEqual(df.testattr, unpickled.testattr)
def test_nlargest(self):
# GH10393
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nlargest(5, 'a')
expected = df.sort_values('a', ascending=False).head(5)
tm.assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
    # nlargest over two columns matches a descending multi-key sort
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10]),
                       'c': np.random.permutation(10).astype('float64')})
    expected = df.sort_values(['a', 'b'], ascending=False).head(5)
    tm.assert_frame_equal(df.nlargest(5, ['a', 'b']), expected)
def test_nsmallest(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nsmallest(5, 'a')
expected = df.sort_values('a').head(5)
tm.assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nsmallest(5, ['a', 'c'])
expected = df.sort_values(['a', 'c']).head(5)
tm.assert_frame_equal(result, expected)
def test_to_panel_expanddim(self):
# GH 9762
class SubclassedFrame(DataFrame):
@property
def _constructor_expanddim(self):
return SubclassedPanel
class SubclassedPanel(Panel):
pass
index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
df = SubclassedFrame({'X':[1, 2, 3], 'Y': [4, 5, 6]}, index=index)
result = df.to_panel()
self.assertTrue(isinstance(result, SubclassedPanel))
expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
items=['X', 'Y'], major_axis=[0],
minor_axis=[0, 1, 2],
dtype='int64')
tm.assert_panel_equal(result, expected)
def skip_if_no_ne(engine='numexpr'):
    """Skip the calling test when *engine* is 'numexpr' but the numexpr
    package is not importable.  Other engines pass through untouched."""
    if engine == 'numexpr':
        try:
            import numexpr  # noqa: F401 -- availability check only
        except ImportError:
            raise nose.SkipTest("cannot query engine numexpr when numexpr not "
                                "installed")
def skip_if_no_pandas_parser(parser):
    """Skip the calling test unless *parser* is 'pandas'."""
    if parser != 'pandas':
        raise nose.SkipTest("cannot evaluate with parser {0!r}".format(parser))
class TestDataFrameQueryWithMultiIndex(object):
def check_query_with_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_named_multiindex(self):
    """Generator test: run the named-MultiIndex query checks for every
    (parser, engine) combination."""
    for parser, engine in product(['pandas'], ENGINES):
        yield self.check_query_with_named_multiindex, parser, engine
def check_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
#### LEVEL 1 ####
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_unnamed_multiindex, parser, engine
def check_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_with_partially_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_partially_named_multiindex, parser, engine
def test_query_multiindex_get_index_resolvers(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_multiindex_get_index_resolvers, parser, engine
def check_query_multiindex_get_index_resolvers(self, parser, engine):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel_with_multiindex, parser, engine
def check_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel4d_with_multiindex, parser, engine
def check_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(tm.TestCase):
    # DataFrame.query tests run with engine='numexpr', parser='pandas'.
    # Subclasses override cls.engine / cls.parser to cover the other
    # engine/parser combinations with the same test bodies.

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryNumExprPandas, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
        tm.skip_if_no_ne(cls.engine)

    @classmethod
    def tearDownClass(cls):
        super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()
        del cls.engine, cls.parser

    def test_date_query_with_attribute_access(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        # '@df.dates1' exercises attribute access on a local variable
        res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_query_no_attribute_access(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('dates1 < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates2'] = date_range('1/1/2013', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        # randomly knock out values; NaT rows must never match
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
        res = df.query('dates1 < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.set_index('dates1', inplace=True, drop=True)
        # 'index' resolves to the (datetime) index in the expression
        res = df.query('index < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.iloc[0, 0] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('index < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        d = {}
        d['dates1'] = date_range('1/1/2012', periods=n)
        d['dates3'] = date_range('1/1/2014', periods=n)
        df = DataFrame(d)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)
        expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_query_with_non_date(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame({'dates': date_range('1/1/2012', periods=n),
                        'nondate': np.arange(n)})
        # comparing a datetime column to a non-date column must raise
        ops = '==', '!=', '<', '>', '<=', '>='
        for op in ops:
            with tm.assertRaises(TypeError):
                df.query('dates %s nondate' % op, parser=parser, engine=engine)

    def test_query_syntax_error(self):
        engine, parser = self.engine, self.parser
        df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
                        "r": lrange(4, 14)})
        with tm.assertRaises(SyntaxError):
            df.query('i - +', engine=engine, parser=parser)

    def test_query_scope(self):
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)

        df = DataFrame(np.random.randn(20, 2), columns=list('ab'))

        a, b = 1, 2
        # bare names resolve to columns first ...
        res = df.query('a > b', engine=engine, parser=parser)
        expected = df[df.a > df.b]
        tm.assert_frame_equal(res, expected)

        # ... while '@' forces resolution to the local variable
        res = df.query('@a > b', engine=engine, parser=parser)
        expected = df[a > df.b]
        tm.assert_frame_equal(res, expected)

        # no local variable c
        with tm.assertRaises(UndefinedVariableError):
            df.query('@a > b > @c', engine=engine, parser=parser)

        # no column named 'c'
        with tm.assertRaises(UndefinedVariableError):
            df.query('@a > b > c', engine=engine, parser=parser)

    def test_query_doesnt_pickup_local(self):
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

        # we don't pick up the local 'sin'
        with tm.assertRaises(UndefinedVariableError):
            df.query('sin > 5', engine=engine, parser=parser)

    def test_query_builtin(self):
        from pandas.computation.engines import NumExprClobberingError
        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

        # numexpr refuses an index name that shadows a builtin function
        df.index.name = 'sin'
        with tm.assertRaisesRegexp(NumExprClobberingError,
                                   'Variables in expression.+'):
            df.query('sin > 5', engine=engine, parser=parser)

    def test_query(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])

        assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
                           df[df.a < df.b])
        assert_frame_equal(df.query('a + b > b * c', engine=engine,
                                    parser=parser),
                           df[df.a + df.b > df.b * df.c])

    def test_query_index_with_name(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=Index(range(10), name='blob'),
                       columns=['a', 'b', 'c'])
        # a named index is addressable by its name in the expression
        res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
        expec = df[(df.index < 5) & (df.a < df.b)]
        assert_frame_equal(res, expec)

        res = df.query('blob < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)

    def test_query_index_without_name(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=range(10), columns=['a', 'b', 'c'])

        # "index" should refer to the index
        res = df.query('index < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)

        # test against a scalar
        res = df.query('index < 5', engine=engine, parser=parser)
        expec = df[df.index < 5]
        assert_frame_equal(res, expec)

    def test_nested_scope(self):
        engine = self.engine
        parser = self.parser

        skip_if_no_pandas_parser(parser)

        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        expected = df[(df > 0) & (df2 > 0)]

        result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)
        assert_frame_equal(result, expected)

        result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
                         parser=parser)
        assert_frame_equal(result, expected)

        result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
                         engine=engine, parser=parser)
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        assert_frame_equal(result, expected)

        result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
        expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
        assert_frame_equal(result, expected)

    def test_nested_raises_on_local_self_reference(self):
        from pandas.computation.ops import UndefinedVariableError

        df = DataFrame(np.random.randn(5, 3))

        # can't reference ourself b/c we're a local so @ is necessary
        with tm.assertRaises(UndefinedVariableError):
            df.query('df > 0', engine=self.engine, parser=self.parser)

    def test_local_syntax(self):
        skip_if_no_pandas_parser(self.parser)

        engine, parser = self.engine, self.parser
        df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
        b = 1
        expect = df[df.a < b]
        result = df.query('a < @b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)

        expect = df[df.a < df.b]
        result = df.query('a < b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)

    def test_chained_cmp_and_in(self):
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        cols = list('abc')
        df = DataFrame(randn(100, len(cols)), columns=cols)
        res = df.query('a < b < c and a not in b not in c', engine=engine,
                       parser=parser)
        ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
        expec = df[ind]
        assert_frame_equal(res, expec)

    def test_local_variable_with_in(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        a = Series(np.random.randint(3, size=15), name='a')
        b = Series(np.random.randint(10, size=15), name='b')
        df = DataFrame({'a': a, 'b': b})

        expected = df.loc[(df.b - 1).isin(a)]
        result = df.query('b - 1 in a', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)

        b = Series(np.random.randint(10, size=15), name='b')
        expected = df.loc[(b - 1).isin(a)]
        result = df.query('@b - 1 in a', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)

    def test_at_inside_string(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        # local 'c' exists but '@c' inside a string literal must NOT be
        # substituted
        c = 1
        df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
        result = df.query('a == "@c"', engine=engine, parser=parser)
        expected = df[df.a == "@c"]
        tm.assert_frame_equal(result, expected)

    def test_query_undefined_local(self):
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
        with tm.assertRaisesRegexp(UndefinedVariableError,
                                   "local variable 'c' is not defined"):
            df.query('a == @c', engine=engine, parser=parser)

    def test_index_resolvers_come_after_columns_with_the_same_name(self):
        # NOTE(review): 'n' below is never used in this test
        n = 1
        a = np.r_[20:101:20]

        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        df.index.name = 'index'
        # a column named 'index' wins over the index itself
        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df[df['index'] > 5]
        tm.assert_frame_equal(result, expected)

        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)

        df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
        df.index.name = 'a'
        result = df.query('a > 5', engine=self.engine, parser=self.parser)
        expected = df[df.a > 5]
        tm.assert_frame_equal(result, expected)

        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)

    def test_inf(self):
        n = 10
        df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
        # NOTE(review): columns are 'a'/'b', so label 0 here does not hit
        # column 'a' — looks like 'a' was intended; as written no existing
        # column ever contains inf and both sides are empty. TODO confirm.
        df.loc[::2, 0] = np.inf
        ops = '==', '!='
        d = dict(zip(ops, (operator.eq, operator.ne)))
        for op, f in d.items():
            q = 'a %s inf' % op
            expected = df[f(df.a, np.inf)]
            result = df.query(q, engine=self.engine, parser=self.parser)
            tm.assert_frame_equal(result, expected)
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
    # Same query tests with parser='python': chained comparisons and the
    # '@' local-variable syntax are unavailable, so the overridden tests
    # use explicit parenthesised boolean forms or assert the failure.

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryNumExprPython, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'python'
        tm.skip_if_no_ne(cls.engine)
        cls.frame = _frame.copy()

    def test_date_query_no_attribute_access(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        tm.assert_frame_equal(res, expec)

    def test_date_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates2'] = date_range('1/1/2013', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.iloc[0, 0] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        # chained comparison is not implemented with the python parser
        with tm.assertRaises(NotImplementedError):
            df.query('index < 20130101 < dates3', engine=engine, parser=parser)

    def test_nested_scope(self):
        from pandas.computation.ops import UndefinedVariableError
        engine = self.engine
        parser = self.parser
        # smoke test
        x = 1
        result = pd.eval('x + 1', engine=engine, parser=parser)
        self.assertEqual(result, 2)

        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))

        # don't have the pandas parser
        with tm.assertRaises(SyntaxError):
            df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)

        with tm.assertRaises(UndefinedVariableError):
            df.query('(df>0) & (df2>0)', engine=engine, parser=parser)

        expected = df[(df > 0) & (df2 > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
                         parser=parser)
        tm.assert_frame_equal(expected, result)

        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
                         engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
    """DataFrame.query tests with engine='python', parser='pandas'."""

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryPythonPandas, cls).setUpClass()
        cls.engine = 'python'
        cls.parser = 'pandas'
        cls.frame = _frame.copy()

    def test_query_builtin(self):
        # unlike numexpr, the python engine accepts an index whose name
        # shadows a builtin ('sin') and resolves it to the index
        engine, parser = self.engine, self.parser
        nrows = upper = 10
        frame = DataFrame(np.random.randint(upper, size=(nrows, 3)),
                          columns=list('abc'))
        frame.index.name = 'sin'
        expected = frame[frame.index > 5]
        result = frame.query('sin > 5', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
    """DataFrame.query tests with engine='python' and parser='python'."""

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryPythonPython, cls).setUpClass()
        cls.engine = cls.parser = 'python'
        cls.frame = _frame.copy()

    def test_query_builtin(self):
        # the python engine tolerates an index name that shadows a builtin
        engine, parser = self.engine, self.parser
        nrows = upper = 10
        frame = DataFrame(np.random.randint(upper, size=(nrows, 3)),
                          columns=list('abc'))
        frame.index.name = 'sin'
        expected = frame[frame.index > 5]
        result = frame.query('sin > 5', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
# Parser/engine combinations exercised by the generator-style tests below.
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
class TestDataFrameQueryStrings(object):
    # nose generator-style tests of string comparison / membership
    # semantics in DataFrame.query across all parser/engine combinations.

    def check_str_query_method(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings == 'a']

        if parser != 'pandas':
            col = 'strings'
            lst = '"a"'

            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]

            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])

            # NOTE: the loop targets shadow the lhs/rhs lists, but zip()
            # has already captured them so this is safe
            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                assertRaises(NotImplementedError, df.query, ex, engine=engine,
                             parser=parser, local_dict={'strings': df.strings})
        else:
            res = df.query('"a" == strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('strings == "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[df.strings.isin(['a'])])

            expect = df[df.strings != 'a']
            res = df.query('strings != "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('"a" != strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[~df.strings.isin(['a'])])

    def test_str_query_method(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_str_query_method, parser, engine

    def test_str_list_query_method(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_str_list_query_method, parser, engine

    def check_str_list_query_method(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings.isin(['a', 'b'])]

        if parser != 'pandas':
            col = 'strings'
            lst = '["a", "b"]'

            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]

            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])

            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                with tm.assertRaises(NotImplementedError):
                    df.query(ex, engine=engine, parser=parser)
        else:
            res = df.query('strings == ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('["a", "b"] == strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            expect = df[~df.strings.isin(['a', 'b'])]

            res = df.query('strings != ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('["a", "b"] != strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

    def check_query_with_string_columns(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        if parser == 'pandas':
            res = df.query('a in b', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b)]
            assert_frame_equal(res, expec)

            res = df.query('a in b and c < d', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b) & (df.c < df.d)]
            assert_frame_equal(res, expec)
        else:
            # 'in' between string columns needs the pandas parser
            with assertRaises(NotImplementedError):
                df.query('a in b', parser=parser, engine=engine)

            with assertRaises(NotImplementedError):
                df.query('a in b and c < d', parser=parser, engine=engine)

    def test_query_with_string_columns(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_string_columns, parser, engine

    def check_object_array_eq_ne(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        res = df.query('a == b', parser=parser, engine=engine)
        exp = df[df.a == df.b]
        assert_frame_equal(res, exp)

        res = df.query('a != b', parser=parser, engine=engine)
        exp = df[df.a != df.b]
        assert_frame_equal(res, exp)

    def test_object_array_eq_ne(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_object_array_eq_ne, parser, engine

    def check_query_with_nested_strings(self, parser, engine):
        tm.skip_if_no_ne(engine)
        skip_if_no_pandas_parser(parser)
        from pandas.compat import StringIO
        # NOTE(review): sep=r'\s{2,}' below requires 2+ spaces between
        # fields in this literal — confirm the internal spacing was not
        # collapsed if this block is ever reformatted.
        raw = """id event timestamp
        1 "page 1 load" 1/1/2014 0:00:01
        1 "page 1 exit" 1/1/2014 0:00:31
        2 "page 2 load" 1/1/2014 0:01:01
        2 "page 2 exit" 1/1/2014 0:01:31
        3 "page 3 load" 1/1/2014 0:02:01
        3 "page 3 exit" 1/1/2014 0:02:31
        4 "page 1 load" 2/1/2014 1:00:01
        4 "page 1 exit" 2/1/2014 1:00:31
        5 "page 2 load" 2/1/2014 1:01:01
        5 "page 2 exit" 2/1/2014 1:01:31
        6 "page 3 load" 2/1/2014 1:02:01
        6 "page 3 exit" 2/1/2014 1:02:31
        """
        df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
                         parse_dates=['timestamp'])
        expected = df[df.event == '"page 1 load"']
        res = df.query("""'"page 1 load"' in event""", parser=parser,
                       engine=engine)
        tm.assert_frame_equal(expected, res)

    def test_query_with_nested_string(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_nested_strings, parser, engine

    def check_query_with_nested_special_character(self, parser, engine):
        skip_if_no_pandas_parser(parser)
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': ['a', 'b', 'test & test'],
                        'b': [1, 2, 3]})
        res = df.query('a == "test & test"', parser=parser, engine=engine)
        expec = df[df.a == 'test & test']
        tm.assert_frame_equal(res, expec)

    def test_query_with_nested_special_character(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_nested_special_character, parser, engine

    def check_query_lex_compare_strings(self, parser, engine):
        tm.skip_if_no_ne(engine=engine)
        import operator as opr

        a = Series(tm.choice(list('abcde'), 20))
        b = Series(np.arange(a.size))
        df = DataFrame({'X': a, 'Y': b})

        # ordering comparisons on strings are lexicographic
        ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}

        for op, func in ops.items():
            res = df.query('X %s "d"' % op, engine=engine, parser=parser)
            expected = df[func(df.X, 'd')]
            assert_frame_equal(res, expected)

    def test_query_lex_compare_strings(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_lex_compare_strings, parser, engine

    def check_query_single_element_booleans(self, parser, engine):
        tm.skip_if_no_ne(engine)
        columns = 'bid', 'bidsize', 'ask', 'asksize'
        data = np.random.randint(2, size=(1, len(columns))).astype(bool)
        df = DataFrame(data, columns=columns)
        res = df.query('bid & ask', engine=engine, parser=parser)
        expected = df[df.bid & df.ask]
        assert_frame_equal(res, expected)

    def test_query_single_element_booleans(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_single_element_booleans, parser, engine

    def check_query_string_scalar_variable(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
                           'Price': [109.70, 109.72, 183.30, 183.35]})
        e = df[df.Symbol == 'BUD US']
        symb = 'BUD US'
        r = df.query('Symbol == @symb', parser=parser, engine=engine)
        tm.assert_frame_equal(e, r)

    def test_query_string_scalar_variable(self):
        for parser, engine in product(['pandas'], ENGINES):
            yield self.check_query_string_scalar_variable, parser, engine
class TestDataFrameEvalNumExprPandas(tm.TestCase):
    """DataFrame.eval tests run with engine='numexpr', parser='pandas'."""

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalNumExprPandas, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
        tm.skip_if_no_ne()

    def setUp(self):
        # fresh 10x3 random frame for every test
        cols = list('abc')
        self.frame = DataFrame(randn(10, 3), columns=cols)

    def tearDown(self):
        del self.frame

    def test_simple_expr(self):
        result = self.frame.eval('a + b', engine=self.engine,
                                 parser=self.parser)
        expected = self.frame.a + self.frame.b
        assert_series_equal(result, expected)

    def test_bool_arith_expr(self):
        # boolean indexing inside the expression
        result = self.frame.eval('a[a < 1] + b', engine=self.engine,
                                 parser=self.parser)
        expected = self.frame.a[self.frame.a < 1] + self.frame.b
        assert_series_equal(result, expected)

    def test_invalid_type_for_operator_raises(self):
        # arithmetic between numeric and string columns must raise
        df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
        for op in ('+', '-', '*', '/'):
            with tm.assertRaisesRegexp(TypeError,
                                       "unsupported operand type\(s\) for "
                                       ".+: '.+' and '.+'"):
                df.eval('a {0} b'.format(op), engine=self.engine,
                        parser=self.parser)
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
    """Same eval tests, but parsed with the plain 'python' parser."""

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalNumExprPython, cls).setUpClass()
        cls.parser = 'python'
        cls.engine = 'numexpr'
        tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
    """Same eval tests, evaluated with the pure-'python' engine."""

    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalPythonPandas, cls).setUpClass()
        cls.parser = 'pandas'
        cls.engine = 'python'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
    """DataFrame.eval tests with engine='python' and parser='python'."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: this previously invoked the parent's tearDownClass(),
        # which skipped the inherited setup chain entirely (and ran the
        # teardown's attribute cleanup instead); call setUpClass().
        super(TestDataFrameEvalPythonPython, cls).setUpClass()
        cls.engine = cls.parser = 'python'
if __name__ == '__main__':
    # Run this module's tests directly via nose: verbose (-vvs), stop at
    # the first failure (-x) and drop into pdb on errors and failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 37.840043 | 155 | 0.539242 |
from __future__ import print_function
from copy import deepcopy
from datetime import datetime, timedelta, time, date
import sys
import operator
import re
import csv
import nose
import functools
import itertools
from itertools import product, permutations
from distutils.version import LooseVersion
from pandas.compat import(
map, zip, range, long, lrange, lmap, lzip,
OrderedDict, u, StringIO, is_platform_windows
)
from pandas import compat
from numpy import random, nan, inf
from numpy.random import randn
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pandas.core.nanops as nanops
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, Panel, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, date_range,
read_csv, timedelta_range, Timedelta, option_context, period_range)
from pandas.core.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
from pandas.util.testing import (assert_almost_equal,
assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp,
assertRaises,
makeCustomDataframe as mkdf,
ensure_clean,
SubclassedDataFrame)
from pandas.core.indexing import IndexingError
from pandas.core.common import PandasError
import pandas.util.testing as tm
import pandas.lib as lib
from numpy.testing.decorators import slow
# Join strategies exercised by the merge/join tests.
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
# Column dtypes used by the mixed-dtype fixtures and the _check_mixed_*
# helpers below.
MIXED_FLOAT_DTYPES = ['float16','float32','float64']
MIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',
                    'int32','int64']
def _check_mixed_float(df, dtype=None):
    """Assert that columns A-D of *df* carry the expected float dtypes.

    Parameters
    ----------
    df : DataFrame
        Frame whose column dtypes are checked.
    dtype : str or dict, optional
        A single dtype name applied to every column, or a per-column
        override of the canonical mixed-float layout
        (A/B float32, C float16, D float64).
    """
    dtypes = dict(A='float32', B='float32', C='float16', D='float64')
    if isinstance(dtype, compat.string_types):
        # a single dtype string overrides every column
        dtypes = dict((k, dtype) for k in dtypes)
    elif isinstance(dtype, dict):
        dtypes.update(dtype)
    # consolidated from four copy-pasted if-blocks; a falsy expected dtype
    # skips the column, exactly as the original did
    for col in 'ABCD':
        expected = dtypes.get(col)
        if expected:
            assert df.dtypes[col] == expected, (col, df.dtypes[col], expected)
def _check_mixed_int(df, dtype=None):
    """Assert that columns A-D of *df* carry the expected integer dtypes.

    Parameters
    ----------
    df : DataFrame
        Frame whose column dtypes are checked.
    dtype : str or dict, optional
        A single dtype name applied to every column, or a per-column
        override of the canonical mixed-int layout
        (A int32, B uint64, C uint8, D int64).
    """
    dtypes = dict(A='int32', B='uint64', C='uint8', D='int64')
    if isinstance(dtype, compat.string_types):
        # a single dtype string overrides every column
        dtypes = dict((k, dtype) for k in dtypes)
    elif isinstance(dtype, dict):
        dtypes.update(dtype)
    # consolidated from four copy-pasted if-blocks; a falsy expected dtype
    # skips the column, exactly as the original did
    for col in 'ABCD':
        expected = dtypes.get(col)
        if expected:
            assert df.dtypes[col] == expected, (col, df.dtypes[col], expected)
class CheckIndexing(object):
    """Mixin of DataFrame indexing get/set tests.

    Mixed into TestCase subclasses that provide the self.frame / self.tsframe
    / self.mixed_* fixtures and the unittest assertion methods.
    """
    # allow nose to split this mixin's tests across worker processes
    _multiprocess_can_split_ = True
    def test_getitem(self):
        """__getitem__: row slicing, column access, and odd column names."""
        # slicing rows
        sl = self.frame[:20]
        self.assertEqual(20, len(sl.index))
        # each column of the slice shares the sliced index
        for _, series in compat.iteritems(sl):
            self.assertEqual(20, len(series.index))
            self.assertTrue(tm.equalContents(series.index, sl.index))
        for key, _ in compat.iteritems(self.frame._series):
            self.assertIsNotNone(self.frame[key])
        # missing key raises KeyError naming the key
        self.assertNotIn('random', self.frame)
        with assertRaisesRegexp(KeyError, 'random'):
            self.frame['random']
        # columns whose names are not valid identifiers still work via []
        df = self.frame.copy()
        df['$10'] = randn(len(df))
        ad = randn(len(df))
        df['@awesome_domain'] = ad
        self.assertRaises(KeyError, df.__getitem__, 'df["$10"]')
        res = df['@awesome_domain']
        assert_numpy_array_equal(ad, res.values)
    def test_getitem_dupe_cols(self):
        """Selecting a missing label raises KeyError even with duplicate columns."""
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
        try:
            df[['baf']]
        except KeyError:
            pass
        else:
            self.fail("Dataframe failed to raise KeyError")
    def test_get(self):
        """DataFrame.get: present key, missing key (None), and default value."""
        b = self.frame.get('B')
        assert_series_equal(b, self.frame['B'])
        self.assertIsNone(self.frame.get('foo'))
        assert_series_equal(self.frame.get('foo', self.frame['B']),
                            self.frame['B'])
        # get(None) returns None on empty and non-empty frames alike
        for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:
            result = df.get(None)
            self.assertIsNone(result)
    def test_getitem_iterator(self):
        """.ix column selection accepts an arbitrary iterator of labels."""
        idx = iter(['A', 'B', 'C'])
        result = self.frame.ix[:, idx]
        expected = self.frame.ix[:, ['A', 'B', 'C']]
        assert_frame_equal(result, expected)
    def test_getitem_list(self):
        """Selecting with a list/Index of labels, missing labels, tuple columns."""
        self.frame.columns.name = 'foo'
        result = self.frame[['B', 'A']]
        result2 = self.frame[Index(['B', 'A'])]
        expected = self.frame.ix[:, ['B', 'A']]
        expected.columns.name = 'foo'
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
        # the columns' name attribute is preserved through selection
        self.assertEqual(result.columns.name, 'foo')
        # any missing label in the list raises KeyError
        with assertRaisesRegexp(KeyError, 'not in index'):
            self.frame[['B', 'A', 'food']]
        with assertRaisesRegexp(KeyError, 'not in index'):
            self.frame[Index(['B', 'A', 'foo'])]
        # tuple column labels with named levels
        df = DataFrame(randn(8, 3),
                       columns=Index([('foo', 'bar'), ('baz', 'qux'),
                                      ('peek', 'aboo')], name=['sth', 'sth2']))
        result = df[[('foo', 'bar'), ('baz', 'qux')]]
        expected = df.ix[:, :2]
        assert_frame_equal(result, expected)
        self.assertEqual(result.columns.names, ['sth', 'sth2'])
    def test_setitem_list(self):
        """Setting multiple columns at once; length mismatches raise ValueError."""
        self.frame['E'] = 'foo'
        data = self.frame[['A', 'B']]
        # assignment is positional across the listed columns, so B gets A etc.
        self.frame[['B', 'A']] = data
        assert_series_equal(self.frame['B'], data['A'], check_names=False)
        assert_series_equal(self.frame['A'], data['B'], check_names=False)
        with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):
            data[['A']] = self.frame[['A', 'B']]
        with assertRaisesRegexp(ValueError, 'Length of values does not match '
                                'length of index'):
            data['A'] = range(len(data.index) - 1)
        # row-wise list assignment through .ix, int then str dtype
        df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
        df.ix[1, ['tt1', 'tt2']] = [1, 2]
        result = df.ix[1, ['tt1', 'tt2']]
        expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
        assert_series_equal(result, expected)
        df['tt1'] = df['tt2'] = '0'
        df.ix[1, ['tt1', 'tt2']] = ['1', '2']
        result = df.ix[1, ['tt1', 'tt2']]
        expected = Series(['1', '2'], df.columns, name=1)
        assert_series_equal(result, expected)
    def test_setitem_list_not_dataframe(self):
        """Setting a column list from a raw 2-D ndarray assigns positionally."""
        data = np.random.randn(len(self.frame), 2)
        self.frame[['A', 'B']] = data
        assert_almost_equal(self.frame[['A', 'B']].values, data)
    def test_setitem_list_of_tuples(self):
        """A list of tuples is stored as a single object-dtype column."""
        tuples = lzip(self.frame['A'], self.frame['B'])
        self.frame['tuples'] = tuples
        result = self.frame['tuples']
        expected = Series(tuples, index=self.frame.index, name='tuples')
        assert_series_equal(result, expected)
    def test_setitem_mulit_index(self):
        """Setting columns under a MultiIndex aligns on index and columns.

        NOTE(review): method name has a typo ('mulit'); kept because renaming
        would change which test identifier the runner discovers.
        """
        # GH7655: align on the row labels / inner column labels, not position
        it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
            ['left', 'center', 'right']
        cols = MultiIndex.from_product(it)
        index = pd.date_range('20141006',periods=20)
        vals = np.random.randint(1, 1000, (len(index), len(cols)))
        df = pd.DataFrame(vals, columns=cols, index=index)
        i, j = df.index.values.copy(), it[-1][:]
        np.random.shuffle(i)
        # top-level assignment: shuffled rows/cols must realign to match
        df['jim'] = df['jolie'].loc[i, ::-1]
        assert_frame_equal(df['jim'], df['jolie'])
        np.random.shuffle(j)
        df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
        assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
        np.random.shuffle(j)
        df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
        assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
    def test_inplace_ops_alignment(self):
        """In-place += / -= on a column subset aligns like the binop form."""
        # GH 8511
        columns = list('abcdefg')
        X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))
        Z = 100*X_orig.iloc[:,1:-1].copy()
        block1 = list('bedcf')
        subs = list('bcdef')
        # add
        X = X_orig.copy()
        result1 = (X[block1] + Z).reindex(columns=subs)
        X[block1] += Z
        result2 = X.reindex(columns=subs)
        X = X_orig.copy()
        result3 = (X[block1] + Z[block1]).reindex(columns=subs)
        X[block1] += Z[block1]
        result4 = X.reindex(columns=subs)
        assert_frame_equal(result1, result2)
        assert_frame_equal(result1, result3)
        assert_frame_equal(result1, result4)
        # sub
        X = X_orig.copy()
        result1 = (X[block1] - Z).reindex(columns=subs)
        X[block1] -= Z
        result2 = X.reindex(columns=subs)
        X = X_orig.copy()
        result3 = (X[block1] - Z[block1]).reindex(columns=subs)
        X[block1] -= Z[block1]
        result4 = X.reindex(columns=subs)
        assert_frame_equal(result1, result2)
        assert_frame_equal(result1, result3)
        assert_frame_equal(result1, result4)
    def test_inplace_ops_identity(self):
        """In-place ops keep object identity (same object and same ._data)."""
        # GH 8511: x += y should modify x in place, not rebind it
        s_orig = Series([1, 2, 3])
        df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))
        # Series: integer add
        s = s_orig.copy()
        s2 = s
        s += 1
        assert_series_equal(s,s2)
        assert_series_equal(s_orig+1,s)
        self.assertIs(s,s2)
        self.assertIs(s._data,s2._data)
        # DataFrame: integer add
        df = df_orig.copy()
        df2 = df
        df += 1
        assert_frame_equal(df,df2)
        assert_frame_equal(df_orig+1,df)
        self.assertIs(df,df2)
        self.assertIs(df._data,df2._data)
        # Series: float add (dtype changes, identity still preserved)
        s = s_orig.copy()
        s2 = s
        s += 1.5
        assert_series_equal(s,s2)
        assert_series_equal(s_orig+1.5,s)
        # DataFrame: float add
        df = df_orig.copy()
        df2 = df
        df += 1.5
        assert_frame_equal(df,df2)
        assert_frame_equal(df_orig+1.5,df)
        self.assertIs(df,df2)
        self.assertIs(df._data,df2._data)
        # mixed dtype frame: in-place op on a single column
        arr = np.random.randint(0,10,size=5)
        df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})
        df = df_orig.copy()
        df2 = df
        df['A'] += 1
        expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})
        assert_frame_equal(df,expected)
        assert_frame_equal(df2,expected)
        self.assertIs(df._data,df2._data)
        df = df_orig.copy()
        df2 = df
        df['A'] += 1.5
        expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})
        assert_frame_equal(df,expected)
        assert_frame_equal(df2,expected)
        self.assertIs(df._data,df2._data)
    def test_getitem_boolean(self):
        """Boolean indexing: ndarray, object-dtype, Series, and frame masks."""
        # boolean ndarray indexing
        d = self.tsframe.index[10]
        indexer = self.tsframe.index > d
        indexer_obj = indexer.astype(object)
        subindex = self.tsframe.index[indexer]
        subframe = self.tsframe[indexer]
        self.assert_numpy_array_equal(subindex, subframe.index)
        with assertRaisesRegexp(ValueError, 'Item wrong length'):
            self.tsframe[indexer[:-1]]
        # object-dtype boolean array works the same
        subframe_obj = self.tsframe[indexer_obj]
        assert_frame_equal(subframe_obj, subframe)
        # a non-boolean frame as mask is rejected
        with tm.assertRaisesRegexp(ValueError, 'boolean values only'):
            self.tsframe[self.tsframe]
        # boolean Series mask
        indexer_obj = Series(indexer_obj, self.tsframe.index)
        subframe_obj = self.tsframe[indexer_obj]
        assert_frame_equal(subframe_obj, subframe)
        # a misaligned boolean Series is realigned (with a UserWarning)
        with tm.assert_produces_warning(UserWarning):
            indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
        subframe_obj = self.tsframe[indexer_obj]
        assert_frame_equal(subframe_obj, subframe)
        # df[df > 0] masks numeric columns only; others pass through unchanged
        for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
            data = df._get_numeric_data()
            bif = df[df > 0]
            bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),
                             index=data.index, columns=data.columns)
            for c in df.columns:
                if c not in bifw:
                    bifw[c] = df[c]
            bifw = bifw.reindex(columns = df.columns)
            assert_frame_equal(bif, bifw, check_dtype=False)
            for c in df.columns:
                if bif[c].dtype != bifw[c].dtype:
                    self.assertEqual(bif[c].dtype, df[c].dtype)
    def test_getitem_boolean_casting(self):
        """Boolean masking upcasts int columns to float only when NaNs appear."""
        # GH 5304: int columns without masked-out values keep their dtype
        df = self.tsframe.copy()
        df['E'] = 1
        df['E'] = df['E'].astype('int32')
        df['E1'] = df['E'].copy()
        df['F'] = 1
        df['F'] = df['F'].astype('int64')
        df['F1'] = df['F'].copy()
        casted = df[df>0]
        result = casted.get_dtype_counts()
        expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})
        assert_series_equal(result, expected)
        # introducing zeros forces those int columns to float after masking
        df.ix[1:3,['E1','F1']] = 0
        casted = df[df>0]
        result = casted.get_dtype_counts()
        expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
        assert_series_equal(result, expected)
        # where/mask dtype conversions
        df = DataFrame(data = np.random.randn(100, 50))
        df = df.where(df > 0)
        bools = df > 0
        mask = isnull(df)
        expected = bools.astype(float).mask(mask)
        result = bools.mask(mask)
        assert_frame_equal(result,expected)
    def test_getitem_boolean_list(self):
        """A plain Python list of booleans selects rows positionally."""
        df = DataFrame(np.arange(12).reshape(3, 4))
        def _checkit(lst):
            # compare against the equivalent label selection via .ix
            result = df[lst]
            expected = df.ix[df.index[lst]]
            assert_frame_equal(result, expected)
        _checkit([True, False, True])
        _checkit([True, True, True])
        _checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
    def test_boolean_index_empty_corner(self):
        """An empty boolean mask on an empty frame works for get and set."""
        # #2096
        blah = DataFrame(np.empty([0, 1]), columns=['A'],
                         index=DatetimeIndex([]))
        # both operations are smoke tests: they must not raise
        k = np.array([], bool)
        blah[k]
        blah[k] = 0
    def test_getitem_ix_mixed_integer(self):
        """.ix / [] on a mixed int-and-string index treats ints as labels."""
        df = DataFrame(np.random.randn(4, 3),
                       index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
        result = df.ix[:-1]
        expected = df.ix[df.index[:-1]]
        assert_frame_equal(result, expected)
        result = df.ix[[1, 10]]
        expected = df.ix[Index([1, 10], dtype=object)]
        assert_frame_equal(result, expected)
        # GH 11320: negative and zero integer column labels select by label
        df = pd.DataFrame({ "rna": (1.5,2.2,3.2,4.5),
                        -1000: [11,21,36,40],
                            0: [10,22,43,34],
                         1000:[0, 10, 20, 30] },columns=['rna',-1000,0,1000])
        result = df[[1000]]
        expected = df.iloc[:,[3]]
        assert_frame_equal(result, expected)
        result = df[[-1000]]
        expected = df.iloc[:,[1]]
        assert_frame_equal(result, expected)
    def test_getitem_setitem_ix_negative_integers(self):
        """Negative integers in .ix count from the end on non-int axes."""
        result = self.frame.ix[:, -1]
        assert_series_equal(result, self.frame['D'])
        result = self.frame.ix[:, [-1]]
        assert_frame_equal(result, self.frame[['D']])
        result = self.frame.ix[:, [-1, -2]]
        assert_frame_equal(result, self.frame[['D', 'C']])
        self.frame.ix[:, [-1]] = 0
        self.assertTrue((self.frame['D'] == 0).all())
        # on an integer-labelled axis, -1 is a (missing) label, giving NaN
        df = DataFrame(np.random.randn(8, 4))
        self.assertTrue(isnull(df.ix[:, [-1]].values).all())
        # #1942: negative row position get/set on a string index
        a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
        a.ix[-1] = a.ix[-2]
        assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
        self.assertEqual(a.ix[-1].name, 'T')
        self.assertEqual(a.ix[-2].name, 'S')
def test_getattr(self):
tm.assert_series_equal(self.frame.A, self.frame['A'])
self.assertRaises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
self.assertTrue((df.foobar == 5).all())
    def test_setitem(self):
        """__setitem__: Series alignment, ndarray/scalar columns, copy errors."""
        # a partial Series aligns on the index, leaving NaN elsewhere
        series = self.frame['A'][::2]
        self.frame['col5'] = series
        self.assertIn('col5', self.frame)
        tm.assert_dict_equal(series, self.frame['col5'],
                             compare_keys=False)
        series = self.frame['A']
        self.frame['col6'] = series
        tm.assert_dict_equal(series, self.frame['col6'],
                             compare_keys=False)
        # an ndarray is not a valid column key
        with tm.assertRaises(KeyError):
            self.frame[randn(len(self.frame) + 1)] = 1
        # set ndarray
        arr = randn(len(self.frame))
        self.frame['col9'] = arr
        self.assertTrue((self.frame['col9'] == arr).all())
        # scalar broadcasting
        self.frame['col7'] = 5
        assert((self.frame['col7'] == 5).all())
        self.frame['col0'] = 3.14
        assert((self.frame['col0'] == 3.14).all())
        self.frame['col8'] = 'foo'
        assert((self.frame['col8'] == 'foo').all())
        # setting a column on a sliced view raises SettingWithCopyError,
        # but the assignment still lands on the slice
        smaller = self.frame[:2]
        def f():
            smaller['col10'] = ['1', '2']
        self.assertRaises(com.SettingWithCopyError, f)
        self.assertEqual(smaller['col10'].dtype, np.object_)
        self.assertTrue((smaller['col10'] == ['1', '2']).all())
        # dtype is preserved when assigning typed ndarrays
        for dtype in ['int32','int64','float32','float64']:
            self.frame[dtype] = np.array(arr,dtype=dtype)
            self.assertEqual(self.frame[dtype].dtype.name, dtype)
        # GH 7551: setting a whole row to NaN upcasts object->float
        df = DataFrame([[0,0]])
        df.iloc[0] = np.nan
        expected = DataFrame([[np.nan,np.nan]])
        assert_frame_equal(df,expected)
        df = DataFrame([[0,0]])
        df.loc[0] = np.nan
        assert_frame_equal(df,expected)
    def test_setitem_tuple(self):
        """A tuple is accepted as a (single) column key."""
        self.frame['A', 'B'] = self.frame['A']
        assert_series_equal(self.frame['A', 'B'], self.frame['A'], check_names=False)
    def test_setitem_always_copy(self):
        """Column assignment copies the source Series; mutating the frame
        afterwards must not touch the original."""
        s = self.frame['A'].copy()
        self.frame['E'] = s
        self.frame['E'][5:10] = nan
        self.assertTrue(notnull(s[5:10]).all())
    def test_setitem_boolean(self):
        """Boolean-mask assignment mirrors the equivalent ndarray operations."""
        df = self.frame.copy()
        values = self.frame.values
        # mask from a single column
        df[df['A'] > 0] = 4
        values[values[:, 0] > 0] = 4
        assert_almost_equal(df.values, values)
        # a misaligned boolean Series mask realigns before assignment
        series = df['A'] == 4
        series = series.reindex(df.index[::-1])
        df[series] = 1
        values[values[:, 0] == 4] = 1
        assert_almost_equal(df.values, values)
        # whole-frame masks
        df[df > 0] = 5
        values[values > 0] = 5
        assert_almost_equal(df.values, values)
        df[df == 5] = 0
        values[values == 5] = 0
        assert_almost_equal(df.values, values)
        # a mask frame with fewer rows leaves the missing rows untouched
        df[df[:-1] < 0] = 2
        np.putmask(values[:-1], values[:-1] < 0, 2)
        assert_almost_equal(df.values, values)
        # a reversed mask frame realigns on the index
        df[df[::-1] == 2] = 3
        values[values == 2] = 3
        assert_almost_equal(df.values, values)
        # non-boolean mask frame is rejected
        with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '
                                'values only'):
            df[df * 0] = 2
        # masked assignment of NaN / of another frame
        mask = df > np.abs(df)
        expected = df.copy()
        df[df > np.abs(df)] = nan
        expected.values[mask.values] = nan
        assert_frame_equal(df, expected)
        expected = df.copy()
        df[df > np.abs(df)] = df * 2
        np.putmask(expected.values, mask.values, df.values * 2)
        assert_frame_equal(df, expected)
    def test_setitem_cast(self):
        """Dtype outcomes when overwriting or creating columns."""
        self.frame['D'] = self.frame['D'].astype('i8')
        self.assertEqual(self.frame['D'].dtype, np.int64)
        # #669: overwriting a float column with ints makes it int
        self.frame['B'] = 0
        self.assertEqual(self.frame['B'].dtype, np.int64)
        self.frame['B'] = np.arange(len(self.frame))
        self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))
        # overwriting an object column with a scalar adopts the scalar dtype
        self.frame['foo'] = 'bar'
        self.frame['foo'] = 0
        self.assertEqual(self.frame['foo'].dtype, np.int64)
        self.frame['foo'] = 'bar'
        self.frame['foo'] = 2.5
        self.assertEqual(self.frame['foo'].dtype, np.float64)
        self.frame['something'] = 0
        self.assertEqual(self.frame['something'].dtype, np.int64)
        self.frame['something'] = 2
        self.assertEqual(self.frame['something'].dtype, np.int64)
        self.frame['something'] = 2.5
        self.assertEqual(self.frame['something'].dtype, np.float64)
        # GH 7704: setting one cell to a string only upcasts that column
        df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
        df['event'] = np.nan
        df.loc[10,'event'] = 'foo'
        result = df.get_dtype_counts().sort_values()
        expected = Series({'float64' : 3, 'object' : 1 }).sort_values()
        assert_series_equal(result, expected)
    def test_setitem_boolean_column(self):
        """.ix boolean-row, single-column assignment matches raw value edit."""
        expected = self.frame.copy()
        mask = self.frame['A'] > 0
        self.frame.ix[mask, 'B'] = 0
        expected.values[mask.values, 1] = 0
        assert_frame_equal(self.frame, expected)
    def test_setitem_corner(self):
        """Corner cases: re-adding a deleted column, odd keys, dtype of new
        columns on empty/object frames."""
        df = DataFrame({'B': [1., 2., 3.],
                        'C': ['a', 'b', 'c']},
                       index=np.arange(3))
        del df['B']
        df['B'] = [1., 2., 3.]
        self.assertIn('B', df)
        self.assertEqual(len(df.columns), 2)
        df['A'] = 'beginning'
        df['E'] = 'foo'
        df['D'] = 'bar'
        # a datetime object is a legal column key; reassignment overwrites
        df[datetime.now()] = 'date'
        df[datetime.now()] = 5.
        # an all-object frame built column by column
        dm = DataFrame(index=self.frame.index)
        dm['A'] = 'foo'
        dm['B'] = 'bar'
        self.assertEqual(len(dm.columns), 2)
        self.assertEqual(dm.values.dtype, np.object_)
        # numeric columns keep their own dtype alongside object columns
        dm['C'] = 1
        self.assertEqual(dm['C'].dtype, np.int64)
        dm['E'] = 1.
        self.assertEqual(dm['E'].dtype, np.float64)
        # overwriting with object keeps position, becomes object dtype
        dm['A'] = 'bar'
        self.assertEqual('bar', dm['A'][0])
        dm = DataFrame(index=np.arange(3))
        dm['A'] = 1
        dm['foo'] = 'bar'
        del dm['foo']
        dm['foo'] = 'bar'
        self.assertEqual(dm['foo'].dtype, np.object_)
        # string digits are NOT coerced to numbers
        dm['coercable'] = ['1', '2', '3']
        self.assertEqual(dm['coercable'].dtype, np.object_)
    def test_setitem_corner2(self):
        """.ix assignment to rows found by a boolean filter sticks."""
        data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
                "cruft": np.random.random(20)}
        df = DataFrame(data)
        ix = df[df['title'] == 'bar'].index
        df.ix[ix, ['title']] = 'foobar'
        df.ix[ix, ['cruft']] = 0
        assert(df.ix[1, 'title'] == 'foobar')
        assert(df.ix[1, 'cruft'] == 0)
    def test_setitem_ambig(self):
        """Assigning Decimal / string Series: Decimals stay, strings go object."""
        from decimal import Decimal
        # created as float type
        dm = DataFrame(index=lrange(3), columns=lrange(3))
        coercable_series = Series([Decimal(1) for _ in range(3)],
                                  index=lrange(3))
        uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
        dm[0] = np.ones(3)
        self.assertEqual(len(dm.columns), 3)
        dm[1] = coercable_series
        self.assertEqual(len(dm.columns), 3)
        dm[2] = uncoercable_series
        self.assertEqual(len(dm.columns), 3)
        self.assertEqual(dm[2].dtype, np.object_)
    def test_setitem_clear_caches(self):
        """Assigning into a column invalidates the cached column Series."""
        # GH #304
        df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
                       index=[0, 1, 2, 3])
        df.insert(2, 'z', np.nan)
        # cache the column, then mutate it through .ix
        foo = df['z']
        df.ix[2:, 'z'] = 42
        expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
        # a fresh object must be returned, reflecting the new values
        self.assertIsNot(df['z'], foo)
        assert_series_equal(df['z'], expected)
    def test_setitem_None(self):
        """None is a valid column label for set and get."""
        # GH #766
        self.frame[None] = self.frame['A']
        assert_series_equal(self.frame.iloc[:,-1], self.frame['A'], check_names=False)
        assert_series_equal(self.frame.loc[:,None], self.frame['A'], check_names=False)
        assert_series_equal(self.frame[None], self.frame['A'], check_names=False)
        # smoke test: repr must not choke on the None column
        repr(self.frame)
    def test_setitem_empty(self):
        """Assignment through an all-False boolean mask is a no-op."""
        # GH 9596
        df = pd.DataFrame({'a': ['1', '2', '3'],
                           'b': ['11', '22', '33'],
                           'c': ['111', '222', '333']})
        result = df.copy()
        result.loc[result.b.isnull(), 'a'] = result.a
        assert_frame_equal(result, df)
    def test_setitem_empty_frame_with_boolean(self):
        """Masked assignment on empty/degenerate frames leaves them unchanged."""
        # GH 7071
        for dtype in ('float', 'int64'):
            for df in [
                    pd.DataFrame(dtype=dtype),
                    pd.DataFrame(dtype=dtype, index=[1]),
                    pd.DataFrame(dtype=dtype, columns=['A']),
            ]:
                df2 = df.copy()
                df[df > df2] = 47
                assert_frame_equal(df, df2)
    def test_delitem_corner(self):
        """del removes a column; deleting a missing column raises KeyError."""
        f = self.frame.copy()
        del f['D']
        self.assertEqual(len(f.columns), 3)
        self.assertRaises(KeyError, f.__delitem__, 'D')
        del f['B']
        self.assertEqual(len(f.columns), 2)
    def test_getitem_fancy_2d(self):
        """.ix 2-D selection: column lists, row lists, slices, label slices."""
        f = self.frame
        ix = f.ix
        assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))
        subidx = self.frame.index[[5, 4, 1]]
        assert_frame_equal(ix[subidx, ['B', 'A']],
                           f.reindex(index=subidx, columns=['B', 'A']))
        # integer slicing is positional
        assert_frame_equal(ix[5:10], f[5:10])
        assert_frame_equal(ix[5:10, :], f[5:10])
        assert_frame_equal(ix[:5, ['A', 'B']],
                           f.reindex(index=f.index[:5], columns=['A', 'B']))
        # label-based slicing is endpoint-inclusive
        expected = ix[5:11]
        result = ix[f.index[5]:f.index[10]]
        assert_frame_equal(expected, result)
        assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))
        # ix[5:10] is a view: writing through it mutates the parent frame
        exp = f.copy()
        ix[5:10].values[:] = 5
        exp.values[5:10] = 5
        assert_frame_equal(f, exp)
        # a boolean DataFrame is not a valid .ix key
        self.assertRaises(ValueError, ix.__getitem__, f > 0.5)
    def test_slice_floats(self):
        """.ix label slicing works with a float index."""
        index = [52195.504153, 52196.303147, 52198.369883]
        df = DataFrame(np.random.rand(3, 2), index=index)
        s1 = df.ix[52195.1:52196.5]
        self.assertEqual(len(s1), 2)
        s1 = df.ix[52195.1:52196.6]
        self.assertEqual(len(s1), 2)
        s1 = df.ix[52195.1:52198.9]
        self.assertEqual(len(s1), 3)
    def test_getitem_fancy_slice_integers_step(self):
        """Stepped integer slices via .ix work for get and set."""
        df = DataFrame(np.random.randn(10, 5))
        # getitem is a smoke test: must not raise (result intentionally unused)
        result = df.ix[:8:2]
        df.ix[:8:2] = np.nan
        self.assertTrue(isnull(df.ix[:8:2]).values.all())
    def test_getitem_setitem_integer_slice_keyerrors(self):
        """Integer-label slices on a monotonic int index; KeyError when the
        index is not monotonic and an endpoint label is missing."""
        df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
        # this is OK
        cp = df.copy()
        cp.ix[4:10] = 0
        self.assertTrue((cp.ix[4:10] == 0).values.all())
        # so is this: missing endpoints are tolerated on a monotonic index
        cp = df.copy()
        cp.ix[3:11] = 0
        self.assertTrue((cp.ix[3:11] == 0).values.all())
        result = df.ix[4:10]
        result2 = df.ix[3:11]
        expected = df.reindex([4, 6, 8, 10])
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
        # non-monotonic index + missing slice endpoint -> KeyError
        df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
        self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))
        self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)
    def test_setitem_fancy_2d(self):
        """.ix 2-D assignment: column lists, row subsets, slices, label ranges."""
        # case 1: assign scalar to a column list
        f = self.frame
        ix = f.ix
        frame = self.frame.copy()
        expected = frame.copy()
        frame.ix[:, ['B', 'A']] = 1
        expected['B'] = 1.
        expected['A'] = 1.
        assert_frame_equal(frame, expected)
        # case 2: assign 2-D values to a row subset, by label and by position
        frame = self.frame.copy()
        frame2 = self.frame.copy()
        expected = frame.copy()
        subidx = self.frame.index[[5, 4, 1]]
        values = randn(3, 2)
        frame.ix[subidx, ['B', 'A']] = values
        frame2.ix[[5, 4, 1], ['B', 'A']] = values
        expected['B'].ix[subidx] = values[:, 0]
        expected['A'].ix[subidx] = values[:, 1]
        assert_frame_equal(frame, expected)
        assert_frame_equal(frame2, expected)
        # case 3: positional row slice, scalar then array
        frame = self.frame.copy()
        expected1 = self.frame.copy()
        frame.ix[5:10] = 1.
        expected1.values[5:10] = 1.
        assert_frame_equal(frame, expected1)
        expected2 = self.frame.copy()
        arr = randn(5, len(frame.columns))
        frame.ix[5:10] = arr
        expected2.values[5:10] = arr
        assert_frame_equal(frame, expected2)
        # case 4: same with an explicit full-column slice
        frame = self.frame.copy()
        frame.ix[5:10, :] = 1.
        assert_frame_equal(frame, expected1)
        frame.ix[5:10, :] = arr
        assert_frame_equal(frame, expected2)
        # case 5: head-of-frame assignment by label list and position list
        frame = self.frame.copy()
        frame2 = self.frame.copy()
        expected = self.frame.copy()
        values = randn(5, 2)
        frame.ix[:5, ['A', 'B']] = values
        expected['A'][:5] = values[:, 0]
        expected['B'][:5] = values[:, 1]
        assert_frame_equal(frame, expected)
        frame2.ix[:5, [0, 1]] = values
        assert_frame_equal(frame2, expected)
        # case 6: label slice on rows is endpoint-inclusive
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[frame.index[5]:frame.index[10]] = 5.
        expected.values[5:11] = 5
        assert_frame_equal(frame, expected)
        # case 7: positional and label column slices are equivalent
        frame = self.frame.copy()
        frame2 = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[:, 1:3] = 4.
        expected.values[:, 1:3] = 4.
        assert_frame_equal(frame, expected)
        frame.ix[:, 'B':'C'] = 4.
        assert_frame_equal(frame, expected)
        # boolean-filtered whole-row assignment
        frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
                          columns=['a', 'b'])
        lst = [100]
        lst.extend([np.nan] * 4)
        expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
                             columns=['a', 'b'])
        frame[frame['a'] == 2] = 100
        assert_frame_equal(frame, expected)
    def test_fancy_getitem_slice_mixed(self):
        """Column slices: copy on mixed frames, view on single-block frames."""
        sliced = self.mixed_frame.ix[:, -3:]
        self.assertEqual(sliced['D'].dtype, np.float64)
        # on a homogeneous frame the slice is a view; setting on it raises
        # SettingWithCopyError, yet the write propagates to the parent
        sliced = self.frame.ix[:, -3:]
        def f():
            sliced['C'] = 4.
        self.assertRaises(com.SettingWithCopyError, f)
        self.assertTrue((self.frame['C'] == 4).all())
    def test_fancy_setitem_int_labels(self):
        """.ix assignment with integer labels on an integer index (labels,
        not positions)."""
        # integer index defaults to label-based indexing
        df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[[0, 2, 4]] = 5
        exp.values[:3] = 5
        assert_frame_equal(tmp, exp)
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[6] = 5
        exp.values[3] = 5
        assert_frame_equal(tmp, exp)
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[:, 2] = 5
        exp[2] = 5
        assert_frame_equal(tmp, exp)
    def test_fancy_getitem_int_labels(self):
        """.ix getitem with integer labels on an integer index."""
        df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
        result = df.ix[[4, 2, 0], [2, 0]]
        expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
        assert_frame_equal(result, expected)
        result = df.ix[[4, 2, 0]]
        expected = df.reindex(index=[4, 2, 0])
        assert_frame_equal(result, expected)
        result = df.ix[4]
        expected = df.xs(4)
        assert_series_equal(result, expected)
        result = df.ix[:, 3]
        expected = df[3]
        assert_series_equal(result, expected)
    def test_fancy_index_int_labels_exceptions(self):
        """Setting with labels missing from the index raises KeyError."""
        df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
        # labels 1 and 3 are not in the even-numbered index
        self.assertRaises(KeyError, df.ix.__setitem__,
                          ([0, 1, 2], [2, 3, 4]), 5)
        # try to set indices not contained in frame
        self.assertRaises(KeyError,
                          self.frame.ix.__setitem__,
                          ['foo', 'bar', 'baz'], 1)
        self.assertRaises(KeyError,
                          self.frame.ix.__setitem__,
                          (slice(None, None), ['E']), 1)
        # partial setting now allows this GH2578
        #self.assertRaises(KeyError,
        #                  self.frame.ix.__setitem__,
        #                  (slice(None, None), 'E'), 1)
    def test_setitem_fancy_mixed_2d(self):
        """.ix assignment on a mixed-dtype frame: blocks, rows, row copies."""
        self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
        result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
        self.assertTrue((result.values == 5).all())
        self.mixed_frame.ix[5] = np.nan
        self.assertTrue(isnull(self.mixed_frame.ix[5]).all())
        self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
        assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
                            check_names=False)
        # #1432: list assignment to a mixed-dtype row
        df = DataFrame({1: [1., 2., 3.],
                        2: [3, 4, 5]})
        self.assertTrue(df._is_mixed_type)
        df.ix[1] = [5, 10]
        expected = DataFrame({1: [1., 5., 3.],
                              2: [3, 10, 5]})
        assert_frame_equal(df, expected)
    def test_ix_align(self):
        """Assigning a Series through .ix aligns on the index, both axes."""
        b = Series(randn(10), name=0).sort_values()
        df_orig = DataFrame(randn(10, 4))
        # whole column / whole row
        df = df_orig.copy()
        df.ix[:, 0] = b
        assert_series_equal(df.ix[:, 0].reindex(b.index), b)
        dft = df_orig.T
        dft.ix[0, :] = b
        assert_series_equal(dft.ix[0, :].reindex(b.index), b)
        # partial slice of a column / row
        df = df_orig.copy()
        df.ix[:5, 0] = b
        s = df.ix[:5, 0]
        assert_series_equal(s, b.reindex(s.index))
        dft = df_orig.T
        dft.ix[0, :5] = b
        s = dft.ix[0, :5]
        assert_series_equal(s, b.reindex(s.index))
        # explicit label list
        df = df_orig.copy()
        idx = [0, 1, 3, 5]
        df.ix[idx, 0] = b
        s = df.ix[idx, 0]
        assert_series_equal(s, b.reindex(s.index))
        dft = df_orig.T
        dft.ix[0, idx] = b
        s = dft.ix[0, idx]
        assert_series_equal(s, b.reindex(s.index))
    def test_ix_frame_align(self):
        """Assigning a DataFrame through .ix aligns on the row index."""
        b = DataFrame(np.random.randn(3, 4))
        df_orig = DataFrame(randn(10, 4))
        df = df_orig.copy()
        df.ix[:3] = b
        out = b.ix[:3]
        assert_frame_equal(out, b)
        # alignment still holds after sorting the source frame's index
        b.sort_index(inplace=True)
        df = df_orig.copy()
        df.ix[[0, 1, 2]] = b
        out = df.ix[[0, 1, 2]].reindex(b.index)
        assert_frame_equal(out, b)
        df = df_orig.copy()
        df.ix[:3] = b
        out = df.ix[:3]
        assert_frame_equal(out, b.reindex(out.index))
    def test_getitem_setitem_non_ix_labels(self):
        """Label slicing with df[start:end] matches .ix and is inclusive."""
        df = tm.makeTimeDataFrame()
        start, end = df.index[[5, 10]]
        result = df.ix[start:end]
        result2 = df[start:end]
        expected = df[5:11]
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
        result = df.copy()
        result.ix[start:end] = 0
        result2 = df.copy()
        result2[start:end] = 0
        expected = df.copy()
        expected[5:11] = 0
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
    def test_ix_multi_take_nonint_index(self):
        """Integer lists on both axes of .ix act positionally for str labels."""
        df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                       columns=['a', 'b'])
        rs = df.ix[[0], [0]]
        xp = df.reindex(['x'], columns=['a'])
        assert_frame_equal(rs, xp)
    def test_ix_multi_take_multiindex(self):
        """Positional .ix lists also work with MultiIndex columns."""
        df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                       columns=[['a', 'b'], ['1', '2']])
        rs = df.ix[[0], [0]]
        xp = df.reindex(['x'], columns=[('a', '1')])
        assert_frame_equal(rs, xp)
    def test_ix_dup(self):
        """Label slicing on an index with duplicates spans all duplicates."""
        idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
        df = DataFrame(np.random.randn(len(idx), 3), idx)
        sub = df.ix[:'d']
        assert_frame_equal(sub, df)
        sub = df.ix['a':'c']
        assert_frame_equal(sub, df.ix[0:4])
        sub = df.ix['b':'d']
        assert_frame_equal(sub, df.ix[2:])
    def test_getitem_fancy_1d(self):
        """.ix lower-dimensional access: cross-sections, columns, views."""
        f = self.frame
        ix = f.ix
        # return self if no slicing...for now
        self.assertIs(ix[:, :], f)
        # low dimensional slice
        xs1 = ix[2, ['C', 'B', 'A']]
        xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
        assert_series_equal(xs1, xs2)
        ts1 = ix[5:10, 2]
        ts2 = f[f.columns[2]][5:10]
        assert_series_equal(ts1, ts2)
        # positional xs
        xs1 = ix[0]
        xs2 = f.xs(f.index[0])
        assert_series_equal(xs1, xs2)
        xs1 = ix[f.index[5]]
        xs2 = f.xs(f.index[5])
        assert_series_equal(xs1, xs2)
        # single column
        assert_series_equal(ix[:, 'A'], f['A'])
        # return view: writing through ix[5] / ix[:, 1] mutates the parent
        exp = f.copy()
        exp.values[5] = 4
        ix[5][:] = 4
        assert_frame_equal(exp, f)
        exp.values[:, 1] = 6
        ix[:, 1][:] = 6
        assert_frame_equal(exp, f)
        # slice of mixed-frame
        xs = self.mixed_frame.ix[5]
        exp = self.mixed_frame.xs(self.mixed_frame.index[5])
        assert_series_equal(xs, exp)
    def test_setitem_fancy_1d(self):
        """.ix lower-dimensional assignment: row sections, columns, full rows."""
        # case 1: set cross-section for indices
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
        expected['C'][2] = 1.
        expected['B'][2] = 2.
        expected['A'][2] = 3.
        assert_frame_equal(frame, expected)
        # positional column list gives the same result
        frame2 = self.frame.copy()
        frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
        assert_frame_equal(frame, expected)
        # case 2, set a section of a column
        frame = self.frame.copy()
        expected = self.frame.copy()
        vals = randn(5)
        expected.values[5:10, 2] = vals
        frame.ix[5:10, 2] = vals
        assert_frame_equal(frame, expected)
        frame2 = self.frame.copy()
        frame2.ix[5:10, 'B'] = vals
        assert_frame_equal(frame, expected)
        # case 3: full xs, by position and by label
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[4] = 5.
        expected.values[4] = 5.
        assert_frame_equal(frame, expected)
        frame.ix[frame.index[4]] = 6.
        expected.values[4] = 6.
        assert_frame_equal(frame, expected)
        # single column
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[:, 'A'] = 7.
        expected['A'] = 7.
        assert_frame_equal(frame, expected)
    def test_getitem_fancy_scalar(self):
        """Scalar .ix[row, col] lookup matches column-Series access."""
        f = self.frame
        ix = f.ix
        # individual value (every 5th row to keep the loop fast)
        for col in f.columns:
            ts = f[col]
            for idx in f.index[::5]:
                assert_almost_equal(ix[idx, col], ts[idx])
    def test_setitem_fancy_scalar(self):
        """Scalar .ix[row, col] assignment matches direct value editing."""
        f = self.frame
        expected = self.frame.copy()
        ix = f.ix
        # individual value (every 5th row to keep the loop fast)
        for j, col in enumerate(f.columns):
            ts = f[col]
            for idx in f.index[::5]:
                i = f.index.get_loc(idx)
                val = randn()
                expected.values[i, j] = val
                ix[idx, col] = val
                assert_frame_equal(f, expected)
    def test_getitem_fancy_boolean(self):
        """.ix getitem with boolean vectors on either axis."""
        f = self.frame
        ix = f.ix
        # boolean column selector
        expected = f.reindex(columns=['B', 'D'])
        result = ix[:, [False, True, False, True]]
        assert_frame_equal(result, expected)
        # combined with a positional row slice
        expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
        result = ix[5:10, [False, True, False, True]]
        assert_frame_equal(result, expected)
        # boolean row selector
        boolvec = f.index > f.index[7]
        expected = f.reindex(index=f.index[boolvec])
        result = ix[boolvec]
        assert_frame_equal(result, expected)
        result = ix[boolvec, :]
        assert_frame_equal(result, expected)
        result = ix[boolvec, 2:]
        expected = f.reindex(index=f.index[boolvec],
                             columns=['C', 'D'])
        assert_frame_equal(result, expected)
    def test_setitem_fancy_boolean(self):
        """.ix setitem through a boolean mask, whole-row and column subset."""
        # from 2d, set with booleans
        frame = self.frame.copy()
        expected = self.frame.copy()
        mask = frame['A'] > 0
        frame.ix[mask] = 0.
        expected.values[mask.values] = 0.
        assert_frame_equal(frame, expected)
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[mask, ['A', 'B']] = 0.
        expected.values[mask.values, :2] = 0.
        assert_frame_equal(frame, expected)
    def test_getitem_fancy_ints(self):
        """Integer lists in .ix act positionally on non-integer axes."""
        result = self.frame.ix[[1, 4, 7]]
        expected = self.frame.ix[self.frame.index[[1, 4, 7]]]
        assert_frame_equal(result, expected)
        result = self.frame.ix[:, [2, 0, 1]]
        expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]
        assert_frame_equal(result, expected)
    def test_getitem_setitem_fancy_exceptions(self):
        """Three indexers on a 2-D frame raise IndexingError."""
        ix = self.frame.ix
        with assertRaisesRegexp(IndexingError, 'Too many indexers'):
            ix[:, :, :]
        with assertRaises(IndexingError):
            ix[:, :, :] = 1
    def test_getitem_setitem_boolean_misaligned(self):
        """A reversed boolean Series mask realigns on labels for get and set."""
        # boolean index misaligned labels
        mask = self.frame['A'][::-1] > 1
        result = self.frame.ix[mask]
        expected = self.frame.ix[mask[::-1]]
        assert_frame_equal(result, expected)
        cp = self.frame.copy()
        expected = self.frame.copy()
        cp.ix[mask] = 0
        expected.ix[mask] = 0
        assert_frame_equal(cp, expected)
    def test_getitem_setitem_boolean_multi(self):
        """Boolean masks on both axes of .ix simultaneously."""
        df = DataFrame(np.random.randn(3, 2))
        # get
        k1 = np.array([True, False, True])
        k2 = np.array([False, True])
        result = df.ix[k1, k2]
        expected = df.ix[[0, 2], [1]]
        assert_frame_equal(result, expected)
        # set
        expected = df.copy()
        df.ix[np.array([True, False, True]),
              np.array([False, True])] = 5
        expected.ix[[0, 2], [1]] = 5
        assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
    """Label-based slicing semantics on float indexes, and deprecation of
    float indexers in positional (.iloc) slicing (GH 2727, GH 4892)."""
    index = Index([1.5, 2, 3, 4, 5])
    df = DataFrame(np.random.randn(5, 5), index=index)
    # label slices on a float index are inclusive of both endpoints
    result = df.ix[1.5:4]
    expected = df.reindex([1.5, 2, 3, 4])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.ix[4:5]
    expected = df.reindex([4, 5])  # reindex with int
    assert_frame_equal(result, expected, check_index_type=False)
    self.assertEqual(len(result), 2)
    result = df.ix[4:5]
    expected = df.reindex([4.0, 5.0])  # reindex with float
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 2)
    # loc_float changes this to work properly
    result = df.ix[1:2]
    expected = df.iloc[0:2]
    assert_frame_equal(result, expected)
    df.ix[1:2] = 0
    result = df[1:2]
    self.assertTrue((result==0).all().all())
    # #2727
    index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
    df = DataFrame(np.random.randn(5, 5), index=index)
    # positional slicing only via iloc!
    # stacklevel=False -> needed stacklevel depends on index type
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = df.iloc[1.0:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.iloc[4:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 1)
    # GH 4892, float indexers in iloc are deprecated
    import warnings
    # NOTE: this mutates the process-wide warning filters and only restores
    # the 'default' action afterwards, not the previous filter state.
    warnings.filterwarnings(action='error', category=FutureWarning)
    cp = df.copy()
    def f():
        cp.iloc[1.0:5] = 0
    self.assertRaises(FutureWarning, f)
    def f():
        result = cp.iloc[1.0:5] == 0
    self.assertRaises(FutureWarning, f)
    # `result` still holds the value from the earlier df[1:2] assignment path
    self.assertTrue(result.values.all())
    self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())
    warnings.filterwarnings(action='default', category=FutureWarning)
    cp = df.copy()
    cp.iloc[4:5] = 0
    self.assertTrue((cp.iloc[4:5] == 0).values.all())
    self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())
    # float slicing
    result = df.ix[1.0:5]
    expected = df
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 5)
    result = df.ix[1.1:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 4)
    result = df.ix[4.51:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 1)
    result = df.ix[1.0:5.0]
    expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 5)
    cp = df.copy()
    cp.ix[1.0:5.0] = 0
    result = cp.ix[1.0:5.0]
    self.assertTrue((result == 0).values.all())
def test_setitem_single_column_mixed(self):
    """Setting NaN into every other row of an object column via .ix."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['str'] = 'qux'
    # step-2 slice leaves alternating NaN / 'qux'
    df.ix[::2, 'str'] = nan
    expected = [nan, 'qux', nan, 'qux', nan]
    assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
    """Setting NaT / nan into a datetime64 column keeps the dtype and nulls cells."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['timestamp'] = Timestamp('20010102')
    # check our dtypes
    result = df.get_dtype_counts()
    expected = Series({'float64': 3, 'datetime64[ns]': 1})
    assert_series_equal(result, expected)
    # set an allowable datetime64 type
    from pandas import tslib
    df.ix['b', 'timestamp'] = tslib.iNaT
    self.assertTrue(com.isnull(df.ix['b', 'timestamp']))
    # allow this syntax
    df.ix['c', 'timestamp'] = nan
    self.assertTrue(com.isnull(df.ix['c', 'timestamp']))
    # allow this syntax
    df.ix['d', :] = nan
    self.assertTrue(com.isnull(df.ix['c', :]).all() == False)
    # as of GH 3216 this will now work!
    # try to set with a list like item
    #self.assertRaises(
    #    Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
    """Assigning a DataFrame / ndarray block through .ix, covering aligned,
    row-unaligned, key-unaligned and upcasting cases (GH 3216)."""
    piece = self.frame.ix[:2, ['A', 'B']]
    self.frame.ix[-2:, ['A', 'B']] = piece.values
    assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # GH 3216
    # already aligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])
    key = (slice(None,2), ['A', 'B'])
    f.ix[key] = piece
    assert_almost_equal(f.ix[0:2, ['A', 'B']].values,
                        piece.values)
    # rows unaligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])
    key = (slice(None,2), ['A', 'B'])
    f.ix[key] = piece
    # only the overlapping rows are written
    assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,
                        piece.values[0:2])
    # key is unaligned with values
    f = self.mixed_frame.copy()
    piece = f.ix[:2, ['A']]
    piece.index = f.index[-2:]
    key = (slice(-2, None), ['A', 'B'])
    f.ix[key] = piece
    # column B has no source data, so it becomes NaN
    piece['B'] = np.nan
    assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # ndarray
    f = self.mixed_frame.copy()
    piece = self.mixed_frame.ix[:2, ['A', 'B']]
    key = (slice(-2, None), ['A', 'B'])
    f.ix[key] = piece.values
    assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
                        piece.values)
    # needs upcasting
    df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
    df2 = df.copy()
    df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
    expected = df.reindex(columns=['A','B'])
    expected += 0.5
    expected['C'] = df['C']
    assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
    """Assigning a DataFrame (not raw values) through .ix aligns on labels."""
    piece = self.frame.ix[:2, ['A', 'B']]
    piece.index = self.frame.index[-2:]
    piece.columns = ['A', 'B']
    self.frame.ix[-2:, ['A', 'B']] = piece
    assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
                        piece.values)
def test_setitem_fancy_exceptions(self):
    # TODO: placeholder — no assertions were ever implemented
    pass
def test_getitem_boolean_missing(self):
    # TODO: placeholder — no assertions were ever implemented
    pass
def test_setitem_boolean_missing(self):
    # TODO: placeholder — no assertions were ever implemented
    pass
def test_getitem_setitem_ix_duplicates(self):
    """.ix on a duplicate-label index returns all matching rows (GH 1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])
    result = df.ix['foo']
    expected = df[:2]
    assert_frame_equal(result, expected)
    result = df.ix['bar']
    expected = df.ix[[2, 4]]
    assert_frame_equal(result, expected)
    # a unique label still returns a Series
    result = df.ix['baz']
    expected = df.ix[3]
    assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
    """List and boolean .ix selection still work with duplicate row labels (GH 1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])
    result = df.ix[['bar']]
    exp = df.ix[[2, 4]]
    assert_frame_equal(result, exp)
    result = df.ix[df[1] > 0]
    exp = df[df[1] > 0]
    assert_frame_equal(result, exp)
    result = df.ix[df[0] > 0]
    exp = df[df[0] > 0]
    assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
    """Boolean scalars are not valid .ix labels; both get and set raise KeyError (GH 2199)."""
    # #2199
    df = DataFrame({'a': [1, 2, 3]})
    for bad_key in (False, True):
        self.assertRaises(KeyError, df.ix.__getitem__, bad_key)
        self.assertRaises(KeyError, df.ix.__setitem__, bad_key, 0)
def test_getitem_list_duplicates(self):
    """Selecting a column list from duplicate column labels keeps the columns name (GH 1943)."""
    # #1943
    df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
    df.columns.name = 'foo'
    result = df[['B', 'C']]
    self.assertEqual(result.columns.name, 'foo')
    expected = df.ix[:, 2:]
    assert_frame_equal(result, expected)
def test_get_value(self):
    """get_value must agree with column-then-row indexing for every cell."""
    for row_label in self.frame.index:
        for col_label in self.frame.columns:
            via_get_value = self.frame.get_value(row_label, col_label)
            via_getitem = self.frame[col_label][row_label]
            assert_almost_equal(via_get_value, via_getitem)
def test_iteritems(self):
    """Iterating a frame with duplicate column labels still yields Series values."""
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
    for _, column in compat.iteritems(df):
        self.assertEqual(type(column), Series)
def test_lookup(self):
    """DataFrame.lookup fancy-indexes one value per (row, col) pair and
    raises for unknown labels or mismatched list lengths."""
    def alt(df, rows, cols):
        # reference implementation: one get_value call per pair
        result = []
        for r, c in zip(rows, cols):
            result.append(df.get_value(r, c))
        return result
    def testit(df):
        rows = list(df.index) * len(df.columns)
        cols = list(df.columns) * len(df.index)
        result = df.lookup(rows, cols)
        expected = alt(df, rows, cols)
        assert_almost_equal(result, expected)
    testit(self.mixed_frame)
    testit(self.frame)
    df = DataFrame({'label': ['a', 'b', 'a', 'c'],
                    'mask_a': [True, True, False, True],
                    'mask_b': [True, False, False, False],
                    'mask_c': [False, True, False, True]})
    # column names computed per-row from another column
    df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
    exp_mask = alt(df, df.index, 'mask_' + df['label'])
    assert_almost_equal(df['mask'], exp_mask)
    self.assertEqual(df['mask'].dtype, np.bool_)
    with tm.assertRaises(KeyError):
        self.frame.lookup(['xyz'], ['A'])
    with tm.assertRaises(KeyError):
        self.frame.lookup([self.frame.index[0]], ['xyz'])
    with tm.assertRaisesRegexp(ValueError, 'same size'):
        self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
    """set_value writes a scalar at each (row, column) label pair in place."""
    for row_label in self.frame.index:
        for col_label in self.frame.columns:
            self.frame.set_value(row_label, col_label, 1)
            assert_almost_equal(self.frame[col_label][row_label], 1)
def test_set_value_resize(self):
    """set_value with an unknown row label enlarges the frame in place;
    new columns get NaN backfill and may be upcast to object."""
    res = self.frame.set_value('foobar', 'B', 0)
    # enlargement returns the same (mutated) object
    self.assertIs(res, self.frame)
    self.assertEqual(res.index[-1], 'foobar')
    self.assertEqual(res.get_value('foobar', 'B'), 0)
    self.frame.loc['foobar','qux'] = 0
    self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 'sam')
    self.assertEqual(res3['baz'].dtype, np.object_)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', True)
    self.assertEqual(res3['baz'].dtype, np.object_)
    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 5)
    self.assertTrue(com.is_float_dtype(res3['baz']))
    self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())
    # cannot put a string into an existing float column
    self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
    """Enlarging via set_value / .loc with new row+column labels on an int index."""
    df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
    # this is actually ambiguous as the 2 is interpreted as a positional
    # so column is not created
    df = df_orig.copy()
    df.set_value('C', 2, 1.0)
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
    df = df_orig.copy()
    df.loc['C', 2] = 1.0
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
    # create both new
    df = df_orig.copy()
    df.set_value('C', 'D', 1.0)
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
    df = df_orig.copy()
    df.loc['C', 'D'] = 1.0
    self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
    self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
def test_get_set_value_no_partial_indexing(self):
    """get_value with a partial key on a MultiIndex raises KeyError."""
    # partial w/ MultiIndex raise exception
    index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
    df = DataFrame(index=index, columns=lrange(4))
    self.assertRaises(KeyError, df.get_value, 0, 1)
    # self.assertRaises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
    """Scalar .ix access on an integer column must stay an integer, not float."""
    self.frame['E'] = 1
    self.assertTrue(issubclass(self.frame['E'].dtype.type,
                               (int, np.integer)))
    result = self.frame.ix[self.frame.index[5], 'E']
    self.assertTrue(com.is_integer(result))
def test_irow(self):
    """Deprecated irow vs iloc row access (GH 10711); iloc slices are views."""
    df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
    # 10711, deprecated
    with tm.assert_produces_warning(FutureWarning):
        df.irow(1)
    result = df.iloc[1]
    exp = df.ix[2]
    assert_series_equal(result, exp)
    result = df.iloc[2]
    exp = df.ix[4]
    assert_series_equal(result, exp)
    # slice
    result = df.iloc[slice(4, 8)]
    expected = df.ix[8:14]
    assert_frame_equal(result, expected)
    # verify slice is view
    # setting it makes it raise/warn
    def f():
        result[2] = 0.
    self.assertRaises(com.SettingWithCopyError, f)
    # the write above still went through to the parent frame
    exp_col = df[2].copy()
    exp_col[4:8] = 0.
    assert_series_equal(df[2], exp_col)
    # list of integers
    result = df.iloc[[1, 2, 4, 6]]
    expected = df.reindex(df.index[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_icol(self):
    """Deprecated icol vs iloc column access (GH 10711); iloc slices are views."""
    df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
    # 10711, deprecated
    with tm.assert_produces_warning(FutureWarning):
        df.icol(1)
    result = df.iloc[:, 1]
    exp = df.ix[:, 2]
    assert_series_equal(result, exp)
    result = df.iloc[:, 2]
    exp = df.ix[:, 4]
    assert_series_equal(result, exp)
    # slice
    result = df.iloc[:, slice(4, 8)]
    expected = df.ix[:, 8:14]
    assert_frame_equal(result, expected)
    # verify slice is view
    # and that we are setting a copy
    def f():
        result[8] = 0.
    self.assertRaises(com.SettingWithCopyError, f)
    self.assertTrue((df[8] == 0).all())
    # list of integers
    result = df.iloc[:, [1, 2, 4, 6]]
    expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_irow_icol_duplicates(self):
    """Positional row/column access with duplicate labels, flat and MultiIndex."""
    # 10711, deprecated
    df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
                   index=list('aab'))
    result = df.iloc[0]
    result2 = df.ix[0]
    tm.assertIsInstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)
    result = df.T.iloc[:, 0]
    result2 = df.T.ix[:, 0]
    tm.assertIsInstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)
    # multiindex
    df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
                                                   ['A', 'A', 'B']],
                   index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
    rs = df.iloc[0]
    xp = df.ix[0]
    assert_series_equal(rs, xp)
    rs = df.iloc[:, 0]
    xp = df.T.ix[0]
    assert_series_equal(rs, xp)
    rs = df.iloc[:, [0]]
    xp = df.ix[:, [0]]
    assert_frame_equal(rs, xp)
    # #2259
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
    result = df.iloc[:, [0]]
    expected = df.take([0], axis=1)
    assert_frame_equal(result, expected)
# NOTE(review): "propegate" is a typo for "propagate"; the name is kept
# as-is so external test selectors keep matching.
def test_icol_sparse_propegate_fill_value(self):
    """Positional column access on a SparseDataFrame keeps the fill value."""
    from pandas.sparse.api import SparseDataFrame
    df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
    self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values))
def test_iget_value(self):
    """Deprecated iget_value (GH 10711); iat must agree with at for every cell."""
    # 10711 deprecated
    with tm.assert_produces_warning(FutureWarning):
        self.frame.iget_value(0,0)
    for i, row in enumerate(self.frame.index):
        for j, col in enumerate(self.frame.columns):
            result = self.frame.iat[i,j]
            expected = self.frame.at[row, col]
            assert_almost_equal(result, expected)
def test_nested_exception(self):
    """repr of a frame with a mangled (list-containing) index must not die
    with UnboundLocalError — whatever else it raises is acceptable."""
    # Ignore the strange way of triggering the problem
    # (which may get fixed), it's just a way to trigger
    frame = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
                       "c": [7, 8, 9]}).set_index(["a", "b"])
    labels = list(frame.index)
    labels[0] = ["a", "b"]
    frame.index = labels
    try:
        repr(frame)
    except Exception as exc:
        self.assertNotEqual(type(exc), UnboundLocalError)
def test_reindex_methods(self):
    """reindex with nearest/pad/backfill fill methods, tolerance, and
    non-monotonic target orderings."""
    df = pd.DataFrame({'x': list(range(5))})
    target = np.array([-0.1, 0.9, 1.1, 1.5])
    for method, expected_values in [('nearest', [0, 1, 1, 2]),
                                    ('pad', [np.nan, 0, 1, 1]),
                                    ('backfill', [0, 1, 2, 2])]:
        expected = pd.DataFrame({'x': expected_values}, index=target)
        actual = df.reindex(target, method=method)
        assert_frame_equal(expected, actual)
        # reindexing onto the same index with tolerance=0 is a no-op
        actual = df.reindex_like(df, method=method, tolerance=0)
        assert_frame_equal(df, actual)
        actual = df.reindex(target, method=method, tolerance=1)
        assert_frame_equal(expected, actual)
        # reversed target
        e2 = expected[::-1]
        actual = df.reindex(target[::-1], method=method)
        assert_frame_equal(e2, actual)
        # arbitrarily permuted target
        new_order = [3, 0, 2, 1]
        e2 = expected.iloc[new_order]
        actual = df.reindex(target[new_order], method=method)
        assert_frame_equal(e2, actual)
        # reversing the source flips pad <-> backfill
        switched_method = ('pad' if method == 'backfill'
                           else 'backfill' if method == 'pad'
                           else method)
        actual = df[::-1].reindex(target, method=switched_method)
        assert_frame_equal(expected, actual)
    # nearest with a tight tolerance leaves out-of-range targets as NaN
    expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)
    actual = df.reindex(target, method='nearest', tolerance=0.2)
    assert_frame_equal(expected, actual)
def test_non_monotonic_reindex_methods(self):
    """Fill-method reindexing requires a monotonic index and raises otherwise."""
    dr = pd.date_range('2013-08-01', periods=6, freq='B')
    data = np.random.randn(6,1)
    df = pd.DataFrame(data, index=dr, columns=list('A'))
    # same data, index deliberately out of order
    df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
                          columns=list('A'))
    for fill_method in ('pad', 'ffill', 'bfill', 'nearest'):
        self.assertRaises(ValueError, df_rev.reindex, df.index,
                          method=fill_method)
def test_reindex_level(self):
    """reindex(idx, level=...) on a MultiIndex, checked against a reference
    built from set_index plus positional takes."""
    from itertools import permutations
    icol = ['jim', 'joe', 'jolie']
    def verify_first_level(df, level, idx, check_index_type=True):
        # expected: rows whose first-level value matches, in idx order
        f = lambda val: np.nonzero(df[level] == val)[0]
        i = np.concatenate(list(map(f, idx)))
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[i].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)
    def verify(df, level, idx, indexer, check_index_type=True):
        # expected given explicitly as a positional indexer
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[indexer].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)
    df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),
                       'joe':list('abcdeabcd')[::-1],
                       'jolie':[10, 20, 30] * 3,
                       'joline': np.random.randint(0, 1000, 9)})
    target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
              ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
              ['A', 'B'], ['B', 'A', 'C']]
    for idx in target:
        verify_first_level(df, 'jim', idx)
    for idx in [['D', 'F'], ['A', 'C', 'B']]:
        verify_first_level(df, 'jim', idx, check_index_type=False)
    verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
    verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
    verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
    verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
    verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
    verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
    verify(df, 'joe', list('edwq'), [0, 4, 5])
    verify(df, 'joe', list('wq'), [], check_index_type=False)
    df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
                    'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
                          ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
                          ['3rd'] * 3 + ['2nd'] * 2,
                    'jolie': np.concatenate([np.random.choice(1000, x, replace=False)
                                             for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
                    'joline': np.random.randn(20).round(3) * 10})
    for idx in permutations(df['jim'].unique()):
        for i in range(3):
            verify_first_level(df, 'jim', idx[:i+1])
    i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]
    verify(df, 'joe', ['1st', '2nd', '3rd'], i)
    i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]
    verify(df, 'joe', ['3rd', '2nd', '1st'], i)
    i = [0,1,5,6,7,10,11,12,18,19,15,16,17]
    verify(df, 'joe', ['2nd', '3rd'], i)
    i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]
    verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
    """Selecting a duplicated float label via loc/ix returns all matching rows."""
    df = pd.DataFrame(np.random.randn(3, 3),
                      index=[0.1, 0.2, 0.2], columns=list('abc'))
    expect = df.iloc[1:]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    # mixed int/float index with duplicated float
    df.index = [1, 0.2, 0.2]
    expect = df.iloc[1:]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    df = pd.DataFrame(np.random.randn(4, 3),
                      index=[1, 0.2, 0.2, 1], columns=list('abc'))
    expect = df.iloc[1:-1]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[1:-1, 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
    # non-adjacent duplicates
    df.index = [0.1, 0.2, 2, 0.2]
    expect = df.iloc[[1, -1]]
    tm.assert_frame_equal(df.loc[0.2], expect)
    tm.assert_frame_equal(df.ix[0.2], expect)
    expect = df.iloc[[1, -1], 0]
    tm.assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
    """Assigning an aligned SparseSeries as a new column preserves it."""
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
    df['new_column'] = sp_series
    tm.assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
    """Assigning a SparseSeries with a reversed index realigns it by label."""
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
                 .to_sparse(fill_value=0))
    df['new_column'] = sp_series
    exp = pd.Series([1, 0, 0], name='new_column')
    tm.assert_series_equal(df['new_column'], exp)
# Shared module-level fixture data; copied (never mutated directly) by the
# test classes' setUp methods below.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(int))
                           for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
# like _frame plus one constant object column
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SafeForSparse(object):
    """Mixin of tests that must pass for both dense and sparse DataFrames.

    Expects the concrete test class to provide ``self.frame`` (and the
    unittest assertion helpers) via its own ``setUp``.
    """

    _multiprocess_can_split_ = True

    def test_copy_index_name_checking(self):
        """Renaming an axis on a copy must not leak back to the original."""
        # making a copy
        for attr in ('index', 'columns'):
            ind = getattr(self.frame, attr)
            ind.name = None
            cp = self.frame.copy()
            getattr(cp, attr).name = 'foo'
            self.assertIsNone(getattr(self.frame, attr).name)

    def test_getitem_pop_assign_name(self):
        """Column access via [], pop and .ix all carry the column name."""
        s = self.frame['A']
        self.assertEqual(s.name, 'A')
        s = self.frame.pop('A')
        self.assertEqual(s.name, 'A')
        s = self.frame.ix[:, 'B']
        self.assertEqual(s.name, 'B')
        # slicing a named Series keeps the name
        s2 = s.ix[:]
        self.assertEqual(s2.name, 'B')

    def test_get_value(self):
        """get_value must agree with column-then-row indexing for every cell."""
        for idx in self.frame.index:
            for col in self.frame.columns:
                result = self.frame.get_value(idx, col)
                expected = self.frame[col][idx]
                assert_almost_equal(result, expected)

    def test_join_index(self):
        """join with how='left'/'right'/'inner'/'outer' and error handling."""
        # left / right
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2)
        self.assertTrue(f.index.equals(joined.index))
        self.assertEqual(len(joined.columns), 4)
        joined = f.join(f2, how='left')
        self.assertTrue(joined.index.equals(f.index))
        self.assertEqual(len(joined.columns), 4)
        joined = f.join(f2, how='right')
        self.assertTrue(joined.index.equals(f2.index))
        self.assertEqual(len(joined.columns), 4)
        # inner
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2, how='inner')
        self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))
        self.assertEqual(len(joined.columns), 4)
        # outer
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])
        joined = f.join(f2, how='outer')
        self.assertTrue(tm.equalContents(self.frame.index, joined.index))
        self.assertEqual(len(joined.columns), 4)
        # unknown join method
        assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
        # corner case - overlapping columns
        for how in ('outer', 'left', 'inner'):
            with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):
                self.frame.join(self.frame, how=how)

    def test_join_index_more(self):
        """Joining against a frame with every other row; right join reindexes."""
        af = self.frame.ix[:, ['A', 'B']]
        bf = self.frame.ix[::2, ['C', 'D']]
        expected = af.copy()
        expected['C'] = self.frame['C'][::2]
        expected['D'] = self.frame['D'][::2]
        result = af.join(bf)
        assert_frame_equal(result, expected)
        result = af.join(bf, how='right')
        assert_frame_equal(result, expected[::2])
        result = bf.join(af, how='right')
        assert_frame_equal(result, expected.ix[:, result.columns])

    def test_join_index_series(self):
        """Joining a named Series appends it as a column; unnamed raises."""
        df = self.frame.copy()
        s = df.pop(self.frame.columns[-1])
        joined = df.join(s)
        assert_frame_equal(joined, self.frame, check_names=False)  # TODO should this check_names ?
        s.name = None
        assertRaisesRegexp(ValueError, 'must have a name', df.join, s)

    def test_join_overlap(self):
        """Overlapping columns get the given suffixes; others pass through."""
        df1 = self.frame.ix[:, ['A', 'B', 'C']]
        df2 = self.frame.ix[:, ['B', 'C', 'D']]
        joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
        df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
        df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
        no_overlap = self.frame.ix[:, ['A', 'D']]
        expected = df1_suf.join(df2_suf).join(no_overlap)
        # column order not necessarily sorted
        assert_frame_equal(joined, expected.ix[:, joined.columns])

    def test_add_prefix_suffix(self):
        """add_prefix/add_suffix decorate every column label with the affix."""
        # NOTE(review): the affix string literals were truncated in this copy
        # of the file (everything from '#' onward was stripped); restored to
        # the canonical 'foo#' / '#foo' values from the pandas test suite.
        with_prefix = self.frame.add_prefix('foo#')
        expected = ['foo#%s' % c for c in self.frame.columns]
        self.assert_numpy_array_equal(with_prefix.columns, expected)

        with_suffix = self.frame.add_suffix('#foo')
        expected = ['%s#foo' % c for c in self.frame.columns]
        self.assert_numpy_array_equal(with_suffix.columns, expected)
class TestDataFrame(tm.TestCase, CheckIndexing,
SafeForSparse):
klass = DataFrame
_multiprocess_can_split_ = True
def setUp(self):
    """Build the dense fixture frames used throughout TestDataFrame."""
    self.frame = _frame.copy()
    self.frame2 = _frame2.copy()
    # force these all to int64 to avoid platform testing issues
    self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)
    self.tsframe = _tsframe.copy()
    self.mixed_frame = _mixed_frame.copy()
    # frames with heterogeneous float / int dtypes per column
    self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),
                                   'B': _frame['B'].copy().astype('float32'),
                                   'C': _frame['C'].copy().astype('float16'),
                                   'D': _frame['D'].copy().astype('float64') })
    self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),
                                    'B': _frame2['B'].copy().astype('float32'),
                                    'C': _frame2['C'].copy().astype('float16'),
                                    'D': _frame2['D'].copy().astype('float64') })
    self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),
                                 'B': np.ones(len(_intframe['B']),dtype='uint64'),
                                 'C': _intframe['C'].copy().astype('uint8'),
                                 'D': _intframe['D'].copy().astype('int64') })
    self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),
                                'int32' : np.array([1]*10,dtype='int32'),
                                }, index=np.arange(10))
    # tz-aware frames, with a NaT injected into the tz-aware columns
    self.tzframe = DataFrame({'A' : date_range('20130101',periods=3),
                              'B' : date_range('20130101',periods=3,tz='US/Eastern'),
                              'C' : date_range('20130101',periods=3,tz='CET')})
    self.tzframe.iloc[1,1] = pd.NaT
    self.tzframe.iloc[1,2] = pd.NaT
    self.ts1 = tm.makeTimeSeries()
    self.ts2 = tm.makeTimeSeries()[5:]
    self.ts3 = tm.makeTimeSeries()[-5:]
    self.ts4 = tm.makeTimeSeries()[1:-1]
    self.ts_dict = {
        'col1': self.ts1,
        'col2': self.ts2,
        'col3': self.ts3,
        'col4': self.ts4,
    }
    self.empty = DataFrame({})
    arr = np.array([[1., 2., 3.],
                    [4., 5., 6.],
                    [7., 8., 9.]])
    self.simple = DataFrame(arr, columns=['one', 'two', 'three'],
                            index=['a', 'b', 'c'])
def test_get_axis(self):
    """_get_axis_number/_get_axis_name/_get_axis resolve ints and aliases."""
    f = self.frame
    self.assertEqual(f._get_axis_number(0), 0)
    self.assertEqual(f._get_axis_number(1), 1)
    self.assertEqual(f._get_axis_number('index'), 0)
    self.assertEqual(f._get_axis_number('rows'), 0)
    self.assertEqual(f._get_axis_number('columns'), 1)
    self.assertEqual(f._get_axis_name(0), 'index')
    self.assertEqual(f._get_axis_name(1), 'columns')
    self.assertEqual(f._get_axis_name('index'), 'index')
    self.assertEqual(f._get_axis_name('rows'), 'index')
    self.assertEqual(f._get_axis_name('columns'), 'columns')
    self.assertIs(f._get_axis(0), f.index)
    self.assertIs(f._get_axis(1), f.columns)
    # invalid axis specifiers
    assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
    assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
    assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
    assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)
def test_set_index(self):
    """Assigning .index propagates to cached column Series; length must match."""
    idx = Index(np.arange(len(self.mixed_frame)))
    # cache it
    _ = self.mixed_frame['foo']
    self.mixed_frame.index = idx
    # the cached column must see the new index object
    self.assertIs(self.mixed_frame['foo'].index, idx)
    with assertRaisesRegexp(ValueError, 'Length mismatch'):
        self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
    """Label lookup keeps working after casting the index to int32."""
    # issue casting an index then set_index
    df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},
                   index = [2010,2011,2012])
    expected = df.ix[2010]
    new_index = df.index.astype(np.int32)
    df.index = new_index
    result = df.ix[2010]
    assert_series_equal(result,expected)
def test_set_index2(self):
    """set_index: single / multi column, drop=False, inplace, verify_integrity,
    append, and passing a Series."""
    df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
                    'B': ['one', 'two', 'three', 'one', 'two'],
                    'C': ['a', 'b', 'c', 'd', 'e'],
                    'D': np.random.randn(5),
                    'E': np.random.randn(5)})
    # new object, single-column
    result = df.set_index('C')
    result_nodrop = df.set_index('C', drop=False)
    index = Index(df['C'], name='C')
    expected = df.ix[:, ['A', 'B', 'D', 'E']]
    expected.index = index
    expected_nodrop = df.copy()
    expected_nodrop.index = index
    assert_frame_equal(result, expected)
    assert_frame_equal(result_nodrop, expected_nodrop)
    self.assertEqual(result.index.name, index.name)
    # inplace, single
    df2 = df.copy()
    df2.set_index('C', inplace=True)
    assert_frame_equal(df2, expected)
    df3 = df.copy()
    df3.set_index('C', drop=False, inplace=True)
    assert_frame_equal(df3, expected_nodrop)
    # create new object, multi-column
    result = df.set_index(['A', 'B'])
    result_nodrop = df.set_index(['A', 'B'], drop=False)
    index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
    expected = df.ix[:, ['C', 'D', 'E']]
    expected.index = index
    expected_nodrop = df.copy()
    expected_nodrop.index = index
    assert_frame_equal(result, expected)
    assert_frame_equal(result_nodrop, expected_nodrop)
    self.assertEqual(result.index.names, index.names)
    # inplace
    df2 = df.copy()
    df2.set_index(['A', 'B'], inplace=True)
    assert_frame_equal(df2, expected)
    df3 = df.copy()
    df3.set_index(['A', 'B'], drop=False, inplace=True)
    assert_frame_equal(df3, expected_nodrop)
    # corner case
    with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
        df.set_index('A', verify_integrity=True)
    # append
    result = df.set_index(['A', 'B'], append=True)
    xp = df.reset_index().set_index(['index', 'A', 'B'])
    xp.index.names = [None, 'A', 'B']
    assert_frame_equal(result, xp)
    # append to existing multiindex
    rdf = df.set_index(['A'], append=True)
    rdf = rdf.set_index(['B', 'C'], append=True)
    expected = df.set_index(['A', 'B', 'C'], append=True)
    assert_frame_equal(rdf, expected)
    # Series
    result = df.set_index(df.C)
    self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
    """verify_integrity + inplace on duplicate keys raises and leaves df intact."""
    df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
                    'B': ['one', 'two', 'three', 'one', 'two'],
                    'C': ['a', 'b', 'c', 'd', 'e'],
                    'D': np.random.randn(5),
                    'E': np.random.randn(5)})
    with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
        df.set_index('A', verify_integrity=True, inplace=True)
    # column 'A' must survive the failed inplace operation
    self.assertIn('A', df)
def test_set_index_bug(self):
    """set_index after select() keeps the right rows and names the index (GH 1590)."""
    # GH1590
    df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
    df2 = df.select(lambda indx: indx >= 1)
    rs = df2.set_index('key')
    xp = DataFrame({'val': [1, 2]},
                   Index(['b', 'c'], name='key'))
    assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
    """set_index accepts a mix of column names and raw arrays."""
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                    'B': ['one', 'one', 'two', 'three',
                          'two', 'two', 'one', 'three'],
                    'C': np.random.randn(8),
                    'D': np.random.randn(8)})
    # multiple columns
    result = df.set_index(['A', df['B'].values], drop=False)
    expected = df.set_index(['A', 'B'], drop=False)
    assert_frame_equal(result, expected, check_names=False)  # TODO should set_index check_names ?
def test_construction_with_categorical_index(self):
    """set_index on Categorical values / a CategoricalIndex round-trips."""
    ci = tm.makeCategoricalIndex(10)
    # with Categorical
    df = DataFrame({'A' : np.random.randn(10),
                    'B' : ci.values })
    idf = df.set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    # from a CategoricalIndex
    df = DataFrame({'A' : np.random.randn(10),
                    'B' : ci })
    idf = df.set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    # round-trip through reset_index
    idf = df.set_index('B').reset_index().set_index('B')
    str(idf)
    tm.assert_index_equal(idf.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
    new_df = idf.reset_index()
    new_df.index = df.B
    tm.assert_index_equal(new_df.index, ci, check_names=False)
    self.assertEqual(idf.index.name, 'B')
def test_set_index_cast_datetimeindex(self):
    """set_index with datetimes casts to DatetimeIndex; tz-aware values are
    preserved as object, and reset_index round-trips tz-aware indexes."""
    df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
                          for i in range(1000)],
                    'B': np.random.randn(1000)})
    idf = df.set_index('A')
    tm.assertIsInstance(idf.index, DatetimeIndex)
    # don't cast a DatetimeIndex WITH a tz, leave as object
    i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors="raise")).tz_localize('US/Pacific')
    df = DataFrame(np.random.randn(2,1),columns=['A'])
    expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
                                pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype="object"))
    result = Series(i)
    assert_series_equal(result, expected)
    df['B'] = i
    result = df['B']
    assert_series_equal(result, expected, check_names=False)
    self.assertEqual(result.name, 'B')
    # keep_tz=True preserves the timezone through to_series
    result = i.to_series(keep_tz=True)
    assert_series_equal(result.reset_index(drop=True), expected)
    df['C'] = i.to_series().reset_index(drop=True)
    result = df['C']
    comp = DatetimeIndex(expected.values).copy()
    comp.tz = None
    self.assert_numpy_array_equal(result.values, comp.values)
    # list of datetimes with a tz
    df['D'] = i.to_pydatetime()
    result = df['D']
    assert_series_equal(result, expected, check_names=False)
    self.assertEqual(result.name, 'D')
    import pytz
    df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])
    expected = df.set_index('ts')
    df.index = df['ts']
    df.pop('ts')
    assert_frame_equal(df, expected)
    # reset_index on a tz-aware index round-trips the timestamps
    for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
        idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')
        df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
        expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),
                                         datetime(2011, 1, 3), datetime(2011, 1, 4),
                                         datetime(2011, 1, 5)],
                                 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
                                columns=['idx', 'a', 'b'])
        expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))
        assert_frame_equal(df.reset_index(), expected)
    def test_set_index_multiindexcolumns(self):
        """set_index with a MultiIndex column label keeps the tuple as the
        index name."""
        columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
        df = DataFrame(np.random.randn(3, 3), columns=columns)
        rs = df.set_index(df.columns[0])
        # expected: remaining columns with the first column's values as index
        xp = df.ix[:, 1:]
        xp.index = df.ix[:, 0].values
        xp.index.names = [df.columns[0]]
        assert_frame_equal(rs, xp)
    def test_set_index_empty_column(self):
        """set_index including an all-NaN column must not blow up (repr is
        exercised as a smoke test)."""
        df = DataFrame([
            dict(a=1, p=0),
            dict(a=2, m=10),
            dict(a=3, m=11, p=20),
            dict(a=4, m=12, p=21)
        ], columns=('a', 'm', 'p', 'x'))  # 'x' is entirely missing
        result = df.set_index(['a', 'x'])
        repr(result)
    def test_set_columns(self):
        """Assigning .columns requires matching length; mismatch raises."""
        cols = Index(np.arange(len(self.mixed_frame.columns)))
        self.mixed_frame.columns = cols
        with assertRaisesRegexp(ValueError, 'Length mismatch'):
            self.mixed_frame.columns = cols[::2]
def test_keys(self):
getkeys = self.frame.keys
self.assertIs(getkeys(), self.frame.columns)
    def test_column_contains_typeerror(self):
        """`columns in frame` may raise TypeError (unhashable Index); the test
        only requires that it does not raise anything else."""
        try:
            self.frame.columns in self.frame
        except TypeError:
            # TypeError is the acceptable outcome here
            pass
def test_constructor(self):
df = DataFrame()
self.assertEqual(len(df.index), 0)
df = DataFrame(data={})
self.assertEqual(len(df.index), 0)
    def test_constructor_mixed(self):
        """Mixed-type dict data constructs with and without an explicit index;
        string columns come out object dtype."""
        index, data = tm.getMixedTypeDict()
        indexed_frame = DataFrame(data, index=index)
        unindexed_frame = DataFrame(data)
        self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
    def test_constructor_cast_failure(self):
        """An impossible dtype cast (strings -> float64) falls back to object;
        setting a 2-D ndarray as a single column raises."""
        foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
        self.assertEqual(foo['a'].dtype, object)
        df = DataFrame(np.ones((4,2)))
        # a list-of-lists is allowed as a column of lists
        df['foo'] = np.ones((4,2)).tolist()
        # but a raw 2-D ndarray is not
        self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))
        df['foo2'] = np.ones((4,2)).tolist()
    def test_constructor_dtype_copy(self):
        """copy=True must decouple the new frame from the original data."""
        orig_df = DataFrame({
            'col1': [1.],
            'col2': [2.],
            'col3': [3.]})
        new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
        new_df['col1'] = 200.
        # mutation of the copy must not leak back
        self.assertEqual(orig_df['col1'][0], 1.)
    def test_constructor_dtype_nocast_view(self):
        """When dtype matches and no cast is needed, the constructor returns a
        view: writes through the new frame are visible in the original."""
        df = DataFrame([[1, 2]])
        should_be_view = DataFrame(df, dtype=df[0].dtype)
        should_be_view[0][0] = 99
        self.assertEqual(df.values[0, 0], 99)
        # same when constructing from the underlying ndarray
        should_be_view = DataFrame(df.values, dtype=df[0].dtype)
        should_be_view[0][0] = 97
        self.assertEqual(df.values[0, 0], 97)
    def test_constructor_dtype_list_data(self):
        """dtype=object preserves None and heterogeneous values verbatim."""
        df = DataFrame([[1, '2'],
                        [None, 'a']], dtype=object)
        self.assertIsNone(df.ix[1, 0])
        self.assertEqual(df.ix[0, 1], '2')
    def test_constructor_list_frames(self):
        """A list of DataFrames produces a frame whose cells are DataFrames
        (GH#3243-style nesting), not a concatenation."""
        result = DataFrame([DataFrame([])])
        self.assertEqual(result.shape, (1,0))
        result = DataFrame([DataFrame(dict(A = lrange(5)))])
        tm.assertIsInstance(result.iloc[0,0], DataFrame)
    def test_constructor_mixed_dtypes(self):
        """Construction from a dict of arrays with assorted int/float dtypes
        must preserve each column's dtype exactly."""
        def _make_mixed_dtypes_df(typ, ad = None):
            # build one column per dtype in the requested family; `ad` lets the
            # caller mix in extra scalar/str columns
            if typ == 'int':
                dtypes = MIXED_INT_DTYPES
                arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]
            elif typ == 'float':
                dtypes = MIXED_FLOAT_DTYPES
                arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]
            zipper = lzip(dtypes,arrays)
            for d,a in zipper:
                assert(a.dtype == d)
            if ad is None:
                ad = dict()
            ad.update(dict([ (d,a) for d,a in zipper ]))
            return DataFrame(ad)
        def _check_mixed_dtypes(df, dtypes = None):
            # every dtype-named column must still carry that dtype
            if dtypes is None:
                dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
            for d in dtypes:
                if d in df:
                    assert(df.dtypes[d] == d)
        df = _make_mixed_dtypes_df('float')
        _check_mixed_dtypes(df)
        df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))
        _check_mixed_dtypes(df)
        df = _make_mixed_dtypes_df('int')
        _check_mixed_dtypes(df)
    def test_constructor_complex_dtypes(self):
        """complex64 and complex128 columns keep their dtypes (GH10952-style)."""
        a = np.random.rand(10).astype(np.complex64)
        b = np.random.rand(10).astype(np.complex128)
        df = DataFrame({'a': a, 'b': b})
        self.assertEqual(a.dtype, df.a.dtype)
        self.assertEqual(b.dtype, df.b.dtype)
    def test_constructor_rec(self):
        """Construction from a numpy record array: field names become columns,
        optional index/columns arguments are honored."""
        rec = self.frame.to_records(index=False)
        index = self.frame.index
        df = DataFrame(rec)
        self.assert_numpy_array_equal(df.columns, rec.dtype.names)
        df2 = DataFrame(rec, index=index)
        self.assert_numpy_array_equal(df2.columns, rec.dtype.names)
        self.assertTrue(df2.index.equals(index))
        # explicit columns select/reorder the record fields
        rng = np.arange(len(rec))[::-1]
        df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
        expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
        assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
    def test_constructor_overflow_int64(self):
        """uint64 values too large for int64 must end up object dtype rather
        than silently overflowing."""
        values = np.array([2 ** 64 - i for i in range(1, 10)],
                          dtype=np.uint64)
        result = DataFrame({'a': values})
        self.assertEqual(result['a'].dtype, object)
        # same via a structured array with 'u8' fields (GH2355-style)
        data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
                       (8921811264899370420, 45), (long(17019687244989530680), 270),
                       (long(9930107427299601010), 273)]
        dtype = [('uid', 'u8'), ('score', 'u8')]
        data = np.zeros((len(data_scores),), dtype=dtype)
        data[:] = data_scores
        df_crawls = DataFrame(data)
        self.assertEqual(df_crawls['uid'].dtype, object)
    def test_constructor_ordereddict(self):
        """OrderedDict input preserves insertion order of the columns."""
        import random
        nitems = 100
        nums = lrange(nitems)
        random.shuffle(nums)
        expected = ['A%d' % i for i in nums]
        df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
        self.assertEqual(expected, list(df.columns))
    def test_constructor_dict(self):
        """Dict-of-Series construction: union alignment, missing columns
        filled with NaN, index/columns identity preserved, and scalar-only
        dicts rejected without an index."""
        frame = DataFrame({'col1': self.ts1,
                           'col2': self.ts2})
        tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
        tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
        frame = DataFrame({'col1': self.ts1,
                           'col2': self.ts2},
                          columns=['col2', 'col3', 'col4'])
        # columns not in the data come back all-NaN; absent ones are dropped
        self.assertEqual(len(frame), len(self.ts2))
        self.assertNotIn('col1', frame)
        self.assertTrue(isnull(frame['col3']).all())
        # corner cases
        self.assertEqual(len(DataFrame({})), 0)
        # mixing dicts with non-dict/Series data is ambiguous
        with tm.assertRaises(ValueError):
            DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
        # length-zero dict value keys become the index
        frame = DataFrame({'A': {'1': 1, '2': 2}})
        self.assert_numpy_array_equal(frame.index, ['1', '2'])
        # empty dict plus index: index object passed through unchanged
        idx = Index([0, 1, 2])
        frame = DataFrame({}, index=idx)
        self.assertIs(frame.index, idx)
        idx = Index([0, 1, 2])
        frame = DataFrame({}, index=idx, columns=idx)
        self.assertIs(frame.index, idx)
        self.assertIs(frame.columns, idx)
        self.assertEqual(len(frame._series), 3)
        # empty lists give an empty index
        frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
        self.assertTrue(frame.index.equals(Index([])))
        # all-scalar values require an explicit index
        with tm.assertRaises(ValueError):
            DataFrame({'a': 0.7})
        with tm.assertRaises(ValueError):
            DataFrame({'a': 0.7}, columns=['a'])
        with tm.assertRaises(ValueError):
            DataFrame({'a': 0.7}, columns=['b'])
    def test_constructor_multi_index(self):
        """Empty frame with (possibly duplicated) MultiIndex rows/columns is
        all-NaN (GH 4078)."""
        tuples = [(2, 3), (3, 3), (3, 3)]
        mi = MultiIndex.from_tuples(tuples)
        df = DataFrame(index=mi,columns=mi)
        self.assertTrue(pd.isnull(df).values.ravel().all())
        # same with the duplicate pair leading
        tuples = [(3, 3), (2, 3), (3, 3)]
        mi = MultiIndex.from_tuples(tuples)
        df = DataFrame(index=mi,columns=mi)
        self.assertTrue(pd.isnull(df).values.ravel().all())
def test_constructor_error_msgs(self):
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
with assertRaisesRegexp(ValueError, msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
msg = "Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=date_range('2000-01-01', periods=3))
with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])
with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):
DataFrame({'a': False, 'b': True})
    def test_constructor_with_embedded_frames(self):
        """A DataFrame whose cells are themselves DataFrames supports dtypes,
        repr, and .loc retrieval of the embedded frames."""
        df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})
        df2 = DataFrame([df1, df1+10])
        # smoke: these used to raise on nested frames
        df2.dtypes
        str(df2)
        result = df2.loc[0,0]
        assert_frame_equal(result,df1)
        result = df2.loc[1,0]
        assert_frame_equal(result,df1+10)
    def test_insert_error_msmgs(self):
        """Column insertion error messages.

        NOTE(review): method name has a typo ('msmgs'); kept as-is since test
        names are part of the discoverable interface.
        """
        # GH 7432: inserting a Series with a duplicated index
        df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')
        s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')
        msg = 'cannot reindex from a duplicate axis'
        with assertRaisesRegexp(ValueError, msg):
            df['newcol'] = s
        # GH 4107: inserting a groupby result with an incompatible index
        df = DataFrame(np.random.randint(0,2,(4,4)),
                       columns=['a', 'b', 'c', 'd'])
        msg = 'incompatible index of inserted column with frame index'
        with assertRaisesRegexp(TypeError, msg):
            df['gr'] = df.groupby(['b', 'c']).count()
    def test_frame_subclassing_and_slicing(self):
        """Subclass propagation: slicing a DataFrame subclass must return the
        subclass, and column access must return the declared sliced type."""
        class CustomSeries(Series):
            @property
            def _constructor(self):
                # keep Series-returning ops in the subclass
                return CustomSeries
            def custom_series_function(self):
                return 'OK'
        class CustomDataFrame(DataFrame):
            def __init__(self, *args, **kw):
                super(CustomDataFrame, self).__init__(*args, **kw)
            @property
            def _constructor(self):
                # frame-returning ops stay CustomDataFrame
                return CustomDataFrame
            # single-column access yields CustomSeries
            _constructor_sliced = CustomSeries
            def custom_frame_function(self):
                return 'OK'
        data = {'col1': range(10),
                'col2': range(10)}
        cdf = CustomDataFrame(data)
        self.assertTrue(isinstance(cdf, CustomDataFrame))
        cdf_series = cdf.col1
        self.assertTrue(isinstance(cdf_series, CustomSeries))
        self.assertEqual(cdf_series.custom_series_function(), 'OK')
        cdf_rows = cdf[1:5]
        self.assertTrue(isinstance(cdf_rows, CustomDataFrame))
        self.assertEqual(cdf_rows.custom_frame_function(), 'OK')
        # MultiIndex column selection returning a frame keeps the subclass
        mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])
        cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
        self.assertTrue(isinstance(cdf_multi['A'], CustomDataFrame))
        # ... and when it collapses to one column, the sliced type is used
        mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
        cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
        self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))
    def test_constructor_subclass_dict(self):
        """dict subclasses (including defaultdict) behave like plain dicts in
        the constructor."""
        data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
                'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
        df = DataFrame(data)
        refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
                               for col, val in compat.iteritems(data)))
        assert_frame_equal(refdf, df)
        # the outer mapping may be a dict subclass too
        data = tm.TestSubDict(compat.iteritems(data))
        df = DataFrame(data)
        assert_frame_equal(refdf, df)
        # defaultdict values, with NaNs sprinkled in
        from collections import defaultdict
        data = {}
        self.frame['B'][:10] = np.nan
        for k, v in compat.iteritems(self.frame):
            dct = defaultdict(dict)
            dct.update(v.to_dict())
            data[k] = dct
        frame = DataFrame(data)
        assert_frame_equal(self.frame.sort_index(), frame)
    def test_constructor_dict_block(self):
        """Explicit column order is honored in the consolidated block layout."""
        expected = [[4., 3., 2., 1.]]
        df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
                       columns=['d', 'c', 'b', 'a'])
        assert_almost_equal(df.values, expected)
    def test_constructor_dict_cast(self):
        """dtype=float casts castable string columns; without it strings stay
        object; uncastable strings remain object even with dtype=float."""
        # cast float tests
        test_data = {
            'A': {'1': 1, '2': 2},
            'B': {'1': '1', '2': '2', '3': '3'},
        }
        frame = DataFrame(test_data, dtype=float)
        self.assertEqual(len(frame), 3)
        self.assertEqual(frame['B'].dtype, np.float64)
        self.assertEqual(frame['A'].dtype, np.float64)
        frame = DataFrame(test_data)
        self.assertEqual(len(frame), 3)
        self.assertEqual(frame['B'].dtype, np.object_)
        self.assertEqual(frame['A'].dtype, np.float64)
        # can't cast to float: arbitrary strings stay object
        test_data = {
            'A': dict(zip(range(20), tm.makeStringIndex(20))),
            'B': dict(zip(range(15), randn(15)))
        }
        frame = DataFrame(test_data, dtype=float)
        self.assertEqual(len(frame), 20)
        self.assertEqual(frame['A'].dtype, np.object_)
        self.assertEqual(frame['B'].dtype, np.float64)
    def test_constructor_dict_dont_upcast(self):
        """NaN next to a string leaves the NaN a float; ints in mixed rows
        are not upcast."""
        d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
        df = DataFrame(d)
        tm.assertIsInstance(df['Col1']['Row2'], float)
        dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
        tm.assertIsInstance(dm[1][1], int)
    def test_constructor_dict_of_tuples(self):
        """Tuple column values behave like lists (GH #1491)."""
        # GH #1491
        data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
        result = DataFrame(data)
        expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))
        assert_frame_equal(result, expected, check_dtype=False)
    def test_constructor_dict_multiindex(self):
        """Nested dicts keyed by tuples build MultiIndex rows/columns; once a
        non-tuple key appears, keys fall back to a plain (non-tupleized)
        Index."""
        check = lambda result, expected: tm.assert_frame_equal(
            result, expected, check_dtype=True, check_index_type=True,
            check_column_type=True, check_names=True)
        d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
             ('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
             ('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
        _d = sorted(d.items())
        df = DataFrame(d)
        expected = DataFrame(
            [x[1] for x in _d],
            index=MultiIndex.from_tuples([x[0] for x in _d])).T
        expected.index = MultiIndex.from_tuples(expected.index)
        check(df, expected)
        # add a scalar (non-tuple) key: tupleization must be disabled
        d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
        _d.insert(0, ('z', d['z']))
        expected = DataFrame(
            [x[1] for x in _d],
            index=Index([x[0] for x in _d], tupleize_cols=False)).T
        expected.index = Index(expected.index, tupleize_cols=False)
        df = DataFrame(d)
        df = df.reindex(columns=expected.columns, index=expected.index)
        check(df, expected)
    def test_constructor_dict_datetime64_index(self):
        """Datetime-like keys — np.datetime64, datetime, Timestamp — all
        align onto the same Timestamp index (GH 10160)."""
        # GH 10160
        dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
        def create_data(constructor):
            # one column per date; each column has a single entry keyed by the
            # constructor-wrapped date
            return dict((i, {constructor(s): 2*i}) for i, s in enumerate(dates_as_str))
        data_datetime64 = create_data(np.datetime64)
        data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
        data_Timestamp = create_data(Timestamp)
        expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
                              {0: None, 1: 2, 2: None, 3: None},
                              {0: None, 1: None, 2: 4, 3: None},
                              {0: None, 1: None, 2: None, 3: 6}],
                             index=[Timestamp(dt) for dt in dates_as_str])
        result_datetime64 = DataFrame(data_datetime64)
        result_datetime = DataFrame(data_datetime)
        result_Timestamp = DataFrame(data_Timestamp)
        assert_frame_equal(result_datetime64, expected)
        assert_frame_equal(result_datetime, expected)
        assert_frame_equal(result_Timestamp, expected)
    def test_constructor_dict_timedelta64_index(self):
        """Timedelta-like keys — np.timedelta64, timedelta, Timedelta — all
        align onto the same Timedelta index (GH 10160)."""
        # GH 10160
        td_as_int = [1, 2, 3, 4]
        def create_data(constructor):
            # one column per timedelta; single entry keyed by the wrapped value
            return dict((i, {constructor(s): 2*i}) for i, s in enumerate(td_as_int))
        data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
        data_timedelta = create_data(lambda x: timedelta(days=x))
        data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
        expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
                              {0: None, 1: 2, 2: None, 3: None},
                              {0: None, 1: None, 2: 4, 3: None},
                              {0: None, 1: None, 2: None, 3: 6}],
                             index=[Timedelta(td, 'D') for td in td_as_int])
        result_timedelta64 = DataFrame(data_timedelta64)
        result_timedelta = DataFrame(data_timedelta)
        result_Timedelta = DataFrame(data_Timedelta)
        assert_frame_equal(result_timedelta64, expected)
        assert_frame_equal(result_timedelta, expected)
        assert_frame_equal(result_Timedelta, expected)
    def test_nested_dict_frame_constructor(self):
        """Nested dicts of scalars round-trip a frame with a PeriodIndex axis,
        both column-major and row-major (via .T)."""
        rng = period_range('1/1/2000', periods=5)
        df = DataFrame(randn(10, 5), columns=rng)
        # column-major nesting: {col: {row: value}}
        data = {}
        for col in df.columns:
            for row in df.index:
                data.setdefault(col, {})[row] = df.get_value(row, col)
        result = DataFrame(data, columns=rng)
        tm.assert_frame_equal(result, df)
        # row-major nesting: {row: {col: value}}, transposed back
        data = {}
        for col in df.columns:
            for row in df.index:
                data.setdefault(row, {})[col] = df.get_value(row, col)
        result = DataFrame(data, index=rng).T
        tm.assert_frame_equal(result, df)
    def _check_basic_constructor(self, empty):
        """Shared ndarray-constructor checks.

        Parameters
        ----------
        empty : callable
            Array factory with an ndarray-like signature, e.g. ``np.ones`` or
            ``ma.masked_all``; called as ``empty(shape, dtype=...)``.
        """
        mat = empty((2, 3), dtype=float)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        # 1-D input
        frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
        self.assertEqual(len(frame.index), 3)
        self.assertEqual(len(frame.columns), 1)
        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.int64)
        self.assertEqual(frame.values.dtype, np.int64)
        # wrong size axis labels
        msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
        with assertRaisesRegexp(ValueError, msg):
            DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
        msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
        with assertRaisesRegexp(ValueError, msg):
            DataFrame(mat, columns=['A', 'B'], index=[1, 2])
        # higher dim raise exception
        with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
            DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
                      index=[1])
        # automatic labeling
        frame = DataFrame(mat)
        self.assert_numpy_array_equal(frame.index, lrange(2))
        self.assert_numpy_array_equal(frame.columns, lrange(3))
        frame = DataFrame(mat, index=[1, 2])
        self.assert_numpy_array_equal(frame.columns, lrange(3))
        frame = DataFrame(mat, columns=['A', 'B', 'C'])
        self.assert_numpy_array_equal(frame.index, lrange(2))
        # 0-length axis
        frame = DataFrame(empty((0, 3)))
        self.assertEqual(len(frame.index), 0)
        frame = DataFrame(empty((3, 0)))
        self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
    def test_constructor_maskedarray(self):
        """Masked arrays pass the shared checks; masked entries become NaN and
        unmasked values survive."""
        self._check_basic_constructor(ma.masked_all)
        # Check non-masked values
        mat = ma.masked_all((2, 3), dtype=float)
        mat[0, 0] = 1.0
        mat[1, 2] = 2.0
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(1.0, frame['A'][1])
        self.assertEqual(2.0, frame['C'][2])
        # what is this even checking??
        # (NaN != NaN, so a fully-masked frame compares unequal everywhere)
        mat = ma.masked_all((2, 3), dtype=float)
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertTrue(np.all(~np.asarray(frame == frame)))
    def test_constructor_maskedarray_nonfloat(self):
        """Masked non-float arrays: masked int promotes to float (NaN),
        masked datetime64 keeps its dtype (NaT), masked bool promotes to
        object; explicit dtype overrides are honored."""
        # masked int promoted to float
        mat = ma.masked_all((2, 3), dtype=int)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        self.assertTrue(np.all(~np.asarray(frame == frame)))
        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.float64)
        self.assertEqual(frame.values.dtype, np.float64)
        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = 1
        mat2[1, 2] = 2
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(1, frame['A'][1])
        self.assertEqual(2, frame['C'][2])
        # masked np.datetime64 stays (use lib.NaT as null)
        mat = ma.masked_all((2, 3), dtype='M8[ns]')
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        self.assertTrue(isnull(frame).values.all())
        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=np.int64)
        self.assertEqual(frame.values.dtype, np.int64)
        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = 1
        mat2[1, 2] = 2
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        # compare through the i8 view since values are datetime64
        self.assertEqual(1, frame['A'].view('i8')[1])
        self.assertEqual(2, frame['C'].view('i8')[2])
        # masked bool promoted to object
        mat = ma.masked_all((2, 3), dtype=bool)
        # 2-D input
        frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(len(frame.index), 2)
        self.assertEqual(len(frame.columns), 3)
        self.assertTrue(np.all(~np.asarray(frame == frame)))
        # cast type
        frame = DataFrame(mat, columns=['A', 'B', 'C'],
                          index=[1, 2], dtype=object)
        self.assertEqual(frame.values.dtype, object)
        # Check non-masked values
        mat2 = ma.copy(mat)
        mat2[0, 0] = True
        mat2[1, 2] = False
        frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
        self.assertEqual(True, frame['A'][1])
        self.assertEqual(False, frame['C'][2])
    def test_constructor_mrecarray(self):
        """Construction from a numpy mrecarray matches construction from the
        equivalent dict of (filled) masked arrays (GH3479)."""
        # Ensure mrecarray produces frame identical to dict of masked arrays
        # from GH3479
        assert_fr_equal = functools.partial(assert_frame_equal,
                                            check_index_type=True,
                                            check_column_type=True,
                                            check_frame_type=True)
        arrays = [
            ('float', np.array([1.5, 2.0])),
            ('int', np.array([1, 2])),
            ('str', np.array(['abc', 'def'])),
        ]
        # add partially-masked, fully-masked, and unmasked masked-array variants
        for name, arr in arrays[:]:
            arrays.append(('masked1_' + name,
                           np.ma.masked_array(arr, mask=[False, True])))
        arrays.append(('masked_all', np.ma.masked_all((2,))))
        arrays.append(('masked_none',
                       np.ma.masked_array([1.0, 2.5], mask=False)))
        # call assert_frame_equal for all selections of 3 arrays
        for comb in itertools.combinations(arrays, 3):
            names, data = zip(*comb)
            mrecs = mrecords.fromarrays(data, names=names)
        # fill the comb
            comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])
            expected = DataFrame(comb,columns=names)
            result = DataFrame(mrecs)
            assert_fr_equal(result,expected)
        # specify columns
            expected = DataFrame(comb,columns=names[::-1])
            result = DataFrame(mrecs, columns=names[::-1])
            assert_fr_equal(result,expected)
        # specify index
            expected = DataFrame(comb,columns=names,index=[1,2])
            result = DataFrame(mrecs, index=[1,2])
            assert_fr_equal(result,expected)
    def test_constructor_corner(self):
        """Corner cases: empty inputs, and dtype requests that cannot be
        honored fall back to object."""
        df = DataFrame(index=[])
        self.assertEqual(df.values.shape, (0, 0))
        # empty but with specified dtype
        df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
        self.assertEqual(df.values.dtype, np.object_)
        # does not error but ends up float
        df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
        self.assertEqual(df.values.dtype, np.object_)
        # #1783 empty dtype object
        df = DataFrame({}, columns=['foo', 'bar'])
        self.assertEqual(df.values.dtype, np.object_)
        df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
                       dtype=int)
        self.assertEqual(df.values.dtype, np.object_)
    def test_constructor_scalar_inference(self):
        """Scalar values broadcast over the index and each column infers its
        natural dtype."""
        data = {'int': 1, 'bool': True,
                'float': 3., 'complex': 4j, 'object': 'foo'}
        df = DataFrame(data, index=np.arange(10))
        self.assertEqual(df['int'].dtype, np.int64)
        self.assertEqual(df['bool'].dtype, np.bool_)
        self.assertEqual(df['float'].dtype, np.float64)
        self.assertEqual(df['complex'].dtype, np.complex128)
        self.assertEqual(df['object'].dtype, np.object_)
    def test_constructor_arrays_and_scalars(self):
        """A scalar mixed with an array broadcasts to the array's length;
        scalars alone still need an index."""
        df = DataFrame({'a': randn(10), 'b': True})
        exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
        assert_frame_equal(df, exp)
        with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
            DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with assertRaisesRegexp(PandasError, 'constructor not properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
    def test_constructor_empty_list(self):
        """Empty list / exhausted generator inputs build empty frames."""
        df = DataFrame([], index=[])
        expected = DataFrame(index=[])
        assert_frame_equal(df, expected)
        # GH 9939: empty input with explicit columns
        df = DataFrame([], columns=['A', 'B'])
        expected = DataFrame({}, columns=['A', 'B'])
        assert_frame_equal(df, expected)
        def empty_gen():
            # `return` before `yield` makes this a generator that yields
            # nothing — the yield is intentionally unreachable
            return
            yield
        df = DataFrame(empty_gen(), columns=['A', 'B'])
        assert_frame_equal(df, expected)
    def test_constructor_list_of_lists(self):
        """Row-wise list input infers per-column dtypes; GH 4851: a list of
        0-dim ndarrays becomes a single column."""
        l = [[1, 'a'], [2, 'b']]
        df = DataFrame(data=l, columns=["num", "str"])
        self.assertTrue(com.is_integer_dtype(df['num']))
        self.assertEqual(df['str'].dtype, np.object_)
        expected = DataFrame({ 0: range(10) })
        data = [np.array(x) for x in range(10)]
        result = DataFrame(data)
        assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1,'a'],[2,'b']],columns=columns)
assert_frame_equal(result, expected, check_dtype=False)
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({ 'A' : list(range(10)) })
assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ range(10), range(10) ])
assert_frame_equal(result, expected)
    def test_constructor_generator(self):
        """Generators are consumed like lists, both as rows and as the outer
        iterable (GH 4297-style)."""
        gen1 = (i for i in range(10))
        gen2 = (i for i in range(10))
        expected = DataFrame([ list(range(10)), list(range(10)) ])
        result = DataFrame([ gen1, gen2 ])
        assert_frame_equal(result, expected)
        # outer generator of row lists; scalar 'a' broadcasts down column 1
        gen = ([ i, 'a'] for i in range(10))
        result = DataFrame(gen)
        expected = DataFrame({ 0 : range(10), 1 : 'a' })
        assert_frame_equal(result, expected, check_dtype=False)
    def test_constructor_list_of_dicts(self):
        """A list of (partial) dicts builds rows with NaN for missing keys;
        a single empty dict yields one empty row."""
        data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
                OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
                OrderedDict([['a', 1.5], ['d', 6]]),
                OrderedDict(),
                OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
                OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
        result = DataFrame(data)
        expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
                                       orient='index')
        assert_frame_equal(result, expected.reindex(result.index))
        result = DataFrame([{}])
        expected = DataFrame(index=[0])
        assert_frame_equal(result, expected)
    def test_constructor_list_of_series(self):
        """A list of Series stacks row-wise: names become the index, an
        unnamed Series gets an 'Unnamed N' label, and missing entries align
        to NaN."""
        data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
                OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
        sdict = OrderedDict(zip(['x', 'y'], data))
        idx = Index(['a', 'b', 'c'])
        # all named
        data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
                 Series([1.5, 3, 6], idx, name='y')]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected)
        # some unnamed -> auto 'Unnamed 0'
        data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
                 Series([1.5, 3, 6], idx)]
        result = DataFrame(data2)
        sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result.sort_index(), expected)
        # none named: positional index, NaN-aligned partial rows
        data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
                OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
                OrderedDict([['a', 1.5], ['d', 6]]),
                OrderedDict(),
                OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
                OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
        data = [Series(d) for d in data]
        result = DataFrame(data)
        sdict = OrderedDict(zip(range(len(data)), data))
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected.reindex(result.index))
        result2 = DataFrame(data, index=np.arange(6))
        assert_frame_equal(result, result2)
        # single empty Series -> one empty row
        result = DataFrame([Series({})])
        expected = DataFrame(index=[0])
        assert_frame_equal(result, expected)
        # unnamed with integer keys -> positional index
        data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
                OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
        sdict = OrderedDict(zip(range(len(data)), data))
        idx = Index(['a', 'b', 'c'])
        data2 = [Series([1.5, 3, 4], idx, dtype='O'),
                 Series([1.5, 3, 6], idx)]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient='index')
        assert_frame_equal(result, expected)
    def test_constructor_list_of_derived_dicts(self):
        """dict subclasses in row lists are treated exactly like dicts."""
        class CustomDict(dict):
            pass
        d = {'a': 1.5, 'b': 3}
        data_custom = [CustomDict(d)]
        data = [d]
        result_custom = DataFrame(data_custom)
        result = DataFrame(data)
        assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with assertRaisesRegexp(ValueError, 'arrays must all be same length'):
DataFrame(data)
    def test_constructor_scalar(self):
        """A scalar value with an explicit index broadcasts down the column."""
        idx = Index(lrange(3))
        df = DataFrame({"a": 0}, index=idx)
        expected = DataFrame({"a": [0, 0, 0]}, index=idx)
        assert_frame_equal(df, expected, check_dtype=False)
    def test_constructor_Series_copy_bug(self):
        """Smoke test: copying a frame built from a single Series must not
        raise (regression guard)."""
        df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
        df.copy()
    def test_constructor_mixed_dict_and_Series(self):
        """dict + Series columns align on the union index; dict + plain list
        is ambiguous and raises."""
        data = {}
        data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
        data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
        result = DataFrame(data)
        self.assertTrue(result.index.is_monotonic)
        # ordering ambiguous, raise exception
        with assertRaisesRegexp(ValueError, 'ambiguous ordering'):
            DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
        # this is OK though: the Series supplies the index
        result = DataFrame({'A': ['a', 'b'],
                            'B': Series(['a', 'b'], index=['a', 'b'])})
        expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
                             index=['a', 'b'])
        assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
assert_frame_equal(result, expected)
    def test_constructor_namedtuples(self):
        """namedtuple rows contribute their field names as columns; explicit
        columns override the field names."""
        from collections import namedtuple
        named_tuple = namedtuple("Pandas", list('ab'))
        tuples = [named_tuple(1, 3), named_tuple(2, 4)]
        expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
        result = DataFrame(tuples)
        assert_frame_equal(result, expected)
        # with columns
        expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
        result = DataFrame(tuples, columns=['y', 'z'])
        assert_frame_equal(result, expected)
    def test_constructor_orient(self):
        """from_dict(..., orient='index') treats dict keys as row labels."""
        data_dict = self.mixed_frame.T._series
        recons = DataFrame.from_dict(data_dict, orient='index')
        expected = self.mixed_frame.sort_index()
        assert_frame_equal(recons, expected)
        # dict of lists, row-oriented
        a = {'hi': [32, 3, 3],
             'there': [3, 5, 3]}
        rs = DataFrame.from_dict(a, orient='index')
        xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
        assert_frame_equal(rs, xp)
    def test_constructor_Series_named(self):
        """A named Series contributes its name as the column; unnamed ones
        get positional or 'Unnamed N' labels, and conflicting explicit
        columns raise."""
        a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
        df = DataFrame(a)
        self.assertEqual(df.columns[0], 'x')
        self.assertTrue(df.index.equals(a.index))
        # ndarray-backed named Series
        arr = np.random.randn(10)
        s = Series(arr,name='x')
        df = DataFrame(s)
        expected = DataFrame(dict(x = s))
        assert_frame_equal(df,expected)
        # unnamed -> column 0
        s = Series(arr,index=range(3,13))
        df = DataFrame(s)
        expected = DataFrame({ 0 : s })
        assert_frame_equal(df,expected)
        # can't relabel a single Series to two columns
        self.assertRaises(ValueError, DataFrame, s, columns=[1,2])
        # #2234: empty named Series keeps its name
        a = Series([], name='x')
        df = DataFrame(a)
        self.assertEqual(df.columns[0], 'x')
        # series with name and w/o
        s1 = Series(arr,name='x')
        df = DataFrame([s1, arr]).T
        expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])
        assert_frame_equal(df,expected)
        # this is a bit non-intuitive here; the series collapse down to arrays
        df = DataFrame([arr, s1]).T
        expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])
        assert_frame_equal(df,expected)
    def test_constructor_Series_differently_indexed(self):
        """An explicit index reindexes the input Series; name (or 0) becomes
        the column label."""
        # name
        s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
        # no name
        s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
        other_index = Index(['a', 'b'])
        df1 = DataFrame(s1, index=other_index)
        exp1 = DataFrame(s1.reindex(other_index))
        self.assertEqual(df1.columns[0], 'x')
        assert_frame_equal(df1, exp1)
        df2 = DataFrame(s2, index=other_index)
        exp2 = DataFrame(s2.reindex(other_index))
        self.assertEqual(df2.columns[0], 0)
        self.assertTrue(df2.index.equals(other_index))
        assert_frame_equal(df2, exp2)
    def test_constructor_manager_resize(self):
        """Constructing from an existing BlockManager with smaller axes
        reindexes to the given index/columns."""
        index = list(self.frame.index[:5])
        columns = list(self.frame.columns[:3])
        result = DataFrame(self.frame._data, index=index,
                           columns=columns)
        self.assert_numpy_array_equal(result.index, index)
        self.assert_numpy_array_equal(result.columns, columns)
def test_constructor_from_items(self):
    """DataFrame.from_items with column-wise items (default) and
    row-wise items (orient='index'), including tuple-valued cells."""
    # column-wise items round-trip the frame
    items = [(c, self.frame[c]) for c in self.frame.columns]
    recons = DataFrame.from_items(items)
    assert_frame_equal(recons, self.frame)

    # explicit columns select/reorder
    recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
    assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])

    # row-wise items with orient='index'
    row_items = [(idx, self.mixed_frame.xs(idx))
                 for idx in self.mixed_frame.index]

    recons = DataFrame.from_items(row_items,
                                  columns=self.mixed_frame.columns,
                                  orient='index')
    assert_frame_equal(recons, self.mixed_frame)
    self.assertEqual(recons['A'].dtype, np.float64)

    # orient='index' requires explicit columns
    with tm.assertRaisesRegexp(TypeError,
                               "Must pass columns with orient='index'"):
        DataFrame.from_items(row_items, orient='index')

    # tuple-valued cells must survive a row-oriented round trip
    arr = lib.list_to_object_array(
        [('bar', 'baz')] * len(self.mixed_frame))
    self.mixed_frame['foo'] = arr

    row_items = [(idx, list(self.mixed_frame.xs(idx)))
                 for idx in self.mixed_frame.index]

    recons = DataFrame.from_items(row_items,
                                  columns=self.mixed_frame.columns,
                                  orient='index')
    assert_frame_equal(recons, self.mixed_frame)
    tm.assertIsInstance(recons['foo'][0], tuple)

    rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
                              orient='index', columns=['one', 'two', 'three'])
    xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
                   columns=['one', 'two', 'three'])
    assert_frame_equal(rs, xp)
def test_constructor_mix_series_nonseries(self):
    """A dict mixing a Series and a plain list works when lengths
    match; a mismatched list length raises ValueError."""
    df = DataFrame({'A': self.frame['A'],
                    'B': list(self.frame['B'])}, columns=['A', 'B'])
    assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])

    # list shorter than the Series-implied index -> error
    with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
        DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
    """Constructing a DataFrame directly from a bare iterator is not
    supported and raises a TypeError mentioning 'iterator'."""
    with assertRaisesRegexp(TypeError, 'iterator'):
        # the constructor must raise; binding the (never produced)
        # result to a local was dead code, so the assignment is dropped
        DataFrame(iter([1, 2, 3]))
def test_constructor_column_duplicates(self):
    """Duplicate column labels are accepted at construction time and
    by from_items; from_items validates items against `columns`."""
    # duplicate labels passed directly are preserved as given
    df = DataFrame([[8, 5]], columns=['a', 'a'])
    edf = DataFrame([[8, 5]])
    edf.columns = ['a', 'a']

    assert_frame_equal(df, edf)

    idf = DataFrame.from_items(
        [('a', [8]), ('a', [5])], columns=['a', 'a'])
    assert_frame_equal(idf, edf)

    # an item label absent from `columns` raises
    self.assertRaises(ValueError, DataFrame.from_items,
                      [('a', [8]), ('a', [5]), ('b', [6])],
                      columns=['b', 'a', 'a'])
def test_constructor_empty_with_string_dtype(self):
    """An empty frame built with any string-like dtype spelling ends
    up with object dtype (there is no native string column type)."""
    expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)

    # each string-ish dtype must collapse to object
    for string_dtype in (str, np.str_, np.unicode_, 'U5'):
        result = DataFrame(index=[0, 1], columns=[0, 1],
                           dtype=string_dtype)
        assert_frame_equal(result, expected)
def test_column_dups_operations(self):
    """Grab-bag of operations on frames with duplicate column labels:
    setitem, insert/delete, consolidate, describe, values extraction,
    merge/rename, reindex, drop, and binary operators."""

    def check(result, expected=None):
        # optionally compare, then ensure dtypes/repr don't blow up
        if expected is not None:
            assert_frame_equal(result,expected)
        result.dtypes
        str(result)

    # replace duplicate labels wholesale with a plain integer index
    arr = np.random.randn(3, 2)
    idx = lrange(2)
    df = DataFrame(arr, columns=['A', 'A'])
    df.columns = idx
    expected = DataFrame(arr,columns=idx)
    check(df,expected)

    # ...and with a DatetimeIndex
    idx = date_range('20130101',periods=4,freq='Q-NOV')
    df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
    df.columns = idx
    expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
    check(df,expected)

    # insert a new column alongside duplicates
    df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
    df['string'] = 'bah'
    expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
    check(df,expected)

    # length-mismatched insert still validates
    with assertRaisesRegexp(ValueError, 'Length of value'):
        df.insert(0, 'AnotherColumn', range(len(df.index) - 1))

    # new scalar column, then overwrite it twice
    df['foo2'] = 3
    expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
    check(df,expected)

    df['foo2'] = 4
    expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
    check(df,expected)

    df['foo2'] = 3
    # deletion of unique labels leaves the duplicates intact
    del df['bar']
    expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
    check(df,expected)

    del df['hello']
    expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
    check(df,expected)

    # consolidation must preserve duplicate labels
    df = df.consolidate()
    expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
    check(df,expected)

    # positional insert among duplicates
    df.insert(2,'new_col',5.)
    expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
    check(df,expected)

    # inserting a duplicate label requires allow_duplicates=True
    assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)

    df.insert(2,'new_col',4.,allow_duplicates=True)
    expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
    check(df,expected)

    # deleting a duplicated label removes every occurrence
    del df['foo']
    expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
    assert_frame_equal(df,expected)

    # duplicates across dtypes (int + float under the same label)
    df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
    check(df)

    df['foo2'] = 7.
    expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
    check(df,expected)

    # selecting a duplicated label returns a frame with both columns
    result = df['foo']
    expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
    check(result,expected)

    # assigning a scalar to a duplicated label hits every occurrence
    df['foo'] = 'string'
    expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
    check(df,expected)

    del df['foo']
    expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
    check(df,expected)

    # .values with duplicate labels
    df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])
    result = df.values
    expected = np.array([[1,2.5],[3,4.5]])
    self.assertTrue((result == expected).all().all())

    # rename following a merge that produced suffixed duplicates
    df4 = DataFrame({'TClose': [22.02],
                     'RT': [0.0454],
                     'TExg': [0.0422]},
                    index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))

    df5 = DataFrame({'STK_ID': [600809] * 3,
                     'RPT_Date': [20120930,20121231,20130331],
                     'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
                     'TClose': [38.05, 41.66, 30.01]},
                    index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))

    k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)
    result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})
    str(result)
    result.dtypes

    expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],
                         columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)
    assert_frame_equal(result,expected)

    # reindex onto a set overlapping duplicate labels raises
    df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
    self.assertRaises(ValueError, df.reindex, columns=['bar'])
    self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])

    # drop removes all occurrences of a duplicated label
    df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
    result = df.drop(['a'],axis=1)
    expected = DataFrame([[1],[1],[1]],columns=['bar'])
    check(result,expected)
    result = df.drop('a',axis=1)
    check(result,expected)

    # describe with duplicate labels: one stats column per occurrence
    df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')
    result = df.describe()
    s = df.iloc[:,0].describe()
    expected = pd.concat([ s, s, s],keys=df.columns,axis=1)
    check(result,expected)

    # assigning an Index to a duplicated label
    df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['A', 'B', 'A'])
    for index in [df.index, pd.Index(list('edcba'))]:
        this_df = df.copy()
        expected_ser = pd.Series(index.values, index=this_df.index)
        expected_df = DataFrame.from_items([('A', expected_ser),
                                            ('B', this_df['B']),
                                            ('A', expected_ser)])
        this_df['A'] = index
        check(this_df, expected_df)

    # operations
    for op in ['__add__','__mul__','__sub__','__truediv__']:
        df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))
        expected = getattr(df,op)(df)
        expected.columns = ['A','A']
        df.columns = ['A','A']
        result = getattr(df,op)(df)
        check(result,expected)

    # multiple assignments that change dtypes
    # the location indexer is a slice
    # GH 6120
    df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])
    expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])

    df['that'] = 1.0
    check(df, expected)

    df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])
    expected = DataFrame(1, index=range(5), columns=['that', 'that'])

    df['that'] = 1
    check(df, expected)
def test_column_dups2(self):
    """drop and dropna on frames with duplicate column labels."""
    # drop buggy GH 6240
    df = DataFrame({'A' : np.random.randn(5),
                    'B' : np.random.randn(5),
                    'C' : np.random.randn(5),
                    'D' : ['a','b','c','d','e'] })

    expected = df.take([0,1,1], axis=1)
    df2 = df.take([2,0,1,2,1], axis=1)

    result = df2.drop('C',axis=1)
    assert_frame_equal(result, expected)

    # dropna
    df = DataFrame({'A' : np.random.randn(5),
                    'B' : np.random.randn(5),
                    'C' : np.random.randn(5),
                    'D' : ['a','b','c','d','e'] })
    df.iloc[2,[0,1,2]] = np.nan
    df.iloc[0,0] = np.nan
    df.iloc[1,1] = np.nan
    df.iloc[:,3] = np.nan

    # build the expectation on unique labels, then rename both sides
    # to the duplicated set so only the labels differ
    expected = df.dropna(subset=['A','B','C'],how='all')
    expected.columns = ['A','A','B','C']

    df.columns = ['A','A','B','C']

    result = df.dropna(subset=['A','C'],how='all')
    assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
    """Boolean indexing, alignment, equality, and label selection on
    frames with duplicate column labels."""

    def check(result, expected=None):
        # optionally compare, then ensure dtypes/repr don't blow up
        if expected is not None:
            assert_frame_equal(result,expected)
        result.dtypes
        str(result)

    # boolean indexing
    # GH 4879
    dups = ['A', 'A', 'C', 'D']
    df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
    expected = df[df.C > 6]
    expected.columns = dups
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    result = df[df.C > 6]
    check(result,expected)

    # where
    df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
    expected = df[df > 6]
    expected.columns = dups
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    result = df[df > 6]
    check(result,expected)

    # boolean with the duplicate raises
    df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
    self.assertRaises(ValueError, lambda : df[df.A > 6])

    # dup aligining operations should work
    # GH 5185
    df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
    df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
    expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])
    result = df1.sub(df2)
    assert_frame_equal(result,expected)

    # equality
    df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])
    df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])

    # not-comparing like-labelled
    self.assertRaises(ValueError, lambda : df1 == df2)

    df1r = df1.reindex_like(df2)
    result = df1r == df2
    expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])
    assert_frame_equal(result,expected)

    # mixed column selection
    # GH 5639
    dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),
                        'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),
                        'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})
    expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)
    result = dfbool[['one', 'three', 'one']]
    check(result,expected)

    # multi-axis dups
    # GH 6121
    df = DataFrame(np.arange(25.).reshape(5,5),
                   index=['a', 'b', 'c', 'd', 'e'],
                   columns=['A', 'B', 'C', 'D', 'E'])
    z = df[['A', 'C', 'A']].copy()
    expected = z.ix[['a', 'c', 'a']]

    df = DataFrame(np.arange(25.).reshape(5,5),
                   index=['a', 'b', 'c', 'd', 'e'],
                   columns=['A', 'B', 'C', 'D', 'E'])
    z = df[['A', 'C', 'A']]
    result = z.ix[['a', 'c', 'a']]
    check(result,expected)
def test_column_dups_indexing2(self):
    """Column arithmetic on a non-unique row index (GH 8363), for
    both integer and datetime columns."""
    # GH 8363
    # datetime ops with a non-unique index
    df = DataFrame({'A' : np.arange(5,dtype='int64'),
                    'B' : np.arange(1,6,dtype='int64')},
                   index=[2,2,3,3,4])
    result = df.B-df.A
    expected = Series(1,index=[2,2,3,3,4])
    assert_series_equal(result,expected)

    df = DataFrame({'A' : date_range('20130101',periods=5), 'B' : date_range('20130101 09:00:00', periods=5)},index=[2,2,3,3,4])
    result = df.B-df.A
    expected = Series(Timedelta('9 hours'),index=[2,2,3,3,4])
    assert_series_equal(result,expected)
def test_insert_benchmark(self):
    """Repeated column insertion (lifted from the vb_suite
    frame_insert_columns benchmark); every column gets the same data."""
    # from the vb_suite/frame_methods/frame_insert_columns
    N = 10
    K = 5
    df = DataFrame(index=lrange(N))
    new_col = np.random.randn(N)
    for i in range(K):
        df[i] = new_col
    # each of the K inserted columns equals new_col
    expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))
    assert_frame_equal(df,expected)
def test_constructor_single_value(self):
    """A scalar data argument is broadcast over index x columns, with
    dtype inferred from the scalar; missing axes or an incompatible
    dtype raise."""
    # expecting single value upcasting here
    df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
    assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,
                                     df.columns))

    df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
    assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,
                                     df.columns))

    df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
    assert_frame_equal(df, DataFrame(np.array([['a', 'a'],
                                               ['a', 'a']],
                                              dtype=object),
                                     index=[1, 2],
                                     columns=['a', 'c']))

    # a scalar needs both index and columns to determine the shape
    self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
    self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
    # scalar incompatible with the requested dtype
    with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
        DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
    """Datetime dtype inference in the DataFrame constructor.

    Covers scalar Timestamps/datetimes, ndim-0 and ndim>0 ndarrays
    with explicit dtypes, datetime64 Series, date objects, tz-aware
    scalars/ranges, and dict construction preserving a tz index
    (GH 2809, 2810, 7594, 8411, 7822).

    Fix: ``Series.sort_index()`` is not in-place -- the original
    discarded its return value, so the intended order-normalization
    before ``assert_series_equal`` never happened.  The results are
    now reassigned.
    """
    intname = np.dtype(np.int_).name
    floatname = np.dtype(np.float_).name
    datetime64name = np.dtype('M8[ns]').name
    objectname = np.dtype(np.object_).name

    # single item: Timestamp and datetime both land in datetime64
    df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime(2001,1,2,0,0) },
                   index=np.arange(10))
    result = df.get_dtype_counts()
    expected = Series({'int64': 1, datetime64name: 2, objectname : 2})
    result = result.sort_index()
    expected = expected.sort_index()
    assert_series_equal(result, expected)

    # check with ndarray construction ndim==0 (e.g. we are passing a ndim 0 ndarray with a dtype specified)
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),
                    intname : np.array(1,dtype=intname)}, index=np.arange(10))
    result = df.get_dtype_counts()
    expected = { objectname : 1 }
    if intname == 'int64':
        expected['int64'] = 2
    else:
        expected['int64'] = 1
        expected[intname] = 1
    if floatname == 'float64':
        expected['float64'] = 2
    else:
        expected['float64'] = 1
        expected[floatname] = 1

    result = result.sort_index()
    expected = Series(expected).sort_index()
    assert_series_equal(result, expected)

    # check with ndarray construction ndim>0
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),
                    intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))
    result = df.get_dtype_counts()
    result = result.sort_index()
    assert_series_equal(result, expected)

    # GH 2809
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    datetime_s = Series(datetimes)
    self.assertEqual(datetime_s.dtype, 'M8[ns]')
    df = DataFrame({'datetime_s':datetime_s})
    result = df.get_dtype_counts()
    expected = Series({ datetime64name : 1 })
    result = result.sort_index()
    expected = expected.sort_index()
    assert_series_equal(result, expected)

    # GH 2810
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    dates = [ts.date() for ts in ind]
    df = DataFrame({'datetimes': datetimes, 'dates':dates})
    result = df.get_dtype_counts()
    expected = Series({ datetime64name : 1, objectname : 1 })
    result = result.sort_index()
    expected = expected.sort_index()
    assert_series_equal(result, expected)

    # GH 7594
    # don't coerce tz-aware
    import pytz
    tz = pytz.timezone('US/Eastern')
    dt = tz.localize(datetime(2012, 1, 1))

    df = DataFrame({'End Date': dt}, index=[0])
    self.assertEqual(df.iat[0,0],dt)
    assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))

    df = DataFrame([{'End Date': dt}])
    self.assertEqual(df.iat[0,0],dt)
    assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))

    # GH 8411
    # tz-less ranges stay naive; tz-aware ranges keep their tz
    dr = date_range('20130101',periods=3)
    df = DataFrame({ 'value' : dr})
    self.assertTrue(df.iat[0,0].tz is None)
    dr = date_range('20130101',periods=3,tz='UTC')
    df = DataFrame({ 'value' : dr})
    self.assertTrue(str(df.iat[0,0].tz) == 'UTC')
    dr = date_range('20130101',periods=3,tz='US/Eastern')
    df = DataFrame({ 'value' : dr})
    self.assertTrue(str(df.iat[0,0].tz) == 'US/Eastern')

    # GH 7822
    # preserve an index with a tz on dict construction
    i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern')

    expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) })
    df = DataFrame()
    df['a'] = i
    assert_frame_equal(df, expected)

    df = DataFrame( {'a' : i } )
    assert_frame_equal(df, expected)

    # multiples
    i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
    df = DataFrame( {'a' : i, 'b' : i_no_tz } )
    expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })
    assert_frame_equal(df, expected)
def test_constructor_with_datetime_tz(self):
    """datetime64-with-tz support through construction, dtypes,
    concat, select_dtypes, interleaving, astype, setitem and
    set_index/reset_index (GH 8260).

    Fixes: two two-argument ``assertTrue(x, msg)`` calls -- the second
    argument of assertTrue is the failure *message*, so those checks
    always passed -- are replaced with real dtype equality checks; the
    dtypes `expected` Series was also built but never compared, the
    comparison is added.
    """
    # 8260
    # support datetime64 with tz
    idx = Index(date_range('20130101',periods=3,tz='US/Eastern'),
                name='foo')
    dr = date_range('20130110',periods=3)

    # construction
    df = DataFrame({'A' : idx, 'B' : dr})
    # was assertTrue(dtype, 'M8[...') which always passed; compare for real
    self.assertEqual(str(df['A'].dtype), 'datetime64[ns, US/Eastern]')
    self.assertTrue(df['A'].name == 'A')
    assert_series_equal(df['A'],Series(idx,name='A'))
    assert_series_equal(df['B'],Series(dr,name='B'))

    # construction from dict
    df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5))
    assert_series_equal(df2.dtypes, Series(['datetime64[ns, US/Eastern]', 'datetime64[ns, CET]'], index=['A','B']))

    # dtypes
    tzframe = DataFrame({'A' : date_range('20130101',periods=3),
                         'B' : date_range('20130101',periods=3,tz='US/Eastern'),
                         'C' : date_range('20130101',periods=3,tz='CET')})
    tzframe.iloc[1,1] = pd.NaT
    tzframe.iloc[1,2] = pd.NaT
    result = tzframe.dtypes.sort_index()
    expected = Series([ np.dtype('datetime64[ns]'),
                        DatetimeTZDtype('datetime64[ns, US/Eastern]'),
                        DatetimeTZDtype('datetime64[ns, CET]') ],
                      ['A','B','C'])
    # previously built but never compared
    assert_series_equal(result, expected)

    # concat
    df3 = pd.concat([df2.A.to_frame(),df2.B.to_frame()],axis=1)
    assert_frame_equal(df2, df3)

    # select_dtypes
    # tz-aware columns are NOT matched by plain datetime64[ns]
    result = df3.select_dtypes(include=['datetime64[ns]'])
    expected = df3.reindex(columns=[])
    assert_frame_equal(result, expected)

    # this will select based on issubclass, and these are the same class
    result = df3.select_dtypes(include=['datetime64[ns, CET]'])
    expected = df3
    assert_frame_equal(result, expected)

    # from index
    idx2 = date_range('20130101',periods=3,tz='US/Eastern',name='foo')
    df2 = DataFrame(idx2)
    assert_series_equal(df2['foo'],Series(idx2,name='foo'))
    df2 = DataFrame(Series(idx2))
    assert_series_equal(df2['foo'],Series(idx2,name='foo'))

    idx2 = date_range('20130101',periods=3,tz='US/Eastern')
    df2 = DataFrame(idx2)
    assert_series_equal(df2[0],Series(idx2,name=0))
    df2 = DataFrame(Series(idx2))
    assert_series_equal(df2[0],Series(idx2,name=0))

    # interleave with object
    result = self.tzframe.assign(D = 'foo').values
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
                         ['foo','foo','foo']], dtype=object).T
    self.assert_numpy_array_equal(result, expected)

    # interleave with only datetime64[ns]
    result = self.tzframe.values
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T
    self.assert_numpy_array_equal(result, expected)

    # astype
    expected = np.array([[Timestamp('2013-01-01 00:00:00'),
                          Timestamp('2013-01-02 00:00:00'),
                          Timestamp('2013-01-03 00:00:00')],
                         [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
                         [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
                          pd.NaT,
                          Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T
    result = self.tzframe.astype(object)
    assert_frame_equal(result, DataFrame(expected, index=self.tzframe.index, columns=self.tzframe.columns))

    # astype to naive datetime64[ns] converts to UTC then drops the tz
    result = self.tzframe.astype('datetime64[ns]')
    expected = DataFrame({'A' : date_range('20130101',periods=3),
                          'B' : date_range('20130101',periods=3,tz='US/Eastern').tz_convert('UTC').tz_localize(None),
                          'C' : date_range('20130101',periods=3,tz='CET').tz_convert('UTC').tz_localize(None)})
    expected.iloc[1,1] = pd.NaT
    expected.iloc[1,2] = pd.NaT
    assert_frame_equal(result, expected)

    # str formatting
    result = self.tzframe.astype(str)
    expected = np.array([['2013-01-01', '2013-01-01 00:00:00-05:00',
                          '2013-01-01 00:00:00+01:00'],
                         ['2013-01-02', 'NaT', 'NaT'],
                         ['2013-01-03', '2013-01-03 00:00:00-05:00',
                          '2013-01-03 00:00:00+01:00']], dtype=object)
    self.assert_numpy_array_equal(result, expected)

    result = str(self.tzframe)
    self.assertTrue('0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00' in result)
    self.assertTrue('1 2013-01-02 NaT NaT' in result)
    self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00' in result)

    # setitem
    df['C'] = idx
    assert_series_equal(df['C'],Series(idx,name='C'))

    df['D'] = 'foo'
    df['D'] = idx
    assert_series_equal(df['D'],Series(idx,name='D'))
    del df['D']

    # assert that A & C are not sharing the same base (e.g. they
    # are copies)
    b1 = df._data.blocks[1]
    b2 = df._data.blocks[2]
    self.assertTrue(b1.values.equals(b2.values))
    self.assertFalse(id(b1.values.values.base) == id(b2.values.values.base))

    # with nan
    df2 = df.copy()
    df2.iloc[1,1] = pd.NaT
    df2.iloc[1,2] = pd.NaT
    result = df2['B']
    assert_series_equal(notnull(result), Series([True,False,True],name='B'))
    assert_series_equal(df2.dtypes, df.dtypes)

    # set/reset
    df = DataFrame({'A' : [0,1,2] }, index=idx)
    result = df.reset_index()
    # was assertTrue(dtype, 'M8[...') which always passed
    self.assertEqual(str(result['foo'].dtype), 'datetime64[ns, US/Eastern]')

    result = result.set_index('foo')
    # NOTE(review): df was just rebuilt with index=idx, so this check is
    # trivially true; presumably `result.index` was intended -- confirm
    tm.assert_index_equal(df.index,idx)
def test_constructor_for_list_with_dtypes(self):
    """dtype inference when constructing from lists of
    lists/ndarrays, scalars, and mixed object lists.

    Fix: the final ``result.sort_index()`` / ``expected.sort_index()``
    calls discarded their return value (``Series.sort_index()`` is not
    in-place), so the order-normalization never happened; they are now
    reassigned.
    """
    intname = np.dtype(np.int_).name
    floatname = np.dtype(np.float_).name
    datetime64name = np.dtype('M8[ns]').name
    objectname = np.dtype(np.object_).name

    # test list of lists/ndarrays
    df = DataFrame([np.arange(5) for x in range(5)])
    result = df.get_dtype_counts()
    expected = Series({'int64' : 5})
    # NOTE(review): result/expected are built but never compared here;
    # np.arange's default integer dtype is platform-dependent, which is
    # presumably why the assertion was left out -- confirm and either
    # assert or drop the dead computation

    df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])
    result = df.get_dtype_counts()
    expected = Series({'int32' : 5})
    # NOTE(review): same here -- computed but never asserted

    # overflow issue? (we always expect int64 upcasting here)
    df = DataFrame({'a' : [2**31,2**31+1]})
    result = df.get_dtype_counts()
    expected = Series({'int64' : 1 })
    assert_series_equal(result, expected)

    # GH #2751 (construction with no index specified), make sure we cast to platform values
    df = DataFrame([1, 2])
    result = df.get_dtype_counts()
    expected = Series({'int64': 1 })
    assert_series_equal(result, expected)

    df = DataFrame([1.,2.])
    result = df.get_dtype_counts()
    expected = Series({'float64' : 1 })
    assert_series_equal(result, expected)

    df = DataFrame({'a' : [1, 2]})
    result = df.get_dtype_counts()
    expected = Series({'int64' : 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a' : [1., 2.]})
    result = df.get_dtype_counts()
    expected = Series({'float64' : 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a' : 1 }, index=lrange(3))
    result = df.get_dtype_counts()
    expected = Series({'int64': 1})
    assert_series_equal(result, expected)

    df = DataFrame({'a' : 1. }, index=lrange(3))
    result = df.get_dtype_counts()
    expected = Series({'float64': 1 })
    assert_series_equal(result, expected)

    # with object list
    df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],
                    'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],
                    'e' : [1.,2,4.,7]})
    result = df.get_dtype_counts()
    expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})
    # reassign: sort_index returns a new Series
    result = result.sort_index()
    expected = expected.sort_index()
    assert_series_equal(result, expected)
def test_not_hashable(self):
    """DataFrames are mutable containers and therefore unhashable."""
    # both a populated and an empty frame must refuse to hash
    for frame in (pd.DataFrame([1]), self.empty):
        self.assertRaises(TypeError, hash, frame)
def test_timedeltas(self):
    """dtype bookkeeping when datetime64 and timedelta64 columns mix,
    including datetime + timedelta arithmetic."""
    df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
                        B = Series([ timedelta(days=i) for i in range(3) ])))
    result = df.get_dtype_counts().sort_values()
    expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 }).sort_values()
    assert_series_equal(result, expected)

    # datetime + timedelta -> a second datetime64 column
    df['C'] = df['A'] + df['B']
    expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 }).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)

    # mixed int types
    df['D'] = 1
    expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 }).sort_values()
    result = df.get_dtype_counts().sort_values()
    assert_series_equal(result, expected)
def test_operators_timedelta64(self):
    """Reductions (min/max), abs, and mixed-dtype behavior on
    timedelta64 columns produced by datetime subtraction."""
    from datetime import timedelta
    df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),
                        B = date_range('2012-1-2', periods=3, freq='D'),
                        C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))
    # A-C is positive and increasing, A-B is a constant -1 day
    diffs = DataFrame(dict(A = df['A']-df['C'],
                           B = df['A']-df['B']))

    # min
    result = diffs.min()
    self.assertEqual(result[0], diffs.ix[0,'A'])
    self.assertEqual(result[1], diffs.ix[0,'B'])

    result = diffs.min(axis=1)
    self.assertTrue((result == diffs.ix[0,'B']).all() == True)

    # max
    result = diffs.max()
    self.assertEqual(result[0], diffs.ix[2,'A'])
    self.assertEqual(result[1], diffs.ix[2,'B'])

    result = diffs.max(axis=1)
    self.assertTrue((result == diffs['A']).all() == True)

    # abs
    result = diffs.abs()
    result2 = abs(diffs)
    expected = DataFrame(dict(A = df['A']-df['C'],
                              B = df['B']-df['A']))
    assert_frame_equal(result,expected)
    assert_frame_equal(result2, expected)

    # mixed frame
    mixed = diffs.copy()
    mixed['C'] = 'foo'
    mixed['D'] = 1
    mixed['E'] = 1.
    mixed['F'] = Timestamp('20130101')

    # results in an object array
    from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
    result = mixed.min()
    expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),
                       _coerce_scalar_to_timedelta_type(timedelta(days=-1)),
                       'foo',
                       1,
                       1.0,
                       Timestamp('20130101')],
                      index=mixed.columns)
    assert_series_equal(result,expected)

    # excludes numeric
    result = mixed.min(axis=1)
    expected = Series([1, 1, 1.],index=[0, 1, 2])
    assert_series_equal(result,expected)

    # works when only those columns are selected
    result = mixed[['A','B']].min(1)
    expected = Series([ timedelta(days=-1) ] * 3)
    assert_series_equal(result,expected)

    result = mixed[['A','B']].min()
    expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])
    assert_series_equal(result,expected)

    # GH 3106
    df = DataFrame({'time' : date_range('20130102',periods=5),
                    'time2' : date_range('20130105',periods=5) })
    df['off1'] = df['time2']-df['time']
    self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')

    df['off2'] = df['time']-df['time2']
    # consolidation must not corrupt the timedelta dtypes
    df._consolidate_inplace()
    self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
    self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_datetimelike_setitem_with_inference(self):
    """m8[ns]/M8[ns] arrays and NaT scalars assigned through the
    various indexers ([], .loc, .ix, partial slices) infer the
    correct timedelta64/datetime64 dtypes (GH 7592)."""
    # GH 7592
    # assignment of timedeltas with NaT
    one_hour = timedelta(hours=1)
    df = DataFrame(index=date_range('20130101',periods=4))
    df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')
    df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')
    df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')
    df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')
    df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')
    # a bare NaT column, then partially filled
    df['F'] = np.timedelta64('NaT')
    df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')
    df.ix[-3:,'G'] = date_range('20130101',periods=3)
    df['H'] = np.datetime64('NaT')
    result = df.dtypes
    expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
    assert_series_equal(result,expected)
def test_setitem_datetime_coercion(self):
    """Datetime-like scalars assigned via .loc into a datetime64
    column are coerced to Timestamp (GH 1048)."""
    frame = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})

    # np.datetime64 scalar broadcast over a label slice
    frame.loc[0:1, 'c'] = np.datetime64('2008-08-08')
    for row in (0, 1):
        self.assertEqual(pd.Timestamp('2008-08-08'), frame.loc[row, 'c'])

    # plain datetime.date on a single row
    frame.loc[2, 'c'] = date(2005, 5, 5)
    self.assertEqual(pd.Timestamp('2005-05-05'), frame.loc[2, 'c'])
def test_new_empty_index(self):
    """Two separately-constructed empty frames must not share index
    objects: naming one index cannot leak into the other."""
    first = DataFrame(randn(0, 3))
    second = DataFrame(randn(0, 3))

    first.index.name = 'foo'
    self.assertIsNone(second.index.name)
def test_astype(self):
    """Whole-frame astype: numeric casts, string->int parsing, and
    mixed-dtype casts checked via the _check_cast helper."""
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)

    casted = self.frame.astype(np.int32)
    expected = DataFrame(self.frame.values.astype(np.int32),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)

    # a numeric-looking string column is parsed by astype(int)
    self.frame['foo'] = '5'
    casted = self.frame.astype(int)
    expected = DataFrame(self.frame.values.astype(int),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(casted, expected)

    # mixed casting
    def _check_cast(df, v):
        # every column of df must have ended up with dtype name v
        self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)

    mn = self.all_mixed._get_numeric_data().copy()
    mn['little_float'] = np.array(12345.,dtype='float16')
    mn['big_float'] = np.array(123456789101112.,dtype='float64')

    casted = mn.astype('float64')
    _check_cast(casted, 'float64')

    casted = mn.astype('int64')
    _check_cast(casted, 'int64')

    casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')
    _check_cast(casted, 'float32')

    casted = mn.reindex(columns = ['little_float']).astype('float16')
    _check_cast(casted, 'float16')

    casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')
    _check_cast(casted, 'float16')

    casted = mn.astype('float32')
    _check_cast(casted, 'float32')

    casted = mn.astype('int32')
    _check_cast(casted, 'int32')

    # to object
    casted = mn.astype('O')
    _check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
    """astype(..., raise_on_error=False) leaves the non-castable
    string column untouched instead of raising."""
    # same scenario for both a python int and a numpy int dtype
    for target_dtype in (int, np.int32):
        df = self.frame.copy()
        expected = self.frame.astype(target_dtype)

        df['string'] = 'foo'
        result = df.astype(target_dtype, raise_on_error = False)

        expected['string'] = 'foo'
        assert_frame_equal(result, expected)
def test_astype_with_view(self):
    """Smoke test for astype with copy=False (view semantics).

    The `casted` results are intentionally unused -- this only checks
    that none of the cast combinations raise.
    """
    tf = self.mixed_float.reindex(columns = ['A','B','C'])

    casted = tf.astype(np.int64)

    casted = tf.astype(np.float32)

    # this is the only real reason to do it this way
    tf = np.round(self.frame).astype(np.int32)
    casted = tf.astype(np.float32, copy = False)

    tf = self.frame.astype(np.float64)
    casted = tf.astype(np.int64, copy = False)
def test_astype_cast_nan_int(self):
    """Casting a float column that contains NaN to int must raise --
    NaN has no integer representation."""
    df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]})
    with self.assertRaises(ValueError):
        df.astype(np.int64)
def test_astype_str(self):
    """astype(str/unicode) formatting for datetime, tz-aware
    datetime, timedelta, int, float and NaN values (GH 9757, 11302)."""
    # GH9757
    a = Series(date_range('2010-01-04', periods=5))
    b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern'))
    c = Series([Timedelta(x, unit='d') for x in range(5)])
    d = Series(range(5))
    e = Series([0.0, 0.2, 0.4, 0.6, 0.8])

    df = DataFrame({'a' : a, 'b' : b, 'c' : c, 'd' : d, 'e' : e})

    # datetimelike
    # Test str and unicode on python 2.x and just str on python 3.x
    for tt in set([str, compat.text_type]):
        result = df.astype(tt)

        expected = DataFrame({
            'a' : list(map(tt, map(lambda x: Timestamp(x)._date_repr, a._values))),
            'b' : list(map(tt, map(Timestamp, b._values))),
            'c' : list(map(tt, map(lambda x: Timedelta(x)._repr_base(format='all'), c._values))),
            'd' : list(map(tt, d._values)),
            'e' : list(map(tt, e._values)),
        })

        assert_frame_equal(result, expected)

    # float/nan
    # 11302
    # consistency in astype(str)
    for tt in set([str, compat.text_type]):
        result = DataFrame([np.NaN]).astype(tt)
        expected = DataFrame(['nan'])
        assert_frame_equal(result, expected)

        result = DataFrame([1.12345678901234567890]).astype(tt)
        expected = DataFrame(['1.12345678901'])
        assert_frame_equal(result, expected)
def test_array_interface(self):
    """A numpy ufunc applied to a frame yields a frame of the same
    type that reuses the index/columns and matches element-wise
    apply."""
    sqrt_frame = np.sqrt(self.frame)
    tm.assertIsInstance(sqrt_frame, type(self.frame))
    # the original axis objects are reused, not copied
    self.assertIs(sqrt_frame.index, self.frame.index)
    self.assertIs(sqrt_frame.columns, self.frame.columns)

    assert_frame_equal(sqrt_frame, self.frame.apply(np.sqrt))
    def test_pickle(self):
        """Round-trip pickling preserves mixed, empty, and tz-aware frames."""
        unpickled = self.round_trip_pickle(self.mixed_frame)
        assert_frame_equal(self.mixed_frame, unpickled)
        # buglet
        self.mixed_frame._data.ndim
        # empty
        unpickled = self.round_trip_pickle(self.empty)
        repr(unpickled)
        # tz frame
        unpickled = self.round_trip_pickle(self.tzframe)
        assert_frame_equal(self.tzframe, unpickled)
    def test_to_dict(self):
        """``to_dict`` round-trips data for every orient: default (dict),
        'l' (list), 's' (series), 'sp' (split), 'r' (records), 'i' (index)."""
        test_data = {
            'A': {'1': 1, '2': 2},
            'B': {'1': '1', '2': '2', '3': '3'},
        }
        recons_data = DataFrame(test_data).to_dict()
        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                self.assertEqual(v2, recons_data[k][k2])
        recons_data = DataFrame(test_data).to_dict("l")
        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                # list orient is positional, so translate the string key
                self.assertEqual(v2, recons_data[k][int(k2) - 1])
        recons_data = DataFrame(test_data).to_dict("s")
        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                self.assertEqual(v2, recons_data[k][k2])
        recons_data = DataFrame(test_data).to_dict("sp")
        expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
                          'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
        tm.assert_almost_equal(recons_data, expected_split)
        recons_data = DataFrame(test_data).to_dict("r")
        expected_records = [{'A': 1.0, 'B': '1'},
                            {'A': 2.0, 'B': '2'},
                            {'A': nan, 'B': '3'}]
        tm.assert_almost_equal(recons_data, expected_records)
        # GH10844
        recons_data = DataFrame(test_data).to_dict("i")
        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                # index orient nests row-first: {index: {column: value}}
                self.assertEqual(v2, recons_data[k2][k])
    def test_to_dict_timestamp(self):
        """``to_dict`` must emit Timestamps, not raw np.datetime64, for the
        'records'/'series'/'split' orients on datetime64[ns] columns (GH 11247)."""
        # GH11247
        # split/records producing np.datetime64 rather than Timestamps
        # on datetime64[ns] dtypes only
        tsmp = Timestamp('20130101')
        test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
        test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
        expected_records = [{'A': tsmp, 'B': tsmp},
                            {'A': tsmp, 'B': tsmp}]
        expected_records_mixed = [{'A': tsmp, 'B': 1},
                                  {'A': tsmp, 'B': 2}]
        tm.assert_almost_equal(test_data.to_dict(
            orient='records'), expected_records)
        tm.assert_almost_equal(test_data_mixed.to_dict(
            orient='records'), expected_records_mixed)
        expected_series = {
            'A': Series([tsmp, tsmp]),
            'B': Series([tsmp, tsmp]),
        }
        expected_series_mixed = {
            'A': Series([tsmp, tsmp]),
            'B': Series([1, 2]),
        }
        tm.assert_almost_equal(test_data.to_dict(
            orient='series'), expected_series)
        tm.assert_almost_equal(test_data_mixed.to_dict(
            orient='series'), expected_series_mixed)
        expected_split = {
            'index': [0, 1],
            'data': [[tsmp, tsmp],
                     [tsmp, tsmp]],
            'columns': ['A', 'B']
        }
        expected_split_mixed = {
            'index': [0, 1],
            'data': [[tsmp, 1],
                     [tsmp, 2]],
            'columns': ['A', 'B']
        }
        tm.assert_almost_equal(test_data.to_dict(
            orient='split'), expected_split)
        tm.assert_almost_equal(test_data_mixed.to_dict(
            orient='split'), expected_split_mixed)
    def test_to_dict_invalid_orient(self):
        """An unrecognized ``orient`` string must raise ValueError."""
        df = DataFrame({'A':[0, 1]})
        self.assertRaises(ValueError, df.to_dict, orient='xinvalid')
    def test_to_records_dt64(self):
        """``to_records`` keeps Timestamps by default and raw datetime64
        values when ``convert_datetime64=False``."""
        df = DataFrame([["one", "two", "three"],
                        ["four", "five", "six"]],
                       index=date_range("2012-01-01", "2012-01-02"))
        self.assertEqual(df.to_records()['index'][0], df.index[0])
        rs = df.to_records(convert_datetime64=False)
        self.assertEqual(rs['index'][0], df.index.values[0])
    def test_to_records_with_multindex(self):
        """``to_records(index=True)`` flattens a MultiIndex into level_N
        fields containing the level labels (GH 3189)."""
        # GH3189
        index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
                 ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
        data = np.zeros((8, 4))
        df = DataFrame(data, index=index)
        r = df.to_records(index=True)['level_0']
        self.assertTrue('bar' in r)
        self.assertTrue('one' not in r)
    def test_to_records_with_Mapping_type(self):
        """``from_records`` accepts abc.Mapping instances (here, an email
        Message registered as a Mapping) without error."""
        import email
        from email.parser import Parser
        import collections
        collections.Mapping.register(email.message.Message)
        headers = Parser().parsestr('From: <user@example.com>\n'
                                    'To: <someone_else@example.com>\n'
                                    'Subject: Test message\n'
                                    '\n'
                                    'Body would go here\n')
        frame = DataFrame.from_records([headers])
        all( x in frame for x in ['Type','Subject','From'])
    def test_from_records_to_records(self):
        """Round-trip between numpy record arrays and DataFrames, covering
        explicit indexes, length mismatches, and index inclusion/exclusion."""
        # from numpy documentation
        arr = np.zeros((2,), dtype=('i4,f4,a10'))
        arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
        frame = DataFrame.from_records(arr)
        index = np.arange(len(arr))[::-1]
        indexed_frame = DataFrame.from_records(arr, index=index)
        self.assert_numpy_array_equal(indexed_frame.index, index)
        # without names, it should go to last ditch
        arr2 = np.zeros((2,3))
        tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
        # wrong length
        msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
        with assertRaisesRegexp(ValueError, msg):
            DataFrame.from_records(arr, index=index[:-1])
        indexed_frame = DataFrame.from_records(arr, index='f1')
        # what to do?
        records = indexed_frame.to_records()
        self.assertEqual(len(records.dtype.names), 3)
        records = indexed_frame.to_records(index=False)
        self.assertEqual(len(records.dtype.names), 2)
        self.assertNotIn('index', records.dtype.names)
    def test_from_records_nones(self):
        """None entries in record tuples become NaN."""
        tuples = [(1, 2, None, 3),
                  (1, 2, None, 3),
                  (None, 2, 5, 3)]
        df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
        self.assertTrue(np.isnan(df['c'][0]))
    def test_from_records_iterator(self):
        """``from_records`` consumes an iterator and honors ``nrows``,
        preserving the record array's per-field dtypes."""
        arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],
                       dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])
        df = DataFrame.from_records(iter(arr), nrows=2)
        xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
                        'u': np.array([1.0, 3.0], dtype=np.float32),
                        'y': np.array([2, 4], dtype=np.int64),
                        'z': np.array([2, 4], dtype=np.int32)})
        assert_frame_equal(df.reindex_like(xp), xp)
        # no dtypes specified here, so just compare with the default
        arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
        df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
                                    nrows=2)
        assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)
    def test_from_records_tuples_generator(self):
        """``from_records`` accepts a generator of tuples."""
        def tuple_generator(length):
            for i in range(length):
                letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                yield (i, letters[i % len(letters)], i/length)
        columns_names = ['Integer', 'String', 'Float']
        # build per-column lists from a fresh generator for the expected frame
        columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]
        data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
        expected = DataFrame(data, columns=columns_names)
        generator = tuple_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        assert_frame_equal(result, expected)
    def test_from_records_lists_generator(self):
        """``from_records`` accepts a generator of lists (not just tuples)."""
        def list_generator(length):
            for i in range(length):
                letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                yield [i, letters[i % len(letters)], i/length]
        columns_names = ['Integer', 'String', 'Float']
        # build per-column lists from a fresh generator for the expected frame
        columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]
        data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
        expected = DataFrame(data, columns=columns_names)
        generator = list_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        assert_frame_equal(result, expected)
    def test_from_records_columns_not_modified(self):
        """``from_records`` must not mutate the caller's ``columns`` list
        even when one column is consumed as the index."""
        tuples = [(1, 2, 3),
                  (1, 2, 3),
                  (2, 5, 3)]
        columns = ['a', 'b', 'c']
        original_columns = list(columns)
        df = DataFrame.from_records(tuples, columns=columns, index='a')
        self.assertEqual(columns, original_columns)
    def test_from_records_decimal(self):
        """Decimal values stay object dtype unless ``coerce_float=True``,
        in which case they become float64 with None -> NaN."""
        from decimal import Decimal
        tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
        df = DataFrame.from_records(tuples, columns=['a'])
        self.assertEqual(df['a'].dtype, object)
        df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
        self.assertEqual(df['a'].dtype, np.float64)
        self.assertTrue(np.isnan(df['a'].values[-1]))
    def test_from_records_duplicates(self):
        """Duplicate column labels are preserved by ``from_records``."""
        result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
                                        columns=['a', 'b', 'a'])
        expected = DataFrame([(1, 2, 3), (4, 5, 6)],
                             columns=['a', 'b', 'a'])
        assert_frame_equal(result, expected)
    def test_from_records_set_index_name(self):
        """``index=`` on from_records names the resulting (Multi)Index,
        even with missing fields in some records."""
        def create_dict(order_id):
            return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
                    'price': np.random.randint(1, 10)}
        documents = [create_dict(i) for i in range(10)]
        # demo missing data
        documents.append({'order_id': 10, 'quantity': 5})
        result = DataFrame.from_records(documents, index='order_id')
        self.assertEqual(result.index.name, 'order_id')
        # MultiIndex
        result = DataFrame.from_records(documents,
                                        index=['order_id', 'quantity'])
        self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
    def test_from_records_empty(self):
        """Empty record lists still honor the requested columns, including
        duplicated labels (GH 3562)."""
        # 3562
        result = DataFrame.from_records([], columns=['a','b','c'])
        expected = DataFrame(columns=['a','b','c'])
        assert_frame_equal(result, expected)
        result = DataFrame.from_records([], columns=['a','b','b'])
        expected = DataFrame(columns=['a','b','b'])
        assert_frame_equal(result, expected)
    def test_from_records_empty_with_nonempty_fields_gh3682(self):
        """A structured array with named fields keeps the ``index=`` name
        and remaining columns, even with zero records (GH 3682)."""
        a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
        df = DataFrame.from_records(a, index='id')
        assert_numpy_array_equal(df.index, Index([1], name='id'))
        self.assertEqual(df.index.name, 'id')
        assert_numpy_array_equal(df.columns, Index(['value']))
        b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
        df = DataFrame.from_records(b, index='id')
        assert_numpy_array_equal(df.index, Index([], name='id'))
        self.assertEqual(df.index.name, 'id')
    def test_from_records_with_datetimes(self):
        """from_records handles nulls in datetime64 record arrays, including
        coercion from non-ns units (GH 6140); skipped on platforms where
        numpy cannot build the recarray."""
        if sys.version < LooseVersion('2.7'):
            raise nose.SkipTest('rec arrays dont work properly with py2.6')
        # this may fail on certain platforms because of a numpy issue
        # related GH6140
        if not is_little_endian():
            raise nose.SkipTest("known failure of test on non-little endian")
        # construction with a null in a recarray
        # GH 6140
        expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})
        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [('EXPIRY', '<M8[ns]')]
        try:
            recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
        except (ValueError):
            raise nose.SkipTest("known failure of numpy rec array creation")
        result = DataFrame.from_records(recarray)
        assert_frame_equal(result,expected)
        # coercion should work too
        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [('EXPIRY', '<M8[m]')]
        recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
        result = DataFrame.from_records(recarray)
        assert_frame_equal(result,expected)
    def test_to_records_floats(self):
        """Smoke test: ``to_records`` on an all-float frame must not raise."""
        df = DataFrame(np.random.rand(10, 10))
        df.to_records()
    # NOTE: method name has a historical typo ("recods"); kept as-is since
    # test discovery is by name.
    def test_to_recods_index_name(self):
        """``to_records`` uses the index name when set, 'index' otherwise,
        and 'level_N' for unnamed MultiIndex levels."""
        df = DataFrame(np.random.randn(3, 3))
        df.index.name = 'X'
        rs = df.to_records()
        self.assertIn('X', rs.dtype.fields)
        df = DataFrame(np.random.randn(3, 3))
        rs = df.to_records()
        self.assertIn('index', rs.dtype.fields)
        df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
        df.index.names = ['A', None]
        rs = df.to_records()
        self.assertIn('level_0', rs.dtype.fields)
    def test_join_str_datetime(self):
        """``join(on=...)`` works when the right frame has string index
        labels and datetime column labels."""
        str_dates = ['20120209', '20120222']
        dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
        A = DataFrame(str_dates, index=lrange(2), columns=['aa'])
        C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
        tst = A.join(C, on='aa')
        self.assertEqual(len(tst.columns), 3)
    def test_join_multiindex_leftright(self):
        """Joining a MultiIndexed frame with a single-level-indexed frame is
        symmetric: left join one way equals right join the other (GH 10741)."""
        # GH 10741
        df1 = pd.DataFrame([['a', 'x', 0.471780], ['a','y', 0.774908],
                            ['a', 'z', 0.563634], ['b', 'x', -0.353756],
                            ['b', 'y', 0.368062], ['b', 'z', -1.721840],
                            ['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]],
                           columns=['first', 'second', 'value1']).set_index(['first', 'second'])
        df2 = pd.DataFrame([['a', 10], ['b', 20]], columns=['first', 'value2']).set_index(['first'])
        exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
                            [-0.353756, 20], [0.368062, 20], [-1.721840, 20],
                            [1.000000, np.nan], [2.000000, np.nan], [3.000000, np.nan]],
                           index=df1.index, columns=['value1', 'value2'])
        # these must be the same results (but columns are flipped)
        tm.assert_frame_equal(df1.join(df2, how='left'), exp)
        tm.assert_frame_equal(df2.join(df1, how='right'), exp[['value2', 'value1']])
        exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']],
                                             names=['first', 'second'])
        exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10],
                            [-0.353756, 20], [0.368062, 20], [-1.721840, 20]],
                           index=exp_idx, columns=['value1', 'value2'])
        tm.assert_frame_equal(df1.join(df2, how='right'), exp)
        tm.assert_frame_equal(df2.join(df1, how='left'), exp[['value2', 'value1']])
    def test_from_records_sequencelike(self):
        """from_records on tuples, lists, and recarrays: dtype preservation
        (recarrays only), the ``exclude`` parameter, and empty inputs."""
        df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
                        'A1': np.array(np.random.randn(6), dtype = np.float64),
                        'B' : np.array(np.arange(6), dtype = np.int64),
                        'C' : ['foo'] * 6,
                        'D' : np.array([True, False] * 3, dtype=bool),
                        'E' : np.array(np.random.randn(6), dtype = np.float32),
                        'E1': np.array(np.random.randn(6), dtype = np.float32),
                        'F' : np.array(np.arange(6), dtype = np.int32) })
        # this is actually tricky to create the recordlike arrays and have the dtypes be intact
        blocks = df.blocks
        tuples = []
        columns = []
        dtypes = []
        for dtype, b in compat.iteritems(blocks):
            columns.extend(b.columns)
            dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])
        for i in range(len(df.index)):
            tup = []
            for _, b in compat.iteritems(blocks):
                tup.extend(b.iloc[i].values)
            tuples.append(tuple(tup))
        recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
        recarray2 = df.to_records()
        lists = [list(x) for x in tuples]
        # tuples (lose the dtype info)
        result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)
        # created recarray and with to_records recarray (have dtype info)
        result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)
        result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)
        # list of tupels (no dtype info)
        result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)
        assert_frame_equal(result, df, check_dtype=False)
        assert_frame_equal(result2, df)
        assert_frame_equal(result3, df)
        assert_frame_equal(result4, df, check_dtype=False)
        # tuples is in the order of the columns
        result = DataFrame.from_records(tuples)
        self.assert_numpy_array_equal(result.columns, lrange(8))
        # test exclude parameter & we are casting the results here (as we don't have dtype info to recover)
        columns_to_test = [ columns.index('C'), columns.index('E1') ]
        exclude = list(set(range(8))-set(columns_to_test))
        result = DataFrame.from_records(tuples, exclude=exclude)
        result.columns = [ columns[i] for i in sorted(columns_to_test) ]
        assert_series_equal(result['C'], df['C'])
        assert_series_equal(result['E1'], df['E1'].astype('float64'))
        # empty case
        result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
        self.assertEqual(len(result), 0)
        self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])
        result = DataFrame.from_records([])
        self.assertEqual(len(result), 0)
        self.assertEqual(len(result.columns), 0)
    def test_from_records_dictlike(self):
        """from_records on dict-of-Series and dict-of-ndarray inputs, with
        and without explicit columns, reproduces the source frame."""
        df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
                        'A1': np.array(np.random.randn(6), dtype = np.float64),
                        'B' : np.array(np.arange(6), dtype = np.int64),
                        'C' : ['foo'] * 6,
                        'D' : np.array([True, False] * 3, dtype=bool),
                        'E' : np.array(np.random.randn(6), dtype = np.float32),
                        'E1': np.array(np.random.randn(6), dtype = np.float32),
                        'F' : np.array(np.arange(6), dtype = np.int32) })
        # collect column names in block order for the explicit-columns calls
        columns = []
        for dtype, b in compat.iteritems(df.blocks):
            columns.extend(b.columns)
        asdict = dict((x, y) for x, y in compat.iteritems(df))
        asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
        results = []
        results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
        results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))
        results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))
        for r in results:
            assert_frame_equal(r, df)
    def test_from_records_with_index_data(self):
        """An array passed as ``index=`` becomes the index verbatim."""
        df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
        data = np.random.randn(10)
        df1 = DataFrame.from_records(df, index=data)
        assert(df1.index.equals(Index(data)))
    def test_from_records_bad_index_column(self):
        """``index=`` accepts a column label (str or single-item list); an
        unknown label raises ValueError/KeyError."""
        df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
        df1 = DataFrame.from_records(df, index=['C'])
        assert(df1.index.equals(Index(df.C)))
        df1 = DataFrame.from_records(df, index='C')
        assert(df1.index.equals(Index(df.C)))
        self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
        self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
    def test_from_records_non_tuple(self):
        """Arbitrary sequence-like records (indexable + iterable objects)
        are treated the same as tuples."""
        class Record(object):
            def __init__(self, *args):
                self.args = args
            def __getitem__(self, i):
                return self.args[i]
            def __iter__(self):
                return iter(self.args)
        recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
        tups = lmap(tuple, recs)
        result = DataFrame.from_records(recs)
        expected = DataFrame.from_records(tups)
        assert_frame_equal(result, expected)
    def test_from_records_len0_with_columns(self):
        """Zero records with an ``index=`` column: that column is consumed as
        the (named, empty) index, leaving the remaining columns."""
        result = DataFrame.from_records([], index='foo',
                                        columns=['foo', 'bar'])
        self.assertTrue(np.array_equal(result.columns, ['bar']))
        self.assertEqual(len(result), 0)
        self.assertEqual(result.index.name, 'foo')
    def test_get_agg_axis(self):
        """``_get_agg_axis`` maps 0 -> columns, 1 -> index, else ValueError."""
        cols = self.frame._get_agg_axis(0)
        self.assertIs(cols, self.frame.columns)
        idx = self.frame._get_agg_axis(1)
        self.assertIs(idx, self.frame.index)
        self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
    def test_nonzero(self):
        """``DataFrame.empty``: True only with no rows/columns; a frame with
        rows but its columns deleted down to one is still non-empty."""
        self.assertTrue(self.empty.empty)
        self.assertFalse(self.frame.empty)
        self.assertFalse(self.mixed_frame.empty)
        # corner case
        df = DataFrame({'A': [1., 2., 3.],
                        'B': ['a', 'b', 'c']},
                       index=np.arange(3))
        del df['A']
        self.assertFalse(df.empty)
    def test_repr_empty(self):
        """Smoke test: repr of empty and columnless frames must not raise."""
        buf = StringIO()
        foo = repr(self.empty)
        frame = DataFrame(index=np.arange(1000))
        foo = repr(frame)
    def test_repr_mixed(self):
        """Smoke test: repr and info() on a mixed-dtype frame must not raise."""
        buf = StringIO()
        foo = repr(self.mixed_frame)
        self.mixed_frame.info(verbose=False, buf=buf)
    @slow
    def test_repr_mixed_big(self):
        """Smoke test: repr of a large mixed frame with NaNs must not raise."""
        # big mixed
        biggie = DataFrame({'A': randn(200),
                            'B': tm.makeStringIndex(200)},
                           index=lrange(200))
        biggie.loc[:20,'A'] = nan
        biggie.loc[:20,'B'] = nan
        foo = repr(biggie)
    def test_repr(self):
        """Smoke test repr/info on several frames, and verify that control
        characters in labels/values are escaped out of the repr."""
        buf = StringIO()
        foo = repr(self.frame)
        self.frame.info(verbose=False, buf=buf)
        self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
        self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
        # no columns or index
        no_index = DataFrame(columns=[0, 1, 3])
        foo = repr(no_index)
        self.empty.info(buf=buf)
        # \n, \r, \t must not leak into the rendered output
        df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
        self.assertFalse("\t" in repr(df))
        self.assertFalse("\r" in repr(df))
        self.assertFalse("a\n" in repr(df))
    def test_repr_dimensions(self):
        """'display.show_dimensions' controls the 'N rows x M columns'
        footer; 'truncate' omits it when the frame is not truncated."""
        df = DataFrame([[1, 2,], [3, 4]])
        with option_context('display.show_dimensions', True):
            self.assertTrue("2 rows x 2 columns" in repr(df))
        with option_context('display.show_dimensions', False):
            self.assertFalse("2 rows x 2 columns" in repr(df))
        with option_context('display.show_dimensions', 'truncate'):
            self.assertFalse("2 rows x 2 columns" in repr(df))
    @slow
    def test_repr_big(self):
        """Smoke test: repr of a 200x4 frame must not raise."""
        buf = StringIO()
        biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
                           index=lrange(200))
        foo = repr(biggie)
    def test_repr_unsortable(self):
        """repr of a frame with unsortable (mixed str/datetime) column labels
        must not raise, under several display option settings."""
        # columns are not sortable
        import warnings
        warn_filters = warnings.filters
        warnings.filterwarnings('ignore',
                                category=FutureWarning,
                                module=".*format")
        unsortable = DataFrame({'foo': [1] * 50,
                                datetime.today(): [1] * 50,
                                'bar': ['bar'] * 50,
                                datetime.today(
                                ) + timedelta(1): ['bar'] * 50},
                               index=np.arange(50))
        foo = repr(unsortable)
        fmt.set_option('display.precision', 3, 'display.column_space', 10)
        repr(self.frame)
        fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
        repr(self.frame)
        fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
        repr(self.frame)
        # restore global state mutated above
        self.reset_display_options()
        warnings.filters = warn_filters
    def test_repr_unicode(self):
        """repr of a frame holding unicode strings keeps the header line
        intact."""
        uval = u('\u03c3\u03c3\u03c3\u03c3')
        bval = uval.encode('utf-8')
        df = DataFrame({'A': [uval, uval]})
        result = repr(df)
        ex_top = '      A'
        self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
        df = DataFrame({'A': [uval, uval]})
        result = repr(df)
        self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
    def test_unicode_string_with_unicode(self):
        """str()/unicode() of a frame with non-ASCII data must not raise."""
        df = DataFrame({'A': [u("\u05d0")]})
        if compat.PY3:
            str(df)
        else:
            compat.text_type(df)
    def test_bytestring_with_unicode(self):
        """bytes()/str() of a frame with non-ASCII data must not raise."""
        df = DataFrame({'A': [u("\u05d0")]})
        if compat.PY3:
            bytes(df)
        else:
            str(df)
    def test_very_wide_info_repr(self):
        """Smoke test: repr of a frame with 20 random-string columns."""
        df = DataFrame(np.random.randn(10, 20),
                       columns=tm.rands_array(10, 20))
        repr(df)
    def test_repr_column_name_unicode_truncation_bug(self):
        """A very long cell containing non-ASCII bytes must not truncate
        away the column name in the repr."""
        df = DataFrame({'Id': [7117434],
                        'StringCol': ('Is it possible to modify drop plot code'
                                      ' so that the output graph is displayed '
                                      'in iphone simulator, Is it possible to '
                                      'modify drop plot code so that the '
                                      'output graph is \xe2\x80\xa8displayed '
                                      'in iphone simulator.Now we are adding '
                                      'the CSV file externally. I want to Call'
                                      ' the File through the code..')})
        result = repr(df)
        self.assertIn('StringCol', result)
    def test_head_tail(self):
        """head/tail equal positional slices, including n=0 (whole frame),
        negative n, a float index, and the empty frame."""
        assert_frame_equal(self.frame.head(), self.frame[:5])
        assert_frame_equal(self.frame.tail(), self.frame[-5:])
        assert_frame_equal(self.frame.head(0), self.frame)
        assert_frame_equal(self.frame.tail(0), self.frame)
        assert_frame_equal(self.frame.head(-1), self.frame[:-1])
        assert_frame_equal(self.frame.tail(-1), self.frame[1:])
        assert_frame_equal(self.frame.head(1), self.frame[:1])
        assert_frame_equal(self.frame.tail(1), self.frame[-1:])
        # with a float index
        df = self.frame.copy()
        df.index = np.arange(len(self.frame)) + 0.1
        assert_frame_equal(df.head(), df.iloc[:5])
        assert_frame_equal(df.tail(), df.iloc[-5:])
        assert_frame_equal(df.head(0), df)
        assert_frame_equal(df.tail(0), df)
        assert_frame_equal(df.head(-1), df.iloc[:-1])
        assert_frame_equal(df.tail(-1), df.iloc[1:])
        # empty frame
        empty_df = DataFrame()
        assert_frame_equal(empty_df.tail(), empty_df)
        assert_frame_equal(empty_df.head(), empty_df)
    def test_insert(self):
        """``insert`` places a column at the given position, tracks dtype
        counts, rejects duplicate names, and keeps ``columns.name``."""
        df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
                       columns=['c', 'b', 'a'])
        df.insert(0, 'foo', df['a'])
        self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])
        assert_almost_equal(df['a'], df['foo'])
        df.insert(2, 'bar', df['c'])
        self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])
        assert_almost_equal(df['c'], df['bar'])
        # diff dtype
        df['x'] = df['a'].astype('float32')
        result = Series(dict(float64 = 5, float32 = 1))
        self.assertTrue((df.get_dtype_counts() == result).all())
        # replacing a column changes the counts accordingly
        df['a'] = df['a'].astype('float32')
        result = Series(dict(float64 = 4, float32 = 2))
        self.assertTrue((df.get_dtype_counts() == result).all())
        df['y'] = df['a'].astype('int32')
        result = Series(dict(float64 = 4, float32 = 2, int32 = 1))
        self.assertTrue((df.get_dtype_counts() == result).all())
        # inserting an existing column name must raise
        with assertRaisesRegexp(ValueError, 'already exists'):
            df.insert(1, 'a', df['b'])
        self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
        df.columns.name = 'some_name'
        # preserve columns name field
        df.insert(0, 'baz', df['c'])
        self.assertEqual(df.columns.name, 'some_name')
    def test_delitem(self):
        """``del df[col]`` removes the column."""
        del self.frame['A']
        self.assertNotIn('A', self.frame)
    def test_pop(self):
        """``pop`` removes and returns a column; the returned Series is a
        copy, so mutating it does not write back into the frame."""
        self.frame.columns.name = 'baz'
        A = self.frame.pop('A')
        self.assertNotIn('A', self.frame)
        self.frame['foo'] = 'bar'
        foo = self.frame.pop('foo')
        self.assertNotIn('foo', self.frame)
        # gh-10912: popped column should be independent of the source frame
        a = DataFrame([[1,2,3],[4,5,6]], columns=['A','B','C'], index=['X','Y'])
        b = a.pop('B')
        b += 1
        # original frame is unchanged by the += above
        expected = DataFrame([[1,3],[4,6]], columns=['A','C'], index=['X','Y'])
        assert_frame_equal(a, expected)
        expected = Series([2,5],index=['X','Y'],name='B')+1
        assert_series_equal(b, expected)
    def test_pop_non_unique_cols(self):
        """Popping a duplicated label returns a DataFrame of all matching
        columns and removes every occurrence."""
        df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
        df.columns = ["a", "b", "a"]
        res = df.pop("a")
        self.assertEqual(type(res), DataFrame)
        self.assertEqual(len(res), 2)
        self.assertEqual(len(df.columns), 1)
        self.assertTrue("b" in df.columns)
        self.assertFalse("a" in df.columns)
        self.assertEqual(len(df.index), 2)
    def test_iter(self):
        """Iterating a DataFrame yields its column labels."""
        self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
    def test_iterrows(self):
        """Each (label, row) from ``iterrows`` matches the corresponding
        cross-section, for both homogeneous and mixed frames."""
        for i, (k, v) in enumerate(self.frame.iterrows()):
            exp = self.frame.xs(self.frame.index[i])
            assert_series_equal(v, exp)
        for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
            exp = self.mixed_frame.xs(self.mixed_frame.index[i])
            assert_series_equal(v, exp)
    def test_itertuples(self):
        """``itertuples``: values match rows; ``index=False`` drops the
        label; ``name=`` builds a namedtuple with sanitized field names; >255
        columns falls back to plain tuples (namedtuple field limit)."""
        for i, tup in enumerate(self.frame.itertuples()):
            s = Series(tup[1:])
            s.name = tup[0]
            expected = self.frame.ix[i, :].reset_index(drop=True)
            assert_series_equal(s, expected)
        df = DataFrame({'floats': np.random.randn(5),
                        'ints': lrange(5)}, columns=['floats', 'ints'])
        for tup in df.itertuples(index=False):
            tm.assertIsInstance(tup[1], np.integer)
        df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
        dfaa = df[['a', 'a']]
        self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
        tup = next(df.itertuples(name='TestName'))
        # namedtuple _fields only exist on py >= 2.7
        if sys.version >= LooseVersion('2.7'):
            self.assertEqual(tup._fields, ('Index', 'a', 'b'))
            self.assertEqual((tup.Index, tup.a, tup.b), tup)
            self.assertEqual(type(tup).__name__, 'TestName')
        # python keywords as column names become positional field names
        df.columns = ['def', 'return']
        tup2 = next(df.itertuples(name='TestName'))
        self.assertEqual(tup2, (0, 1, 4))
        if sys.version >= LooseVersion('2.7'):
            self.assertEqual(tup2._fields, ('Index', '_1', '_2'))
        # too many columns for a namedtuple -> plain tuple
        df3 = DataFrame(dict(('f'+str(i), [i]) for i in range(1024)))
        tup3 = next(df3.itertuples())
        self.assertFalse(hasattr(tup3, '_fields'))
        self.assertIsInstance(tup3, tuple)
    def test_len(self):
        """len(frame) equals the length of its index."""
        self.assertEqual(len(self.frame), len(self.frame.index))
    def test_operators(self):
        """Arithmetic: frame+frame is elementwise doubling, frame+Series
        broadcasts over columns, string columns concatenate with NaN
        propagation, and degenerate (empty) frames are identity under +."""
        garbage = random.random(4)
        colSeries = Series(garbage, index=np.array(self.frame.columns))
        idSum = self.frame + self.frame
        seriesSum = self.frame + colSeries
        for col, series in compat.iteritems(idSum):
            for idx, val in compat.iteritems(series):
                origVal = self.frame[col][idx] * 2
                if not np.isnan(val):
                    self.assertEqual(val, origVal)
                else:
                    self.assertTrue(np.isnan(origVal))
        for col, series in compat.iteritems(seriesSum):
            for idx, val in compat.iteritems(series):
                origVal = self.frame[col][idx] + colSeries[col]
                if not np.isnan(val):
                    self.assertEqual(val, origVal)
                else:
                    self.assertTrue(np.isnan(origVal))
        added = self.frame2 + self.frame2
        expected = self.frame2 * 2
        assert_frame_equal(added, expected)
        # object dtype with None -> NaN propagation
        df = DataFrame({'a': ['a', None, 'b']})
        assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
        # empty frames of various shapes are unchanged by self-addition
        for dtype in ('float', 'int64'):
            frames = [
                DataFrame(dtype=dtype),
                DataFrame(columns=['A'], dtype=dtype),
                DataFrame(index=[0], dtype=dtype),
            ]
            for df in frames:
                self.assertTrue((df + df).equals(df))
                assert_frame_equal(df + df, df)
    def test_ops_np_scalar(self):
        """Arithmetic with 0-d numpy scalars matches arithmetic with the
        underlying values, on both operand sides."""
        vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
        f = lambda x: DataFrame(x, index=list('ABCDE'),
                                columns=['jim', 'joe', 'jolie'])
        df = f(vals)
        for x in xs:
            assert_frame_equal(df / np.array(x), f(vals / x))
            assert_frame_equal(np.array(x) * df, f(vals * x))
            assert_frame_equal(df + np.array(x), f(vals + x))
            assert_frame_equal(np.array(x) - df, f(x - vals))
    def test_operators_boolean(self):
        """``&``/``|`` between frames: empty/mismatched indexes align,
        bool and int operands are allowed, float/str operands raise TypeError
        (GH 5808)."""
        result = DataFrame(index=[1]) & DataFrame(index=[1])
        assert_frame_equal(result,DataFrame(index=[1]))
        result = DataFrame(index=[1]) | DataFrame(index=[1])
        assert_frame_equal(result,DataFrame(index=[1]))
        result = DataFrame(index=[1]) & DataFrame(index=[1,2])
        assert_frame_equal(result,DataFrame(index=[1,2]))
        result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])
        assert_frame_equal(result,DataFrame(index=[1],columns=['A']))
        result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])
        assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
        result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
        assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
        # int is acceptable as a boolean operand
        result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
        assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))
        # float and str are not
        def f():
            DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
        self.assertRaises(TypeError, f)
        def f():
            DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
        self.assertRaises(TypeError, f)
    def test_operators_none_as_na(self):
        """Arithmetic on object-dtype frames treats None as NaN, and the
        result restores None in the NA positions."""
        df = DataFrame({"col1": [2, 5.0, 123, None],
                        "col2": [1, 2, 3, 4]}, dtype=object)
        ops = [operator.add, operator.sub, operator.mul, operator.truediv]
        for op in ops:
            # expected: operate on the NaN-filled frame, then put None back
            filled = df.fillna(np.nan)
            result = op(df, 3)
            expected = op(filled, 3).astype(object)
            expected[com.isnull(expected)] = None
            assert_frame_equal(result, expected)
            result = op(df, df)
            expected = op(filled, filled).astype(object)
            expected[com.isnull(expected)] = None
            assert_frame_equal(result, expected)
            result = op(df, df.fillna(7))
            assert_frame_equal(result, expected)
            result = op(df.fillna(7), df)
            assert_frame_equal(result, expected, check_dtype=False)
    def test_comparison_invalid(self):
        """Comparing datetime columns against numeric columns (either side)
        raises TypeError for every comparison operator."""
        def check(df,df2):
            for (x, y) in [(df,df2),(df2,df)]:
                self.assertRaises(TypeError, lambda : x == y)
                self.assertRaises(TypeError, lambda : x != y)
                self.assertRaises(TypeError, lambda : x >= y)
                self.assertRaises(TypeError, lambda : x > y)
                self.assertRaises(TypeError, lambda : x < y)
                self.assertRaises(TypeError, lambda : x <= y)
        # GH4968: invalid date/int comparisons
        df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
        df['dates'] = date_range('20010101', periods=len(df))
        df2 = df.copy()
        df2['dates'] = df['a']
        check(df,df2)
        df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
        df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})
        check(df,df2)
    def test_timestamp_compare(self):
        """Comparing Timestamp <op> frame equals frame <mirrored-op>
        Timestamp, including against NaT."""
        # make sure we can compare Timestamps on both the left and right hand side
        df = DataFrame({'dates1': date_range('20010101', periods=10),
                        'dates2': date_range('20010102', periods=10),
                        'intcol': np.random.randint(1000000000, size=10),
                        'floatcol': np.random.randn(10),
                        'stringcol': list(tm.rands(10))})
        df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
        # each op and its mirror image under operand swap
        ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
               'ne': 'ne'}
        for left, right in ops.items():
            left_f = getattr(operator, left)
            right_f = getattr(operator, right)
            expected = left_f(df, Timestamp('20010109'))
            result = right_f(Timestamp('20010109'), df)
            tm.assert_frame_equal(result, expected)
            # nats
            expected = left_f(df, Timestamp('nat'))
            result = right_f(Timestamp('nat'), df)
            tm.assert_frame_equal(result, expected)
def test_modulo(self):
p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
assert_frame_equal(result,expected)
result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')
result2.iloc[0:3,1] = np.nan
assert_frame_equal(result2,expected)
result = p % 0
expected = DataFrame(np.nan,index=p.index,columns=p.columns)
assert_frame_equal(result,expected)
result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)
assert_frame_equal(result2,expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_div(self):
    """Division semantics: x/x with zeros gives NaN for 0/0, scalar
    division by 0 gives inf (NaN for 0/0), and Series/frame division is
    not commutative."""
    p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })

    # frame / frame with zeros in the divisor: 0/0 -> NaN, 3/3 -> 1
    result = p / p
    expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
                          'second': Series([nan, nan, nan, 1])})
    assert_frame_equal(result,expected)

    # numpy agrees when operating on float values
    result2 = DataFrame(p.values.astype('float') / p.values, index=p.index,
                        columns=p.columns)
    assert_frame_equal(result2,expected)

    # division by scalar 0: inf everywhere except 0/0 which is NaN
    result = p / 0
    expected = DataFrame(inf, index=p.index, columns=p.columns)
    expected.iloc[0:3, 1] = nan
    assert_frame_equal(result,expected)

    # numpy has a slightly different (wrong) treatement
    result2 = DataFrame(p.values.astype('float64') / 0, index=p.index,
                        columns=p.columns)
    assert_frame_equal(result2,expected)

    # Series / frame differs from frame / Series
    p = DataFrame(np.random.randn(10, 5))
    s = p[0]
    res = s / p
    res2 = p / s
    self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_logical_operators(self):
    """&, | and ^ between boolean frames, and unary ~, must match the
    same numpy op on the raw values and keep bool dtype."""

    def _check_bin_op(op):
        # binary op on two frames equals the op on their ndarray values
        result = op(df1, df2)
        expected = DataFrame(op(df1.values, df2.values), index=df1.index,
                             columns=df1.columns)
        self.assertEqual(result.values.dtype, np.bool_)
        assert_frame_equal(result, expected)

    def _check_unary_op(op):
        # unary op on a frame equals the op on its ndarray values
        result = op(df1)
        expected = DataFrame(op(df1.values), index=df1.index,
                             columns=df1.columns)
        self.assertEqual(result.values.dtype, np.bool_)
        assert_frame_equal(result, expected)

    df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': False, 'b': False, 'c': True,
                 'd': False, 'e': False},
           'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}

    df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'd': {'a': False, 'b': False, 'c': False,
                 'd': True, 'e': False},
           'e': {'a': False, 'b': False, 'c': False,
                 'd': False, 'e': True}}

    df1 = DataFrame(df1)
    df2 = DataFrame(df2)

    _check_bin_op(operator.and_)
    _check_bin_op(operator.or_)
    _check_bin_op(operator.xor)

    # operator.neg is deprecated in numpy >= 1.9
    _check_unary_op(operator.inv)
def test_logical_typeerror(self):
    """On Python 2, comparing a float frame with an incomparable string
    raises TypeError; the check does not apply on Python 3."""
    if compat.PY3:
        raise nose.SkipTest('test_logical_typeerror not tested on PY3')
    # same dunder set as the original: eq, lt, gt, ne
    for dunder in ('__eq__', '__lt__', '__gt__', '__ne__'):
        self.assertRaises(TypeError, getattr(self.frame, dunder), 'foo')
def test_constructor_lists_to_object_dtype(self):
    """A list mixing NaN and bool produces an object column, not bool
    (from #1074)."""
    frame = DataFrame({'a': [np.nan, False]})
    self.assertEqual(frame['a'].dtype, np.object_)
    self.assertFalse(frame['a'][1])
def test_constructor_with_nas(self):
    """GH 5016: frames whose index/column labels contain NaN.

    Positional access must always work; ``.loc[:, np.nan]`` works when
    exactly one column is labeled NaN and raises TypeError when the NaN
    label is duplicated.
    """
    # na's in indicies

    def check(df):
        # every column stays positionally reachable
        for i in range(len(df.columns)):
            df.iloc[:,i]

        # positions of NaN column labels
        indexer = np.arange(len(df.columns))[isnull(df.columns)]

        if len(indexer) == 1:
            # unique NaN label: loc lookup matches positional access
            assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])
        else:
            # duplicated NaN label: loc lookup is ambiguous -> TypeError
            def f():
                df.loc[:,np.nan]
            self.assertRaises(TypeError, f)

    df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])
    check(df)

    df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])
    check(df)

    df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
    check(df)

    df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
    check(df)
def test_logical_with_nas(self):
    """``|`` treats a NaN on the left as False; filling the NaN first
    (with or without downcast suppression) yields True everywhere."""
    frame = DataFrame({'a': [np.nan, False], 'b': [True, True]})

    # raw or: NaN | True -> False
    assert_series_equal(frame['a'] | frame['b'], Series([False, True]))

    # fill first, then or -> element-wise True
    filled = frame['a'].fillna(False)
    assert_series_equal(filled | frame['b'], Series([True, True]))

    # same with downcasting explicitly disabled on fillna
    filled = frame['a'].fillna(False, downcast=False)
    assert_series_equal(filled | frame['b'], Series([True, True]))
def test_neg(self):
    """Unary negation of a frame equals multiplication by -1."""
    assert_frame_equal(-self.frame, self.frame * -1)
def test_invert(self):
    """``~`` on a boolean mask equals unary minus on the same mask."""
    mask = self.frame < 0
    assert_frame_equal(-mask, ~mask)
def test_first_last_valid(self):
    """first/last_valid_index skip leading and trailing NaN runs."""
    n = len(self.frame.index)
    values = randn(n)
    values[:5] = nan   # leading NaN block
    values[-5:] = nan  # trailing NaN block
    frame = DataFrame({'foo': values}, index=self.frame.index)

    # first valid row sits just past the leading NaNs ...
    self.assertEqual(frame.first_valid_index(), frame.index[5])
    # ... and the last valid row just before the trailing NaNs
    self.assertEqual(frame.last_valid_index(), frame.index[-6])
def test_arith_flex_frame(self):
    """Flexible arithmetic methods (add/sub/.../mod) and their reflected
    'r' variants must match the corresponding operator, upcast mixed
    dtypes correctly, reject >2-dim operands, and handle empty frames
    and the unimplemented fill_value-with-Series case."""
    ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
    if not compat.PY3:
        aliases = {}
    else:
        # on PY3 operator.div does not exist
        aliases = {'div': 'truediv'}

    for op in ops:
        try:
            alias = aliases.get(op, op)
            f = getattr(operator, alias)
            result = getattr(self.frame, op)(2 * self.frame)
            exp = f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)

            # vs mix float
            result = getattr(self.mixed_float, op)(2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype = dict(C = None))

            # vs mix int
            if op in ['add','sub','mul']:
                result = getattr(self.mixed_int, op)(2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)

                # overflow in the uint dtype forces these changes
                dtype = None
                if op in ['sub']:
                    dtype = dict(B = 'object', C = None)
                elif op in ['add','mul']:
                    dtype = dict(C = None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype = dtype)

            # reflected ('r') versions must agree with swapped operands
            r_f = lambda x, y: f(y, x)
            result = getattr(self.frame, 'r' + op)(2 * self.frame)
            exp = r_f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)

            # vs mix float
            result = getattr(self.mixed_float, op)(2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype = dict(C = None))

            result = getattr(self.intframe, op)(2 * self.intframe)
            exp = f(self.intframe, 2 * self.intframe)
            assert_frame_equal(result, exp)

            # vs mix int
            if op in ['add','sub','mul']:
                result = getattr(self.mixed_int, op)(2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)

                dtype = None
                if op in ['sub']:
                    dtype = dict(B = 'object', C = None)
                elif op in ['add','mul']:
                    dtype = dict(C = None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype = dtype)
        except:
            # make failures identifiable before re-raising
            com.pprint_thing("Failing operation %r" % op)
            raise

        # ndim >= 3 operands are rejected with a shape error
        ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
        with assertRaisesRegexp(ValueError, 'shape'):
            f(self.frame, ndim_5)

        with assertRaisesRegexp(ValueError, 'shape'):
            getattr(self.frame, op)(ndim_5)

    # scalar constant
    const_add = self.frame.add(1)
    assert_frame_equal(const_add, self.frame + 1)

    # corner cases: empty operand on either side -> all NaN
    result = self.frame.add(self.frame[:0])
    assert_frame_equal(result, self.frame * np.nan)

    result = self.frame[:0].add(self.frame)
    assert_frame_equal(result, self.frame * np.nan)

    # fill_value together with a Series operand is not implemented
    with assertRaisesRegexp(NotImplementedError, 'fill_value'):
        self.frame.add(self.frame.iloc[0], fill_value=3)
    with assertRaisesRegexp(NotImplementedError, 'fill_value'):
        self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
    """Binary ops must align on the requested MultiIndex level, both on
    the row index (level/axis=0) and on MultiIndex columns (GH 9463)."""
    # alignment on a named level of a MultiIndexed row index
    index = MultiIndex.from_product([list('abc'),
                                     ['one', 'two', 'three'],
                                     [1, 2, 3]],
                                    names=['first', 'second', 'third'])
    df = DataFrame(np.arange(27*3).reshape(27, 3),
                   index=index,
                   columns=['value1', 'value2', 'value3']).sortlevel()

    idx = pd.IndexSlice
    for op in ['add', 'sub', 'mul', 'div', 'truediv']:
        opa = getattr(operator, op, None)
        if opa is None:
            continue

        x = Series([1.0, 10.0, 100.0], [1, 2, 3])
        result = getattr(df, op)(x, level='third', axis=0)

        expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
                              for i, v in x.iteritems()]).sortlevel()
        assert_frame_equal(result, expected)

        x = Series([1.0, 10.0], ['two', 'three'])
        result = getattr(df, op)(x, level='second', axis=0)

        expected = pd.concat([opa(df.loc[idx[:, i], :], v)
                              for i, v in x.iteritems()]
                             ).reindex_like(df).sortlevel()
        assert_frame_equal(result, expected)

    # GH9463 (alignment level of dataframe with series)
    # NOTE: this statement was truncated in the file (only "],['a','b']])"
    # remained), leaving `midx` undefined; restored from upstream pandas.
    midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
    df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
    s = pd.Series({'a': 1, 'b': 2})

    df2 = df.copy()
    df2.columns.names = ['lvl0', 'lvl1']
    s2 = s.copy()
    s2.index.name = 'lvl1'

    # mul by level, with and without names on either side
    res1 = df.mul(s, axis=1, level=1)
    res2 = df.mul(s2, axis=1, level=1)
    res3 = df2.mul(s, axis=1, level=1)
    res4 = df2.mul(s2, axis=1, level=1)
    res5 = df2.mul(s, axis=1, level='lvl1')
    res6 = df2.mul(s2, axis=1, level='lvl1')

    exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
                    columns=midx)

    for res in [res1, res2]:
        assert_frame_equal(res, exp)

    exp.columns.names = ['lvl0', 'lvl1']
    for res in [res3, res4, res5, res6]:
        assert_frame_equal(res, exp)
def test_arith_mixed(self):
    """frame + frame works column-wise on mixed dtypes: string columns
    concatenate, integer columns add."""
    left = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 2, 3]})
    doubled = left + left
    assert_frame_equal(doubled,
                       DataFrame({'A': ['aa', 'bb', 'cc'],
                                  'B': [2, 4, 6]}))
def test_arith_getitem_commute(self):
    """Arithmetic commutes with column selection: op(df, scalar)[col]
    must equal op(df[col], scalar) for every op, including reflected
    forms (checked by _test_op)."""
    df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})

    # plain operator objects
    for op in (operator.add, operator.sub, operator.mul,
               operator.truediv, operator.floordiv, operator.pow):
        self._test_op(df, op)

    # reflected forms written as lambdas
    for op in (lambda x, y: y + x, lambda x, y: y - x,
               lambda x, y: y * x, lambda x, y: y / x,
               lambda x, y: y ** x):
        self._test_op(df, op)

    # direct forms written as lambdas
    for op in (lambda x, y: x + y, lambda x, y: x - y,
               lambda x, y: x * y, lambda x, y: x / y,
               lambda x, y: x ** y):
        self._test_op(df, op)
@staticmethod
def _test_op(df, op):
    """Apply *op* to the whole frame, then verify each resulting column
    equals *op* applied to that column alone. Requires unique columns."""
    whole = op(df, 1)
    if not df.columns.is_unique:
        raise ValueError("Only unique columns supported by this test")
    for name in whole.columns:
        assert_series_equal(whole[name], op(df[name], 1))
def test_bool_flex_frame(self):
    """Flexible comparison methods (eq/ne/gt/lt/ge/le) against frames,
    ndarrays, scalars, Series (both axes), NaN, complex values and
    mixed dtypes; >2-dim operands must raise."""
    data = np.random.randn(5, 3)
    other_data = np.random.randn(5, 3)
    df = DataFrame(data)
    other = DataFrame(other_data)
    ndim_5 = np.ones(df.shape + (1, 3))

    def _check_unaligned_frame(meth, op, df, other):
        # partial frame: method aligns/reindexes before comparing
        part_o = other.ix[3:, 1:].copy()
        rs = meth(part_o)
        xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
        assert_frame_equal(rs, xp)

    # DataFrame vs itself
    self.assertTrue(df.eq(df).values.all())
    self.assertFalse(df.ne(df).values.any())

    for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
        f = getattr(df, op)
        o = getattr(operator, op)

        # frame operand
        assert_frame_equal(f(other), o(df, other))
        _check_unaligned_frame(f, o, df, other)

        # ndarray operand
        assert_frame_equal(f(other.values), o(df, other.values))

        # scalar operand
        assert_frame_equal(f(0), o(df, 0))

        # NaN operand
        assert_frame_equal(f(np.nan), o(df, np.nan))

        # >2-dim operand is rejected
        with assertRaisesRegexp(ValueError, 'shape'):
            f(ndim_5)

    def _test_seq(df, idx_ser, col_ser):
        # eq/ne against Series broadcast along each axis; list/tuple
        # inputs must behave like the equivalent Series
        idx_eq = df.eq(idx_ser, axis=0)
        col_eq = df.eq(col_ser)
        idx_ne = df.ne(idx_ser, axis=0)
        col_ne = df.ne(col_ser)
        assert_frame_equal(col_eq, df == Series(col_ser))
        assert_frame_equal(col_eq, -col_ne)
        assert_frame_equal(idx_eq, -idx_ne)
        assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
        assert_frame_equal(col_eq, df.eq(list(col_ser)))
        assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
        assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))

        idx_gt = df.gt(idx_ser, axis=0)
        col_gt = df.gt(col_ser)
        idx_le = df.le(idx_ser, axis=0)
        col_le = df.le(col_ser)
        assert_frame_equal(col_gt, df > Series(col_ser))
        assert_frame_equal(col_gt, -col_le)
        assert_frame_equal(idx_gt, -idx_le)
        assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)

        idx_ge = df.ge(idx_ser, axis=0)
        col_ge = df.ge(col_ser)
        idx_lt = df.lt(idx_ser, axis=0)
        col_lt = df.lt(col_ser)
        assert_frame_equal(col_ge, df >= Series(col_ser))
        assert_frame_equal(col_ge, -col_lt)
        assert_frame_equal(idx_ge, -idx_lt)
        assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)

    idx_ser = Series(np.random.randn(5))
    col_ser = Series(np.random.randn(3))
    _test_seq(df, idx_ser, col_ser)

    # list/tuple inputs
    _test_seq(df, idx_ser.values, col_ser.values)

    # NaN never compares equal/ordered, even with itself
    df.ix[0, 0] = np.nan
    rs = df.eq(df)
    self.assertFalse(rs.ix[0, 0])
    rs = df.ne(df)
    self.assertTrue(rs.ix[0, 0])
    rs = df.gt(df)
    self.assertFalse(rs.ix[0, 0])
    rs = df.lt(df)
    self.assertFalse(rs.ix[0, 0])
    rs = df.ge(df)
    self.assertFalse(rs.ix[0, 0])
    rs = df.le(df)
    self.assertFalse(rs.ix[0, 0])

    # complex values: ordering comparisons are all-False, ne all-True
    arr = np.array([np.nan, 1, 6, np.nan])
    arr2 = np.array([2j, np.nan, 7, None])
    df = DataFrame({'a': arr})
    df2 = DataFrame({'a': arr2})
    rs = df.gt(df2)
    self.assertFalse(rs.values.any())
    rs = df.ne(df2)
    self.assertTrue(rs.values.all())

    arr3 = np.array([2j, np.nan, None])
    df3 = DataFrame({'a': arr3})
    rs = df3.gt(2j)
    self.assertFalse(rs.values.any())

    # mixed object column: ne compares element-wise without raising
    df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
    df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
    result = df1.ne(df2)
    exp = DataFrame({'col': [False, True, False]})
    assert_frame_equal(result, exp)
def test_arith_flex_series(self):
    """Flexible arithmetic against a Series: column-wise by default,
    row-wise with axis=0/'index'; includes the GH 7325 int/float
    broadcasting case for div."""
    df = self.simple

    row = df.xs('a')
    col = df['two']
    ops = ['add', 'sub', 'mul', 'mod']
    for op in ops:
        f = getattr(df, op)
        op = getattr(operator, op)
        # default axis broadcasts the row across columns
        assert_frame_equal(f(row), op(df, row))
        # axis=0 broadcasts the column down rows
        assert_frame_equal(f(col, axis=0), op(df.T, col).T)

    # axis=None behaves like the default for a Series operand
    assert_frame_equal(df.add(row, axis=None), df + row)

    # div handled separately
    assert_frame_equal(df.div(row), df / row)
    assert_frame_equal(df.div(col, axis=0), (df.T / col).T)

    # GH 7325: int frame divided row-wise by its own first column
    df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')
    expected = DataFrame([[nan, inf], [1.0, 1.5], [1.0, 1.25]])
    result = df.div(df[0],axis='index')
    assert_frame_equal(result,expected)

    # same with a float frame
    df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')
    expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])
    result = df.div(df[0],axis='index')
    assert_frame_equal(result,expected)
def test_arith_non_pandas_object(self):
    """Arithmetic with raw ndarrays and plain lists: 1-d operands
    broadcast along the chosen axis, 2-d operands apply element-wise."""
    df = self.simple

    # 1-d ndarray: + broadcasts across columns; add(..., axis=0) down rows
    val1 = df.xs('a').values
    added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
    assert_frame_equal(df + val1, added)

    added = DataFrame((df.values.T + val1).T,
                      index=df.index, columns=df.columns)
    assert_frame_equal(df.add(val1, axis=0), added)

    # plain list behaves like the ndarray
    val2 = list(df['two'])

    added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
    assert_frame_equal(df + val2, added)

    added = DataFrame((df.values.T + val2).T, index=df.index,
                      columns=df.columns)
    assert_frame_equal(df.add(val2, axis='index'), added)

    # 2-d ndarray of matching shape applies element-wise
    val3 = np.random.rand(*df.shape)
    added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
    assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
    """frame + frame: union alignment introduces NaN for non-overlapping
    labels; empty operands give all-NaN; mixed dtypes upcast."""
    # copy with half the rows and a missing column
    frame_copy = self.frame.reindex(self.frame.index[::2])

    del frame_copy['D']
    frame_copy['C'][:5] = nan

    added = self.frame + frame_copy
    # overlapping rows of 'A' doubled
    tm.assert_dict_equal(added['A'].valid(),
                         self.frame['A'] * 2,
                         compare_keys=False)

    # NaN on either side propagates
    self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())

    # column missing from frame_copy -> all NaN
    self.assertTrue(np.isnan(added['D']).all())

    self_added = self.frame + self.frame
    self.assertTrue(self_added.index.equals(self.frame.index))

    added_rev = frame_copy + self.frame
    # NOTE(review): re-checks `added`, not `added_rev` — possibly meant
    # added_rev['D']; confirm intent before changing
    self.assertTrue(np.isnan(added['D']).all())

    # corner cases: empty operand on either/both sides
    plus_empty = self.frame + self.empty
    self.assertTrue(np.isnan(plus_empty.values).all())

    empty_plus = self.empty + self.frame
    self.assertTrue(np.isnan(empty_plus.values).all())

    empty_empty = self.empty + self.empty
    self.assertTrue(empty_empty.empty)

    # out-of-order columns still align by label
    reverse = self.frame.reindex(columns=self.frame.columns[::-1])
    assert_frame_equal(reverse + self.frame, self.frame * 2)

    # mixed dtype operands upcast as needed
    added = self.frame + self.mixed_float
    _check_mixed_float(added, dtype = 'float64')
    added = self.mixed_float + self.frame
    _check_mixed_float(added, dtype = 'float64')

    added = self.mixed_float + self.mixed_float2
    _check_mixed_float(added, dtype = dict(C = None))
    added = self.mixed_float2 + self.mixed_float
    _check_mixed_float(added, dtype = dict(C = None))

    # int + float -> float
    added = self.frame + self.mixed_int
    _check_mixed_float(added, dtype = 'float64')
def test_combineSeries(self):
    """frame + Series broadcasting: row-wise over columns by default,
    upcasting for mixed float dtypes, and explicit axis='index'
    broadcasting for time series."""
    # Series broadcast along columns
    series = self.frame.xs(self.frame.index[0])

    added = self.frame + series
    for key, s in compat.iteritems(added):
        assert_series_equal(s, self.frame[key] + series[key])

    # a Series with an extra label yields an all-NaN extra column
    larger_series = series.to_dict()
    larger_series['E'] = 1
    larger_series = Series(larger_series)
    larger_added = self.frame + larger_series

    for key, s in compat.iteritems(self.frame):
        assert_series_equal(larger_added[key], s + series[key])
    self.assertIn('E', larger_added)
    self.assertTrue(np.isnan(larger_added['E']).all())

    # mixed float frames upcast as needed
    added = self.mixed_float + series
    _check_mixed_float(added, dtype='float64')
    added = self.mixed_float + series.astype('float32')
    _check_mixed_float(added, dtype=dict(C=None))
    added = self.mixed_float + series.astype('float16')
    _check_mixed_float(added, dtype=dict(C=None))

    # TimeSeries: broadcasting down rows requires explicit axis='index'.
    # NOTE: the setup lines and the `for` header were missing here (body
    # started at a truncated "esult = col + ts"); restored from upstream.
    ts = self.tsframe['A']
    added = self.tsframe.add(ts, axis='index')

    for key, col in compat.iteritems(self.tsframe):
        result = col + ts
        assert_series_equal(added[key], result, check_names=False)
        self.assertEqual(added[key].name, key)
        if col.name == ts.name:
            self.assertEqual(result.name, 'A')
        else:
            self.assertTrue(result.name is None)

    smaller_frame = self.tsframe[:-5]
    smaller_added = smaller_frame.add(ts, axis='index')
    # result index is the union, i.e. the full ts index
    self.assertTrue(smaller_added.index.equals(self.tsframe.index))

    smaller_ts = ts[:-5]
    smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
    assert_frame_equal(smaller_added, smaller_added2)

    # length-0 series on either side -> all-NaN frame
    result = self.tsframe.add(ts[:0], axis='index')
    expected = DataFrame(np.nan, index=self.tsframe.index,
                         columns=self.tsframe.columns)
    assert_frame_equal(result, expected)

    result = self.tsframe[:0].add(ts, axis='index')
    expected = DataFrame(np.nan, index=self.tsframe.index,
                         columns=self.tsframe.columns)
    assert_frame_equal(result, expected)

    # frame with no columns still picks up the full ts length
    frame = self.tsframe[:1].reindex(columns=[])
    result = frame.mul(ts, axis='index')
    self.assertEqual(len(result), len(ts))
def test_combineFunc(self):
    """Scalar multiplication applies element-wise, preserves mixed-float
    dtypes per column, and keeps an empty frame's index/columns."""
    doubled = self.frame * 2
    self.assert_numpy_array_equal(doubled.values, self.frame.values * 2)

    # mixed float: check each column, dtypes preserved
    doubled = self.mixed_float * 2
    for name, column in compat.iteritems(doubled):
        self.assert_numpy_array_equal(
            column.values, self.mixed_float[name].values * 2)
    _check_mixed_float(doubled, dtype = dict(C = None))

    # empty frame: identical index object, still zero columns
    result = self.empty * 2
    self.assertIs(result.index, self.empty.index)
    self.assertEqual(len(result.columns), 0)
def test_comparisons(self):
    """Comparison operators on frames match the operator applied to the
    raw values; wrong-dim and misaligned operands raise ValueError."""
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame()

    row = self.simple.xs('a')
    ndim_5 = np.ones(df1.shape + (1, 1, 1))

    def test_comp(func):
        # frame vs frame
        result = func(df1, df2)
        self.assert_numpy_array_equal(result.values,
                                      func(df1.values, df2.values))
        # >2-dim operand rejected
        with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):
            func(df1, ndim_5)

        # frame vs Series (row)
        result2 = func(self.simple, row)
        self.assert_numpy_array_equal(result2.values,
                                      func(self.simple.values, row.values))

        # frame vs scalar
        result3 = func(self.frame, 0)
        self.assert_numpy_array_equal(result3.values,
                                      func(self.frame.values, 0))

        # differently-labeled frames cannot be compared
        with assertRaisesRegexp(ValueError, 'Can only compare '
                                'identically-labeled DataFrame'):
            func(self.simple, self.simple[:2])

    test_comp(operator.eq)
    test_comp(operator.ne)
    test_comp(operator.lt)
    test_comp(operator.gt)
    test_comp(operator.ge)
    test_comp(operator.le)
def test_string_comparison(self):
    """Boolean masks from numeric and string comparisons select the
    expected rows; ``-mask`` selects the complement."""
    df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])

    numeric_mask = df.a > 1
    assert_frame_equal(df[numeric_mask], df.ix[1:1, :])
    assert_frame_equal(df[-numeric_mask], df.ix[0:0, :])

    string_mask = df.b == "foo"
    assert_frame_equal(df[string_mask], df.ix[0:0, :])
    assert_frame_equal(df[-string_mask], df.ix[1:1, :])
def test_float_none_comparison(self):
    """Comparing a float frame against None raises TypeError."""
    frame = DataFrame(np.random.randn(8, 3),
                      index=lrange(8), columns=list('ABC'))
    self.assertRaises(TypeError, frame.__eq__, None)
def test_boolean_comparison(self):
    """Comparing a frame against 1-d array/list/tuple broadcasts across
    rows; a column vector (wrong shape) must raise ValueError."""
    df = DataFrame(np.arange(6).reshape((3,2)))
    b = np.array([2, 2])
    b_r = np.atleast_2d([2,2])   # row vector, broadcastable
    b_c = b_r.T                  # column vector, NOT broadcastable here
    l = (2,2,2)
    tup = tuple(l)

    # gt against every broadcastable form gives the same mask
    expected = DataFrame([[False,False],[False,True],[True,True]])
    result = df>b
    assert_frame_equal(result,expected)

    result = df.values>b
    assert_numpy_array_equal(result,expected.values)

    result = df>l
    assert_frame_equal(result,expected)

    result = df>tup
    assert_frame_equal(result,expected)

    result = df>b_r
    assert_frame_equal(result,expected)

    result = df.values>b_r
    assert_numpy_array_equal(result,expected.values)

    # column vector is rejected both by the frame and by numpy
    self.assertRaises(ValueError, df.__gt__, b_c)
    self.assertRaises(ValueError, df.values.__gt__, b_c)

    # same checks for ==
    expected = DataFrame([[False,False],[True,False],[False,False]])
    result = df == b
    assert_frame_equal(result,expected)

    result = df==l
    assert_frame_equal(result,expected)

    result = df==tup
    assert_frame_equal(result,expected)

    result = df == b_r
    assert_frame_equal(result,expected)

    result = df.values == b_r
    assert_numpy_array_equal(result,expected.values)

    self.assertRaises(ValueError, lambda : df == b_c)
    self.assertFalse((df.values == b_c))

    # with labeled axes; length-3 sequences no longer match 2 columns
    df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))
    expected.index=df.index
    expected.columns=df.columns
    result = df==l
    assert_frame_equal(result,expected)

    result = df==tup
    assert_frame_equal(result,expected)

    # length-2 sequences mismatch the 3-row broadcast -> ValueError
    self.assertRaises(ValueError, lambda : df == (2,2))
    self.assertRaises(ValueError, lambda : df == [2,2])
def test_equals_different_blocks(self):
    """``equals`` ignores internal block layout: two element-equal frames
    compare equal even when their blocks carry different dtypes."""
    df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]})
    df1 = df0.reset_index()[["A", "B", "C"]]

    # the reset_index round trip consolidates the blocks differently
    self.assertTrue(df0._data.blocks[0].dtype !=
                    df1._data.blocks[0].dtype)

    # equality holds in both directions regardless
    assert_frame_equal(df0, df1)
    self.assertTrue(df0.equals(df1))
    self.assertTrue(df1.equals(df0))
def test_copy_blocks(self):
    """as_blocks() defaults to copy=True: mutating a returned block must
    not change the source frame."""
    # use a copied frame so self.frame is untouched either way
    df = DataFrame(self.frame, copy=True)
    column = df.columns[0]

    # mutate whichever block holds the first column
    blocks = df.as_blocks()
    for dtype, _df in blocks.items():
        if column in _df:
            _df.ix[:, column] = _df[column] + 1
            # the source frame must NOT reflect the mutation
            self.assertFalse(_df[column].equals(df[column]))
def test_no_copy_blocks(self):
    """as_blocks(copy=False) returns views: mutating a returned block
    DOES change the source frame."""
    df = DataFrame(self.frame, copy=True)
    column = df.columns[0]

    # mutate whichever block holds the first column
    blocks = df.as_blocks(copy=False)
    for dtype, _df in blocks.items():
        if column in _df:
            _df.ix[:, column] = _df[column] + 1
            # the source frame MUST reflect the mutation
            self.assertTrue(_df[column].equals(df[column]))
def test_to_csv_from_csv(self):
    """Round-trip DataFrame.to_csv / DataFrame.from_csv: option
    combinations, duplicate and MultiIndex row labels, header aliases,
    append mode, timedelta data and tz-aware columns."""
    pname = '__tmp_to_csv_from_csv__'

    with ensure_clean(pname) as path:
        self.frame['A'][:5] = nan

        # option combinations: columns subset, no header, no index
        self.frame.to_csv(path)
        self.frame.to_csv(path, columns=['A', 'B'])
        self.frame.to_csv(path, header=False)
        self.frame.to_csv(path, index=False)

        # basic round trip
        self.tsframe.to_csv(path)
        recons = DataFrame.from_csv(path)
        assert_frame_equal(self.tsframe, recons)

        # explicit index label adds a column on read with index_col=None
        self.tsframe.to_csv(path, index_label='index')
        recons = DataFrame.from_csv(path, index_col=None)
        assert(len(recons.columns) == len(self.tsframe.columns) + 1)

        # no index written at all
        self.tsframe.to_csv(path, index=False)
        recons = DataFrame.from_csv(path, index_col=None)
        assert_almost_equal(self.tsframe.values, recons.values)

        # ragged columns (different lengths) survive the round trip
        dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
                        's2': Series(lrange(2), lrange(2))})
        dm.to_csv(path)
        recons = DataFrame.from_csv(path)
        assert_frame_equal(dm, recons)

    with ensure_clean(pname) as path:
        # duplicate index labels
        df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
                       columns=['x', 'y', 'z'])
        df.to_csv(path)
        result = DataFrame.from_csv(path)
        assert_frame_equal(result, df)

        # MultiIndex rows, including duplicate tuples
        midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
        df = DataFrame(np.random.randn(3, 3), index=midx,
                       columns=['x', 'y', 'z'])
        df.to_csv(path)
        result = DataFrame.from_csv(path, index_col=[0, 1, 2],
                                    parse_dates=False)
        # TODO: to_csv drops the column name
        assert_frame_equal(result, df, check_names=False)

        # header aliases replace the column labels
        col_aliases = Index(['AA', 'X', 'Y', 'Z'])
        self.frame2.to_csv(path, header=col_aliases)
        rs = DataFrame.from_csv(path)
        xp = self.frame2.copy()
        xp.columns = col_aliases

        assert_frame_equal(xp, rs)

        # wrong number of aliases is rejected
        self.assertRaises(ValueError, self.frame2.to_csv, path,
                          header=['AA', 'X'])

    with ensure_clean(pname) as path:
        # append mode: second write with header=False concatenates
        df1 = DataFrame(np.random.randn(3, 1))
        df2 = DataFrame(np.random.randn(3, 1))

        df1.to_csv(path)
        df2.to_csv(path,mode='a',header=False)
        xp = pd.concat([df1,df2])
        rs = pd.read_csv(path,index_col=0)
        rs.columns = lmap(int,rs.columns)
        xp.columns = lmap(int,xp.columns)
        assert_frame_equal(xp,rs)

    with ensure_clean() as path:
        # timedelta index and data round trip via to_timedelta
        dt = pd.Timedelta(seconds=1)
        df = pd.DataFrame({'dt_data': [i*dt for i in range(3)]},
                          index=pd.Index([i*dt for i in range(3)],
                                         name='dt_index'))
        df.to_csv(path)

        result = pd.read_csv(path, index_col='dt_index')
        result.index = pd.to_timedelta(result.index)
        # TODO: to_timedelta loses the index name
        result.index = result.index.rename('dt_index')
        result['dt_data'] = pd.to_timedelta(result['dt_data'])

        assert_frame_equal(df, result, check_index_type=True)

    with ensure_clean(pname) as path:
        # tz-aware frame: re-localize after reading back
        self.tzframe.to_csv(path)
        result = pd.read_csv(path, index_col=0, parse_dates=['A'])

        converter = lambda c: pd.to_datetime(result[c]).dt.tz_localize('UTC').dt.tz_convert(self.tzframe[c].dt.tz)
        result['B'] = converter('B')
        result['C'] = converter('C')
        assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
    """Writing with an explicit ``columns`` subset/order and reading back
    yields exactly those columns in that order."""
    import pandas as pd

    chunksize = 5
    nrows = int(chunksize * 2.5)
    df = mkdf(nrows, 3)
    all_cols = df.columns
    wanted = [all_cols[2], all_cols[0]]  # reordered subset

    with ensure_clean() as path:
        df.to_csv(path, columns=wanted, chunksize=chunksize)
        roundtripped = pd.read_csv(path, index_col=0)
        assert_frame_equal(df[wanted], roundtripped, check_names=False)
def test_to_csv_legacy_raises_on_dupe_cols(self):
    """The deprecated python to_csv engine refuses duplicate columns."""
    df = mkdf(10, 3)
    df.columns = ['a', 'a', 'b']
    with ensure_clean() as path:
        # engine='python' itself triggers the FutureWarning
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            self.assertRaises(NotImplementedError, df.to_csv,
                              path, engine='python')
def test_to_csv_new_dupe_cols(self):
    """Round-trip frames with duplicate column names, both writing all
    columns and writing an explicit (possibly reordered) subset."""
    import pandas as pd

    def _check_df(df,cols=None):
        # write with the given column selection, read back, compare
        with ensure_clean() as path:
            df.to_csv(path,columns = cols,chunksize=chunksize)
            rs_c = pd.read_csv(path,index_col=0)

            # we wrote them in a different order, so compare per column
            if cols is not None:
                if df.columns.is_unique:
                    rs_c.columns = cols
                else:
                    # duplicate labels: map read columns back via indexer
                    indexer, missing = df.columns.get_indexer_non_unique(cols)
                    rs_c.columns = df.columns.take(indexer)

                for c in cols:
                    obj_df = df[c]
                    obj_rs = rs_c[c]
                    if isinstance(obj_df,Series):
                        assert_series_equal(obj_df,obj_rs)
                    else:
                        assert_frame_equal(obj_df,obj_rs,check_names=False)

            # wrote in the same order
            else:
                rs_c.columns = df.columns
                assert_frame_equal(df,rs_c,check_names=False)

    chunksize=5
    N = int(chunksize*2.5)

    # dupe cols
    df= mkdf(N, 3)
    df.columns = ['a','a','b']
    _check_df(df,None)

    # dupe cols with selection
    cols = ['b','a']
    _check_df(df,cols)
@slow
def test_to_csv_moar(self):
    """Exhaustive to_csv/from_csv round trips over a matrix of sizes,
    index/column dtypes (int, string, unicode, datetime, period),
    duplicate labels and MultiIndex rows/columns."""
    path = '__tmp_to_csv_moar__'

    def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
                 dupe_col=False):
        """Write *df*, read it back with options matching the index/
        column structure, normalize dtypes, and compare."""
        kwargs = dict(parse_dates=False)
        if cnlvl:
            # MultiIndex columns need header= and tupleize_cols=False
            if rnlvl is not None:
                kwargs['index_col'] = lrange(rnlvl)
            kwargs['header'] = lrange(cnlvl)
            with ensure_clean(path) as path:
                df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
                recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
        else:
            kwargs['header'] = 0
            with ensure_clean(path) as path:
                df.to_csv(path,encoding='utf8',chunksize=chunksize)
                recons = DataFrame.from_csv(path,**kwargs)

        def _to_uni(x):
            # decode bytes read back from the utf8 file
            if not isinstance(x, compat.text_type):
                return x.decode('utf8')
            return x

        if dupe_col:
            # read_csv disambiguates duplicate columns; restore originals
            recons.columns = df.columns
        if rnlvl and not cnlvl:
            # rebuild the row MultiIndex from the extra leading columns
            delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl-1)]
            ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
            recons.index = ix
            recons = recons.iloc[:,rnlvl-1:]

        # map mkdf's idx_type codes onto the dtype the round trip yields
        type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
        if r_dtype:
            if r_dtype == 'u': # unicode
                r_dtype='O'
                recons.index = np.array(lmap(_to_uni,recons.index),
                                        dtype=r_dtype)
                df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
            elif r_dtype == 'dt': # unicode
                r_dtype='O'
                recons.index = np.array(lmap(Timestamp,recons.index),
                                        dtype=r_dtype)
                df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
            elif r_dtype == 'p':
                # periods come back as strings; coerce both to Timestamp
                r_dtype='O'
                recons.index = np.array(list(map(Timestamp,
                                                 recons.index.to_datetime())),
                                        dtype=r_dtype)
                df.index = np.array(list(map(Timestamp,
                                             df.index.to_datetime())),
                                    dtype=r_dtype)
            else:
                r_dtype= type_map.get(r_dtype)
                recons.index = np.array(recons.index,dtype=r_dtype )
                df.index = np.array(df.index,dtype=r_dtype )
        if c_dtype:
            # same normalization for the columns axis
            if c_dtype == 'u':
                c_dtype='O'
                recons.columns = np.array(lmap(_to_uni,recons.columns),
                                          dtype=c_dtype)
                df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
            elif c_dtype == 'dt':
                c_dtype='O'
                recons.columns = np.array(lmap(Timestamp,recons.columns),
                                          dtype=c_dtype )
                df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
            elif c_dtype == 'p':
                c_dtype='O'
                recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
                                          dtype=c_dtype)
                df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
            else:
                c_dtype= type_map.get(c_dtype)
                recons.columns = np.array(recons.columns,dtype=c_dtype )
                df.columns = np.array(df.columns,dtype=c_dtype )

        assert_frame_equal(df,recons,check_names=False,check_less_precise=True)

    N = 100
    chunksize=1000

    # GH3437: datetime columns containing NaT
    from pandas import NaT
    def make_dtnat_arr(n,nnat=None):
        # build a datetime list with ~10% NaT unless nnat given
        if nnat is None:
            nnat= int(n*0.1) # 10%
        s=list(date_range('2000',freq='5min',periods=n))
        if nnat:
            for i in np.random.randint(0,len(s),nnat):
                s[i] = NaT
            i = np.random.randint(100)
            s[-i] = NaT
            s[i] = NaT
        return s

    # N=35000
    s1=make_dtnat_arr(chunksize+5)
    s2=make_dtnat_arr(chunksize+5,0)
    path = '1.csv'

    # s3=make_dtnjat_arr(chunksize+5,0)
    with ensure_clean('.csv') as pth:
        df=DataFrame(dict(a=s1,b=s2))
        df.to_csv(pth,chunksize=chunksize)
        recons = DataFrame.from_csv(pth)._convert(datetime=True,
                                                  coerce=True)
        assert_frame_equal(df, recons,check_names=False,check_less_precise=True)

    for ncols in [4]:
        base = int((chunksize// ncols or 1) or 1)
        for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                      base-1,base,base+1]:
            _do_test(mkdf(nrows, ncols,r_idx_type='dt',
                          c_idx_type='s'),path, 'dt','s')

    # NOTE(review): duplicate of the loop above (ends in a bare `pass`);
    # kept as-is
    for ncols in [4]:
        base = int((chunksize// ncols or 1) or 1)
        for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                      base-1,base,base+1]:
            _do_test(mkdf(nrows, ncols,r_idx_type='dt',
                          c_idx_type='s'),path, 'dt','s')
            pass

    # combinations of index/column label dtypes
    for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:
        for ncols in [1,2,3,4]:
            base = int((chunksize// ncols or 1) or 1)
            for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                          base-1,base,base+1]:
                _do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,
                              c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)

    # default labels
    for ncols in [1,2,3,4]:
        base = int((chunksize// ncols or 1) or 1)
        for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                      base-1,base,base+1]:
            _do_test(mkdf(nrows, ncols),path)

    # duplicate row and column labels
    for nrows in [10,N-2,N-1,N,N+1,N+2]:
        df = mkdf(nrows, 3)
        cols = list(df.columns)
        cols[:2] = ["dupe","dupe"]
        cols[-2:] = ["dupe","dupe"]
        ix = list(df.index)
        ix[:2] = ["rdupe","rdupe"]
        ix[-2:] = ["rdupe","rdupe"]
        df.index=ix
        df.columns=cols
        _do_test(df,path,dupe_col=True)

    # empty frame (rows only)
    _do_test(DataFrame(index=lrange(10)),path)

    # MultiIndex rows / columns / both
    _do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)
    for ncols in [2,3,4]:
        base = int(chunksize//ncols)
        for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
                      base-1,base,base+1]:
            _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)
            _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)
            _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),
                     path,rnlvl=2,cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
    """Round-trip a frame mixing inf and nan values within columns."""
    self.frame['G'] = np.nan
    # randomly inf or nan per row
    pick = lambda x: [np.inf, np.nan][np.random.rand() < .5]
    self.frame['H'] = self.frame.index.map(pick)

    with ensure_clean() as path:
        self.frame.to_csv(path)
        recons = DataFrame.from_csv(path)

        # TODO: to_csv drops the column name
        assert_frame_equal(self.frame, recons, check_names=False)
        assert_frame_equal(np.isinf(self.frame), np.isinf(recons),
                           check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
    """Round-trip a frame with whole columns of inf and -inf."""
    self.frame['E'] = np.inf
    self.frame['F'] = -np.inf

    with ensure_clean() as path:
        self.frame.to_csv(path)
        recons = DataFrame.from_csv(path)

        # TODO: to_csv drops the column name
        assert_frame_equal(self.frame, recons, check_names=False)
        assert_frame_equal(np.isinf(self.frame), np.isinf(recons),
                           check_names=False)
def test_to_csv_no_index(self):
    """GH 3624: to_csv(index=False) still round-trips after a column is
    appended to the frame."""
    with ensure_clean('__tmp_to_csv_no_index__') as path:
        df = DataFrame({'c1': [1, 2, 3], 'c2': [4, 5, 6]})
        df.to_csv(path, index=False)
        assert_frame_equal(df, read_csv(path))

        # append a column, write and read again
        df['c3'] = Series([7, 8, 9], dtype='int64')
        df.to_csv(path, index=False)
        assert_frame_equal(df, read_csv(path))
def test_to_csv_headers(self):
    """GH 6186: header aliases must behave identically with and without
    writing the index."""
    from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
    to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])

    with ensure_clean('__tmp_to_csv_headers__') as path:
        # with the index written
        from_df.to_csv(path, header=['X', 'Y'])
        recons = DataFrame.from_csv(path)
        assert_frame_equal(to_df, recons)

        # without the index; reading back leaves it as a column to reset
        from_df.to_csv(path, index=False, header=['X', 'Y'])
        recons = DataFrame.from_csv(path)
        recons.reset_index(inplace=True)
        assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
    """Round-trip frames with MultiIndex rows and/or columns through CSV,
    covering tupleize_cols on/off and invalid-header error paths
    (GH 3571, GH 1651, GH 3141, GH 3457)."""
    pname = '__tmp_to_csv_multiindex__'

    frame = self.frame
    old_index = frame.index
    arrays = np.arange(len(old_index) * 2).reshape(2, -1)
    new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
    frame.index = new_index

    with ensure_clean(pname) as path:

        frame.to_csv(path, header=False)
        frame.to_csv(path, columns=['A', 'B'])

        # round trip
        frame.to_csv(path)
        df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)

        assert_frame_equal(frame, df, check_names=False)  # TODO to_csv drops column name
        self.assertEqual(frame.index.names, df.index.names)
        self.frame.index = old_index  # needed if setUP becomes a classmethod

        # try multiindex with dates
        tsframe = self.tsframe
        old_index = tsframe.index
        new_index = [old_index, np.arange(len(old_index))]
        tsframe.index = MultiIndex.from_arrays(new_index)

        tsframe.to_csv(path, index_label=['time', 'foo'])
        recons = DataFrame.from_csv(path, index_col=[0, 1])
        assert_frame_equal(tsframe, recons, check_names=False)  # TODO to_csv drops column name

        # do not load index
        tsframe.to_csv(path)
        recons = DataFrame.from_csv(path, index_col=None)
        # +2: the two unconsumed index levels become regular columns
        np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)

        # no index
        tsframe.to_csv(path, index=False)
        recons = DataFrame.from_csv(path, index_col=None)
        assert_almost_equal(recons.values, self.tsframe.values)
        self.tsframe.index = old_index  # needed if setUP becomes classmethod

    with ensure_clean(pname) as path:
        # GH3571, GH1651, GH3141

        def _make_frame(names=None):
            # small 3x3 int frame with a 2-level column MultiIndex
            if names is True:
                names = ['first','second']
            return DataFrame(np.random.randint(0,10,size=(3,3)),
                             columns=MultiIndex.from_tuples([('bah', 'foo'),
                                                             ('bah', 'bar'),
                                                             ('ban', 'baz')],
                                                            names=names),
                             dtype='int64')

        # column & index are multi-index
        df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
        df.to_csv(path,tupleize_cols=False)
        result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
        assert_frame_equal(df,result)

        # column is mi
        df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
        df.to_csv(path,tupleize_cols=False)
        result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
        assert_frame_equal(df,result)

        # dup column names?
        df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
        df.to_csv(path,tupleize_cols=False)
        result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)
        assert_frame_equal(df,result)

        # writing with no index
        df = _make_frame()
        df.to_csv(path,tupleize_cols=False,index=False)
        result = read_csv(path,header=[0,1],tupleize_cols=False)
        assert_frame_equal(df,result)

        # we lose the names here
        df = _make_frame(True)
        df.to_csv(path,tupleize_cols=False,index=False)
        result = read_csv(path,header=[0,1],tupleize_cols=False)
        self.assertTrue(all([ x is None for x in result.columns.names ]))
        result.columns.names = df.columns.names
        assert_frame_equal(df,result)

        # tupleize_cols=True and index=False
        df = _make_frame(True)
        df.to_csv(path,tupleize_cols=True,index=False)
        result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
        result.columns = df.columns
        assert_frame_equal(df,result)

        # whatsnew example
        df = _make_frame()
        df.to_csv(path,tupleize_cols=False)
        result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
        assert_frame_equal(df,result)

        df = _make_frame(True)
        df.to_csv(path,tupleize_cols=False)
        result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
        assert_frame_equal(df,result)

        # column & index are multi-index (compatibility)
        df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
        df.to_csv(path,tupleize_cols=True)
        result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
        result.columns = df.columns
        assert_frame_equal(df,result)

        # invalid options
        df = _make_frame(True)
        df.to_csv(path,tupleize_cols=False)

        # catch invalid headers
        with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2\] are too many rows for this multi_index of columns'):
            read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)

        with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2,3,4,5,6\], len of 7, but only 6 lines in file'):
            read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)

        for i in [4,5,6]:
            with tm.assertRaises(CParserError):
                read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)

        # write with cols
        with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):
            df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])

    with ensure_clean(pname) as path:
        # empty
        tsframe[:0].to_csv(path)
        recons = DataFrame.from_csv(path)
        exp = tsframe[:0]
        exp.index = []

        self.assertTrue(recons.columns.equals(exp.columns))
        self.assertEqual(len(recons), 0)
def test_to_csv_float32_nanrep(self):
    """na_rep is honoured for float32 columns."""
    df = DataFrame(np.random.randn(1, 4).astype(np.float32))
    df[1] = np.nan

    with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
        df.to_csv(path, na_rep=999)

        with open(path) as f:
            lines = f.readlines()
            # third field of the data row (index, col 0, col 1)
            self.assertEqual(lines[1].split(',')[2], '999')
def test_to_csv_withcommas(self):
    """Fields containing commas are escaped and survive a round trip."""
    # Commas inside fields should be correctly escaped when saving as CSV.
    df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})

    with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
        df.to_csv(path)
        df2 = DataFrame.from_csv(path)
        assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
    """Round-trip a frame mixing float/int/bool/object/datetime columns,
    reading back with explicit dtypes and parse_dates."""
    def create_cols(name):
        # five column labels like 'float000' .. 'float004'
        return [ "%s%03d" % (name,i) for i in range(5) ]

    df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))
    df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))
    df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
    df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
    df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))

    # add in some nans
    df_float.ix[30:50,1:3] = np.nan

    #### this is a bug in read_csv right now ####
    #df_dt.ix[30:50,1:3] = np.nan

    df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)

    # dtype map handed to read_csv so each column comes back as written
    dtypes = dict()
    for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:
        for c in create_cols(n):
            dtypes[c] = dtype

    with ensure_clean() as filename:
        df.to_csv(filename)
        rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
        assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
    """to_csv/read_csv with duplicate column labels, single- and mixed-dtype
    (GH 3457)."""
    df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')

    with ensure_clean() as filename:
        df.to_csv(filename)  # single dtype, fine
        result = read_csv(filename,index_col=0)
        # read_csv mangles duplicate names; restore before comparing
        result.columns = df.columns
        assert_frame_equal(result,df)

    df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')
    df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')
    df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))
    df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))
    df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))
    df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)

    # force every dtype group to share the labels 0,1,2
    cols = []
    for i in range(5):
        cols.extend([0,1,2])
    df.columns = cols

    from pandas import to_datetime
    with ensure_clean() as filename:
        df.to_csv(filename)
        result = read_csv(filename,index_col=0)

        # date cols come back as strings; convert the mangled names by hand
        for i in ['0.4','1.4','2.4']:
            result[i] = to_datetime(result[i])

        result.columns = df.columns
        assert_frame_equal(result,df)

    # GH3457
    from pandas.util.testing import makeCustomDataframe as mkdf
    N=10
    df= mkdf(N, 3)
    df.columns = ['a','a','b']

    with ensure_clean() as filename:
        df.to_csv(filename)

        # read_csv will rename the dups columns
        result = read_csv(filename,index_col=0)
        result = result.rename(columns={ 'a.1' : 'a' })
        assert_frame_equal(result,df)
def test_to_csv_chunking(self):
    """Output is identical regardless of the chunksize used while writing."""
    aa=DataFrame({'A':lrange(100000)})
    aa['B'] = aa.A + 1.0
    aa['C'] = aa.A + 2.0
    aa['D'] = aa.A + 3.0

    for chunksize in [10000,50000,100000]:
        with ensure_clean() as filename:
            aa.to_csv(filename,chunksize=chunksize)
            rs = read_csv(filename,index_col=0)
            assert_frame_equal(rs, aa)
@slow
def test_to_csv_wide_frame_formatting(self):
    """Very wide (100k+ column) frames format correctly (GH 8621)."""
    # Issue #8621
    df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
    with ensure_clean() as filename:
        df.to_csv(filename, header=False, index=False)
        rs = read_csv(filename, header=None)
        assert_frame_equal(rs, df)
def test_to_csv_bug(self):
    """A frame built from a header-less CSV can be written and re-read."""
    f1 = StringIO('a,1.0\nb,2.0')
    df = DataFrame.from_csv(f1, header=None)
    newdf = DataFrame({'t': df[df.columns[0]]})

    with ensure_clean() as path:
        newdf.to_csv(path)

        recons = read_csv(path, index_col=0)
        assert_frame_equal(recons, newdf, check_names=False)  # don't check_names as t != 1
def test_to_csv_unicode(self):
    """UTF-8 encoded round trip with a non-ASCII column name."""
    df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
    with ensure_clean() as path:

        df.to_csv(path, encoding='UTF-8')
        df2 = read_csv(path, index_col=0, encoding='UTF-8')
        assert_frame_equal(df, df2)

        df.to_csv(path, encoding='UTF-8', index=False)
        df2 = read_csv(path, index_col=None, encoding='UTF-8')
        assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
    """UTF-8 round trip through a buffer when the index labels are unicode."""
    buf = StringIO('')
    df = DataFrame(
        [[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
        columns=[u("\u05d0"),
                 u("\u05d1"), u("\u05d2"), u("\u05d3")],
        index=[u("\u05d0"), u("\u05d1")])

    df.to_csv(buf, encoding='UTF-8')
    buf.seek(0)

    df2 = read_csv(buf, index_col=0, encoding='UTF-8')
    assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
    """to_csv accepts a file-like buffer, not just a path."""
    buf = StringIO()
    self.frame.to_csv(buf)
    buf.seek(0)
    recons = read_csv(buf, index_col=0)
    assert_frame_equal(recons, self.frame, check_names=False)  # TODO to_csv drops column name
def test_to_csv_float_format(self):
    """float_format='%.2f' rounds every float field on write."""
    df = DataFrame([[0.123456, 0.234567, 0.567567],
                    [12.32112, 123123.2, 321321.2]],
                   index=['A', 'B'], columns=['X', 'Y', 'Z'])

    with ensure_clean() as filename:

        df.to_csv(filename, float_format='%.2f')

        rs = read_csv(filename, index_col=0)
        xp = DataFrame([[0.12, 0.23, 0.57],
                        [12.32, 123123.20, 321321.20]],
                       index=['A', 'B'], columns=['X', 'Y', 'Z'])
        assert_frame_equal(rs, xp)
def test_to_csv_quoting(self):
    """csv.QUOTE_NONNUMERIC / QUOTE_ALL behaviour, incl. embedded CRLF."""
    df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})

    buf = StringIO()
    df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)

    result = buf.getvalue()
    expected = ('"A","B"\n'
                '1,"foo"\n'
                '2,"bar"\n'
                '3,"baz"\n')

    self.assertEqual(result, expected)

    # quoting windows line terminators, presumes that if a file exists with
    # \r\n inside a quoted field it round-trips unchanged
    text = 'a,b,c\n1,"test \r\n",3\n'
    df = pd.read_csv(StringIO(text))
    buf = StringIO()
    df.to_csv(buf, encoding='utf-8', index=False)
    self.assertEqual(buf.getvalue(), text)

    # QUOTE_ALL also quotes the (multi-)index labels
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
    df = df.set_index(['a', 'b'])
    expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
    self.assertEqual(df.to_csv(quoting=csv.QUOTE_ALL), expected)
def test_to_csv_unicodewriter_quoting(self):
    """QUOTE_NONNUMERIC works through the unicode writer path (encoding set)."""
    df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})

    buf = StringIO()
    df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
              encoding='utf-8')

    result = buf.getvalue()
    expected = ('"A","B"\n'
                '1,"foo"\n'
                '2,"bar"\n'
                '3,"baz"\n')

    self.assertEqual(result, expected)
def test_to_csv_quote_none(self):
    """QUOTE_NONE leaves quote characters in the data untouched."""
    df = DataFrame({'A': ['hello', '{"hello"}']})
    for encoding in (None, 'utf-8'):
        buf = StringIO()
        df.to_csv(buf, quoting=csv.QUOTE_NONE,
                  encoding=encoding, index=False)
        result = buf.getvalue()
        expected = 'A\nhello\n{"hello"}\n'
        self.assertEqual(result, expected)
def test_to_csv_index_no_leading_comma(self):
    """index_label=False suppresses the leading comma in the header row."""
    df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                   index=['one', 'two', 'three'])

    buf = StringIO()
    df.to_csv(buf, index_label=False)
    expected = ('A,B\n'
                'one,1,4\n'
                'two,2,5\n'
                'three,3,6\n')
    self.assertEqual(buf.getvalue(), expected)
def test_to_csv_line_terminators(self):
    """line_terminator controls the row separator; default is '\\n'."""
    df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                   index=['one', 'two', 'three'])

    buf = StringIO()
    df.to_csv(buf, line_terminator='\r\n')
    expected = (',A,B\r\n'
                'one,1,4\r\n'
                'two,2,5\r\n'
                'three,3,6\r\n')
    self.assertEqual(buf.getvalue(), expected)

    buf = StringIO()
    df.to_csv(buf)  # The default line terminator remains \n
    expected = (',A,B\n'
                'one,1,4\n'
                'two,2,5\n'
                'three,3,6\n')
    self.assertEqual(buf.getvalue(), expected)
def test_to_csv_from_csv_categorical(self):
    """Categorical data is written identically to its plain-object equivalent."""
    # CSV with categoricals should result in the same output as when one would add a "normal"
    # Series/DataFrame.
    s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
    s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
    res = StringIO()
    s.to_csv(res)
    exp = StringIO()
    s2.to_csv(exp)
    self.assertEqual(res.getvalue(), exp.getvalue())

    df = DataFrame({"s":s})
    df2 = DataFrame({"s":s2})
    res = StringIO()
    df.to_csv(res)
    exp = StringIO()
    df2.to_csv(exp)
    self.assertEqual(res.getvalue(), exp.getvalue())
def test_to_csv_path_is_none(self):
    """to_csv(path=None) returns the CSV as a string (GH 8215)."""
    csv_str = self.frame.to_csv(path=None)
    self.assertIsInstance(csv_str, str)
    recons = pd.read_csv(StringIO(csv_str), index_col=0)
    assert_frame_equal(self.frame, recons)
def test_to_csv_compression_gzip(self):
    """Round trip through a gzip-compressed CSV and verify the file on
    disk is genuinely gzipped (GH 7615)."""
    # BUG FIX: the first line of this frame literal was truncated in the
    # source; restored to match the frame used in test_to_csv_float_format.
    df = DataFrame([[0.123456, 0.234567, 0.567567],
                    [12.32112, 123123.2, 321321.2]],
                   index=['A', 'B'], columns=['X', 'Y', 'Z'])

    with ensure_clean() as filename:

        df.to_csv(filename, compression="gzip")

        # test the round trip - to_csv -> read_csv
        rs = read_csv(filename, compression="gzip", index_col=0)
        assert_frame_equal(df, rs)

        # explicitly make sure the file really is gzipped: decompress it
        # by hand and look for the column names in the text
        import gzip
        f = gzip.open(filename, 'rb')
        text = f.read().decode('utf8')
        f.close()
        for col in df.columns:
            self.assertIn(col, text)
def test_to_csv_compression_bz2(self):
    """Round trip through a bz2-compressed CSV and verify the file on
    disk is genuinely bz2 (GH 7615)."""
    # BUG FIX: the first line of this frame literal was truncated in the
    # source; restored to match the frame used in test_to_csv_float_format.
    df = DataFrame([[0.123456, 0.234567, 0.567567],
                    [12.32112, 123123.2, 321321.2]],
                   index=['A', 'B'], columns=['X', 'Y', 'Z'])

    with ensure_clean() as filename:

        df.to_csv(filename, compression="bz2")

        # test the round trip - to_csv -> read_csv
        rs = read_csv(filename, compression="bz2", index_col=0)
        assert_frame_equal(df, rs)

        # explicitly make sure the file really is bz2-compressed
        import bz2
        f = bz2.BZ2File(filename, 'rb')
        text = f.read().decode('utf8')
        f.close()
        for col in df.columns:
            self.assertIn(col, text)
def test_to_csv_compression_value_error(self):
    """An unsupported compression keyword raises ValueError."""
    # BUG FIX: the first line of this frame literal was truncated in the
    # source; restored to match the frame used in test_to_csv_float_format.
    df = DataFrame([[0.123456, 0.234567, 0.567567],
                    [12.32112, 123123.2, 321321.2]],
                   index=['A', 'B'], columns=['X', 'Y', 'Z'])

    with ensure_clean() as filename:
        # zip compression is not supported and should raise ValueError
        self.assertRaises(ValueError, df.to_csv, filename, compression="zip")
def test_info(self):
    """Smoke test: info() runs on fixture frames, with and without a buffer."""
    io = StringIO()
    self.frame.info(buf=io)
    self.tsframe.info(buf=io)

    frame = DataFrame(np.random.randn(5, 3))

    import sys
    # redirect stdout so the no-buffer path is exercised silently
    sys.stdout = StringIO()
    frame.info()
    frame.info(verbose=False)
    sys.stdout = sys.__stdout__
def test_info_wide(self):
    """info() on a wide frame honours max_cols and display.max_info_columns."""
    from pandas import set_option, reset_option

    io = StringIO()
    df = DataFrame(np.random.randn(5, 101))
    df.info(buf=io)

    io = StringIO()
    df.info(buf=io, max_cols=101)
    rs = io.getvalue()
    self.assertTrue(len(rs.splitlines()) > 100)

    xp = rs

    set_option('display.max_info_columns', 101)
    io = StringIO()
    df.info(buf=io)
    self.assertEqual(rs, xp)
    reset_option('display.max_info_columns')
def test_info_duplicate_columns(self):
    """Smoke test: info() must not fail on duplicate column labels."""
    io = StringIO()

    # it works!
    frame = DataFrame(np.random.randn(1500, 4),
                      columns=['a', 'a', 'b', 'b'])
    frame.info(buf=io)
def test_info_shows_column_dtypes(self):
    """info() output lists each column with its non-null count and dtype."""
    dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
              'complex128', 'object', 'bool']
    data = {}
    n = 10
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    buf = StringIO()
    df.info(buf=buf)
    res = buf.getvalue()
    for i, dtype in enumerate(dtypes):
        # e.g. "0 10 non-null int64"
        name = '%d %d non-null %s' % (i, n, dtype)
        assert name in res
def test_info_max_cols(self):
    """Interplay of verbose / max_cols / max_info_columns determines whether
    info() prints one line per column (verbose, 10 lines) or a summary (5)."""
    df = DataFrame(np.random.randn(10, 5))
    for len_, verbose in [(5, None), (5, False), (10, True)]:
        # For verbose always      ^ setting  ^ summarize is nugatory
        with option_context('max_info_columns', 4):
            buf = StringIO()
            df.info(buf=buf, verbose=verbose)
            res = buf.getvalue()
            self.assertEqual(len(res.strip().split('\n')), len_)

    for len_, verbose in [(10, None), (5, False), (10, True)]:
        # max_cols no exceeded
        with option_context('max_info_columns', 5):
            buf = StringIO()
            df.info(buf=buf, verbose=verbose)
            res = buf.getvalue()
            self.assertEqual(len(res.strip().split('\n')), len_)

    for len_, max_cols in [(10, 5), (5, 4)]:
        # setting truncates
        with option_context('max_info_columns', 4):
            buf = StringIO()
            df.info(buf=buf, max_cols=max_cols)
            res = buf.getvalue()
            self.assertEqual(len(res.strip().split('\n')), len_)

        # setting wouldn't truncate
        with option_context('max_info_columns', 5):
            buf = StringIO()
            df.info(buf=buf, max_cols=max_cols)
            res = buf.getvalue()
            self.assertEqual(len(res.strip().split('\n')), len_)
def test_info_memory_usage(self):
    """info(memory_usage=...) reporting and DataFrame.memory_usage(),
    including deep introspection of object columns and duplicate labels."""
    # Ensure memory usage is displayed, when asserted, on the last line
    dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
              'complex128', 'object', 'bool']
    data = {}
    n = 10
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    buf = StringIO()
    # display memory usage case
    df.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    self.assertTrue("memory usage: " in res[-1])
    # do not display memory usage cas
    df.info(buf=buf, memory_usage=False)
    res = buf.getvalue().splitlines()
    self.assertTrue("memory usage: " not in res[-1])

    df.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    # memory usage is a lower bound, so print it as XYZ+ MB
    self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))

    df.iloc[:, :5].info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    # excluded column with object dtype, so estimate is accurate
    self.assertFalse(re.match(r"memory usage: [^+]+\+", res[-1]))

    df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
    df_with_object_index.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))

    # deep introspection gives an exact figure, so no trailing '+'
    df_with_object_index.info(buf=buf, memory_usage='deep')
    res = buf.getvalue().splitlines()
    self.assertTrue(re.match(r"memory usage: [^+]+$", res[-1]))

    self.assertTrue(df_with_object_index.memory_usage(index=True, deep=True).sum() \
                    > df_with_object_index.memory_usage(index=True).sum())

    df_object = pd.DataFrame({'a': ['a']})
    self.assertTrue(df_object.memory_usage(deep=True).sum() \
                    > df_object.memory_usage().sum())

    # Test a DataFrame with duplicate columns
    dtypes = ['int64', 'int64', 'int64', 'float64']
    data = {}
    n = 100
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    df.columns = dtypes

    # Ensure df size is as expected
    df_size = df.memory_usage().sum()
    exp_size = len(dtypes) * n * 8  # cols * rows * bytes
    self.assertEqual(df_size, exp_size)
    # Ensure number of cols in memory_usage is the same as df
    size_df = np.size(df.columns.values)  # index=False; default
    self.assertEqual(size_df, np.size(df.memory_usage()))

    # assert deep works only on object
    self.assertEqual(df.memory_usage().sum(),df.memory_usage(deep=True).sum())

    # test for validity (smoke: nbytes/memory_usage on plain and MultiIndex)
    DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)
    DataFrame(1,index=['a'],columns=['A']).index.nbytes
    DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
    DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
    DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)
    DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
    DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
def test_dtypes(self):
    """dtypes matches the per-column dtype; also works under use_inf_as_null
    (GH 8722)."""
    self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
    result = self.mixed_frame.dtypes
    expected = Series(dict((k, v.dtype)
                           for k, v in compat.iteritems(self.mixed_frame)),
                      index=result.index)
    assert_series_equal(result, expected)

    # compat, GH 8722
    with option_context('use_inf_as_null',True):
        df = DataFrame([[1]])
        result = df.dtypes
        assert_series_equal(result,Series({0:np.dtype('int64')}))
def test_convert_objects(self):
    """_convert with datetime/numeric flags: round trip, forced numeric
    conversion with garbled entries becoming NaN, and astype behaviour."""
    oops = self.mixed_frame.T.T
    converted = oops._convert(datetime=True)
    assert_frame_equal(converted, self.mixed_frame)
    self.assertEqual(converted['A'].dtype, np.float64)

    # force numeric conversion
    self.mixed_frame['H'] = '1.'
    self.mixed_frame['I'] = '1'

    # add in some items that will be nan
    l = len(self.mixed_frame)
    self.mixed_frame['J'] = '1.'
    self.mixed_frame['K'] = '1'
    self.mixed_frame.ix[0:5,['J','K']] = 'garbled'
    converted = self.mixed_frame._convert(datetime=True, numeric=True)
    self.assertEqual(converted['H'].dtype, 'float64')
    self.assertEqual(converted['I'].dtype, 'int64')
    self.assertEqual(converted['J'].dtype, 'float64')
    self.assertEqual(converted['K'].dtype, 'float64')
    # the 'garbled' rows could not be parsed and become NaN
    self.assertEqual(len(converted['J'].dropna()), l-5)
    self.assertEqual(len(converted['K'].dropna()), l-5)

    # via astype
    converted = self.mixed_frame.copy()
    converted['H'] = converted['H'].astype('float64')
    converted['I'] = converted['I'].astype('int64')
    self.assertEqual(converted['H'].dtype, 'float64')
    self.assertEqual(converted['I'].dtype, 'int64')

    # via astype, but errors
    converted = self.mixed_frame.copy()
    with assertRaisesRegexp(ValueError, 'invalid literal'):
        converted['H'].astype('int32')

    # mixed in a single column
    df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))
    result = df._convert(datetime=True, numeric=True)
    expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))
    assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
    """_convert leaves a frame unchanged when no column needs conversion."""
    mixed1 = DataFrame(
        {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
    mixed2 = mixed1._convert(datetime=True)
    assert_frame_equal(mixed1, mixed2)
def test_append_series_dict(self):
    """append() with a Series / dict: error paths, ignore_index behaviour,
    partial rows, and named rows."""
    df = DataFrame(np.random.randn(5, 4),
                   columns=['foo', 'bar', 'baz', 'qux'])

    series = df.ix[4]
    with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):
        df.append(series, verify_integrity=True)
    series.name = None
    with assertRaisesRegexp(TypeError, 'Can only append a Series if '
                            'ignore_index=True'):
        df.append(series, verify_integrity=True)

    result = df.append(series[::-1], ignore_index=True)
    expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,
                         ignore_index=True)
    assert_frame_equal(result, expected)

    # dict
    result = df.append(series.to_dict(), ignore_index=True)
    assert_frame_equal(result, expected)

    result = df.append(series[::-1][:3], ignore_index=True)
    expected = df.append(DataFrame({0: series[::-1][:3]}).T,
                         ignore_index=True)
    assert_frame_equal(result, expected.ix[:, result.columns])

    # can append when name set
    row = df.ix[4]
    row.name = 5
    result = df.append(row)
    expected = df.append(df[-1:], ignore_index=True)
    assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
    """append() accepts a list of dicts, including dicts with new columns."""
    df = DataFrame(np.random.randn(5, 4),
                   columns=['foo', 'bar', 'baz', 'qux'])

    dicts = [x.to_dict() for idx, x in df.iterrows()]

    result = df.append(dicts, ignore_index=True)
    expected = df.append(df, ignore_index=True)
    assert_frame_equal(result, expected)

    # different columns
    dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},
             {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]
    result = df.append(dicts, ignore_index=True)
    expected = df.append(DataFrame(dicts), ignore_index=True)
    assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
    """Appending an empty frame is a no-op in every empty/non-empty combination."""
    # Empty df append empty df
    df1 = DataFrame([])
    df2 = DataFrame([])
    result = df1.append(df2)
    expected = df1.copy()
    assert_frame_equal(result, expected)

    # Non-empty df append empty df
    df1 = DataFrame(np.random.randn(5, 2))
    df2 = DataFrame()
    result = df1.append(df2)
    expected = df1.copy()
    assert_frame_equal(result, expected)

    # Empty df with columns append empty df
    df1 = DataFrame(columns=['bar', 'foo'])
    df2 = DataFrame()
    result = df1.append(df2)
    expected = df1.copy()
    assert_frame_equal(result, expected)

    # Non-Empty df with columns append empty df
    df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
    df2 = DataFrame()
    result = df1.append(df2)
    expected = df1.copy()
    assert_frame_equal(result, expected)
def test_append_dtypes(self):
    """Dtype inference when appending rows of differing dtypes (GH 5754)."""
    # GH 5754
    # row appends of different dtypes (so need to do by-item)
    # can sometimes infer the correct type

    df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
    df2 = DataFrame()
    result = df1.append(df2)
    expected = df1.copy()
    assert_frame_equal(result, expected)

    # datetime + string -> object column
    df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
    df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
    result = df1.append(df2)
    expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
    assert_frame_equal(result, expected)

    # datetime + NaN stays datetime64[ns] (NaN becomes NaT)
    df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
    df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
    result = df1.append(df2)
    expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
    assert_frame_equal(result, expected)

    # same, but the NaN side is object-dtyped
    df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
    df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
    result = df1.append(df2)
    expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
    assert_frame_equal(result, expected)

    # NaN first, datetime second
    df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
    df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
    result = df1.append(df2)
    expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
    assert_frame_equal(result, expected)

    # datetime + int (object) -> object column
    df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
    df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
    result = df1.append(df2)
    expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
    assert_frame_equal(result, expected)
def test_asfreq(self):
    """asfreq by offset object vs. rule string, plus the zero-length case."""
    offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
    rule_monthly = self.tsframe.asfreq('BM')

    assert_almost_equal(offset_monthly['A'], rule_monthly['A'])

    filled = rule_monthly.asfreq('B', method='pad')
    # TODO: actually check that this worked.

    # don't forget!
    filled_dep = rule_monthly.asfreq('B', method='pad')

    # test does not blow up on length-0 DataFrame
    zero_length = self.tsframe.reindex([])
    result = zero_length.asfreq('BM')
    self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
    """asfreq produces a DatetimeIndex on both frame and column."""
    df = DataFrame({'A': [1, 2, 3]},
                   index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
                          datetime(2011, 11, 3)])
    df = df.asfreq('B')
    tm.assertIsInstance(df.index, DatetimeIndex)

    ts = df['A'].asfreq('B')
    tm.assertIsInstance(ts.index, DatetimeIndex)
def test_at_time_between_time_datetimeindex(self):
    """at_time/between_time select the expected positional rows, and the
    same time-based keys work for .ix get and set."""
    index = date_range("2012-01-01", "2012-01-05", freq='30min')
    df = DataFrame(randn(len(index), 5), index=index)
    akey = time(12, 0, 0)
    bkey = slice(time(13, 0, 0), time(14, 0, 0))
    # positional equivalents of the time-based selections above
    ainds = [24, 72, 120, 168]
    binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]

    result = df.at_time(akey)
    expected = df.ix[akey]
    expected2 = df.ix[ainds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    self.assertEqual(len(result), 4)

    result = df.between_time(bkey.start, bkey.stop)
    expected = df.ix[bkey]
    expected2 = df.ix[binds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    self.assertEqual(len(result), 12)

    result = df.copy()
    result.ix[akey] = 0
    result = result.ix[akey]
    expected = df.ix[akey].copy()
    expected.ix[:] = 0
    assert_frame_equal(result, expected)

    result = df.copy()
    result.ix[akey] = 0
    result.ix[akey] = df.ix[ainds]
    assert_frame_equal(result, df)

    result = df.copy()
    result.ix[bkey] = 0
    result = result.ix[bkey]
    expected = df.ix[bkey].copy()
    expected.ix[:] = 0
    assert_frame_equal(result, expected)

    result = df.copy()
    result.ix[bkey] = 0
    result.ix[bkey] = df.ix[binds]
    assert_frame_equal(result, df)
def test_as_matrix(self):
    """as_matrix() round-trips values element-wise and supports column
    subsets and complex data."""
    frame = self.frame
    mat = frame.as_matrix()

    frameCols = frame.columns
    for i, row in enumerate(mat):
        for j, value in enumerate(row):
            col = frameCols[j]
            if np.isnan(value):
                self.assertTrue(np.isnan(frame[col][i]))
            else:
                self.assertEqual(value, frame[col][i])

    # mixed type
    mat = self.mixed_frame.as_matrix(['foo', 'A'])
    self.assertEqual(mat[0, 0], 'bar')

    df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
    mat = df.as_matrix()
    self.assertEqual(mat[0, 0], 1j)

    # single block corner case
    mat = self.frame.as_matrix(['A', 'B'])
    expected = self.frame.reindex(columns=['A', 'B']).values
    assert_almost_equal(mat, expected)
def test_as_matrix_duplicates(self):
    """values works with duplicate column labels."""
    df = DataFrame([[1, 2, 'a', 'b'],
                    [1, 2, 'a', 'b']],
                   columns=['one', 'one', 'two', 'two'])

    result = df.values
    expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
                        dtype=object)

    self.assertTrue(np.array_equal(result, expected))
def test_ftypes(self):
    """ftypes reports '<dtype>:dense' for each column of the float fixture."""
    frame = self.mixed_float
    expected = Series(dict(A = 'float32:dense',
                           B = 'float32:dense',
                           C = 'float16:dense',
                           D = 'float64:dense')).sort_values()
    result = frame.ftypes.sort_values()
    assert_series_equal(result,expected)
def test_values(self):
    """Writing into .values mutates the underlying frame (it is a view here)."""
    self.frame.values[:, 0] = 5.
    self.assertTrue((self.frame.values[:, 0] == 5).all())
def test_deepcopy(self):
    """deepcopy detaches data: mutating the copy leaves the original intact."""
    cp = deepcopy(self.frame)
    series = cp['A']
    series[:] = 10
    for idx, value in compat.iteritems(series):
        self.assertNotEqual(self.frame['A'][idx], value)
def test_copy(self):
    """copy() yields an independent frame with its own BlockManager."""
    cop = self.frame.copy()
    cop['E'] = cop['A']
    self.assertNotIn('E', self.frame)

    # copy objects
    copy = self.mixed_frame.copy()
    self.assertIsNot(copy._data, self.mixed_frame._data)
def _check_method(self, method='pearson', check_minp=False):
    """Shared corr() checker.

    With check_minp=False, verify DataFrame.corr(method=...) against the
    pairwise Series.corr for columns A and C.  With check_minp=True,
    verify that min_periods masks pairs with too few overlapping
    observations.
    """
    if not check_minp:
        correls = self.frame.corr(method=method)
        exp = self.frame['A'].corr(self.frame['C'], method=method)
        assert_almost_equal(correls['A']['C'], exp)
    else:
        result = self.frame.corr(min_periods=len(self.frame) - 8)
        expected = self.frame.corr()
        expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
        # BUG FIX: result/expected were computed but never compared,
        # making the min_periods branch a silent no-op.
        assert_frame_equal(result, expected)
def test_corr_pearson(self):
    """corr(method='pearson') with NaNs in the fixture columns."""
    tm._skip_if_no_scipy()
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('pearson')
def test_corr_kendall(self):
    """corr(method='kendall') with NaNs in the fixture columns."""
    tm._skip_if_no_scipy()
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('kendall')
def test_corr_spearman(self):
    """corr(method='spearman') with NaNs in the fixture columns."""
    tm._skip_if_no_scipy()
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('spearman')
def test_corr_non_numeric(self):
    """corr() silently excludes non-numeric columns."""
    tm._skip_if_no_scipy()
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    # exclude non-numeric types
    result = self.mixed_frame.corr()
    expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
    assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
    """Columns with no overlapping valid observations correlate to NaN."""
    tm._skip_if_no_scipy()

    # nothing in common
    for meth in ['pearson', 'kendall', 'spearman']:
        df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
                        'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
                        'C': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]})
        rs = df.corr(meth)
        self.assertTrue(isnull(rs.ix['A', 'B']))
        self.assertTrue(isnull(rs.ix['B', 'A']))
        self.assertEqual(rs.ix['A', 'A'], 1)
        self.assertEqual(rs.ix['B', 'B'], 1)
        self.assertTrue(isnull(rs.ix['C', 'C']))
def test_corr_constant(self):
    """Constant columns have undefined (NaN) correlation."""
    tm._skip_if_no_scipy()

    # constant --> all NA
    for meth in ['pearson', 'spearman']:
        df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
                        'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
        rs = df.corr(meth)
        self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
    """Smoke test: cov()/corr() work on dataframes of all-int columns."""
    # dtypes other than float64 #1761
    df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})

    # it works!
    df3.cov()
    df3.corr()
def test_corr_int_and_boolean(self):
    """Perfectly correlated int and bool columns give a correlation of 1."""
    tm._skip_if_no_scipy()

    # when dtypes of pandas series are different
    # then ndarray will have dtype=object,
    # so it need to be properly handled
    df = DataFrame({"a": [True, False], "b": [1, 0]})

    expected = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['a', 'b'])
    for meth in ['pearson', 'kendall', 'spearman']:
        assert_frame_equal(df.corr(meth), expected)
def test_cov(self):
    """cov() with min_periods, NaNs, non-numeric exclusion, and a single
    column, checked against np.cov."""
    # min_periods no NAs (corner case)
    expected = self.frame.cov()
    result = self.frame.cov(min_periods=len(self.frame))

    assert_frame_equal(expected, result)

    result = self.frame.cov(min_periods=len(self.frame) + 1)
    self.assertTrue(isnull(result.values).all())

    # with NAs
    frame = self.frame.copy()
    frame['A'][:5] = nan
    frame['B'][5:10] = nan
    # NOTE(review): result/expected below are computed but never compared,
    # and `result` is computed from self.frame rather than the NaN-seeded
    # local `frame` — looks like dead/buggy code; verify intent before fixing.
    result = self.frame.cov(min_periods=len(self.frame) - 8)
    expected = self.frame.cov()
    expected.ix['A', 'B'] = np.nan
    expected.ix['B', 'A'] = np.nan

    # regular
    self.frame['A'][:5] = nan
    self.frame['B'][:10] = nan
    cov = self.frame.cov()

    assert_almost_equal(cov['A']['C'],
                        self.frame['A'].cov(self.frame['C']))

    # exclude non-numeric types
    result = self.mixed_frame.cov()
    expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
    assert_frame_equal(result, expected)

    # Single column frame
    df = DataFrame(np.linspace(0.0,1.0,10))
    result = df.cov()
    expected = DataFrame(np.cov(df.values.T).reshape((1,1)),
                         index=df.columns,columns=df.columns)
    assert_frame_equal(result, expected)
    df.ix[0] = np.nan
    result = df.cov()
    expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),
                         index=df.columns,columns=df.columns)
    assert_frame_equal(result, expected)
def test_corrwith(self):
    """corrwith along both axes, with drop=True and partially overlapping
    indexes."""
    a = self.tsframe
    noise = Series(randn(len(a)), index=a.index)

    b = self.tsframe + noise

    # make sure order does not matter
    b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
    del b['B']

    colcorr = a.corrwith(b, axis=0)
    assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))

    rowcorr = a.corrwith(b, axis=1)
    assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))

    dropped = a.corrwith(b, axis=0, drop=True)
    assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
    self.assertNotIn('B', dropped)

    dropped = a.corrwith(b, axis=1, drop=True)
    self.assertNotIn(a.index[-1], dropped.index)

    # non time-series data
    index = ['a', 'b', 'c', 'd', 'e']
    columns = ['one', 'two', 'three', 'four']
    df1 = DataFrame(randn(5, 4), index=index, columns=columns)
    df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
    correls = df1.corrwith(df2, axis=1)
    for row in index[:4]:
        assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
def test_corrwith_with_objects(self):
    """corrwith silently ignores non-numeric (object-dtype) columns."""
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame()
    cols = ['A', 'B', 'C', 'D']
    # add an object column to each frame; it must not affect the result
    df1['obj'] = 'foo'
    df2['obj'] = 'bar'
    result = df1.corrwith(df2)
    expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
    assert_series_equal(result, expected)
    result = df1.corrwith(df2, axis=1)
    expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
    assert_series_equal(result, expected)
def test_corrwith_series(self):
    """corrwith with a Series correlates every column against it,
    equivalent to applying Series.corr column by column."""
    result = self.tsframe.corrwith(self.tsframe['A'])
    expected = self.tsframe.apply(self.tsframe['A'].corr)
    assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
    """corrwith on identically-labelled frames agrees with np.corrcoef."""
    base = np.arange(10000)
    frame_lin = DataFrame({'a': base})
    frame_sq = DataFrame({'a': base ** 2})
    pandas_corr = frame_lin.corrwith(frame_sq)['a']
    numpy_corr = np.corrcoef(frame_lin['a'], frame_sq['a'])[0][1]
    assert_almost_equal(pandas_corr, numpy_corr)
    # x vs x**2 is monotone but not perfectly linear
    self.assertTrue(pandas_corr < 1)
def test_drop_names(self):
    """drop preserves index/columns .name attributes, and
    errors='ignore' skips missing labels while keeping names."""
    df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],
                   columns=['d', 'e', 'f'])
    df.index.name, df.columns.name = 'first', 'second'
    df_dropped_b = df.drop('b')
    df_dropped_e = df.drop('e', axis=1)
    df_inplace_b, df_inplace_e = df.copy(), df.copy()
    df_inplace_b.drop('b', inplace=True)
    df_inplace_e.drop('e', axis=1, inplace=True)
    # every variant keeps both axis names
    for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
        self.assertEqual(obj.index.name, 'first')
        self.assertEqual(obj.columns.name, 'second')
    # the source frame itself is untouched by the non-inplace drops
    self.assertEqual(list(df.columns), ['d', 'e', 'f'])
    # missing labels raise by default
    self.assertRaises(ValueError, df.drop, ['g'])
    self.assertRaises(ValueError, df.drop, ['g'], 1)
    # errors='ignore': drop what exists, skip what doesn't, keep names
    dropped = df.drop(['g'], errors='ignore')
    expected = Index(['a', 'b', 'c'], name='first')
    self.assert_index_equal(dropped.index, expected)
    dropped = df.drop(['b', 'g'], errors='ignore')
    expected = Index(['a', 'c'], name='first')
    self.assert_index_equal(dropped.index, expected)
    dropped = df.drop(['g'], axis=1, errors='ignore')
    expected = Index(['d', 'e', 'f'], name='second')
    self.assert_index_equal(dropped.columns, expected)
    dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
    expected = Index(['e', 'f'], name='second')
    self.assert_index_equal(dropped.columns, expected)
def test_dropEmptyRows(self):
    """dropna(how='all') removes all-NaN rows; inplace matches the
    copying API, and the source column is never mutated."""
    N = len(self.frame.index)
    mat = randn(N)
    mat[:5] = nan
    frame = DataFrame({'foo': mat}, index=self.frame.index)
    original = Series(mat, index=self.frame.index, name='foo')
    expected = original.dropna()
    inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
    smaller_frame = frame.dropna(how='all')
    # the non-inplace call must leave the source column untouched
    assert_series_equal(frame['foo'], original)
    inplace_frame1.dropna(how='all', inplace=True)
    assert_series_equal(smaller_frame['foo'], expected)
    assert_series_equal(inplace_frame1['foo'], expected)
    # same result when restricted to the only column via subset
    smaller_frame = frame.dropna(how='all', subset=['foo'])
    inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
    assert_series_equal(smaller_frame['foo'], expected)
    assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
    """Default dropna removes rows with any NaN; subset= restricts
    which columns are inspected."""
    N = len(self.frame.index)
    mat = randn(N)
    mat[:5] = nan
    frame = DataFrame({'foo': mat}, index=self.frame.index)
    # 'bar' is all non-NaN, so only 'foo' can trigger drops
    frame['bar'] = 5
    original = Series(mat, index=self.frame.index, name='foo')
    inp_frame1, inp_frame2 = frame.copy(), frame.copy()
    smaller_frame = frame.dropna()
    # non-inplace call leaves the source untouched
    assert_series_equal(frame['foo'], original)
    inp_frame1.dropna(inplace=True)
    self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])
    self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])
    # subset=['bar'] has no NaNs, so nothing is dropped
    samesize_frame = frame.dropna(subset=['bar'])
    assert_series_equal(frame['foo'], original)
    self.assertTrue((frame['bar'] == 5).all())
    inp_frame2.dropna(subset=['bar'], inplace=True)
    self.assertTrue(samesize_frame.index.equals(self.frame.index))
    self.assertTrue(inp_frame2.index.equals(self.frame.index))
def test_dropna(self):
    """dropna along both axes with how=, thresh=, subset=, checking
    inplace against the copying API each time."""
    df = DataFrame(np.random.randn(6, 4))
    # NaN-hole rows 0-1 of column 2
    df[2][:2] = nan
    dropped = df.dropna(axis=1)
    expected = df.ix[:, [0, 1, 3]]
    inp = df.copy()
    inp.dropna(axis=1, inplace=True)
    assert_frame_equal(dropped, expected)
    assert_frame_equal(inp, expected)
    dropped = df.dropna(axis=0)
    expected = df.ix[lrange(2, 6)]
    inp = df.copy()
    inp.dropna(axis=0, inplace=True)
    assert_frame_equal(dropped, expected)
    assert_frame_equal(inp, expected)
    # threshold: keep labels with at least `thresh` non-NaN values
    dropped = df.dropna(axis=1, thresh=5)
    expected = df.ix[:, [0, 1, 3]]
    inp = df.copy()
    inp.dropna(axis=1, thresh=5, inplace=True)
    assert_frame_equal(dropped, expected)
    assert_frame_equal(inp, expected)
    dropped = df.dropna(axis=0, thresh=4)
    expected = df.ix[lrange(2, 6)]
    inp = df.copy()
    inp.dropna(axis=0, thresh=4, inplace=True)
    assert_frame_equal(dropped, expected)
    assert_frame_equal(inp, expected)
    # column 2 has 4 non-NaN values, so thresh<=4 keeps everything
    dropped = df.dropna(axis=1, thresh=4)
    assert_frame_equal(dropped, df)
    dropped = df.dropna(axis=1, thresh=3)
    assert_frame_equal(dropped, df)
    # subset: only consult the listed columns (none of which has NaN)
    dropped = df.dropna(axis=0, subset=[0, 1, 3])
    inp = df.copy()
    inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
    assert_frame_equal(dropped, df)
    assert_frame_equal(inp, df)
    # how='all': only entirely-NaN labels are dropped
    dropped = df.dropna(axis=1, how='all')
    assert_frame_equal(dropped, df)
    df[2] = nan
    dropped = df.dropna(axis=1, how='all')
    expected = df.ix[:, [0, 1, 3]]
    assert_frame_equal(dropped, expected)
    # an out-of-range axis is rejected
    self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
    """Series views obtained via df[col] must interact correctly with
    the parent frame's item cache for drop/dropna (inplace or not)."""
    original = Series([1, 2, np.nan], name='A')
    expected = Series([1, 2], dtype=original.dtype, name='A')
    df = pd.DataFrame({'A': original.values.copy()})
    df2 = df.copy()
    # non-inplace dropna on the column view leaves the frame untouched
    df['A'].dropna()
    assert_series_equal(df['A'], original)
    # inplace dropna on the view updates the cached column
    df['A'].dropna(inplace=True)
    assert_series_equal(df['A'], expected)
    # same pattern for drop-by-label on the column view
    df2['A'].drop([1])
    assert_series_equal(df2['A'], original)
    df2['A'].drop([1], inplace=True)
    assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
    """Invalid dropna arguments raise the appropriate exception types."""
    bad_calls = [
        (ValueError, dict(how='foo')),        # unknown 'how' strategy
        (TypeError, dict(how=None)),          # 'how' must be a string
        (KeyError, dict(subset=['A', 'X'])),  # unknown column in subset
    ]
    for exc, kwargs in bad_calls:
        self.assertRaises(exc, self.frame.dropna, **kwargs)
def test_dropna_multiple_axes(self):
    """dropna accepts a list/tuple of axes, applied sequentially,
    equivalent to chaining single-axis calls."""
    df = DataFrame([[1, np.nan, 2, 3],
                    [4, np.nan, 5, 6],
                    [np.nan, np.nan, np.nan, np.nan],
                    [7, np.nan, 8, 9]])
    cp = df.copy()
    result = df.dropna(how='all', axis=[0, 1])
    result2 = df.dropna(how='all', axis=(0, 1))
    # same as dropping all-NaN rows, then all-NaN columns
    expected = df.dropna(how='all').dropna(how='all', axis=1)
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
    # the source frame is untouched
    assert_frame_equal(df, cp)
    inp = df.copy()
    inp.dropna(how='all', axis=(0, 1), inplace=True)
    assert_frame_equal(inp, expected)
def test_drop_duplicates(self):
    """drop_duplicates: single/multi-column subsets, keep= options, the
    deprecated take_last alias, integer dtypes, and int64-boundary values."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    # single column
    result = df.drop_duplicates('AAA')
    expected = df[:2]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('AAA', keep='last')
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)
    # keep=False drops every duplicated row entirely
    result = df.drop_duplicates('AAA', keep=False)
    expected = df.ix[[]]
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)
    # deprecated take_last=True behaves like keep='last' but warns
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('AAA', take_last=True)
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)
    # multi column (list, array, and tuple subset forms)
    expected = df.ix[[0, 1, 2, 3]]
    result = df.drop_duplicates(np.array(['AAA', 'B']))
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'])
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(('AAA', 'B'), keep='last')
    expected = df.ix[[0, 5, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(('AAA', 'B'), keep=False)
    expected = df.ix[[0]]
    assert_frame_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(('AAA', 'B'), take_last=True)
    expected = df.ix[[0, 5, 6, 7]]
    assert_frame_equal(result, expected)
    # no subset: consider every column
    df2 = df.ix[:, ['AAA', 'B', 'C']]
    result = df2.drop_duplicates()
    # for this data C duplicates (AAA, B), so the subsets agree
    expected = df2.drop_duplicates(['AAA', 'B'])
    assert_frame_equal(result, expected)
    result = df2.drop_duplicates(keep='last')
    expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
    assert_frame_equal(result, expected)
    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
    assert_frame_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = df2.drop_duplicates(take_last=True)
    with tm.assert_produces_warning(FutureWarning):
        expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
    assert_frame_equal(result, expected)
    # integer column
    result = df.drop_duplicates('C')
    expected = df.iloc[[0,2]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C',keep='last')
    expected = df.iloc[[-2,-1]]
    assert_frame_equal(result, expected)
    # narrower integer dtype behaves the same
    df['E'] = df['C'].astype('int8')
    result = df.drop_duplicates('E')
    expected = df.iloc[[0,2]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('E',keep='last')
    expected = df.iloc[[-2,-1]]
    assert_frame_equal(result, expected)
    # all-column dedup on purely numeric frames
    df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                       'y': [0, 6, 5, 5, 9, 1, 2]})
    expected = df.loc[df.index != 3]
    assert_frame_equal(df.drop_duplicates(), expected)
    df = pd.DataFrame([[1 , 0], [0, 2]])
    assert_frame_equal(df.drop_duplicates(), df)
    df = pd.DataFrame([[-2, 0], [0, -4]])
    assert_frame_equal(df.drop_duplicates(), df)
    # values near the int64 boundary must not collide via hash overflow
    x = np.iinfo(np.int64).max / 3 * 2
    df = pd.DataFrame([[-x, x], [0, x + 4]])
    assert_frame_equal(df.drop_duplicates(), df)
    df = pd.DataFrame([[-x, x], [x, x + 4]])
    assert_frame_equal(df.drop_duplicates(), df)
def test_drop_duplicates_for_take_all(self):
    """keep='first'/'last'/False over columns that contain both
    duplicated and genuinely unique values."""
    df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
                            'foo', 'bar', 'qux', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    # single column
    result = df.drop_duplicates('AAA')
    expected = df.iloc[[0, 1, 2, 6]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('AAA', keep='last')
    expected = df.iloc[[2, 5, 6, 7]]
    assert_frame_equal(result, expected)
    # keep=False: only the singletons ('baz', 'qux') survive
    result = df.drop_duplicates('AAA', keep=False)
    expected = df.iloc[[2, 6]]
    assert_frame_equal(result, expected)
    # multiple columns
    result = df.drop_duplicates(['AAA', 'B'])
    expected = df.iloc[[0, 1, 2, 3, 4, 6]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'], keep='last')
    expected = df.iloc[[0, 1, 2, 5, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'], keep=False)
    expected = df.iloc[[0, 1, 2, 6]]
    assert_frame_equal(result, expected)
def test_drop_duplicates_deprecated_warning(self):
    """Deprecated 'cols'/'take_last' keywords warn; conflicting or
    unknown keywords raise; invalid keep= values raise."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    expected = df[:2]
    # 'subset' is the supported spelling: no warning at all
    with tm.assert_produces_warning(False):
        result = df.drop_duplicates(subset='AAA')
    assert_frame_equal(result, expected)
    # legacy 'cols' alias still works but emits a FutureWarning
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(cols='AAA')
    assert_frame_equal(result, expected)
    # mixing old and new names, or passing an unknown kwarg, is an error
    self.assertRaises(TypeError, df.drop_duplicates,
                      kwargs={'cols': 'AAA', 'subset': 'B'})
    self.assertRaises(TypeError, df.drop_duplicates,
                      kwargs={'subset': 'AAA', 'bad_arg': True})
    # deprecated take_last=False is equivalent to keep='first'
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(take_last=False, subset='AAA')
    assert_frame_equal(result, expected)
    self.assertRaises(ValueError, df.drop_duplicates, keep='invalid_name')
def test_drop_duplicates_tuple(self):
    """A tuple column label is treated as a single key; to use it with
    other keys it must be nested inside another tuple/list."""
    df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
                                   'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    # single (tuple-named) column
    result = df.drop_duplicates(('AA', 'AB'))
    expected = df[:2]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(('AA', 'AB'), keep='last')
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(('AA', 'AB'), keep=False)
    expected = df.ix[[]]
    self.assertEqual(len(result), 0)
    assert_frame_equal(result, expected)
    # deprecated take_last alias
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(('AA', 'AB'), take_last=True)
    expected = df.ix[[6, 7]]
    assert_frame_equal(result, expected)
    # multi column: the tuple label nested inside the subset
    expected = df.ix[[0, 1, 2, 3]]
    result = df.drop_duplicates((('AA', 'AB'), 'B'))
    assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
    """NaN/None values compare equal to each other when deduplicating,
    for both object (None) and float (nan) columns."""
    # None in an object column
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})
    # single column
    result = df.drop_duplicates('A')
    expected = df.ix[[0, 2, 3]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('A', keep='last')
    expected = df.ix[[1, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('A', keep=False)
    expected = df.ix[[]]
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)
    # deprecated take_last alias
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('A', take_last=True)
    expected = df.ix[[1, 6, 7]]
    assert_frame_equal(result, expected)
    # multi column
    result = df.drop_duplicates(['A', 'B'])
    expected = df.ix[[0, 2, 3, 6]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['A', 'B'], keep='last')
    expected = df.ix[[1, 5, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['A', 'B'], keep=False)
    expected = df.ix[[6]]
    assert_frame_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(['A', 'B'], take_last=True)
    expected = df.ix[[1, 5, 6, 7]]
    assert_frame_equal(result, expected)
    # nan in a float column
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})
    # single column
    result = df.drop_duplicates('C')
    expected = df[:2]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep='last')
    expected = df.ix[[3, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep=False)
    expected = df.ix[[]]
    assert_frame_equal(result, expected)
    self.assertEqual(len(result), 0)
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates('C', take_last=True)
    expected = df.ix[[3, 7]]
    assert_frame_equal(result, expected)
    # multi column
    result = df.drop_duplicates(['C', 'B'])
    expected = df.ix[[0, 1, 2, 4]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['C', 'B'], keep='last')
    expected = df.ix[[1, 3, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates(['C', 'B'], keep=False)
    expected = df.ix[[1]]
    assert_frame_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = df.drop_duplicates(['C', 'B'], take_last=True)
    expected = df.ix[[1, 3, 6, 7]]
    assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
    """keep= options with NA present alongside genuinely unique values
    (None in object columns, nan in float columns)."""
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'baz', 'bar', 'qux'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
    # object column with None
    result = df.drop_duplicates('A')
    expected = df.iloc[[0, 2, 3, 5, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('A', keep='last')
    expected = df.iloc[[1, 4, 5, 6, 7]]
    assert_frame_equal(result, expected)
    # keep=False: only values occurring once ('baz', 'qux') survive
    result = df.drop_duplicates('A', keep=False)
    expected = df.iloc[[5, 7]]
    assert_frame_equal(result, expected)
    # float column with nan (the nans count as duplicates of each other)
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 1, 5, 6]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[3, 5, 6, 7]]
    assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep=False)
    expected = df.iloc[[5, 6]]
    assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
    """inplace=True must mutate the frame to exactly what the copying
    API returns, for every subset/keep combination."""
    orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                      'B': ['one', 'one', 'two', 'two',
                            'two', 'two', 'one', 'two'],
                      'C': [1, 1, 2, 2, 2, 2, 1, 2],
                      'D': lrange(8)})
    # single column
    df = orig.copy()
    df.drop_duplicates('A', inplace=True)
    expected = orig[:2]
    result = df
    assert_frame_equal(result, expected)
    df = orig.copy()
    df.drop_duplicates('A', keep='last', inplace=True)
    expected = orig.ix[[6, 7]]
    result = df
    assert_frame_equal(result, expected)
    df = orig.copy()
    df.drop_duplicates('A', keep=False, inplace=True)
    expected = orig.ix[[]]
    result = df
    assert_frame_equal(result, expected)
    self.assertEqual(len(df), 0)
    # deprecated take_last alias
    df = orig.copy()
    with tm.assert_produces_warning(FutureWarning):
        df.drop_duplicates('A', take_last=True, inplace=True)
    expected = orig.ix[[6, 7]]
    result = df
    assert_frame_equal(result, expected)
    # multi column
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], inplace=True)
    expected = orig.ix[[0, 1, 2, 3]]
    result = df
    assert_frame_equal(result, expected)
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
    expected = orig.ix[[0, 5, 6, 7]]
    result = df
    assert_frame_equal(result, expected)
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
    expected = orig.ix[[0]]
    result = df
    assert_frame_equal(result, expected)
    df = orig.copy()
    with tm.assert_produces_warning(FutureWarning):
        df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
    expected = orig.ix[[0, 5, 6, 7]]
    result = df
    assert_frame_equal(result, expected)
    # no subset: consider every column
    orig2 = orig.ix[:, ['A', 'B', 'C']].copy()
    df2 = orig2.copy()
    df2.drop_duplicates(inplace=True)
    # for this data C duplicates (A, B), so the subsets agree
    expected = orig2.drop_duplicates(['A', 'B'])
    result = df2
    assert_frame_equal(result, expected)
    df2 = orig2.copy()
    df2.drop_duplicates(keep='last', inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep='last')
    result = df2
    assert_frame_equal(result, expected)
    df2 = orig2.copy()
    df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep=False)
    result = df2
    assert_frame_equal(result, expected)
    # deprecated take_last alias (warns on both calls)
    df2 = orig2.copy()
    with tm.assert_produces_warning(FutureWarning):
        df2.drop_duplicates(take_last=True, inplace=True)
    with tm.assert_produces_warning(FutureWarning):
        expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
    result = df2
    assert_frame_equal(result, expected)
def test_duplicated_deprecated_warning(self):
    """duplicated(): legacy 'cols' alias warns; conflicting or unknown
    keyword arguments raise TypeError."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})
    # 'subset' is the supported keyword: no warning expected
    with tm.assert_produces_warning(False):
        result = df.duplicated(subset='AAA')
    # legacy 'cols' alias warns but still works
    with tm.assert_produces_warning(FutureWarning):
        result = df.duplicated(cols='AAA')
    # mixing old and new names, or an unknown kwarg, is an error
    self.assertRaises(TypeError, df.duplicated,
                      kwargs={'cols': 'AAA', 'subset': 'B'})
    self.assertRaises(TypeError, df.duplicated,
                      kwargs={'subset': 'AAA', 'bad_arg': True})
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
    """drop by label on both axes: named axes ('index'/'columns'/'rows'),
    errors='ignore', non-unique labels, and boolean-selected inplace drop."""
    simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
    assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
    assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
                       simple[[]])
    assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
    assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])
    # missing labels raise by default...
    self.assertRaises(ValueError, simple.drop, 5)
    self.assertRaises(ValueError, simple.drop, 'C', 1)
    self.assertRaises(ValueError, simple.drop, [1, 5])
    self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
    # ...but errors='ignore' drops what exists and skips the rest
    assert_frame_equal(simple.drop(5, errors='ignore'), simple)
    assert_frame_equal(simple.drop([0, 5], errors='ignore'),
                       simple.ix[[1, 2, 3], :])
    assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
    assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
                       simple[['B']])
    # non-unique labels: dropping a label removes every occurrence
    nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
                      columns=['a', 'a', 'b'])
    assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
    assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
    # same for a non-unique row index
    nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
    nu_df.columns = list('abc')
    assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
    assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
    # inplace drop with labels selected by a boolean condition
    df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))
    expected = df[~(df.b>0)]
    df.drop(labels=df[df.b>0].index, inplace=True)
    assert_frame_equal(df,expected)
def test_fillna(self):
    """fillna with a scalar value, method='pad', per-column dict,
    limit=, and datetime (NaT) columns."""
    self.tsframe.ix[:5,'A'] = nan
    self.tsframe.ix[-5:,'A'] = nan
    zero_filled = self.tsframe.fillna(0)
    self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())
    padded = self.tsframe.fillna(method='pad')
    # leading NaNs have nothing to pad from and stay NaN
    self.assertTrue(np.isnan(padded.ix[:5,'A']).all())
    self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())
    # mixed-type frame: just exercise both code paths
    self.mixed_frame.ix[5:20,'foo'] = nan
    self.mixed_frame.ix[-10:,'A'] = nan
    result = self.mixed_frame.fillna(value=0)
    result = self.mixed_frame.fillna(method='pad')
    # must supply either a value or a method, and never both
    self.assertRaises(ValueError, self.tsframe.fillna)
    self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
    # mixed numeric dtypes: filling must preserve each column's dtype
    mf = self.mixed_float.reindex(columns=['A','B','D'])
    mf.ix[-10:,'A'] = nan
    result = mf.fillna(value=0)
    _check_mixed_float(result, dtype = dict(C = None))
    result = mf.fillna(method='pad')
    _check_mixed_float(result, dtype = dict(C = None))
    # empty frame: pad/backfill (inplace or not) must not raise
    df = DataFrame(columns=['x'])
    for m in ['pad','backfill']:
        df.x.fillna(method=m,inplace=1)
        df.x.fillna(method=m)
    # object frame filled via a column-keyed dict
    df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])
    result = df.fillna({ 2: 'foo' })
    expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])
    assert_frame_equal(result, expected)
    df.fillna({ 2: 'foo' }, inplace=True)
    assert_frame_equal(df, expected)
    # limit=1: only the first NaN per column is filled
    df = DataFrame(np.random.randn(10,3))
    df.iloc[2:7,0] = np.nan
    df.iloc[3:5,2] = np.nan
    expected = df.copy()
    expected.iloc[2,0] = 999
    expected.iloc[3,2] = 999
    result = df.fillna(999,limit=1)
    assert_frame_equal(result, expected)
    # datetime columns: NaT filled from another datetime column
    df = DataFrame({
        'Date':[pd.NaT, Timestamp("2014-1-1")],
        'Date2':[ Timestamp("2013-1-1"), pd.NaT]
    })
    expected = df.copy()
    expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])
    result = df.fillna(value={'Date':df['Date2']})
    assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
    """fillna converts dtypes sensibly: all-NaN object frames take the
    fill value's dtype, and fillna matches replace(nan, v)."""
    # an empty (all-NaN, object dtype) frame...
    df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5])
    result = df.get_dtype_counts().sort_values()
    expected = Series({ 'object' : 5 })
    assert_series_equal(result, expected)
    # ...filled with an int becomes int64 throughout
    result = df.fillna(1)
    expected = DataFrame(1, index=["A","B","C"], columns = [1,2,3,4,5])
    result = result.get_dtype_counts().sort_values()
    expected = Series({ 'int64' : 5 })
    assert_series_equal(result, expected)
    # an all-NaN float frame filled with a string
    df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')
    result = df.fillna('nan')
    expected = DataFrame('nan',index=lrange(3),columns=['A','B'])
    assert_frame_equal(result, expected)
    # fillna(v) must be equivalent to replace(nan, v) for varied dtypes
    df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))
    for v in ['',1,np.nan,1.0]:
        expected = df.replace(np.nan,v)
        result = df.fillna(v)
        assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
    """fillna on frames mixing datetime64 columns with numeric and
    object columns, with and without NaT in the datetime column."""
    df = pd.DataFrame({'A': [-1, -2, np.nan],
                       'B': date_range('20130101', periods=3),
                       'C': ['foo', 'bar', None],
                       'D': ['foo2', 'bar2', None]},
                      index=date_range('20130110', periods=3))
    result = df.fillna('?')
    # the complete datetime column B is untouched
    expected = pd.DataFrame({'A': [-1, -2, '?'],
                             'B': date_range('20130101', periods=3),
                             'C': ['foo', 'bar', '?'],
                             'D': ['foo2', 'bar2', '?']},
                            index=date_range('20130110', periods=3))
    self.assert_frame_equal(result, expected)
    # same, but with a NaT hole in the datetime column itself
    df = pd.DataFrame({'A': [-1, -2, np.nan],
                       'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), pd.NaT],
                       'C': ['foo', 'bar', None],
                       'D': ['foo2', 'bar2', None]},
                      index=date_range('20130110', periods=3))
    result = df.fillna('?')
    # the NaT is replaced by the fill value like any other missing cell
    expected = pd.DataFrame({'A': [-1, -2, '?'],
                             'B': [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), '?'],
                             'C': ['foo', 'bar', '?'],
                             'D': ['foo2', 'bar2', '?']},
                            index=date_range('20130110', periods=3))
    self.assert_frame_equal(result, expected)
def test_ffill(self):
    """DataFrame.ffill is equivalent to fillna(method='ffill')."""
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan
    assert_frame_equal(self.tsframe.ffill(),
                       self.tsframe.fillna(method='ffill'))
def test_bfill(self):
    """DataFrame.bfill is equivalent to fillna(method='bfill')."""
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan
    assert_frame_equal(self.tsframe.bfill(),
                       self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
    """inplace fillna mutates the frame to exactly what the copying
    API returns, for both value= and method= forms."""
    df = DataFrame(np.random.randn(10, 4))
    df[1][:4] = np.nan
    df[3][-4:] = np.nan
    expected = df.fillna(value=0)
    # the non-inplace call returned a new object
    self.assertIsNot(expected, df)
    df.fillna(value=0, inplace=True)
    assert_frame_equal(df, expected)
    # same check with a fill method
    df[1][:4] = np.nan
    df[3][-4:] = np.nan
    expected = df.fillna(method='ffill')
    self.assertIsNot(expected, df)
    df.fillna(method='ffill', inplace=True)
    assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
    """fillna with a per-column dict or Series of fill values;
    axis=1 with a Series is not implemented."""
    df = DataFrame({'a': [nan, 1, 2, nan, nan],
                    'b': [1, 2, 3, nan, nan],
                    'c': [nan, 1, 2, 3, 4]})
    result = df.fillna({'a': 0, 'b': 5})
    expected = df.copy()
    expected['a'] = expected['a'].fillna(0)
    expected['b'] = expected['b'].fillna(5)
    assert_frame_equal(result, expected)
    # keys for columns not in the frame are silently ignored
    result = df.fillna({'a': 0, 'b': 5, 'd': 7})
    # a Series of fill values is treated the same as a dict
    result = df.fillna(df.max())
    expected = df.fillna(df.max().to_dict())
    assert_frame_equal(result, expected)
    # row-wise Series fill is disabled for now
    with assertRaisesRegexp(NotImplementedError, 'column by column'):
        df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
    """Filling from another DataFrame aligns on both index and columns."""
    df = DataFrame({'a': [nan, 1, 2, nan, nan],
                    'b': [1, 2, 3, nan, nan],
                    'c': [nan, 1, 2, 3, 4]},
                   index = list('VWXYZ'))
    # df2 may have different index and columns
    df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
                     'b': [50, 60, 70, 80, 90],
                     'foo': ['bar']*5},
                    index = list('VWXuZ'))
    result = df.fillna(df2)
    # only those columns and indices which are shared get filled
    expected = DataFrame({'a': [nan, 1, 2, nan, 40],
                          'b': [1, 2, 3, nan, 90],
                          'c': [nan, 1, 2, 3, 4]},
                         index = list('VWXYZ'))
    assert_frame_equal(result, expected)
def test_fillna_columns(self):
    """fillna with axis=1 pads across columns, equivalent to padding
    the transpose, including on mixed-dtype frames."""
    df = DataFrame(np.random.randn(10, 10))
    # NaN out every other column
    df.values[:, ::2] = np.nan
    result = df.fillna(method='ffill', axis=1)
    expected = df.T.fillna(method='pad').T
    assert_frame_equal(result, expected)
    # insert an int column to make the frame mixed-dtype
    df.insert(6, 'foo', 5)
    result = df.fillna(method='ffill', axis=1)
    expected = df.astype(float).fillna(method='ffill', axis=1)
    assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
    """A misspelled fill method raises ValueError naming the bad value."""
    with assertRaisesRegexp(ValueError, 'ffil'):
        self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
    """Rejected fill values: list, tuple, and a DataFrame passed to a
    Series' fillna."""
    # list
    self.assertRaises(TypeError, self.frame.fillna, [1, 2])
    # tuple
    self.assertRaises(TypeError, self.frame.fillna, (1, 2))
    # a frame is not a valid fill value for a single column (Series)
    self.assertRaises(ValueError, self.frame.iloc[:,0].fillna, self.frame)
def test_replace_inplace(self):
    """replace(nan, 0) matches fillna(0); inplace, scalar, and list
    forms all agree; replace without a target value raises."""
    self.tsframe['A'][:5] = nan
    self.tsframe['A'][-5:] = nan
    tsframe = self.tsframe.copy()
    tsframe.replace(nan, 0, inplace=True)
    assert_frame_equal(tsframe, self.tsframe.fillna(0))
    # replace with nothing to replace *to* is an error
    self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
    self.assertRaises(TypeError, self.tsframe.replace, nan)
    # mixed-type frame behaves the same as fillna
    self.mixed_frame.ix[5:20,'foo'] = nan
    self.mixed_frame.ix[-10:,'A'] = nan
    result = self.mixed_frame.replace(np.nan, 0)
    expected = self.mixed_frame.fillna(value=0)
    assert_frame_equal(result, expected)
    # list form: replace([nan], [0])
    tsframe = self.tsframe.copy()
    tsframe.replace([nan], [0], inplace=True)
    assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
    """Scalar regex replacement: pattern->value and pattern->backref,
    string patterns and precompiled regexes, positional and keyword
    forms, on object-only and mixed frames."""
    obj = {'a': list('ab..'), 'b': list('efgh')}
    dfobj = DataFrame(obj)
    mix = {'a': lrange(4), 'b': list('ab..')}
    dfmix = DataFrame(mix)
    ### simplest cases
    ## regex -> value
    # obj frame: '.' cells become nan; refilling with '.' round-trips
    res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex (replacement uses a backreference)
    # obj frame
    res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    # everything with compiled regexs as well
    res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex (compiled pattern implies regex=True)
    # obj frame
    res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    # pattern supplied via the regex= keyword instead of positionally
    res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
    """Same coverage as test_regex_replace_scalar, but every call made
    with inplace=True on a fresh copy."""
    obj = {'a': list('ab..'), 'b': list('efgh')}
    dfobj = DataFrame(obj)
    mix = {'a': lrange(4), 'b': list('ab..')}
    dfmix = DataFrame(mix)
    ### simplest cases
    ## regex -> value
    # obj frame
    res = dfobj.copy()
    res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.copy()
    res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex (backreference replacement)
    # obj frame
    res = dfobj.copy()
    res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.copy()
    res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    # everything with compiled regexs as well
    res = dfobj.copy()
    res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.copy()
    res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
                inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.copy()
    res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
                inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    # everything again via the regex= keyword form
    res = dfobj.copy()
    res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.copy()
    res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.copy()
    res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
    # everything with compiled regexs as well
    res = dfobj.copy()
    res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
    assert_frame_equal(dfobj, res.fillna('.'))
    # mixed
    res = dfmix.copy()
    res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
    assert_frame_equal(dfmix, res.fillna('.'))
    ## regex -> regex
    # obj frame
    res = dfobj.copy()
    res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
                inplace=True)
    objc = obj.copy()
    objc['a'] = ['a', 'b', '...', '...']
    expec = DataFrame(objc)
    assert_frame_equal(res, expec)
    # with mixed
    res = dfmix.copy()
    res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
                inplace=True)
    mixc = mix.copy()
    mixc['b'] = ['a', 'b', '...', '...']
    expec = DataFrame(mixc)
    assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
### same as above with inplace=True
## lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
## mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
    def test_regex_replace_dict_mixed(self):
        """Dict-keyed regex replacement on a mixed frame: patterns and
        values can be scoped to a single column via dicts."""
        mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
        dfmix = DataFrame(mix)
        # {col: regex} -> {col: value}, regular and inplace
        res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
        res2 = dfmix.copy()
        res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
        expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
                           mix['c']})
        assert_frame_equal(res, expec)
        assert_frame_equal(res2, expec)
        # {col: regex with group} -> {col: replacement with backreference}
        res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
        res2 = dfmix.copy()
        res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
                     regex=True)
        expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
                           mix['c']})
        assert_frame_equal(res, expec)
        assert_frame_equal(res2, expec)
        # same, with the pattern dict passed via the regex keyword
        res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
        res2 = dfmix.copy()
        res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
                     inplace=True)
        expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
                           mix['c']})
        assert_frame_equal(res, expec)
        assert_frame_equal(res2, expec)
        # scalar regex -> {col: value}: only column 'b' is touched
        expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
                           mix['c']})
        res = dfmix.replace('a', {'b': nan}, regex=True)
        res2 = dfmix.copy()
        res2.replace('a', {'b': nan}, regex=True, inplace=True)
        assert_frame_equal(res, expec)
        assert_frame_equal(res2, expec)
        # scalar via the regex keyword, value as a column dict
        res = dfmix.replace('a', {'b': nan}, regex=True)
        res2 = dfmix.copy()
        res2.replace(regex='a', value={'b': nan}, inplace=True)
        expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
                           mix['c']})
        assert_frame_equal(res, expec)
        assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
result = df.replace({'Type': {'Q':0,'T':1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
metachars = '[]', '()', '\d', '\w', '\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
tm.assert_frame_equal(result, expected)
    def test_replace(self):
        """Scalar nan <-> sentinel replacement on the shared tsframe
        fixture; replace on a frame with no columns is a no-op."""
        # poke nan holes into column 'A' so replace has work to do
        # NOTE(review): these writes mutate the shared fixture in place
        self.tsframe['A'][:5] = nan
        self.tsframe['A'][-5:] = nan
        zero_filled = self.tsframe.replace(nan, -1e8)
        assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
        # replacing the sentinel back must round-trip to the original
        assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
        self.tsframe['A'][:5] = nan
        self.tsframe['A'][-5:] = nan
        self.tsframe['B'][:5] = -1e8
        # frame with an index but no columns: replace returns it unchanged
        df = DataFrame(index=['a', 'b'])
        assert_frame_equal(df, df.replace(5, 7))
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([ np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
    def test_replace_mixed(self):
        """Replace on the mixed-dtype fixture, plus dtype transitions:
        int -> float, int/float -> object, and a dict of column means."""
        # NOTE(review): mutates the shared mixed_frame fixture in place
        self.mixed_frame.ix[5:20,'foo'] = nan
        self.mixed_frame.ix[-10:,'A'] = nan
        result = self.mixed_frame.replace(np.nan, -18)
        expected = self.mixed_frame.fillna(value=-18)
        assert_frame_equal(result, expected)
        # replacing the sentinel back must round-trip to the original
        assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
        result = self.mixed_frame.replace(np.nan, -1e8)
        expected = self.mixed_frame.fillna(value=-1e8)
        assert_frame_equal(result, expected)
        assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
        # int column upcasts to float when 0 is replaced by 0.5
        df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
        expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })
        result = df.replace(0, 0.5)
        assert_frame_equal(result,expected)
        # the same upcast must also happen inplace
        df.replace(0, 0.5, inplace=True)
        assert_frame_equal(df,expected)
        # only the column actually containing 0 is upcast
        df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })
        expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })
        result = df.replace(0, 0.5)
        assert_frame_equal(result,expected)
        # replacing a number by a string upcasts to object
        df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
        expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })
        result = df.replace(2, 'foo')
        assert_frame_equal(result,expected)
        expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })
        result = df.replace([1,2], ['foo','bar'])
        assert_frame_equal(result,expected)
        # replace a scalar by the dict of per-column means
        df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
        result = df.replace(3, df.mean().to_dict())
        expected = df.copy().astype('float64')
        m = df.mean()
        expected.iloc[0,0] = m[0]
        expected.iloc[1,1] = m[1]
        assert_frame_equal(result,expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
result = df.replace({1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
    def test_interpolate(self):
        # intentionally empty placeholder — no interpolation cases here
        pass
    def test_replace_value_is_none(self):
        """replace(nan) with no value raises TypeError; dict-form
        to_replace with an implicit value works the same on the frame
        and on its transpose."""
        self.assertRaises(TypeError, self.tsframe.replace, nan)
        # stash the fixture values so they can be restored at the end
        orig_value = self.tsframe.iloc[0, 0]
        orig2 = self.tsframe.iloc[1, 0]
        self.tsframe.iloc[0, 0] = nan
        self.tsframe.iloc[1, 0] = 1
        result = self.tsframe.replace(to_replace={nan: 0})
        # replacing on the transpose and transposing back must agree
        expected = self.tsframe.T.replace(to_replace={nan: 0}).T
        assert_frame_equal(result, expected)
        result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
        tsframe = self.tsframe.copy()
        tsframe.iloc[0, 0] = 0
        tsframe.iloc[1, 0] = -1e8
        expected = tsframe
        assert_frame_equal(expected, result)
        # restore the shared fixture for subsequent tests
        self.tsframe.iloc[0, 0] = orig_value
        self.tsframe.iloc[1, 0] = orig2
    def test_replace_for_new_dtypes(self):
        """replace / fillna round-trip on a float32 copy of tsframe."""
        tsframe = self.tsframe.copy().astype(np.float32)
        # poke nan holes into column 'A'
        tsframe['A'][:5] = nan
        tsframe['A'][-5:] = nan
        zero_filled = tsframe.replace(nan, -1e8)
        assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
        # replacing the sentinel back must round-trip to the original
        assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
        tsframe['A'][:5] = nan
        tsframe['A'][-5:] = nan
        tsframe['B'][:5] = -1e8
        # turn the sentinel in 'B' back into nan via boolean masking
        b = tsframe['B']
        b[b == -1e8] = nan
        tsframe['B'] = b
        result = tsframe.fillna(method='bfill')
        assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
    def test_replace_input_formats(self):
        """Exercise the accepted (to_replace, value) shapes: dict/dict,
        list/list, dict/scalar, scalar/dict, list/scalar — plus the
        length-mismatch and dict-to-list error cases."""
        # both dicts
        to_rep = {'A': np.nan, 'B': 0, 'C': ''}
        values = {'A': 0, 'B': -1, 'C': 'missing'}
        df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
                        'C': ['', 'asdf', 'fd']})
        filled = df.replace(to_rep, values)
        # frame-level replace must match column-by-column Series.replace
        expected = {}
        for k, v in compat.iteritems(df):
            expected[k] = v.replace(to_rep[k], values[k])
        assert_frame_equal(filled, DataFrame(expected))
        result = df.replace([0, 2, 5], [5, 2, 0])
        expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
                              'C': ['', 'asdf', 'fd']})
        assert_frame_equal(result, expected)
        # dict to scalar
        filled = df.replace(to_rep, 0)
        expected = {}
        for k, v in compat.iteritems(df):
            expected[k] = v.replace(to_rep[k], 0)
        assert_frame_equal(filled, DataFrame(expected))
        # dict to_replace with a list value is not allowed
        self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
        # scalar to dict of per-column values
        values = {'A': 0, 'B': -1, 'C': 'missing'}
        df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
                        'C': ['', 'asdf', 'fd']})
        filled = df.replace(np.nan, values)
        expected = {}
        for k, v in compat.iteritems(df):
            expected[k] = v.replace(np.nan, values[k])
        assert_frame_equal(filled, DataFrame(expected))
        # list to list — must match sequential scalar replaces
        to_rep = [np.nan, 0, '']
        values = [-2, -1, 'missing']
        result = df.replace(to_rep, values)
        expected = df.copy()
        for i in range(len(to_rep)):
            expected.replace(to_rep[i], values[i], inplace=True)
        assert_frame_equal(result, expected)
        # mismatched list lengths must raise
        self.assertRaises(ValueError, df.replace, to_rep, values[1:])
        # list to scalar
        to_rep = [np.nan, 0, '']
        result = df.replace(to_rep, -1)
        expected = df.copy()
        for i in range(len(to_rep)):
            expected.replace(to_rep[i], -1, inplace=True)
        assert_frame_equal(result, expected)
    def test_replace_limit(self):
        # intentionally empty placeholder — the ``limit`` argument of
        # replace is not exercised here
        pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
    def test_replace_doesnt_replace_without_regex(self):
        """With the default regex=False, a regex-looking dict key such as
        '\\D' is a literal value and must not match anything."""
        from pandas.compat import StringIO
        raw = """fol T_opp T_Dir T_Enh
        0    1     0     0    vo
        1    2    vr     0     0
        2    2     0     0     0
        3    3     0    bt     0"""
        df = read_csv(StringIO(raw), sep=r'\s+')
        # '\D' appears nowhere as a literal cell value -> replace is a no-op
        res = df.replace({'\D': 1})
        tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
    def test_replace_period(self):
        """Dict-of-dict replace where the replacement values are Period
        objects: each filename string maps to a monthly Period."""
        d = {'fname':
             {'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),
              'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),
              'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),
              'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),
              'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),
              'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),
              'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}
        df = pd.DataFrame(['out_augmented_AUG_2012.json',
                           'out_augmented_SEP_2013.json',
                           'out_augmented_SUBSIDY_WEEK.json',
                           'out_augmented_MAY_2012.json',
                           'out_augmented_MAY_2011.json',
                           'out_augmented_AUG_2011.json',
                           'out_augmented_JAN_2011.json'], columns=['fname'])
        # sanity check: every frame value has a mapping entry
        tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
        expected = DataFrame({'fname': [d['fname'][k]
                                        for k in df.fname.values]})
        result = df.replace(d)
        tm.assert_frame_equal(result, expected)
    def test_replace_datetime(self):
        """Dict-of-dict replace where the replacement values are
        Timestamps: each filename string maps to a Timestamp."""
        d = {'fname':
             {'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
              'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
              'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
              'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
              'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
              'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
              'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
        df = pd.DataFrame(['out_augmented_AUG_2012.json',
                           'out_augmented_SEP_2013.json',
                           'out_augmented_SUBSIDY_WEEK.json',
                           'out_augmented_MAY_2012.json',
                           'out_augmented_MAY_2011.json',
                           'out_augmented_AUG_2011.json',
                           'out_augmented_JAN_2011.json'], columns=['fname'])
        # sanity check: every frame value has a mapping entry
        tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
        expected = DataFrame({'fname': [d['fname'][k]
                                        for k in df.fname.values]})
        result = df.replace(d)
        tm.assert_frame_equal(result, expected)
    def test_replace_datetimetz(self):
        """replace / fillna on a frame holding a tz-aware datetime column:
        the tz column must survive, and replacing NaT/nan with Timestamps
        of various timezones must slot the new value in."""
        df = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
                        'B' : [0, np.nan, 2]})
        # replacing nan in the numeric column leaves 'A' alone
        result = df.replace(np.nan,1)
        expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
                              'B' : Series([0, 1, 2],dtype='float64')})
        assert_frame_equal(result, expected)
        # fillna must agree with the replace above
        result = df.fillna(1)
        assert_frame_equal(result, expected)
        result = df.replace(0,np.nan)
        expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
                              'B' : [np.nan, np.nan, 2]})
        assert_frame_equal(result, expected)
        # replacing one tz-aware Timestamp by another of the same tz
        result = df.replace(Timestamp('20130102',tz='US/Eastern'),Timestamp('20130104',tz='US/Eastern'))
        expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
                                     Timestamp('20130104',tz='US/Eastern'),
                                     Timestamp('20130103',tz='US/Eastern')],
                              'B' : [0, np.nan, 2]})
        assert_frame_equal(result, expected)
        # dict-form replace of NaT in the tz column
        result = df.copy()
        result.iloc[1,0] = np.nan
        result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Eastern'))
        assert_frame_equal(result, expected)
        # replacement Timestamp in a *different* timezone
        result = df.copy()
        result.iloc[1,0] = np.nan
        result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Pacific'))
        expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
                                     Timestamp('20130104',tz='US/Pacific'),
                                     Timestamp('20130103',tz='US/Eastern')],
                              'B' : [0, np.nan, 2]})
        assert_frame_equal(result, expected)
        # tz-naive replacement Timestamp
        result = df.copy()
        result.iloc[1,0] = np.nan
        result = result.replace({'A' : np.nan }, Timestamp('20130104'))
        expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
                                     Timestamp('20130104'),
                                     Timestamp('20130103',tz='US/Eastern')],
                              'B' : [0, np.nan, 2]})
        assert_frame_equal(result, expected)
def test_combine_multiple_frames_dtypes(self):
A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).get_dtype_counts()
expected = Series(dict( float64 = 2, float32 = 2 ))
assert_series_equal(results,expected)
def test_ops(self):
for n in [ 4, 4000 ]:
df = DataFrame(1,index=range(n),columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+','__add__','__radd__'),
('-','__sub__','__rsub__'),
('*','__mul__','__rmul__'),
('/','__truediv__','__rtruediv__')]:
base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))
expected = eval("base{op}df".format(op=op_str))
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result,expected)
if op in ['+','*']:
result = getattr(df,op)(m)
assert_frame_equal(result,expected)
elif op in ['-','/']:
result = getattr(df,rop)(m)
assert_frame_equal(result,expected)
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1-np.isnan(df.iloc[0:25]))
result = (1-np.isnan(df)).iloc[0:25]
assert_frame_equal(result,expected)
    def test_truncate(self):
        """truncate on a time-indexed frame: both bounds, before/after
        only, bounds absent from the (strided) index, and inverted
        bounds raising ValueError."""
        # NOTE(review): ``offset`` is assigned but never used
        offset = datetools.bday
        ts = self.tsframe[::3]
        start, end = self.tsframe.index[3], self.tsframe.index[6]
        start_missing = self.tsframe.index[2]
        end_missing = self.tsframe.index[7]
        # no bounds: identity
        truncated = ts.truncate()
        assert_frame_equal(truncated, ts)
        # both bounds present in the index
        expected = ts[1:3]
        truncated = ts.truncate(start, end)
        assert_frame_equal(truncated, expected)
        # bounds falling between the ts[::3] samples behave the same
        truncated = ts.truncate(start_missing, end_missing)
        assert_frame_equal(truncated, expected)
        # only a start bound
        expected = ts[1:]
        truncated = ts.truncate(before=start)
        assert_frame_equal(truncated, expected)
        truncated = ts.truncate(before=start_missing)
        assert_frame_equal(truncated, expected)
        # only an end bound
        expected = ts[:3]
        truncated = ts.truncate(after=end)
        assert_frame_equal(truncated, expected)
        truncated = ts.truncate(after=end_missing)
        assert_frame_equal(truncated, expected)
        # before > after must raise
        self.assertRaises(ValueError, ts.truncate,
                          before=ts.index[-1] - 1,
                          after=ts.index[0] +1)
    def test_truncate_copy(self):
        """truncate returns a copy: mutating the result must not write
        back into the source frame."""
        index = self.tsframe.index
        truncated = self.tsframe.truncate(index[5], index[10])
        truncated.values[:] = 5.
        self.assertFalse((self.tsframe.values[5:11] == 5).any())
    def test_xs(self):
        """Row cross-section values match column lookups; a missing key
        raises KeyError; xs along axis=1 returns a view of the column."""
        idx = self.frame.index[5]
        xs = self.frame.xs(idx)
        # each value of the cross-section equals the column/row lookup
        for item, value in compat.iteritems(xs):
            if np.isnan(value):
                self.assertTrue(np.isnan(self.frame[item][idx]))
            else:
                self.assertEqual(value, self.frame[item][idx])
        # mixed-type row: the resulting Series is upcast to object
        test_data = {
            'A': {'1': 1, '2': 2},
            'B': {'1': '1', '2': '2', '3': '3'},
        }
        frame = DataFrame(test_data)
        xs = frame.xs('1')
        self.assertEqual(xs.dtype, np.object_)
        self.assertEqual(xs['A'], 1)
        self.assertEqual(xs['B'], '1')
        # a label absent from the index must raise
        with tm.assertRaises(KeyError):
            self.tsframe.xs(self.tsframe.index[0] - datetools.bday)
        # xs along columns returns the column itself
        series = self.frame.xs('A', axis=1)
        expected = self.frame['A']
        assert_series_equal(series, expected)
        # ... and it is a view: writing through it mutates the frame
        series = self.frame.xs('A', axis=1)
        series[:] = 5
        self.assertTrue((expected == 5).all())
def test_xs_corner(self):
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([], name='a', index=pd.Index([], dtype=object))
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
    def test_xs_keep_level(self):
        """xs with drop_level=False keeps the selected level(s) in the
        result's MultiIndex."""
        df = DataFrame({'day': {0: 'sat', 1: 'sun'},
                        'flavour': {0: 'strawberry', 1: 'strawberry'},
                        'sales': {0: 10, 1: 12},
                        'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])
        result = df.xs('sat', level='day', drop_level=False)
        expected = df[:1]
        assert_frame_equal(result, expected)
        # selecting on two levels at once keeps both as well
        result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
        assert_frame_equal(result, expected)
    def test_pivot(self):
        """Basic pivot: values land in the (index, columns) grid and both
        axis names are propagated to the result."""
        data = {
            'index': ['A', 'B', 'C', 'C', 'B', 'A'],
            'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
            'values': [1., 2., 3., 3., 2., 1.]
        }
        frame = DataFrame(data)
        pivoted = frame.pivot(
            index='index', columns='columns', values='values')
        expected = DataFrame({
            'One': {'A': 1., 'B': 2., 'C': 3.},
            'Two': {'A': 1., 'B': 2., 'C': 3.}
        })
        expected.index.name, expected.columns.name = 'index', 'columns'
        assert_frame_equal(pivoted, expected)
        # the source column names become the axis names
        self.assertEqual(pivoted.index.name, 'index')
        self.assertEqual(pivoted.columns.name, 'columns')
        # omitting values: the columns become a MultiIndex
        pivoted = frame.pivot(index='index', columns='columns')
        self.assertEqual(pivoted.index.name, 'index')
        self.assertEqual(pivoted.columns.names, (None, 'columns'))
        # pivot multiple columns
        wp = tm.makePanel()
        lp = wp.to_frame()
        df = lp.reset_index()
        assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with assertRaisesRegexp(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
self.assert_numpy_array_equal(result.columns, ['A', 'B'])
    def test_pivot_index_none(self):
        """gh-3962: pivot with no explicit index uses the existing frame
        index, with and without an explicit values column."""
        data = {
            'index': ['A', 'B', 'C', 'C', 'B', 'A'],
            'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
            'values': [1., 2., 3., 3., 2., 1.]
        }
        frame = DataFrame(data).set_index('index')
        result = frame.pivot(columns='columns', values='values')
        expected = DataFrame({
            'One': {'A': 1., 'B': 2., 'C': 3.},
            'Two': {'A': 1., 'B': 2., 'C': 3.}
        })
        expected.index.name, expected.columns.name = 'index', 'columns'
        assert_frame_equal(result, expected)
        # omit values: the remaining columns form a MultiIndex level
        result = frame.pivot(columns='columns')
        expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
                                                      ('values', 'Two')],
                                                     names=[None, 'columns'])
        expected.index.name = 'index'
        assert_frame_equal(result, expected, check_names=False)
        self.assertEqual(result.index.name, 'index',)
        self.assertEqual(result.columns.names, (None, 'columns'))
        expected.columns = expected.columns.droplevel(0)
        # NOTE(review): this ``data`` dict is never used — the pivot
        # below still operates on the original ``frame``
        data = {
            'index': range(7),
            'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
            'values': [1., 2., 3., 3., 2., 1.]
        }
        result = frame.pivot(columns='columns', values='values')
        expected.columns.name = 'columns'
        assert_frame_equal(result, expected)
    def test_reindex(self):
        """Row reindexing: existing labels keep their values, new labels get NaN."""
        newFrame = self.frame.reindex(self.ts1.index)
        # every reindexed value matches the source frame, or is NaN for
        # labels that did not exist there
        for col in newFrame.columns:
            for idx, val in compat.iteritems(newFrame[col]):
                if idx in self.frame.index:
                    if np.isnan(val):
                        self.assertTrue(np.isnan(self.frame[col][idx]))
                    else:
                        self.assertEqual(val, self.frame[col][idx])
                else:
                    self.assertTrue(np.isnan(val))
        for col, series in compat.iteritems(newFrame):
            self.assertTrue(tm.equalContents(series.index, newFrame.index))
        # reindexing to an empty Index drops all rows
        emptyFrame = self.frame.reindex(Index([]))
        self.assertEqual(len(emptyFrame.index), 0)
        # Cython code should be unit-tested directly
        nonContigFrame = self.frame.reindex(self.ts1.index[::2])
        for col in nonContigFrame.columns:
            for idx, val in compat.iteritems(nonContigFrame[col]):
                if idx in self.frame.index:
                    if np.isnan(val):
                        self.assertTrue(np.isnan(self.frame[col][idx]))
                    else:
                        self.assertEqual(val, self.frame[col][idx])
                else:
                    self.assertTrue(np.isnan(val))
        for col, series in compat.iteritems(nonContigFrame):
            self.assertTrue(tm.equalContents(series.index,
                                             nonContigFrame.index))
        # corner cases
        # Same index, copies values but not index if copy=False
        newFrame = self.frame.reindex(self.frame.index, copy=False)
        self.assertIs(newFrame.index, self.frame.index)
        # length zero
        newFrame = self.frame.reindex([])
        self.assertTrue(newFrame.empty)
        self.assertEqual(len(newFrame.columns), len(self.frame.columns))
        # length zero with columns reindexed with non-empty index
        newFrame = self.frame.reindex([])
        newFrame = newFrame.reindex(self.frame.index)
        self.assertEqual(len(newFrame.index), len(self.frame.index))
        self.assertEqual(len(newFrame.columns), len(self.frame.columns))
        # pass non-Index
        newFrame = self.frame.reindex(list(self.ts1.index))
        self.assertTrue(newFrame.index.equals(self.ts1.index))
        # copy with no axes
        result = self.frame.reindex()
        assert_frame_equal(result,self.frame)
        self.assertFalse(result is self.frame)
    def test_reindex_nan(self):
        """Reindexing with NaN labels selects the NaN-indexed rows correctly."""
        df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
                          index=[2, np.nan, 1, 5], columns=['joe', 'jim'])
        # i are requested labels, j the positional rows they should resolve to
        i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
        tm.assert_frame_equal(df.reindex(i), df.iloc[j])
        # same behavior with an object-dtype index
        df.index = df.index.astype('object')
        tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
        # GH10388
        df = pd.DataFrame({'other':['a', 'b', np.nan, 'c'],
                           'date':['2015-03-22', np.nan, '2012-01-08', np.nan],
                           'amount':[2, 3, 4, 5]})
        df['date'] = pd.to_datetime(df.date)
        df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
        # round-tripping through set_index/reset_index equals a column reindex
        left = df.set_index(['delta', 'other', 'date']).reset_index()
        right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
        assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
from datetime import datetime
df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
    def test_reindex_fill_value(self):
        """fill_value is used for labels introduced by reindex, on both axes."""
        df = DataFrame(np.random.randn(10, 4))
        # axis=0
        # default fill is NaN for the 5 new rows
        result = df.reindex(lrange(15))
        self.assertTrue(np.isnan(result.values[-5:]).all())
        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
        # axis=1
        result = df.reindex(columns=lrange(5), fill_value=0.)
        expected = df.copy()
        expected[4] = 0.
        assert_frame_equal(result, expected)
        result = df.reindex(columns=lrange(5), fill_value=0)
        expected = df.copy()
        expected[4] = 0
        assert_frame_equal(result, expected)
        # fill_value need not be numeric
        result = df.reindex(columns=lrange(5), fill_value='foo')
        expected = df.copy()
        expected[4] = 'foo'
        assert_frame_equal(result, expected)
        # reindex_axis
        result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
        result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
        expected = df.reindex(columns=lrange(5)).fillna(0)
        assert_frame_equal(result, expected)
        # other dtypes
        df['foo'] = 'foo'
        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr,index=list(range(len(df))))
assert_frame_equal(result,expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
    def test_align(self):
        """align: copy semantics, fill values, join types, frame-vs-series."""
        # self-align copies by default ...
        af, bf = self.frame.align(self.frame)
        self.assertIsNot(af._data, self.frame._data)
        # ... but shares the block manager with copy=False
        af, bf = self.frame.align(self.frame, copy=False)
        self.assertIs(af._data, self.frame._data)
        # axis = 0
        other = self.frame.ix[:-5, :3]
        af, bf = self.frame.align(other, axis=0, fill_value=-1)
        self.assertTrue(bf.columns.equals(other.columns))
        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values
        diff_b_vals = bf.reindex(diff_b).values
        self.assertTrue((diff_a_vals == -1).all())
        # right join takes the other frame's index for both results
        af, bf = self.frame.align(other, join='right', axis=0)
        self.assertTrue(bf.columns.equals(other.columns))
        self.assertTrue(bf.index.equals(other.index))
        self.assertTrue(af.index.equals(other.index))
        # axis = 1
        other = self.frame.ix[:-5, :3].copy()
        af, bf = self.frame.align(other, axis=1)
        self.assertTrue(bf.columns.equals(self.frame.columns))
        self.assertTrue(bf.index.equals(other.index))
        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values
        diff_b_vals = bf.reindex(diff_b).values
        self.assertTrue((diff_a_vals == -1).all())
        af, bf = self.frame.align(other, join='inner', axis=1)
        self.assertTrue(bf.columns.equals(other.columns))
        af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(other.columns))
        # test other non-float types
        af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(other.columns))
        af, bf = self.mixed_frame.align(self.mixed_frame,
                                        join='inner', axis=1, method='pad')
        self.assertTrue(bf.columns.equals(self.mixed_frame.columns))
        # aligning a frame to a series along axis=1 gives an empty index
        af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
                                  method=None, fill_value=None)
        self.assertTrue(bf.index.equals(Index([])))
        af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
                                  method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        # mixed floats/ints
        af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
                                        method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
                                      method=None, fill_value=0)
        self.assertTrue(bf.index.equals(Index([])))
        # try to align dataframe to series along bad axis
        self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
                          join='inner', axis=2)
        # align dataframe to series with broadcast or not
        idx = self.frame.index
        s = Series(range(len(idx)), index=idx)
        left, right = self.frame.align(s, axis=0)
        tm.assert_index_equal(left.index, self.frame.index)
        tm.assert_index_equal(right.index, self.frame.index)
        self.assertTrue(isinstance(right, Series))
        # broadcast_axis=1 replicates the series into every column
        left, right = self.frame.align(s, broadcast_axis=1)
        tm.assert_index_equal(left.index, self.frame.index)
        expected = {}
        for c in self.frame.columns:
            expected[c] = s
        expected = DataFrame(expected, index=self.frame.index,
                             columns=self.frame.columns)
        assert_frame_equal(right, expected)
        # GH 9558
        df = DataFrame({'a':[1,2,3], 'b':[4,5,6]})
        result = df[df['a'] == 2]
        expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
        assert_frame_equal(result, expected)
        result = df.where(df['a'] == 2, 0)
        expected = DataFrame({'a':[0, 2, 0], 'b':[0, 5, 0]})
        assert_frame_equal(result, expected)
    def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
        """Align a with b and compare against a manual join+reindex+fillna.

        Parameters mirror DataFrame.align: join `how`, fill `method`/`limit`
        applied along `fill_axis`, alignment restricted to `axis`
        (None = both axes).
        """
        aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
                         fill_axis=fill_axis)
        join_index, join_columns = None, None
        ea, eb = a, b
        # build the expected result by joining/reindexing each axis by hand
        if axis is None or axis == 0:
            join_index = a.index.join(b.index, how=how)
            ea = ea.reindex(index=join_index)
            eb = eb.reindex(index=join_index)
        if axis is None or axis == 1:
            join_columns = a.columns.join(b.columns, how=how)
            ea = ea.reindex(columns=join_columns)
            eb = eb.reindex(columns=join_columns)
        # then apply the same fill the align call was asked to perform
        ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
        eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
        assert_frame_equal(aa, ea)
        assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
    def _check_align_fill(self, kind, meth, ax, fax):
        """Run _check_align for one join `kind`/fill `meth`/axis combo,
        covering overlapping, empty-left, empty-right and both-empty frames,
        each with and without a fill limit."""
        left = self.frame.ix[0:4, :10]
        right = self.frame.ix[2:, 6:]
        empty = self.frame.ix[:0, :0]
        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # empty left
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # empty right
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
        # both empty
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10*10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
    def test_align_multiindex(self):
        """GH 10665: align a MultiIndex frame with a frame indexed on one level."""
        # same test cases as test_align_multiindex in test_series.py
        midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
                                          names=('a', 'b', 'c'))
        idx = pd.Index(range(2), name='b')
        df1 = pd.DataFrame(np.arange(12,dtype='int64'), index=midx)
        df2 = pd.DataFrame(np.arange(2,dtype='int64'), index=idx)
        # these must be the same results (but flipped)
        res1l, res1r = df1.align(df2, join='left')
        res2l, res2r = df2.align(df1, join='right')
        # left join keeps the full MultiIndex; df2 broadcasts along level 'b'
        expl = df1
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)
        res1l, res1r = df1.align(df2, join='right')
        res2l, res2r = df2.align(df1, join='left')
        # right join restricts level 'b' to the labels present in df2
        exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
                                             names=('a', 'b', 'c'))
        expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)
    def test_where(self):
        """DataFrame.where: get/align/inplace-set behavior across dtypes."""
        default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])
        def _safe_add(df):
            # only add to the numeric items
            def is_ok(s):
                return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
            return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))
        def _check_get(df, cond, check_dtypes = True):
            # where(cond, other) must pick df where cond, other elsewhere,
            # and accept either a boolean frame or its raw ndarray
            other1 = _safe_add(df)
            rs = df.where(cond, other1)
            rs2 = df.where(cond.values, other1)
            for k, v in rs.iteritems():
                exp = Series(np.where(cond[k], df[k], other1[k]),index=v.index)
                assert_series_equal(v, exp, check_names=False)
            assert_frame_equal(rs, rs2)
            # dtypes
            if check_dtypes:
                self.assertTrue((rs.dtypes == df.dtypes).all() == True)
        # check getting
        for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
            cond = df > 0
            _check_get(df, cond)
        # upcasting case (GH # 2794)
        df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))
        df.ix[1,:] = 0
        result = df.where(df>=0).get_dtype_counts()
        #### when we don't preserve boolean casts
        expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })
        assert_series_equal(result, expected)
        def _check_align(df, cond, other, check_dtypes = True):
            # where with a cond that needs alignment; other may be a scalar,
            # an ndarray (taken column-by-column) or a frame
            rs = df.where(cond, other)
            for i, k in enumerate(rs.columns):
                result = rs[k]
                d = df[k].values
                c = cond[k].reindex(df[k].index).fillna(False).values
                if np.isscalar(other):
                    o = other
                else:
                    if isinstance(other,np.ndarray):
                        o = Series(other[:,i],index=result.index).values
                    else:
                        o = other[k].values
                new_values = d if c.all() else np.where(c, d, o)
                expected = Series(new_values, index=result.index, name=k)
                # as numpy doesn't know how to downcast, don't check
                assert_series_equal(result, expected, check_dtype=False)
            # dtypes
            # can't check dtype when other is an ndarray
            if check_dtypes and not isinstance(other,np.ndarray):
                self.assertTrue((rs.dtypes == df.dtypes).all() == True)
        for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:
            cond = (df > 0)[1:]
            _check_align(df, cond, _safe_add(df))
            cond = df > 0
            _check_align(df, cond, (_safe_add(df).values))
            cond = df > 0
            check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])
            _check_align(df, cond, np.nan, check_dtypes = check_dtypes)
        # invalid conditions
        df = default_frame
        err1 = (df + 1).values[0:2, :]
        self.assertRaises(ValueError, df.where, cond, err1)
        err2 = cond.ix[:2, :].values
        other1 = _safe_add(df)
        self.assertRaises(ValueError, df.where, err2, other1)
        self.assertRaises(ValueError, df.mask, True)
        self.assertRaises(ValueError, df.mask, 0)
        # where inplace
        def _check_set(df, cond, check_dtypes = True):
            # inplace where(cond, nan) must equal mask of the aligned cond
            dfi = df.copy()
            econd = cond.reindex_like(df).fillna(True)
            expected = dfi.mask(~econd)
            dfi.where(cond, np.nan, inplace=True)
            assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)x
            if check_dtypes:
                for k, v in compat.iteritems(df.dtypes):
                    if issubclass(v.type,np.integer) and not cond[k].all():
                        v = np.dtype('float64')
                    self.assertEqual(dfi[k].dtype, v)
        for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
            cond = df > 0
            _check_set(df, cond)
            cond = df >= 0
            _check_set(df, cond)
            # aligining
            cond = (df >= 0)[1:]
            _check_set(df, cond)
        # GH 10218
        # test DataFrame.where with Series slicing
        df = DataFrame({'a': range(3), 'b': range(4, 7)})
        result = df.where(df['a'] == 1)
        expected = df[df['a'] == 1].reindex(df.index)
        assert_frame_equal(result, expected)
    def test_where_bug(self):
        """GH 2793: where upcasts correctly; GH 7506: alignment/transposition."""
        # GH 2793
        df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')
        expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
        result = df.where(df > 2, np.nan)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(result > 2, np.nan, inplace=True)
        assert_frame_equal(result, expected)
        # mixed
        # integer columns upcast to float64 once NaN is introduced
        for dtype in ['int16','int8','int32','int64']:
            df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })
            expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
            result = df.where(df > 2, np.nan)
            assert_frame_equal(result, expected)
            result = df.copy()
            result.where(result > 2, np.nan, inplace=True)
            assert_frame_equal(result, expected)
        # transpositional issue
        # GH7506
        a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})
        b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})
        do_not_replace = b.isnull() | (a > b)
        expected = a.copy()
        expected[~do_not_replace] = b
        result = a.where(do_not_replace,b)
        assert_frame_equal(result,expected)
        # same check on a non-square frame
        a = DataFrame({ 0 : [4,6], 1 : [1,0]})
        b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})
        do_not_replace = b.isnull() | (a > b)
        expected = a.copy()
        expected[~do_not_replace] = b
        result = a.where(do_not_replace,b)
        assert_frame_equal(result,expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A = date_range('20130102',periods=5),
B = date_range('20130104',periods=5),
C = np.random.randn(5)))
stamp = datetime(2013,1,3)
result = df[df>stamp]
expected = df.copy()
expected.loc[[0,1],'A'] = np.nan
assert_frame_equal(result,expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])
expected = df.where(~isnull(df), None)
with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
df.where(~isnull(df), None, inplace=True)
    def test_where_align(self):
        """where with axis= aligns a Series/frame replacement correctly."""
        def create():
            # frame with staggered NaN runs in each column
            df = DataFrame(np.random.randn(10,3))
            df.iloc[3:5,0] = np.nan
            df.iloc[4:6,1] = np.nan
            df.iloc[5:8,2] = np.nan
            return df
        # series
        # filling NaNs via where(..., mean, axis='columns') == fillna(mean)
        df = create()
        expected = df.fillna(df.mean())
        result = df.where(pd.notnull(df),df.mean(),axis='columns')
        assert_frame_equal(result, expected)
        df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')
        assert_frame_equal(df, expected)
        # axis='index'/'rows' broadcasts the replacement down the rows
        df = create().fillna(0)
        expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])
        result = df.where(df>0,df[0],axis='index')
        assert_frame_equal(result, expected)
        result = df.where(df>0,df[0],axis='rows')
        assert_frame_equal(result, expected)
        # frame
        df = create()
        expected = df.fillna(1)
        result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))
        assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])
df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df,expected)
    def test_where_axis(self):
        """GH 9736: where(other, axis=...) broadcasts along the chosen axis."""
        # GH 9736
        df = DataFrame(np.random.randn(2, 2))
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, 1])
        # all-False mask: the result is entirely the broadcast replacement
        expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Upcast needed
        df = DataFrame([[1, 2], [3, 4]], dtype='int64')
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, np.nan])
        expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[0, np.nan], [0, np.nan]], dtype='float64')
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        # inplace columns-axis case: per-column dtypes differ
        expected = DataFrame({0 : np.array([0, 0], dtype='int64'),
                              1 : np.array([np.nan, np.nan], dtype='float64')})
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Multiple dtypes (=> multiple Blocks)
        df = pd.concat([DataFrame(np.random.randn(10, 2)),
                        DataFrame(np.random.randint(0, 10, size=(10, 2)))],
                       ignore_index=True, axis=1)
        mask = DataFrame(False, columns=df.columns, index=df.index)
        s1 = Series(1, index=df.columns)
        s2 = Series(2, index=df.index)
        result = df.where(mask, s1, axis='columns')
        expected = DataFrame(1.0, columns=df.columns, index=df.index)
        expected[2] = expected[2].astype(int)
        expected[3] = expected[3].astype(int)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s1, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        result = df.where(mask, s2, axis='index')
        expected = DataFrame(2.0, columns=df.columns, index=df.index)
        expected[2] = expected[2].astype(int)
        expected[3] = expected[3].astype(int)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s2, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        # DataFrame vs DataFrame
        # replacement frame missing a row -> that row becomes NaN
        d1 = df.copy().drop(1, axis=0)
        expected = df.copy()
        expected.loc[1, :] = np.nan
        result = df.where(mask, d1)
        assert_frame_equal(result, expected)
        result = df.where(mask, d1, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True, axis='index')
        assert_frame_equal(result, expected)
        # replacement frame missing a column -> that column becomes NaN
        d2 = df.copy().drop(1, axis=1)
        expected = df.copy()
        expected.loc[:, 1] = np.nan
        result = df.where(mask, d2)
        assert_frame_equal(result, expected)
        result = df.where(mask, d2, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True, axis='columns')
        assert_frame_equal(result, expected)
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
#----------------------------------------------------------------------
# Transposing
    def test_transpose(self):
        """T swaps rows/columns; mixed-type frames transpose to object dtype."""
        frame = self.frame
        dft = frame.T
        # every transposed cell equals the original (idx, col) cell
        for idx, series in compat.iteritems(dft):
            for col, value in compat.iteritems(series):
                if np.isnan(value):
                    self.assertTrue(np.isnan(frame[col][idx]))
                else:
                    self.assertEqual(value, frame[col][idx])
        # mixed type
        index, data = tm.getMixedTypeDict()
        mixed = DataFrame(data, index=index)
        mixed_T = mixed.T
        # transposing mixed dtypes forces every column to object
        for col, s in compat.iteritems(mixed_T):
            self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
self.assertTrue((self.frame.values[5:10] == 5).all())
#----------------------------------------------------------------------
# Renaming
    def test_rename(self):
        """rename: mappings and functions, both axes, names and MultiIndexes."""
        mapping = {
            'A': 'a',
            'B': 'b',
            'C': 'c',
            'D': 'd'
        }
        # dict mapping and a callable produce the same result
        renamed = self.frame.rename(columns=mapping)
        renamed2 = self.frame.rename(columns=str.lower)
        assert_frame_equal(renamed, renamed2)
        assert_frame_equal(renamed2.rename(columns=str.upper),
                           self.frame, check_names=False)
        # index
        data = {
            'A': {'foo': 0, 'bar': 1}
        }
        # gets sorted alphabetical
        df = DataFrame(data)
        renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
        self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])
        renamed = df.rename(index=str.upper)
        self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])
        # have to pass something
        self.assertRaises(TypeError, self.frame.rename)
        # partial columns
        renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
        self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])
        # other axis
        renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
        self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])
        # index with name
        # renaming labels must not clobber the index's name
        index = Index(['foo', 'bar'], name='name')
        renamer = DataFrame(data, index=index)
        renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
        self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])
        self.assertEqual(renamed.index.name, renamer.index.name)
        # MultiIndex
        # label-level renames apply on both axes; level names are preserved
        tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
        tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
        index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
        columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])
        renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)
        renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
                                 columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
        new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])
        new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])
        self.assert_numpy_array_equal(renamed.index, new_index)
        self.assert_numpy_array_equal(renamed.columns, new_columns)
        self.assertEqual(renamed.index.names, renamer.index.names)
        self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})
df = df.rename(columns={0 : 'a'})
df = df.rename(columns={1 : 'b'})
df = df.set_index(['a','b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],
names=['a','b']),
columns=['2001-01-01'])
assert_frame_equal(df,expected)
#----------------------------------------------------------------------
# Time series related
    def test_diff(self):
        """diff(1) equals subtracting the shifted frame, across dtypes."""
        the_diff = self.tsframe.diff(1)
        assert_series_equal(the_diff['A'],
                            self.tsframe['A'] - self.tsframe['A'].shift(1))
        # int dtype
        # large ints whose difference would be lost in float precision
        a = 10000000000000000
        b = a + 1
        s = Series([a, b])
        rs = DataFrame({'s': s}).diff()
        self.assertEqual(rs.s[1], 1)
        # mixed numeric
        tf = self.tsframe.astype('float32')
        the_diff = tf.diff(1)
        assert_series_equal(the_diff['A'],
                            tf['A'] - tf['A'].shift(1))
        # issue 10907
        # axis=1 diff on a frame whose first column was inserted
        df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
        df.insert(0, 'x', 1)
        result = df.diff(axis=1)
        expected = pd.DataFrame({'x':np.nan, 'y':pd.Series(1), 'z':pd.Series(1)}).astype('float64')
        assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0,2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame([[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2., 2.]]))
    def test_pct_change(self):
        """pct_change: fill_method, periods, limit and freq variants."""
        # no fill: plain ratio against the shifted frame
        rs = self.tsframe.pct_change(fill_method=None)
        assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
        # periods=2 with the default pad fill
        rs = self.tsframe.pct_change(2)
        filled = self.tsframe.fillna(method='pad')
        assert_frame_equal(rs, filled / filled.shift(2) - 1)
        rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
        filled = self.tsframe.fillna(method='bfill', limit=1)
        assert_frame_equal(rs, filled / filled.shift(1) - 1)
        # freq-based change shifts by calendar offset rather than positions
        rs = self.tsframe.pct_change(freq='5D')
        filled = self.tsframe.fillna(method='pad')
        assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
    def test_shift(self):
        """shift: positional, zero, freq-based, PeriodIndex and axis=1."""
        # naive shift
        shiftedFrame = self.tsframe.shift(5)
        self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
        shiftedSeries = self.tsframe['A'].shift(5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        shiftedFrame = self.tsframe.shift(-5)
        self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
        shiftedSeries = self.tsframe['A'].shift(-5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        # shift by 0
        unshifted = self.tsframe.shift(0)
        assert_frame_equal(unshifted, self.tsframe)
        # shift by DateOffset
        # freq-based shift moves the index, not the data
        shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())
        self.assertEqual(len(shiftedFrame), len(self.tsframe))
        shiftedFrame2 = self.tsframe.shift(5, freq='B')
        assert_frame_equal(shiftedFrame, shiftedFrame2)
        d = self.tsframe.index[0]
        shifted_d = d + datetools.BDay(5)
        assert_series_equal(self.tsframe.xs(d),
                            shiftedFrame.xs(shifted_d), check_names=False)
        # shift int frame
        # smoke check only: shifting an int frame must not raise
        int_shifted = self.intframe.shift(1)
        # Shifting with PeriodIndex
        ps = tm.makePeriodFrame()
        shifted = ps.shift(1)
        unshifted = shifted.shift(-1)
        self.assertTrue(shifted.index.equals(ps.index))
        tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],
                             compare_keys=False)
        shifted2 = ps.shift(1, 'B')
        shifted3 = ps.shift(1, datetools.bday)
        assert_frame_equal(shifted2, shifted3)
        assert_frame_equal(ps, shifted2.shift(-1, 'B'))
        # shifting by a freq incompatible with the PeriodIndex must raise
        assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
                           ps.shift, freq='D')
        # shift other axis
        # GH 6371
        df = DataFrame(np.random.rand(10,5))
        expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
        result = df.shift(1,axis=1)
        assert_frame_equal(result,expected)
        # shift named axis
        df = DataFrame(np.random.rand(10,5))
        expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
        result = df.shift(1,axis='columns')
        assert_frame_equal(result,expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
    """GH 9416: shifting categorical columns must shift column-wise,
    matching the per-Series shift of each categorical column."""
    s1 = pd.Series(['a', 'b', 'c'], dtype='category')
    s2 = pd.Series(['A', 'B', 'C'], dtype='category')
    df = DataFrame({'one': s1, 'two': s2})
    rs = df.shift(1)
    xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
    assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
    """tshift shifts the *index* (not the data) by the index's own freq;
    it must round-trip with -1 and raise when no freq can be inferred."""
    # PeriodIndex
    ps = tm.makePeriodFrame()
    shifted = ps.tshift(1)
    unshifted = shifted.tshift(-1)
    assert_frame_equal(unshifted, ps)
    shifted2 = ps.tshift(freq='B')
    assert_frame_equal(shifted, shifted2)
    shifted3 = ps.tshift(freq=datetools.bday)
    assert_frame_equal(shifted, shifted3)
    # mismatched freq against a PeriodIndex must raise
    assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
    # DatetimeIndex
    shifted = self.tsframe.tshift(1)
    unshifted = shifted.tshift(-1)
    assert_frame_equal(self.tsframe, unshifted)
    shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
    assert_frame_equal(shifted, shifted2)
    # a plain (freq-less) DatetimeIndex whose freq can be inferred
    inferred_ts = DataFrame(self.tsframe.values,
                            Index(np.asarray(self.tsframe.index)),
                            columns=self.tsframe.columns)
    shifted = inferred_ts.tshift(1)
    unshifted = shifted.tshift(-1)
    assert_frame_equal(shifted, self.tsframe.tshift(1))
    assert_frame_equal(unshifted, inferred_ts)
    # irregular index -> no inferable freq -> ValueError
    no_freq = self.tsframe.ix[[0, 5, 7], :]
    self.assertRaises(ValueError, no_freq.tshift)
def test_apply(self):
    """Basic apply coverage: ufuncs, aggregators on both axes, invalid axis
    rejection, and category-returning functions (GH9573)."""
    # ufunc: elementwise function maps straight through
    applied = self.frame.apply(np.sqrt)
    assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
    # aggregator: reduces each column to a scalar
    applied = self.frame.apply(np.mean)
    self.assertEqual(applied['A'], np.mean(self.frame['A']))
    d = self.frame.index[0]
    applied = self.frame.apply(np.mean, axis=1)
    self.assertEqual(applied[d], np.mean(self.frame.xs(d)))
    # result index must be the *same object*, not a copy
    self.assertIs(applied.index, self.frame.index)  # want this
    # invalid axis
    df = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
    self.assertRaises(ValueError, df.apply, lambda x: x, 2)
    # GH9573: per-column astype('category') must preserve shape and dtype
    df = DataFrame({'c0':['A','A','B','B'], 'c1':['C','C','D','D']})
    df = df.apply(lambda ts: ts.astype('category'))
    self.assertEqual(df.shape, (4, 2))
    self.assertTrue(isinstance(df['c0'].dtype, com.CategoricalDtype))
    self.assertTrue(isinstance(df['c1'].dtype, com.CategoricalDtype))
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
    """apply on empty frames: empty in -> empty out, NaN reductions over
    empty axes, and reduce=True/False must never invoke the function."""
    # empty frame stays empty
    applied = self.empty.apply(np.sqrt)
    self.assertTrue(applied.empty)
    applied = self.empty.apply(np.mean)
    self.assertTrue(applied.empty)
    # no rows: column-wise reduction yields all-NaN over the columns
    no_rows = self.frame[:0]
    result = no_rows.apply(lambda x: x.mean())
    expected = Series(np.nan, index=self.frame.columns)
    assert_series_equal(result, expected)
    # no columns: row-wise reduction yields all-NaN over the index
    no_cols = self.frame.ix[:, []]
    result = no_cols.apply(lambda x: x.mean(), axis=1)
    expected = Series(np.nan, index=self.frame.index)
    assert_series_equal(result, expected)
    # 2476: apply over a frame with an index but no columns round-trips
    xp = DataFrame(index=['a'])
    rs = xp.apply(lambda x: x['a'], axis=1)
    assert_frame_equal(xp, rs)
    # reduce with an empty DataFrame
    x = []
    result = self.empty.apply(x.append, axis=1, reduce=False)
    assert_frame_equal(result, self.empty)
    result = self.empty.apply(x.append, axis=1, reduce=True)
    assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))
    empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
    result = empty_with_cols.apply(x.append, axis=1, reduce=False)
    assert_frame_equal(result, empty_with_cols)
    result = empty_with_cols.apply(x.append, axis=1, reduce=True)
    assert_series_equal(result, Series([], index=pd.Index([], dtype=object)))
    # Ensure that x.append hasn't been called
    self.assertEqual(x, [])
def test_apply_standard_nonunique(self):
    """apply must work on a non-unique index, on both axes."""
    df = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
    rs = df.apply(lambda s: s[0], axis=1)
    xp = Series([1, 4, 7], ['a', 'a', 'c'])
    assert_series_equal(rs, xp)
    # transposed frame, axis=0 must give the same answer
    rs = df.T.apply(lambda s: s[0], axis=0)
    assert_series_equal(rs, xp)
def test_apply_broadcast(self):
    """broadcast=True must repeat each aggregate back over the original
    shape so every cell of a column/row equals its aggregate."""
    broadcasted = self.frame.apply(np.mean, broadcast=True)
    agged = self.frame.apply(np.mean)
    for col, ts in compat.iteritems(broadcasted):
        self.assertTrue((ts == agged[col]).all())
    broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
    agged = self.frame.apply(np.mean, axis=1)
    for idx in broadcasted.index:
        self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())
def test_apply_raw(self):
    """raw=True passes ndarrays instead of Series; results must match the
    Series-based equivalents for both reductions and transforms."""
    result0 = self.frame.apply(np.mean, raw=True)
    result1 = self.frame.apply(np.mean, axis=1, raw=True)
    expected0 = self.frame.apply(lambda x: x.values.mean())
    expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
    assert_series_equal(result0, expected0)
    assert_series_equal(result1, expected1)
    # no reduction: elementwise transform under raw=True
    result = self.frame.apply(lambda x: x * 2, raw=True)
    expected = self.frame * 2
    assert_frame_equal(result, expected)
def test_apply_axis1(self):
    """Row-wise apply of np.mean must equal the mean of the row's xs."""
    d = self.frame.index[0]
    tapplied = self.frame.apply(np.mean, axis=1)
    self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))
def test_apply_ignore_failures(self):
    """_apply_standard(ignore_failures=True) on a mixed frame must match
    applying over the numeric columns only."""
    result = self.mixed_frame._apply_standard(np.mean, 0,
                                              ignore_failures=True)
    expected = self.mixed_frame._get_numeric_data().apply(np.mean)
    assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
    """Mixed-dtype corner cases: an empty slice reduces to NaN with an
    empty int64 index; row-wise item access keeps each column's dtype."""
    df = DataFrame({'A': ['foo'],
                    'B': [1.]})
    result = df[:0].apply(np.mean, axis=1)
    # the result's index should be an empty int64 index, not object
    expected = Series(np.nan, index=pd.Index([], dtype='int64'))
    assert_series_equal(result, expected)
    df = DataFrame({'A': ['foo'],
                    'B': [1.]})
    result = df.apply(lambda x: x['A'], axis=1)
    expected = Series(['foo'],index=[0])
    assert_series_equal(result, expected)
    result = df.apply(lambda x: x['B'], axis=1)
    expected = Series([1.],index=[0])
    assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
    """On empty frames, apply probes the function with an empty float array
    to decide Series (reduction) vs DataFrame (transform) results."""
    no_cols = DataFrame(index=['a', 'b', 'c'])
    no_index = DataFrame(columns=['a', 'b', 'c'])

    def _check(df, f):
        # classify f by what it does to an empty float array
        test_res = f(np.array([], dtype='f8'))
        is_reduction = not isinstance(test_res, np.ndarray)

        def _checkit(axis=0, raw=False):
            res = df.apply(f, axis=axis, raw=raw)
            if is_reduction:
                agg_axis = df._get_agg_axis(axis)
                tm.assertIsInstance(res, Series)
                self.assertIs(res.index, agg_axis)
            else:
                tm.assertIsInstance(res, DataFrame)

        # all four axis/raw combinations must classify consistently
        _checkit()
        _checkit(axis=1)
        _checkit(raw=True)
        _checkit(axis=0, raw=True)

    _check(no_cols, lambda x: x)
    _check(no_cols, lambda x: x.mean())
    _check(no_index, lambda x: x)
    _check(no_index, lambda x: x.mean())
    # broadcast=True on an empty frame still yields a DataFrame
    result = no_cols.apply(lambda x: x.mean(), broadcast=True)
    tm.assertIsInstance(result, DataFrame)
def test_apply_with_args_kwds(self):
    """apply must forward positional args (args=) and keyword args to the
    applied function, for transforms and reductions alike."""
    def add_some(x, howmuch=0):
        return x + howmuch

    def agg_and_add(x, howmuch=0):
        return x.mean() + howmuch

    def subtract_and_divide(x, sub, divide=1):
        return (x - sub) / divide

    # keyword-only forwarding, transform
    result = self.frame.apply(add_some, howmuch=2)
    exp = self.frame.apply(lambda x: x + 2)
    assert_frame_equal(result, exp)
    # keyword-only forwarding, reduction
    result = self.frame.apply(agg_and_add, howmuch=2)
    exp = self.frame.apply(lambda x: x.mean() + 2)
    assert_series_equal(result, exp)
    # mixed positional (via args=) and keyword forwarding
    res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
    exp = self.frame.apply(lambda x: (x - 2.) / 2.)
    assert_frame_equal(res, exp)
def test_apply_yield_list(self):
    """apply(list) must round-trip the frame unchanged."""
    result = self.frame.apply(list)
    assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
    """Row-wise np.mean via apply must match DataFrame.mean(1), even with
    NaNs injected into a column."""
    self.frame.ix[::2, 'A'] = np.nan
    expected = self.frame.mean(1)
    result = self.frame.apply(np.mean, axis=1)
    assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
    """apply with a function returning differently-indexed Series
    (Series.describe) must align results correctly on both axes."""
    df = DataFrame(np.random.randn(20, 10))
    result0 = df.apply(Series.describe, axis=0)
    expected0 = DataFrame(dict((i, v.describe())
                               for i, v in compat.iteritems(df)),
                          columns=df.columns)
    assert_frame_equal(result0, expected0)
    result1 = df.apply(Series.describe, axis=1)
    expected1 = DataFrame(dict((i, v.describe())
                               for i, v in compat.iteritems(df.T)),
                          columns=df.index).T
    assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
    """When the applied function raises, apply must annotate the exception
    with the offending row label ('occurred at index ...')."""
    data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
                            'bar', 'bar', 'bar', 'bar',
                            'foo', 'foo', 'foo'],
                      'B': ['one', 'one', 'one', 'two',
                            'one', 'one', 'one', 'two',
                            'two', 'two', 'one'],
                      'C': ['dull', 'dull', 'shiny', 'dull',
                            'dull', 'shiny', 'shiny', 'dull',
                            'shiny', 'shiny', 'shiny'],
                      'D': np.random.randn(11),
                      'E': np.random.randn(11),
                      'F': np.random.randn(11)})

    # NaN at row 4 makes row['C'].startswith raise AttributeError there
    data.loc[4,'C'] = np.nan

    def transform(row):
        # raises AttributeError on the NaN row (float has no .startswith)
        if row['C'].startswith('shin') and row['A'] == 'foo':
            row['D'] = 7
        return row

    def transform2(row):
        # NaN-safe variant; defined for parity, not invoked below
        if (notnull(row['C']) and row['C'].startswith('shin')
                and row['A'] == 'foo'):
            row['D'] = 7
        return row

    try:
        transformed = data.apply(transform, axis=1)
    except AttributeError as e:
        # exception must carry the original message plus the row location
        self.assertEqual(len(e.args), 2)
        self.assertEqual(e.args[1], 'occurred at index 4')
        self.assertEqual(e.args[0], "'float' object has no attribute 'startswith'")
def test_apply_bug(self):
    """Row-wise apply extracting one column must behave the same whether
    the first column holds ints or datetimes."""
    import datetime
    positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
                              [1, 'DEF0', 20], [2, 'ABC1', 50],
                              [2, 'YUM1', 20], [2, 'DEF1', 20]],
                             columns=['a', 'market', 'position'])

    def f(r):
        return r['market']
    expected = positions.apply(f, axis=1)

    # same frame but with datetimes in column 'a'
    positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],
                           [datetime.datetime(2013, 1, 2), 'YUM0', 20],
                           [datetime.datetime(2013, 1, 3), 'DEF0', 20],
                           [datetime.datetime(2013, 1, 4), 'ABC1', 50],
                           [datetime.datetime(2013, 1, 5), 'YUM1', 20],
                           [datetime.datetime(2013, 1, 6), 'DEF1', 20]],
                          columns=['a', 'market', 'position'])
    result = positions.apply(f, axis=1)
    assert_series_equal(result,expected)
def test_swapaxes(self):
    """swapaxes(0,1)/(1,0) equal the transpose, (0,0) is the identity,
    and out-of-range axes raise ValueError."""
    frame = DataFrame(np.random.randn(10, 5))
    transposed = frame.T
    assert_frame_equal(transposed, frame.swapaxes(0, 1))
    assert_frame_equal(transposed, frame.swapaxes(1, 0))
    assert_frame_equal(frame, frame.swapaxes(0, 0))
    self.assertRaises(ValueError, frame.swapaxes, 2, 5)
def test_apply_convert_objects(self):
    """Row-wise identity apply on a mixed str/float frame must round-trip
    after soft dtype conversion (_convert(datetime=True))."""
    data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
                            'bar', 'bar', 'bar', 'bar',
                            'foo', 'foo', 'foo'],
                      'B': ['one', 'one', 'one', 'two',
                            'one', 'one', 'one', 'two',
                            'two', 'two', 'one'],
                      'C': ['dull', 'dull', 'shiny', 'dull',
                            'dull', 'shiny', 'shiny', 'dull',
                            'shiny', 'shiny', 'shiny'],
                      'D': np.random.randn(11),
                      'E': np.random.randn(11),
                      'F': np.random.randn(11)})
    result = data.apply(lambda x: x, axis=1)
    assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
    """Inside apply, each Series carries its column/row label as .name,
    for both scalar- and array-returning functions."""
    # axis=0: name is the column label
    result = self.frame.apply(lambda x: x.name)
    expected = Series(self.frame.columns, index=self.frame.columns)
    assert_series_equal(result, expected)
    # axis=1: name is the row label
    result = self.frame.apply(lambda x: x.name, axis=1)
    expected = Series(self.frame.index, index=self.frame.index)
    assert_series_equal(result, expected)
    # non-reductions: array-valued results broadcast the name down columns
    result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
    expected = DataFrame(np.tile(self.frame.columns,
                                 (len(self.frame.index), 1)),
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(result, expected)
    # ... and across rows for axis=1
    result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
                              axis=1)
    expected = DataFrame(np.tile(self.frame.index,
                                 (len(self.frame.columns), 1)).T,
                         index=self.frame.index,
                         columns=self.frame.columns)
    assert_frame_equal(result, expected)
def test_apply_multi_index(self):
    """Row-wise apply on a MultiIndexed frame must keep the MultiIndex."""
    s = DataFrame([[1,2], [3,4], [5,6]])
    s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
    s.columns = ['col1','col2']
    res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
    tm.assertIsInstance(res.index, MultiIndex)
def test_apply_dict(self):
    """GH 8735-style behavior for dict-returning functions: reduce=True
    yields a Series of dicts, reduce=False expands back to a frame, and
    reduce=None infers the Series-of-dicts result."""
    A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
    A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
                         dict([(0, 'bar'), (1, 'eggs')])])
    B = DataFrame([[0, 1], [2, 3]])
    B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
    fn = lambda x: x.to_dict()

    for df, dicts in [(A, A_dicts), (B, B_dicts)]:
        reduce_true = df.apply(fn, reduce=True)
        reduce_false = df.apply(fn, reduce=False)
        reduce_none = df.apply(fn, reduce=None)

        assert_series_equal(reduce_true, dicts)
        assert_frame_equal(reduce_false, df)
        assert_series_equal(reduce_none, dicts)
def test_applymap(self):
    """applymap coverage: elementwise transforms, type/tuple-returning
    functions (GH #465), mixed-dtype frames staying object, duplicate
    column names, and datetime/timedelta stringification."""
    applied = self.frame.applymap(lambda x: x * 2)
    assert_frame_equal(applied, self.frame * 2)

    # function returning a type per cell (smoke test)
    result = self.frame.applymap(type)

    # GH #465: a function returning tuples must keep the tuple per cell.
    # BUG FIX: this line had been truncated to the bare 'plymap(...)',
    # which is a NameError; restored the full applymap call that the
    # tuple assertion below depends on.
    result = self.frame.applymap(lambda x: (x, x))
    tm.assertIsInstance(result['A'][0], tuple)

    # GH 2909: identity applymap on object columns must stay object
    df = DataFrame(data=[1,'a'])
    result = df.applymap(lambda x: x)
    self.assertEqual(result.dtypes[0], object)

    df = DataFrame(data=[1.,'a'])
    result = df.applymap(lambda x: x)
    self.assertEqual(result.dtypes[0], object)

    # GH 2786: duplicate column names must be preserved
    df = DataFrame(np.random.random((3,4)))
    df2 = df.copy()
    cols = ['a','a','a','a']
    df.columns = cols

    expected = df2.applymap(str)
    expected.columns = cols
    result = df.applymap(str)
    assert_frame_equal(result,expected)

    # datetime/timedelta cells must stringify like their scalar repr
    df['datetime'] = Timestamp('20130101')
    df['timedelta'] = Timedelta('1 min')
    result = df.applymap(str)
    for f in ['datetime','timedelta']:
        self.assertEqual(result.loc[0,f],str(df.loc[0,f]))
def test_filter(self):
    """DataFrame.filter: items (with named axes), like=, regex=, the
    items=None error, and unicode column labels."""
    # items
    filtered = self.frame.filter(['A', 'B', 'E'])
    self.assertEqual(len(filtered.columns), 2)
    self.assertNotIn('E', filtered)
    # named-axis spelling must behave like the default columns axis
    filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
    self.assertEqual(len(filtered.columns), 2)
    self.assertNotIn('E', filtered)
    # filtering row labels via axis='index' is a reindex
    idx = self.frame.index[0:4]
    filtered = self.frame.filter(idx, axis='index')
    expected = self.frame.reindex(index=idx)
    assert_frame_equal(filtered, expected)
    # like= substring match on column labels
    fcopy = self.frame.copy()
    fcopy['AA'] = 1
    filtered = fcopy.filter(like='A')
    self.assertEqual(len(filtered.columns), 2)
    self.assertIn('AA', filtered)
    # like with int labels mixed in
    df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
    filtered = df.filter(like='_')
    self.assertEqual(len(filtered.columns), 2)
    # regex with ints in column names
    # GH4230: regex= coerces labels to str before matching
    df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
    expected = DataFrame(0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
    filtered = df.filter(regex='^[0-9]+$')
    assert_frame_equal(filtered, expected)
    expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
    # shouldn't remove anything
    filtered = expected.filter(regex='^[0-9]+$')
    assert_frame_equal(filtered, expected)
    # pass in None
    with assertRaisesRegexp(TypeError, 'Must pass'):
        self.frame.filter(items=None)
    # objects
    filtered = self.mixed_frame.filter(like='foo')
    self.assertIn('foo', filtered)
    # unicode columns, won't ascii-encode
    df = self.frame.rename(columns={'B': u('\u2202')})
    filtered = df.filter(like='C')
    self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
    """regex= must use re.search (match anywhere), not re.match."""
    fcopy = self.frame.copy()
    fcopy['AA'] = 1

    # regex
    filtered = fcopy.filter(regex='[A]+')
    self.assertEqual(len(filtered.columns), 2)
    self.assertIn('AA', filtered)

    # doesn't have to be at beginning
    df = DataFrame({'aBBa': [1, 2],
                    'BBaBB': [1, 2],
                    'aCCa': [1, 2],
                    'aCCaBB': [1, 2]})

    result = df.filter(regex='BB')
    exp = df[[x for x in df.columns if 'BB' in x]]
    assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
    """DataFrame.select with a label predicate equals the corresponding
    reindex, on both axes."""
    # select rows whose date is a Wednesday
    f = lambda x: x.weekday() == 2
    result = self.tsframe.select(f, axis=0)
    expected = self.tsframe.reindex(
        index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
    assert_frame_equal(result, expected)

    result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
    expected = self.frame.reindex(columns=['B', 'D'])
    assert_frame_equal(result, expected, check_names=False)  # TODO should reindex check_names?
def test_reorder_levels(self):
    """reorder_levels by position and by name: identity, rotation, and
    repeating a single level."""
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]],
                       names=['L0', 'L1', 'L2'])
    df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)

    # no change, position
    result = df.reorder_levels([0, 1, 2])
    assert_frame_equal(df, result)

    # no change, labels
    result = df.reorder_levels(['L0', 'L1', 'L2'])
    assert_frame_equal(df, result)

    # rotate, position
    result = df.reorder_levels([1, 2, 0])
    e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
                       labels=[[0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L1', 'L2', 'L0'])
    expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
                         index=e_idx)
    assert_frame_equal(result, expected)

    # repeating a level is allowed (by position and by name)
    result = df.reorder_levels([0, 0, 0])
    e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L0', 'L0', 'L0'])
    expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
                         index=e_idx)
    assert_frame_equal(result, expected)

    result = df.reorder_levels(['L0', 'L0', 'L0'])
    assert_frame_equal(result, expected)
def test_sort_values(self):
    """GH 9816 API coverage: .sort is deprecated (warns FutureWarning),
    sort_index matches the old behavior, and sort_index(level=...) agrees
    with sortlevel on both axes."""
    # sort_index
    frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # 9816 deprecated: .sort must emit FutureWarning
    with tm.assert_produces_warning(FutureWarning):
        frame.sort(columns='A')
    with tm.assert_produces_warning(FutureWarning):
        frame.sort()

    unordered = frame.ix[[3, 2, 4, 1]]
    expected = unordered.sort_index()

    result = unordered.sort_index(axis=0)
    assert_frame_equal(result, expected)

    unordered = frame.ix[:, [2, 1, 3, 0]]
    expected = unordered.sort_index(axis=1)

    result = unordered.sort_index(axis=1)
    # (a duplicated, redundant assert_frame_equal call was removed here)
    assert_frame_equal(result, expected)

    # sortlevel parity
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)
    result = df.sort_index(level='A', sort_remaining=False)
    expected = df.sortlevel('A', sort_remaining=False)
    assert_frame_equal(result, expected)

    df = df.T
    result = df.sort_index(level='A', axis=1, sort_remaining=False)
    expected = df.sortlevel('A', axis=1, sort_remaining=False)
    assert_frame_equal(result, expected)

    # MI sort, but no by
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)
    result = df.sort_index(sort_remaining=False)
    expected = df.sort_index()
    assert_frame_equal(result, expected)
def test_sort_index(self):
    """sort_index on both axes (ascending/descending) and sort_values by
    one or more columns, including the axis/ascending-length errors."""
    frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # axis=0
    unordered = frame.ix[[3, 2, 4, 1]]
    sorted_df = unordered.sort_index(axis=0)
    expected = frame
    assert_frame_equal(sorted_df, expected)

    sorted_df = unordered.sort_index(ascending=False)
    expected = frame[::-1]
    assert_frame_equal(sorted_df, expected)

    # axis=1
    unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
    sorted_df = unordered.sort_index(axis=1)
    expected = frame
    assert_frame_equal(sorted_df, expected)

    sorted_df = unordered.sort_index(axis=1, ascending=False)
    expected = frame.ix[:, ::-1]
    assert_frame_equal(sorted_df, expected)

    # by column: must match the argsort of that column
    sorted_df = frame.sort_values(by='A')
    indexer = frame['A'].argsort().values
    expected = frame.ix[frame.index[indexer]]
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.sort_values(by='A', ascending=False)
    indexer = indexer[::-1]
    expected = frame.ix[frame.index[indexer]]
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.sort_values(by='A', ascending=False)
    assert_frame_equal(sorted_df, expected)

    # GH4839: list-of-one 'by'/'ascending' must behave like scalars
    sorted_df = frame.sort_values(by=['A'], ascending=[False])
    assert_frame_equal(sorted_df, expected)

    # check for now
    sorted_df = frame.sort_values(by='A')
    assert_frame_equal(sorted_df, expected[::-1])
    expected = frame.sort_values(by='A')
    assert_frame_equal(sorted_df, expected)

    expected = frame.sort_values(by=['A', 'B'], ascending=False)
    sorted_df = frame.sort_values(by=['A', 'B'])
    assert_frame_equal(sorted_df, expected[::-1])

    # column sorts only make sense along axis 0
    self.assertRaises(ValueError, lambda : frame.sort_values(by=['A','B'], axis=2, inplace=True))

    msg = 'When sorting by column, axis must be 0'
    with assertRaisesRegexp(ValueError, msg):
        frame.sort_values(by='A', axis=1)

    # ascending list length must match the 'by' list length
    msg = r'Length of ascending \(5\) != length of by \(2\)'
    with assertRaisesRegexp(ValueError, msg):
        frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_index_categorical_index(self):
    """sort_index on a CategoricalIndex must sort by category order
    ('c' < 'a' < 'b' here), not lexically."""
    df = DataFrame({'A' : np.arange(6,dtype='int64'),
                    'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B')

    result = df.sort_index()
    expected = df.iloc[[4,0,1,5,2,3]]
    assert_frame_equal(result, expected)

    result = df.sort_index(ascending=False)
    expected = df.iloc[[3,2,5,1,0,4]]
    assert_frame_equal(result, expected)
def test_sort_nan(self):
    """GH3917: NaN placement under sort_values/sort_index with every
    combination of na_position and ascending, including NaN row labels."""
    nan = np.nan
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]})

    # sort one column only, NaNs first
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 9, 2, nan, 5, 5, 4]},
        index=[2, 0, 3, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A'], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # descending keeps NaNs first when requested
    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 9, 2]},
        index=[2, 5, 4, 6, 1, 0, 3])
    sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
    assert_frame_equal(sorted_df, expected)

    # na_position='last', order
    expected = DataFrame(
        {'A': [1, 1, 2, 4, 6, 8, nan],
         'B': [2, 9, nan, 5, 5, 4, 5]},
        index=[3, 0, 1, 6, 4, 5, 2])
    sorted_df = df.sort_values(['A','B'])
    assert_frame_equal(sorted_df, expected)

    # na_position='first', order
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 2, 9, nan, 5, 5, 4]},
        index=[2, 3, 0, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A','B'], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # na_position='first', not order (mixed ascending per column)
    expected = DataFrame(
        {'A': [nan, 1, 1, 2, 4, 6, 8],
         'B': [5, 9, 2, nan, 5, 5, 4]},
        index=[2, 0, 3, 1, 6, 4, 5])
    sorted_df = df.sort_values(['A','B'], ascending=[1,0], na_position='first')
    assert_frame_equal(sorted_df, expected)

    # na_position='last', not order
    expected = DataFrame(
        {'A': [8, 6, 4, 2, 1, 1, nan],
         'B': [4, 5, 5, nan, 2, 9, 5]},
        index=[5, 4, 6, 1, 3, 0, 2])
    sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='last')
    assert_frame_equal(sorted_df, expected)

    # Test DataFrame with nan label
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]},
                   index = [1, 2, 3, 4, 5, 6, nan])

    # NaN label, ascending=True, na_position='last'
    sorted_df = df.sort_index(kind='quicksort', ascending=True, na_position='last')
    expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                          'B': [9, nan, 5, 2, 5, 4, 5]},
                         index = [1, 2, 3, 4, 5, 6, nan])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=True, na_position='first'
    sorted_df = df.sort_index(na_position='first')
    expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
                          'B': [5, 9, nan, 5, 2, 5, 4]},
                         index = [nan, 1, 2, 3, 4, 5, 6])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=False, na_position='last'
    sorted_df = df.sort_index(kind='quicksort', ascending=False)
    expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
                          'B': [4, 5, 2, 5, nan, 9, 5]},
                         index = [6, 5, 4, 3, 2, 1, nan])
    assert_frame_equal(sorted_df, expected)

    # NaN label, ascending=False, na_position='first'
    sorted_df = df.sort_index(kind='quicksort', ascending=False, na_position='first')
    expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
                          'B': [5, 4, 5, 2, 5, nan, 9]},
                         index = [nan, 6, 5, 4, 3, 2, 1])
    assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
    """Multi-column mergesort with mixed per-column ascending must stay
    stable and respect na_position='first'."""
    nan = np.nan
    df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                    'B': [9, nan, 5, 2, 5, 4, 5]})

    # test stable mergesort
    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 2, 9]},
        index=[2, 5, 4, 6, 1, 3, 0])
    sorted_df = df.sort_values(['A','B'], ascending=[0,1], na_position='first',
                               kind='mergesort')
    assert_frame_equal(sorted_df, expected)

    expected = DataFrame(
        {'A': [nan, 8, 6, 4, 2, 1, 1],
         'B': [5, 4, 5, 5, nan, 9, 2]},
        index=[2, 5, 4, 6, 1, 0, 3])
    sorted_df = df.sort_values(['A','B'], ascending=[0,0], na_position='first',
                               kind='mergesort')
    assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
    """Multi-column sorts must match np.lexsort; sort_index(by=...) is
    deprecated (GH 9816) in favor of sort_values."""
    import random
    A = np.arange(5).repeat(20)
    B = np.tile(np.arange(5), 20)
    random.shuffle(A)
    random.shuffle(B)
    frame = DataFrame({'A': A, 'B': B,
                       'C': np.random.randn(100)})

    # use .sort_values #9816: the old spelling must warn
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'])
    result = frame.sort_values(by=['A', 'B'])
    # lexsort uses the *last* key as primary, hence (B, A)
    indexer = np.lexsort((frame['B'], frame['A']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['A', 'B'], ascending=False)
    result = frame.sort_values(by=['A', 'B'], ascending=False)
    # descending is modeled by lexsorting the reversed ranks
    indexer = np.lexsort((frame['B'].rank(ascending=False),
                          frame['A'].rank(ascending=False)))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        frame.sort_index(by=['B', 'A'])
    result = frame.sort_values(by=['B', 'A'])
    indexer = np.lexsort((frame['A'], frame['B']))
    expected = frame.take(indexer)
    assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
    """sort_index(inplace=True) on both axes; the sorted frame must hold
    new column objects, not views of the unsorted ones."""
    frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    # axis=0
    unordered = frame.ix[[3, 2, 4, 1]]
    a_id = id(unordered['A'])
    df = unordered.copy()
    df.sort_index(inplace=True)
    expected = frame
    assert_frame_equal(df, expected)
    # in-place sort must have produced a new column object
    self.assertNotEqual(a_id, id(df['A']))

    df = unordered.copy()
    df.sort_index(ascending=False, inplace=True)
    expected = frame[::-1]
    assert_frame_equal(df, expected)

    # axis=1
    unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
    df = unordered.copy()
    df.sort_index(axis=1, inplace=True)
    expected = frame
    assert_frame_equal(df, expected)

    df = unordered.copy()
    df.sort_index(axis=1, ascending=False, inplace=True)
    expected = frame.ix[:, ::-1]
    assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
    """Per-column ascending flags: sort_values, MultiIndex sort_index,
    and Series.sort_index must all agree with the lexsort reference."""
    A = np.arange(20).repeat(5)
    B = np.tile(np.arange(5), 20)

    indexer = np.random.permutation(100)
    A = A.take(indexer)
    B = B.take(indexer)

    df = DataFrame({'A': A, 'B': B,
                    'C': np.random.randn(100)})

    # use .sort_values #9816: old spelling warns
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=['A', 'B'], ascending=[1, 0])
    result = df.sort_values(by=['A', 'B'], ascending=[1, 0])

    # descending B is modeled as ascending (max - B) in the lexsort
    ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
    expected = df.take(ex_indexer)
    assert_frame_equal(result, expected)

    # test with multiindex, too
    idf = df.set_index(['A', 'B'])

    result = idf.sort_index(ascending=[1, 0])
    expected = idf.take(ex_indexer)
    assert_frame_equal(result, expected)

    # also, Series!
    result = idf['C'].sort_index(ascending=[1, 0])
    assert_series_equal(result, expected['C'])
def test_sort_inplace(self):
    """sort_values(inplace=True) must equal the out-of-place result for
    single and multiple columns, ascending and descending."""
    frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
                      columns=['A', 'B', 'C', 'D'])

    sorted_df = frame.copy()
    sorted_df.sort_values(by='A', inplace=True)
    expected = frame.sort_values(by='A')
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.copy()
    sorted_df.sort_values(by='A', ascending=False, inplace=True)
    expected = frame.sort_values(by='A', ascending=False)
    assert_frame_equal(sorted_df, expected)

    sorted_df = frame.copy()
    sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
    expected = frame.sort_values(by=['A', 'B'], ascending=False)
    assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
    """Sorting by a duplicated column label must raise; sorting a
    MultiIndex-columned frame by a bare level name must raise; tuples
    naming a full column are accepted (and coerced to a list)."""
    ### with 9816, these are all translated to .sort_values

    df = DataFrame([lrange(5,9), lrange(4)],
                   columns=['a', 'a', 'b', 'b'])

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816: deprecated spelling still validates
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by='a')
    with assertRaisesRegexp(ValueError, 'duplicate'):
        df.sort_values(by='a')

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['a'])
    with assertRaisesRegexp(ValueError, 'duplicate'):
        df.sort_values(by=['a'])

    with assertRaisesRegexp(ValueError, 'duplicate'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            # multi-column 'by' is separate codepath
            df.sort_index(by=['a', 'b'])
    with assertRaisesRegexp(ValueError, 'duplicate'):
        # multi-column 'by' is separate codepath
        df.sort_values(by=['a', 'b'])

    # with multi-index
    # GH4370: a bare level name is ambiguous for MultiIndex columns
    df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))
    with assertRaisesRegexp(ValueError, 'levels'):
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by='a')
    with assertRaisesRegexp(ValueError, 'levels'):
        df.sort_values(by='a')

    # convert tuples to a list of tuples
    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=[('a',1)])
    expected = df.sort_values(by=[('a',1)])

    # use .sort_values #9816
    with tm.assert_produces_warning(FutureWarning):
        df.sort_index(by=('a',1))
    result = df.sort_values(by=('a',1))
    assert_frame_equal(result, expected)
def test_sortlevel(self):
    """sortlevel with sort_remaining=False on an already-sorted level is
    a no-op, whether given one level or a list of levels."""
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    df = DataFrame([[1, 2], [3, 4]], mi)
    res = df.sortlevel('A', sort_remaining=False)
    assert_frame_equal(df, res)

    res = df.sortlevel(['A', 'B'], sort_remaining=False)
    assert_frame_equal(df, res)
def test_sort_datetimes(self):
    """GH 3461: argsort vs lexsort discrepancies — sorting by a scalar
    'by' and a one-element list must agree, for object and datetime cols."""
    df = DataFrame(['a','a','a','b','c','d','e','f','g'],
                   columns=['A'],
                   index=date_range('20130101',periods=9))
    dts = [Timestamp(x)
           for x in ['2004-02-11','2004-01-21','2004-01-26',
                     '2005-09-20','2010-10-04','2009-05-12',
                     '2008-11-12','2010-09-28','2010-09-28']]
    # interleave so column B is deliberately unsorted
    df['B'] = dts[::2] + dts[1::2]
    df['C'] = 2.
    df['A1'] = 3.

    df1 = df.sort_values(by='A')
    df2 = df.sort_values(by=['A'])
    assert_frame_equal(df1,df2)

    df1 = df.sort_values(by='B')
    df2 = df.sort_values(by=['B'])
    assert_frame_equal(df1,df2)
def test_frame_column_inplace_sort_exception(self):
    """In-place sort of a column view must raise (it is a view on the
    frame); a copy of the column sorts fine."""
    s = self.frame['A']
    with assertRaisesRegexp(ValueError, "This Series is a view"):
        s.sort_values(inplace=True)

    cp = s.copy()
    cp.sort_values()  # it works!
def test_combine_first(self):
    """combine_first: disjoint/same/overlapping/empty index combinations;
    the caller's values always win where both frames have data."""
    # disjoint
    head, tail = self.frame[:5], self.frame[5:]

    combined = head.combine_first(tail)
    reordered_frame = self.frame.reindex(combined.index)
    assert_frame_equal(combined, reordered_frame)
    self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))
    assert_series_equal(combined['A'], reordered_frame['A'])

    # same index: columns union, caller's columns take precedence
    fcopy = self.frame.copy()
    fcopy['A'] = 1
    del fcopy['C']

    fcopy2 = self.frame.copy()
    fcopy2['B'] = 0
    del fcopy2['D']

    combined = fcopy.combine_first(fcopy2)

    self.assertTrue((combined['A'] == 1).all())
    assert_series_equal(combined['B'], fcopy['B'])
    assert_series_equal(combined['C'], fcopy2['C'])
    assert_series_equal(combined['D'], fcopy['D'])

    # overlap: values from the caller win on overlapping labels
    head, tail = reordered_frame[:10].copy(), reordered_frame
    head['A'] = 1

    combined = head.combine_first(tail)
    self.assertTrue((combined['A'][:10] == 1).all())

    # reverse overlap
    tail['A'][:10] = 0
    combined = tail.combine_first(head)
    self.assertTrue((combined['A'][:10] == 0).all())

    # no overlap
    f = self.frame[:10]
    g = self.frame[10:]
    combined = f.combine_first(g)
    assert_series_equal(combined['A'].reindex(f.index), f['A'])
    assert_series_equal(combined['A'].reindex(g.index), g['A'])

    # corner cases: combining with empty must be the identity
    comb = self.frame.combine_first(self.empty)
    assert_frame_equal(comb, self.frame)

    comb = self.empty.combine_first(self.frame)
    assert_frame_equal(comb, self.frame)

    comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
    self.assertTrue("faz" in comb.index)

    # #2525: empty-but-columned other still contributes its columns
    df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
    df2 = DataFrame({}, columns=['b'])
    result = df.combine_first(df2)
    self.assertTrue('b' in result)
def test_combine_first_mixed_bug(self):
    """Regression tests for combine_first on mixed-dtype frames
    (column union, bool preservation GH3552, datetime64 GH3593, GH3016)."""
    idx = Index(['a', 'b', 'c', 'e'])
    ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
    ser2 = Series(['a', 'b', 'c', 'e'], index=idx)
    ser3 = Series([12, 4, 5, 97], index=idx)
    frame1 = DataFrame({"col0": ser1,
                        "col2": ser2,
                        "col3": ser3})
    idx = Index(['a', 'b', 'c', 'f'])
    ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
    ser2 = Series(['a', 'b', 'c', 'f'], index=idx)
    ser3 = Series([12, 4, 5, 97], index=idx)
    frame2 = DataFrame({"col1": ser1,
                        "col2": ser2,
                        "col5": ser3})
    combined = frame1.combine_first(frame2)
    # union of col0/col2/col3 and col1/col2/col5 -> 5 distinct columns
    self.assertEqual(len(combined.columns), 5)
    # gh 3016 (same as in update)
    df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
                   columns=['A','B','bool1','bool2'])
    other = DataFrame([[45,45]],index=[0],columns=['A','B'])
    result = df.combine_first(other)
    assert_frame_equal(result, df)
    df.ix[0,'A'] = np.nan
    result = df.combine_first(other)
    df.ix[0,'A'] = 45
    assert_frame_equal(result, df)
    # doc example
    df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
                     'B' : [np.nan, 2., 3., np.nan, 6.]})
    df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
                     'B' : [np.nan, np.nan, 3., 4., 6., 8.]})
    result = df1.combine_first(df2)
    expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
    assert_frame_equal(result,expected)
    # GH3552, return object dtype with bools
    df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])
    df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])
    result = df1.combine_first(df2)[2]
    expected = Series([True, True, False], name=2)
    assert_series_equal(result, expected)
    # GH 3593, converting datetime64[ns] incorrecly
    df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
    df1 = DataFrame({"a":[None, None, None]})
    df2 = df1.combine_first(df0)
    assert_frame_equal(df2, df0)
    df2 = df0.combine_first(df1)
    assert_frame_equal(df2, df0)
    df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
    df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
    df2 = df1.combine_first(df0)
    result = df0.copy()
    result.iloc[0,:] = df1.iloc[0,:]
    assert_frame_equal(df2, result)
    df2 = df0.combine_first(df1)
    assert_frame_equal(df2, df0)
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame([[1.5, nan, 3],
[3.6, 2, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
other = DataFrame([[45,45]],index=[0],columns=['A','B'])
df.update(other)
expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame([[1.5, nan, 3],
[1.5, 2, 3],
[1.5, nan, 3],
[1.5, nan, 3.]])
assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame([[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_raise(self):
    """With raise_conflict=True, update() raises when other would overwrite
    existing (non-NaN) data instead of silently clobbering it."""
    df = DataFrame([[1.5, 1, 3.],
                    [1.5, nan, 3.],
                    [1.5, nan, 3],
                    [1.5, nan, 3]])
    # position (1, 1) holds a real value in df and 2.0 in other -> conflict
    other = DataFrame([[2., nan],
                       [nan, 7]], index=[1, 3], columns=[1, 2])
    with assertRaisesRegexp(ValueError, "Data overlaps"):
        df.update(other, raise_conflict=True)
def test_update_from_non_df(self):
d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}
df = DataFrame(d)
d['a'] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
df = DataFrame(d)
d['a'] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
def test_combineAdd(self):
    """Deprecated combineAdd(): elementwise add with NaN treated as 0 where
    only one side has data (whole test runs under a FutureWarning guard)."""
    with tm.assert_produces_warning(FutureWarning):
        # trivial
        comb = self.frame.combineAdd(self.frame)
        assert_frame_equal(comb, self.frame * 2)
        # more rigorous
        a = DataFrame([[1., nan, nan, 2., nan]],
                      columns=np.arange(5))
        b = DataFrame([[2., 3., nan, 2., 6., nan]],
                      columns=np.arange(6))
        expected = DataFrame([[3., 3., nan, 4., 6., nan]],
                             columns=np.arange(6))
        result = a.combineAdd(b)
        assert_frame_equal(result, expected)
        result2 = a.T.combineAdd(b.T)
        assert_frame_equal(result2, expected.T)
        # combineAdd is equivalent to combine(..., add, fill_value=0)
        expected2 = a.combine(b, operator.add, fill_value=0.)
        assert_frame_equal(expected, expected2)
        # corner cases
        comb = self.frame.combineAdd(self.empty)
        assert_frame_equal(comb, self.frame)
        comb = self.empty.combineAdd(self.frame)
        assert_frame_equal(comb, self.frame)
        # integer corner case
        df1 = DataFrame({'x': [5]})
        df2 = DataFrame({'x': [1]})
        df3 = DataFrame({'x': [6]})
        comb = df1.combineAdd(df2)
        assert_frame_equal(comb, df3)
        # mixed type GH2191
        df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
        df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
        rs = df1.combineAdd(df2)
        xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
        assert_frame_equal(xp, rs)
        # TODO: test integer fill corner?
def test_combineMult(self):
    """Deprecated combineMult(): elementwise multiply; frames combined with
    an empty frame pass through unchanged."""
    with tm.assert_produces_warning(FutureWarning):
        # trivial
        comb = self.frame.combineMult(self.frame)
        assert_frame_equal(comb, self.frame ** 2)
        # corner cases
        comb = self.frame.combineMult(self.empty)
        assert_frame_equal(comb, self.frame)
        comb = self.empty.combineMult(self.frame)
        assert_frame_equal(comb, self.frame)
def test_combine_generic(self):
    """combine() with an arbitrary ufunc (np.add): missing columns become
    all-NaN; shared positions get func applied."""
    df1 = self.frame
    df2 = self.frame.ix[:-5, ['A', 'B', 'C']]
    combined = df1.combine(df2, np.add)
    combined2 = df2.combine(df1, np.add)
    # 'D' exists only in df1 -> no counterpart to add -> all NaN
    self.assertTrue(combined['D'].isnull().all())
    self.assertTrue(combined2['D'].isnull().all())
    chunk = combined.ix[:-5, ['A', 'B', 'C']]
    chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]
    # overlapping region is simply doubled
    exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
    assert_frame_equal(chunk, exp)
    assert_frame_equal(chunk2, exp)
def test_clip(self):
    """clip_upper/clip_lower/clip with a scalar bound each side."""
    median = self.frame.median().median()
    capped = self.frame.clip_upper(median)
    self.assertFalse((capped.values > median).any())
    floored = self.frame.clip_lower(median)
    self.assertFalse((floored.values < median).any())
    # clipping to an identical lower and upper bound pins every value
    double = self.frame.clip(upper=median, lower=median)
    self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
    """clip() with scalar bounds, including bounds passed in reversed
    order, which clip() normalizes (GH 2747)."""
    # GH #2747
    df = DataFrame(np.random.randn(1000,2))
    for lb, ub in [(-1,1),(1,-1)]:
        clipped_df = df.clip(lb, ub)
        # clip() sorts the bounds itself; mirror that for the checks
        lb, ub = min(lb,ub), max(ub,lb)
        lb_mask = df.values <= lb
        ub_mask = df.values >= ub
        mask = ~lb_mask & ~ub_mask
        self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)
        self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)
        self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)
def test_clip_against_series(self):
    """clip() with Series bounds and axis=0 aligns bounds by row (GH 6966)."""
    # GH #6966
    df = DataFrame(np.random.randn(1000, 2))
    lb = Series(np.random.randn(1000))
    ub = lb + 1
    clipped_df = df.clip(lb, ub, axis=0)
    for i in range(2):
        lb_mask = df.iloc[:, i] <= lb
        ub_mask = df.iloc[:, i] >= ub
        mask = ~lb_mask & ~ub_mask
        result = clipped_df.loc[lb_mask, i]
        # names differ (bound Series is unnamed), so compare values only
        assert_series_equal(result, lb[lb_mask], check_names=False)
        self.assertEqual(result.name, i)
        result = clipped_df.loc[ub_mask, i]
        assert_series_equal(result, ub[ub_mask], check_names=False)
        self.assertEqual(result.name, i)
        assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
def test_clip_against_frame(self):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
assert_frame_equal(clipped_df[mask], df[mask])
def test_get_X_columns(self):
    """_get_numeric_data() keeps numeric and bool columns, drops
    object/None columns."""
    # numeric and object columns
    df = DataFrame({'a': [1, 2, 3],
                    'b' : [True, False, True],
                    'c': ['foo', 'bar', 'baz'],
                    'd': [None, None, None],
                    'e': [3.14, 0.577, 2.773]})
    self.assert_numpy_array_equal(df._get_numeric_data().columns,
                                  ['a', 'b', 'e'])
def test_is_mixed_type(self):
    """_is_mixed_type is False for a homogeneous frame, True for mixed."""
    self.assertFalse(self.frame._is_mixed_type)
    self.assertTrue(self.mixed_frame._is_mixed_type)
def test_get_numeric_data(self):
    """_get_numeric_data() selects numeric columns across many dtypes and
    returns an empty-column frame when nothing is numeric."""
    intname = np.dtype(np.int_).name
    floatname = np.dtype(np.float_).name
    datetime64name = np.dtype('M8[ns]').name
    objectname = np.dtype(np.object_).name
    # scalar values are broadcast over the 10-row index
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},
                   index=np.arange(10))
    result = df.get_dtype_counts()
    expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})
    result.sort_index()
    expected.sort_index()
    assert_series_equal(result, expected)
    df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                    'd' : np.array([1.]*10,dtype='float32'),
                    'e' : np.array([1]*10,dtype='int32'),
                    'f' : np.array([1]*10,dtype='int16'),
                    'g' : Timestamp('20010102')},
                   index=np.arange(10))
    result = df._get_numeric_data()
    expected = df.ix[:, ['a', 'b','d','e','f']]
    assert_frame_equal(result, expected)
    # only object/datetime columns -> empty selection
    only_obj = df.ix[:, ['c','g']]
    result = only_obj._get_numeric_data()
    expected = df.ix[:, []]
    assert_frame_equal(result, expected)
    df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})
    result = df._get_numeric_data()
    expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})
    assert_frame_equal(result, expected)
    # already all-numeric frame passes through unchanged
    df = result.copy()
    result = df._get_numeric_data()
    expected = df
    assert_frame_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
    """describe() on a mixed frame includes bool and int columns but
    excludes string columns; bool stats keep bool dtype for min/max."""
    df = DataFrame({
        'string_data': ['a', 'b', 'c', 'd', 'e'],
        'bool_data': [True, True, False, False, False],
        'int_data': [10, 20, 30, 40, 50],
    })
    # Boolean data and integer data is included in .describe() output, string data isn't
    self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])
    bool_describe = df.describe()['bool_data']
    self.assertEqual(bool_describe['min'].dtype, np.bool_)
    self.assertEqual(bool_describe['max'].dtype, np.bool_)
    self.assertFalse(bool_describe['min'])
    self.assertTrue(bool_describe['max'])
    # 2 of 5 True -> mean 0.4; median of bools -> 0
    assert_almost_equal(bool_describe['mean'], 0.4)
    assert_almost_equal(bool_describe['50%'], 0)
def test_reduce_mixed_frame(self):
    """sum(axis=0) on a mixed frame: bools add as ints, strings
    concatenate; matches the transposed row-wise sum."""
    df = DataFrame({
        'bool_data': [True, True, False, False, False],
        'int_data': [10, 20, 30, 40, 50],
        'string_data': ['a', 'b', 'c', 'd', 'e'],
    })
    df.reindex(columns=['bool_data', 'int_data', 'string_data'])
    test = df.sum(axis=0)
    assert_almost_equal(test.values, [2, 150, 'abcde'])
    assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
    """count() counts non-null entries per axis; degenerate frames
    (no rows, no columns, fully empty) yield zero-valued Series."""
    f = lambda s: notnull(s).sum()
    self._check_stat_op('count', f,
                        has_skipna=False,
                        has_numeric_only=True,
                        check_dtype=False,
                        check_dates=True)
    # corner case: counting an empty frame still returns Series
    frame = DataFrame()
    ct1 = frame.count(1)
    tm.assertIsInstance(ct1, Series)
    ct2 = frame.count(0)
    tm.assertIsInstance(ct2, Series)
    # index but no columns
    df = DataFrame(index=lrange(10))
    result = df.count(1)
    expected = Series(0, index=df.index)
    assert_series_equal(result, expected)
    # columns but no rows
    df = DataFrame(columns=lrange(10))
    result = df.count(0)
    expected = Series(0, index=df.columns)
    assert_series_equal(result, expected)
    df = DataFrame()
    result = df.count()
    expected = Series(0, index=[])
    assert_series_equal(result, expected)
def test_sum(self):
    """sum() agrees with np.sum, including on float32 frames (with
    relaxed precision)."""
    self._check_stat_op('sum', np.sum, has_numeric_only=True)
    self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),
                        has_numeric_only=True, check_dtype=False, check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
    """Stat reductions on object-dtype frames coerce to float and match
    the same reduction on an explicit float64 frame."""
    data = {
        'a': [-0.00049987540199591344, -0.0016467257772919831,
              0.00067695870775883013],
        'b': [-0, -0, 0.0],
        'c': [0.00031111847529610595, 0.0014902627951905339,
              -0.00094099200035979691]
    }
    # dtype='O' forces the object path through nanops
    df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                    dtype='O')
    methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
    df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                     2: [np.nan, 4]}, dtype=object)
    for df in [df1, df2]:
        for meth in methods:
            self.assertEqual(df.values.dtype, np.object_)
            result = getattr(df, meth)(1)
            expected = getattr(df.astype('f8'), meth)(1)
            # some methods misbehave under old bottleneck versions
            if not tm._incompat_bottleneck_version(meth):
                assert_series_equal(result, expected)
def test_mean(self):
    """mean() agrees with np.mean (including datetime frames)."""
    self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
    """product() agrees with np.prod."""
    self._check_stat_op('product', np.prod)
def test_median(self):
    """median() agrees with np.median (NaN-containing columns -> NaN,
    matching skipna=False semantics in the reference)."""
    def wrapper(x):
        if isnull(x).any():
            return np.nan
        return np.median(x)
    self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
    """min() agrees with np.min on float and integer frames."""
    self._check_stat_op('min', np.min, check_dates=True)
    self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
    """cummin() matches a per-Series apply along both axes and preserves
    the frame's shape."""
    # seed some NaNs so the skipna path is exercised
    self.tsframe.ix[5:10, 0] = nan
    self.tsframe.ix[10:15, 1] = nan
    self.tsframe.ix[15:, 2] = nan
    cummin = self.tsframe.cummin()
    expected = self.tsframe.apply(Series.cummin)
    assert_frame_equal(cummin, expected)
    cummin = self.tsframe.cummin(axis=1)
    expected = self.tsframe.apply(Series.cummin, axis=1)
    assert_frame_equal(cummin, expected)
    # smoke test on an int frame
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cummin()
    # shape is preserved under axis=1
    cummin_xs = self.tsframe.cummin(axis=1)
    self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
def test_cummax(self):
    """cummax() matches a per-Series apply along both axes and preserves
    the frame's shape."""
    # seed some NaNs so the skipna path is exercised
    self.tsframe.ix[5:10, 0] = nan
    self.tsframe.ix[10:15, 1] = nan
    self.tsframe.ix[15:, 2] = nan
    cummax = self.tsframe.cummax()
    expected = self.tsframe.apply(Series.cummax)
    assert_frame_equal(cummax, expected)
    cummax = self.tsframe.cummax(axis=1)
    expected = self.tsframe.apply(Series.cummax, axis=1)
    assert_frame_equal(cummax, expected)
    # smoke test on an int frame
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cummax()
    # shape is preserved under axis=1
    cummax_xs = self.tsframe.cummax(axis=1)
    self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
def test_max(self):
    """max() agrees with np.max on float and integer frames."""
    self._check_stat_op('max', np.max, check_dates=True)
    self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
    """mad() agrees with mean absolute deviation about the mean."""
    f = lambda x: np.abs(x - x.mean()).mean()
    self._check_stat_op('mad', f)
def test_var_std(self):
    """var()/std() agree with ddof=1 numpy equivalents, honor a custom
    ddof, and never go negative (with and without bottleneck)."""
    alt = lambda x: np.var(x, ddof=1)
    self._check_stat_op('var', alt)
    alt = lambda x: np.std(x, ddof=1)
    self._check_stat_op('std', alt)
    result = self.tsframe.std(ddof=4)
    expected = self.tsframe.apply(lambda x: x.std(ddof=4))
    assert_almost_equal(result, expected)
    result = self.tsframe.var(ddof=4)
    expected = self.tsframe.apply(lambda x: x.var(ddof=4))
    assert_almost_equal(result, expected)
    # identical rows -> variance ~0; must not come out negative
    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nanvar(arr, axis=0)
    self.assertFalse((result < 0).any())
    if nanops._USE_BOTTLENECK:
        # re-check with the pure-numpy fallback, then restore the flag
        nanops._USE_BOTTLENECK = False
        result = nanops.nanvar(arr, axis=0)
        self.assertFalse((result < 0).any())
        nanops._USE_BOTTLENECK = True
def test_numeric_only_flag(self):
    """numeric_only=True drops non-numeric columns for sem/var/std;
    numeric_only=False on mixed data raises TypeError."""
    methods = ['sem', 'var', 'std']
    df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
    # a numeric-looking string must still be excluded
    df1.ix[0, 'foo'] = '100'
    df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
    df2.ix[0, 'foo'] = 'a'
    for meth in methods:
        result = getattr(df1, meth)(axis=1, numeric_only=True)
        expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
        assert_series_equal(expected, result)
        result = getattr(df2, meth)(axis=1, numeric_only=True)
        expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
        assert_series_equal(expected, result)
        self.assertRaises(TypeError, lambda : getattr(df1, meth)(axis=1, numeric_only=False))
        self.assertRaises(TypeError, lambda : getattr(df2, meth)(axis=1, numeric_only=False))
def test_sem(self):
    """sem() equals std(ddof)/sqrt(n), honors custom ddof, and never goes
    negative (with and without bottleneck)."""
    alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
    self._check_stat_op('sem', alt)
    result = self.tsframe.sem(ddof=4)
    expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))
    assert_almost_equal(result, expected)
    # identical rows -> sem ~0; must not come out negative
    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nansem(arr, axis=0)
    self.assertFalse((result < 0).any())
    if nanops._USE_BOTTLENECK:
        # re-check with the pure-numpy fallback, then restore the flag
        nanops._USE_BOTTLENECK = False
        result = nanops.nansem(arr, axis=0)
        self.assertFalse((result < 0).any())
        nanops._USE_BOTTLENECK = True
def test_skew(self):
    """skew() agrees with scipy.stats.skew(bias=False); fewer than 3
    observations yield NaN."""
    tm._skip_if_no_scipy()
    from scipy.stats import skew
    def alt(x):
        if len(x) < 3:
            return np.nan
        return skew(x, bias=False)
    self._check_stat_op('skew', alt)
def test_kurt(self):
    """kurt() agrees with scipy.stats.kurtosis(bias=False); also checks
    level-based reduction on a MultiIndex frame."""
    tm._skip_if_no_scipy()
    from scipy.stats import kurtosis
    def alt(x):
        if len(x) < 4:
            return np.nan
        return kurtosis(x, bias=False)
    self._check_stat_op('kurt', alt)
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]])
    df = DataFrame(np.random.randn(6, 3), index=index)
    kurt = df.kurt()
    # kurt(level=0) then selecting the single level value matches the
    # plain reduction, modulo the resulting Series name
    kurt2 = df.kurt(level=0).xs('bar')
    assert_series_equal(kurt, kurt2, check_names=False)
    self.assertTrue(kurt.name is None)
    self.assertEqual(kurt2.name, 'bar')
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                   has_numeric_only=False, check_dtype=True, check_dates=False,
                   check_less_precise=False):
    """Shared driver for reduction tests.

    Compares ``frame.<name>(axis=...)`` against ``alternative`` applied
    per row/column, for both skipna settings.

    Parameters
    ----------
    name : str
        Reduction method name looked up on the frame (e.g. 'sum').
    alternative : callable
        Reference implementation applied to a Series' values.
    frame : DataFrame, optional
        Frame to test; defaults to ``self.frame`` with NaNs seeded in.
    has_skipna : bool
        Whether the method accepts ``skipna``; also enables the all-NaN
        checks at the end.
    has_numeric_only : bool
        Whether the method accepts ``numeric_only``.
    check_dtype : bool
        Verify the result dtype equals the frame's common dtype.
    check_dates : bool
        Also smoke-test the method on a datetime64 frame.
    check_less_precise : bool
        Relax float comparison (for float32 inputs).
    """
    if frame is None:
        frame = self.frame
        # set some NaNs
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan
    f = getattr(frame, name)
    if check_dates:
        # reductions on a pure datetime frame must still return a Series
        df = DataFrame({'b': date_range('1/1/2001', periods=2)})
        _f = getattr(df, name)
        result = _f()
        self.assertIsInstance(result, Series)
        df['a'] = lrange(len(df))
        result = getattr(df, name)()
        self.assertIsInstance(result, Series)
        self.assertTrue(len(result))
    if has_skipna:
        def skipna_wrapper(x):
            nona = x.dropna()
            if len(nona) == 0:
                return np.nan
            return alternative(nona)
        def wrapper(x):
            return alternative(x.values)
        # skipna=False path: NaNs propagate, so compare against the raw values
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        assert_series_equal(result0, frame.apply(wrapper),
                            check_dtype=check_dtype,
                            check_less_precise=check_less_precise)
        assert_series_equal(result1, frame.apply(wrapper, axis=1),
                            check_dtype=False,
                            check_less_precise=check_less_precise)
    else:
        skipna_wrapper = alternative
        wrapper = alternative
    # default (skipna=True) path
    result0 = f(axis=0)
    result1 = f(axis=1)
    assert_series_equal(result0, frame.apply(skipna_wrapper),
                        check_dtype=check_dtype,
                        check_less_precise=check_less_precise)
    if not tm._incompat_bottleneck_version(name):
        assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                            check_dtype=False,
                            check_less_precise=check_less_precise)
    if check_dtype:
        lcd_dtype = frame.values.dtype
        self.assertEqual(lcd_dtype, result0.dtype)
        self.assertEqual(lcd_dtype, result1.dtype)
    # bad axis
    assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
    # make sure works on mixed-type frame
    getattr(self.mixed_frame, name)(axis=0)
    getattr(self.mixed_frame, name)(axis=1)
    if has_numeric_only:
        getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
        getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
        getattr(self.frame, name)(axis=0, numeric_only=False)
        getattr(self.frame, name)(axis=1, numeric_only=False)
    if has_skipna:
        # all-NaN input must reduce to NaN, not raise
        all_na = self.frame * np.NaN
        r0 = getattr(all_na, name)(axis=0)
        r1 = getattr(all_na, name)(axis=1)
        if not tm._incompat_bottleneck_version(name):
            self.assertTrue(np.isnan(r0).all())
            self.assertTrue(np.isnan(r1).all())
def test_mode(self):
    """mode(): single/multi-modal columns, all-unique columns (empty
    result), NaN padding across columns, and datetime/object dtypes."""
    df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
                       "B": [10, 10, 10, np.nan, 3, 4],
                       "C": [8, 8, 8, 9, 9, 9],
                       "D": np.arange(6,dtype='int64'),
                       "E": [8, 8, 1, 1, 3, 3]})
    assert_frame_equal(df[["A"]].mode(),
                       pd.DataFrame({"A": [12]}))
    # all values unique -> no mode -> empty column
    expected = pd.Series([], dtype='int64', name='D').to_frame()
    assert_frame_equal(df[["D"]].mode(), expected)
    expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
    assert_frame_equal(df[["E"]].mode(), expected)
    assert_frame_equal(df[["A", "B"]].mode(),
                       pd.DataFrame({"A": [12], "B": [10.]}))
    # columns with fewer modes are padded with NaN to the longest column
    assert_frame_equal(df.mode(),
                       pd.DataFrame({"A": [12, np.nan, np.nan],
                                     "B": [10, np.nan, np.nan],
                                     "C": [8, 9, np.nan],
                                     "D": [np.nan, np.nan, np.nan],
                                     "E": [1, 3, 8]}))
    df["C"] = list(reversed(df["C"]))
    com.pprint_thing(df["C"])
    com.pprint_thing(df["C"].mode())
    a, b = (df[["A", "B", "C"]].mode(),
            pd.DataFrame({"A": [12, np.nan],
                          "B": [10, np.nan],
                          "C": [8, 9]}))
    com.pprint_thing(a)
    com.pprint_thing(b)
    assert_frame_equal(a, b)
    # every value unique -> empty frame preserving each column's dtype
    df = pd.DataFrame({"A": np.arange(6,dtype='int64'),
                       "B": pd.date_range('2011', periods=6),
                       "C": list('abcdef')})
    exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
                        "B": pd.Series([], dtype=df["B"].dtype),
                        "C": pd.Series([], dtype=df["C"].dtype)})
    assert_frame_equal(df.mode(), exp)
    # introduce one duplicate per column -> single-row mode
    df.loc[1, "A"] = 0
    df.loc[4, "B"] = df.loc[3, "B"]
    df.loc[5, "C"] = 'e'
    exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
                        "B": pd.Series([df.loc[3, "B"]], dtype=df["B"].dtype),
                        "C": pd.Series(['e'], dtype=df["C"].dtype)})
    assert_frame_equal(df.mode(), exp)
def test_sum_corner(self):
    """sum() on an empty frame returns empty Series on both axes."""
    axis0 = self.empty.sum(0)
    axis1 = self.empty.sum(1)
    tm.assertIsInstance(axis0, Series)
    tm.assertIsInstance(axis1, Series)
    self.assertEqual(len(axis0), 0)
    self.assertEqual(len(axis1), 0)
def test_sum_object(self):
    """sum() does not blow up on a frame of timedelta objects."""
    values = self.frame.values.astype(int)
    frame = DataFrame(values, index=self.frame.index,
                      columns=self.frame.columns)
    # multiplying by a timedelta yields timedelta cells
    deltas = frame * timedelta(1)
    deltas.sum()
def test_sum_bool(self):
    """sum() works on boolean frames along both axes (smoke test)."""
    bools = np.isnan(self.frame)
    bools.sum(1)
    bools.sum(0)
def test_mean_corner(self):
    """mean() on a mixed frame keeps only numeric columns; bool columns
    average as 0/1."""
    # unit test when have object data
    the_mean = self.mixed_frame.mean(axis=0)
    the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
    self.assertTrue(the_sum.index.equals(the_mean.index))
    self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
    # xs sum mixed type, just want to know it works...
    the_mean = self.mixed_frame.mean(axis=1)
    the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
    self.assertTrue(the_sum.index.equals(the_mean.index))
    # take mean of boolean column
    self.frame['bool'] = self.frame['A'] > 0
    means = self.frame.mean(0)
    self.assertEqual(means['bool'], self.frame['bool'].values.mean())
def test_stats_mixed_type(self):
    """Row-wise std/var/mean/skew on a mixed frame don't raise (smoke)."""
    self.mixed_frame.std(1)
    self.mixed_frame.var(1)
    self.mixed_frame.mean(1)
    self.mixed_frame.skew(1)
def test_median_corner(self):
    """median() on an integer frame agrees with np.median (with
    skipna=False semantics in the reference)."""
    def wrapper(x):
        if isnull(x).any():
            return np.nan
        return np.median(x)
    self._check_stat_op('median', wrapper, frame=self.intframe,
                        check_dtype=False, check_dates=True)
def test_quantile(self):
    """quantile(): agreement with np.percentile, empty frame, non-numeric
    exclusion, axis=1, and list-of-quantiles output."""
    from numpy import percentile
    q = self.tsframe.quantile(0.1, axis=0)
    self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
    q = self.tsframe.quantile(0.9, axis=1)
    q = self.intframe.quantile(0.1)
    self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
    # test degenerate case
    q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
    assert(np.isnan(q['x']) and np.isnan(q['y']))
    # non-numeric exclusion
    df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})
    rs = df.quantile(0.5)
    xp = df.median()
    assert_series_equal(rs, xp)
    # axis
    df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
    result = df.quantile(.5, axis=1)
    expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
    assert_series_equal(result, expected)
    result = df.quantile([.5, .75], axis=1)
    expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
                          3: [3.5, 3.75]}, index=[0.5, 0.75])
    assert_frame_equal(result, expected, check_index_type=True)
    # We may want to break API in the future to change this
    # so that we exclude non-numeric along the same axis
    # See GH #7312
    df = DataFrame([[1, 2, 3],
                    ['a', 'b', 4]])
    result = df.quantile(.5, axis=1)
    expected = Series([3., 4.], index=[0, 1])
    assert_series_equal(result, expected)
def test_quantile_axis_parameter(self):
    """quantile() accepts numeric and string axis aliases; invalid axes
    raise ValueError (GH 9543/9544)."""
    # GH 9543/9544
    df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
    result = df.quantile(.5, axis=0)
    expected = Series([2., 3.], index=["A", "B"])
    assert_series_equal(result, expected)
    expected = df.quantile(.5, axis="index")
    assert_series_equal(result, expected)
    result = df.quantile(.5, axis=1)
    expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
    assert_series_equal(result, expected)
    result = df.quantile(.5, axis="columns")
    assert_series_equal(result, expected)
    self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)
    self.assertRaises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_multi(self):
    """quantile() with a list of quantiles returns a frame indexed by the
    quantiles, along either axis; empty frames yield all-NaN rows."""
    df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
                   columns=['a', 'b', 'c'])
    result = df.quantile([.25, .5])
    expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
                         index=[.25, .5], columns=['a', 'b', 'c'])
    assert_frame_equal(result, expected)
    # axis = 1
    result = df.quantile([.25, .5], axis=1)
    expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
                         index=[.25, .5], columns=[0, 1, 2])
    # empty
    result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
    expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
                         index=[.1, .9])
    assert_frame_equal(result, expected)
def test_quantile_datetime(self):
    """quantile(): datetime columns are excluded by default but included
    with numeric_only=False (interpolating between timestamps)."""
    df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
    # exclude datetime
    result = df.quantile(.5)
    expected = Series([2.5], index=['b'])
    # datetime
    result = df.quantile(.5, numeric_only=False)
    expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
                      index=['a', 'b'])
    assert_series_equal(result, expected)
    # datetime w/ multi
    result = df.quantile([.5], numeric_only=False)
    expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
                         index=[.5], columns=['a', 'b'])
    assert_frame_equal(result, expected)
    # axis = 1
    df['c'] = pd.to_datetime(['2011', '2012'])
    result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
    expected = Series([Timestamp('2010-07-02 12:00:00'),
                       Timestamp('2011-07-02 12:00:00')],
                      index=[0, 1])
    assert_series_equal(result, expected)
    result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
    expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
                           Timestamp('2011-07-02 12:00:00')]],
                         index=[0.5], columns=[0, 1])
    assert_frame_equal(result, expected)
def test_quantile_invalid(self):
    """Quantiles outside [0, 1] (scalar or in a list) raise ValueError."""
    msg = 'percentiles should all be in the interval \\[0, 1\\]'
    for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
        with tm.assertRaisesRegexp(ValueError, msg):
            self.tsframe.quantile(invalid)
def test_cumsum(self):
    """cumsum() matches a per-Series apply along both axes and preserves
    the frame's shape."""
    # seed some NaNs so the skipna path is exercised
    self.tsframe.ix[5:10, 0] = nan
    self.tsframe.ix[10:15, 1] = nan
    self.tsframe.ix[15:, 2] = nan
    # axis = 0
    cumsum = self.tsframe.cumsum()
    expected = self.tsframe.apply(Series.cumsum)
    assert_frame_equal(cumsum, expected)
    # axis = 1
    cumsum = self.tsframe.cumsum(axis=1)
    expected = self.tsframe.apply(Series.cumsum, axis=1)
    assert_frame_equal(cumsum, expected)
    # works
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cumsum()
    # fix issue
    cumsum_xs = self.tsframe.cumsum(axis=1)
    self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
    """cumprod() matches a per-Series apply along both axes; also smoke
    tests int64 and int32 frames."""
    # seed some NaNs so the skipna path is exercised
    self.tsframe.ix[5:10, 0] = nan
    self.tsframe.ix[10:15, 1] = nan
    self.tsframe.ix[15:, 2] = nan
    # axis = 0
    cumprod = self.tsframe.cumprod()
    expected = self.tsframe.apply(Series.cumprod)
    assert_frame_equal(cumprod, expected)
    # axis = 1
    cumprod = self.tsframe.cumprod(axis=1)
    expected = self.tsframe.apply(Series.cumprod, axis=1)
    assert_frame_equal(cumprod, expected)
    # fix issue
    cumprod_xs = self.tsframe.cumprod(axis=1)
    self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
    # ints
    df = self.tsframe.fillna(0).astype(int)
    df.cumprod(0)
    df.cumprod(1)
    # ints32
    df = self.tsframe.fillna(0).astype(np.int32)
    df.cumprod(0)
    df.cumprod(1)
def test_rank(self):
    """rank() agrees with scipy.stats.rankdata (NaNs stay NaN); integer
    frames rank like their float equivalents."""
    tm._skip_if_no_scipy()
    from scipy.stats import rankdata
    # seed NaNs at different strides in each column
    self.frame['A'][::2] = np.nan
    self.frame['B'][::3] = np.nan
    self.frame['C'][::4] = np.nan
    self.frame['D'][::5] = np.nan
    ranks0 = self.frame.rank()
    ranks1 = self.frame.rank(1)
    mask = np.isnan(self.frame.values)
    # fill NaN with inf so rankdata can run, then re-mask the result
    fvals = self.frame.fillna(np.inf).values
    exp0 = np.apply_along_axis(rankdata, 0, fvals)
    exp0[mask] = np.nan
    exp1 = np.apply_along_axis(rankdata, 1, fvals)
    exp1[mask] = np.nan
    assert_almost_equal(ranks0.values, exp0)
    assert_almost_equal(ranks1.values, exp1)
    # integers
    df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
    result = df.rank()
    exp = df.astype(float).rank()
    assert_frame_equal(result, exp)
    result = df.rank(1)
    exp = df.astype(float).rank(1)
    assert_frame_equal(result, exp)
def test_rank2(self):
    """rank(): pct=True, object/string columns via numeric_only=False,
    datetimes, mixed frames, and near-equal float ties."""
    from datetime import datetime
    df = DataFrame([[1, 3, 2], [1, 2, 3]])
    # pct=True divides each rank by the group size
    expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
    result = df.rank(1, pct=True)
    assert_frame_equal(result, expected)
    df = DataFrame([[1, 3, 2], [1, 2, 3]])
    expected = df.rank(0) / 2.0
    result = df.rank(0, pct=True)
    assert_frame_equal(result, expected)
    # strings rank lexicographically when numeric_only=False
    df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
    expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
    result = df.rank(1, numeric_only=False)
    assert_frame_equal(result, expected)
    expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
    result = df.rank(0, numeric_only=False)
    assert_frame_equal(result, expected)
    df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
    expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
    result = df.rank(1, numeric_only=False)
    assert_frame_equal(result, expected)
    expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
    result = df.rank(0, numeric_only=False)
    assert_frame_equal(result, expected)
    # f7u12, this does not work without extensive workaround
    data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
            [datetime(2000, 1, 2), datetime(2000, 1, 3),
             datetime(2000, 1, 1)]]
    df = DataFrame(data)
    # check the rank
    expected = DataFrame([[2., nan, 1.],
                          [2., 3., 1.]])
    result = df.rank(1, numeric_only=False)
    assert_frame_equal(result, expected)
    # mixed-type frames
    self.mixed_frame['datetime'] = datetime.now()
    self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
    result = self.mixed_frame.rank(1)
    expected = self.mixed_frame.rank(1, numeric_only=True)
    assert_frame_equal(result, expected)
    # ties between values that differ below float precision
    df = DataFrame({"a":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})
    exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})
    assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
    """rank() with na_option='top'/'bottom', ascending and descending,
    compared against scipy rankdata on NaN-substituted values."""
    tm._skip_if_no_scipy()
    from scipy.stats import rankdata
    # seed NaNs at different strides in each column
    self.frame['A'][::2] = np.nan
    self.frame['B'][::3] = np.nan
    self.frame['C'][::4] = np.nan
    self.frame['D'][::5] = np.nan
    # bottom
    ranks0 = self.frame.rank(na_option='bottom')
    ranks1 = self.frame.rank(1, na_option='bottom')
    # NaN -> +inf simulates 'bottom' (largest rank) for ascending order
    fvals = self.frame.fillna(np.inf).values
    exp0 = np.apply_along_axis(rankdata, 0, fvals)
    exp1 = np.apply_along_axis(rankdata, 1, fvals)
    assert_almost_equal(ranks0.values, exp0)
    assert_almost_equal(ranks1.values, exp1)
    # top
    ranks0 = self.frame.rank(na_option='top')
    ranks1 = self.frame.rank(1, na_option='top')
    # NaN -> (column min - 1) simulates 'top' (smallest rank)
    fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
    fval1 = self.frame.T
    fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
    fval1 = fval1.fillna(np.inf).values
    exp0 = np.apply_along_axis(rankdata, 0, fval0)
    exp1 = np.apply_along_axis(rankdata, 1, fval1)
    assert_almost_equal(ranks0.values, exp0)
    assert_almost_equal(ranks1.values, exp1)
    # descending
    # bottom
    ranks0 = self.frame.rank(na_option='top', ascending=False)
    ranks1 = self.frame.rank(1, na_option='top', ascending=False)
    # negating the values reverses the order for rankdata
    fvals = self.frame.fillna(np.inf).values
    exp0 = np.apply_along_axis(rankdata, 0, -fvals)
    exp1 = np.apply_along_axis(rankdata, 1, -fvals)
    assert_almost_equal(ranks0.values, exp0)
    assert_almost_equal(ranks1.values, exp1)
    # descending
    # top
    ranks0 = self.frame.rank(na_option='bottom', ascending=False)
    ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
    fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
    fval1 = self.frame.T
    fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
    fval1 = fval1.fillna(np.inf).values
    exp0 = np.apply_along_axis(rankdata, 0, -fval0)
    exp1 = np.apply_along_axis(rankdata, 1, -fval1)
    assert_almost_equal(ranks0.values, exp0)
    assert_almost_equal(ranks1.values, exp1)
def test_axis_aliases(self):
    """axis='index'/'columns' string aliases behave like axis=0/1."""
    f = self.frame
    # reg name
    expected = f.sum(axis=0)
    result = f.sum(axis='index')
    assert_series_equal(result, expected)
    expected = f.sum(axis=1)
    result = f.sum(axis='columns')
    assert_series_equal(result, expected)
def test_combine_first_mixed(self):
    """combine_first() on mixed str/int frames with disjoint indexes
    does not raise (smoke test)."""
    a = Series(['a', 'b'], index=lrange(2))
    b = Series(lrange(2), index=lrange(2))
    f = DataFrame({'A': a, 'B': b})
    a = Series(['a', 'b'], index=lrange(5, 7))
    b = Series(lrange(2), index=lrange(5, 7))
    g = DataFrame({'A': a, 'B': b})
    combined = f.combine_first(g)
def test_more_asMatrix(self):
    """as_matrix() on a mixed frame keeps all columns."""
    values = self.mixed_frame.as_matrix()
    self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
def test_reindex_boolean(self):
    """Reindexing a bool frame onto missing labels upcasts to object
    (so NaN can be held) on both axes."""
    frame = DataFrame(np.ones((10, 2), dtype=bool),
                      index=np.arange(0, 20, 2),
                      columns=[0, 2])
    reindexed = frame.reindex(np.arange(10))
    self.assertEqual(reindexed.values.dtype, np.object_)
    self.assertTrue(isnull(reindexed[0][1]))
    reindexed = frame.reindex(columns=lrange(3))
    self.assertEqual(reindexed.values.dtype, np.object_)
    self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
    """Column reindex keeps requested object columns and drops the rest."""
    kept = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
    self.assertIn('foo', kept)
    dropped = self.mixed_frame.reindex(columns=['A', 'B'])
    self.assertNotIn('foo', dropped)
def test_reindex_corner(self):
    """Corner cases: reindexing an empty frame; int->float NaN upcast."""
    new_cols = Index(['a', 'b', 'c'])
    empty = self.empty.reindex(index=[1, 2, 3])
    result = empty.reindex(columns=new_cols)
    self.assertTrue(result.columns.equals(new_cols))
    # reindexing an int frame to include a missing column yields float NaNs
    smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
    self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
    """reindex_axis is equivalent to reindex on the corresponding axis."""
    cols = ['A', 'B', 'E']
    assert_frame_equal(self.intframe.reindex_axis(cols, axis=1),
                       self.intframe.reindex(columns=cols))
    rows = self.intframe.index[0:5]
    assert_frame_equal(self.intframe.reindex_axis(rows, axis=0),
                       self.intframe.reindex(index=rows))
    # an axis number outside 0/1 must raise
    self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
    # no-op case: reindexing to the existing columns leaves the frame equal
    same_cols = self.frame.columns.copy()
    result = self.frame.reindex_axis(same_cols, axis=1)
    assert_frame_equal(result, self.frame)
def test_reindex_with_nans(self):
    """Label-based reindex works when the index itself contains NaN."""
    df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
                   columns=['a', 'b'],
                   index=[100.0, 101.0, np.nan, 102.0, 103.0])
    # each label list selects the matching positional rows
    for labels, positions in [([101.0, 102.0, 103.0], [1, 3, 4]),
                              ([103.0], [4]),
                              ([101.0], [1])]:
        result = df.reindex(index=labels)
        assert_frame_equal(result, df.iloc[positions])
def test_reindex_multi(self):
    """Simultaneous row/column reindex equals two chained reindexes."""
    def check_both_ways(frame, rows, cols):
        # one combined call vs. two successive single-axis calls
        assert_frame_equal(frame.reindex(rows, cols),
                           frame.reindex(rows).reindex(columns=cols))
    check_both_ways(DataFrame(np.random.randn(3, 3)),
                    lrange(4), lrange(4))
    check_both_ways(DataFrame(np.random.randint(0, 10, (3, 3))),
                    lrange(4), lrange(4))
    check_both_ways(DataFrame(np.random.randint(0, 10, (3, 3))),
                    lrange(2), lrange(2))
    # complex-valued frame, keyword form
    df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
    result = df.reindex(index=[0, 1], columns=['a', 'b'])
    expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
    assert_frame_equal(result, expected)
def test_rename_objects(self):
    """rename(columns=func) maps the function over the column labels."""
    upper = self.mixed_frame.rename(columns=str.upper)
    self.assertIn('FOO', upper)
    self.assertNotIn('foo', upper)
def test_fill_corner(self):
    # Punch NaN holes into two columns, then check fillna repairs them.
    self.mixed_frame.ix[5:20,'foo'] = nan
    self.mixed_frame.ix[-10:,'A'] = nan
    filled = self.mixed_frame.fillna(value=0)
    self.assertTrue((filled.ix[5:20,'foo'] == 0).all())
    del self.mixed_frame['foo']
    # Corner case: filling a frame with zero columns must not raise.
    # NOTE(review): 'result' is never asserted -- smoke test only.
    empty_float = self.frame.reindex(columns=[])
    result = empty_float.fillna(value=0)
def test_count_objects(self):
    """count() agrees for two frames built from the same series dict."""
    left = DataFrame(self.mixed_frame._series)
    right = DataFrame(self.mixed_frame._series)
    # both axis-0 and axis-1 counts must match
    tm.assert_series_equal(left.count(), right.count())
    tm.assert_series_equal(left.count(1), right.count(1))
def test_cumsum_corner(self):
    # Smoke test: cumsum on a small integer frame must not raise.
    # NOTE(review): 'result' is never asserted -- confirm this is intended.
    dm = DataFrame(np.arange(20).reshape(4, 5),
                   index=lrange(4), columns=lrange(5))
    result = dm.cumsum()
#----------------------------------------------------------------------
# Stacking / unstacking
def test_stack_unstack(self):
    """stack followed by unstack round-trips the frame."""
    stacked = self.frame.stack()
    stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
    # default unstack moves the innermost level back to the columns
    assert_frame_equal(stacked.unstack(), self.frame)
    assert_frame_equal(stacked_df.unstack()['bar'], self.frame)
    # unstacking level 0 instead yields the transpose
    assert_frame_equal(stacked.unstack(0).T, self.frame)
    assert_frame_equal(stacked_df.unstack(0)['bar'].T, self.frame)
def test_stack_ints(self):
    """Stacking several int levels equals stacking them one at a time."""
    columns = MultiIndex.from_tuples(
        list(itertools.product(range(3), repeat=3)))
    df = DataFrame(np.random.randn(30, 27), columns=columns)
    one_by_one = df.stack(level=1).stack(level=1)
    # positive level numbers
    assert_frame_equal(df.stack(level=[1, 2]), one_by_one)
    # negative level numbers address the same levels
    assert_frame_equal(df.stack(level=[-2, -1]), one_by_one)
    # same property when the levels carry integer names
    named = df.copy()
    named.columns.set_names(range(3), inplace=True)
    assert_frame_equal(named.stack(level=[1, 2]),
                       named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
    # Stacking by a mix of level *names* and level *numbers*.
    columns = MultiIndex.from_tuples(
        [('A', 'cat', 'long'), ('B', 'cat', 'long'),
         ('A', 'dog', 'short'), ('B', 'dog', 'short')],
        names=['exp', 'animal', 'hair_length']
    )
    df = DataFrame(randn(4, 4), columns=columns)
    # reference results obtained with pure name-based stacking
    animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
    exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
    # GH #8584: Need to check that stacking works when a number
    # is passed that is both a level name and in the range of
    # the level numbers
    df2 = df.copy()
    df2.columns.names = ['exp', 'animal', 1]
    assert_frame_equal(df2.stack(level=['animal', 1]),
                       animal_hair_stacked, check_names=False)
    assert_frame_equal(df2.stack(level=['exp', 1]),
                       exp_hair_stacked, check_names=False)
    # When mixed types are passed and the ints are not level
    # names, raise
    self.assertRaises(ValueError, df2.stack, level=['animal', 0])
    # GH #8584: Having 0 in the level names could raise a
    # strange error about lexsort depth
    df3 = df.copy()
    df3.columns.names = ['exp', 'animal', 0]
    assert_frame_equal(df3.stack(level=['animal', 0]),
                       animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
    # Integer level *names* must be resolved as names, not positions.
    columns = MultiIndex.from_tuples(
        [('A', 'cat', 'long'), ('B', 'cat', 'long'),
         ('A', 'dog', 'short'), ('B', 'dog', 'short')],
        names=['exp', 'animal', 'hair_length']
    )
    df = DataFrame(randn(4, 4), columns=columns)
    # reference results via string level names
    exp_animal_stacked = df.stack(level=['exp', 'animal'])
    animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
    exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
    # int names that happen to equal their positions
    df2 = df.copy()
    df2.columns.names = [0, 1, 2]
    assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
                       check_names=False )
    assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
                       check_names=False)
    assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
                       check_names=False)
    # Out-of-order int column names
    df3 = df.copy()
    df3.columns.names = [2, 0, 1]
    assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
                       check_names=False)
    assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
                       check_names=False)
    assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
                       check_names=False)
def test_unstack_bool(self):
    # Unstacking a boolean frame introduces NaN holes, so the values
    # upcast to object dtype.
    df = DataFrame([False, False],
                   index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
                   columns=['col'])
    rs = df.unstack()
    xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
                            dtype=object),
                   index=['a', 'b'],
                   columns=MultiIndex.from_arrays([['col', 'col'],
                                                   ['c', 'l']]))
    assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
    # GH9856
    # Level numbers passed to stack after an unstack must bind to the
    # *current* levels of the result, not the original index's levels.
    mi = pd.MultiIndex(
        levels=[[u('foo'), u('bar')], [u('one'), u('two')],
                [u('a'), u('b')]],
        labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
        names=[u('first'), u('second'), u('third')])
    s = pd.Series(0, index=mi)
    # unstack levels 1 and 2, then re-stack level 0 of the columns
    result = s.unstack([1, 2]).stack(0)
    expected_mi = pd.MultiIndex(
        levels=[['foo', 'bar'], ['one', 'two']],
        labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
        names=['first', 'second'])
    expected = pd.DataFrame(np.array([[np.nan, 0],
                                      [0, np.nan],
                                      [np.nan, 0],
                                      [0, np.nan]],
                                     dtype=np.float64),
                            index=expected_mi,
                            columns=pd.Index(['a', 'b'], name='third'))
    assert_frame_equal(result, expected)
def test_unstack_to_series(self):
    # check reversibility
    data = self.frame.unstack()
    self.assertTrue(isinstance(data, Series))
    undo = data.unstack().T
    assert_frame_equal(undo, self.frame)
    # check NA handling
    data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
    data.index = Index(['a', 'b', 'c'])
    result = data.unstack()
    midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
                      labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
    expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
    assert_series_equal(result, expected)
    # check composability of unstack: applying unstack an even number
    # of times returns to the original frame
    old_data = data.copy()
    for _ in range(4):
        data = data.unstack()
    assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
    # GH 2929
    # unstack must preserve per-column dtypes where possible
    rows = [[1, 1, 3, 4],
            [1, 2, 3, 4],
            [2, 1, 3, 4],
            [2, 2, 3, 4]]
    df = DataFrame(rows, columns=list('ABCD'))
    result = df.get_dtype_counts()
    expected = Series({'int64' : 4})
    assert_series_equal(result, expected)
    # single dtype
    df2 = df.set_index(['A','B'])
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'int64' : 4})
    assert_series_equal(result, expected)
    # mixed
    df2 = df.set_index(['A','B'])
    df2['C'] = 3.
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'int64' : 2, 'float64' : 2})
    assert_series_equal(result, expected)
    df2['D'] = 'foo'
    df3 = df2.unstack('B')
    result = df3.get_dtype_counts()
    expected = Series({'float64' : 2, 'object' : 2})
    assert_series_equal(result, expected)
    # GH7405
    # int64 column must stay int64 after unstack when no NaNs appear
    for c, d in (np.zeros(5), np.zeros(5)), \
                (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
        df = DataFrame({'A': ['a']*5, 'C':c, 'D':d,
                        'B':pd.date_range('2012-01-01', periods=5)})
        right = df.iloc[:3].copy(deep=True)
        df = df.set_index(['A', 'B'])
        df['D'] = df['D'].astype('int64')
        left = df.iloc[:3].unstack(0)
        right = right.set_index(['A', 'B']).unstack(0)
        right[('D', 'a')] = right[('D', 'a')].astype('int64')
        self.assertEqual(left.shape, (3, 2))
        tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
    """unstack/stack must reject duplicated index level names."""
    dup_idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
                                     names=['c1', 'c1'])
    frame = DataFrame([1, 2], index=dup_idx)
    # selecting the level by its ambiguous name must raise
    with tm.assertRaises(ValueError):
        frame.unstack('c1')
    with tm.assertRaises(ValueError):
        frame.T.stack('c1')
def test_unstack_nan_index(self):  # GH7466
    # Unstacking must be lossless when index levels contain NaN.
    # Cell values are '.'-joined label strings, so each cell can be
    # decomposed and checked against its row/column labels.
    cast = lambda val: '{0:1}'.format('' if val != val else val)
    nan = np.nan
    def verify(df):
        # every non-null cell must split into exactly its row labels
        # plus its column labels
        mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
        rows, cols = df.notnull().values.nonzero()
        for i, j in zip(rows, cols):
            left = sorted(df.iloc[i, j].split('.'))
            right = mk_list(df.index[i]) + mk_list(df.columns[j])
            right = sorted(list(map(cast, right)))
            self.assertEqual(left, right)
    df = DataFrame({'jim':['a', 'b', nan, 'd'],
                    'joe':['w', 'x', 'y', 'z'],
                    'jolie':['a.w', 'b.x', ' .y', 'd.z']})
    # unstacking either level ordering gives transposed-equal results
    left = df.set_index(['jim', 'joe']).unstack()['jolie']
    right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
    assert_frame_equal(left, right)
    for idx in permutations(df.columns[:2]):
        mi = df.set_index(list(idx))
        for lev in range(2):
            udf = mi.unstack(level=lev)
            self.assertEqual(udf.notnull().values.sum(), len(df))
            verify(udf['jolie'])
    # larger frame with NaNs in two index levels
    df = DataFrame({'1st':['d'] * 3 + [nan] * 5 + ['a'] * 2 +
                          ['c'] * 3 + ['e'] * 2 + ['b'] * 5,
                    '2nd':['y'] * 2 + ['w'] * 3 + [nan] * 3 +
                          ['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
                    '3rd':[67,39,53,72,57,80,31,18,11,30,59,
                           50,62,59,76,52,14,53,60,51]})
    df['4th'], df['5th'] = \
        df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
        df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
    for idx in permutations(['1st', '2nd', '3rd']):
        mi = df.set_index(list(idx))
        for lev in range(3):
            udf = mi.unstack(level=lev)
            self.assertEqual(udf.notnull().values.sum(), 2 * len(df))
            for col in ['4th', '5th']:
                verify(udf[col])
    # GH7403
    df = pd.DataFrame({'A': list('aaaabbbb'),'B':range(8), 'C':range(8)})
    df.iloc[3, 1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)
    vals = [[3, 0, 1, 2, nan, nan, nan, nan],
            [nan, nan, nan, nan, 4, 5, 6, 7]]
    vals = list(map(list, zip(*vals)))
    idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)
    # same, but with duplicated 'B' values across groups
    df = DataFrame({'A': list('aaaabbbb'), 'B':list(range(4))*2,
                    'C':range(8)})
    df.iloc[2,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)
    vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])
    idx = Index([nan, 0, 1, 2, 3], name='B')
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)
    df = pd.DataFrame({'A': list('aaaabbbb'),'B':list(range(4))*2,
                       'C':range(8)})
    df.iloc[3,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack(0)
    vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
    cols = MultiIndex(levels=[['C'], ['a', 'b']],
                      labels=[[0, 0], [0, 1]],
                      names=[None, 'A'])
    idx = Index([nan, 0, 1, 2, 3], name='B')
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)
    # GH7401
    # NaN in a datetime level (label -1 encodes the missing entry)
    df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C':np.arange(10),
                       'B':date_range('2012-01-01', periods=5).tolist()*2 })
    df.iloc[3,1] = np.NaN
    left = df.set_index(['A', 'B']).unstack()
    vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
    idx = Index(['a', 'b'], name='A')
    cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
                      labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
                      names=[None, 'B'])
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)
    # GH4862
    vals = [['Hg', nan, nan, 680585148],
            ['U', 0.0, nan, 680585148],
            ['Pb', 7.07e-06, nan, 680585148],
            ['Sn', 2.3614e-05, 0.0133, 680607017],
            ['Ag', 0.0, 0.0133, 680607017],
            ['Hg', -0.00015, 0.0133, 680607017]]
    df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
                   index=[17263, 17264, 17265, 17266, 17267, 17268])
    left = df.copy().set_index(['s_id','dosage','agent']).unstack()
    vals = [[nan, nan, 7.07e-06, nan, 0.0],
            [0.0, -0.00015, nan, 2.3614e-05, nan]]
    idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
                     labels=[[0, 1], [-1, 0]],
                     names=['s_id', 'dosage'])
    cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
                      labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
                      names=[None, 'agent'])
    right = DataFrame(vals, columns=cols, index=idx)
    assert_frame_equal(left, right)
    # same expected result after dropping the first row
    left = df.ix[17264:].copy().set_index(['s_id','dosage','agent'])
    assert_frame_equal(left.unstack(), right)
    # GH9497 - multiple unstack with nulls
    df = DataFrame({'1st':[1, 2, 1, 2, 1, 2],
                    '2nd':pd.date_range('2014-02-01', periods=6, freq='D'),
                    'jim':100 + np.arange(6),
                    'joe':(np.random.randn(6) * 10).round(2)})
    df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
    df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
    df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
    left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
    self.assertEqual(left.notnull().values.sum(), 2 * len(df))
    # every original value must be retrievable at its unstacked location
    for col in ['jim', 'joe']:
        for _, r in df.iterrows():
            key = r['1st'], (col, r['2nd'], r['3rd'])
            self.assertEqual(r[col], left.loc[key])
def test_stack_datetime_column_multiIndex(self):
    """Stacking works when a column MultiIndex level holds datetimes (GH 8039)."""
    ts = datetime(2014, 1, 1)
    df = DataFrame([1, 2, 3, 4],
                   columns=MultiIndex.from_tuples([(ts, 'A', 'B')]))
    result = df.stack()
    expected_index = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
    expected_columns = MultiIndex.from_tuples([(ts, 'A')])
    expected = DataFrame([1, 2, 3, 4], index=expected_index,
                         columns=expected_columns)
    assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
    # GH 8844
    # Stacking slices of a MultiIndex (which may not carry all levels).
    def _test_stack_with_multiindex(multiindex):
        df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
                       columns=multiindex)
        for level in (-1, 0, 1, [0, 1], [1, 0]):
            result = df.stack(level=level, dropna=False)
            if isinstance(level, int):
                # Stacking a single level should not make any all-NaN rows,
                # so df.stack(level=level, dropna=False) should be the same
                # as df.stack(level=level, dropna=True).
                expected = df.stack(level=level, dropna=True)
                if isinstance(expected, Series):
                    assert_series_equal(result, expected)
                else:
                    assert_frame_equal(result, expected)
            # rebuilding the columns from tuples must not change the result
            df.columns = MultiIndex.from_tuples(df.columns.get_values(),
                                                names=df.columns.names)
            expected = df.stack(level=level, dropna=False)
            if isinstance(expected, Series):
                assert_series_equal(result, expected)
            else:
                assert_frame_equal(result, expected)
    full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
                                              ('A', 'y'),
                                              ('C', 'x'), ('C', 'u')],
                                             names=['Upper', 'Lower'])
    # exercise many column subsets, each also in reversed order
    for multiindex_columns in ([0, 1, 2, 3, 4],
                               [0, 1, 2, 3], [0, 1, 2, 4],
                               [0, 1, 2], [1, 2, 3], [2, 3, 4],
                               [0, 1], [0, 2], [0, 3],
                               [0], [2], [4]):
        _test_stack_with_multiindex(full_multiindex[multiindex_columns])
        if len(multiindex_columns) > 1:
            multiindex_columns.reverse()
            _test_stack_with_multiindex(full_multiindex[multiindex_columns])
    # dropna=False keeps rows that are all-NaN after stacking
    df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
    result = df.stack(dropna=False)
    expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
                         index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],
                                          labels=[[0, 0, 1, 1], [1, 3, 1, 3]],
                                          names=[None, 'Lower']),
                         columns=Index(['B', 'C'], name='Upper'),
                         dtype=df.dtypes[0])
    assert_frame_equal(result, expected)
def test_repr_with_mi_nat(self):
    """repr renders a NaT entry in a MultiIndex level without raising."""
    frame = DataFrame({'X': [1, 2]},
                      index=[[pd.NaT, pd.Timestamp('20130101')],
                             ['a', 'b']])
    rendered = repr(frame)
    expected = ' X\nNaT a 1\n2013-01-01 b 2'
    nose.tools.assert_equal(rendered, expected)
def test_reset_index(self):
    # reset_index moves index levels into columns.
    stacked = self.frame.stack()[::2]
    stacked = DataFrame({'foo': stacked, 'bar': stacked})
    names = ['first', 'second']
    stacked.index.names = names
    deleveled = stacked.reset_index()
    # each index level becomes a column holding that level's values
    for i, (lev, lab) in enumerate(zip(stacked.index.levels,
                                       stacked.index.labels)):
        values = lev.take(lab)
        name = names[i]
        assert_almost_equal(values, deleveled[name])
    # unnamed levels get default 'level_N' column names
    stacked.index.names = [None, None]
    deleveled2 = stacked.reset_index()
    self.assert_numpy_array_equal(deleveled['first'],
                                  deleveled2['level_0'])
    self.assert_numpy_array_equal(deleveled['second'],
                                  deleveled2['level_1'])
    # default name assigned
    rdf = self.frame.reset_index()
    self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)
    # default name assigned, corner case: 'index' column already taken
    df = self.frame.copy()
    df['index'] = 'foo'
    rdf = df.reset_index()
    self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)
    # but this is ok
    self.frame.index.name = 'index'
    deleveled = self.frame.reset_index()
    self.assert_numpy_array_equal(deleveled['index'],
                                  self.frame.index.values)
    self.assert_numpy_array_equal(deleveled.index,
                                  np.arange(len(deleveled)))
    # preserve column names
    self.frame.columns.name = 'columns'
    resetted = self.frame.reset_index()
    self.assertEqual(resetted.columns.name, 'columns')
    # only remove certain columns
    frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
    rs = frame.reset_index(['A', 'B'])
    assert_frame_equal(rs, self.frame, check_names=False)  # TODO should reset_index check_names ?
    rs = frame.reset_index(['index', 'A', 'B'])
    assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
    rs = frame.reset_index(['index', 'A', 'B'])
    assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
    rs = frame.reset_index('A')
    xp = self.frame.reset_index().set_index(['index', 'B'])
    assert_frame_equal(rs, xp, check_names=False)
    # test resetting in place
    df = self.frame.copy()
    resetted = self.frame.reset_index()
    df.reset_index(inplace=True)
    assert_frame_equal(df, resetted, check_names=False)
    # drop=True discards the level instead of inserting it as a column
    frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
    rs = frame.reset_index('A', drop=True)
    xp = self.frame.copy()
    del xp['A']
    xp = xp.set_index(['B'], append=True)
    assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
    """reset_index keeps a float index's dtype in the new column."""
    time = np.arange(0.0, 10, np.sqrt(2) / 2)
    s1 = Series((9.81 * time ** 2) / 2,
                index=Index(time, name='time'),
                name='speed')
    # from a Series
    self.assertEqual(s1.reset_index()['time'].dtype, np.float64)
    # from a DataFrame wrapping the same Series
    frame = DataFrame(s1)
    self.assertEqual(frame.reset_index()['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
    # reset_index with MultiIndex *columns*: col_fill / col_level control
    # how the inserted index column is labeled across column levels.
    vals = np.random.randn(3, 3).astype(object)
    idx = ['x', 'y', 'z']
    full = np.hstack(([[x] for x in idx], vals))
    df = DataFrame(vals, Index(idx, name='a'),
                   columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
    # default: other levels of the new column are filled with ''
    rs = df.reset_index()
    xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
                                  ['', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
    # col_fill=None repeats the index name on every column level
    rs = df.reset_index(col_fill=None)
    xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
                                  ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
    # col_level chooses which level receives the index name
    rs = df.reset_index(col_level=1, col_fill='blah')
    xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
                                  ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
    # same checks when resetting a single level of a MultiIndex
    df = DataFrame(vals,
                   MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
                                          names=['d', 'a']),
                   columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
    rs = df.reset_index('a', )
    xp = DataFrame(full, Index([0, 1, 2], name='d'),
                   columns=[['a', 'b', 'b', 'c'],
                            ['', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
    rs = df.reset_index('a', col_fill=None)
    xp = DataFrame(full, Index(lrange(3), name='d'),
                   columns=[['a', 'b', 'b', 'c'],
                            ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
    rs = df.reset_index('a', col_fill='blah', col_level=1)
    xp = DataFrame(full, Index(lrange(3), name='d'),
                   columns=[['blah', 'b', 'b', 'c'],
                            ['a', 'mean', 'median', 'mean']])
    assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
    """reset_index must not choke when columns are a DatetimeIndex (GH5818)."""
    cols = pd.date_range('1/1/2013', '1/2/2013')
    frame = pd.DataFrame([[1, 2], [3, 4]], columns=cols, index=['A', 'B'])
    # the new 'index' column is prepended to the datetime columns
    expected_cols = ['index', datetime(2013, 1, 1), datetime(2013, 1, 2)]
    expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
                            columns=expected_cols)
    assert_frame_equal(frame.reset_index(), expected)
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
    """Selecting only numeric columns yields a float64 matrix."""
    self.frame['foo'] = 'bar'  # add a non-numeric column first
    mat = self.frame.as_matrix(['A', 'B', 'C', 'D'])
    self.assertEqual(mat.dtype, np.float64)
def test_as_matrix_lcd(self):
    # as_matrix picks the lowest-common dtype of the selected columns.
    # NOTE(review): the exact per-column dtypes of mixed_float/mixed_int
    # come from the unseen setUp fixture -- verify against it.
    # mixed lcd
    values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])
    self.assertEqual(values.dtype, np.float64)
    values = self.mixed_float.as_matrix(['A', 'B', 'C' ])
    self.assertEqual(values.dtype, np.float32)
    values = self.mixed_float.as_matrix(['C'])
    self.assertEqual(values.dtype, np.float16)
    values = self.mixed_int.as_matrix(['A','B','C','D'])
    self.assertEqual(values.dtype, np.int64)
    values = self.mixed_int.as_matrix(['A','D'])
    self.assertEqual(values.dtype, np.int64)
    # guess all ints are cast to uints....
    values = self.mixed_int.as_matrix(['A','B','C'])
    self.assertEqual(values.dtype, np.int64)
    values = self.mixed_int.as_matrix(['A','C'])
    self.assertEqual(values.dtype, np.int32)
    values = self.mixed_int.as_matrix(['C','D'])
    self.assertEqual(values.dtype, np.int64)
    values = self.mixed_int.as_matrix(['A'])
    self.assertEqual(values.dtype, np.int32)
    values = self.mixed_int.as_matrix(['C'])
    self.assertEqual(values.dtype, np.uint8)
def test_constructor_with_convert(self):
    """DataFrame construction infers column dtypes (lib.maybe_convert_objects, #2845).

    Each case is (input data, expected data, expected numpy dtype); the
    constructed column must equal a Series built with that dtype.
    """
    cases = [
        # int64 boundary: 2**63 - 1 fits, 2**63 does not
        ([2 ** 63 - 1], [2 ** 63 - 1], np.int64),
        ([2 ** 63], [2 ** 63], np.object_),
        # datetime mixed with bool stays object
        ([datetime(2005, 1, 1), True], [datetime(2005, 1, 1), True],
         np.object_),
        # None promotes ints to float with NaN
        ([None, 1], [np.nan, 1], np.float_),
        ([1.0, 2], [1.0, 2], np.float_),
        # complex promotion rules
        ([1.0 + 2.0j, 3], [1.0 + 2.0j, 3], np.complex_),
        ([1.0 + 2.0j, 3.0], [1.0 + 2.0j, 3.0], np.complex_),
        ([1.0 + 2.0j, True], [1.0 + 2.0j, True], np.object_),
        # None becomes NaN within float/complex columns
        ([1.0, None], [1.0, np.nan], np.float_),
        ([1.0 + 2.0j, None], [1.0 + 2.0j, np.nan], np.complex_),
        # heterogeneous mixes fall back to object
        ([2.0, 1, True, None], [2.0, 1, True, None], np.object_),
        ([2.0, 1, datetime(2006, 1, 1), None],
         [2.0, 1, datetime(2006, 1, 1), None], np.object_),
    ]
    for data, expected_data, dtype in cases:
        result = DataFrame({'A': data})['A']
        expected = Series(np.asarray(expected_data, dtype), name='A')
        assert_series_equal(result, expected)
def test_construction_with_mixed(self):
    """Construction edge cases with mixed datetime/timedelta types."""
    # test construction edge cases with mixed types
    # f7u12, this does not work without extensive workaround
    data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
            [datetime(2000, 1, 2), datetime(2000, 1, 3),
             datetime(2000, 1, 1)]]
    df = DataFrame(data)
    # check dtypes: all three columns should infer as datetime64[ns]
    result = df.get_dtype_counts().sort_values()
    expected = Series({ 'datetime64[ns]' : 3 })
    # BUG FIX: 'expected' was computed but never compared against
    # 'result', so the dtype check was silently skipped.
    assert_series_equal(result, expected)
    # mixed-type frames: datetime/timedelta columns keep their dtypes
    self.mixed_frame['datetime'] = datetime.now()
    self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
    self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')
    self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')
    result = self.mixed_frame.get_dtype_counts().sort_values()
    expected = Series({ 'float64' : 4,
                        'object' : 1,
                        'datetime64[ns]' : 1,
                        'timedelta64[ns]' : 1}).sort_values()
    assert_series_equal(result,expected)
def test_construction_with_conversions(self):
    # convert from a numpy array of non-ns timedelta64
    arr = np.array([1,2,3],dtype='timedelta64[s]')
    s = Series(arr)
    expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))
    assert_series_equal(s,expected)
    # same conversion when assigning into a frame column
    df = DataFrame(index=range(3))
    df['A'] = arr
    expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},
                         index=range(3))
    assert_frame_equal(df,expected)
    # convert from a numpy array of non-ns datetime64
    #### note that creating a numpy datetime64 is in LOCAL time!!!!
    #### seems to work for M8[D], but not for M8[s]
    s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))
    assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))
    #s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
    #assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))
    # scalar Timestamp and datetime64[D] arrays convert on assignment
    expected = DataFrame({
        'dt1' : Timestamp('20130101'),
        'dt2' : date_range('20130101',periods=3),
        #'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
        },index=range(3))
    df = DataFrame(index=range(3))
    df['dt1'] = np.datetime64('2013-01-01')
    df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')
    #df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
    assert_frame_equal(df, expected)
def test_constructor_frame_copy(self):
    """copy=True detaches the new frame's data from its source frame."""
    duplicate = DataFrame(self.frame, copy=True)
    duplicate['A'] = 5
    # the mutation is visible in the copy ...
    self.assertTrue((duplicate['A'] == 5).all())
    # ... but must not leak back into the original
    self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
    """ndarray input is viewed by default and duplicated with copy=True."""
    shared = DataFrame(self.frame.values)
    self.frame.values[5] = 5
    # default: a view, so the change propagates
    self.assertTrue((shared.values[5] == 5).all())
    detached = DataFrame(self.frame.values, copy=True)
    self.frame.values[6] = 6
    # copy=True: isolated from further mutation
    self.assertFalse((detached.values[6] == 6).all())
def test_constructor_series_copy(self):
    """A Series passed via a dict is copied, not shared."""
    source = self.frame._series
    frame = DataFrame({'A': source['A']})
    frame['A'][:] = 5
    # the source Series must remain untouched
    self.assertFalse((source['A'] == 5).all())
def test_constructor_compound_dtypes(self):
    # GH 5191
    # compound dtypes should raise not-implementederror
    def f(dtype):
        # build a small 9-row frame with the requested dtype
        return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)),
                         columns=["A", "B", "C"], dtype=dtype)
    # a structured (compound) dtype is rejected
    self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")])
    # these work (though results may be unexpected)
    f('int64')
    f('float64')
    # 10822
    # invalid error message on dt inference
    if not is_platform_windows():
        f('M8[ns]')
def test_assign_columns(self):
    """Assigning a new columns list relabels columns without moving data."""
    self.frame['hi'] = 'there'
    relabeled = self.frame.copy()
    relabeled.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
    # data stays aligned positionally under the new labels
    assert_series_equal(self.frame['C'], relabeled['baz'],
                        check_names=False)
    assert_series_equal(self.frame['hi'], relabeled['foo2'],
                        check_names=False)
def test_columns_with_dups(self):
    # GH 3468 related
    # Frames with duplicate column labels must relabel, repr, and
    # index correctly across dtypes and internal blocks.
    # basic
    df = DataFrame([[1,2]], columns=['a','a'])
    df.columns = ['a','a.1']
    str(df)  # repr must not raise
    expected = DataFrame([[1,2]], columns=['a','a.1'])
    assert_frame_equal(df, expected)
    df = DataFrame([[1,2,3]], columns=['b','a','a'])
    df.columns = ['b','a','a.1']
    str(df)
    expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])
    assert_frame_equal(df, expected)
    # with a dup index
    df = DataFrame([[1,2]], columns=['a','a'])
    df.columns = ['b','b']
    str(df)
    expected = DataFrame([[1,2]], columns=['b','b'])
    assert_frame_equal(df, expected)
    # multi-dtype
    df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])
    df.columns = list('ABCDEFG')
    str(df)
    expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))
    assert_frame_equal(df, expected)
    # this is an error because we cannot disambiguate the dup columns
    self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))
    # dups across blocks
    df_float = DataFrame(np.random.randn(10, 3),dtype='float64')
    df_int = DataFrame(np.random.randn(10, 3),dtype='int64')
    df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
    df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
    df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
    df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
    # block-manager bookkeeping must cover every (duplicated) column
    self.assertEqual(len(df._data._blknos), len(df.columns))
    self.assertEqual(len(df._data._blklocs), len(df.columns))
    # testing iget
    for i in range(len(df.columns)):
        df.iloc[:,i]
    # dup columns across dtype GH 2079/2194
    vals = [[1, -1, 2.], [2, -2, 3.]]
    rs = DataFrame(vals, columns=['A', 'A', 'B'])
    xp = DataFrame(vals)
    xp.columns = ['A', 'A', 'B']
    assert_frame_equal(rs, xp)
def test_insert_column_bug_4032(self):
    """insert followed by rename must not corrupt the frame (GH4032)."""
    df = DataFrame({'b': [1.1, 2.2]})
    df = df.rename(columns={})  # no-op rename
    df.insert(0, 'a', [1, 2])
    renamed = df.rename(columns={})
    str(renamed)  # repr must not raise
    assert_frame_equal(renamed,
                       DataFrame([[1, 1.1], [2, 2.2]],
                                 columns=['a', 'b']))
    # insert a second column and repeat
    df.insert(0, 'c', [1.3, 2.3])
    renamed = df.rename(columns={})
    str(renamed)
    assert_frame_equal(renamed,
                       DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
                                 columns=['c', 'a', 'b']))
def test_cast_internals(self):
    """Constructing from the block manager with a dtype casts like from series."""
    for target_dtype in (int, np.int32):
        casted = DataFrame(self.frame._data, dtype=target_dtype)
        expected = DataFrame(self.frame._series, dtype=target_dtype)
        assert_frame_equal(casted, expected)
def test_consolidate(self):
    # consolidate() merges same-dtype blocks into a single block
    self.frame['E'] = 7.
    consolidated = self.frame.consolidate()
    self.assertEqual(len(consolidated._data.blocks), 1)
    # Ensure copy, do I want this?
    recons = consolidated.consolidate()
    self.assertIsNot(recons, consolidated)
    assert_frame_equal(recons, consolidated)
    # inplace=True consolidates the frame's own block manager
    self.frame['F'] = 8.
    self.assertEqual(len(self.frame._data.blocks), 3)
    self.frame.consolidate(inplace=True)
    self.assertEqual(len(self.frame._data.blocks), 1)
def test_consolidate_inplace(self):
    # Adding many columns one-by-one exercises in-place consolidation.
    # NOTE(review): 'frame' is assigned but never used, and the loop
    # mutates self.frame instead -- looks like a leftover; confirm intent.
    frame = self.frame.copy()
    # triggers in-place consolidation
    for letter in range(ord('A'), ord('Z')):
        self.frame[chr(letter)] = chr(letter)
def test_as_matrix_consolidate(self):
    """as_matrix forces consolidation of the block manager."""
    # appending a column leaves the frame's data unconsolidated
    self.frame['E'] = 7.
    self.assertFalse(self.frame._data.is_consolidated())
    _ = self.frame.as_matrix()
    # the values access must have consolidated the blocks
    self.assertTrue(self.frame._data.is_consolidated())
def test_modify_values(self):
    # Writing through the .values array must be reflected on re-read.
    self.frame.values[5] = 5
    self.assertTrue((self.frame.values[5] == 5).all())
    # unconsolidated
    # (adding a column first; the same write-through must still hold)
    self.frame['E'] = 7.
    self.frame.values[6] = 6
    self.assertTrue((self.frame.values[6] == 6).all())
def test_boolean_set_uncons(self):
    """Boolean-mask assignment works on an unconsolidated frame."""
    # appending a column leaves the frame unconsolidated
    self.frame['E'] = 7.
    # compute the expected result on a raw copy of the values
    target = self.frame.values.copy()
    target[target > 1] = 2
    # perform the same masked assignment through the DataFrame API
    self.frame[self.frame > 1] = 2
    assert_almost_equal(target, self.frame.values)
def test_xs_view(self):
    """xs returns a view: writing through it mutates the frame."""
    frame = DataFrame(np.arange(20.).reshape(4, 5),
                      index=lrange(4), columns=lrange(5))
    frame.xs(2)[:] = 10
    self.assertTrue((frame.xs(2) == 10).all())
def test_boolean_indexing(self):
    """Boolean-frame assignment ``df1[df1 > 2.0 * df2] = -1`` replaces only
    the matching cells; a wrong-length boolean key raises ValueError."""
    idx = lrange(3)
    cols = ['A', 'B', 'C']
    df1 = DataFrame(index=idx, columns=cols,
                    data=np.array([[0.0, 0.5, 1.0],
                                   [1.5, 2.0, 2.5],
                                   [3.0, 3.5, 4.0]],
                                  dtype=float))
    df2 = DataFrame(index=idx, columns=cols,
                    data=np.ones((len(idx), len(cols))))

    expected = DataFrame(index=idx, columns=cols,
                         data=np.array([[0.0, 0.5, 1.0],
                                        [1.5, 2.0, -1],
                                        [-1, -1, -1]], dtype=float))

    df1[df1 > 2.0 * df2] = -1
    assert_frame_equal(df1, expected)
    with assertRaisesRegexp(ValueError, 'Item wrong length'):
        df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
    """Boolean-mask setting works on an int/float frame, but raises
    TypeError once a string column makes the frame mixed-type."""
    df = DataFrame(
        {long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         long(1): {35: np.nan,
                   40: 0.32632316859446198,
                   43: np.nan,
                   49: 0.32632316859446198,
                   50: 0.39114724480578139},
         long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},
         long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
         'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})

    # mixed int/float ok
    df2 = df.copy()
    df2[df2 > 0.3] = 1
    expected = df.copy()
    # the four cells whose value exceeds 0.3 become 1
    expected.loc[40, 1] = 1
    expected.loc[49, 1] = 1
    expected.loc[50, 1] = 1
    expected.loc[35, 4] = 1
    assert_frame_equal(df2, expected)

    # adding an object column makes boolean setting illegal
    df['foo'] = 'test'
    with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
        df[df > 0.3] = 1
def test_sum_bools(self):
    """Summing a boolean frame row-wise counts True cells: an all-NaN
    10-column row of isnull() sums to 10."""
    df = DataFrame(index=lrange(1), columns=lrange(10))
    bools = isnull(df)
    self.assertEqual(bools.sum(axis=1)[0], 10)
def test_fillna_col_reordering(self):
    """fillna(method='ffill') must preserve the original column order
    (columns here are deliberately in reverse order COL.5..COL.1)."""
    idx = lrange(20)
    cols = ["COL." + str(i) for i in range(5, 0, -1)]
    data = np.random.rand(20, 5)
    df = DataFrame(index=lrange(20), columns=cols, data=data)
    filled = df.fillna(method='ffill')
    self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_take(self):
    """DataFrame.take on both axes: positive order, negative indices, and
    out-of-bounds errors, for homogeneous, mixed-dtype, and by-dtype frames."""
    # homogeneous
    #----------------------------------------
    order = [3, 1, 2, 0]
    for df in [self.frame]:

        result = df.take(order, axis=0)
        expected = df.reindex(df.index.take(order))
        assert_frame_equal(result, expected)

        # axis = 1
        result = df.take(order, axis=1)
        expected = df.ix[:, ['D', 'B', 'C', 'A']]
        assert_frame_equal(result, expected, check_names=False)

    # neg indicies
    order = [2, 1, -1]
    for df in [self.frame]:

        result = df.take(order, axis=0)
        expected = df.reindex(df.index.take(order))
        assert_frame_equal(result, expected)

        # axis = 1
        result = df.take(order, axis=1)
        expected = df.ix[:, ['C', 'B', 'D']]
        assert_frame_equal(result, expected, check_names=False)

    # illegal indices
    self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)
    self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)
    self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)
    self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)

    # mixed-dtype
    #----------------------------------------
    order = [4, 1, 2, 0, 3]
    for df in [self.mixed_frame]:

        result = df.take(order, axis=0)
        expected = df.reindex(df.index.take(order))
        assert_frame_equal(result, expected)

        # axis = 1
        result = df.take(order, axis=1)
        expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
        assert_frame_equal(result, expected)

    # neg indicies
    order = [4, 1, -2]
    for df in [self.mixed_frame]:

        result = df.take(order, axis=0)
        expected = df.reindex(df.index.take(order))
        assert_frame_equal(result, expected)

        # axis = 1
        result = df.take(order, axis=1)
        expected = df.ix[:, ['foo', 'B', 'D']]
        assert_frame_equal(result, expected)

    # by dtype
    order = [1, 2, 0, 3]
    for df in [self.mixed_float, self.mixed_int]:

        result = df.take(order, axis=0)
        expected = df.reindex(df.index.take(order))
        assert_frame_equal(result, expected)

        # axis = 1
        result = df.take(order, axis=1)
        expected = df.ix[:, ['B', 'C', 'A', 'D']]
        assert_frame_equal(result, expected)
def test_iterkv_deprecation(self):
    """Calling the deprecated iterkv() must emit a FutureWarning."""
    with tm.assert_produces_warning(FutureWarning):
        self.mixed_float.iterkv()
def test_iterkv_names(self):
    """Iterating a frame's (column, Series) pairs yields Series whose
    .name equals the column label."""
    for k, v in compat.iteritems(self.mixed_frame):
        self.assertEqual(v.name, k)
def test_series_put_names(self):
    """The internal ``_series`` dict maps each column label to a Series
    carrying that label as its .name."""
    series = self.mixed_frame._series
    for k, v in compat.iteritems(series):
        self.assertEqual(v.name, k)
def test_dot(self):
    """DataFrame.dot: matrix product vs np.dot, label alignment, Series and
    raw-array operands, and error paths for shape/label mismatches."""
    a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                  columns=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                  columns=['one', 'two'])

    result = a.dot(b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    assert_frame_equal(result, expected)

    # Check alignment: dotting with a row-reversed operand must realign on
    # labels and give the same answer.  BUG FIX: this previously computed
    # ``a.dot(b)`` again, so the alignment path was never exercised.
    b1 = b.reindex(index=reversed(b.index))
    result = a.dot(b1)
    assert_frame_equal(result, expected)

    # Check series argument: result is an unnamed Series matching the column
    result = a.dot(b['one'])
    assert_series_equal(result, expected['one'], check_names=False)
    self.assertTrue(result.name is None)

    result = a.dot(b1['one'])
    assert_series_equal(result, expected['one'], check_names=False)
    self.assertTrue(result.name is None)

    # can pass correct-length arrays
    row = a.ix[0].values

    result = a.dot(row)
    exp = a.dot(a.ix[0])
    assert_series_equal(result, exp)

    with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
        a.dot(row[:-1])

    a = np.random.rand(1, 5)
    b = np.random.rand(5, 1)
    A = DataFrame(a)
    B = DataFrame(b)

    # it works
    result = A.dot(b)

    # unaligned: shared labels are required, not just compatible shapes
    df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
    df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])

    assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
def test_idxmin(self):
    """idxmin over both axes and both skipna settings matches applying
    Series.idxmin column/row-wise; axis=2 raises ValueError."""
    frame = self.frame
    # inject NaNs so skipna actually matters
    frame.ix[5:10] = np.nan
    frame.ix[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, self.intframe]:
                result = df.idxmin(axis=axis, skipna=skipna)
                expected = df.apply(
                    Series.idxmin, axis=axis, skipna=skipna)
                assert_series_equal(result, expected)

    self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
    """idxmax over both axes and both skipna settings matches applying
    Series.idxmax column/row-wise; axis=2 raises ValueError."""
    frame = self.frame
    # inject NaNs so skipna actually matters
    frame.ix[5:10] = np.nan
    frame.ix[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, self.intframe]:
                result = df.idxmax(axis=axis, skipna=skipna)
                expected = df.apply(
                    Series.idxmax, axis=axis, skipna=skipna)
                assert_series_equal(result, expected)

    self.assertRaises(ValueError, frame.idxmax, axis=2)
def test_stale_cached_series_bug_473(self):
    """Regression test (GH473): a chained assignment through a cached column
    Series must be visible afterwards (no stale item cache).  Chained
    assignment is deliberate here, so the warning is suppressed."""
    # this is chained, but ok
    with option_context('chained_assignment', None):
        Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
                      columns=('e', 'f', 'g', 'h'))
        repr(Y)
        Y['e'] = Y['e'].astype('object')
        Y['g']['c'] = np.NaN  # chained write that used to be lost
        repr(Y)
        result = Y.sum()
        exp = Y['g'].sum()
        self.assertTrue(isnull(Y['g']['c']))
def test_index_namedtuple(self):
    """Namedtuple index labels: with tupleize_cols=False they stay scalar
    labels (no MultiIndex) and ``.ix`` can look one up directly."""
    from collections import namedtuple
    IndexType = namedtuple("IndexType", ["a", "b"])
    idx1 = IndexType("foo", "bar")
    idx2 = IndexType("baz", "bof")
    index = Index([idx1, idx2],
                  name="composite_index", tupleize_cols=False)
    df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
    result = df.ix[IndexType("foo", "bar")]["A"]
    self.assertEqual(result, 1)
def test_empty_nonzero(self):
    """``.empty``: False for a frame with rows; True for a frame whose rows
    were all dropped, and for its transpose."""
    df = DataFrame([1, 2, 3])
    self.assertFalse(df.empty)
    df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
    self.assertTrue(df.empty)
    self.assertTrue(df.T.empty)
def test_any_all(self):
    """Run the shared bool-reduction harness for any/all against np.any /
    np.all, then smoke-test row-wise any/all with and without bool_only."""
    self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
    self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)

    df = DataFrame(randn(10, 4)) > 0
    df.any(1)
    df.all(1)
    df.any(1, bool_only=True)
    df.all(1, bool_only=True)

    # skip pathological failure cases
    # class CantNonzero(object):

    #     def __nonzero__(self):
    #         raise ValueError

    # df[4] = CantNonzero()

    # it works!
    # df.any(1)
    # df.all(1)
    # df.any(1, bool_only=True)
    # df.all(1, bool_only=True)

    # df[4][4] = np.nan

    # df.any(1)
    # df.all(1)
    # df.any(1, bool_only=True)
    # df.all(1, bool_only=True)
def test_consolidate_datetime64(self):
    """Regression test for a numpy vstack bug: assigning tz-aware
    DatetimeIndex values back into datetime64 columns must round-trip."""
    # numpy vstack bug

    data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
    df = read_csv(StringIO(data), parse_dates=[0, 1])

    # build tz-aware (US/Eastern -> UTC) versions of both columns
    ser_starting = df.starting
    ser_starting.index = ser_starting.values
    ser_starting = ser_starting.tz_localize('US/Eastern')
    ser_starting = ser_starting.tz_convert('UTC')

    ser_ending = df.ending
    ser_ending.index = ser_ending.values
    ser_ending = ser_ending.tz_localize('US/Eastern')
    ser_ending = ser_ending.tz_convert('UTC')

    df.starting = ser_starting.index
    df.ending = ser_ending.index

    tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)
    tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                   has_bool_only=False):
    """Shared harness for boolean reductions (any/all).

    Parameters
    ----------
    name : str
        Method name on DataFrame (e.g. 'any', 'all').
    alternative : callable
        Reference ndarray reduction (e.g. np.any) to compare against.
    frame : DataFrame, optional
        Frame under test; defaults to ``self.frame > 0`` with NaNs injected.
    has_skipna : bool
        Whether the method accepts ``skipna``; drives which comparison runs.
    has_bool_only : bool
        Whether the method accepts ``bool_only``; exercised on a mixed frame.
    """
    if frame is None:
        frame = self.frame > 0
        # set some NAs (object dtype so NaN can live alongside bools)
        frame = DataFrame(frame.values.astype(object), frame.index,
                          frame.columns)
        frame.ix[5:10] = np.nan
        frame.ix[15:20, -2:] = np.nan

    f = getattr(frame, name)

    if has_skipna:
        def skipna_wrapper(x):
            # reference behavior with NaNs removed
            nona = x.dropna().values
            return alternative(nona)

        def wrapper(x):
            # reference behavior with NaNs kept (skipna=False path)
            return alternative(x.values)

        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        assert_series_equal(result0, frame.apply(wrapper))
        assert_series_equal(result1, frame.apply(wrapper, axis=1),
                            check_dtype=False)  # HACK: win32
    else:
        skipna_wrapper = alternative
        wrapper = alternative

    result0 = f(axis=0)
    result1 = f(axis=1)
    assert_series_equal(result0, frame.apply(skipna_wrapper))
    assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                        check_dtype=False)

    # result = f(axis=1)
    # comp = frame.apply(alternative, axis=1).reindex(result.index)
    # assert_series_equal(result, comp)

    # bad axis
    self.assertRaises(ValueError, f, axis=2)

    # make sure works on mixed-type frame
    mixed = self.mixed_frame
    mixed['_bool_'] = np.random.randn(len(mixed)) > 0
    getattr(mixed, name)(axis=0)
    getattr(mixed, name)(axis=1)

    class NonzeroFail:
        # truth-testing this object raises, to prove bool_only skips it
        def __nonzero__(self):
            raise ValueError

    mixed['_nonzero_fail_'] = NonzeroFail()

    if has_bool_only:
        getattr(mixed, name)(axis=0, bool_only=True)
        getattr(mixed, name)(axis=1, bool_only=True)
        getattr(frame, name)(axis=0, bool_only=False)
        getattr(frame, name)(axis=1, bool_only=False)

    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, name)(axis=0)
        r1 = getattr(all_na, name)(axis=1)
        if name == 'any':
            self.assertFalse(r0.any())
            self.assertFalse(r1.any())
        else:
            self.assertTrue(r0.all())
            self.assertTrue(r1.all())
def test_strange_column_corruption_issue(self):
    """Regression repro: interleaving column creation with chained cell
    writes must not corrupt columns — afterwards column 100 has no nulls.
    The exact statement order IS the test; do not 'simplify' it."""
    df = DataFrame(index=[0, 1])
    df[0] = nan
    wasCol = {}
    # uncommenting these makes the results match
    # for col in xrange(100, 200):
    #     wasCol[col] = 1
    #     df[col] = nan

    for i, dt in enumerate(df.index):
        for col in range(100, 200):
            if not col in wasCol:
                wasCol[col] = 1
                df[col] = nan
            df[col][dt] = i  # chained write into the just-created column

    myid = 100

    first = len(df.ix[isnull(df[myid]), [myid]])
    second = len(df.ix[isnull(df[myid]), [myid]])
    self.assertTrue(first == second == 0)
def test_inplace_return_self(self):
    """Every DataFrame/Series mutator called with inplace=True must return
    None (GH1893)."""
    # re #1893

    data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
                      'b': [0, 0, 1, 1],
                      'c': [1, 2, 3, 4]})

    def _check_f(base, f):
        # apply the inplace mutator and assert it returned None
        result = f(base)
        self.assertTrue(result is None)

    # -----DataFrame-----

    # set_index
    f = lambda x: x.set_index('a', inplace=True)
    _check_f(data.copy(), f)

    # reset_index
    f = lambda x: x.reset_index(inplace=True)
    _check_f(data.set_index('a'), f)

    # drop_duplicates
    f = lambda x: x.drop_duplicates(inplace=True)
    _check_f(data.copy(), f)

    # sort
    f = lambda x: x.sort_values('b', inplace=True)
    _check_f(data.copy(), f)

    # sort_index
    f = lambda x: x.sort_index(inplace=True)
    _check_f(data.copy(), f)

    # sortlevel
    f = lambda x: x.sortlevel(0, inplace=True)
    _check_f(data.set_index(['a', 'b']), f)

    # fillna
    f = lambda x: x.fillna(0, inplace=True)
    _check_f(data.copy(), f)

    # replace
    f = lambda x: x.replace(1, 0, inplace=True)
    _check_f(data.copy(), f)

    # rename
    f = lambda x: x.rename({1: 'foo'}, inplace=True)
    _check_f(data.copy(), f)

    # -----Series-----
    d = data.copy()['c']

    # reset_index
    f = lambda x: x.reset_index(inplace=True, drop=True)
    _check_f(data.set_index('a')['c'], f)

    # fillna
    f = lambda x: x.fillna(0, inplace=True)
    _check_f(d.copy(), f)

    # replace
    f = lambda x: x.replace(1, 0, inplace=True)
    _check_f(d.copy(), f)

    # rename
    f = lambda x: x.rename({1: 'foo'}, inplace=True)
    _check_f(d.copy(), f)
def test_isin(self):
    """isin with a list matches applying Series.isin row by row (GH4211)."""
    # GH #4211
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    other = ['a', 'b', 'c']

    result = df.isin(other)
    expected = DataFrame([df.loc[s].isin(other) for s in df.index])
    assert_frame_equal(result, expected)
def test_isin_empty(self):
    """isin([]) is all-False with the same shape and labels."""
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    result = df.isin([])
    expected = pd.DataFrame(False, df.index, df.columns)
    assert_frame_equal(result, expected)
def test_isin_dict(self):
    """isin with a dict restricts matching per-column, including when
    column labels are duplicated."""
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    d = {'A': ['a']}

    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    assert_frame_equal(result, expected)

    # non unique columns
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    df.columns = ['A', 'A']
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
    """isin rejects a bare string argument with TypeError (GH4763) —
    strings are iterable but not a valid collection of values here."""
    #GH4763
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    with tm.assertRaises(TypeError):
        df.isin('a')

    with tm.assertRaises(TypeError):
        df.isin('aaa')
def test_isin_df(self):
    """isin(DataFrame) matches element-wise on aligned labels; columns
    absent from the other frame come back all-False."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
    expected = DataFrame(False, df1.index, df1.columns)
    result = df1.isin(df2)
    expected['A'].loc[[1, 3]] = True
    expected['B'].loc[[0, 2]] = True
    assert_frame_equal(result, expected)

    # partial overlapping columns
    df2.columns = ['A', 'C']
    result = df1.isin(df2)
    expected['B'] = False
    assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
    """isin(DataFrame) raises ValueError when the other frame has
    duplicated columns, duplicated index, or both."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    # just cols duped
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['B', 'B'])
    with tm.assertRaises(ValueError):
        df1.isin(df2)

    # just index duped
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['A', 'B'], index=[0, 0, 1, 1])
    with tm.assertRaises(ValueError):
        df1.isin(df2)

    # cols and index:
    df2.columns = ['B', 'B']
    with tm.assertRaises(ValueError):
        df1.isin(df2)
def test_isin_dupe_self(self):
    """isin works when SELF has duplicated column labels (only the other
    frame must be label-unique)."""
    other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
    df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
    result = df.isin(other)
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected.loc[0] = True
    expected.iloc[1, 1] = True
    assert_frame_equal(result, expected)
def test_isin_against_series(self):
    """isin(Series) aligns on the index: a cell matches only where the
    Series has that value at the same label."""
    df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                      index=['a', 'b', 'c', 'd'])
    s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected['A'].loc['a'] = True
    expected.loc['d'] = True
    result = df.isin(s)
    assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
    """isin(DataFrame) with a MultiIndexed self: a plain-indexed other
    matches nothing; after giving the other the same MultiIndex, matches
    are computed cell-wise on aligned labels."""
    idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                  (0, 'b', 'bar'), (0, 'b', 'baz'),
                                  (2, 'a', 'foo'), (2, 'a', 'bar'),
                                  (2, 'c', 'bar'), (2, 'c', 'baz'),
                                  (1, 'b', 'foo'), (1, 'b', 'bar'),
                                  (1, 'c', 'bar'), (1, 'c', 'baz')])
    df1 = DataFrame({'A': np.ones(12),
                     'B': np.zeros(12)}, index=idx)
    df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                     'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
    # against regular index
    expected = DataFrame(False, index=df1.index, columns=df1.columns)
    result = df1.isin(df2)
    assert_frame_equal(result, expected)

    df2.index = idx
    # A is all ones, B all zeros, so matches are df2's bools (B inverted)
    expected = df2.values.astype(np.bool)
    expected[:, 1] = ~expected[:, 1]
    expected = DataFrame(expected, columns=['A', 'B'], index=idx)

    result = df1.isin(df2)
    assert_frame_equal(result, expected)
def test_to_csv_date_format(self):
    """to_csv(date_format=...) formats datetime index, columns, and values;
    NaT round-trips through read_csv.  Runs for both the default and the
    (deprecated, warning-producing) 'python' engine."""
    from pandas import to_datetime
    pname = '__tmp_to_csv_date_format__'
    with ensure_clean(pname) as path:
        for engine in [None, 'python']:
            w = FutureWarning if engine == 'python' else None

            dt_index = self.tsframe.index
            datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)

            with tm.assert_produces_warning(w, check_stacklevel=False):
                datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)

            # Check that the data was put in the specified format
            test = read_csv(path, index_col=0)

            datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))
            datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))

            assert_frame_equal(test, datetime_frame_int)

            with tm.assert_produces_warning(w, check_stacklevel=False):
                datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)

            # Check that the data was put in the specified format
            test = read_csv(path, index_col=0)
            datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))
            datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))

            assert_frame_equal(test, datetime_frame_str)

            # Check that columns get converted
            datetime_frame_columns = datetime_frame.T

            with tm.assert_produces_warning(w, check_stacklevel=False):
                datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)

            test = read_csv(path, index_col=0)

            datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))
            # Columns don't get converted to ints by read_csv
            datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))

            assert_frame_equal(test, datetime_frame_columns)

            # NaT values must survive formatting + parse back
            nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
            nat_frame = DataFrame({'A': nat_index}, index=nat_index)

            with tm.assert_produces_warning(w, check_stacklevel=False):
                nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)

            test = read_csv(path, parse_dates=[0, 1], index_col=0)

            assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
    """tz-aware frames spanning DST transitions round-trip through
    to_csv/read_csv (re-localizing from UTC) and through pickle."""
    with ensure_clean('csv_date_format_with_dst') as path:
        # make sure we are not failing on transitions
        times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
                              tz="Europe/London",
                              freq="H",
                              ambiguous='infer')

        for i in [times, times + pd.Timedelta('10s')]:
            time_range = np.array(range(len(i)), dtype='int64')
            df = DataFrame({'A': time_range}, index=i)
            df.to_csv(path, index=True)

            # we have to reconvert the index as we
            # don't parse the tz's
            result = read_csv(path, index_col=0)
            result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/London')
            assert_frame_equal(result, df)

    # tz-aware datetimes as both index AND a column, across a full year
    idx = pd.date_range('2015-01-01', '2015-12-31', freq = 'H', tz='Europe/Paris')
    df = DataFrame({'values': 1, 'idx': idx},
                   index=idx)
    with ensure_clean('csv_date_format_with_dst') as path:
        df.to_csv(path, index=True)
        result = read_csv(path, index_col=0)
        result.index = pd.to_datetime(result.index).tz_localize('UTC').tz_convert('Europe/Paris')
        result['idx'] = pd.to_datetime(result['idx']).astype('datetime64[ns, Europe/Paris]')
        assert_frame_equal(result, df)

    # assert working
    df.astype(str)

    with ensure_clean('csv_date_format_with_dst') as path:
        df.to_pickle(path)
        result = pd.read_pickle(path)
        assert_frame_equal(result, df)
def test_concat_empty_dataframe_dtypes(self):
    """Concatenating empty frames preserves each column's dtype; mixing
    with a float-cast copy upcasts (bool->object, int32->float64)."""
    df = DataFrame(columns=list("abc"))
    df['a'] = df['a'].astype(np.bool_)
    df['b'] = df['b'].astype(np.int32)
    df['c'] = df['c'].astype(np.float64)

    result = pd.concat([df, df])
    self.assertEqual(result['a'].dtype, np.bool_)
    self.assertEqual(result['b'].dtype, np.int32)
    self.assertEqual(result['c'].dtype, np.float64)

    result = pd.concat([df, df.astype(np.float64)])
    self.assertEqual(result['a'].dtype, np.object_)
    self.assertEqual(result['b'].dtype, np.float64)
    self.assertEqual(result['c'].dtype, np.float64)
def test_empty_frame_dtypes_ftypes(self):
    """dtypes/ftypes of empty frames: fully empty, index-only, columns-only
    (object by default, int32 after astype), and zero-row slices of a
    populated frame keep the per-column dtypes."""
    empty_df = pd.DataFrame()
    assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
    assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))

    nocols_df = pd.DataFrame(index=[1, 2, 3])
    assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
    assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))

    norows_df = pd.DataFrame(columns=list("abc"))
    assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
    assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))

    norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
    assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
    assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))

    odict = OrderedDict
    df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
    assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
                                                    ('b', np.bool),
                                                    ('c', np.float64)])))
    assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
                                                    ('b', 'bool:dense'),
                                                    ('c', 'float64:dense')])))

    # same expectation must hold for an empty (zero-row) slice
    assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),
                                                        ('b', np.bool),
                                                        ('c', np.float64)])))
    assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
                                                        ('b', 'bool:dense'),
                                                        ('c', 'float64:dense')])))
def test_dtypes_are_correct_after_column_slice(self):
    """Slicing columns via iloc must not corrupt the dtypes of either the
    slice or the original frame (checked before and after)."""
    df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
    odict = OrderedDict
    assert_series_equal(df.dtypes,
                        pd.Series(odict([('a', np.float_), ('b', np.float_),
                                         ('c', np.float_),])))
    assert_series_equal(df.iloc[:, 2:].dtypes,
                        pd.Series(odict([('c', np.float_)])))
    # original frame unchanged by the slice above
    assert_series_equal(df.dtypes,
                        pd.Series(odict([('a', np.float_), ('b', np.float_),
                                         ('c', np.float_),])))
def test_set_index_names(self):
    """set_index with Index/MultiIndex objects preserves names, and a list
    of two MultiIndexes combines into one wider MultiIndex (not tuples)."""
    df = pd.util.testing.makeDataFrame()
    df.index.name = 'name'

    self.assertEqual(df.set_index(df.index).index.names, ['name'])

    mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
    mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
                                 names=['A', 'B', 'A', 'B'])

    df = df.set_index(['A', 'B'])

    self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])

    # Check that set_index isn't converting a MultiIndex into an Index
    self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))

    # Check actual equality
    tm.assert_index_equal(df.set_index(df.index).index, mi)

    # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
    # than a pair of tuples
    self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))

    # Check equality
    tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_select_dtypes_include(self):
    """select_dtypes(include=...) picks numeric columns, and 'category'
    can be included alongside np.number."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.Categorical(list('abc'))})
    ri = df.select_dtypes(include=[np.number])
    ei = df[['b', 'c', 'd']]
    tm.assert_frame_equal(ri, ei)

    ri = df.select_dtypes(include=[np.number, 'category'])
    ei = df[['b', 'c', 'd', 'f']]
    tm.assert_frame_equal(ri, ei)
def test_select_dtypes_exclude(self):
    """select_dtypes(exclude=[np.number]) keeps the non-numeric columns
    (object and bool here)."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True]})
    re = df.select_dtypes(exclude=[np.number])
    ee = df[['a', 'e']]
    tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include(self):
    """Combined include+exclude: exclusion wins over inclusion, for both
    numpy types and string dtype names."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    exclude = np.datetime64,
    include = np.bool_, 'integer'
    r = df.select_dtypes(include=include, exclude=exclude)
    e = df[['b', 'c', 'e']]
    tm.assert_frame_equal(r, e)

    exclude = 'datetime',
    include = 'bool', 'int64', 'int32'
    r = df.select_dtypes(include=include, exclude=exclude)
    e = df[['b', 'e']]
    tm.assert_frame_equal(r, e)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
    """dtype strings that are not numpy attributes (e.g. 'i8', 'O',
    'timedelta64[ns]') are still accepted by select_dtypes."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    df['g'] = df.f.diff()  # adds a timedelta64[ns] column
    assert not hasattr(np, 'u8')
    r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
    e = df[['a', 'b']]
    tm.assert_frame_equal(r, e)

    r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
    e = df[['a', 'b', 'g']]
    tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
    """select_dtypes() with neither include nor exclude raises ValueError."""
    df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
    with tm.assertRaisesRegexp(ValueError, 'at least one of include or '
                               'exclude must be nonempty'):
        df.select_dtypes()
def test_select_dtypes_raises_on_string(self):
    """include/exclude must be list-like; a bare string raises TypeError."""
    df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
    with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
        df.select_dtypes(include='object')
    with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
        df.select_dtypes(exclude='object')
    with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
        df.select_dtypes(include=int, exclude='object')
def test_select_dtypes_bad_datetime64(self):
    """Unit-qualified datetime64 dtypes (e.g. 'datetime64[D]') are too
    specific for select_dtypes and raise ValueError."""
    df = DataFrame({'a': list('abc'),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
        df.select_dtypes(include=['datetime64[D]'])

    with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
        df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_str_raises(self):
    """Every spelling of a string dtype (str/unicode, their numpy forms,
    and fixed-width codes) is rejected with TypeError in both include and
    exclude."""
    df = DataFrame({'a': list('abc'),
                    'g': list(u('abc')),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    string_dtypes = set((str, 'str', np.string_, 'S1',
                         'unicode', np.unicode_, 'U1'))
    try:
        string_dtypes.add(unicode)  # py2 only; NameError on py3
    except NameError:
        pass

    for dt in string_dtypes:
        with tm.assertRaisesRegexp(TypeError,
                                   'string dtypes are not allowed'):
            df.select_dtypes(include=[dt])
        with tm.assertRaisesRegexp(TypeError,
                                   'string dtypes are not allowed'):
            df.select_dtypes(exclude=[dt])
def test_select_dtypes_bad_arg_raises(self):
    """An unparseable dtype string raises TypeError('data type ... not
    understood')."""
    df = DataFrame({'a': list('abc'),
                    'g': list(u('abc')),
                    'b': list(range(1, 4)),
                    'c': np.arange(3, 6).astype('u1'),
                    'd': np.arange(4.0, 7.0, dtype='float64'),
                    'e': [True, False, True],
                    'f': pd.date_range('now', periods=3).values})
    with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):
        df.select_dtypes(['blargy, blarg, blarg'])
def test_assign(self):
    """DataFrame.assign: values and lambdas, array-likes, overwriting an
    existing column — and the original frame is never modified."""
    df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
    original = df.copy()
    result = df.assign(C=df.B / df.A)
    expected = df.copy()
    expected['C'] = [4, 2.5, 2]
    assert_frame_equal(result, expected)

    # lambda syntax
    result = df.assign(C=lambda x: x.B / x.A)
    assert_frame_equal(result, expected)

    # original is unmodified
    assert_frame_equal(df, original)

    # Non-Series array-like
    result = df.assign(C=[4, 2.5, 2])
    assert_frame_equal(result, expected)
    # original is unmodified
    assert_frame_equal(df, original)

    result = df.assign(B=df.B / df.A)
    expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
    assert_frame_equal(result, expected)

    # overwrite
    result = df.assign(A=df.A + df.B)
    expected = df.copy()
    expected['A'] = [5, 7, 9]
    assert_frame_equal(result, expected)

    # lambda
    result = df.assign(A=lambda x: x.A + x.B)
    assert_frame_equal(result, expected)
def test_assign_multiple(self):
    """assign with several kwargs (value, Series, lambda) adds all of
    them in one call."""
    df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
    result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
    expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
                          [3, 6, 9, 3, 6]], columns=list('ABCDE'))
    assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
    """GH9818: assign inserts new columns in sorted (alphabetical) order
    regardless of kwarg order."""
    # GH 9818
    df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
    result = df.assign(D=df.A + df.B, C=df.A - df.B)
    expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
                         columns=list('ABCD'))
    assert_frame_equal(result, expected)
    result = df.assign(C=df.A - df.B, D=df.A + df.B)
    assert_frame_equal(result, expected)
def test_assign_bad(self):
    """assign error paths: positional argument (TypeError) and references
    to columns created in the same call (AttributeError/KeyError — kwargs
    cannot see each other)."""
    df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})

    # non-keyword argument
    with tm.assertRaises(TypeError):
        df.assign(lambda x: x.A)
    with tm.assertRaises(AttributeError):
        df.assign(C=df.A, D=df.A + df.C)
    with tm.assertRaises(KeyError):
        df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
    with tm.assertRaises(KeyError):
        df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_dataframe_metadata(self):
    """Custom attributes on a DataFrame subclass propagate through column
    selection, loc/iloc slicing (GH9776), and pickling (GH10553)."""
    df = SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
                             index=['a', 'b', 'c'])
    df.testattr = 'XXX'

    self.assertEqual(df.testattr, 'XXX')
    self.assertEqual(df[['X']].testattr, 'XXX')
    self.assertEqual(df.loc[['a', 'b'], :].testattr, 'XXX')
    self.assertEqual(df.iloc[[0, 1], :].testattr, 'XXX')
    # GH9776
    self.assertEqual(df.iloc[0:1, :].testattr, 'XXX')
    # GH10553
    unpickled = self.round_trip_pickle(df)
    assert_frame_equal(df, unpickled)
    self.assertEqual(df._metadata, unpickled._metadata)
    self.assertEqual(df.testattr, unpickled.testattr)
def test_nlargest(self):
    """nlargest(5, col) equals sort_values(col, ascending=False).head(5)
    (GH10393)."""
    # GH10393
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10])})
    result = df.nlargest(5, 'a')
    expected = df.sort_values('a', ascending=False).head(5)
    tm.assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
    """nlargest over multiple columns equals a multi-column descending
    sort followed by head()."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10]),
                       'c': np.random.permutation(10).astype('float64')})
    result = df.nlargest(5, ['a', 'b'])
    expected = df.sort_values(['a', 'b'], ascending=False).head(5)
    tm.assert_frame_equal(result, expected)
def test_nsmallest(self):
    """nsmallest(5, col) equals sort_values(col).head(5)."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10])})
    result = df.nsmallest(5, 'a')
    expected = df.sort_values('a').head(5)
    tm.assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
    """nsmallest over multiple columns equals a multi-column ascending
    sort followed by head()."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10]),
                       'c': np.random.permutation(10).astype('float64')})
    result = df.nsmallest(5, ['a', 'c'])
    expected = df.sort_values(['a', 'c']).head(5)
    tm.assert_frame_equal(result, expected)
def test_to_panel_expanddim(self):
    """GH9762: to_panel() must honor ``_constructor_expanddim`` so a
    DataFrame subclass expands into the declared Panel subclass."""
    # GH 9762

    class SubclassedFrame(DataFrame):
        # route dimension expansion to our Panel subclass
        @property
        def _constructor_expanddim(self):
            return SubclassedPanel

    class SubclassedPanel(Panel):
        pass

    index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
    df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index)
    result = df.to_panel()
    self.assertTrue(isinstance(result, SubclassedPanel))
    expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
                               items=['X', 'Y'], major_axis=[0],
                               minor_axis=[0, 1, 2],
                               dtype='int64')
    tm.assert_panel_equal(result, expected)
def skip_if_no_ne(engine='numexpr'):
    """Skip the calling test when *engine* is 'numexpr' and the numexpr
    package cannot be imported; any other engine is accepted silently."""
    if engine != 'numexpr':
        return
    try:
        import numexpr as ne
    except ImportError:
        raise nose.SkipTest("cannot query engine numexpr when numexpr not "
                            "installed")
def skip_if_no_pandas_parser(parser):
    """Skip the calling test unless the 'pandas' expression parser was
    requested."""
    if parser == 'pandas':
        return
    raise nose.SkipTest("cannot evaluate with parser {0!r}".format(parser))
class TestDataFrameQueryWithMultiIndex(object):
def check_query_with_named_multiindex(self, parser, engine):
    """query() against a NAMED MultiIndex level ('color'): ==/!=, list
    membership, and in/not-in, each written in both operand orders, all
    compared against boolean indexing on the level values."""
    tm.skip_if_no_ne(engine)
    a = tm.choice(['red', 'green'], size=10)
    b = tm.choice(['eggs', 'ham'], size=10)
    index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
    df = DataFrame(randn(10, 2), index=index)
    # level values as a Series, used to build the expected masks
    ind = Series(df.index.get_level_values('color').values, index=index,
                 name='color')

    # equality
    res1 = df.query('color == "red"', parser=parser, engine=engine)
    res2 = df.query('"red" == color', parser=parser, engine=engine)
    exp = df[ind == 'red']
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)

    # inequality
    res1 = df.query('color != "red"', parser=parser, engine=engine)
    res2 = df.query('"red" != color', parser=parser, engine=engine)
    exp = df[ind != 'red']
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)

    # list equality (really just set membership)
    res1 = df.query('color == ["red"]', parser=parser, engine=engine)
    res2 = df.query('["red"] == color', parser=parser, engine=engine)
    exp = df[ind.isin(['red'])]
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)

    res1 = df.query('color != ["red"]', parser=parser, engine=engine)
    res2 = df.query('["red"] != color', parser=parser, engine=engine)
    exp = df[~ind.isin(['red'])]
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)

    # in/not in ops
    res1 = df.query('["red"] in color', parser=parser, engine=engine)
    res2 = df.query('"red" in color', parser=parser, engine=engine)
    exp = df[ind.isin(['red'])]
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)

    res1 = df.query('["red"] not in color', parser=parser, engine=engine)
    res2 = df.query('"red" not in color', parser=parser, engine=engine)
    exp = df[~ind.isin(['red'])]
    assert_frame_equal(res1, exp)
    assert_frame_equal(res2, exp)
def test_query_with_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_named_multiindex, parser, engine
def check_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
#### LEVEL 1 ####
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_unnamed_multiindex, parser, engine
def check_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_with_partially_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_partially_named_multiindex, parser, engine
def test_query_multiindex_get_index_resolvers(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_multiindex_get_index_resolvers, parser, engine
def check_query_multiindex_get_index_resolvers(self, parser, engine):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel_with_multiindex, parser, engine
def check_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel4d_with_multiindex, parser, engine
def check_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(tm.TestCase):
    """DataFrame.query()/eval() tests run with engine='numexpr' and
    parser='pandas'.

    Subclasses override ``engine``/``parser`` in ``setUpClass`` to re-run the
    whole suite against other combinations.
    """
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryNumExprPandas, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
        # Skip the whole class when numexpr is not installed.
        tm.skip_if_no_ne(cls.engine)
    @classmethod
    def tearDownClass(cls):
        super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()
        del cls.engine, cls.parser
    def test_date_query_with_attribute_access(self):
        # '@df.dates1' style attribute access only works with the pandas
        # parser.
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_query_no_attribute_access(self):
        # Chained comparison against a bare integer date literal.
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('dates1 < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        tm.assert_frame_equal(res, expec)
    def test_date_query_with_NaT(self):
        # NaT values in the compared columns must not break the query.
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates2'] = date_range('1/1/2013', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
        res = df.query('dates1 < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query(self):
        # 'index' in a query refers to the frame's (date) index.
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('index < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        # Inject a single NaT before making 'dates1' the index.
        df.iloc[0, 0] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('index < 20130101 < dates3', engine=engine,
                       parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        d = {}
        d['dates1'] = date_range('1/1/2012', periods=n)
        d['dates3'] = date_range('1/1/2014', periods=n)
        df = DataFrame(d)
        # Randomly NaT roughly half the index values, producing duplicates.
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)
        expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_query_with_non_date(self):
        # Comparing a datetime column to a non-datetime column must raise.
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame({'dates': date_range('1/1/2012', periods=n),
                        'nondate': np.arange(n)})
        ops = '==', '!=', '<', '>', '<=', '>='
        for op in ops:
            with tm.assertRaises(TypeError):
                df.query('dates %s nondate' % op, parser=parser, engine=engine)
    def test_query_syntax_error(self):
        engine, parser = self.engine, self.parser
        df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
                        "r": lrange(4, 14)})
        # 'i - +' is not a parseable expression.
        with tm.assertRaises(SyntaxError):
            df.query('i - +', engine=engine, parser=parser)
    def test_query_scope(self):
        # Column names win by default; '@name' explicitly targets locals.
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
        a, b = 1, 2
        res = df.query('a > b', engine=engine, parser=parser)
        expected = df[df.a > df.b]
        tm.assert_frame_equal(res, expected)
        res = df.query('@a > b', engine=engine, parser=parser)
        expected = df[a > df.b]
        tm.assert_frame_equal(res, expected)
        # no local variable c
        with tm.assertRaises(UndefinedVariableError):
            df.query('@a > b > @c', engine=engine, parser=parser)
        # no column named 'c'
        with tm.assertRaises(UndefinedVariableError):
            df.query('@a > b > c', engine=engine, parser=parser)
    def test_query_doesnt_pickup_local(self):
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
        # we don't pick up the local 'sin'
        with tm.assertRaises(UndefinedVariableError):
            df.query('sin > 5', engine=engine, parser=parser)
    def test_query_builtin(self):
        # numexpr refuses index/column names that clobber its builtins.
        from pandas.computation.engines import NumExprClobberingError
        engine, parser = self.engine, self.parser
        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
        df.index.name = 'sin'
        with tm.assertRaisesRegexp(NumExprClobberingError,
                                   'Variables in expression.+'):
            df.query('sin > 5', engine=engine, parser=parser)
    def test_query(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
        assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
                           df[df.a < df.b])
        assert_frame_equal(df.query('a + b > b * c', engine=engine,
                                    parser=parser),
                           df[df.a + df.b > df.b * df.c])
    def test_query_index_with_name(self):
        # A named index is addressable by its name in the expression.
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=Index(range(10), name='blob'),
                       columns=['a', 'b', 'c'])
        res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
        expec = df[(df.index < 5) & (df.a < df.b)]
        assert_frame_equal(res, expec)
        res = df.query('blob < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)
    def test_query_index_without_name(self):
        # An unnamed index falls back to the generic 'index' resolver.
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=range(10), columns=['a', 'b', 'c'])
        res = df.query('index < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)
        res = df.query('index < 5', engine=engine, parser=parser)
        expec = df[df.index < 5]
        assert_frame_equal(res, expec)
    def test_nested_scope(self):
        # Whole frames referenced via '@' inside query/eval expressions.
        engine = self.engine
        parser = self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        expected = df[(df > 0) & (df2 > 0)]
        result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)
        assert_frame_equal(result, expected)
        result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
                         parser=parser)
        assert_frame_equal(result, expected)
        result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
                         engine=engine, parser=parser)
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        assert_frame_equal(result, expected)
        result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
        expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
        assert_frame_equal(result, expected)
    def test_nested_raises_on_local_self_reference(self):
        from pandas.computation.ops import UndefinedVariableError
        df = DataFrame(np.random.randn(5, 3))
        # Referencing the frame itself without '@' is undefined inside query.
        with tm.assertRaises(UndefinedVariableError):
            df.query('df > 0', engine=self.engine, parser=self.parser)
    def test_local_syntax(self):
        # '@b' resolves the local; bare 'b' resolves the column.
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
        b = 1
        expect = df[df.a < b]
        result = df.query('a < @b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)
        expect = df[df.a < df.b]
        result = df.query('a < b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)
    def test_chained_cmp_and_in(self):
        # Mix a chained comparison with chained 'not in' membership tests.
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        cols = list('abc')
        df = DataFrame(randn(100, len(cols)), columns=cols)
        res = df.query('a < b < c and a not in b not in c', engine=engine,
                       parser=parser)
        ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
        expec = df[ind]
        assert_frame_equal(res, expec)
    def test_local_variable_with_in(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        a = Series(np.random.randint(3, size=15), name='a')
        b = Series(np.random.randint(10, size=15), name='b')
        df = DataFrame({'a': a, 'b': b})
        expected = df.loc[(df.b - 1).isin(a)]
        result = df.query('b - 1 in a', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
        # Same expression with the left operand supplied as a local.
        b = Series(np.random.randint(10, size=15), name='b')
        expected = df.loc[(b - 1).isin(a)]
        result = df.query('@b - 1 in a', engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
    def test_at_inside_string(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        # 'c' exists as a local, but '@c' inside a string literal must be
        # treated as plain text, not a local-variable reference.
        c = 1
        df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
        result = df.query('a == "@c"', engine=engine, parser=parser)
        expected = df[df.a == "@c"]
        tm.assert_frame_equal(result, expected)
    def test_query_undefined_local(self):
        from pandas.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
        with tm.assertRaisesRegexp(UndefinedVariableError,
                                   "local variable 'c' is not defined"):
            df.query('a == @c', engine=engine, parser=parser)
    def test_index_resolvers_come_after_columns_with_the_same_name(self):
        # NOTE(review): 'n' is assigned but never used below -- verify whether
        # it was meant to parameterize the test data.
        n = 1
        a = np.r_[20:101:20]
        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        df.index.name = 'index'
        # A column literally named 'index' shadows the index resolver.
        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df[df['index'] > 5]
        tm.assert_frame_equal(result, expected)
        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        # ilevel_0 still refers to the actual (unnamed) index.
        result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)
        df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
        df.index.name = 'a'
        result = df.query('a > 5', engine=self.engine, parser=self.parser)
        expected = df[df.a > 5]
        tm.assert_frame_equal(result, expected)
        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        tm.assert_frame_equal(result, expected)
    def test_inf(self):
        # NOTE(review): df has columns 'a'/'b', so `df.loc[::2, 0]` enlarges
        # the frame with a new column labeled 0 instead of injecting inf into
        # column 'a' -- likely meant `df.loc[::2, 'a']`. As written the inf
        # comparisons below run against finite data only; verify intent.
        n = 10
        df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
        df.loc[::2, 0] = np.inf
        ops = '==', '!='
        d = dict(zip(ops, (operator.eq, operator.ne)))
        for op, f in d.items():
            q = 'a %s inf' % op
            expected = df[f(df.a, np.inf)]
            result = df.query(q, engine=self.engine, parser=self.parser)
            tm.assert_frame_equal(result, expected)
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
    """Re-run the query suite with parser='python'.

    The python parser supports neither chained comparisons nor '@' local
    references, so the affected tests are overridden here with parenthesized
    forms or with expected-failure assertions.
    """
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryNumExprPython, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'python'
        tm.skip_if_no_ne(cls.engine)
        cls.frame = _frame.copy()
    def test_date_query_no_attribute_access(self):
        # Chained comparison rewritten as two parenthesized comparisons.
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        tm.assert_frame_equal(res, expec)
    def test_date_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates2'] = date_range('1/1/2013', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        # Single NaT in the soon-to-be index.
        df.iloc[0, 0] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)
    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        # Chained comparison is not implemented for the python parser.
        with tm.assertRaises(NotImplementedError):
            df.query('index < 20130101 < dates3', engine=engine, parser=parser)
    def test_nested_scope(self):
        from pandas.computation.ops import UndefinedVariableError
        engine = self.engine
        parser = self.parser
        # smoke test
        x = 1
        result = pd.eval('x + 1', engine=engine, parser=parser)
        self.assertEqual(result, 2)
        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        # '@' syntax is rejected outright by the python parser...
        with tm.assertRaises(SyntaxError):
            df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
        # ...and bare frame names are not resolvable from within query().
        with tm.assertRaises(UndefinedVariableError):
            df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
        expected = df[(df > 0) & (df2 > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
                         parser=parser)
        tm.assert_frame_equal(expected, result)
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
                         engine=engine, parser=parser)
        tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
    """Re-run the query suite with engine='python' and parser='pandas'."""
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryPythonPandas, cls).setUpClass()
        cls.parser = 'pandas'
        cls.engine = 'python'
        cls.frame = _frame.copy()
    def test_query_builtin(self):
        # Unlike numexpr, the python engine happily resolves an index named
        # after a builtin ('sin') instead of raising a clobbering error.
        nrows = nvals = 10
        df = DataFrame(np.random.randint(nvals, size=(nrows, 3)),
                       columns=list('abc'))
        df.index.name = 'sin'
        result = df.query('sin > 5', engine=self.engine, parser=self.parser)
        tm.assert_frame_equal(df[df.index > 5], result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
    """Re-run the query suite with both engine and parser set to 'python'."""
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameQueryPythonPython, cls).setUpClass()
        cls.engine = cls.parser = 'python'
        cls.frame = _frame.copy()
    def test_query_builtin(self):
        # The pure-python engine does not guard against names that shadow
        # numexpr builtins, so 'sin' resolves to the index here.
        nrows = nvals = 10
        df = DataFrame(np.random.randint(nvals, size=(nrows, 3)),
                       columns=list('abc'))
        df.index.name = 'sin'
        result = df.query('sin > 5', engine=self.engine, parser=self.parser)
        tm.assert_frame_equal(df[df.index > 5], result)
# Parser/engine combinations exercised by the generator-style string tests
# below.
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
class TestDataFrameQueryStrings(object):
    """String comparison/membership semantics of DataFrame.query(), run via
    nose test generators for every (parser, engine) combination."""
    def check_str_query_method(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings == 'a']
        if parser != 'pandas':
            # Non-pandas parsers cannot compare object arrays to string
            # literals: every ==/!= combination must raise.
            col = 'strings'
            lst = '"a"'
            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]
            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])
            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                assertRaises(NotImplementedError, df.query, ex, engine=engine,
                             parser=parser, local_dict={'strings': df.strings})
        else:
            res = df.query('"a" == strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            res = df.query('strings == "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[df.strings.isin(['a'])])
            expect = df[df.strings != 'a']
            res = df.query('strings != "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            res = df.query('"a" != strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[~df.strings.isin(['a'])])
    def test_str_query_method(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_str_query_method, parser, engine
    def test_str_list_query_method(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_str_list_query_method, parser, engine
    def check_str_list_query_method(self, parser, engine):
        # Same as check_str_query_method, but comparing against a list of
        # strings (set-membership semantics).
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings.isin(['a', 'b'])]
        if parser != 'pandas':
            col = 'strings'
            lst = '["a", "b"]'
            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]
            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])
            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                with tm.assertRaises(NotImplementedError):
                    df.query(ex, engine=engine, parser=parser)
        else:
            res = df.query('strings == ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)
            res = df.query('["a", "b"] == strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)
            expect = df[~df.strings.isin(['a', 'b'])]
            res = df.query('strings != ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)
            res = df.query('["a", "b"] != strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)
    def check_query_with_string_columns(self, parser, engine):
        # 'a in b' between two string columns: pandas parser only.
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        if parser == 'pandas':
            res = df.query('a in b', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b)]
            assert_frame_equal(res, expec)
            res = df.query('a in b and c < d', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b) & (df.c < df.d)]
            assert_frame_equal(res, expec)
        else:
            with assertRaises(NotImplementedError):
                df.query('a in b', parser=parser, engine=engine)
            with assertRaises(NotImplementedError):
                df.query('a in b and c < d', parser=parser, engine=engine)
    def test_query_with_string_columns(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_string_columns, parser, engine
    def check_object_array_eq_ne(self, parser, engine):
        # ==/!= between two object (string) columns works for all parsers.
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        res = df.query('a == b', parser=parser, engine=engine)
        exp = df[df.a == df.b]
        assert_frame_equal(res, exp)
        res = df.query('a != b', parser=parser, engine=engine)
        exp = df[df.a != df.b]
        assert_frame_equal(res, exp)
    def test_object_array_eq_ne(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_object_array_eq_ne, parser, engine
    def check_query_with_nested_strings(self, parser, engine):
        # Quoted strings that themselves contain double quotes must survive
        # the query tokenizer.
        tm.skip_if_no_ne(engine)
        skip_if_no_pandas_parser(parser)
        from pandas.compat import StringIO
        # NOTE(review): the field separators inside `raw` appear to have been
        # collapsed to single spaces, while read_csv below splits on
        # sep=r'\s{2,}' (two or more spaces) -- confirm the fixture still
        # parses into three columns as intended.
        raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
        df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
                         parse_dates=['timestamp'])
        expected = df[df.event == '"page 1 load"']
        res = df.query("""'"page 1 load"' in event""", parser=parser,
                       engine=engine)
        tm.assert_frame_equal(expected, res)
    def test_query_with_nested_string(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_nested_strings, parser, engine
    def check_query_with_nested_special_character(self, parser, engine):
        # '&' inside a string literal must not be parsed as the AND operator.
        skip_if_no_pandas_parser(parser)
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': ['a', 'b', 'test & test'],
                        'b': [1, 2, 3]})
        res = df.query('a == "test & test"', parser=parser, engine=engine)
        expec = df[df.a == 'test & test']
        tm.assert_frame_equal(res, expec)
    def test_query_with_nested_special_character(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_with_nested_special_character, parser, engine
    def check_query_lex_compare_strings(self, parser, engine):
        # Lexicographic ordering comparisons (<, >, <=, >=) on strings.
        tm.skip_if_no_ne(engine=engine)
        import operator as opr
        a = Series(tm.choice(list('abcde'), 20))
        b = Series(np.arange(a.size))
        df = DataFrame({'X': a, 'Y': b})
        ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
        for op, func in ops.items():
            res = df.query('X %s "d"' % op, engine=engine, parser=parser)
            expected = df[func(df.X, 'd')]
            assert_frame_equal(res, expected)
    def test_query_lex_compare_strings(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_lex_compare_strings, parser, engine
    def check_query_single_element_booleans(self, parser, engine):
        # '&' on boolean columns of a single-row frame.
        tm.skip_if_no_ne(engine)
        columns = 'bid', 'bidsize', 'ask', 'asksize'
        data = np.random.randint(2, size=(1, len(columns))).astype(bool)
        df = DataFrame(data, columns=columns)
        res = df.query('bid & ask', engine=engine, parser=parser)
        expected = df[df.bid & df.ask]
        assert_frame_equal(res, expected)
    def test_query_single_element_booleans(self):
        for parser, engine in product(PARSERS, ENGINES):
            yield self.check_query_single_element_booleans, parser, engine
    def check_query_string_scalar_variable(self, parser, engine):
        # A local string variable referenced via '@' on the RHS of ==.
        tm.skip_if_no_ne(engine)
        df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
                           'Price': [109.70, 109.72, 183.30, 183.35]})
        e = df[df.Symbol == 'BUD US']
        symb = 'BUD US'
        r = df.query('Symbol == @symb', parser=parser, engine=engine)
        tm.assert_frame_equal(e, r)
    def test_query_string_scalar_variable(self):
        for parser, engine in product(['pandas'], ENGINES):
            yield self.check_query_string_scalar_variable, parser, engine
class TestDataFrameEvalNumExprPandas(tm.TestCase):
    """DataFrame.eval() tests with engine='numexpr' and parser='pandas';
    subclasses override engine/parser to reuse the suite."""
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalNumExprPandas, cls).setUpClass()
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
        tm.skip_if_no_ne()
    def setUp(self):
        # Fresh random frame for every test method.
        self.frame = DataFrame(randn(10, 3), columns=list('abc'))
    def tearDown(self):
        del self.frame
    def test_simple_expr(self):
        res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
        expect = self.frame.a + self.frame.b
        assert_series_equal(res, expect)
    def test_bool_arith_expr(self):
        # Boolean indexing inside the evaluated expression itself.
        res = self.frame.eval('a[a < 1] + b', engine=self.engine,
                              parser=self.parser)
        expect = self.frame.a[self.frame.a < 1] + self.frame.b
        assert_series_equal(res, expect)
    def test_invalid_type_for_operator_raises(self):
        # Arithmetic between a numeric and an object (string) column raises.
        df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
        ops = '+', '-', '*', '/'
        for op in ops:
            with tm.assertRaisesRegexp(TypeError,
                                       "unsupported operand type\(s\) for "
                                       ".+: '.+' and '.+'"):
                df.eval('a {0} b'.format(op), engine=self.engine,
                        parser=self.parser)
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
    """Re-run the eval tests with engine='numexpr' and parser='python'."""
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalNumExprPython, cls).setUpClass()
        cls.parser = 'python'
        cls.engine = 'numexpr'
        tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
    """Re-run the eval tests with engine='python' and parser='pandas'."""
    @classmethod
    def setUpClass(cls):
        super(TestDataFrameEvalPythonPandas, cls).setUpClass()
        cls.parser = 'pandas'
        cls.engine = 'python'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
    """Re-run the eval tests with both engine and parser set to 'python'."""
    @classmethod
    def setUpClass(cls):
        # BUG FIX: this previously called super(...).tearDownClass(), which
        # skipped the inherited class setup and instead ran the parent
        # teardown during setup. Call setUpClass() like every sibling class.
        super(TestDataFrameEvalPythonPython, cls).setUpClass()
        cls.engine = cls.parser = 'python'
if __name__ == '__main__':
    # Run this module's tests under nose; '-x' stops on first failure and
    # '--pdb'/'--pdb-failure' drop into the debugger on errors/failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| true | true |
79001ee3b002d0859ab2fbc5f5b221a54e51390f | 87,210 | py | Python | datalad/utils.py | AKSoo/datalad | dbc34478980c808a86b5531316c986abac953e37 | [
"MIT"
] | null | null | null | datalad/utils.py | AKSoo/datalad | dbc34478980c808a86b5531316c986abac953e37 | [
"MIT"
] | null | null | null | datalad/utils.py | AKSoo/datalad | dbc34478980c808a86b5531316c986abac953e37 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
# Types that ensure_unicode()/as_unicode() know how to decode from
unicode_srctypes = str, bytes

lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")

#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
# MSYS/MINGW shells on Windows rewrite POSIX-looking command line paths
# unless MSYS_NO_PATHCONV is set; flag that situation once at import time
on_msys_tainted_paths = on_windows \
                        and 'MSYS_NO_PATHCONV' not in os.environ \
                        and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache()  # platform identity cannot change within one process
def get_linux_distribution():
    """Compatibility wrapper for {platform,distro}.linux_distribution().
    """
    if hasattr(platform, "linux_distribution"):
        # Prefer the (deprecated but faster) stdlib implementation where it
        # still exists, silencing its DeprecationWarning.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            return platform.linux_distribution()
    # Python 3.8+ removed platform.linux_distribution -- fall back to distro.
    import distro  # We require this for Python 3.8 and above.
    return distro.linux_distribution(full_distribution_name=False)
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None

# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
    # POSIX limit on the combined length of argv + environment
    CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
    assert CMD_MAX_ARG > 0
    if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
        # workaround for some kind of a bug which comes up with python 3.4
        # see https://github.com/datalad/datalad/issues/3150
        # or on older CentOS with conda and python as new as 3.9
        # see https://github.com/datalad/datalad/issues/5943
        # TODO: let Yarik know that the world is a paradise now whenever 1e6
        # is not large enough
        CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
    # ATM (20181005) SC_ARG_MAX available only on POSIX systems
    # so exception would be thrown e.g. on Windows, or
    # somehow during Debian build for nd14.04 it is coming up with -1:
    # https://github.com/datalad/datalad/issues/3015
    CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
    lgr.debug(
        "Failed to query or got useless SC_ARG_MAX sysconf, "
        "will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
    "Maximal length of cmdline string (adjusted for safety margin): %d",
    CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
    "ArgSpecFake", ["args", "varargs", "keywords", "defaults"])


def getargspec(func, *, include_kwonlyargs=False):
    """Compat shim for getargspec deprecated in python 3.

    Unlike inspect.getargspec/getfullargspec, this goes through
    inspect.signature and therefore reports correct args/defaults for
    functools.wraps'ed functions.  With ``include_kwonlyargs=True``,
    keyword-only parameters (after ``*,``) are folded into ``args`` and
    ``defaults``; otherwise their presence raises ValueError.

    For internal use and not advised for use in 3rd party code.
    Please use inspect.signature directly.
    """
    signature = inspect.signature(func)

    positional = []          # ordered positional(-or-keyword) parameter names
    defaults = {}            # name -> default, for parameters that have one
    var_positional = None    # name of *args, if any
    var_keyword = None       # name of **kwargs, if any
    kwonly = {}              # keyword-only parameters: name -> default

    Param = inspect.Parameter
    for name, param in signature.parameters.items():
        kind = param.kind
        if kind in (Param.POSITIONAL_ONLY, Param.POSITIONAL_OR_KEYWORD):
            assert not kwonly  # yoh: must not come after kwonlyarg
            positional.append(name)
            if param.default is not Param.empty:
                defaults[name] = param.default
        elif kind == Param.VAR_POSITIONAL:
            var_positional = name
        elif kind == Param.VAR_KEYWORD:
            var_keyword = name
        elif kind == Param.KEYWORD_ONLY:
            assert param.default is not Param.empty
            kwonly[name] = param.default

    if kwonly:
        if not include_kwonlyargs:
            raise ValueError(
                'Function has keyword-only parameters or annotations, either use '
                'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
                'to this function'
            )
        positional.extend(kwonly)
        defaults.update(kwonly)

    # getargspec historically exposed defaults as a tuple of values (or None)
    return ArgSpecFake(
        positional,
        var_positional,
        var_keyword,
        tuple(defaults.values()) if defaults else None,
    )
def any_re_search(regexes, value):
    """Return if any of regexes (list or str) searches successfully for value"""
    return any(re.search(rx, value)
               for rx in ensure_tuple_or_list(regexes))
def not_supported_on_windows(msg=None):
    """A little helper to be invoked to consistently fail whenever functionality is
    not supported (yet) on Windows
    """
    if not on_windows:
        return
    details = ": %s" % msg if msg else ""
    raise NotImplementedError(
        "This functionality is not yet implemented for Windows OS" + details)
def get_home_envvars(new_home):
    """Return dict with env variables to be adjusted for a new HOME

    Only variables found in current os.environ are adjusted.

    Parameters
    ----------
    new_home: str or Path
      New home path, in native to OS "schema"
    """
    new_home = str(new_home)
    candidates = {'HOME': new_home}
    if on_windows:
        # Windows keeps several redundant "home" variables, and Python
        # honors USERPROFILE since 3.8: https://bugs.python.org/issue36264
        drive, tail = splitdrive(new_home)
        candidates.update(
            USERPROFILE=new_home, HOMEDRIVE=drive, HOMEPATH=tail)
    return {name: value for name, value in candidates.items()
            if name in os.environ}
def shortened_repr(value, l=30):
    """Return a shortened repr of `value`, or its class name as a fallback.

    Long non-angle-bracket reprs are truncated to ``<<prefix++N chars++tail>>``.
    Default ``<Cls object at 0x...>`` reprs (useless for logging) and objects
    without a custom ``__repr__`` are collapsed to ``<ClassName>``.

    Parameters
    ----------
    value : object
      Value to represent
    l : int, optional
      Maximal length of a non-``<...>`` repr before it gets truncated
    """
    try:
        if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
            value_repr = repr(value)
            if not value_repr.startswith('<') and len(value_repr) > l:
                value_repr = "<<%s++%d chars++%s>>" % (
                    value_repr[:l - 16],
                    len(value_repr) - (l - 16 + 4),
                    value_repr[-4:]
                )
            # BUG FIX: the membership test was missing ("and ' object at 0x'"
            # was a constant-truthy string), so ANY '<...>' repr -- including
            # informative custom ones -- was replaced by the bare class name.
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
                raise ValueError("I hate those useless long reprs")
        else:
            raise ValueError("gimme class")
    except Exception as e:
        # any trouble above -> fall back to the class name
        value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
    return value_repr
def __auto_repr__(obj):
    """Build a ``Cls(attr=..., ...)`` repr from an object's public attributes."""
    names = ()
    if hasattr(obj, '__dict__'):
        names += tuple(obj.__dict__.keys())
    if hasattr(obj, '__slots__'):
        names += tuple(obj.__slots__)

    pieces = []
    for name in sorted(set(names)):
        # private attributes are not part of the repr
        if name.startswith('_'):
            continue
        pieces.append("%s=%s" % (name, shortened_repr(getattr(obj, name))))
    return "%s(%s)" % (obj.__class__.__name__, ', '.join(pieces))
def auto_repr(cls):
    """Class decorator assigning an automagic quick and dirty __repr__

    It uses public class attributes to prepare repr of a class

    Original idea: http://stackoverflow.com/a/27799004/1265472
    """
    cls.__repr__ = __auto_repr__
    return cls
def _is_stream_tty(stream):
    """Return whether `stream` is attached to a tty; False if undecidable."""
    try:
        return stream.isatty()
    except ValueError as exc:
        # a closed/detached stream raises a ValueError mentioning I/O --
        # treat that as "not interactive"; anything else is unexpected
        if "I/O" in str(exc):
            return False
        raise
def is_interactive():
    """Return True if all in/outs are open and tty.

    Note that in a somewhat abnormal case where e.g. stdin is explicitly
    closed, and any operation on it would raise a
    `ValueError("I/O operation on closed file")` exception, this function
    would just return False, since the session cannot be used interactively.
    """
    streams = (sys.stdin, sys.stdout, sys.stderr)
    return all(map(_is_stream_tty, streams))
def get_ipython_shell():
    """Detect if running within IPython and returns its `ip` (shell) object

    Returns None if not under ipython (no `get_ipython` function)
    """
    try:
        # `get_ipython` is injected into builtins by IPython itself, so a
        # NameError here reliably means "not running under IPython"
        return get_ipython()
    except NameError:
        return None
def md5sum(filename):
    """Compute an MD5 sum for the given file
    """
    from datalad.support.digests import Digester
    digester = Digester(digests=['md5'])
    return digester(filename)['md5']
# unused in -core
def sorted_files(path):
    """Return a (sorted) list of files under path
    """
    collected = []
    for root, _, files in os.walk(path):
        # skip anything within git's own bookkeeping
        if '.git' in root:
            continue
        # store paths relative to `path` (strip it plus the separator)
        collected.extend(op.join(root, f)[len(path) + 1:] for f in files)
    return sorted(collected)
# directory separator as it must appear inside a regex (escaped on Windows)
_encoded_dirsep = r'\\' if on_windows else r'/'
# matches path components belonging to common VCS bookkeeping directories
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
    _encoded_dirsep, _encoded_dirsep)
# matches path components under datalad's own .datalad/ metadata directory
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
    _encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
    """Generator to find files matching regex

    Parameters
    ----------
    regex: basestring
    exclude: basestring, optional
      Matches to exclude
    exclude_vcs:
      If True, excludes commonly known VCS subdirectories. If string, used
      as regex to exclude those files (regex: `%r`)
    exclude_datalad:
      If True, excludes files known to be datalad meta-data files (e.g. under
      .datalad/ subdirectory) (regex: `%r`)
    topdir: basestring, optional
      Directory where to search
    dirs: bool, optional
      Whether to match directories as well as files
    """
    matcher = re.compile(regex)
    for root, subdirs, fnames in os.walk(topdir):
        candidates = (subdirs + fnames) if dirs else fnames
        # TODO: might want to uniformize on windows to use '/'
        for name in candidates:
            full = op.join(root, name)
            if not matcher.search(full):
                continue
            full = full.rstrip(sep)
            if exclude and re.search(exclude, full):
                continue
            if exclude_vcs and re.search(_VCS_REGEX, full):
                continue
            if exclude_datalad and re.search(_DATALAD_REGEX, full):
                continue
            yield full
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
    """Expand all variables and user handles in a path.

    By default return an absolute path
    """
    expanded = expandvars(expanduser(path))
    return abspath(expanded) if force_absolute else expanded
def posix_relpath(path, start=None):
    """Behave like os.path.relpath, but always return POSIX paths...

    on any platform."""
    # compute relpath in native form first (ntpath's relpath cannot take
    # start=None, hence the '' substitute), then re-join POSIX style
    native = relpath(path, start=start if start is not None else '')
    return posixpath.join(*split(native))
def is_explicit_path(path):
    """Return whether a path explicitly points to a location

    Any absolute path, or relative path starting with either '../' or
    './' is assumed to indicate a location on the filesystem. Any other
    path format is not considered explicit."""
    expanded = expandpath(path, force_absolute=False)
    if isabs(expanded):
        return True
    return expanded.startswith((os.curdir + os.sep, os.pardir + os.sep))
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
    """To make tree read-only or writable

    Parameters
    ----------
    path : string
      Path to the tree/directory to chmod
    ro : bool, optional
      Whether to make it R/O (default) or RW
    chmod_files : bool, optional
      Whether to operate also on files (not just directories)
    """
    def _chmod(target):
        mode = os.stat(target).st_mode
        if ro:
            os.chmod(target, mode & ~stat.S_IWRITE)
        else:
            os.chmod(target, mode | stat.S_IWRITE | stat.S_IREAD)

    for root, _, files in os.walk(path, followlinks=False):
        if chmod_files:
            for fname in files:
                fpath = op.join(root, fname)
                # a dangling symlink would make stat/chmod fail
                if exists(fpath):
                    _chmod(fpath)
        _chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
    """To remove git-annex .git it is needed to make all files and directories writable again first

    Recursively removes `path` (file, symlink, or directory tree), making the
    tree writable first so read-only annexed content does not block removal.

    Parameters
    ----------
    path: Path or str
      Path to remove
    chmod_files : string or bool, optional
      Whether to make files writable also before removal.  Usually it is just
      a matter of directories to have write permissions.
      If 'auto' it would chmod files on windows by default
    children_only : bool, optional
      If set, all files and subdirectories would be removed while the path
      itself (must be a directory) would be preserved
    `*args` :
    `**kwargs` :
      Passed into shutil.rmtree call
    """
    # Give W permissions back only to directories, no need to bother with files
    if chmod_files == 'auto':
        chmod_files = on_windows
    # TODO: yoh thinks that if we could quickly check our Flyweight for
    # repos if any of them is under the path, and could call .precommit
    # on those to possibly stop batched processes etc, we did not have
    # to do it on case by case
    # Check for open files
    assert_no_open_files(path)
    # TODO the whole thing should be reimplemented with pathlib, but for now
    # at least accept Path
    path = str(path)
    if children_only:
        if not isdir(path):
            raise ValueError("Can remove children only of directories")
        # remove each child recursively, keeping `path` itself
        for p in os.listdir(path):
            rmtree(op.join(path, p))
        return
    if not (islink(path) or not isdir(path)):
        # a real directory: make it writable, then remove the whole tree
        rotree(path, ro=False, chmod_files=chmod_files)
        if on_windows:
            # shutil fails to remove paths that exceed 260 characters on Windows machines
            # that did not enable long path support. A workaround to remove long paths
            # anyway is to preprend \\?\ to the path.
            # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
            path = r'\\?\ '.strip() + path
        # NOTE(review): `_rmtree` is defined elsewhere in this module --
        # presumably shutil's rmtree under an alias; confirm before refactoring
        _rmtree(path, *args, **kwargs)
    else:
        # just remove the symlink
        unlink(path)
def rmdir(path, *args, **kwargs):
    """os.rmdir with our optional checking for open files"""
    # raises/logs depending on DATALAD_ASSERT_NO_OPEN_FILES; no-op otherwise
    assert_no_open_files(path)
    os.rmdir(path)
def get_open_files(path, log_open=False):
    """Get open files under a path

    Note: This function is very slow on Windows.

    Parameters
    ----------
    path : str
      File or directory to check for open files under
    log_open : bool or int
      If set - logger level to use

    Returns
    -------
    dict
      open file path -> psutil.Process holding it open
    """
    # Original idea: https://stackoverflow.com/a/11115521/1265472
    import psutil
    files = {}
    # since the ones returned by psutil would not be aware of symlinks in the
    # path we should also get realpath for path
    # do absolute() in addition to always get an absolute path
    # even with non-existing paths on windows
    path = str(Path(path).resolve().absolute())
    for proc in psutil.process_iter():
        try:
            # a process "holds" both its open file handles and its cwd
            open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
            for p in open_paths:
                # note: could be done more efficiently so we do not
                # renormalize path over and over again etc
                if path_startswith(p, path):
                    files[p] = proc
        # Catch a race condition where a process ends
        # before we can examine its files
        except psutil.NoSuchProcess:
            pass
        except psutil.AccessDenied:
            pass

    if files and log_open:
        lgr.log(log_open, "Open files under %s: %s", path, files)
    return files
# Optional debugging aid: DATALAD_ASSERT_NO_OPEN_FILES turns
# assert_no_open_files() into a real check ('assert', 'pdb' or 'epdb'
# select the reaction); without it the function is a cheap no-op.
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
    def assert_no_open_files(path):
        # log_open=40 == logging.ERROR, so offenders always hit the log
        files = get_open_files(path, log_open=40)
        if _assert_no_open_files_cfg == 'assert':
            assert not files, "Got following files still open: %s" % ','.join(files)
        elif files:
            if _assert_no_open_files_cfg == 'pdb':
                import pdb
                pdb.set_trace()
            elif _assert_no_open_files_cfg == 'epdb':
                import epdb
                epdb.serve()
            pass
        # otherwise we would just issue that error message in the log
else:
    def assert_no_open_files(*args, **kwargs):
        pass
def rmtemp(f, *args, **kwargs):
    """Wrapper to centralize removing of temp files so we could keep them around

    It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
    environment variable is defined
    """
    if os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
        lgr.info("Keeping temp file: %s", f)
        return
    if not os.path.lexists(f):
        lgr.debug("Path %s does not exist, so can't be removed", f)
        return
    lgr.log(5, "Removing temp file: %s", f)
    # Can also be a directory
    if isdir(f):
        rmtree(f, *args, **kwargs)
    else:
        unlink(f)
def file_basename(name, return_ext=False):
    """Strip up to two short extensions from a path's basename.

    Each extension may be up to 5 characters and must start with a letter or
    underscore (not a digit), so compound suffixes like '.tar.gz' are removed
    as a whole.  With ``return_ext=True`` a ``(stem, ext)`` pair is returned.
    """
    bname = basename(name)
    stem = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
    if return_ext:
        return stem, bname[len(stem) + 1:]
    return stem
# unused in -core
def escape_filename(filename):
    """Surround filename in "" and escape " in the filename
    """
    escaped = filename.replace('"', r'\"').replace('`', r'\`')
    return '"{}"'.format(escaped)
# unused in -core
def encode_filename(filename):
    """Encode unicode filename
    """
    if not isinstance(filename, str):
        # already bytes (or something else) -- pass through untouched
        return filename
    return filename.encode(sys.getfilesystemencoding())
# unused in -core
def decode_input(s):
    """Given input string/bytes, decode according to stdin codepage (or UTF-8)
    if not defined

    If fails -- issue warning and decode allowing for errors
    being replaced
    """
    if isinstance(s, str):
        return s
    encoding = sys.stdin.encoding or 'UTF-8'
    try:
        return s.decode(encoding)
    except UnicodeDecodeError:
        lgr.warning(
            "Failed to decode input string using %s encoding. "
            "Decoding allowing for errors", encoding)
        return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
    def lmtime(filepath, mtime):
        """Set mtime for files. On Windows a merely adapter to os.utime
        """
        os.utime(filepath, (time.time(), mtime))
else:
    def lmtime(filepath, mtime):
        """Set mtime for files, while not de-referencing symlinks.

        To overcome absence of os.lutime

        Works only on linux and OSX ATM
        """
        from .cmd import WitlessRunner
        # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
        smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
        lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
        # touch -h adjusts the symlink itself rather than its target
        WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
        filepath = Path(filepath)
        rfilepath = filepath.resolve()
        if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also of the target file
            # since it seemed like downloading under OSX (was it using curl?)
            # didn't bother with timestamps
            lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
                    rfilepath, mtime)
            os.utime(str(rfilepath), (time.time(), mtime))
        # doesn't work on OSX
        # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
    """Return `obj` unchanged if it is a list/tuple, otherwise wrap it in a tuple."""
    return obj if isinstance(obj, (list, tuple)) else (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
    """Coerce `s` into an instance of iterable class `cls`.

    Parameters
    ----------
    s: list or anything
    cls: class
      Which iterable class to ensure
    copy: bool, optional
      If correct iterable is passed, it would generate its shallow copy
    iterate: bool, optional
      If it is not a list, but something iterable (but not a str)
      iterate over it.
    """
    if isinstance(s, cls):
        return shallow_copy(s) if copy else s
    if isinstance(s, str):
        # strings are iterable but treated as atomic values here
        return cls((s,))
    if iterate and hasattr(s, '__iter__'):
        return cls(s)
    if s is None:
        return cls()
    return cls((s,))
def ensure_list(s, copy=False, iterate=True):
    """Coerce `s` into a list; None becomes an empty list.

    Parameters
    ----------
    s: list or anything
    copy: bool, optional
      If list is passed, it would generate a shallow copy of the list
    iterate: bool, optional
      If it is not a list, but something iterable (but not a str)
      iterate over it.
    """
    return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
    """Split a multiline string into a list; return None for empty input.

    Parameters
    ----------
    s: str or list
    """
    if not s:
        return None
    return s if isinstance(s, list) else s.split(sep)
def ensure_dict_from_str(s, **kwargs):
    """Parse a multiline 'key=value' string into a dictionary.

    Parameters
    ----------
    s: str or dict

    Returns None if input s is empty
    """
    if not s:
        return None
    if isinstance(s, dict):
        return s

    out = {}
    for entry in ensure_list_from_str(s, **kwargs):
        if '=' not in entry:
            raise ValueError("{} is not in key=value format".format(repr(entry)))
        key, value = entry.split('=', 1)
        if key in out:
            err = "key {} was already defined in {}, but new value {} was provided".format(key, out, value)
            raise ValueError(err)
        out[key] = value
    return out
def ensure_bytes(s, encoding='utf-8'):
    """Encode a str to bytes; anything that isn't a str is passed through.

    Parameters
    ----------
    encoding: str, optional
      Encoding to use. "utf-8" is the default
    """
    return s.encode(encoding) if isinstance(s, str) else s
def ensure_unicode(s, encoding=None, confidence=None):
    """Convert/decode bytestring to unicode.

    If `s` isn't a bytestring, return it as is.

    Parameters
    ----------
    encoding: str, optional
      Encoding to use. If None, "utf-8" is tried, and then if not a valid
      UTF-8, encoding will be guessed
    confidence: float, optional
      A value between 0 and 1, so if guessing of encoding is of lower than
      specified confidence, ValueError is raised
    """
    if not isinstance(s, bytes):
        return s
    if encoding is not None:
        return s.decode(encoding)

    # no encoding given: utf-8 is our common target in contemporary
    # digital society, so try it first
    try:
        return s.decode('utf-8')
    except UnicodeDecodeError as exc:
        lgr.debug("Failed to decode a string as utf-8: %s",
                  CapturedException(exc))
    # And now we could try to guess
    from chardet import detect
    detected = detect(s)
    guessed = detected.get('encoding', None)
    if not guessed:
        raise ValueError(
            "Could not decode value as utf-8, or to guess its encoding: %s"
            % repr(s)
        )
    guessed_confidence = detected.get('confidence', 0)
    if confidence is not None and guessed_confidence < confidence:
        raise ValueError(
            "Failed to auto-detect encoding with high enough "
            "confidence. Highest confidence was %s for %s"
            % (guessed_confidence, guessed)
        )
    lgr.log(5, "Auto-detected encoding to be %s", guessed)
    return s.decode(guessed)
def ensure_bool(s):
    """Convert value into boolean following convention for strings

    to recognize on,True,yes as True, off,False,no as False
    """
    if not isinstance(s, str):
        return bool(s)
    if s.isdigit():
        return bool(int(s))
    lowered = s.lower()
    if lowered in {'y', 'yes', 'true', 'on'}:
        return True
    if lowered in {'n', 'no', 'false', 'off'}:
        return False
    raise ValueError("Do not know how to treat %r as a boolean" % s)
def as_unicode(val, cast_types=object):
    """Given an arbitrary value, would try to obtain unicode value of it

    For unicode it would return original value, for python2 str or python3
    bytes it would use ensure_unicode, for None - an empty (unicode) string,
    and for any other type (see `cast_types`) - would apply the unicode
    constructor. If value is not an instance of `cast_types`, TypeError
    is thrown

    Parameters
    ----------
    cast_types: type
      Which types to cast to unicode by providing to constructor
    """
    if val is None:
        return u''
    if isinstance(val, str):
        return val
    if isinstance(val, unicode_srctypes):
        return ensure_unicode(val)
    if isinstance(val, cast_types):
        return str(val)
    raise TypeError(
        "Value %r is not of any of known or provided %s types"
        % (val, cast_types))
def unique(seq, key=None, reverse=False):
    """Deduplicate a sequence while preserving order of first occurrence.

    Based on the fastest known approaches, see
    https://www.peterbe.com/plog/uniqifiers-benchmark and
    http://stackoverflow.com/a/480227/1265472

    Parameters
    ----------
    seq:
      Sequence to analyze
    key: callable, optional
      Function to call on each element so we could decide not on a full
      element, but on its member etc
    reverse: bool, optional
      If True, uniqueness checked in the reverse order, so that the later ones
      will take the order
    """
    observed = set()
    remember = observed.add
    ordered = reversed(seq) if reverse else seq
    if key is None:
        deduped = [x for x in ordered if not (x in observed or remember(x))]
    else:
        # key() is evaluated twice per element; fine for our use cases
        deduped = [x for x in ordered
                   if not (key(x) in observed or remember(key(x)))]
    return deduped[::-1] if reverse else deduped
def all_same(items):
    """Quick check if all items are the same.

    Identical to a check like len(set(items)) == 1 but
    should be more efficient while working on generators, since would
    return False as soon as any difference detected thus possibly avoiding
    unnecessary evaluations
    """
    iterator = iter(items)
    try:
        benchmark = next(iterator)
    except StopIteration:
        # empty input -- not "all same", matching historical semantics
        return False
    for item in iterator:
        if item != benchmark:
            return False
    return True
def map_items(func, v):
    """Apply `func` to every key and value of a mapping, preserving the types.

    No type checking of values passed to func is done, so `func`
    should be resilient to values which it should not handle

    Initial usecase - apply_recursive(url_fragment, ensure_unicode)
    """
    mapped_pairs = (pair.__class__(map(func, pair)) for pair in v.items())
    return v.__class__(mapped_pairs)
def partition(items, predicate=bool):
    """Partition `items` by `predicate`.

    Parameters
    ----------
    items : iterable
    predicate : callable
      A function that will be mapped over each element in `items`. The
      elements will partitioned based on whether the return value is false or
      true.

    Returns
    -------
    A tuple with two generators, the first for 'false' items and the second for
    'true' ones.

    Notes
    -----
    Taken from Peter Otten's snippet posted at
    https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
    """
    # tee lets both branches consume the (predicate, item) stream once each
    branch_a, branch_b = tee((predicate(item), item) for item in items)
    falsy = (item for passed, item in branch_a if not passed)
    truthy = (item for passed, item in branch_b if passed)
    return falsy, truthy
def generate_chunks(container, size):
    """Yield successive slices of `container`, each of length up to `size`.
    """
    assert size > 0, "Size should be non-0 positive"
    remaining = container
    while remaining:
        yield remaining[:size]
        remaining = remaining[size:]
def generate_file_chunks(files, cmd=None):
    """Chunk a list of files so each chunk stays within the cmdline length limit.

    Parameters
    ----------
    files: list of str
    cmd: str or list of str, optional
      Command to account for as well
    """
    files = ensure_list(files)
    cmd = ensure_list(cmd)

    longest = max(map(len, files)) if files else 0
    # budget: total limit minus the command itself (each part plus quoting
    # and a space) minus 4 for a potential '--' separator; never below one
    # file per chunk -- if even that blows, it is not our fault
    cmd_len = sum(len(part) + 3 for part in cmd)
    per_file = longest + 3  # +3 for possible quotes and a space
    chunk_size = max(1, (CMD_MAX_ARG - cmd_len - 4) // per_file)
    # TODO: additional treatment for "too many arguments"? although
    # as https://github.com/datalad/datalad/issues/1883#issuecomment
    # -436272758
    # shows there seems to be no hardcoded limit on # of arguments,
    # but may be we decide to go for smth like follow to be on safe side
    # chunk_size = min(10240 - len(cmd), chunk_size)
    return generate_chunks(files, chunk_size)
#
# Generators helpers
#
def saved_generator(gen):
    """Return two generators: one consuming `gen`, one replaying what was seen.

    The first generator walks the original one while recording every item;
    the second one yields the items recorded so far.
    """
    cache = []

    def consumer():
        for item in gen:  # iterating over original generator
            cache.append(item)
            yield item

    def replayer():
        yield from cache

    return consumer(), replayer()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
# The alias is kept so existing users of `better_wraps` keep working.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
    """allows a decorator to take optional positional and keyword arguments.

    Assumes that taking a single, callable, positional argument means that
    it is decorating a function, i.e. something like this::

        @my_decorator
        def function(): pass

    Calls decorator with decorator(f, `*args`, `**kwargs`)"""

    @better_wraps(decorator)
    def wrapper(*args, **kwargs):
        def apply(func):
            return decorator(func, *args, **kwargs)

        # a lone callable positional argument means bare-decorator usage
        if not kwargs and len(args) == 1 and isinstance(args[0], Callable):
            target, args = args[0], []
            return apply(target)
        return apply
    return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
    """Updates kwargs to be passed to tempfile. calls depending on env vars
    """
    # never mutate the caller's dict
    kw = {} if tkwargs is None else tkwargs.copy()

    if 'prefix' not in kw:
        parts = ['datalad_temp']
        if prefix:
            parts.append(prefix)
        # append the wrapped test's name where available (not on Windows,
        # to keep paths short)
        parts.append('' if (on_windows or not wrapped) else wrapped.__name__)
        kw['prefix'] = '_'.join(parts)

    tmpdir = os.environ.get('TMPDIR')
    if tmpdir and 'dir' not in kw:
        kw['dir'] = tmpdir

    return kw
@optional_args
def line_profile(func):
    """Q&D helper to line profile the function and spit out stats
    """
    # third-party profiler; only needed when this decorator is actually used
    import line_profiler
    prof = line_profiler.LineProfiler()

    @wraps(func)
    def _wrap_line_profile(*args, **kwargs):
        try:
            pfunc = prof(func)
            return pfunc(*args, **kwargs)
        finally:
            # stats are printed even if the profiled call raised
            prof.print_stats()

    return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
    """Figure out methods which call the method repeatedly on the same instance

    Use case(s):
    - .repo is expensive since does all kinds of checks.
    - .config is expensive transitively since it calls .repo each time

    TODO:
    - fancy one could look through the stack for the same id(self) to see if
      that location is already in memo. That would hint to the cases where object
      is not passed into underlying functions, causing them to redo the same work
      over and over again
    - ATM might flood with all "1 lines" calls which are not that informative.
      The underlying possibly suboptimal use might be coming from their callers.
      It might or not relate to the previous TODO
    """
    from collections import defaultdict
    import traceback
    from time import time
    # (id(self), "relpath:caller") -> {lineno: call count}
    memo = defaultdict(lambda: defaultdict(int))  # it will be a dict of lineno: count
    # gross timing
    times = []
    toppath = dirname(__file__) + sep

    @wraps(func)
    def _wrap_collect_method_callstats(*args, **kwargs):
        try:
            self = args[0]
            stack = traceback.extract_stack()
            caller = stack[-2]
            stack_sig = \
                "{relpath}:{s.name}".format(
                    s=caller, relpath=relpath(caller.filename, toppath))
            sig = (id(self), stack_sig)
            # we will count based on id(self) + wherefrom
            memo[sig][caller.lineno] += 1
            t0 = time()
            return func(*args, **kwargs)
        finally:
            # NOTE(review): if anything above raises before t0 is assigned,
            # this references an unbound t0 (NameError) -- confirm intended
            times.append(time() - t0)
            pass

    def print_stats():
        print("The cost of property {}:".format(func.__name__))
        if not memo:
            print("None since no calls")
            return
        # total count
        counts = {k: sum(v.values()) for k,v in memo.items()}
        total = sum(counts.values())
        ids = {self_id for (self_id, _) in memo}
        print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
              .format(total, len(ids), len(memo), sum(times)))
        # now we need to sort by value
        for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
            print(" {} {}: {} from {} lines"
                  .format(self_id, caller, count, len(memo[(self_id, caller)])))

    # Upon total exit we print the stats
    import atexit
    atexit.register(print_stats)

    return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
    """Assure that function never fails -- all exceptions are caught

    Returns `None` if function fails internally.
    """
    @wraps(f)
    def wrapped_func(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            lgr.warning(
                "DataLad internal failure while running %s: %r. "
                "Please report at https://github.com/datalad/datalad/issues"
                % (f, e)
            )

    # escape hatch for debugging: let failures propagate unwrapped
    return f if os.environ.get('DATALAD_ALLOW_FAIL', False) else wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
    """No-op context manager, to programmatically switch context managers"""
    yield None
@contextmanager
def swallow_outputs():
    """Context manager to help consuming both stdout and stderr, and print()
    stdout is available as cm.out and stderr as cm.err whenever cm is the
    yielded context manager.
    Internally uses temporary files to guarantee absent side-effects of swallowing
    into StringIO which lacks .fileno.
    print mocking is necessary for some uses where sys.stdout was already bound
    to original sys.stdout, thus mocking it later had no effect. Overriding
    print function had desired effect
    """
    class StringIOAdapter(object):
        """Little adapter to help getting out/err values
        """
        def __init__(self):
            # real temporary files (not StringIO) so that .fileno() keeps
            # working for code/subprocesses using the redirected handles
            kw = get_tempfile_kwargs({}, prefix="outputs")
            self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
            self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
        def _read(self, h):
            # re-open by name to read back what was written via the handle
            with open(h.name) as f:
                return f.read()
        @property
        def out(self):
            # captured stdout content accumulated so far
            if not self._out.closed:
                self._out.flush()
            return self._read(self._out)
        @property
        def err(self):
            # captured stderr content accumulated so far
            if not self._err.closed:
                self._err.flush()
            return self._read(self._err)
        @property
        def handles(self):
            # the (stdout, stderr) replacement file objects
            return self._out, self._err
        def cleanup(self):
            self._out.close()
            self._err.close()
            out_name = self._out.name
            err_name = self._err.name
            from datalad import cfg
            # optionally replay the swallowed output into the debug log
            if cfg.getbool('datalad.log', 'outputs', default=False) \
                    and lgr.getEffectiveLevel() <= logging.DEBUG:
                for s, sname in ((self.out, 'stdout'),
                                 (self.err, 'stderr')):
                    if s:
                        pref = os.linesep + "| "
                        lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
                    else:
                        lgr.debug("Nothing was swallowed for %s", sname)
            # drop our references and collect before removing the temp files,
            # to make removal robust (e.g. on Windows open handles block it)
            del self._out
            del self._err
            gc.collect()
            rmtemp(out_name)
            rmtemp(err_name)
    def fake_print(*args, **kwargs):
        # mimic print()'s signature but route through the mocked stdout
        sep = kwargs.pop('sep', ' ')
        end = kwargs.pop('end', '\n')
        file = kwargs.pop('file', sys.stdout)
        if file in (oldout, olderr, sys.stdout, sys.stderr):
            # we mock
            try:
                sys.stdout.write(sep.join(args) + end)
            except UnicodeEncodeError as exc:
                lgr.error(
                    "Failed to write to mocked stdout, got %s, continue as it "
                    "didn't happen", exc)
        else:
            # must be some other file one -- leave it alone
            oldprint(*args, sep=sep, end=end, file=file)
    from .ui import ui
    # preserve -- they could have been mocked already
    oldprint = getattr(builtins, 'print')
    oldout, olderr = sys.stdout, sys.stderr
    olduiout = ui.out
    adapter = StringIOAdapter()
    try:
        sys.stdout, sys.stderr = adapter.handles
        ui.out = adapter.handles[0]
        setattr(builtins, 'print', fake_print)
        yield adapter
    finally:
        # restore the original streams, ui output handle and print()
        sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
        setattr(builtins, 'print', oldprint)
        adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
    """Context manager to consume all logs.

    Parameters
    ----------
    new_level : int or str, optional
      Level to set on the logger while swallowing (str is resolved via
      ``getattr(logging, new_level)``).
    file_ : str, optional
      If given, log into this file (appended) instead of a temporary one.
    name : str, optional
      Name of the logger to swallow (default: 'datalad').
    """
    lgr = logging.getLogger(name)
    # Keep old settings
    old_level = lgr.level
    old_handlers = lgr.handlers
    # Let's log everything into a string
    # TODO: generalize with the one for swallow_outputs
    class StringIOAdapter(object):
        """Little adapter to help getting out values
        And to stay consistent with how swallow_outputs behaves
        """
        def __init__(self):
            if file_ is None:
                kw = get_tempfile_kwargs({}, prefix="logs")
                self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
            else:
                out_file = file_
                # PY3 requires clearly one or another. race condition possible
                self._out = open(out_file, 'a')
            # holds the final content after cleanup() closed the file
            self._final_out = None
        def _read(self, h):
            # re-open by name to read back what was written via the handle
            with open(h.name) as f:
                return f.read()
        @property
        def out(self):
            # swallowed log content so far (or the final snapshot)
            if self._final_out is not None:
                # we closed and cleaned up already
                return self._final_out
            else:
                self._out.flush()
                return self._read(self._out)
        @property
        def lines(self):
            # swallowed content split into individual lines
            return self.out.split('\n')
        @property
        def handle(self):
            # the raw file object log records get streamed into
            return self._out
        def cleanup(self):
            # store for access while object exists
            self._final_out = self.out
            self._out.close()
            out_name = self._out.name
            del self._out
            gc.collect()
            # only remove files we created ourselves
            if not file_:
                rmtemp(out_name)
        def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
            """Provide assertion on whether a msg was logged at a given level

            If neither `msg` nor `level` provided, checks if anything was logged
            at all.

            Parameters
            ----------
            msg: str, optional
              Message (as a regular expression, if `regex`) to be searched.
              If no msg provided, checks if anything was logged at a given level.
            level: str, optional
              String representing the level to be logged
            regex: bool, optional
              If False, regular `assert_in` is used
            **kwargs: str, optional
              Passed to `assert_re_in` or `assert_in`
            """
            from datalad.tests.utils import assert_re_in
            from datalad.tests.utils import assert_in
            if regex:
                match = r'\[%s\] ' % level if level else r"\[\S+\] "
            else:
                match = '[%s] ' % level if level else ''
            if msg:
                match += msg
            if match:
                (assert_re_in if regex else assert_in)(match, self.out, **kwargs)
            else:
                assert not kwargs, "no kwargs to be passed anywhere"
                assert self.out, "Nothing was logged!?"
    adapter = StringIOAdapter()
    # TODO: it does store messages but without any formatting, i.e. even without
    # date/time prefix etc. IMHO it should preserve formatting in case if file_ is
    # set
    swallow_handler = logging.StreamHandler(adapter.handle)
    # we want to log levelname so we could test against it
    swallow_handler.setFormatter(
        logging.Formatter('[%(levelname)s] %(message)s'))
    # inherit filters from the handlers being replaced
    swallow_handler.filters = sum([h.filters for h in old_handlers],
                                  [])
    lgr.handlers = [swallow_handler]
    if old_level < logging.DEBUG:  # so if HEAVYDEBUG etc -- show them!
        lgr.handlers += old_handlers
    if isinstance(new_level, str):
        new_level = getattr(logging, new_level)
    if new_level is not None:
        lgr.setLevel(new_level)
    try:
        yield adapter
        # TODO: if file_ and there was an exception -- most probably worth logging it?
        # although ideally it should be the next log outside added to that file_ ... oh well
    finally:
        # restore handlers and level, then dispose of the capture file
        lgr.handlers = old_handlers
        lgr.setLevel(old_level)
        adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
    """context manager to temporarily disable logging

    This is to provide one of swallow_logs' purposes without unnecessarily
    creating temp files (see gh-1865)

    Parameters
    ----------
    logger: Logger
        Logger whose handlers will be ordered to not log anything.
        Default: datalad's topmost Logger ('datalad')
    """
    class NullFilter(logging.Filter):
        """Filter that rejects every record"""
        def filter(self, record):
            return 0
    if logger is None:
        # default: all of datalad's logging:
        logger = logging.getLogger('datalad')
    squelch = NullFilter(logger.name)
    for handler in logger.handlers:
        handler.addFilter(squelch)
    try:
        yield logger
    finally:
        for handler in logger.handlers:
            handler.removeFilter(squelch)
#
# Additional handlers
#
# Preserve the interpreter's original excepthook so it could be inspected or
# restored after setup_exceptionhook() replaces it
_sys_excepthook = sys.excepthook  # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
    """Overloads default sys.excepthook with our exceptionhook handler.

    If interactive, our exceptionhook handler will invoke
    pdb.post_mortem; if not interactive, then invokes default handler.
    """
    def _datalad_pdb_excepthook(type, value, tb):
        # always render the traceback first
        import traceback
        traceback.print_exception(type, value, tb)
        print()
        # drop into the debugger only when attached to a terminal
        if is_interactive():
            import pdb
            pdb.post_mortem(tb)
    if not ipython:
        sys.excepthook = _datalad_pdb_excepthook
        return
    from IPython.core import ultratb
    sys.excepthook = ultratb.FormattedTB(
        mode='Verbose',
        call_pdb=is_interactive())
def ensure_dir(*args):
    """Make sure directory exists.

    Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it not exists yet.

    Parameters
    ----------
    *args : str
      Path components to be joined with `os.path.join`.

    Returns
    -------
    str
      Path of the (now guaranteed to exist) directory.
    """
    dirname = op.join(*args)
    # exist_ok=True avoids the TOCTOU race of a separate exists() check when
    # several processes create the same directory concurrently; it still
    # raises if the path exists but is not a directory
    os.makedirs(dirname, exist_ok=True)
    return dirname
def updated(d, update):
    """Return a shallow copy of `d` with `update` applied on top

    Primarily for updating dictionaries; uses ``d.copy()`` so the mapping
    type of the input is preserved.
    """
    merged = d.copy()
    merged.update(update)
    return merged
# Cached decision for getpwd(): None (not decided yet), 'PWD' (trust the PWD
# env variable) or 'cwd' (always use os.getcwd())
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
    """Permanently switch getpwd() to plain os.getcwd() mode, logging why"""
    global _pwd_mode
    _pwd_mode = 'cwd'
    lgr.debug(
        msg + ". From now on will be returning os.getcwd(). Directory"
        " symlinks in the paths will be resolved",
        *args
    )
    # TODO: we might want to mitigate by going through all flywheighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
    """Try to return a CWD without dereferencing possible symlinks

    This function will try to use PWD environment variable to provide a current
    working directory, possibly with some directories along the path being
    symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
    it, thus `os.getcwd()` returns path with links dereferenced.

    While returning current working directory based on PWD env variable we
    verify that the directory is the same as `os.getcwd()` after resolving all
    symlinks. If that verification fails, we fall back to always use
    `os.getcwd()`.

    Initial decision to either use PWD env variable or os.getcwd() is done upon
    the first call of this function.

    Returns
    -------
    str
      Current working directory (symlinks possibly unresolved in 'PWD' mode).
    """
    global _pwd_mode
    if _pwd_mode is None:
        # we need to decide!
        try:
            pwd = os.environ['PWD']
            if on_windows and pwd and pwd.startswith('/'):
                # It should be a path from MSYS.
                # - it might start with a drive letter or not
                # - it seems to be "illegal" to have a single letter directories
                #   under / path, i.e. if created - they aren't found
                # - 'ln -s' does not fail to create a "symlink" but it just
                # copies!
                # so we are not likely to need original PWD purpose on
                # those systems
                # Verdict:
                _pwd_mode = 'cwd'
            else:
                _pwd_mode = 'PWD'
        except KeyError:
            # no PWD in the environment -- nothing to trust
            _pwd_mode = 'cwd'
    if _pwd_mode == 'cwd':
        return os.getcwd()
    elif _pwd_mode == 'PWD':
        try:
            cwd = os.getcwd()
        except OSError as exc:
            if "o such file" in str(exc):
                # directory was removed but we promised to be robust and
                # still report the path we might know since we are still in PWD
                # mode
                cwd = None
            else:
                raise
        try:
            pwd = os.environ['PWD']
            # do absolute() in addition to always get an absolute path
            # even with non-existing paths on windows
            pwd_real = str(Path(pwd).resolve().absolute())
            # This logic would fail to catch the case where chdir did happen
            # to the directory where current PWD is pointing to, e.g.
            # $> ls -ld $PWD
            # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
            # hopa:~/.tmp/tmp
            # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
            # ('/home/yoh/.tmp/tmp', '/tmp')
            # but I guess that should not be too harmful
            if cwd is not None and pwd_real != cwd:
                # PWD disagrees with the real cwd -- stop trusting it for good
                _switch_to_getcwd(
                    "realpath of PWD=%s is %s whenever os.getcwd()=%s",
                    pwd, pwd_real, cwd
                )
                return cwd
            return pwd
        except KeyError:
            _switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but may be someone
                        # evil purges PWD from environ?
    else:
        raise RuntimeError(
            "Must have not got here. "
            "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
        )
class chpwd(object):
    """Wrapper around os.chdir which also adjusts environ['PWD']

    The reason is that otherwise PWD is simply inherited from the shell
    and we have no ability to assess directory path without dereferencing
    symlinks.

    If used as a context manager it allows to temporarily change directory
    to the given path
    """
    def __init__(self, path, mkdir=False, logsuffix=''):
        """
        Parameters
        ----------
        path : str or None
          Directory to change into; a "false" value makes this a no-op.
        mkdir : bool, optional
          Create `path` first if it does not exist yet.
        logsuffix : str, optional
          Additional text appended to the debug log message.
        """
        if path:
            pwd = getpwd()
            self._prev_pwd = pwd
        else:
            # no-op instance -- __exit__ will not chdir back anywhere
            self._prev_pwd = None
            return
        if not isabs(path):
            # resolve relative paths against the symlink-aware cwd
            path = normpath(op.join(pwd, path))
        if not os.path.exists(path) and mkdir:
            self._mkdir = True
            os.mkdir(path)
        else:
            self._mkdir = False
        lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
        os.chdir(path)  # for grep people -- ok, to chdir here!
        os.environ['PWD'] = str(path)
    def __enter__(self):
        # nothing more to do really, chdir was in the constructor
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._prev_pwd:
            # Need to use self.__class__ so this instance, if the entire
            # thing mocked during the test, still would use correct chpwd
            self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
    """Symlinks-in-the-cwd aware abspath

    os.path.abspath relies on os.getcwd() which would not know about symlinks
    in the path

    TODO: we might want to norm=True by default to match behavior of
    os .path.abspath?
    """
    # relative paths are anchored at the symlink-aware cwd
    abs_path = path if isabs(path) else op.join(getpwd(), path)
    if norm:
        return normpath(abs_path)
    return abs_path
def with_pathsep(path):
    """Little helper to guarantee that path ends with /"""
    if path.endswith(sep):
        return path
    return path + sep
def get_path_prefix(path, pwd=None):
    """Get path prefix (for current directory)

    Returns relative path to the topdir, if we are under topdir, and if not
    absolute path to topdir. If `pwd` is not specified - current directory
    assumed
    """
    pwd = pwd or getpwd()
    path = dlabspath(path)
    path_, pwd_ = with_pathsep(path), with_pathsep(pwd)
    # with trailing separators appended, one location contains the other
    # iff one string is a prefix of the other
    if path_.startswith(pwd_) or pwd_.startswith(path_):
        # we are in subdir or above the path = use relative path
        prefix = relpath(path, pwd)
        # a benign "here" is reported as an empty prefix
        return '' if prefix in (curdir, curdir + sep) else prefix
    # unrelated locations -- report the absolute path
    return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
    """Return True if path starts with prefix path

    Parameters
    ----------
    path: str
    prefix: str

    Returns
    -------
    bool
    """
    # normalization appends trailing separators so that 'abc' is not
    # considered to start with path prefix 'ab'
    path, prefix = _get_normalized_paths(path, prefix)
    return path.startswith(prefix)
def path_is_subpath(path, prefix):
    """Return True if path is a subpath of prefix

    It will return False if path == prefix.

    Parameters
    ----------
    path: str
    prefix: str

    Returns
    -------
    bool
    """
    path, prefix = _get_normalized_paths(path, prefix)
    # strict containment: equal (normalized) paths have equal length
    return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
    """Returns whether at a given path there is information about an annex

    It is just a thin wrapper around GitRepo.is_with_annex() classmethod
    which also checks for `path` to exist first.

    This includes actually present annexes, but also uninitialized ones, or
    even the presence of a remote annex branch.

    Parameters
    ----------
    path : str
      Path to inspect.

    Returns
    -------
    bool
    """
    from os.path import exists
    if not exists(path):
        # lazy %-style args: the message is only rendered if actually emitted
        lgr.debug("No annex: test path %s doesn't exist", path)
        return False
    from datalad.support.gitrepo import GitRepo
    return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
    """Helper class to provide a temporary file name and remove it at the end (context manager)

    Parameters
    ----------
    mkdir : bool, optional (default: False)
      If True, temporary directory created using tempfile.mkdtemp()
    content : str or bytes, optional
      Content to be stored in the file created
    wrapped : function, optional
      If set, function name used to prefix temporary file name
    `**tkwargs`:
      All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and resultant temporary filename is passed as the first argument into
      the function t. If no 'prefix' argument is provided, it will be
      constructed using module and function names ('.' replaced with
      '_').

    To change the used directory without providing keyword argument 'dir' set
    DATALAD_TESTS_TEMP_DIR.

    Examples
    --------
    >>> from os.path import exists
    >>> from datalad.utils import make_tempfile
    >>> with make_tempfile() as fname:
    ...    k = open(fname, 'w').write('silly test')
    >>> assert not exists(fname)  # was removed
    >>> with make_tempfile(content="blah") as fname:
    ...    assert open(fname).read() == "blah"
    """
    if tkwargs.get('mkdir', None) and content is not None:
        raise ValueError("mkdir=True while providing content makes no sense")
    tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
    # if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
    # let mktemp handle it otherwise. However, an explicitly provided
    # dir=... will override this.
    mkdir = tkwargs_.pop('mkdir', False)
    # pick file vs directory creator based on the mkdir flag
    filename = {False: tempfile.mktemp,
                True: tempfile.mkdtemp}[mkdir](**tkwargs_)
    # MIH: not clear to me why we need to perform this (possibly expensive)
    # resolve. It was already part of the original implementation
    # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
    filename = Path(filename).resolve()
    if content:
        # write bytes or text depending on the content type
        (filename.write_bytes
         if isinstance(content, bytes)
         else filename.write_text)(content)
    # TODO globbing below can also be done with pathlib
    filename = str(filename)
    if __debug__:
        lgr.debug(
            'Created temporary %s named %s',
            'directory' if mkdir else 'file',
            filename)
    try:
        yield filename
    finally:
        # glob here for all files with the same name (-suffix)
        # would be useful whenever we requested .img filename,
        # and function creates .hdr as well
        # MIH: this is undocumented behavior, and undesired in the general
        # case. it should be made conditional and explicit
        lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = lsuffix and filename[:-lsuffix] or filename
        filenames = glob.glob(filename_ + '*')
        # safety valve: never mass-remove on a too-short or too-broad match
        if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already ones ;-)
            lgr.warning("It is unlikely that it was intended to remove all"
                        " files matching %r. Skipping" % filename_)
            return
        for f in filenames:
            try:
                rmtemp(f)
            except OSError:  # pragma: no cover
                pass
def _path_(*p):
    """Given a path in POSIX" notation, regenerate one in native to the env one"""
    if not on_windows:
        # Assume all other platforms are POSIX compliant -- join as-is
        return op.join(*p)
    # translate each component's '/' separators into native ones
    native = [op.join(*part.split('/')) for part in p]
    return op.join(*native)
def get_timestamp_suffix(time_=None, prefix='-'):
    """Return a time stamp (full date and time up to second)

    primarily to be used for generation of log files names
    """
    fmt = prefix + TIMESTAMP_FMT
    if time_ is None:
        # no explicit time given -- use the current local time
        return time.strftime(fmt)
    if isinstance(time_, int):
        # integers are taken as epoch seconds, rendered in UTC
        time_ = time.gmtime(time_)
    return time.strftime(fmt, time_)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
    """Return a filename to use for logging under a dataset/repository

    directory would be created if doesn't exist, but dspath must exist
    and be a directory

    Parameters
    ----------
    dspath : str
      Existing dataset/repository directory.
    cmd : str, optional
      Currently unused; kept for interface compatibility.

    Returns
    -------
    str
      Path of the log file to be used.

    Raises
    ------
    ValueError
      If `dspath` does not exist or is not a directory.
    """
    # validate explicitly instead of `assert`, which is stripped under -O
    if not exists(dspath):
        raise ValueError("%r does not exist" % (dspath,))
    if not isdir(dspath):
        raise ValueError("%r is not a directory" % (dspath,))
    ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs')  # TODO: use WEB_META_LOG whenever #789 merged
    return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
    """Return the trace/path to reach a node in a tree.

    Parameters
    ----------
    edges : sequence(2-tuple)
      The tree given by a sequence of edges (parent, child) tuples. The
      nodes can be identified by any value and data type that supports
      the '==' operation.
    start :
      Identifier of the start node. Must be present as a value in the parent
      location of an edge tuple in order to be found.
    end :
      Identifier of the target/end node. Must be present as a value in the child
      location of an edge tuple in order to be found.
    trace : list
      Mostly useful for recursive calls, and used internally.

    Returns
    -------
    None or list
      Returns a list with the trace to the target (the starts and the target
      are not included in the trace, hence if start and end are directly connected
      an empty list is returned), or None when no trace to the target can be found,
      or start and end are identical.
    """
    # the term trace is used to avoid confusion with a path in the sense
    # of a filesystem path, but the analogy fits and nodes can be paths
    if trace is None:
        trace = []
    if not edges:
        raise ValueError("no edges given")
    # any usable edge must lead away from the current tip of the trace
    tip = trace[-1] if trace else start
    for parent, child in edges:
        if child in trace:
            # only DAGs, skip any cyclic traces
            continue
        if parent != tip:
            # only consider edges that extend the end of the trace
            continue
        if child == end:
            return trace
        # dive into potential subnodes
        deeper = get_trace(edges, start, end, trace + [child])
        if deeper:
            return deeper
    return None
def get_dataset_root(path):
    """Return the root of an existent dataset containing a given path

    The root path is returned in the same absolute or relative form
    as the input argument. If no associated dataset exists, or the
    input path doesn't exist, None is returned.

    If `path` is a symlink or something other than a directory, its
    the root dataset containing its parent directory will be reported.
    If none can be found, at a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.

    Parameters
    ----------
    path : Path-like

    Returns
    -------
    str or None
    """
    path = str(path)
    marker = '.git'
    # non-directories (and symlinks) are assessed via their parent directory;
    # remember the original candidate for the final symlink check
    candidate_link = None
    if islink(path) or not isdir(path):
        candidate_link = path
        path = dirname(path)
    # walk upwards until the filesystem root is reached
    abs_probe = abspath(path)
    while split(abs_probe)[1]:
        if exists(op.join(path, marker)):
            return path
        # continue with the parent, expressed in the input's form
        path = normpath(op.join(path, os.pardir))
        abs_probe = abspath(path)
    # a symlink at the original path may itself point at a dataset root
    if candidate_link and exists(op.join(candidate_link, marker)):
        return candidate_link
    return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
    """Call f multiple times making exponentially growing delay between the calls"""
    for attempt in range(1, ntrials + 1):
        try:
            return f(*args, **kwargs)
        except exception as exc:
            if attempt == ntrials:
                # out of trials -- propagate the last failure
                raise
            delay = base ** attempt
            lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
                        CapturedException(exc), attempt, delay)
            sleep(delay)
@optional_args
def try_multiple_dec(
        f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
        exceptions_filter=None,
        logger=None,
):
    """Decorator to try function multiple times.

    Main purpose is to decorate functions dealing with removal of files/directories
    and which might need a few seconds to work correctly on Windows which takes
    its time to release files/directories.

    Parameters
    ----------
    ntrials: int, optional
    duration: float, optional
      Seconds to sleep before retrying.
    increment_type: {None, 'exponential'}
      Note that if it is exponential, duration should typically be > 1.0
      so it grows with higher power
    exceptions: Exception or tuple of Exceptions, optional
      Exception or a tuple of multiple exceptions, on which to retry
    exceptions_filter: callable, optional
      If provided, this function will be called with a caught exception
      instance. If function returns True - we will re-try, if False - exception
      will be re-raised without retrying.
    logger: callable, optional
      Logger to log upon failure. If not provided, will use stock logger
      at the level of 5 (heavy debug).
    """
    if not exceptions:
        # WindowsError only exists on Windows; the conditional expression is
        # evaluated lazily, so this is safe on other platforms
        exceptions = (OSError, WindowsError, PermissionError) \
            if on_windows else OSError
    if not ntrials:
        # Life goes fast on proper systems, no need to delay it much
        ntrials = 100 if on_windows else 10
    if logger is None:
        def logger(*args, **kwargs):
            # level 5 == "heavy debug"
            return lgr.log(5, *args, **kwargs)
    assert increment_type in {None, 'exponential'}
    @wraps(f)
    def _wrap_try_multiple_dec(*args, **kwargs):
        # current sleep duration; possibly grown exponentially per trial
        t = duration
        for trial in range(ntrials):
            try:
                return f(*args, **kwargs)
            except exceptions as exc:
                if exceptions_filter and not exceptions_filter(exc):
                    # filter vetoed the retry -- propagate immediately
                    raise
                if trial < ntrials - 1:
                    if increment_type == 'exponential':
                        t = duration ** (trial + 1)
                    logger(
                        "Caught %s on trial #%d. Sleeping %f and retrying",
                        CapturedException(exc), trial, t)
                    sleep(t)
                else:
                    # out of trials -- propagate the last failure
                    raise
    return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
    """'Robust' unlink. Would try multiple times

    On windows boxes there is evidence for a latency of more than a second
    until a file is considered no longer "in-use".
    WindowsError is not known on Linux, and if IOError or any other
    exception
    is thrown then if except statement has WindowsError in it -- NameError
    also see gh-2533

    Parameters
    ----------
    f : str
      Path of the file to remove.
    """
    # Check for open files
    assert_no_open_files(f)
    return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
    """Just a helper to decorate shutil.rmtree.

    rmtree defined above does more and ideally should not itself be decorated
    since a recursive definition and does checks for open files inside etc -
    might be too runtime expensive
    """
    # all arguments are passed through to shutil.rmtree unchanged
    return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
    """Join two strings with a '/', avoiding duplicate slashes

    If any of the strings is None the other is returned as is.
    """
    if base is None:
        return extension
    if extension is None:
        return base
    # strip adjoining slashes before inserting exactly one
    return base.rstrip('/') + '/' + extension.lstrip('/')
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
    """Return a file object in read mode with auto-detected encoding

    This is helpful when dealing with files of unknown encoding.

    Parameters
    ----------
    fname: str
      Name of the file to open.
    readahead: int, optional
      How many bytes to read for guessing the encoding type. If
      negative - full file will be read

    Returns
    -------
    io.TextIOBase
      File object opened for reading with the detected encoding.
    """
    from chardet import detect
    import io
    # read some bytes from the file
    with open(fname, 'rb') as f:
        head = f.read(readahead)
    enc = detect(head)
    # chardet may report None when it cannot decide; open() then falls back
    # to the platform default encoding
    denc = enc.get('encoding', None)
    lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
              denc,
              fname,
              enc.get('confidence', 'unknown'))
    return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
    """A helper to read file passing content via ensure_unicode

    Parameters
    ----------
    decode: bool, optional
      if False, no ensure_unicode and file content returned as bytes
    """
    with open(fname, 'rb') as f:
        raw = f.read()
    if not decode:
        return raw
    return ensure_unicode(raw)
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
    """A generator of dict records from a CSV/TSV

    Automatically guesses the encoding for each record to convert to UTF-8

    Parameters
    ----------
    fname: str
      Filename
    dialect: str, optional
      Dialect to specify to csv.reader. If not specified -- guessed from
      the file, if fails to guess, "excel-tab" is assumed
    readahead: int, optional
      How many bytes to read from the file to guess the type
    **kwargs
      Passed to `csv.reader`

    Yields
    ------
    dict
      One record per data row, keyed by the values of the header row.
    """
    import csv
    if dialect is None:
        with open(fname) as tsvfile:
            # add robustness, use a sniffer
            try:
                dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
            except Exception as exc:
                lgr.warning(
                    'Could not determine file-format, assuming TSV: %s',
                    CapturedException(exc)
                )
                dialect = 'excel-tab'
    kw = dict(encoding='utf-8')
    with open(fname, 'r', **kw) as tsvfile:
        # csv.py doesn't do Unicode; encode temporarily as UTF-8:
        csv_reader = csv.reader(
            tsvfile,
            dialect=dialect,
            **kwargs
        )
        # the first row provides the keys for all subsequent records
        header = None
        for row in csv_reader:
            # decode UTF-8 back to Unicode, cell by cell:
            row_unicode = map(ensure_unicode, row)
            if header is None:
                header = list(row_unicode)
            else:
                yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
    """Helper to import a list of modules without failing if N/A

    Parameters
    ----------
    modnames: list of str
      List of module names to import
    pkg: str
      Package under which to import
    msg: str, optional
      Message template for .format() to log at DEBUG level if import fails.
      Keys {module} and {package} will be provided and ': {exception}' appended
    log: callable, optional
      Logger call to use for logging messages

    Returns
    -------
    list
      Module objects which were successfully imported.
    """
    from importlib import import_module
    # successfully imported modules are also bound in this module's globals
    _globals = globals()
    mods_loaded = []
    if pkg and not pkg in sys.modules:
        # with python 3.5.1 (ok with 3.5.5) somehow kept running into
        # Failed to import dlsub1: Parent module 'dltestm1' not loaded
        # while running the test. Preloading pkg resolved the issue
        import_module(pkg)
    for modname in modnames:
        try:
            _globals[modname] = mod = import_module(
                '.{}'.format(modname),
                pkg)
            mods_loaded.append(mod)
        except Exception as exc:
            # failures are logged, never raised -- import stays best-effort
            from datalad.support.exceptions import CapturedException
            ce = CapturedException(exc)
            log((msg + ': {exception}').format(
                module=modname, package=pkg, exception=ce.message))
    return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
    """Import provided module given a path

    TODO:
     - RF/make use of it in pipeline.py which has similar logic
     - join with import_modules above?

    Parameters
    ----------
    modpath: str
      Path to a .py file to import as a module.
    pkg: module, optional
      If provided, and modpath is under pkg.__path__, relative import will be
      used
    log: callable, optional
      Logger call to use for logging messages

    Returns
    -------
    module
      The imported module object.

    Raises
    ------
    RuntimeError
      If the import fails for any reason (original exception chained).
    """
    assert(modpath.endswith('.py'))  # for now just for .py files
    log("Importing %s" % modpath)
    modname = basename(modpath)[:-3]
    relmodpath = None
    if pkg:
        for pkgpath in pkg.__path__:
            if path_is_subpath(modpath, pkgpath):
                # for now relying on having .py extension -- assertion above
                relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
                break
    try:
        if relmodpath:
            # module lives inside the package -- use a relative import
            from importlib import import_module
            mod = import_module(relmodpath, pkg.__name__)
        else:
            # standalone module -- import with its directory on sys.path,
            # and undo the sys.path manipulation afterwards
            dirname_ = dirname(modpath)
            try:
                sys.path.insert(0, dirname_)
                mod = __import__(modname, level=0)
            finally:
                if dirname_ in sys.path:
                    sys.path.pop(sys.path.index(dirname_))
                else:
                    log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
    except Exception as e:
        raise RuntimeError(
            "Failed to import module from %s" % modpath) from e
    return mod
def get_encoding_info():
    """Return a dictionary with various encoding/locale information"""
    import locale
    import sys
    from collections import OrderedDict
    info = OrderedDict()
    info['default'] = sys.getdefaultencoding()
    info['filesystem'] = sys.getfilesystemencoding()
    # NOTE: key spelling ('prefered') is part of the established interface
    info['locale.prefered'] = locale.getpreferredencoding()
    return info
def get_envvars_info():
    """Return an OrderedDict of environment variables of interest

    Included are variables with a PYTHON, LC_ or GIT_ prefix, as well as
    LANG, LANGUAGE and PATH.
    """
    from collections import OrderedDict
    prefixes = ('PYTHON', 'LC_', 'GIT_')
    exact = {'LANG', 'LANGUAGE', 'PATH'}
    # str.startswith accepts a tuple of prefixes
    return OrderedDict(
        (var, val)
        for var, val in os.environ.items()
        if var.startswith(prefixes) or var in exact
    )
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
    """string.Formatter subclass with special behavior for sequences.

    Formatting of individual elements is delegated to another formatter
    object. Non-sequence values are passed straight to the delegate's
    "format_field" method. List-like values (list, tuple, set, frozenset)
    are formatted element-wise via the delegate and the results joined
    with `separator` (a space by default).
    """
    def __init__(self, separator=" ", element_formatter=string.Formatter(),
                 *args, **kwargs):
        self.separator = separator
        self.element_formatter = element_formatter
    def format_element(self, elem, format_spec):
        """Format a single element

        For sequences, this is called once for each element in a
        sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclases.
        """
        return self.element_formatter.format_field(elem, format_spec)
    def format_field(self, value, format_spec):
        if not isinstance(value, (list, tuple, set, frozenset)):
            return self.format_element(value, format_spec)
        formatted = (self.format_element(v, format_spec) for v in value)
        return self.separator.join(formatted)
# TODO: eventually we might want to make use of attr module
class File(object):
    """Helper for a file entry in the create_tree/@with_tree

    It allows to define additional settings for entries
    """
    def __init__(self, name, executable=False):
        """
        Parameters
        ----------
        name : str
          Name of the file
        executable: bool, optional
          Make it executable
        """
        # file name to be used within the tree
        self.name = name
        # whether create_tree should set the executable bit on creation
        self.executable = executable
    def __str__(self):
        return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
    """Given an archive `name`, create under `path` with specified `load` tree

    The tree is first materialized on disk, compressed into the archive,
    and the original (uncompressed) tree is removed afterwards.
    """
    from datalad.support.archives import compress_files
    dirname = file_basename(name)
    full_dirname = op.join(path, dirname)
    os.makedirs(full_dirname)
    create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
    # create archive
    if archives_leading_dir:
        compress_files([dirname], name, path=path, overwrite=overwrite)
    else:
        # compress the tree's content without the leading directory itself
        compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
                       op.join(pardir, name),
                       path=op.join(path, dirname),
                       overwrite=overwrite)
    # remove original tree
    rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
    """
    Convert n bytes into a human readable string based on format.

    >>> bytes2human(1)
    '1.0 B'
    >>> bytes2human(1024)
    '1.0 KB'
    >>> bytes2human(1048576)
    '1.0 MB'
    >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
    '9.8 K/sec'
    >>> # precision can be adjusted by playing with %f operator
    >>> bytes2human(10000, format="%(value).5f %(symbol)s")
    '9.76562 K'

    Taken from: http://goo.gl/kTQMs and subsequently simplified
    Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
    License: MIT
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # map each symbol to its threshold: K -> 2**10, M -> 2**20, ...
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols[1:])}
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            # pass substitutions explicitly instead of the fragile
            # `format % locals()` idiom, which silently depends on local
            # variable names and breaks under refactoring
            return format % dict(symbol=symbol, value=float(n) / prefix[symbol])
    return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
    """Escape '{' and '}' in `arg` so it survives a later str.format() pass.

    To be used when `arg` carries no user-provided .format() placeholders
    but may become part of a composite string passed to .format(),
    e.g. via 'Run'.
    """
    # '}}' contains no '{', so the replacement order is interchangeable
    return arg.replace('}', '}}').replace('{', '{{')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
    """Perform platform-appropriate command line splitting.

    Identical to `shlex.split()` on non-windows platforms.

    Modified from https://stackoverflow.com/a/35900070
    """
    if not on_windows:
        return shlex_split(s)
    # the rest is for windows
    # regex groups: quoted string, empty-quote marker, escape sequence,
    # pipe/redirection operator, bare word, whitespace, catch-all failure
    RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
    args = []
    accu = None # collects pieces of one arg
    for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
        if word:
            pass # most frequent
        elif esc:
            # keep only the escaped character itself
            word = esc[1]
        elif white or pipe:
            # argument boundary: flush the accumulator (and the operator)
            if accu is not None:
                args.append(accu)
            if pipe:
                args.append(pipe)
            accu = None
            continue
        elif fail:
            raise ValueError("invalid or incomplete shell string")
        elif qs:
            # quoted string: unescape \" and \\
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NOTE(review): `platform` here appears to be the stdlib module,
            # not the 0/1 integer flag used in the StackOverflow original --
            # this comparison is then always False and the '""' un-escaping
            # below never runs.  Confirm intent before changing.
            if platform == 0:
                word = word.replace('""', '"')
        else:
            word = qss # may be even empty; must be last
        accu = (accu or '') + word
    if accu is not None:
        args.append(accu)
    return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
    """Probe whether a symlink can be created at `path` pointing to `target`.

    Similar to datalad.tests.utils.has_symlink_capability, but for use in a
    command context: no test machinery imports, and the probe happens at the
    specific location the caller cares about.  Both probe files are removed
    afterwards.

    Parameters
    ----------
    path: Path
      Location where the test symlink is created.
    target: Path
      Location of the (temporarily created) symlink target.

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        # fix: `.exists()` follows symlinks, so a dangling link (target
        # already gone) would be reported as absent and never unlinked;
        # check `.is_symlink()` as well
        if path.is_symlink() or path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
| 32.859834 | 123 | 0.616432 |
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
    """Return a possibly shortened repr of `value` (about `l` chars max).

    Long non-angle-bracket reprs are condensed to
    ``<<prefix++N chars++tail>>``; default-looking ``<... object at 0x...>``
    reprs (and objects without a custom ``__repr__``) are replaced by just
    the class name in angle brackets.
    """
    try:
        if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
            value_repr = repr(value)
            if not value_repr.startswith('<') and len(value_repr) > l:
                value_repr = "<<%s++%d chars++%s>>" % (
                    value_repr[:l - 16],
                    len(value_repr) - (l - 16 + 4),
                    value_repr[-4:]
                )
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
                # fix: the condition previously ended with the bare constant
                # `' object at 0x'`, which is always truthy -- every <...>
                # repr was rejected, not just the useless default ones
                raise ValueError("I hate those useless long reprs")
        else:
            raise ValueError("gimme class")
    except Exception:
        # fall back to a terse class-name placeholder
        value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
    return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
return stream.isatty()
except ValueError as exc:
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
    """Return the active IPython shell instance, or None when not under IPython."""
    try:
        shell = get_ipython()
    except NameError:
        # `get_ipython` is injected into builtins only by IPython itself
        return None
    return shell
def md5sum(filename):
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
    """Return a sorted list of files under `path`, relative to `path`.

    Any directory whose path contains '.git' is skipped entirely.
    """
    collected = []
    for root, _dirs, files in os.walk(path):
        if '.git' in root:
            continue
        # strip the leading "<path>/" so entries are relative to `path`
        collected.extend(op.join(root, f)[len(path) + 1:] for f in files)
    return sorted(collected)
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
    """Generator to find files matching regex

    Parameters
    ----------
    regex: basestring
      Regex which the full (joined) path must match
    topdir: basestring, optional
      Directory where to search
    exclude: basestring, optional
      Regex for paths to exclude
    exclude_vcs:
      If True, excludes commonly known VCS subdirectories (regex: `%s`)
    exclude_datalad:
      If True, excludes datalad meta-data files, e.g. under the .datalad/
      subdirectory (regex: `%s`)
    dirs: bool, optional
      Whether to match directories as well as files
    """
    # fix: the docstring above was missing, so the `__doc__ %=` statement
    # below raised TypeError at import time; it carries the two placeholders
    # that statement substitutes.  Also compile the regex once instead of
    # per directory.
    matcher = re.compile(regex).search
    for dirpath, dirnames, filenames in os.walk(topdir):
        names = (dirnames + filenames) if dirs else filenames
        # TODO: might want to uniformize on windows to use '/'
        paths = (op.join(dirpath, name) for name in names)
        for path in filter(matcher, paths):
            path = path.rstrip(sep)
            if exclude and re.search(exclude, path):
                continue
            if exclude_vcs and re.search(_VCS_REGEX, path):
                continue
            if exclude_datalad and re.search(_DATALAD_REGEX, path):
                continue
            yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
    """Expand ~user and environment variables in `path`.

    With `force_absolute` (the default) the result is also made absolute.
    """
    # expanduser first, then expandvars -- preserve the original order
    expanded = expandvars(expanduser(path))
    if not force_absolute:
        return expanded
    return abspath(expanded)
def posix_relpath(path, start=None):
    """Like ``os.path.relpath``, but the result is joined POSIX-style.

    python2.7's ntpath relpath could not handle start=None, hence the
    explicit '' substitution.
    """
    rel = relpath(path, start='' if start is None else start)
    # split with the native flavor, re-join with posix separators
    head, tail = split(rel)
    return posixpath.join(head, tail)
def is_explicit_path(path):
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
    """Strip up to two short extensions (e.g. '.tar.gz') from basename of `name`.

    With `return_ext=True` a (basename, extensions) tuple is returned,
    where `extensions` excludes the leading dot ('' when there is none).
    """
    stripped = basename(name)
    # an "extension" is a dot, then a letter/underscore, then up to 4 more
    # non-whitespace characters; at most two of them are removed
    base = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', stripped)
    if not return_ext:
        return base
    return base, stripped[len(base) + 1:]
def escape_filename(filename):
    """Return `filename` wrapped in double quotes, with '"' and '`' backslash-escaped."""
    escaped = filename.replace('"', r'\"').replace('`', r'\`')
    return '"%s"' % escaped
def encode_filename(filename):
    """Encode a str `filename` using the filesystem encoding; pass other values through."""
    if not isinstance(filename, str):
        return filename
    return filename.encode(sys.getfilesystemencoding())
def decode_input(s):
    """Decode bytes input into str using stdin's encoding (UTF-8 fallback).

    Already-decoded str input is returned unchanged.  On decode failure a
    warning is logged and decoding is retried with errors='replace'.
    """
    if isinstance(s, str):
        return s
    encoding = sys.stdin.encoding or 'UTF-8'
    try:
        return s.decode(encoding)
    except UnicodeDecodeError:
        lgr.warning(
            "Failed to decode input string using %s encoding. "
            "Decoding allowing for errors", encoding)
        return s.decode(encoding, errors='replace')
if on_windows:
def lmtime(filepath, mtime):
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
def ensure_tuple_or_list(obj):
    """Return `obj` unchanged if already a list/tuple, otherwise wrap it in a 1-tuple."""
    return obj if isinstance(obj, (list, tuple)) else (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
    """Coerce `s` into an instance of collection class `cls`.

    Rules, in order: an existing `cls` instance is returned as-is (or
    shallow-copied if `copy`); a str becomes a single-element collection;
    other iterables are converted element-wise when `iterate`; None becomes
    an empty collection; anything else becomes a single-element collection.
    """
    if isinstance(s, cls):
        # already the requested type; optionally hand back a shallow copy
        return shallow_copy(s) if copy else s
    if isinstance(s, str):
        # strings are iterable but treated as atomic values here
        return cls((s,))
    if iterate and hasattr(s, '__iter__'):
        return cls(s)
    if s is None:
        return cls()
    return cls((s,))
def ensure_list(s, copy=False, iterate=True):
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
    """Return a list from a `sep`-delimited string; None for empty input.

    A list passed in is returned unchanged.
    """
    if not s:
        return None
    return s if isinstance(s, list) else s.split(sep)
def ensure_dict_from_str(s, **kwargs):
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
    """Encode str `s` with `encoding`; non-str values pass through unchanged."""
    return s.encode(encoding) if isinstance(s, str) else s
def ensure_unicode(s, encoding=None, confidence=None):
if not isinstance(s, bytes):
return s
if encoding is None:
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
    """Coerce `s` into a bool.

    Strings: digit strings go through int(); common yes/no spellings are
    recognized case-insensitively; anything else raises ValueError.
    Non-strings are passed through bool().
    """
    if not isinstance(s, str):
        return bool(s)
    if s.isdigit():
        return bool(int(s))
    lowered = s.lower()
    if lowered in {'y', 'yes', 'true', 'on'}:
        return True
    if lowered in {'n', 'no', 'false', 'off'}:
        return False
    raise ValueError("Do not know how to treat %r as a boolean" % s)
def as_unicode(val, cast_types=object):
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
    """Return unique elements of `seq`, preserving first-occurrence order.

    With `key`, uniqueness is decided by key(x) (keys must be hashable).
    With `reverse`, the *last* occurrence of each element wins, while the
    result still follows the original sequence order.
    """
    items = list(seq)
    if reverse:
        items.reverse()
    seen = set()
    kept = []
    for item in items:
        marker = item if key is None else key(item)
        if marker in seen:
            continue
        seen.add(marker)
        kept.append(item)
    if reverse:
        kept.reverse()
    return kept
def all_same(items):
    """Return whether all elements of iterable `items` are equal.

    Empty input yields False.  Short-circuits on the first mismatch and
    works on arbitrary (including one-shot) iterables.
    """
    iterator = iter(items)
    try:
        reference = next(iterator)
    except StopIteration:
        # no items at all
        return False
    return all(item == reference for item in iterator)
def map_items(func, v):
    """Apply `func` to every key and value of mapping `v`.

    Both the mapping class and the class of its ``.items()`` elements are
    preserved, so custom dict/pair types survive the transformation.
    """
    mapped_pairs = (pair.__class__(map(func, pair)) for pair in v.items())
    return v.__class__(mapped_pairs)
def partition(items, predicate=bool):
    """Split `items` into two lazy streams by `predicate`.

    Returns a (false_items, true_items) pair of generators; `items` is
    consumed lazily via itertools.tee, so it may be a one-shot iterator.
    """
    flagged_a, flagged_b = tee((predicate(item), item) for item in items)
    false_items = (item for flag, item in flagged_a if not flag)
    true_items = (item for flag, item in flagged_b if flag)
    return false_items, true_items
def generate_chunks(container, size):
    """Yield successive `size`-long slices of sequence `container`."""
    assert size > 0, "Size should be non-0 positive"
    position = 0
    while position < len(container):
        yield container[position:position + size]
        position += size
def generate_file_chunks(files, cmd=None):
    """Split a list of files into chunks that fit on a single command line.

    Parameters
    ----------
    files : str or list of str
      File names/paths to be chunked.
    cmd : str or list of str, optional
      Command the files will be appended to; its length is subtracted from
      the available command-line budget (CMD_MAX_ARG).

    Returns
    -------
    generator
      Yields lists of file names.
    """
    files = ensure_list(files)
    cmd = ensure_list(cmd)
    maxl = max(map(len, files)) if files else 0
    chunk_size = max(
        1,  # guarantee at least one file per chunk
        (CMD_MAX_ARG
         - sum((len(x) + 3) for x in cmd)
         - 4  # headroom for separators such as '--'
         ) // (maxl + 3)  # +3 per file for possible quotes and a space
    )
    # fix: previous revision assigned to `e_chunks` but returned the
    # undefined name `file_chunks`, raising NameError on every call
    file_chunks = generate_chunks(files, chunk_size)
    return file_chunks
def saved_generator(gen):
    """Wrap generator `gen` into a (recording, replaying) generator pair.

    The first generator yields from `gen` while caching every item; the
    second replays whatever has been cached so far.
    """
    cache = []

    def recording():
        for item in gen:
            cache.append(item)
            yield item

    def replaying():
        for item in cache:
            yield item

    return recording(), replaying()
better_wraps = wraps
def optional_args(decorator):
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
if tkwargs is None:
tkwargs_ = {}
else:
tkwargs_ = tkwargs.copy()
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
    """Decorate `f` so that any exception is logged instead of raised.

    With the DATALAD_ALLOW_FAIL environment variable set, `f` is returned
    undecorated so failures surface normally (useful for debugging).
    """
    @wraps(f)
    def wrapped_func(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            lgr.warning(
                "DataLad internal failure while running %s: %r. "
                "Please report at https://github.com/datalad/datalad/issues"
                % (f, e)
            )

    return f if os.environ.get('DATALAD_ALLOW_FAIL', False) else wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
yield
@contextmanager
def swallow_outputs():
class StringIOAdapter(object):
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
lgr = logging.getLogger(name)
old_level = lgr.level
old_handlers = lgr.handlers
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
    """Context manager temporarily suppressing all records of `logger`.

    Defaults to the root 'datalad' logger.  A filter rejecting every record
    is attached to each of the logger's handlers on entry and removed again
    on exit.
    """
    class NullFilter(logging.Filter):
        # reject every record
        def filter(self, record):
            return 0

    if logger is None:
        # default: all of datalad's logging
        logger = logging.getLogger('datalad')

    squelch = NullFilter(logger.name)
    for handler in logger.handlers:
        handler.addFilter(squelch)
    try:
        yield logger
    finally:
        for handler in logger.handlers:
            handler.removeFilter(squelch)
_sys_excepthook = sys.excepthook
def setup_exceptionhook(ipython=False):
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
    """os.path.join the arguments, create that directory if needed, return it."""
    target = op.join(*args)
    if not exists(target):
        os.makedirs(target)
    return target
def updated(d, update):
    """Return a copy of dict `d` with `update` applied; `d` itself is untouched."""
    merged = d.copy()
    merged.update(update)
    return merged
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
def getpwd():
global _pwd_mode
if _pwd_mode is None:
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
    """Return `path` guaranteed to end with the OS path separator."""
    return path if path.endswith(sep) else path + sep
def get_path_prefix(path, pwd=None):
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
    """Context manager yielding the path of a temporary file (or directory).

    With ``mkdir=True`` in `tkwargs` a temporary directory is created
    instead (incompatible with `content`).  `content` (str or bytes) is
    written into the file before yielding.  On exit, the file/directory
    and any siblings sharing its basename-without-suffix are removed --
    unless the glob would match suspiciously many entries, in which case
    cleanup is skipped with a warning.
    `wrapped` is forwarded to get_tempfile_kwargs (presumably to derive
    the name from a wrapped function -- confirm there).
    """
    if tkwargs.get('mkdir', None) and content is not None:
        raise ValueError("mkdir=True while providing content makes no sense")
    tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
    mkdir = tkwargs_.pop('mkdir', False)
    filename = {False: tempfile.mktemp,
                True: tempfile.mkdtemp}[mkdir](**tkwargs_)
    # resolve so later glob-based cleanup operates on the real location
    filename = Path(filename).resolve()
    if content:
        (filename.write_bytes
         if isinstance(content, bytes)
         else filename.write_text)(content)
    filename = str(filename)
    if __debug__:
        lgr.debug(
            'Created temporary %s named %s',
            'directory' if mkdir else 'file',
            filename)
    try:
        yield filename
    finally:
        # remove the entry and any companions sharing the prefix (suffix
        # stripped), e.g. files derived from the temp name
        lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = lsuffix and filename[:-lsuffix] or filename
        filenames = glob.glob(filename_ + '*')
        # safety valve: refuse a cleanup that looks overly broad
        if len(filename_) < 3 or len(filenames) > 5:
            lgr.warning("It is unlikely that it was intended to remove all"
                        " files matching %r. Skipping" % filename_)
            return
        for f in filenames:
            try:
                rmtemp(f)
            except OSError:
                pass
def _path_(*p):
    """Join path components, translating '/'-separated specs to the
    native separator on Windows (no-op elsewhere)."""
    if on_windows:
        return op.join(*map(lambda x: op.join(*x.split('/')), p))
    else:
        return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
    """Return `prefix` followed by a TIMESTAMP_FMT-formatted timestamp.

    `time_` may be an epoch int (converted via gmtime, i.e. UTC) or a
    struct_time; None uses strftime's default, the current local time.
    """
    args = []
    if time_ is not None:
        if isinstance(time_, int):
            time_ = time.gmtime(time_)
        args.append(time_)
    return time.strftime(prefix + TIMESTAMP_FMT, *args)
def get_logfilename(dspath, cmd='datalad'):
    """Return a filename for a crawl log under dataset directory `dspath`.

    `dspath` must be an existing directory; the '.git/datalad/logs'
    subdirectory is created if missing.  `cmd` is currently unused but
    kept for interface compatibility.
    """
    assert(exists(dspath))
    assert(isdir(dspath))
    ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs')
    # BUGFIX: the return statement was fused onto the previous line
    # (missing newline and 'return'), which is a syntax error
    return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
    """Find a route from `start` to `end` through directed `edges`.

    Parameters
    ----------
    edges : sequence of (parent, child) pairs describing directed links
    start, end : nodes to connect
    trace : list, optional
      Intermediate nodes visited so far (internal recursion state;
      callers normally omit it).

    Returns
    -------
    list or None
      Intermediate nodes between `start` and `end` (excluding both
      endpoints), or None when no route exists.

    Raises
    ------
    ValueError
      If `edges` is empty.
    """
    if trace is None:
        trace = []
    if not edges:
        raise ValueError("no edges given")
    for parent, child in edges:
        # never revisit a node already on the current route
        if child in trace:
            continue
        # the edge must extend the route: from the last visited node,
        # or directly from `start` while the route is still empty
        expected_parent = trace[-1] if trace else start
        if parent != expected_parent:
            continue
        if child == end:
            return trace
        deeper = get_trace(edges, start, end, trace + [child])
        if deeper:
            return deeper
    return None
def get_dataset_root(path):
    """Walk upwards from `path` to the closest directory containing '.git'.

    A symlink or non-directory argument is first replaced by its parent
    directory (but the original argument is re-checked at the end, so a
    symlinked dataset itself can still be returned).  Returns the found
    root path or None.
    """
    path = str(path)
    suffix = '.git'
    altered = None
    if islink(path) or not isdir(path):
        # search from the containing directory, but remember the original
        altered = path
        path = dirname(path)
    apath = abspath(path)
    # climb until the filesystem root (where split() yields an empty tail)
    while split(apath)[1]:
        if exists(op.join(path, suffix)):
            return path
        path = normpath(op.join(path, os.pardir))
        apath = abspath(path)
    # re-evaluate the original argument itself
    if altered and exists(op.join(altered, suffix)):
        return altered
    return None
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
    """Call ``f(*args, **kwargs)``, retrying on `exception` up to `ntrials`
    times with exponential back-off (``base ** trial`` seconds).

    The exception raised on the final trial is propagated.
    """
    trial = 0
    while True:
        trial += 1
        try:
            return f(*args, **kwargs)
        except exception as exc:
            if trial >= ntrials:
                raise
            delay = base ** trial
            lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
                        CapturedException(exc), trial, delay)
            sleep(delay)
@optional_args
def try_multiple_dec(
        f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
        exceptions_filter=None,
        logger=None,
):
    """Decorator: retry `f` on `exceptions` up to `ntrials` times.

    Defaults are more forgiving on Windows (100 trials; OSError,
    WindowsError and PermissionError) than elsewhere (10 trials; OSError),
    since transient file-locking errors are common there.  `duration` is
    the sleep between trials; with increment_type='exponential' it grows
    as ``duration ** trial``.  `exceptions_filter`, if given, decides per
    exception whether to retry (truthy) or re-raise.  `logger` defaults
    to a level-5 lgr.log call.
    """
    if not exceptions:
        exceptions = (OSError, WindowsError, PermissionError) \
            if on_windows else OSError
    if not ntrials:
        # Life goes fast on proper systems, no need to delay it much
        ntrials = 100 if on_windows else 10
    if logger is None:
        def logger(*args, **kwargs):
            return lgr.log(5, *args, **kwargs)
    assert increment_type in {None, 'exponential'}
    @wraps(f)
    def _wrap_try_multiple_dec(*args, **kwargs):
        t = duration
        for trial in range(ntrials):
            try:
                return f(*args, **kwargs)
            except exceptions as exc:
                if exceptions_filter and not exceptions_filter(exc):
                    raise
                if trial < ntrials - 1:
                    if increment_type == 'exponential':
                        t = duration ** (trial + 1)
                    logger(
                        "Caught %s on trial #%d. Sleeping %f and retrying",
                        CapturedException(exc), trial, t)
                    sleep(t)
                else:
                    # out of trials -- propagate the last failure
                    raise
    return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
    """os.unlink() with retries (helps against transient locks, see
    try_multiple_dec); also asserts no process holds the file open."""
    assert_no_open_files(f)
    return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
    """shutil.rmtree() with retries (see try_multiple_dec)."""
    return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
    """Join two URL fragments with exactly one '/' between them.

    Either argument may be None, in which case the other one is returned
    unchanged.
    """
    if base is None:
        return extension
    if extension is None:
        return base
    return base.rstrip('/') + '/' + extension.lstrip('/')
def open_r_encdetect(fname, readahead=1000):
    """Open `fname` for reading with the encoding auto-detected by chardet.

    Only the first `readahead` bytes are sampled for detection.
    """
    from chardet import detect
    import io
    # sample a prefix of the raw bytes for detection
    with open(fname, 'rb') as f:
        head = f.read(readahead)
    enc = detect(head)
    denc = enc.get('encoding', None)
    lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
              denc,
              fname,
              enc.get('confidence', 'unknown'))
    return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
    """Return the content of file `fname`.

    With ``decode=True`` (default) the bytes are converted to unicode via
    ensure_unicode; otherwise the raw bytes are returned.
    """
    with open(fname, 'rb') as fobj:
        raw = fobj.read()
    if decode:
        return ensure_unicode(raw)
    return raw
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
    """Yield rows of a CSV/TSV file as dicts keyed by its header row.

    Unless `dialect` is given, it is sniffed from the first `readahead`
    characters; on sniffing failure TSV ('excel-tab') is assumed.  All
    cell values are passed through ensure_unicode.
    """
    import csv
    if dialect is None:
        with open(fname) as tsvfile:
            try:
                # guess the delimiter from a prefix of the file
                dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
            except Exception as exc:
                lgr.warning(
                    'Could not determine file-format, assuming TSV: %s',
                    CapturedException(exc)
                )
                dialect = 'excel-tab'
    kw = dict(encoding='utf-8')
    with open(fname, 'r', **kw) as tsvfile:
        csv_reader = csv.reader(
            tsvfile,
            dialect=dialect,
            **kwargs
        )
        header = None
        for row in csv_reader:
            # decode UTF-8 back to Unicode, cell by cell:
            row_unicode = map(ensure_unicode, row)
            if header is None:
                # the first row provides the dict keys for all others
                header = list(row_unicode)
            else:
                yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
    """Import submodules `modnames` of package `pkg`, tolerating failures.

    Every successfully imported module is also bound into this module's
    globals().  Failures are logged via `log` using `msg`, which may
    reference {module}, {package} and {exception}.  Returns the list of
    modules that imported successfully.
    """
    from importlib import import_module
    _globals = globals()
    mods_loaded = []
    if pkg and not pkg in sys.modules:
        # with python 3.5.1 (ok with 3.5.5) somehow kept running into
        # Failed to import dlsub1: Parent module 'dltestm1' not loaded
        # while running the test. Preloading pkg resolved the issue
        import_module(pkg)
    for modname in modnames:
        try:
            _globals[modname] = mod = import_module(
                '.{}'.format(modname),
                pkg)
            mods_loaded.append(mod)
        except Exception as exc:
            # deferred import to avoid circularity at module load time
            from datalad.support.exceptions import CapturedException
            ce = CapturedException(exc)
            log((msg + ': {exception}').format(
                module=modname, package=pkg, exception=ce.message))
    return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
    """Import and return a module from the .py file at `modpath`.

    If `pkg` is given and `modpath` lies under that package's path, a
    proper relative import is performed; otherwise the file's directory
    is temporarily prepended to sys.path.  Raises RuntimeError (with the
    original exception chained) on failure.
    """
    assert(modpath.endswith('.py'))  # for now just for .py files
    log("Importing %s" % modpath)
    modname = basename(modpath)[:-3]
    relmodpath = None
    if pkg:
        for pkgpath in pkg.__path__:
            if path_is_subpath(modpath, pkgpath):
                # for now relying on having .py extension -- assertion above
                relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
                break
    try:
        if relmodpath:
            from importlib import import_module
            mod = import_module(relmodpath, pkg.__name__)
        else:
            dirname_ = dirname(modpath)
            try:
                sys.path.insert(0, dirname_)
                mod = __import__(modname, level=0)
            finally:
                # undo the sys.path manipulation, tolerating concurrent edits
                if dirname_ in sys.path:
                    sys.path.pop(sys.path.index(dirname_))
                else:
                    log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
    except Exception as e:
        raise RuntimeError(
            "Failed to import module from %s" % modpath) from e
    return mod
def get_encoding_info():
    """Return an ordered mapping of the process' active encodings.

    Keys: 'default' (sys.getdefaultencoding), 'filesystem'
    (sys.getfilesystemencoding) and 'locale.prefered' (sic --
    the misspelled key is preserved as-is).
    """
    import locale
    import sys
    from collections import OrderedDict
    info = OrderedDict()
    info['default'] = sys.getdefaultencoding()
    info['filesystem'] = sys.getfilesystemencoding()
    info['locale.prefered'] = locale.getpreferredencoding()
    return info
def get_envvars_info():
    """Return an OrderedDict of environment variables relevant for
    diagnostics: PYTHON*/LC_*/GIT_* prefixed ones plus LANG, LANGUAGE
    and PATH, in environment iteration order."""
    from collections import OrderedDict
    relevant_prefixes = ('PYTHON', 'LC_', 'GIT_')
    relevant_names = ('LANG', 'LANGUAGE', 'PATH')
    selected = [
        (name, value)
        for name, value in os.environ.items()
        if name.startswith(relevant_prefixes) or name in relevant_names
    ]
    return OrderedDict(selected)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
    """string.Formatter that renders sequence values as joined elements.

    Each element of a list/tuple/set/frozenset value is formatted
    individually by `element_formatter` and the results are joined with
    `separator`; scalar values are formatted as usual.
    """

    def __init__(self, separator=" ", element_formatter=string.Formatter(),
                 *args, **kwargs):
        self.separator = separator
        self.element_formatter = element_formatter

    def format_element(self, elem, format_spec):
        """Format a single element of a sequence value."""
        return self.element_formatter.format_field(elem, format_spec)

    def format_field(self, value, format_spec):
        if isinstance(value, (list, tuple, set, frozenset)):
            rendered = (self.format_element(item, format_spec)
                        for item in value)
            return self.separator.join(rendered)
        return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
    """Helper for tree specifications: a file name plus per-file flags.

    `executable` requests the executable bit on the created file
    (consumed by create_tree).
    """
    def __init__(self, name, executable=False):
        self.name = name
        self.executable = executable
    def __str__(self):
        return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
    """Create under `path` an archive `name` containing the tree spec `load`.

    The tree is first materialized on disk (via create_tree), compressed,
    and the plain tree is removed again.  With archives_leading_dir=True
    the archive contains a single leading directory; otherwise the tree's
    entries sit at the archive root.
    """
    from datalad.support.archives import compress_files
    dirname = file_basename(name)
    full_dirname = op.join(path, dirname)
    os.makedirs(full_dirname)
    create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
    # create archive
    if archives_leading_dir:
        compress_files([dirname], name, path=path, overwrite=overwrite)
    else:
        compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
                       op.join(pardir, name),
                       path=op.join(path, dirname),
                       overwrite=overwrite)
    # remove original tree
    rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
    """Create a directory tree under `path` as described by `tree`.

    `tree` is a dict or a sequence of (name, load) pairs.  `name` may be
    a plain string or a File instance (to request the executable bit).
    A dict/list/tuple `load` becomes a subtree -- or an archive when the
    name ends in .tar(.gz)/.zip.  Any other `load` is written as the file
    content; names ending in .gz/.xz/.lzma are written compressed.
    """
    lgr.log(5, "Creating a tree under %s", path)
    if not exists(path):
        os.makedirs(path)
    if isinstance(tree, dict):
        tree = tree.items()
    for file_, load in tree:
        if isinstance(file_, File):
            executable = file_.executable
            name = file_.name
        else:
            executable = False
            name = file_
        full_name = op.join(path, name)
        if remove_existing and lexists(full_name):
            rmtree(full_name, chmod_files=True)
        if isinstance(load, (tuple, list, dict)):
            # nested spec: archive or plain subtree, decided by extension
            if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
                create_tree_archive(
                    path, name, load,
                    archives_leading_dir=archives_leading_dir)
            else:
                create_tree(
                    full_name, load,
                    archives_leading_dir=archives_leading_dir,
                    remove_existing=remove_existing)
        else:
            # plain file; possibly compressed on the fly
            open_func = open
            if full_name.endswith('.gz'):
                open_func = gzip.open
            elif full_name.split('.')[-1] in ('xz', 'lzma'):
                import lzma
                open_func = lzma.open
            with open_func(full_name, "wb") as f:
                f.write(ensure_bytes(load, 'utf-8'))
            if executable:
                os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
    """Return a 'Did you mean ...' message for `values` given `known` options.

    Close matches are found via difflib.get_close_matches and
    de-duplicated; returns '' when nothing is close enough.
    """
    import difflib
    suggestions = []
    for value in ensure_list(values):  # might not want to do it if we change presentation below
        suggestions += difflib.get_close_matches(value, known)
    suggestions = unique(suggestions)
    msg = "Did you mean any of these?"
    if suggestions:
        if '\n' in sep:
            # if separator includes new line - we add entire separator right away
            msg += sep
        else:
            msg += ' '
        return msg + "%s\n" % sep.join(suggestions)
    return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
    """Convert a byte count into a human-readable string.

    Binary (1024-based) prefixes K..Y are used; `format` is a %-style
    template receiving 'value' and 'symbol' keys.

    Raises
    ------
    ValueError
        If `n` is negative.
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # walk from the largest prefix down; 'K' -> 2**10, 'M' -> 2**20, ...
    for power, symbol in reversed(list(enumerate(symbols))):
        if power and n >= 1 << (power * 10):
            value = n / float(1 << (power * 10))
            return format % dict(value=value, symbol=symbol)
    # below 1K: render the plain count with no prefix
    return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
    """Quote `arg` for safe use on the platform's command line.

    Windows uses double-quote wrapping with '""' escaping; elsewhere
    shlex quoting is used.
    """
    # https://stackoverflow.com/a/15262019
    return '"{}"'.format(
        arg.replace('"', '""')
    ) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
    """Escape curly braces in `arg` so str.format leaves them literal."""
    escaped = arg.replace('{', '{{')
    return escaped.replace('}', '}}')
def join_cmdline(args):
    """Join `args` into one command line, quoting each via quote_cmdlinearg."""
    return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
    """Split a command line string into arguments, platform-aware.

    POSIX systems use shlex splitting; on Windows a regex-based lexer is
    used instead, since shlex does not understand cmd.exe quoting rules.
    """
    if not on_windows:
        return shlex_split(s)
    # the rest is for windows
    # lexer groups: quoted string, empty-quote tail, escape, pipe/redirect
    # operator, bare word, whitespace, anything-else (= failure)
    RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
    args = []
    accu = None  # collects pieces of one arg
    for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
        if word:
            pass  # most frequent
        elif esc:
            word = esc[1]
        elif white or pipe:
            # argument boundary: flush the accumulator (and the operator)
            if accu is not None:
                args.append(accu)
            if pipe:
                args.append(pipe)
            accu = None
            continue
        elif fail:
            raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NOTE(review): 'platform' is not defined in this chunk; if it
            # is the stdlib module this comparison is never true and looks
            # like a leftover posix-mode flag -- confirm before changing
            if platform == 0:
                word = word.replace('""', '"')
        else:
            word = qss  # may be even empty; must be last
        accu = (accu or '') + word
    if accu is not None:
        args.append(accu)
    return args
def get_wrapped_class(wrapped):
    """Return the class a decorated function belongs to.

    The enclosing class name is the second-to-last component of the
    function's __qualname__ and is looked up in its module's namespace.
    """
    mod = sys.modules[wrapped.__module__]
    command_class_name = wrapped.__qualname__.split('.')[-2]
    _func_class = mod.__dict__[command_class_name]
    lgr.debug("Determined class of decorated function: %s", _func_class)
    return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
# Backward-compatible aliases: these functions were renamed assure_* ->
# ensure_*; the shims below emit a DeprecationWarning on every use.
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
    """Probe whether a symlink `path` -> `target` can be created.

    Both arguments are Path-like and must not exist yet: `target` is
    touched and `path` is created as a symlink to it.  Both are removed
    again before returning.  Returns False when symlinking fails (e.g. on
    filesystems or platforms without symlink support).
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        # clean up in creation order: the link before its target, so a
        # dangling symlink is never left behind
        for leftover in (path, target):
            if leftover.exists():
                leftover.unlink()
| true | true |
79001ee9162781fa713e5a90e03281765088a3a3 | 1,976 | py | Python | backend/apps/iamstudent/models_persistent_filter.py | match4healthcare/match4healthcare | acf69e3b781d715f0a947c2a9df6646e94f1ca6b | [
"MIT"
] | 2 | 2020-03-28T13:56:39.000Z | 2020-03-29T10:16:12.000Z | backend/apps/iamstudent/models_persistent_filter.py | match4healthcare/match4healthcare | acf69e3b781d715f0a947c2a9df6646e94f1ca6b | [
"MIT"
] | 76 | 2020-03-27T21:53:04.000Z | 2020-03-30T20:27:43.000Z | backend/apps/iamstudent/models_persistent_filter.py | match4healthcare/match4healthcare | acf69e3b781d715f0a947c2a9df6646e94f1ca6b | [
"MIT"
] | null | null | null | from datetime import datetime
import uuid
from django.db import models
import django.forms as forms
import django_filters.fields as filter_fields
from apps.ineedstudent.models import Hospital
from .filters import StudentJobRequirementsFilter
from .models import * # noqa: F401, F403
from .models import COUNTRY_CODE_CHOICES
class LocationFilterModel(models.Model):
    """Persisted location criteria (postal code + radius) of a filter."""
    # 5-character postal code (default country is DE); nullable when unset
    plz = models.CharField(max_length=5, null=True)
    # search radius around plz; units not evident here -- TODO confirm
    distance = models.IntegerField(default=0)
    countrycode = models.CharField(max_length=2, choices=COUNTRY_CODE_CHOICES, default="DE",)
    # public identifier, used instead of the numeric primary key
    uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
class StudentListFilterModel(models.Model):
    """A named, persisted student filter belonging to a hospital.

    Additional filter fields are attached dynamically below, mirroring
    StudentJobRequirementsFilter.
    """
    hospital = models.ForeignKey(Hospital, on_delete=models.CASCADE)
    # NOTE(review): this binds the model *class* as a plain attribute --
    # it is not a Django field and creates no DB relation; a ForeignKey
    # was likely intended.  Confirm before changing.
    location = LocationFilterModel
    # public identifier, used instead of the numeric primary key
    uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
    registration_date = models.DateTimeField(default=datetime.now, blank=True, null=True)
    # human-readable name of the saved filter
    name = models.CharField(max_length=100)
# Mirror every filter of StudentJobRequirementsFilter as a model field on
# StudentListFilterModel so that a filter configuration can be persisted.
# The model field type is derived from the filter's form-field type.
jrf = StudentJobRequirementsFilter()
for f_name, jr_filter in jrf.base_filters.items():
    if type(jr_filter.field) == forms.NullBooleanField:
        # tri-state flag: True/False/unset
        StudentListFilterModel.add_to_class(
            f_name, models.NullBooleanField(default=None, null=True)
        )
    elif type(jr_filter.field) == forms.DecimalField:
        StudentListFilterModel.add_to_class(f_name, models.IntegerField(default=0))
    elif type(jr_filter.field) == filter_fields.ChoiceField:
        # keep the same choices as the originating filter
        StudentListFilterModel.add_to_class(
            f_name, models.IntegerField(default=0, choices=jr_filter.field.choices)
        )
    elif type(jr_filter.field) == forms.DateField:
        StudentListFilterModel.add_to_class(
            f_name, models.DateField(null=True, default=datetime.now)
        )
    else:
        # fail loudly on unanticipated filter field types
        raise ValueError(
            "I do not know what to do with field type '%s' for '%s'"
            % (type(jr_filter.field), f_name)
        )
| 34.666667 | 93 | 0.730263 | from datetime import datetime
import uuid
from django.db import models
import django.forms as forms
import django_filters.fields as filter_fields
from apps.ineedstudent.models import Hospital
from .filters import StudentJobRequirementsFilter
from .models import *
from .models import COUNTRY_CODE_CHOICES
class LocationFilterModel(models.Model):
plz = models.CharField(max_length=5, null=True)
distance = models.IntegerField(default=0)
countrycode = models.CharField(max_length=2, choices=COUNTRY_CODE_CHOICES, default="DE",)
uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
class StudentListFilterModel(models.Model):
hospital = models.ForeignKey(Hospital, on_delete=models.CASCADE)
location = LocationFilterModel
uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
registration_date = models.DateTimeField(default=datetime.now, blank=True, null=True)
name = models.CharField(max_length=100)
jrf = StudentJobRequirementsFilter()
for f_name, jr_filter in jrf.base_filters.items():
if type(jr_filter.field) == forms.NullBooleanField:
StudentListFilterModel.add_to_class(
f_name, models.NullBooleanField(default=None, null=True)
)
elif type(jr_filter.field) == forms.DecimalField:
StudentListFilterModel.add_to_class(f_name, models.IntegerField(default=0))
elif type(jr_filter.field) == filter_fields.ChoiceField:
StudentListFilterModel.add_to_class(
f_name, models.IntegerField(default=0, choices=jr_filter.field.choices)
)
elif type(jr_filter.field) == forms.DateField:
StudentListFilterModel.add_to_class(
f_name, models.DateField(null=True, default=datetime.now)
)
else:
raise ValueError(
"I do not know what to do with field type '%s' for '%s'"
% (type(jr_filter.field), f_name)
)
| true | true |
79001f0f7183f3342e680bc8f8702a157a912fce | 4,512 | py | Python | tests/portfolio_projects/forms_test.py | Dafov/portfolio | fb3cb3721b944624c092d6046b0d9b005b7d9019 | [
"MIT"
] | null | null | null | tests/portfolio_projects/forms_test.py | Dafov/portfolio | fb3cb3721b944624c092d6046b0d9b005b7d9019 | [
"MIT"
] | null | null | null | tests/portfolio_projects/forms_test.py | Dafov/portfolio | fb3cb3721b944624c092d6046b0d9b005b7d9019 | [
"MIT"
] | null | null | null | import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
import base64
import tempfile
from django.test import TestCase, override_settings
from portfolio.portfolio_projects.forms import CommentForm, ProjectForm
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
class TestForms(TestCase):
    """Validation tests for the portfolio projects forms."""

    def test_comment_form_valid_data(self):
        """A non-empty comment text validates."""
        form = CommentForm({
            'text': 'Text',
        })
        self.assertTrue(form.is_valid())

    def test_comment_form_has_no_data(self):
        """An empty comment text is rejected."""
        form = CommentForm({
            'text': '',
        })
        self.assertFalse(form.is_valid())

    def test_project_form_has_no_data(self):
        """An empty project form reports all four required fields."""
        form = ProjectForm({})
        self.assertFalse(form.is_valid())
        self.assertEquals(len(form.errors), 4)

    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_project_form_valid_data(self):
        """A form with title/description/link and an uploaded image is valid."""
        # BUGFIX: decode once and report the size of the decoded payload;
        # the original passed len(TEST_IMAGE), the base64 *text* length
        image_bytes = base64.b64decode(TEST_IMAGE)
        image = InMemoryUploadedFile(
            BytesIO(image_bytes),
            field_name='tempfile',
            name='tempfile.png',
            content_type='image/png',
            size=len(image_bytes),
            charset='utf-8',
        )
        form = ProjectForm({
            'title': 'Title1',
            'description': 'Description1',
            'link': 'https://www.google.com/',
        }, {
            'image': image,
        })
        self.assertTrue(form.is_valid())
TEST_IMAGE = '''
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAABIAAAASABGyWs+AAAACXZwQWcAAAAQAAAAEABcxq3DAAABfElEQVQ4y52TvUuCURTGf5Zg
9goR9AVlUZJ9KURuUkhIUEPQUIubRFtIJTk0NTkUFfgntAUt0eBSQwRKRFSYBYFl1GAt901eUYuw
QTLM1yLPds/zPD/uPYereYjHcwD+tQ3+Uys+LwCah3g851la/lf4qwKb61Sn3z5WFUWpCHB+GUGb
SCRIpVKqBkmSAMrqsViMqnIiwLx7HO/U+6+30GYyaVXBP1uHrfUAWvWMWiF4+qoOUJLJkubYcDs2
S03hvODSE7564ek5W+Kt+tloa9ax6v4OZ++jZO+jbM+pD7oE4HM1lX1vYNGoDhCyQMiCGacRm0Vf
EM+uiudjke6YcRoLfiELNB2dXTkAa08LPlcT2fpJAMxWZ1H4NnKITuwD4Nl6RMgCAE1DY3PuyyQZ
JLrNvZhMJgCmJwYB2A1eAHASDiFkQUr5Xn0RoJLSDg7ZCB0fVRQ29/TmP1Nf/0BFgL2dQH4LN9dR
7CMOaiXDn6FayYB9xMHeTgCz1cknd+WC3VgTorUAAAAldEVYdGNyZWF0ZS1kYXRlADIwMTAtMTIt
MjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5
OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAAEAgGAAAAH/P/
YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFnAAAAEAAAABAA
XMatwwAAAhdJREFUOMuVk81LVFEYxn/3zocfqVebUbCyTLyYRYwD0cemCIRyUVToLloERUFBbYpo
E7WIFv0TLaP6C2Y17oYWWQxRMwo5OUplkR/XOefMuW8LNYyZLB94eOE5L79zzns4johIPp/n+YtX
fPn6jaq1bKaI65LY3sHohXOk02mcNxMT8vjJU5TWbEUN8Ti3bl4n0tLW/qBcniW0ltBaxFrsWl3P
7IZ8PdNa82m6RPTDxyLGmLq7JDuaqVQCllbqn6I4OUU0CJYJw7BmMR6LcPvyURbLGR49q/71KlGj
dV3AlbEhBnog3mo5e8Tycrz+cKPamBrAiUOdnD/ZhlFziKpw7RS8LVry01IDcI3WbHRXu8OdS524
pgx6BlkJEKW4PxrSFP2z12iNq1UFrTVaaxDNw6vttDXMg/2O2AXC5UUkWKI7vsDdM+Z3X9Ws2tXG
YLTCaMWNMY8DfREAFpcUkzPC1JzL8kKAGM3xvoDD+1uJVX+ilEIptTpECUP8PXEGB/rIzw/iNPXj
de1jML0Xay3l6QKfZyewP95x8dhr7r0HpSoAODt7dktoQ0SEpsZGent78f1+fN/H9/sxxlAoFCkU
CxQKRUqlEkppXNddBXTv2CXrtH/JofYVoqnUQbLZ8f/+A85aFWAolYJcLiee50ksFtuSm7e1SCaT
EUREcrmcnB4ZkWQyKZ7nbepEIiHDw8OSzWZFROQX6PpZFxAtS8IAAAAldEVYdGNyZWF0ZS1kYXRl
ADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEy
LTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAA
EAgGAAAAH/P/YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFn
AAAAEAAAABAAXMatwwAAAo9JREFUOMuNks1rVGcUxn/ve+9kUuOdfIzamNHEMK3RVILQQAuCWURo
rSAtbsV20T/EP6O7FtxkkYWQKK7F4Kb1C6yoSVrNdDIm1YTMjDP3vfc9p4ubZEYopQceDhwOD89z
zmO89/rw0SNu3b5D5a8q3gv7ZXa7dkY2sIwMf8w3X3/F9PTnhL/+9oCff7nBeq2GMYb/U5sbm1TX
a8TOEQwMHbq+vLKKqqIiiAh+r3tBvKBds72der1OtVolfP78BWmadmnNVKgqI0cOkiRtNrc9Zt9H
x9fK6iphs/keVflAoqpSHOzjh+8maL59yk83WzRa8G8OwzRxiHQIFOjJBXw7O8b0qV50K2H1tWf+
riCiHRbNFIUucYgoZu/Yqlz44iiXzh3EpJuE0uLKl57lNc/93wVjOyYyApeguwpElTOf9HH1YkSU
e0O72cC/b1DMK9/PGP5c97zaUGwXg01cjHMxcRwz0Cf8ePkAJ47U0eRvSLehtYM06pw+1OTauZje
wBG7mCTJEDqX3eCjvOXqxQGmTwXUmwlxmmdrpw+z0ybiHXnbYqasvDgbcGPJEvvsHKFzDp96Tgz3
cvjwMM/efsaBwZP0D39KabKEpgnbG3/wrvaU5psnHD/6mMF8jcqWwRgwpWOjKiLkQkOhv5+xsTLl
cpnR0WOUSiVEhLVKhbXXa7xcXqHyaoV6o0Hqd1MxUjqu7XYLMFkaNXtXYC09+R5UwbkYEcVaizFm
P/LWGsLJydMs3VvCWkP3gzxK7OKu7Bl81/tEhKmpKVhYWNCJiQkNglDDMKdhLpf1/0AQhDo+Pq5z
c3NKmqa6uLios7MXtFgsahRFGhUKHUS7KBQ0iiIdGhrS8+dndH5+XpMk0X8AMTVx/inpU4cAAAAl
dEVYdGNyZWF0ZS1kYXRlADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2Rp
ZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggg==
'''.strip() | 45.12 | 76 | 0.843085 | import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
import base64
import tempfile
from django.test import TestCase, override_settings
from portfolio.portfolio_projects.forms import CommentForm, ProjectForm
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
class TestForms(TestCase):
def test_comment_form_valid_data(self):
form = CommentForm({
'text': 'Text',
})
self.assertTrue(form.is_valid())
def test_comment_form_has_no_data(self):
form = CommentForm({
'text': '',
})
self.assertFalse(form.is_valid())
def test_project_form_has_no_data(self):
form = ProjectForm({})
self.assertFalse(form.is_valid())
self.assertEquals(len(form.errors), 4)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_project_form_valid_data(self):
image = InMemoryUploadedFile(
BytesIO(base64.b64decode(TEST_IMAGE)),
field_name='tempfile',
name='tempfile.png',
content_type='image/png',
size=len(TEST_IMAGE),
charset='utf-8',
)
form = ProjectForm({
'title': 'Title1',
'description': 'Description1',
'link': 'https://www.google.com/',
}, {
'image': image,
})
self.assertTrue(form.is_valid())
TEST_IMAGE = '''
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAABIAAAASABGyWs+AAAACXZwQWcAAAAQAAAAEABcxq3DAAABfElEQVQ4y52TvUuCURTGf5Zg
9goR9AVlUZJ9KURuUkhIUEPQUIubRFtIJTk0NTkUFfgntAUt0eBSQwRKRFSYBYFl1GAt901eUYuw
QTLM1yLPds/zPD/uPYereYjHcwD+tQ3+Uys+LwCah3g851la/lf4qwKb61Sn3z5WFUWpCHB+GUGb
SCRIpVKqBkmSAMrqsViMqnIiwLx7HO/U+6+30GYyaVXBP1uHrfUAWvWMWiF4+qoOUJLJkubYcDs2
S03hvODSE7564ek5W+Kt+tloa9ax6v4OZ++jZO+jbM+pD7oE4HM1lX1vYNGoDhCyQMiCGacRm0Vf
EM+uiudjke6YcRoLfiELNB2dXTkAa08LPlcT2fpJAMxWZ1H4NnKITuwD4Nl6RMgCAE1DY3PuyyQZ
JLrNvZhMJgCmJwYB2A1eAHASDiFkQUr5Xn0RoJLSDg7ZCB0fVRQ29/TmP1Nf/0BFgL2dQH4LN9dR
7CMOaiXDn6FayYB9xMHeTgCz1cknd+WC3VgTorUAAAAldEVYdGNyZWF0ZS1kYXRlADIwMTAtMTIt
MjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5
OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAAEAgGAAAAH/P/
YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFnAAAAEAAAABAA
XMatwwAAAhdJREFUOMuVk81LVFEYxn/3zocfqVebUbCyTLyYRYwD0cemCIRyUVToLloERUFBbYpo
E7WIFv0TLaP6C2Y17oYWWQxRMwo5OUplkR/XOefMuW8LNYyZLB94eOE5L79zzns4johIPp/n+YtX
fPn6jaq1bKaI65LY3sHohXOk02mcNxMT8vjJU5TWbEUN8Ti3bl4n0tLW/qBcniW0ltBaxFrsWl3P
7IZ8PdNa82m6RPTDxyLGmLq7JDuaqVQCllbqn6I4OUU0CJYJw7BmMR6LcPvyURbLGR49q/71KlGj
dV3AlbEhBnog3mo5e8Tycrz+cKPamBrAiUOdnD/ZhlFziKpw7RS8LVry01IDcI3WbHRXu8OdS524
pgx6BlkJEKW4PxrSFP2z12iNq1UFrTVaaxDNw6vttDXMg/2O2AXC5UUkWKI7vsDdM+Z3X9Ws2tXG
YLTCaMWNMY8DfREAFpcUkzPC1JzL8kKAGM3xvoDD+1uJVX+ilEIptTpECUP8PXEGB/rIzw/iNPXj
de1jML0Xay3l6QKfZyewP95x8dhr7r0HpSoAODt7dktoQ0SEpsZGent78f1+fN/H9/sxxlAoFCkU
CxQKRUqlEkppXNddBXTv2CXrtH/JofYVoqnUQbLZ8f/+A85aFWAolYJcLiee50ksFtuSm7e1SCaT
EUREcrmcnB4ZkWQyKZ7nbepEIiHDw8OSzWZFROQX6PpZFxAtS8IAAAAldEVYdGNyZWF0ZS1kYXRl
ADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEy
LTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAA
EAgGAAAAH/P/YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFn
AAAAEAAAABAAXMatwwAAAo9JREFUOMuNks1rVGcUxn/ve+9kUuOdfIzamNHEMK3RVILQQAuCWURo
rSAtbsV20T/EP6O7FtxkkYWQKK7F4Kb1C6yoSVrNdDIm1YTMjDP3vfc9p4ubZEYopQceDhwOD89z
zmO89/rw0SNu3b5D5a8q3gv7ZXa7dkY2sIwMf8w3X3/F9PTnhL/+9oCff7nBeq2GMYb/U5sbm1TX
a8TOEQwMHbq+vLKKqqIiiAh+r3tBvKBds72der1OtVolfP78BWmadmnNVKgqI0cOkiRtNrc9Zt9H
x9fK6iphs/keVflAoqpSHOzjh+8maL59yk83WzRa8G8OwzRxiHQIFOjJBXw7O8b0qV50K2H1tWf+
riCiHRbNFIUucYgoZu/Yqlz44iiXzh3EpJuE0uLKl57lNc/93wVjOyYyApeguwpElTOf9HH1YkSU
e0O72cC/b1DMK9/PGP5c97zaUGwXg01cjHMxcRwz0Cf8ePkAJ47U0eRvSLehtYM06pw+1OTauZje
wBG7mCTJEDqX3eCjvOXqxQGmTwXUmwlxmmdrpw+z0ybiHXnbYqasvDgbcGPJEvvsHKFzDp96Tgz3
cvjwMM/efsaBwZP0D39KabKEpgnbG3/wrvaU5psnHD/6mMF8jcqWwRgwpWOjKiLkQkOhv5+xsTLl
cpnR0WOUSiVEhLVKhbXXa7xcXqHyaoV6o0Hqd1MxUjqu7XYLMFkaNXtXYC09+R5UwbkYEcVaizFm
P/LWGsLJydMs3VvCWkP3gzxK7OKu7Bl81/tEhKmpKVhYWNCJiQkNglDDMKdhLpf1/0AQhDo+Pq5z
c3NKmqa6uLios7MXtFgsahRFGhUKHUS7KBQ0iiIdGhrS8+dndH5+XpMk0X8AMTVx/inpU4cAAAAl
dEVYdGNyZWF0ZS1kYXRlADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2Rp
ZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggg==
'''.strip() | true | true |
79001f43e8974311a07a16a2259f3c36834226bf | 1,590 | py | Python | mindspore/ops/composite/multitype_ops/logical_and_impl.py | i4oolish/mindspore | dac3be31d0f2c0a3516200f47af30980e566601b | [
"Apache-2.0"
] | 2 | 2020-08-12T16:14:40.000Z | 2020-12-04T03:05:57.000Z | mindspore/ops/composite/multitype_ops/logical_and_impl.py | dilingsong/mindspore | 4276050f2494cfbf8682560a1647576f859991e8 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/composite/multitype_ops/logical_and_impl.py | dilingsong/mindspore | 4276050f2494cfbf8682560a1647576f859991e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""logical_and_impl"""
from mindspore.ops.composite import base
from mindspore.ops import functional as F
# logical_and is a metagraph object which will generate function according to input type
# using ".register" decorator
logical_and = base.MultitypeFuncGraph("logical_and")
@logical_and.register("Number", "Number")
def _logical_and_scala(x, y):
    """
    Return the logical AND of two numbers.

    Both operands are coerced to bool via __bool__() before combining.

    Args:
        x (Number): first operand.
        y (Number): second operand.

    Returns:
        bool, logical AND of x and y.
    """
    return F.bool_and(x.__bool__(), y.__bool__())
@logical_and.register("Tensor", "Tensor")
def _logical_and_tensor(x, y):
    """
    Return the logical AND of two tensors (via F.logical_and).

    Args:
        x (Tensor): first operand.
        y (Tensor): second operand.

    Returns:
        Tensor, logical AND of x and y.
    """
    return F.logical_and(x, y)
| 30 | 88 | 0.67673 |
from mindspore.ops.composite import base
from mindspore.ops import functional as F
logical_and = base.MultitypeFuncGraph("logical_and")
@logical_and.register("Number", "Number")
def _logical_and_scala(x, y):
return F.bool_and(x.__bool__(), y.__bool__())
@logical_and.register("Tensor", "Tensor")
def _logical_and_tensor(x, y):
return F.logical_and(x, y)
| true | true |
79001f58d9b23dc3df6f1923d4452781045576f8 | 1,507 | py | Python | finance/tutorial/tester.py | leonsariel/python | dd68c21a02417341031b40c945152a61be12e3eb | [
"MIT"
] | 1 | 2018-04-09T14:09:21.000Z | 2018-04-09T14:09:21.000Z | finance/tutorial/tester.py | leonsariel/python | dd68c21a02417341031b40c945152a61be12e3eb | [
"MIT"
] | null | null | null | finance/tutorial/tester.py | leonsariel/python | dd68c21a02417341031b40c945152a61be12e3eb | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
__author__ = 'Di Meng'
__date__ = '1/3/2018 10:16 PM'
# _*_ coding: utf-8 _*_
__author__ = 'Di Meng'
__date__ = '1/3/2018 9:26 PM'
from tutorial.feature_functions import *
import pandas as pd
import plotly as py
import json
from plotly import tools
import plotly.graph_objs as go
#loading our data
df = pd.read_csv('EURUSD_hours.csv')
df.columns = ['date','open','high','low','close','volume']
df.date = pd.to_datetime(df.date,format='%d.%m.%Y %H:%M:%S.%f')
df = df.set_index(df.date)
df = df[['open','high','low','close','volume']]
df.drop_duplicates(keep=False)
df = df.iloc[:500]
#moving average
ma = df.close.rolling(center=False, window=30).mean()
# detrended = detrend(df, method='difference')
# f = fourier(df, [10, 15],method='difference')
#HA
# HAresults = candles(df, [1])
# HA = HAresults.candles[1]
#wad
results = wadl(df, [15])
line = results.wadl[15]
print(line['close'])
# draw grarphs
trace = go.Ohlc(x=df.index, open=df.open, high=df.high, low=df.low, close=df.close, name='Currency Quote')
trace1 = go.Scatter(x=df.index, y=ma)
trace2 = go.Scatter(x=df.index, y=(line.close.to_json()))
# linear detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)
# difference detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)
data = [trace, trace1, trace2]
fig = tools.make_subplots(rows=2,cols=1,shared_xaxes=True)
fig.append_trace(trace,1,1)
fig.append_trace(trace1,1,1)
fig.append_trace(trace2,2,1)
py.offline.plot(fig, filename="test.html") | 23.546875 | 106 | 0.696085 |
__author__ = 'Di Meng'
__date__ = '1/3/2018 10:16 PM'
__author__ = 'Di Meng'
__date__ = '1/3/2018 9:26 PM'
from tutorial.feature_functions import *
import pandas as pd
import plotly as py
import json
from plotly import tools
import plotly.graph_objs as go
df = pd.read_csv('EURUSD_hours.csv')
df.columns = ['date','open','high','low','close','volume']
df.date = pd.to_datetime(df.date,format='%d.%m.%Y %H:%M:%S.%f')
df = df.set_index(df.date)
df = df[['open','high','low','close','volume']]
df.drop_duplicates(keep=False)
df = df.iloc[:500]
ma = df.close.rolling(center=False, window=30).mean()
results = wadl(df, [15])
line = results.wadl[15]
print(line['close'])
trace = go.Ohlc(x=df.index, open=df.open, high=df.high, low=df.low, close=df.close, name='Currency Quote')
trace1 = go.Scatter(x=df.index, y=ma)
trace2 = go.Scatter(x=df.index, y=(line.close.to_json()))
data = [trace, trace1, trace2]
fig = tools.make_subplots(rows=2,cols=1,shared_xaxes=True)
fig.append_trace(trace,1,1)
fig.append_trace(trace1,1,1)
fig.append_trace(trace2,2,1)
py.offline.plot(fig, filename="test.html") | true | true |
79001f5f5314c74f84ae6dab8896fab3cf5ff8cc | 5,021 | py | Python | openstack_dashboard/test/integration_tests/basewebobject.py | jeff-phillips-18/horizon | bb02c0685625eb85bdf116ac118d3aa5b18bc5d0 | [
"Apache-2.0"
] | 3 | 2015-04-24T22:39:12.000Z | 2021-03-29T15:38:53.000Z | openstack_dashboard/test/integration_tests/basewebobject.py | jeff-phillips-18/horizon | bb02c0685625eb85bdf116ac118d3aa5b18bc5d0 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:48:09.000Z | 2021-03-21T11:48:09.000Z | openstack_dashboard/test/integration_tests/basewebobject.py | jeff-phillips-18/horizon | bb02c0685625eb85bdf116ac118d3aa5b18bc5d0 | [
"Apache-2.0"
] | 1 | 2016-05-20T17:58:21.000Z | 2016-05-20T17:58:21.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import selenium.common.exceptions as Exceptions
from selenium.webdriver.common import by
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
class BaseWebObject(unittest.TestCase):
    """Base class for all web objects.

    Wraps a Selenium WebDriver with element-lookup, form-filling and
    wait helpers shared by page/region objects in the integration tests.
    """

    # Spinner shown inside modal dialogs while a request is in flight.
    _spinner_locator = (by.By.CSS_SELECTOR, '.modal-body > .spinner')

    def __init__(self, driver, conf):
        self.driver = driver
        self.conf = conf
        self.explicit_wait = self.conf.selenium.explicit_wait

    def _is_element_present(self, *locator):
        """Return True if the element exists in the DOM (visible or not)."""
        try:
            # Disable implicit waiting so a missing element fails fast
            # instead of blocking for the configured page timeout.
            self._turn_off_implicit_wait()
            self._get_element(*locator)
            return True
        except Exceptions.NoSuchElementException:
            return False
        finally:
            self._turn_on_implicit_wait()

    def _is_element_visible(self, *locator):
        """Return True if the element exists and is currently displayed."""
        try:
            return self._get_element(*locator).is_displayed()
        except (Exceptions.NoSuchElementException,
                Exceptions.ElementNotVisibleException):
            return False

    def _is_element_displayed(self, element):
        """Return True if an already-located element is displayed.

        Any failure (e.g. stale element reference) is treated as
        "not displayed" rather than propagated.
        """
        try:
            return element.is_displayed()
        except Exception:
            return False

    def _is_text_visible(self, element, text, strict=True):
        """Return True if ``text`` matches (strict) or occurs in the element text."""
        try:
            if strict:
                return element.text == text
            else:
                return text in element.text
        except Exception:
            return False

    def _get_element(self, *locator):
        return self.driver.find_element(*locator)

    def _get_elements(self, *locator):
        return self.driver.find_elements(*locator)

    def _fill_field_element(self, data, field_element):
        """Clear a form field, type ``data`` into it and return the field."""
        field_element.clear()
        field_element.send_keys(data)
        return field_element

    def _select_dropdown(self, value, element):
        """Select a dropdown option by its visible label."""
        select = Support.Select(element)
        select.select_by_visible_text(value)

    def _select_dropdown_by_value(self, value, element):
        """Select a dropdown option by its ``value`` attribute."""
        select = Support.Select(element)
        select.select_by_value(value)

    def _turn_off_implicit_wait(self):
        self.driver.implicitly_wait(0)

    def _turn_on_implicit_wait(self):
        self.driver.implicitly_wait(self.conf.selenium.page_timeout)

    def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
        """Wait until ``predicate`` returns a truthy value or ``timeout`` elapses.

        ``predicate`` takes the driver as its only argument.  When
        ``timeout`` is falsy the configured explicit wait is used.
        """
        if not timeout:
            timeout = self.explicit_wait
        wait.WebDriverWait(self.driver, timeout, poll_frequency).until(
            predicate)

    def _wait_till_text_present_in_element(self, element, text, timeout=None):
        """Wait for ``text`` to appear in ``element``.

        Waiting for a text to appear in a certain element very often is
        actually waiting for a _different_ element with a different text to
        appear in place of an old element. So a way to avoid capturing stale
        element reference should be provided for this use case: ``element``
        may be a zero-argument callable that re-locates the element on each
        poll.
        """
        def predicate(_):
            # Re-resolve the element each poll if a getter was supplied,
            # so JS-driven DOM replacement cannot leave us with a stale ref.
            elt = element() if callable(element) else element
            return self._is_text_visible(elt, text)
        self._wait_until(predicate, timeout)

    def _wait_till_element_visible(self, element, timeout=None):
        self._wait_until(lambda x: self._is_element_displayed(element),
                         timeout)

    def _wait_till_element_disappears(self, element, timeout=None):
        self._wait_until(lambda x: not self._is_element_displayed(element),
                         timeout)

    def wait_till_element_disappears(self, element_getter):
        """Wait until the element produced by ``element_getter`` disappears."""
        try:
            self._turn_off_implicit_wait()
            self._wait_till_element_disappears(element_getter())
        except Exceptions.NoSuchElementException:
            # NOTE(mpavlase): This is valid state. When request completes
            # even before Selenium get a chance to get the spinner element,
            # it will raise the NoSuchElementException exception.
            pass
        finally:
            self._turn_on_implicit_wait()

    def wait_till_spinner_disappears(self):
        """Wait for the modal-dialog spinner to go away."""
        # Plain def instead of a lambda assignment (PEP 8 E731).
        def getter():
            return self.driver.find_element(*self._spinner_locator)
        self.wait_till_element_disappears(getter)
| 37.192593 | 78 | 0.671779 |
import unittest
import selenium.common.exceptions as Exceptions
from selenium.webdriver.common import by
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
class BaseWebObject(unittest.TestCase):
_spinner_locator = (by.By.CSS_SELECTOR, '.modal-body > .spinner')
def __init__(self, driver, conf):
self.driver = driver
self.conf = conf
self.explicit_wait = self.conf.selenium.explicit_wait
def _is_element_present(self, *locator):
try:
self._turn_off_implicit_wait()
self._get_element(*locator)
return True
except Exceptions.NoSuchElementException:
return False
finally:
self._turn_on_implicit_wait()
def _is_element_visible(self, *locator):
try:
return self._get_element(*locator).is_displayed()
except (Exceptions.NoSuchElementException,
Exceptions.ElementNotVisibleException):
return False
def _is_element_displayed(self, element):
try:
return element.is_displayed()
except Exception:
return False
def _is_text_visible(self, element, text, strict=True):
try:
if strict:
return element.text == text
else:
return text in element.text
except Exception:
return False
def _get_element(self, *locator):
return self.driver.find_element(*locator)
def _get_elements(self, *locator):
return self.driver.find_elements(*locator)
def _fill_field_element(self, data, field_element):
field_element.clear()
field_element.send_keys(data)
return field_element
def _select_dropdown(self, value, element):
select = Support.Select(element)
select.select_by_visible_text(value)
def _select_dropdown_by_value(self, value, element):
select = Support.Select(element)
select.select_by_value(value)
def _turn_off_implicit_wait(self):
self.driver.implicitly_wait(0)
def _turn_on_implicit_wait(self):
self.driver.implicitly_wait(self.conf.selenium.page_timeout)
def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout, poll_frequency).until(
predicate)
def _wait_till_text_present_in_element(self, element, text, timeout=None):
def predicate(_):
elt = element() if hasattr(element, '__call__') else element
return self._is_text_visible(elt, text)
self._wait_until(predicate, timeout)
def _wait_till_element_visible(self, element, timeout=None):
self._wait_until(lambda x: self._is_element_displayed(element),
timeout)
def _wait_till_element_disappears(self, element, timeout=None):
self._wait_until(lambda x: not self._is_element_displayed(element),
timeout)
def wait_till_element_disappears(self, element_getter):
try:
self._turn_off_implicit_wait()
self._wait_till_element_disappears(element_getter())
except Exceptions.NoSuchElementException:
pass
finally:
self._turn_on_implicit_wait()
def wait_till_spinner_disappears(self):
getter = lambda: self.driver.find_element(*self._spinner_locator)
self.wait_till_element_disappears(getter)
| true | true |
79001f7dcfb3d77af87da142647f53e78e22f2ef | 1,184 | py | Python | app/controllers/web/forgot_password.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/controllers/web/forgot_password.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/controllers/web/forgot_password.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | """
Forgot Password Web Controller
"""
# Standard Library
import os
# Third Party Library
from django.views import View
from django.shortcuts import render
from django.utils.translation import gettext as _
# Local Library
from app.modules.core.context import Context
from app.modules.entity.option_entity import OptionEntity
from app.modules.core.decorators import redirect_if_authenticated
from app.modules.core.decorators import redirect_if_not_installed
class ForgotPassword(View):
    """Render the "forgot password" page for anonymous visitors."""

    template_name = 'templates/forgot_password.html'

    # Instance state, populated per request in get().
    __context = None
    __option_entity = None
    __correlation_id = None

    @redirect_if_not_installed
    @redirect_if_authenticated
    def get(self, request):
        """Build the page context and render the forgot-password template."""
        self.__correlation_id = request.META.get("X-Correlation-ID", "")
        self.__context = Context()
        self.__option_entity = OptionEntity()
        self.__context.autoload_options()
        app_name = self.__context.get("app_name", os.getenv("APP_NAME", "Silverback"))
        self.__context.push({
            "page_title": _("Forgot Password · %s") % app_name
        })
        return render(request, self.template_name, self.__context.get())
| 28.878049 | 121 | 0.734797 |
import os
from django.views import View
from django.shortcuts import render
from django.utils.translation import gettext as _
from app.modules.core.context import Context
from app.modules.entity.option_entity import OptionEntity
from app.modules.core.decorators import redirect_if_authenticated
from app.modules.core.decorators import redirect_if_not_installed
class ForgotPassword(View):
template_name = 'templates/forgot_password.html'
__context = None
__option_entity = None
__correlation_id = None
@redirect_if_not_installed
@redirect_if_authenticated
def get(self, request):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__context = Context()
self.__option_entity = OptionEntity()
self.__context.autoload_options()
self.__context.push({
"page_title": _("Forgot Password · %s") % self.__context.get("app_name", os.getenv("APP_NAME", "Silverback"))
})
return render(request, self.template_name, self.__context.get())
| true | true |
7900204d85c3f10b0d2af408f72500bec2531473 | 526 | py | Python | openslides_backend/presenter/initial_data.py | ThomasJunk/openslides-backend | 798ed65d1490bf93ed3bd870cfc6f2a8c6f47986 | [
"MIT"
] | null | null | null | openslides_backend/presenter/initial_data.py | ThomasJunk/openslides-backend | 798ed65d1490bf93ed3bd870cfc6f2a8c6f47986 | [
"MIT"
] | null | null | null | openslides_backend/presenter/initial_data.py | ThomasJunk/openslides-backend | 798ed65d1490bf93ed3bd870cfc6f2a8c6f47986 | [
"MIT"
] | null | null | null | from typing import Any, Dict
from .base import Presenter
from .presenter import register_presenter
@register_presenter("initial-data")
class InitialData(Presenter):
    """Presenter supplying the static bootstrap data for setup."""

    @property
    def data(self) -> Dict[Any, Any]:
        """Return the initial configuration payload."""
        payload = dict(
            privacy_policy="The PP",
            legal_notice="The LN",
            theme="openslides-default",
            logo_web_header_path=None,
            login_info_text=None,
            saml_settings=None,
        )
        return payload
| 22.869565 | 42 | 0.587452 | from typing import Any, Dict
from .base import Presenter
from .presenter import register_presenter
@register_presenter("initial-data")
class InitialData(Presenter):
@property
def data(self) -> Dict[Any, Any]:
return {
"privacy_policy": "The PP",
"legal_notice": "The LN",
"theme": "openslides-default",
"logo_web_header_path": None,
"login_info_text": None,
"saml_settings": None,
}
| true | true |
790020a6fea96543b32a88bfb06cd00b72445702 | 3,264 | py | Python | djangocms_googlemap/migrations/0001_initial.py | yakky/djangocms-googlemap | 4f5f00fafc5d530e0a2854e20dc4a372006cab38 | [
"BSD-3-Clause"
] | null | null | null | djangocms_googlemap/migrations/0001_initial.py | yakky/djangocms-googlemap | 4f5f00fafc5d530e0a2854e20dc4a372006cab38 | [
"BSD-3-Clause"
] | null | null | null | djangocms_googlemap/migrations/0001_initial.py | yakky/djangocms-googlemap | 4f5f00fafc5d530e0a2854e20dc4a372006cab38 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated initial migration creating the GoogleMap CMS plugin model.
class Migration(migrations.Migration):
    # Requires the django CMS app's tables (CMSPlugin base) to exist first.
    dependencies = [
        ('cms', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='GoogleMap',
            fields=[
                # One-to-one link to the CMS plugin base record (acts as PK).
                ('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
                ('title', models.CharField(verbose_name='map title', blank=True, null=True, max_length=100)),
                ('address', models.CharField(verbose_name='address', max_length=150)),
                ('zipcode', models.CharField(verbose_name='zip code', max_length=30)),
                ('city', models.CharField(verbose_name='city', max_length=100)),
                ('content', models.CharField(help_text='Displayed under address in the bubble.', blank=True, max_length=255, verbose_name='additional content')),
                # Google Maps zoom levels 0-21.
                ('zoom', models.PositiveSmallIntegerField(verbose_name='zoom level', default=13, choices=[(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'), (13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'), (19, '19'), (20, '20'), (21, '21')])),
                # Optional manual coordinates overriding geocoded position.
                ('lat', models.DecimalField(help_text='Use latitude & longitude to fine tune the map position.', blank=True, max_digits=10, verbose_name='latitude', null=True, decimal_places=6)),
                ('lng', models.DecimalField(max_digits=10, verbose_name='longitude', blank=True, null=True, decimal_places=6)),
                ('route_planer_title', models.CharField(verbose_name='route planer title', blank=True, null=True, max_length=150, default='Calculate your fastest way to here')),
                ('route_planer', models.BooleanField(verbose_name='route planer', default=False)),
                # Rendered size of the embedded map.
                ('width', models.CharField(help_text='Plugin width (in pixels or percent).', default='100%', max_length=6, verbose_name='width')),
                ('height', models.CharField(help_text='Plugin height (in pixels).', default='400px', max_length=6, verbose_name='height')),
                # Map behaviour / control toggles.
                ('info_window', models.BooleanField(help_text='Show textbox over marker', default=True, verbose_name='info window')),
                ('scrollwheel', models.BooleanField(help_text='Enable scrollwheel zooming on the map', default=True, verbose_name='scrollwheel')),
                ('double_click_zoom', models.BooleanField(verbose_name='double click zoom', default=True)),
                ('draggable', models.BooleanField(verbose_name='draggable', default=True)),
                ('keyboard_shortcuts', models.BooleanField(verbose_name='keyboard shortcuts', default=True)),
                ('pan_control', models.BooleanField(verbose_name='Pan control', default=True)),
                ('zoom_control', models.BooleanField(verbose_name='zoom control', default=True)),
                ('street_view_control', models.BooleanField(verbose_name='Street View control', default=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| 72.533333 | 352 | 0.61826 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
]
operations = [
migrations.CreateModel(
name='GoogleMap',
fields=[
('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
('title', models.CharField(verbose_name='map title', blank=True, null=True, max_length=100)),
('address', models.CharField(verbose_name='address', max_length=150)),
('zipcode', models.CharField(verbose_name='zip code', max_length=30)),
('city', models.CharField(verbose_name='city', max_length=100)),
('content', models.CharField(help_text='Displayed under address in the bubble.', blank=True, max_length=255, verbose_name='additional content')),
('zoom', models.PositiveSmallIntegerField(verbose_name='zoom level', default=13, choices=[(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'), (13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'), (19, '19'), (20, '20'), (21, '21')])),
('lat', models.DecimalField(help_text='Use latitude & longitude to fine tune the map position.', blank=True, max_digits=10, verbose_name='latitude', null=True, decimal_places=6)),
('lng', models.DecimalField(max_digits=10, verbose_name='longitude', blank=True, null=True, decimal_places=6)),
('route_planer_title', models.CharField(verbose_name='route planer title', blank=True, null=True, max_length=150, default='Calculate your fastest way to here')),
('route_planer', models.BooleanField(verbose_name='route planer', default=False)),
('width', models.CharField(help_text='Plugin width (in pixels or percent).', default='100%', max_length=6, verbose_name='width')),
('height', models.CharField(help_text='Plugin height (in pixels).', default='400px', max_length=6, verbose_name='height')),
('info_window', models.BooleanField(help_text='Show textbox over marker', default=True, verbose_name='info window')),
('scrollwheel', models.BooleanField(help_text='Enable scrollwheel zooming on the map', default=True, verbose_name='scrollwheel')),
('double_click_zoom', models.BooleanField(verbose_name='double click zoom', default=True)),
('draggable', models.BooleanField(verbose_name='draggable', default=True)),
('keyboard_shortcuts', models.BooleanField(verbose_name='keyboard shortcuts', default=True)),
('pan_control', models.BooleanField(verbose_name='Pan control', default=True)),
('zoom_control', models.BooleanField(verbose_name='zoom control', default=True)),
('street_view_control', models.BooleanField(verbose_name='Street View control', default=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| true | true |
790020ebff21a4ba915a47c2c6964eea09063b89 | 1,447 | py | Python | ansible/environments/stage/dynamic_inventory.py | Otus-DevOps-2020-08/ValeriyTyutyunnik_infra | 3ac66b3945ff477c6616c085d993bb3641a2bb91 | [
"MIT"
] | null | null | null | ansible/environments/stage/dynamic_inventory.py | Otus-DevOps-2020-08/ValeriyTyutyunnik_infra | 3ac66b3945ff477c6616c085d993bb3641a2bb91 | [
"MIT"
] | null | null | null | ansible/environments/stage/dynamic_inventory.py | Otus-DevOps-2020-08/ValeriyTyutyunnik_infra | 3ac66b3945ff477c6616c085d993bb3641a2bb91 | [
"MIT"
] | 1 | 2020-10-06T12:58:58.000Z | 2020-10-06T12:58:58.000Z | #!/usr/bin/python
import argparse
import subprocess
import json
# Ansible dynamic-inventory script: translates `terraform show -json`
# output for the stage environment into Ansible inventory JSON.
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
args = parser.parse_args()

# Ansible always expects at least the _meta/hostvars skeleton, even when
# no inventory is produced (e.g. when invoked without --list).
result = {"_meta": {"hostvars": {}}}
if args.list:
    # NOTE(review): shell=True with a fixed command string is acceptable
    # here because no user input is interpolated; keep it that way.
    output = subprocess.check_output([
        "cd ../terraform/stage; terraform show -json"
    ], shell=True)
    data = json.loads(output)
    group_list = set()
    try:
        for module in data["values"]["root_module"]["child_modules"]:
            try:
                for resource in module["resources"]:
                    # null_resource entries carry no host information.
                    if resource["type"] == "null_resource":
                        continue
                    # Group hosts by their terraform resource name.
                    group_name = resource["name"]
                    values = resource["values"]
                    host_name = values["name"]
                    ip = values["network_interface"][0]["nat_ip_address"]
                    if group_name not in result:
                        result[group_name] = {"hosts": []}
                    group_list.add(group_name)
                    result[group_name]["hosts"].append(host_name)
                    result["_meta"]["hostvars"][host_name] = {
                        "ansible_host": ip
                    }
            except KeyError:
                # Module without the expected resource shape -- skip it.
                continue
        result["all"] = {"children": list(group_list), "hosts": [], "vars": {}}
    except KeyError:
        # Terraform state without the expected root_module layout:
        # fall through and emit the empty skeleton.
        pass
# Previously both branches printed the same value; emit it exactly once.
print(json.dumps(result))
| 28.94 | 79 | 0.520387 |
import argparse
import subprocess
import json
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
args = parser.parse_args()
result = {"_meta": {"hostvars": {}}}
if args.list:
output = subprocess.check_output([
"cd ../terraform/stage; terraform show -json"
], shell=True)
data = json.loads(output)
group_list = set()
try:
for module in data["values"]["root_module"]["child_modules"]:
try:
for resource in module["resources"]:
if resource["type"] == "null_resource":
continue
group_name = resource["name"]
values = resource["values"]
host_name = values["name"]
ip = values["network_interface"][0]["nat_ip_address"]
if group_name not in result:
result[group_name] = {"hosts": []}
group_list.add(group_name)
result[group_name]["hosts"].append(host_name)
result["_meta"]["hostvars"][host_name] = {
"ansible_host": ip
}
except KeyError:
continue
result["all"] = {"children": list(group_list), "hosts": [], "vars": {}}
except KeyError:
pass
print(json.dumps(result))
else:
print(json.dumps(result))
| true | true |
790022389c29b0dee0eef5e56249aaeb3e94eb3e | 268 | py | Python | 2017.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 2017.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | null | null | null | 2017.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z | def dif(x, y):
q = 0
for i in range(len(x)):
if x[i] != y[i]: q += 1
return q
# Read the reference string, the tolerance n, then five candidate strings;
# report the first closest candidate (1-based) and its distance, or -1 if
# every candidate differs in more than n positions.
e = str(input())
n = int(input())
v = [dif(e, str(input())) for _ in range(5)]
best = min(v)
if best > n:
    print(-1)
else:
    print(v.index(best) + 1)
    print(best)
| 17.866667 | 49 | 0.492537 | def dif(x, y):
q = 0
for i in range(len(x)):
if x[i] != y[i]: q += 1
return q
e = str(input())
n = int(input())
v = []
for i in range(5): v.append(dif(e, str(input())))
if min(v) > n: print(-1)
else:
print(v.index(min(v))+1)
print(min(v))
| true | true |
7900228e863e3c503aef331b7855c37ee856cb02 | 10,416 | py | Python | tests/test_admin.py | minervaproject/django-gdpr-assist | 2c498c1faee5f57a7e493aa912c33466184bb6cf | [
"BSD-3-Clause"
] | null | null | null | tests/test_admin.py | minervaproject/django-gdpr-assist | 2c498c1faee5f57a7e493aa912c33466184bb6cf | [
"BSD-3-Clause"
] | 3 | 2020-07-15T11:45:35.000Z | 2020-09-22T16:05:39.000Z | tests/test_admin.py | minervaproject/django-gdpr-assist | 2c498c1faee5f57a7e493aa912c33466184bb6cf | [
"BSD-3-Clause"
] | 2 | 2020-03-04T13:07:54.000Z | 2020-09-07T13:04:02.000Z | """
Test admin tools
"""
from io import BytesIO, TextIOWrapper
import csv
import six
import zipfile
import django
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import Client, TestCase
import gdpr_assist
from .gdpr_assist_tests_app.factories import (
ModelWithPrivacyMetaFactory,
FirstSearchModelFactory,
SecondSearchModelFactory,
)
from .gdpr_assist_tests_app.models import (
FirstSearchModel,
SecondSearchModel,
)
model_root_url = '/admin/gdpr_assist_tests_app/modelwithprivacymeta/'
tool_root_url = '/admin/gdpr_assist/personaldata/'
class AdminTestCase(TestCase):
    """Base test case: creates a superuser and signs it into ``self.client``."""

    def setUp(self):
        self.client = Client()
        user_model = get_user_model()
        admin = user_model.objects.create_superuser(
            username='test',
            email='test@example.com',
            password='test',
        )
        if django.VERSION > (1, 9):
            # Django 1.9+ can log the user in without the auth form.
            self.client.force_login(admin)
        else:
            # Django 1.8 has no client.force_login helper.
            self.client.login(username='test', password='test')
class TestModelAdmin(AdminTestCase):
    """Tests for the anonymise admin action on a privacy-registered model."""
    def test_changelist__anonymise_action_present(self):
        """The changelist action dropdown offers the anonymise action."""
        ModelWithPrivacyMetaFactory.create()
        response = self.client.get(model_root_url)
        self.assertContains(response, '<option value="anonymise">')
    def test_anonymise_action_submit__redirect_to_anonymise_view(self):
        """Submitting the action redirects to the confirmation view."""
        obj_1 = ModelWithPrivacyMetaFactory.create()
        obj_2 = ModelWithPrivacyMetaFactory.create()
        response = self.client.post(
            model_root_url,
            {
                'action': 'anonymise',
                '_selected_action': [obj_1.pk, obj_2.pk],
            },
            follow=True,
        )
        # Expected confirmation URL carries the selected pks as ?ids=
        test_url = '{root_url}anonymise/?ids={pk1},{pk2}'.format(
            root_url=model_root_url,
            pk1=obj_1.pk,
            pk2=obj_2.pk,
        )
        if django.VERSION <= (1, 9):
            # Django 1.8 support - redirects include host
            self.assertEqual(len(response.redirect_chain), 1)
            self.assertTrue(response.redirect_chain[0][0].endswith(
                test_url
            ))
            self.assertEqual(response.redirect_chain[0][1], 302)
        else:
            # Django 1.9+
            self.assertEqual(
                response.redirect_chain,
                [(test_url, 302)],
            )
        # Confirmation page lists the objects and round-trips the ids.
        self.assertContains(
            response,
            '<p>Are you sure you want to anonymise the following Model With Privacy Metas:</p>',
        )
        self.assertContains(
            response,
            '<input type="hidden" name="ids" value="{pk1},{pk2}">'.format(
                pk1=obj_1.pk,
                pk2=obj_2.pk,
            ),
        )
    def test_anonymise_view_submit__redirect_to_anonymise_view(self):
        """Confirming anonymisation anonymises the objects and redirects back."""
        obj_1 = ModelWithPrivacyMetaFactory.create(anonymised=False)
        obj_2 = ModelWithPrivacyMetaFactory.create(anonymised=False)
        response = self.client.post(
            model_root_url + 'anonymise/',
            {
                'ids': ','.join([str(obj_1.pk), str(obj_2.pk)]),
            },
            follow=True,
        )
        obj_1.refresh_from_db()
        obj_2.refresh_from_db()
        self.assertTrue(obj_1.anonymised)
        self.assertTrue(obj_2.anonymised)
        if django.VERSION <= (1, 9):
            # Django 1.8 support - redirects include host
            self.assertEqual(len(response.redirect_chain), 1)
            self.assertTrue(response.redirect_chain[0][0].endswith(model_root_url))
            self.assertEqual(response.redirect_chain[0][1], 302)
        else:
            # Django 1.9+
            self.assertEqual(
                response.redirect_chain,
                [(model_root_url, 302)],
            )
        # Success message reports how many objects were anonymised.
        self.assertContains(
            response,
            '<li class="success">2 Model With Privacy Metas anonymised</li>',
        )
class TestAdminTool(AdminTestCase):
    """Tests for the gdpr-assist personal-data search/anonymise/export tool."""
    def test_tool_is_available(self):
        """The personal data tool page renders for an admin user."""
        FirstSearchModelFactory.create()
        response = self.client.get(tool_root_url)
        self.assertContains(response, '<h1>Personal Data</h1>')
    def test_search__returns_correct_results(self):
        """Searching by email lists only the matching object."""
        obj_1 = FirstSearchModelFactory.create(
            email='one@example.com',
        )
        FirstSearchModelFactory.create(
            email='two@example.com',
        )
        response = self.client.post(tool_root_url, {'term': 'one@example.com'})
        self.assertContains(
            response,
            '<h2>Gdpr_Assist_Tests_App: First Search Model</h2>',
        )
        # Result checkbox values are "<content_type_pk>-<object_pk>".
        self.assertContains(
            response,
            '<input name="obj_pk" value="{}-{}" class="action-select" type="checkbox">'.format(
                ContentType.objects.get_for_model(FirstSearchModel).pk,
                obj_1.pk,
            ),
        )
    def test_anonymise__records_anonymised(self):
        """Anonymise action anonymises selected records only, then redirects."""
        obj_1 = FirstSearchModelFactory.create(
            email='one@example.com',
            anonymised=False,
        )
        obj_2 = FirstSearchModelFactory.create(
            email='two@example.com',
            anonymised=False,
        )
        content_type = ContentType.objects.get_for_model(FirstSearchModel).pk
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_ANONYMISE,
                'obj_pk': ['{}-{}'.format(content_type, obj_1.pk)],
            },
            follow=True,
        )
        obj_1.refresh_from_db()
        obj_2.refresh_from_db()
        # Only the selected object is anonymised.
        self.assertTrue(obj_1.anonymised)
        self.assertFalse(obj_2.anonymised)
        if django.VERSION <= (1, 9):
            # Django 1.8 support - redirects include host
            self.assertEqual(len(response.redirect_chain), 1)
            self.assertTrue(response.redirect_chain[0][0].endswith(tool_root_url))
            self.assertEqual(response.redirect_chain[0][1], 302)
        else:
            # Django 1.9+
            self.assertEqual(
                response.redirect_chain,
                [(tool_root_url, 302)],
            )
    def test_export_no_matches__reports_error(self):
        """Exporting a non-existent selection reports an error message."""
        # Request an object we know doesn't exist
        self.assertEqual(FirstSearchModel.objects.count(), 0)
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
                'obj_pk': [
                    '{}-1'.format(
                        ContentType.objects.get_for_model(FirstSearchModel).pk,
                    ),
                ],
            },
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<li class="error">No objects selected</li>',
        )
    def test_export_matches__records_export(self):
        """Export returns a zip of CSVs covering the selected records only."""
        # Creating 4 records:
        # * One matching in FirstSearchModel so we collect multiple models
        # * One not matching in FirstSearchModel so we exclude ignored records
        # * Two in SecondSearchModel so we collect multiple records
        obj_1 = FirstSearchModelFactory.create(
            chars='test1',
            email='one@example.com',
            anonymised=False,
        )
        obj_2 = FirstSearchModelFactory.create(
            chars='test2',
            email='two@example.com',
            anonymised=False,
        )
        obj_3 = SecondSearchModelFactory.create(
            chars='test3',
            email='one@example.com',
            anonymised=False,
        )
        obj_4 = SecondSearchModelFactory.create(
            chars='test4',
            email='one@example.com',
            anonymised=False,
        )
        content_type_1 = ContentType.objects.get_for_model(FirstSearchModel).pk
        content_type_2 = ContentType.objects.get_for_model(SecondSearchModel).pk
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
                'obj_pk': [
                    '{}-{}'.format(content_type_1, obj_1.pk),
                    '{}-{}'.format(content_type_2, obj_3.pk),
                    '{}-{}'.format(content_type_2, obj_4.pk),
                ],
            },
            follow=True,
        )
        # Check they didn't get anonymised by mistake
        obj_1.refresh_from_db()
        obj_2.refresh_from_db()
        obj_3.refresh_from_db()
        obj_4.refresh_from_db()
        self.assertFalse(obj_1.anonymised)
        self.assertFalse(obj_2.anonymised)
        self.assertFalse(obj_3.anonymised)
        self.assertFalse(obj_4.anonymised)
        # Download zip into memory and check it's as expected
        zip_data = BytesIO()
        zip_data.write(response.content)
        zip_file = zipfile.ZipFile(zip_data)
        # One CSV per model; second model uses a custom export filename.
        self.assertEqual(
            sorted(zip_file.namelist()),
            [
                'gdpr_assist_tests_app-FirstSearchModel.csv',
                'second_search.csv',
            ],
        )
        # Python 2 needs universal-newline mode when reading zip members.
        if six.PY2:
            mode = 'rU'
        else:
            mode = 'r'
        with zip_file.open(
            'gdpr_assist_tests_app-FirstSearchModel.csv',
            mode,
        ) as f:
            reader = csv.DictReader(TextIOWrapper(f))
            self.assertEqual(
                reader.fieldnames,
                ['email'],
            )
            rows = list(reader)
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0]['email'], 'one@example.com')
        with zip_file.open('second_search.csv', mode) as f:
            reader = csv.DictReader(TextIOWrapper(f))
            self.assertEqual(
                sorted(reader.fieldnames),
                ['chars', 'email'],
            )
            rows = list(reader)
            self.assertEqual(len(rows), 2)
            self.assertEqual(rows[0]['chars'], 'test3')
            self.assertEqual(rows[0]['email'], 'one@example.com')
            self.assertEqual(rows[1]['chars'], 'test4')
            self.assertEqual(rows[1]['email'], 'one@example.com')
| 33.171975 | 96 | 0.569796 | from io import BytesIO, TextIOWrapper
import csv
import six
import zipfile
import django
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import Client, TestCase
import gdpr_assist
from .gdpr_assist_tests_app.factories import (
ModelWithPrivacyMetaFactory,
FirstSearchModelFactory,
SecondSearchModelFactory,
)
from .gdpr_assist_tests_app.models import (
FirstSearchModel,
SecondSearchModel,
)
model_root_url = '/admin/gdpr_assist_tests_app/modelwithprivacymeta/'
tool_root_url = '/admin/gdpr_assist/personaldata/'
class AdminTestCase(TestCase):
def setUp(self):
self.client = Client()
User = get_user_model()
user = User.objects.create_superuser(
username='test',
email='test@example.com',
password='test',
)
if django.VERSION <= (1, 9):
self.client.login(username='test', password='test')
else:
self.client.force_login(user)
class TestModelAdmin(AdminTestCase):
def test_changelist__anonymise_action_present(self):
ModelWithPrivacyMetaFactory.create()
response = self.client.get(model_root_url)
self.assertContains(response, '<option value="anonymise">')
def test_anonymise_action_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create()
obj_2 = ModelWithPrivacyMetaFactory.create()
response = self.client.post(
model_root_url,
{
'action': 'anonymise',
'_selected_action': [obj_1.pk, obj_2.pk],
},
follow=True,
)
test_url = '{root_url}anonymise/?ids={pk1},{pk2}'.format(
root_url=model_root_url,
pk1=obj_1.pk,
pk2=obj_2.pk,
)
if django.VERSION <= (1, 9):
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(
test_url
))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
self.assertEqual(
response.redirect_chain,
[(test_url, 302)],
)
self.assertContains(
response,
'<p>Are you sure you want to anonymise the following Model With Privacy Metas:</p>',
)
self.assertContains(
response,
'<input type="hidden" name="ids" value="{pk1},{pk2}">'.format(
pk1=obj_1.pk,
pk2=obj_2.pk,
),
)
def test_anonymise_view_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create(anonymised=False)
obj_2 = ModelWithPrivacyMetaFactory.create(anonymised=False)
response = self.client.post(
model_root_url + 'anonymise/',
{
'ids': ','.join([str(obj_1.pk), str(obj_2.pk)]),
},
follow=True,
)
obj_1.refresh_from_db()
obj_2.refresh_from_db()
self.assertTrue(obj_1.anonymised)
self.assertTrue(obj_2.anonymised)
if django.VERSION <= (1, 9):
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(model_root_url))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
self.assertEqual(
response.redirect_chain,
[(model_root_url, 302)],
)
self.assertContains(
response,
'<li class="success">2 Model With Privacy Metas anonymised</li>',
)
class TestAdminTool(AdminTestCase):
    """Tests for the admin "Personal Data" search tool (served at
    ``tool_root_url``): availability, search results, bulk anonymise and
    CSV-zip export of matching records.
    """

    def test_tool_is_available(self):
        """The tool page renders for an admin user."""
        FirstSearchModelFactory.create()
        response = self.client.get(tool_root_url)
        self.assertContains(response, '<h1>Personal Data</h1>')

    def test_search__returns_correct_results(self):
        """Searching for an email lists only the matching object, with a
        checkbox keyed by ``<content_type_pk>-<object_pk>``.
        """
        obj_1 = FirstSearchModelFactory.create(
            email='one@example.com',
        )
        # Non-matching record; must not appear in the results.
        FirstSearchModelFactory.create(
            email='two@example.com',
        )
        response = self.client.post(tool_root_url, {'term': 'one@example.com'})
        self.assertContains(
            response,
            '<h2>Gdpr_Assist_Tests_App: First Search Model</h2>',
        )
        self.assertContains(
            response,
            '<input name="obj_pk" value="{}-{}" class="action-select" type="checkbox">'.format(
                ContentType.objects.get_for_model(FirstSearchModel).pk,
                obj_1.pk,
            ),
        )

    def test_anonymise__records_anonymised(self):
        """The anonymise action anonymises only the selected object."""
        obj_1 = FirstSearchModelFactory.create(
            email='one@example.com',
            anonymised=False,
        )
        obj_2 = FirstSearchModelFactory.create(
            email='two@example.com',
            anonymised=False,
        )
        content_type = ContentType.objects.get_for_model(FirstSearchModel).pk
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_ANONYMISE,
                # Only obj_1 is selected for anonymisation.
                'obj_pk': ['{}-{}'.format(content_type, obj_1.pk)],
            },
            follow=True,
        )
        obj_1.refresh_from_db()
        obj_2.refresh_from_db()
        self.assertTrue(obj_1.anonymised)
        self.assertFalse(obj_2.anonymised)
        # Django <= 1.9 reported redirect_chain URLs without scheme/host,
        # so only the suffix can be compared there.
        if django.VERSION <= (1, 9):
            self.assertEqual(len(response.redirect_chain), 1)
            self.assertTrue(response.redirect_chain[0][0].endswith(tool_root_url))
            self.assertEqual(response.redirect_chain[0][1], 302)
        else:
            self.assertEqual(
                response.redirect_chain,
                [(tool_root_url, 302)],
            )

    def test_export_no_matches__reports_error(self):
        """Exporting a selection that matches nothing re-renders the page
        with an error instead of producing a zip.
        """
        self.assertEqual(FirstSearchModel.objects.count(), 0)
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
                'obj_pk': [
                    # pk 1 does not exist - the selection is empty.
                    '{}-1'.format(
                        ContentType.objects.get_for_model(FirstSearchModel).pk,
                    ),
                ],
            },
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<li class="error">No objects selected</li>',
        )

    def test_export_matches__records_export(self):
        """Export returns a zip of one CSV per model, containing only the
        selected records, without anonymising anything.
        """
        # Creating 4 records:
        # * One matching in FirstSearchModel so we collect multiple models
        # * One not matching in FirstSearchModel so we exclude ignored records
        # * Two in SecondSearchModel so we collect multiple records
        obj_1 = FirstSearchModelFactory.create(
            chars='test1',
            email='one@example.com',
            anonymised=False,
        )
        obj_2 = FirstSearchModelFactory.create(
            chars='test2',
            email='two@example.com',
            anonymised=False,
        )
        obj_3 = SecondSearchModelFactory.create(
            chars='test3',
            email='one@example.com',
            anonymised=False,
        )
        obj_4 = SecondSearchModelFactory.create(
            chars='test4',
            email='one@example.com',
            anonymised=False,
        )
        content_type_1 = ContentType.objects.get_for_model(FirstSearchModel).pk
        content_type_2 = ContentType.objects.get_for_model(SecondSearchModel).pk
        response = self.client.post(
            tool_root_url,
            {
                'term': 'one@example.com',
                'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
                'obj_pk': [
                    '{}-{}'.format(content_type_1, obj_1.pk),
                    '{}-{}'.format(content_type_2, obj_3.pk),
                    '{}-{}'.format(content_type_2, obj_4.pk),
                ],
            },
            follow=True,
        )
        # Check they didn't get anonymised by mistake
        obj_1.refresh_from_db()
        obj_2.refresh_from_db()
        obj_3.refresh_from_db()
        obj_4.refresh_from_db()
        self.assertFalse(obj_1.anonymised)
        self.assertFalse(obj_2.anonymised)
        self.assertFalse(obj_3.anonymised)
        self.assertFalse(obj_4.anonymised)
        # The response body is a zip archive; unpack it in memory.
        zip_data = BytesIO()
        zip_data.write(response.content)
        zip_file = zipfile.ZipFile(zip_data)
        # One CSV per model.  Filenames presumably come from each model's
        # privacy meta (second_search.csv vs the app-Model default).
        self.assertEqual(
            sorted(zip_file.namelist()),
            [
                'gdpr_assist_tests_app-FirstSearchModel.csv',
                'second_search.csv',
            ],
        )
        # Python 2 needed universal-newline mode to read zip members as text.
        if six.PY2:
            mode = 'rU'
        else:
            mode = 'r'
        with zip_file.open(
            'gdpr_assist_tests_app-FirstSearchModel.csv',
            mode,
        ) as f:
            reader = csv.DictReader(TextIOWrapper(f))
            self.assertEqual(
                reader.fieldnames,
                ['email'],
            )
            rows = list(reader)
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0]['email'], 'one@example.com')
        with zip_file.open('second_search.csv', mode) as f:
            reader = csv.DictReader(TextIOWrapper(f))
            self.assertEqual(
                sorted(reader.fieldnames),
                ['chars', 'email'],
            )
            rows = list(reader)
            self.assertEqual(len(rows), 2)
            self.assertEqual(rows[0]['chars'], 'test3')
            self.assertEqual(rows[0]['email'], 'one@example.com')
            self.assertEqual(rows[1]['chars'], 'test4')
            self.assertEqual(rows[1]['email'], 'one@example.com')
| true | true |
790022f8c4afd1beee5f9f1b313044b0686cf160 | 60 | py | Python | experiments/circularImportB.py | Daniel-Chin/mini-Python | b122450a075adc4315cc13c29502f2029584e4bc | [
"MIT"
] | 1 | 2021-12-02T21:13:04.000Z | 2021-12-02T21:13:04.000Z | experiments/circularImportB.py | Daniel-Chin/mini-Python | b122450a075adc4315cc13c29502f2029584e4bc | [
"MIT"
] | null | null | null | experiments/circularImportB.py | Daniel-Chin/mini-Python | b122450a075adc4315cc13c29502f2029584e4bc | [
"MIT"
] | null | null | null | from circularImportA import a
# Part of a deliberate circular-import experiment with circularImportA:
# `a` is imported at module load, so this module's import order matters.
def f():
    # Prints the value imported from circularImportA at call time.
    print(a)

# Module-level value presumably read back by circularImportA.
b = 2
| 8.571429 | 29 | 0.633333 | from circularImportA import a
def f():
print(a)
b = 2
| true | true |
79002466907fabae889126e29c221cca4cada6e2 | 1,128 | py | Python | week1/w1e6.py | melphick/pynet | 047fbcf4eb0798379c48d0281ace74a6d126f119 | [
"Apache-2.0"
] | null | null | null | week1/w1e6.py | melphick/pynet | 047fbcf4eb0798379c48d0281ace74a6d126f119 | [
"Apache-2.0"
] | null | null | null | week1/w1e6.py | melphick/pynet | 047fbcf4eb0798379c48d0281ace74a6d126f119 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
A Python program that creates a list. One of the elements of the list should be
a dictionary with at least two keys. Write this list out to a file using both
YAML and JSON formats. The YAML file should be in the expanded form.
"""
import yaml
import json
# NOTE: this is Python 2 code (statement-form print) - keep it that way.
# Two sample device records; one of them carries an extra optional key so
# the serialized output shows dictionaries with differing key sets.
a = {
    'name': 'router1',
    'ip_addr': '1.2.3.4',
    'serial_number': 'FTX000232',
    'os_version': '12.4.15T',
    'optional_attrib_1': 'foo',
}
b = {
    'name': 'router2',
    'ip_addr': '5.6.7.8',
    'serial_number': 'FTX345632',
    'os_version': '12.4.15T',
}
# The exercise requires a list whose elements include dictionaries.
example_list = [a, b, "empty1", "empty2"]
# Show the raw list, then its YAML (expanded form via
# default_flow_style=False) and JSON renderings.
print "Here is the list"
print "----------------"
print example_list
print "----------------\n"
print "Here is the list in YAML"
print "------------------------"
print yaml.dump(example_list, default_flow_style=False)
print "------------------------"
print "Here is the list in JSON"
print "------------------------"
print json.dumps(example_list)
print "------------------------"
# Persist both renderings to disk.
with open("example_yaml.yml", "w") as f:
    f.write(yaml.dump(example_list, default_flow_style=False))
with open("example_json.json", "w") as f:
    f.write(json.dumps(example_list))
| 25.066667 | 79 | 0.617908 |
"""
A Python program that creates a list. One of the elements of the list should be
a dictionary with at least two keys. Write this list out to a file using both
YAML and JSON formats. The YAML file should be in the expanded form.
"""
import yaml
import json
a = {
'name': 'router1',
'ip_addr': '1.2.3.4',
'serial_number': 'FTX000232',
'os_version': '12.4.15T',
'optional_attrib_1': 'foo',
}
b = {
'name': 'router2',
'ip_addr': '5.6.7.8',
'serial_number': 'FTX345632',
'os_version': '12.4.15T',
}
example_list = [a, b, "empty1", "empty2"]
print "Here is the list"
print "----------------"
print example_list
print "----------------\n"
print "Here is the list in YAML"
print "------------------------"
print yaml.dump(example_list, default_flow_style=False)
print "------------------------"
print "Here is the list in JSON"
print "------------------------"
print json.dumps(example_list)
print "------------------------"
with open("example_yaml.yml", "w") as f:
f.write(yaml.dump(example_list, default_flow_style=False))
with open("example_json.json", "w") as f:
f.write(json.dumps(example_list))
| false | true |
790024a9fc6b0b7133377ca1c6234ecab0a2801e | 4,429 | py | Python | use_it_or_lose_it.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | use_it_or_lose_it.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | use_it_or_lose_it.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | 1 | 2022-01-03T01:44:39.000Z | 2022-01-03T01:44:39.000Z | '''
Created on Sep 18, 2017
@author: jschm
'''
from cs115 import map
def powerset(lst):
    """Return the power set of lst as a list of lists.

    Order follows the use-it-or-lose-it recursion: all subsets without
    lst[0] first, then those same subsets with lst[0] prepended.
    """
    if lst == []:
        return [[]]
    # Subsets that "lose" the first element.
    lose_it = powerset(lst[1:])
    # "Use" the first element by prepending it to each of those subsets.
    # A list comprehension replaces the non-standard cs115.map: the
    # builtin map() returns an iterator in Python 3, which would make
    # `lose_it + use_it` raise TypeError if cs115 is unavailable.
    use_it = [[lst[0]] + subset for subset in lose_it]
    return lose_it + use_it
print(powerset(['a', 'b', 'c']))
def subset(target, lst):
    """Report whether some subset of lst sums exactly to target.

    Values in lst may be positive, negative, or zero.
    """
    if target == 0:
        # The empty subset always reaches a target of 0.
        return True
    if lst == []:
        # Nothing left to pick and the target is still non-zero.
        return False
    # Use the first value, or lose it.  `or` short-circuits, so the
    # lose-it branch is only explored when the use-it branch fails.
    use_it = subset(target - lst[0], lst[1:])
    return use_it or subset(target, lst[1:])
print(subset(5,[1,3,2,4,5]))
def subset_with_values(target, lst):
    """Like subset(), but also report which values make the sum.

    Returns a (possible, values) tuple: `possible` is True when some
    subset of lst sums exactly to target, and `values` lists those
    numbers (empty when no subset works, or when target is 0).
    """
    if target == 0:
        # Reached by the empty subset - nothing more to add.
        return (True, [])
    if lst == []:
        return (False, [])
    # Try using the first value; fall back to skipping it.
    ok, used = subset_with_values(target - lst[0], lst[1:])
    if ok:
        return (True, [lst[0]] + used)
    return subset_with_values(target, lst[1:])
print(subset_with_values(8, [7,2,2,2,2]))
print(subset_with_values(12, [1,2,4,9]))
"""
def LCSWithValues2(S1,S2):
if S1 == "" or S2 == "":
return (0, "")
if S1[0] == S2[0]:
result = result + S1[0]
return (1 + LCSWithValues2(S1[1:], S2[1:]), result)
useS1 = LCSWithValues2(S1, S2[1:])
useS2 = LCSWithValues2(S1[1:], S2)
if useS1[0] > useS2[0]:
return useS1
return useS2
print(LCSWithValues2("sam", "spam"))
"""
def LCSWithValues(S1, S2):
    """Return (length, string) for the longest common subsequence of S1 and S2."""
    if S1 == "" or S2 == "":
        # An empty string shares nothing with anything.
        return (0, "")
    if S1[0] == S2[0]:
        # Matching heads: the LCS keeps this character.
        tail_len, tail_str = LCSWithValues(S1[1:], S2[1:])
        return (tail_len + 1, S1[0] + tail_str)
    # Heads differ: drop one character from either string and keep the
    # better result (ties go to dropping from S1, as before).
    keep_s1 = LCSWithValues(S1, S2[1:])
    keep_s2 = LCSWithValues(S1[1:], S2)
    return keep_s1 if keep_s1[0] > keep_s2[0] else keep_s2
#^^^the LCSWithValues2 does not work because the result variable needs to be defined, and if it is redefined it stays empty always.
def coin_row(lst):
    """Maximum sum obtainable from lst without taking two adjacent entries.

    Use-it-or-lose-it: either take lst[0] (which forbids lst[1], so we
    recurse on lst[2:]) or skip lst[0] and recurse on lst[1:].
    """
    if lst == []:
        return 0
    use_it = lst[0] + coin_row(lst[2:])
    lose_it = coin_row(lst[1:])
    return max(use_it, lose_it)
def coin_row_with_values(lst):
    """Like coin_row(), but return [best_sum, coins_used] instead of the sum."""
    if lst == []:
        return [0, []]
    # Take the first coin plus the best solution two positions onward...
    take = coin_row_with_values(lst[2:])
    take_sum = lst[0] + take[0]
    # ...or skip it and solve the remainder.
    skip = coin_row_with_values(lst[1:])
    # Strictly greater, so ties keep the skip-it solution (as before).
    if take_sum > skip[0]:
        return [take_sum, [lst[0]] + take[1]]
    return skip
print(coin_row([10, 5, 5, 5, 10, 10, 1, 1]))
print(coin_row_with_values([10, 5, 5, 5, 10, 50, 1, 10, 1, 1, 25]))
#can use below as spell-checker
def distance(first, second):
    """Levenshtein edit distance between two strings (naive recursion)."""
    # Exhausting one string costs one edit per leftover character.
    if first == '':
        return len(second)
    if second == '':
        return len(first)
    if first[0] == second[0]:
        # Matching heads cost nothing.
        return distance(first[1:], second[1:])
    # Otherwise pay 1 for the cheapest of substitute / delete / insert.
    # (min(1+a, 1+b, 1+c) == 1 + min(a, b, c))
    return 1 + min(
        distance(first[1:], second[1:]),  # substitution
        distance(first[1:], second),      # deletion
        distance(first, second[1:]),      # insertion
    )
| 31.635714 | 131 | 0.604651 | from cs115 import map
def powerset(lst):
if lst == []:
return [[]]
lose_it = powerset(lst[1:])
use_it = map(lambda subset: [lst[0]] + subset, lose_it)
return lose_it + use_it
print(powerset(['a', 'b', 'c']))
def subset(target, lst):
if target == 0:
return True
if lst == []:
return False
return subset(target - lst[0], lst[1:]) or subset(target, lst[1:])
print(subset(5,[1,3,2,4,5]))
def subset_with_values(target, lst):
if target == 0:
return(True, [])
if lst == []:
return(False, [])
use_it = subset_with_values(target - lst[0], lst[1:])
if use_it[0]:
return(True, [lst[0]] + use_it[1])
return subset_with_values(target, lst[1:])
print(subset_with_values(8, [7,2,2,2,2]))
print(subset_with_values(12, [1,2,4,9]))
def LCSWithValues(S1,S2):
if S1 == "" or S2 == "":
return (0, "")
if S1[0] == S2[0]:
result = LCSWithValues(S1[1:], S2[1:])
return (1 + result[0], S1[0] + result[1])
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if useS1[0] > useS2[0]:
return useS1
return useS2
print(LCSWithValues("sam", "spam"))
def coin_row(lst):
return 0 if lst == [] else max(lst[0] + coin_row(lst[2:]), coin_row(lst[1:]))
def coin_row_with_values(lst):
if lst == []:
return [0, []]
use_it = coin_row_with_values(lst[2:])
new_sum = lst[0] + use_it[0]
lose_it = coin_row_with_values(lst[1:])
if new_sum > lose_it[0]:
#only returns this once I think
#nevermind!
#print('hello')
return [new_sum, [lst[0]] + use_it[1]]
return lose_it
print(coin_row([10, 5, 5, 5, 10, 10, 1, 1]))
print(coin_row_with_values([10, 5, 5, 5, 10, 50, 1, 10, 1, 1, 25]))
#can use below as spell-checker
def distance(first, second):
if first == '':
return len(second)
if second == '':
return len(first)
if first[0] == second[0]:
return distance(first[1:], second[1:])
substitution = 1 + distance(first[1:], second[1:])
deletion = 1 + distance(first[1:], second)
insertion = 1 + distance(first, second[1:])
return min(substitution, deletion, insertion)
| true | true |
790025ba42a649dac0d6f5e2049338b0ebff12fe | 9,333 | py | Python | test/functional/feature_block_reward_reallocation.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-12-01T17:15:50.000Z | 2020-12-11T13:29:54.000Z | test/functional/feature_block_reward_reallocation.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 1 | 2020-07-27T10:54:07.000Z | 2020-08-28T05:37:26.000Z | test/functional/feature_block_reward_reallocation.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-11-09T16:38:04.000Z | 2021-04-02T05:27:36.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Ttm Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blocktools import create_block, create_coinbase, get_masternode_payment
from test_framework.mininode import P2PDataStore, network_thread_start
from test_framework.messages import CTxOut, FromHex, CCbTx, CTransaction, ToHex
from test_framework.script import CScript
from test_framework.test_framework import TtmTestFramework
from test_framework.util import assert_equal, get_bip9_status, hex_str_to_bytes
'''
feature_block_reward_reallocation.py
Checks block reward reallocation correctness
'''
class BlockRewardReallocationTest(TtmTestFramework):
    """Exercises the BIP9-style 'realloc' softfork that reallocates the
    block reward between miners and masternodes.

    Covers the DEFINED -> STARTED -> LOCKED_IN -> ACTIVE state machine,
    the per-window decreasing signalling threshold (see threshold()),
    the gradual ~50/50 -> ~60/40 reward shift after activation, and a
    chain rollback back into the STARTED state.
    """

    def set_test_params(self):
        # Presumably 2 nodes with 1 masternode - confirm against
        # set_ttm_test_params in the test framework.
        self.set_ttm_test_params(2, 1, fast_dip3_enforcement=True)
        self.set_ttm_dip8_activation(450)

    # 536870912 == 0x20000000, i.e. not signalling for anything
    def create_test_block(self, version=536870912):
        """Build and solve a valid block on top of the current tip.

        The coinbase pays each masternode output listed in the
        getblocktemplate result; `version` carries the BIP9 signalling
        bits (default signals nothing).
        """
        self.bump_mocktime(5)
        bt = self.nodes[0].getblocktemplate()
        tip = int(bt['previousblockhash'], 16)
        nextheight = bt['height']
        coinbase = create_coinbase(nextheight)
        coinbase.nVersion = 3
        coinbase.nType = 5  # CbTx
        # Split the coinbase value: masternode outputs are deducted from
        # the miner's output.
        coinbase.vout[0].nValue = bt['coinbasevalue']
        for mn in bt['masternode']:
            coinbase.vout.append(CTxOut(mn['amount'], CScript(hex_str_to_bytes(mn['script']))))
            coinbase.vout[0].nValue -= mn['amount']
        cbtx = FromHex(CCbTx(), bt['coinbase_payload'])
        coinbase.vExtraPayload = cbtx.serialize()
        coinbase.rehash()
        coinbase.calc_sha256()
        block = create_block(tip, coinbase, self.mocktime)
        block.nVersion = version
        # Add quorum commitments from template
        for tx in bt['transactions']:
            tx2 = FromHex(CTransaction(), tx['data'])
            if tx2.nType == 6:
                block.vtx.append(tx2)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block

    def signal(self, num_blocks, expected_lockin):
        """Mine one full 500-block window with `num_blocks` signalling blocks.

        Non-signalling blocks are submitted via create_test_block();
        signalling ones via generate().  Asserts the softfork ends the
        window LOCKED_IN when expected_lockin is True, else still STARTED.
        """
        self.log.info("Signal with %d/500 blocks" % (num_blocks))
        # create and send non-signalling blocks
        for i in range(500 - num_blocks):
            test_block = self.create_test_block()
            self.nodes[0].submitblock(ToHex(test_block))
        # generate at most 10 signaling blocks at a time
        if num_blocks > 0:
            for i in range((num_blocks - 1) // 10):
                self.bump_mocktime(10)
                self.nodes[0].generate(10)
            self.nodes[0].generate((num_blocks - 1) % 10)
            # State only changes on the window boundary - still STARTED
            # one block before it.
            assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')
            self.nodes[0].generate(1)
        if expected_lockin:
            assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'locked_in')
        else:
            assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')

    def threshold(self, attempt):
        """Expected signalling threshold for the given window attempt:
        400 - attempt^2, floored at 300 (out of a 500-block window).
        """
        threshold_calc = 400 - attempt * attempt
        if threshold_calc < 300:
            return 300
        return threshold_calc

    def run_test(self):
        self.log.info("Wait for DIP3 to activate")
        while get_bip9_status(self.nodes[0], 'dip0003')['status'] != 'active':
            self.bump_mocktime(10)
            self.nodes[0].generate(10)
        self.nodes[0].add_p2p_connection(P2PDataStore())
        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()
        self.log.info("Mine all but one remaining block in the window")
        bi = self.nodes[0].getblockchaininfo()
        for i in range(498 - bi['blocks']):
            self.bump_mocktime(1)
            self.nodes[0].generate(1)
        self.log.info("Initial state is DEFINED")
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 498)
        assert_equal(bi['bip9_softforks']['realloc']['status'], 'defined')
        self.log.info("Advance from DEFINED to STARTED at height = 499")
        self.nodes[0].generate(1)
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 499)
        assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
        assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(0))
        self.signal(399, False)  # 1 block short
        self.log.info("Still STARTED but new threshold should be lower at height = 999")
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 999)
        assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(1))
        self.signal(398, False)  # 1 block short again
        self.log.info("Still STARTED but new threshold should be even lower at height = 1499")
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 1499)
        assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
        # Remembered so the chain can be rolled back to this point later.
        pre_locked_in_blockhash = bi['bestblockhash']
        self.signal(396, True)  # just enough to lock in
        self.log.info("Advanced to LOCKED_IN at height = 1999")
        for i in range(49):
            self.bump_mocktime(10)
            self.nodes[0].generate(10)
        self.nodes[0].generate(9)
        self.log.info("Still LOCKED_IN at height = 2498")
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 2498)
        assert_equal(bi['bip9_softforks']['realloc']['status'], 'locked_in')
        self.log.info("Advance from LOCKED_IN to ACTIVE at height = 2499")
        self.nodes[0].generate(1)  # activation
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 2499)
        assert_equal(bi['bip9_softforks']['realloc']['status'], 'active')
        assert_equal(bi['bip9_softforks']['realloc']['since'], 2500)
        self.log.info("Reward split should stay ~50/50 before the first superblock after activation")
        # This applies even if reallocation was activated right at superblock height like it does here
        bt = self.nodes[0].getblocktemplate()
        assert_equal(bt['height'], 2500)
        assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
        self.nodes[0].generate(9)
        bt = self.nodes[0].getblocktemplate()
        assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
        assert_equal(bt['coinbasevalue'], 13748571607)
        assert_equal(bt['masternode'][0]['amount'], 6874285801)  # 0.4999999998
        self.log.info("Reallocation should kick-in with the superblock mined at height = 2010")
        for period in range(19):  # there will be 19 adjustments, 3 superblocks long each
            for i in range(3):
                self.bump_mocktime(10)
                self.nodes[0].generate(10)
            bt = self.nodes[0].getblocktemplate()
            assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
        self.log.info("Reward split should reach ~60/40 after reallocation is done")
        assert_equal(bt['coinbasevalue'], 10221599170)
        assert_equal(bt['masternode'][0]['amount'], 6132959502)  # 0.6
        self.log.info("Reward split should stay ~60/40 after reallocation is done")
        for period in range(10):  # check 10 next superblocks
            self.bump_mocktime(10)
            self.nodes[0].generate(10)
            bt = self.nodes[0].getblocktemplate()
            assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
        assert_equal(bt['coinbasevalue'], 9491484944)
        assert_equal(bt['masternode'][0]['amount'], 5694890966)  # 0.6
        # make sure all nodes are still synced
        self.sync_all()
        self.log.info("Rollback the chain back to the STARTED state")
        self.mocktime = self.nodes[0].getblock(pre_locked_in_blockhash, 1)['time']
        for node in self.nodes:
            node.invalidateblock(pre_locked_in_blockhash)
        # create and send non-signalling block
        test_block = self.create_test_block()
        self.nodes[0].submitblock(ToHex(test_block))
        bi = self.nodes[0].getblockchaininfo()
        assert_equal(bi['blocks'], 1499)
        assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
        assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
        self.log.info("Check thresholds reach min level and stay there")
        for i in range(8):  # 7 to reach min level and 1 more to check it doesn't go lower than that
            self.signal(0, False)  # no need to signal
            bi = self.nodes[0].getblockchaininfo()
            assert_equal(bi['blocks'], 1999 + i * 500)
            assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
            assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(i + 3))
        assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], 300)
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], 300)
# Run the functional test when invoked directly.
if __name__ == '__main__':
    BlockRewardReallocationTest().main()
| 45.975369 | 124 | 0.645773 |
from test_framework.blocktools import create_block, create_coinbase, get_masternode_payment
from test_framework.mininode import P2PDataStore, network_thread_start
from test_framework.messages import CTxOut, FromHex, CCbTx, CTransaction, ToHex
from test_framework.script import CScript
from test_framework.test_framework import TtmTestFramework
from test_framework.util import assert_equal, get_bip9_status, hex_str_to_bytes
class BlockRewardReallocationTest(TtmTestFramework):
def set_test_params(self):
self.set_ttm_test_params(2, 1, fast_dip3_enforcement=True)
self.set_ttm_dip8_activation(450)
def create_test_block(self, version=536870912):
self.bump_mocktime(5)
bt = self.nodes[0].getblocktemplate()
tip = int(bt['previousblockhash'], 16)
nextheight = bt['height']
coinbase = create_coinbase(nextheight)
coinbase.nVersion = 3
coinbase.nType = 5
coinbase.vout[0].nValue = bt['coinbasevalue']
for mn in bt['masternode']:
coinbase.vout.append(CTxOut(mn['amount'], CScript(hex_str_to_bytes(mn['script']))))
coinbase.vout[0].nValue -= mn['amount']
cbtx = FromHex(CCbTx(), bt['coinbase_payload'])
coinbase.vExtraPayload = cbtx.serialize()
coinbase.rehash()
coinbase.calc_sha256()
block = create_block(tip, coinbase, self.mocktime)
block.nVersion = version
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def signal(self, num_blocks, expected_lockin):
self.log.info("Signal with %d/500 blocks" % (num_blocks))
for i in range(500 - num_blocks):
test_block = self.create_test_block()
self.nodes[0].submitblock(ToHex(test_block))
if num_blocks > 0:
for i in range((num_blocks - 1) // 10):
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].generate((num_blocks - 1) % 10)
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')
self.nodes[0].generate(1)
if expected_lockin:
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'locked_in')
else:
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')
def threshold(self, attempt):
threshold_calc = 400 - attempt * attempt
if threshold_calc < 300:
return 300
return threshold_calc
def run_test(self):
self.log.info("Wait for DIP3 to activate")
while get_bip9_status(self.nodes[0], 'dip0003')['status'] != 'active':
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].add_p2p_connection(P2PDataStore())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mine all but one remaining block in the window")
bi = self.nodes[0].getblockchaininfo()
for i in range(498 - bi['blocks']):
self.bump_mocktime(1)
self.nodes[0].generate(1)
self.log.info("Initial state is DEFINED")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 498)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'defined')
self.log.info("Advance from DEFINED to STARTED at height = 499")
self.nodes[0].generate(1)
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(0))
self.signal(399, False)
self.log.info("Still STARTED but new threshold should be lower at height = 999")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 999)
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(1))
self.signal(398, False)
self.log.info("Still STARTED but new threshold should be even lower at height = 1499")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1499)
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
pre_locked_in_blockhash = bi['bestblockhash']
self.signal(396, True)
self.log.info("Advanced to LOCKED_IN at height = 1999")
for i in range(49):
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].generate(9)
self.log.info("Still LOCKED_IN at height = 2498")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 2498)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'locked_in')
self.log.info("Advance from LOCKED_IN to ACTIVE at height = 2499")
self.nodes[0].generate(1)
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 2499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'active')
assert_equal(bi['bip9_softforks']['realloc']['since'], 2500)
self.log.info("Reward split should stay ~50/50 before the first superblock after activation")
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['height'], 2500)
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
self.nodes[0].generate(9)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
assert_equal(bt['coinbasevalue'], 13748571607)
assert_equal(bt['masternode'][0]['amount'], 6874285801)
self.log.info("Reallocation should kick-in with the superblock mined at height = 2010")
for period in range(19):
for i in range(3):
self.bump_mocktime(10)
self.nodes[0].generate(10)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
self.log.info("Reward split should reach ~60/40 after reallocation is done")
assert_equal(bt['coinbasevalue'], 10221599170)
assert_equal(bt['masternode'][0]['amount'], 6132959502)
self.log.info("Reward split should stay ~60/40 after reallocation is done")
for period in range(10):
self.bump_mocktime(10)
self.nodes[0].generate(10)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
assert_equal(bt['coinbasevalue'], 9491484944)
assert_equal(bt['masternode'][0]['amount'], 5694890966)
self.sync_all()
self.log.info("Rollback the chain back to the STARTED state")
self.mocktime = self.nodes[0].getblock(pre_locked_in_blockhash, 1)['time']
for node in self.nodes:
node.invalidateblock(pre_locked_in_blockhash)
test_block = self.create_test_block()
self.nodes[0].submitblock(ToHex(test_block))
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
self.log.info("Check thresholds reach min level and stay there")
for i in range(8):
self.signal(0, False) # no need to signal
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1999 + i * 500)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(i + 3))
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], 300)
if __name__ == '__main__':
BlockRewardReallocationTest().main()
| true | true |
790026b50ae10f4540afcac11522fd528abc603f | 2,789 | py | Python | monitor_temp.py | KevinLee3627/pi-temp-monitor | 0ab519f19693a201fa5a49e58cfa7e73becd7206 | [
"MIT"
] | null | null | null | monitor_temp.py | KevinLee3627/pi-temp-monitor | 0ab519f19693a201fa5a49e58cfa7e73becd7206 | [
"MIT"
] | null | null | null | monitor_temp.py | KevinLee3627/pi-temp-monitor | 0ab519f19693a201fa5a49e58cfa7e73becd7206 | [
"MIT"
] | null | null | null | from gpiozero import CPUTemperature
from tabulate import tabulate
from math import floor
import numpy as np
import termplotlib as tpl
import time
import shutil
def roundNum(num, digits):
    """Truncate num to the given number of decimal digits (floor, not round)."""
    return floor(num * 10 ** digits) / (10 ** digits)

def CtoF(temp):
    """Convert a Celsius temperature to a Fahrenheit string (3 decimals).

    Bug fix: the conversion is C * 1.8 + 32; the original added 1.8
    instead of multiplying, which is wrong for every input except 2.25.
    """
    fahrenheit = (temp * 1.8) + 32
    rounded = roundNum(fahrenheit, 3)
    return str(rounded)
# gpiozero handle for the Raspberry Pi's CPU temperature sensor.
cpu = CPUTemperature()

# ANSI escape sequences used to colour the terminal output.
colors = {
    'HEADER': '\033[95m',
    'OKBLUE': '\033[94m',
    'OKCYAN': '\033[96m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m',
    'BOLD': '\033[1m',
    'UNDERLINE': '\033[4m',
}

# Rolling history of sample times (seconds) and temperatures (Celsius),
# seeded with one reading so the plot always has data.
times = [0]
temps = [cpu.temperature]
# Main monitoring loop: sample the CPU temperature every tickRate seconds,
# keep a rolling window, and redraw a terminal plot plus a summary table.
while True:
    tickRate = 2  # takes data every {tickRate} seconds
    minutes = 5   # size of the rolling window shown in the plot
    numPoints = int(60 / tickRate * minutes)  # samples kept in the window
    # Re-read the terminal size each tick so the plot tracks resizes.
    width, height = shutil.get_terminal_size()
    # Trim history to the window before appending the new sample.
    if len(temps) > numPoints:
        temps = temps[-numPoints:]
        times = times[-numPoints:]
    temps.append(cpu.temperature)
    times.append(times[-1] + tickRate)
    averageTemp = roundNum(np.average(temps), 3)
    # Colour-code the current reading by temperature band.
    cpuTempColor = ''
    if cpu.temperature < 50:
        cpuTempColor = colors['OKBLUE']
    elif cpu.temperature < 65:
        cpuTempColor = colors['OKCYAN']
    elif cpu.temperature < 80:
        cpuTempColor = colors['OKGREEN']
    else:
        cpuTempColor = colors['FAIL'] + colors['BOLD']
    # Single-row summary table: current / average / peak / lowest.
    table = [[
        f"{cpuTempColor}{str(cpu.temperature)}\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\N{DEGREE SIGN}F"
    ]]
    headers = [
        f"{colors['OKGREEN']}CPU TEMPERATURE",
        f"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)",
        f"{colors['FAIL']}Peak Temperature (last {minutes} minutes)",
        f"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}", #OKGREEN at end is to make sure table lines are green, not cyan
    ]
    print('\n')
    fig = tpl.figure()
    plotConfig = {
        'width': width-2,
        'height': height-5,
        'label': 'CPU Temperature',
        'xlabel': 'Time (s)',
        # NOTE(review): times[-1:] is a one-element *list*, so xlim is
        # [t0, [t_last]] - probably meant times[-1]; confirm against
        # termplotlib's expected xlim format before changing.
        'xlim': [times[0], times[-1:]],
        'ylim': [np.amin(temps)-2, np.amax(temps)+2],
        'title': f"CPU Temperature over last {minutes} minutes",
    }
    fig.plot(times, temps, **plotConfig)
    fig.show()
    print('\n')
    print(tabulate(table, headers=headers))
    time.sleep(tickRate)
from tabulate import tabulate
from math import floor
import numpy as np
import termplotlib as tpl
import time
import shutil
def roundNum(num, digits):
    """Truncate ``num`` to ``digits`` decimal places (floors, never rounds up)."""
    return floor(num * 10 ** digits) / (10 ** digits)
def CtoF(temp):
    """Convert ``temp`` in degrees Celsius to a Fahrenheit string.

    BUG FIX: the conversion previously computed ``(temp + 1.8) + 32``;
    Celsius to Fahrenheit is ``temp * 1.8 + 32``, so every reported
    Fahrenheit value was wrong.
    """
    fahrenheit = (temp * 1.8) + 32
    rounded = roundNum(fahrenheit, 3)
    return str(rounded)
# gpiozero handle exposing the Raspberry Pi CPU temperature sensor.
cpu = CPUTemperature()
# ANSI terminal escape sequences used to colour the output.
colors = {
    'HEADER': '\033[95m',
    'OKBLUE': '\033[94m',
    'OKCYAN': '\033[96m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m',
    'BOLD': '\033[1m',
    'UNDERLINE': '\033[4m',
}
# Rolling sample history: times (seconds since start) and temperatures (°C).
# Seeded with one entry so times[-1] is valid on the first iteration.
times = [0]
temps = [cpu.temperature]
while True:
    tickRate = 2  # sample interval in seconds
    minutes = 5   # length of the rolling window shown in the plot
    numPoints = int(60 / tickRate * minutes)
    # Re-read the terminal size every tick so the plot tracks window resizes.
    width, height = shutil.get_terminal_size()
    # Trim history to the rolling window before appending the new sample.
    if len(temps) > numPoints:
        temps = temps[-numPoints:]
        times = times[-numPoints:]
    temps.append(cpu.temperature)
    times.append(times[-1] + tickRate)
    averageTemp = roundNum(np.average(temps), 3)
    # Colour-code the current reading by severity.
    cpuTempColor = ''
    if cpu.temperature < 50:
        cpuTempColor = colors['OKBLUE']
    elif cpu.temperature < 65:
        cpuTempColor = colors['OKCYAN']
    elif cpu.temperature < 80:
        cpuTempColor = colors['OKGREEN']
    else:
        cpuTempColor = colors['FAIL'] + colors['BOLD']
    table = [[
        f"{cpuTempColor}{str(cpu.temperature)}\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\N{DEGREE SIGN}F"
    ]]
    headers = [
        f"{colors['OKGREEN']}CPU TEMPERATURE",
        f"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)",
        f"{colors['FAIL']}Peak Temperature (last {minutes} minutes)",
        # Trailing OKGREEN keeps the table rules green rather than cyan.
        f"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}",
    ]
    print('\n')
    fig = tpl.figure()
    plotConfig = {
        'width': width-2,
        'height': height-5,
        'label': 'CPU Temperature',
        'xlabel': 'Time (s)',
        # BUG FIX: xlim expects two numbers; times[-1:] was a one-element
        # list, so the upper bound was a list instead of a scalar.
        'xlim': [times[0], times[-1]],
        'ylim': [np.amin(temps)-2, np.amax(temps)+2],
        'title': f"CPU Temperature over last {minutes} minutes",
    }
    fig.plot(times, temps, **plotConfig)
    fig.show()
    print('\n')
    print(tabulate(table, headers=headers))
time.sleep(tickRate) | true | true |
79002735558a463640ebfdcb3865832eb37a941a | 5,017 | py | Python | sim/main.py | dnbh/kpg | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 69 | 2018-01-08T19:56:55.000Z | 2022-03-05T17:14:05.000Z | sim/main.py | dnbaker/emp | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 6 | 2018-04-14T21:09:51.000Z | 2021-07-17T21:08:54.000Z | sim/main.py | dnbaker/emp | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 11 | 2018-03-21T19:28:35.000Z | 2021-06-29T17:33:34.000Z | from __future__ import division
import fa
import sys
import os
from fa import chunker
if __name__ == "__main__":
    from sys import stderr
    import argparse
    parser = argparse.ArgumentParser(description=(
        "Create a set of synthetic genomes consisting "
        "of subgroups per tax level. Some kmers are unique, "
        "some are shared, and this provides a case where we can test"
        " the efficacy and behavior of our bitmap method."))
    parser.add_argument("-n", "--num-nucleotides-per-leaf",
                        type=int, default=13000)
    parser.add_argument("-N", "--num-nucs-shared-per-subgroup",
                        type=int, default=2000)
    parser.add_argument("-l", "--num-nucs-shared-per-level",
                        type=int, default=8000)
    parser.add_argument("-d", "--tree-depth",
                        type=int, default=4)
    parser.add_argument("-s", "--split-size", type=int,
                        default=3,
                        help=("Number of subgroups for "
                              "each parent node."))
    parser.add_argument("--parent-map", "-p",
                        help="Path to which to write synthetic taxonomy.",
                        default="nodes.dmp")
    parser.add_argument("-S", "--subgroup-size", type=int,
                        default=3,
                        help="Number of genomes for each subgroup")
    parser.add_argument("-o", "--outdir", default=".", type=str)
    parser.add_argument("--name-id-map", "-m", default="synth_nameidmap.txt")
    args = parser.parse_args()
    # Variables/settings for constructing synthetic genome
    # and accessory files.
    mult_per_layer = args.split_size * args.subgroup_size
    depth = args.tree_depth
    nleaves = mult_per_layer ** (depth - 1)
    leaf_seqs = [fa.SeqId(fa.gen_seq(args.num_nucleotides_per_leaf), i) for
                 i in range(nleaves)]
    nleaf_seq = len(leaf_seqs)
    outdir = args.outdir
    if not os.path.isdir(outdir):
        if os.path.isfile(outdir):
            raise Exception("Path set for outdir ('%s') is a"
                            " file... Nah, dawg." % outdir)
        os.mkdir(outdir)
    outdir = outdir + '/'  # Append slash
    name_id_map = outdir + args.name_id_map
    parent_map = outdir + args.parent_map
    # Variables for constructing the parent_map dictionary.
    pcmap = {}
    used_seqids = set(i.taxid() for i in leaf_seqs)
    ctax = max(used_seqids) + 1
    last_layer = []
    # Walk up the tree one level at a time: append level-shared and
    # subgroup-shared sequence to every leaf, and record child->parent
    # taxid links in pcmap.
    for i in range(1, depth):
        nchunks = nleaf_seq // (mult_per_layer ** i)
        chunk_size = nleaf_seq // nchunks
        assert nleaf_seq % chunk_size == 0
        for seqsetid, seqset in enumerate(chunker(leaf_seqs, chunk_size)):
            print("seqset len: %i" % len(seqset), file=stderr)
            add = fa.gen_seq(args.num_nucs_shared_per_level)
            for seq in seqset:
                seq.seq += add
                seq.subsets[i] = seqsetid
            for sssid, seqsubset in enumerate(chunker(seqset,
                                                      args.subgroup_size)):
                # print("seqsubset len: %i" % len(seqsubset), file=stderr)
                add = fa.gen_seq(args.num_nucs_shared_per_subgroup)
                for seq in seqset:
                    seq.seq += add
                    seq.subgroups[i] = seqsetid
            if i == 1:  # or it not last_layer
                # Add leaf node to parent connections
                for seq in seqset:
                    pcmap[seq.taxid()] = ctax + seqsetid
            if i > 1:
                # Add higher nodes to parent connections
                if i == depth - 1:
                    pcmap.update((el, 1) for el in last_layer)
                    break
                    # This leaves the loop on the last layer in the tree
                    # because the root is 1 by construction
                else:
                    # pcmap.update((tax, i + ctax) for tax in
                    #              last_layer[i:i+mult_per_layer] for
                    #              i in range(mult_per_layer))
                    # NOTE(review): this inner loop variable shadows the
                    # outer level index 'i'; later reads of 'i' in this
                    # seqset loop see the clobbered value — confirm this
                    # matches the intent of the commented-out variant above.
                    for i in range(mult_per_layer):
                        for tax in last_layer[i:i + mult_per_layer]:
                            pcmap[tax] = i + ctax
        last_layer = [ctax + i for i in range(nchunks)]
        used_seqids.update(last_layer)
        ctax = max(used_seqids) + 1
    del used_seqids
    del ctax
    del last_layer
    # Set comprehension used purely for its side effect: write each genome.
    {seq.write(outdir + seq.filename()) for seq in leaf_seqs}
    print("[1/3] Successfully created synthetic genomes.", file=stderr)
    filenames = [outdir + seq.filename() for seq in leaf_seqs]
    fa.write_nameid_map(name_id_map, filenames)
    print("[2/3] Successfully wrote nameidmap to %s." % name_id_map,
          file=stderr)
    fa.write_parent_map(parent_map, pcmap)
    print("[3/3] Successfully wrote child->parent map.", file=stderr)
    stderr.write("Genomes: %s\n" % ', '.join(filenames))
    stderr.write("Nameidmap: %s\n" % name_id_map)
    stderr.write("Taxonomy: %s\n" % parent_map)
| 43.626087 | 77 | 0.568069 | from __future__ import division
import fa
import sys
import os
from fa import chunker
if __name__ == "__main__":
    from sys import stderr
    import argparse
    parser = argparse.ArgumentParser(description=(
        "Create a set of synthetic genomes consisting "
        "of subgroups per tax level. Some kmers are unique, "
        "some are shared, and this provides a case where we can test"
        " the efficacy and behavior of our bitmap method."))
    parser.add_argument("-n", "--num-nucleotides-per-leaf",
                        type=int, default=13000)
    parser.add_argument("-N", "--num-nucs-shared-per-subgroup",
                        type=int, default=2000)
    parser.add_argument("-l", "--num-nucs-shared-per-level",
                        type=int, default=8000)
    parser.add_argument("-d", "--tree-depth",
                        type=int, default=4)
    parser.add_argument("-s", "--split-size", type=int,
                        default=3,
                        help=("Number of subgroups for "
                              "each parent node."))
    parser.add_argument("--parent-map", "-p",
                        help="Path to which to write synthetic taxonomy.",
                        default="nodes.dmp")
    parser.add_argument("-S", "--subgroup-size", type=int,
                        default=3,
                        help="Number of genomes for each subgroup")
    parser.add_argument("-o", "--outdir", default=".", type=str)
    parser.add_argument("--name-id-map", "-m", default="synth_nameidmap.txt")
    args = parser.parse_args()
    # Derived sizing: leaves per parent node and total leaf count.
    mult_per_layer = args.split_size * args.subgroup_size
    depth = args.tree_depth
    nleaves = mult_per_layer ** (depth - 1)
    leaf_seqs = [fa.SeqId(fa.gen_seq(args.num_nucleotides_per_leaf), i) for
                 i in range(nleaves)]
    nleaf_seq = len(leaf_seqs)
    outdir = args.outdir
    if not os.path.isdir(outdir):
        if os.path.isfile(outdir):
            raise Exception("Path set for outdir ('%s') is a"
                            " file... Nah, dawg." % outdir)
        os.mkdir(outdir)
    outdir = outdir + '/'  # ensure trailing slash for path joins below
    name_id_map = outdir + args.name_id_map
    parent_map = outdir + args.parent_map
    # pcmap maps child taxid -> parent taxid; ctax is the next unused taxid.
    pcmap = {}
    used_seqids = set(i.taxid() for i in leaf_seqs)
    ctax = max(used_seqids) + 1
    last_layer = []
    # Walk up the tree one level at a time: append level-shared and
    # subgroup-shared sequence to every leaf, and record child->parent
    # taxid links in pcmap.
    for i in range(1, depth):
        nchunks = nleaf_seq // (mult_per_layer ** i)
        chunk_size = nleaf_seq // nchunks
        assert nleaf_seq % chunk_size == 0
        for seqsetid, seqset in enumerate(chunker(leaf_seqs, chunk_size)):
            print("seqset len: %i" % len(seqset), file=stderr)
            add = fa.gen_seq(args.num_nucs_shared_per_level)
            for seq in seqset:
                seq.seq += add
                seq.subsets[i] = seqsetid
            for sssid, seqsubset in enumerate(chunker(seqset,
                                                      args.subgroup_size)):
                add = fa.gen_seq(args.num_nucs_shared_per_subgroup)
                for seq in seqset:
                    seq.seq += add
                    seq.subgroups[i] = seqsetid
            if i == 1:
                # Leaf nodes attach directly to their level-1 parent.
                for seq in seqset:
                    pcmap[seq.taxid()] = ctax + seqsetid
            if i > 1:
                if i == depth - 1:
                    # Top layer: everything attaches to root taxid 1.
                    pcmap.update((el, 1) for el in last_layer)
                    break
                else:
                    # NOTE(review): this inner loop variable shadows the
                    # outer level index 'i'; later reads of 'i' in this
                    # seqset loop see the clobbered value — confirm intended.
                    for i in range(mult_per_layer):
                        for tax in last_layer[i:i + mult_per_layer]:
                            pcmap[tax] = i + ctax
        last_layer = [ctax + i for i in range(nchunks)]
        used_seqids.update(last_layer)
        ctax = max(used_seqids) + 1
    del used_seqids
    del ctax
    del last_layer
    # Set comprehension used purely for its side effect: write each genome.
    {seq.write(outdir + seq.filename()) for seq in leaf_seqs}
    print("[1/3] Successfully created synthetic genomes.", file=stderr)
    filenames = [outdir + seq.filename() for seq in leaf_seqs]
    fa.write_nameid_map(name_id_map, filenames)
    print("[2/3] Successfully wrote nameidmap to %s." % name_id_map,
          file=stderr)
    fa.write_parent_map(parent_map, pcmap)
    print("[3/3] Successfully wrote child->parent map.", file=stderr)
    stderr.write("Genomes: %s\n" % ', '.join(filenames))
    stderr.write("Nameidmap: %s\n" % name_id_map)
    stderr.write("Taxonomy: %s\n" % parent_map)
| true | true |
790027e1f01a39fdaddef1520846338aff1fd1da | 16,602 | py | Python | plaso/parsers/sqlite_plugins/skype.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 2 | 2016-02-18T12:46:29.000Z | 2022-03-13T03:04:59.000Z | plaso/parsers/sqlite_plugins/skype.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | null | null | null | plaso/parsers/sqlite_plugins/skype.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 6 | 2016-12-18T08:05:36.000Z | 2021-04-06T14:19:11.000Z | # -*- coding: utf-8 -*-
"""This file contains a basic Skype SQLite parser."""
import logging
from plaso.events import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class SkypeChatEvent(time_events.PosixTimeEvent):
  """Event for a single Skype chat message."""
  DATA_TYPE = u'skype:event:chat'
  def __init__(self, row, to_account):
    """Initializes the event from a Chats/Messages join row.

    Args:
      row: a sqlite3.Row carrying timestamp, title, body_xml, author and
          from_displayname columns. Keys must be plain (byte) strings:
          pysqlite raises "IndexError: Index must be int or string" for
          Unicode keys.
      to_account: string listing the conversation accounts other than
          the author.
    """
    posix_time = row['timestamp']
    super(SkypeChatEvent, self).__init__(
        posix_time, u'Chat from Skype', self.DATA_TYPE)
    sender = u'{0:s} <{1:s}>'.format(row['from_displayname'], row['author'])
    self.from_account = sender
    self.text = row['body_xml']
    self.title = row['title']
    self.to_account = to_account
class SkypeAccountEvent(time_events.PosixTimeEvent):
  """Event holding profile information about a Skype account."""
  DATA_TYPE = u'skype:event:account'
  def __init__(
      self, timestamp, usage, identifier, full_name, display_name, email,
      country):
    """Initializes a Skype account event.

    Args:
      timestamp: the POSIX timestamp value.
      usage: description of what the timestamp represents.
      identifier: the account row identifier, stored as the event offset.
      full_name: full name of the Skype account holder.
      display_name: display name chosen by the account holder.
      email: e-mail address registered for the account.
      country: home country chosen by the account holder.
    """
    super(SkypeAccountEvent, self).__init__(timestamp, usage)
    # The base class is not handed a data type here, so set it explicitly.
    self.data_type = self.DATA_TYPE
    self.country = country
    self.display_name = display_name
    self.email = email
    self.offset = identifier
    self.username = u'{0:s} <{1:s}>'.format(full_name, display_name)
class SkypeSMSEvent(time_events.PosixTimeEvent):
  """Event for an SMS sent through Skype."""
  DATA_TYPE = u'skype:event:sms'
  def __init__(self, row, dst_number):
    """Initializes the SMS event.

    Args:
      row: a sqlite3.Row with time_sms (send timestamp) and msg_sms
          (message text) columns. Keys must be plain (byte) strings for
          pysqlite.
      dst_number: the phone number the SMS was sent to.
    """
    posix_time = row['time_sms']
    super(SkypeSMSEvent, self).__init__(
        posix_time, u'SMS from Skype', self.DATA_TYPE)
    self.text = row['msg_sms']
    self.number = dst_number
class SkypeCallEvent(time_events.PosixTimeEvent):
  """Event describing one phase of a Skype call."""
  DATA_TYPE = u'skype:event:call'
  def __init__(self, timestamp, call_type, user_start_call,
               source, destination, video_conference):
    """Initializes the call event.

    Args:
      timestamp: the POSIX timestamp of the event.
      call_type: phase of the call; ParseCall emits WAITING, ACCEPTED
          or FINISHED.
      user_start_call: True if the owner account started the call.
      source: the account which started the call.
      destination: the account which received the call.
      video_conference: True if the call was a video conference.
    """
    super(SkypeCallEvent, self).__init__(
        timestamp, u'Call from Skype', self.DATA_TYPE)
    self.video_conference = video_conference
    self.user_start_call = user_start_call
    self.call_type = call_type
    self.dst_call = destination
    self.src_call = source
class SkypeTransferFileEvent(time_events.PosixTimeEvent):
  """Event for one step of a Skype file transfer."""
  DATA_TYPE = u'skype:event:transferfile'
  def __init__(self, row, timestamp, action_type, source, destination):
    """Initializes the file transfer event.

    Args:
      row: a sqlite3.Row with id, filepath, filename and filesize columns.
          Keys must be plain (byte) strings for pysqlite.
      timestamp: when the action happens.
      action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
      source: The account that sent the file.
      destination: The account that received the file.
    """
    super(SkypeTransferFileEvent, self).__init__(
        timestamp, u'File transfer from Skype', self.DATA_TYPE)
    self.offset = row['id']
    self.action_type = action_type
    self.source = source
    self.destination = destination
    self.transferred_filepath = row['filepath']
    self.transferred_filename = row['filename']
    try:
      self.transferred_filesize = int(row['filesize'])
    except (ValueError, TypeError):
      # BUG FIX: a NULL filesize column yields None and int(None) raises
      # TypeError, which the previous ValueError-only handler let escape
      # and abort parsing of the row.
      logging.debug(u'Unknown filesize {0:s}'.format(
          self.transferred_filename))
      self.transferred_filesize = 0
class SkypePlugin(interface.SQLitePlugin):
  """SQLite plugin for Skype main.db SQlite database file."""
  NAME = u'skype'
  DESCRIPTION = u'Parser for Skype SQLite database files.'
  # Queries for building cache.
  QUERY_DEST_FROM_TRANSFER = (
      u'SELECT parent_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')
  QUERY_SOURCE_FROM_TRANSFER = (
      u'SELECT pk_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')
  # Define the needed queries.
  QUERIES = [
      ((u'SELECT c.id, c.participants, c.friendlyname AS title, '
        u'm.author AS author, m.from_dispname AS from_displayname, '
        u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
        u'WHERE c.name = m.chatname'), u'ParseChat'),
      ((u'SELECT id, fullname, given_displayname, emails, '
        u'country, profile_timestamp, authreq_timestamp, '
        u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
        u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'),
      ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
        u'body AS msg_sms FROM SMSes'), u'ParseSMS'),
      ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, '
        u'starttime, accepttime, finishtime, filepath, filename, filesize, '
        u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'),
      ((u'SELECT c.id, cm.guid, c.is_incoming, '
        u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
        u'cm.start_timestamp AS accept_call, cm.call_duration '
        u'FROM Calls c, CallMembers cm '
        u'WHERE c.id = cm.call_db_id;'), u'ParseCall')]
  # The required tables.
  REQUIRED_TABLES = frozenset([
      u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes',
      u'Transfers', u'CallMembers', u'Calls'])
  def ParseAccountInformation(
      self, parser_mediator, row, query=None, **unused_kwargs):
    """Parses the Accounts database.

    Produces one SkypeAccountEvent per non-empty timestamp column.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    if row['profile_timestamp']:
      event_object = SkypeAccountEvent(
          row['profile_timestamp'], u'Profile Changed', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
    if row['authreq_timestamp']:
      event_object = SkypeAccountEvent(
          row['authreq_timestamp'], u'Authenticate Request', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
    if row['lastonline_timestamp']:
      event_object = SkypeAccountEvent(
          row['lastonline_timestamp'], u'Last Online', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
    if row['mood_timestamp']:
      event_object = SkypeAccountEvent(
          row['mood_timestamp'], u'Mood Event', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
    if row['sent_authrequest_time']:
      event_object = SkypeAccountEvent(
          row['sent_authrequest_time'], u'Auth Request Sent', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
    if row['lastused_timestamp']:
      event_object = SkypeAccountEvent(
          row['lastused_timestamp'], u'Last Used', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
  def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parses a chat message row.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    to_account = u''
    accounts = []
    # Every participant except the author counts as a recipient.
    participants = row['participants'].split(' ')
    for participant in participants:
      if participant != row['author']:
        accounts.append(participant)
    to_account = u', '.join(accounts)
    if not to_account:
      if row['dialog_partner']:
        to_account = row['dialog_partner']
      else:
        to_account = u'Unknown User'
    event_object = SkypeChatEvent(row, to_account)
    parser_mediator.ProduceEvent(event_object, query=query)
  def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parse SMS.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    dst_number = row['dstnum_sms'].replace(u' ', u'')
    event_object = SkypeSMSEvent(row, dst_number)
    parser_mediator.ProduceEvent(event_object, query=query)
  def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parse the calls taking into accounts some rows.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    # The GUID has the form '<source>-<destination>-...'; a malformed GUID
    # (fewer than two dash-separated parts) lands in the IndexError handler.
    try:
      aux = row['guid']
      if aux:
        aux_list = aux.split(u'-')
        src_aux = aux_list[0]
        dst_aux = aux_list[1]
      else:
        src_aux = u'Unknown [no GUID]'
        dst_aux = u'Unknown [no GUID]'
    except IndexError:
      src_aux = u'Unknown [{0:s}]'.format(row['guid'])
      dst_aux = u'Unknown [{0:s}]'.format(row['guid'])
    # NOTE(review): is_incoming is compared against the text u'0'; if the
    # column holds an integer this branch never matches — confirm affinity.
    if row['is_incoming'] == u'0':
      user_start_call = True
      source = src_aux
      # NOTE(review): 'ip_address' is not selected by QUERIES above, so this
      # access would fail if the branch executes — verify the query.
      if row['ip_address']:
        destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address'])
      else:
        destination = dst_aux
    else:
      user_start_call = False
      source = src_aux
      destination = dst_aux
    if row['videostatus'] == u'3':
      video_conference = True
    else:
      video_conference = False
    event_object = SkypeCallEvent(
        row['try_call'], u'WAITING', user_start_call, source, destination,
        video_conference)
    parser_mediator.ProduceEvent(event_object, query=query)
    if row['accept_call']:
      event_object = SkypeCallEvent(
          row['accept_call'], u'ACCEPTED', user_start_call, source,
          destination, video_conference)
      parser_mediator.ProduceEvent(event_object, query=query)
      if row['call_duration']:
        try:
          timestamp = int(row['accept_call']) + int(row['call_duration'])
          event_object = SkypeCallEvent(
              timestamp, u'FINISHED', user_start_call, source, destination,
              video_conference)
          parser_mediator.ProduceEvent(event_object, query=query)
        except ValueError:
          logging.debug((
              u'[{0:s}] Unable to determine when the call {1:s} was '
              u'finished.').format(self.NAME, row['id']))
  def ParseFileTransfer(
      self, parser_mediator, row, cache=None, database=None, query=None,
      **unused_kwargs):
    """Parse the transfer files.

    There is no direct relationship between who sends the file and
    who accepts the file.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: the row with all information related with the file transfers.
      query: Optional query string. The default is None.
      cache: a cache object (instance of SQLiteCache).
      database: A database object (instance of SQLiteDatabase).
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    # Lazily build lookup tables keyed by pk_id / parent_id the first time
    # a transfer row is parsed.
    source_dict = cache.GetResults(u'source')
    if not source_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER)
      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'source', 'pk_id', ('skypeid', 'skypename'))
      source_dict = cache.GetResults(u'source')
    dest_dict = cache.GetResults(u'destination')
    if not dest_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER)
      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'destination', 'parent_id', ('skypeid', 'skypename'))
      dest_dict = cache.GetResults(u'destination')
    source = u'Unknown'
    destination = u'Unknown'
    if row['parent_id']:
      destination = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])
      skype_id, skype_name = source_dict.get(row['parent_id'], [None, None])
      if skype_name:
        source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    else:
      source = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])
      if row['pk_id']:
        skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None])
        if skype_name:
          destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    # status 8: completed transfer lifecycle; status 2: pending solicitude.
    if row['status'] == 8:
      if row['starttime']:
        event_object = SkypeTransferFileEvent(
            row, row['starttime'], u'GETSOLICITUDE', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
      if row['accepttime']:
        event_object = SkypeTransferFileEvent(
            row, row['accepttime'], u'ACCEPTED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
      if row['finishtime']:
        event_object = SkypeTransferFileEvent(
            row, row['finishtime'], u'FINISHED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
    elif row['status'] == 2 and row['starttime']:
      event_object = SkypeTransferFileEvent(
          row, row['starttime'], u'SENDSOLICITUDE', source, destination)
      parser_mediator.ProduceEvent(event_object, query=query)
# Make the plugin discoverable by the SQLite parser.
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
| 36.893333 | 80 | 0.665161 |
import logging
from plaso.events import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class SkypeChatEvent(time_events.PosixTimeEvent):
  """Event for a single Skype chat message."""
  DATA_TYPE = u'skype:event:chat'
  def __init__(self, row, to_account):
    """Initializes the event from a Chats/Messages join row.

    Args:
      row: a sqlite3.Row with timestamp, title, body_xml, author and
          from_displayname columns (keys must be plain strings).
      to_account: string with the conversation accounts other than the
          author.
    """
    super(SkypeChatEvent, self).__init__(
        row['timestamp'], u'Chat from Skype', self.DATA_TYPE)
    self.title = row['title']
    self.text = row['body_xml']
    self.from_account = u'{0:s} <{1:s}>'.format(
        row['from_displayname'], row['author'])
    self.to_account = to_account
class SkypeAccountEvent(time_events.PosixTimeEvent):
  """Event holding profile information about a Skype account."""
  DATA_TYPE = u'skype:event:account'
  def __init__(
      self, timestamp, usage, identifier, full_name, display_name, email,
      country):
    """Initializes a Skype account event.

    Args:
      timestamp: the POSIX timestamp value.
      usage: description of what the timestamp represents.
      identifier: the account row identifier, stored as the event offset.
      full_name: full name of the Skype account holder.
      display_name: display name chosen by the account holder.
      email: e-mail address registered for the account.
      country: home country chosen by the account holder.
    """
    super(SkypeAccountEvent, self).__init__(timestamp, usage)
    self.offset = identifier
    self.username = u'{0:s} <{1:s}>'.format(full_name, display_name)
    self.display_name = display_name
    self.email = email
    self.country = country
    # The base class is not handed a data type here, so set it explicitly.
    self.data_type = self.DATA_TYPE
class SkypeSMSEvent(time_events.PosixTimeEvent):
  """Event for an SMS sent through Skype."""
  DATA_TYPE = u'skype:event:sms'
  def __init__(self, row, dst_number):
    """Initializes the SMS event.

    Args:
      row: a sqlite3.Row with time_sms (send timestamp) and msg_sms
          (message text) columns.
      dst_number: the phone number the SMS was sent to.
    """
    super(SkypeSMSEvent, self).__init__(
        row['time_sms'], u'SMS from Skype', self.DATA_TYPE)
    self.number = dst_number
    self.text = row['msg_sms']
class SkypeCallEvent(time_events.PosixTimeEvent):
  """Event describing one phase of a Skype call."""
  DATA_TYPE = u'skype:event:call'
  def __init__(self, timestamp, call_type, user_start_call,
              source, destination, video_conference):
    """Initializes the call event.

    Args:
      timestamp: the POSIX timestamp of the event.
      call_type: phase of the call; ParseCall emits WAITING, ACCEPTED
          or FINISHED.
      user_start_call: True if the owner account started the call.
      source: the account which started the call.
      destination: the account which received the call.
      video_conference: True if the call was a video conference.
    """
    super(SkypeCallEvent, self).__init__(
        timestamp, u'Call from Skype', self.DATA_TYPE)
    self.call_type = call_type
    self.user_start_call = user_start_call
    self.src_call = source
    self.dst_call = destination
    self.video_conference = video_conference
class SkypeTransferFileEvent(time_events.PosixTimeEvent):
  """Event for one step of a Skype file transfer."""
  DATA_TYPE = u'skype:event:transferfile'
  def __init__(self, row, timestamp, action_type, source, destination):
    """Initializes the file transfer event.

    Args:
      row: a sqlite3.Row with id, filepath, filename and filesize columns.
      timestamp: when the action happens.
      action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
      source: the account that sent the file.
      destination: the account that received the file.
    """
    super(SkypeTransferFileEvent, self).__init__(
        timestamp, u'File transfer from Skype', self.DATA_TYPE)
    self.offset = row['id']
    self.action_type = action_type
    self.source = source
    self.destination = destination
    self.transferred_filepath = row['filepath']
    self.transferred_filename = row['filename']
    try:
      self.transferred_filesize = int(row['filesize'])
    except (ValueError, TypeError):
      # BUG FIX: a NULL filesize column yields None and int(None) raises
      # TypeError, which the previous ValueError-only handler let escape
      # and abort parsing of the row.
      logging.debug(u'Unknown filesize {0:s}'.format(
          self.transferred_filename))
      self.transferred_filesize = 0
class SkypePlugin(interface.SQLitePlugin):
NAME = u'skype'
DESCRIPTION = u'Parser for Skype SQLite database files.'
QUERY_DEST_FROM_TRANSFER = (
u'SELECT parent_id, partner_handle AS skypeid, '
u'partner_dispname AS skypename FROM transfers')
QUERY_SOURCE_FROM_TRANSFER = (
u'SELECT pk_id, partner_handle AS skypeid, '
u'partner_dispname AS skypename FROM transfers')
QUERIES = [
((u'SELECT c.id, c.participants, c.friendlyname AS title, '
u'm.author AS author, m.from_dispname AS from_displayname, '
u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
u'WHERE c.name = m.chatname'), u'ParseChat'),
((u'SELECT id, fullname, given_displayname, emails, '
u'country, profile_timestamp, authreq_timestamp, '
u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'),
((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
u'body AS msg_sms FROM SMSes'), u'ParseSMS'),
((u'SELECT id, partner_handle, partner_dispname, offer_send_list, '
u'starttime, accepttime, finishtime, filepath, filename, filesize, '
u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'),
((u'SELECT c.id, cm.guid, c.is_incoming, '
u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
u'cm.start_timestamp AS accept_call, cm.call_duration '
u'FROM Calls c, CallMembers cm '
u'WHERE c.id = cm.call_db_id;'), u'ParseCall')]
REQUIRED_TABLES = frozenset([
u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes',
u'Transfers', u'CallMembers', u'Calls'])
def ParseAccountInformation(
self, parser_mediator, row, query=None, **unused_kwargs):
if row['profile_timestamp']:
event_object = SkypeAccountEvent(
row['profile_timestamp'], u'Profile Changed', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['authreq_timestamp']:
event_object = SkypeAccountEvent(
row['authreq_timestamp'], u'Authenticate Request', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastonline_timestamp']:
event_object = SkypeAccountEvent(
row['lastonline_timestamp'], u'Last Online', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['mood_timestamp']:
event_object = SkypeAccountEvent(
row['mood_timestamp'], u'Mood Event', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['sent_authrequest_time']:
event_object = SkypeAccountEvent(
row['sent_authrequest_time'], u'Auth Request Sent', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastused_timestamp']:
event_object = SkypeAccountEvent(
row['lastused_timestamp'], u'Last Used', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs):
to_account = u''
accounts = []
participants = row['participants'].split(' ')
for participant in participants:
if participant != row['author']:
accounts.append(participant)
to_account = u', '.join(accounts)
if not to_account:
if row['dialog_partner']:
to_account = row['dialog_partner']
else:
to_account = u'Unknown User'
event_object = SkypeChatEvent(row, to_account)
parser_mediator.ProduceEvent(event_object, query=query)
def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs):
dst_number = row['dstnum_sms'].replace(u' ', u'')
event_object = SkypeSMSEvent(row, dst_number)
parser_mediator.ProduceEvent(event_object, query=query)
def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs):
try:
aux = row['guid']
if aux:
aux_list = aux.split(u'-')
src_aux = aux_list[0]
dst_aux = aux_list[1]
else:
src_aux = u'Unknown [no GUID]'
dst_aux = u'Unknown [no GUID]'
except IndexError:
src_aux = u'Unknown [{0:s}]'.format(row['guid'])
dst_aux = u'Unknown [{0:s}]'.format(row['guid'])
if row['is_incoming'] == u'0':
user_start_call = True
source = src_aux
if row['ip_address']:
destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address'])
else:
destination = dst_aux
else:
user_start_call = False
source = src_aux
destination = dst_aux
if row['videostatus'] == u'3':
video_conference = True
else:
video_conference = False
event_object = SkypeCallEvent(
row['try_call'], u'WAITING', user_start_call, source, destination,
video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
if row['accept_call']:
event_object = SkypeCallEvent(
row['accept_call'], u'ACCEPTED', user_start_call, source,
destination, video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
if row['call_duration']:
try:
timestamp = int(row['accept_call']) + int(row['call_duration'])
event_object = SkypeCallEvent(
timestamp, u'FINISHED', user_start_call, source, destination,
video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
except ValueError:
logging.debug((
u'[{0:s}] Unable to determine when the call {1:s} was '
u'finished.').format(self.NAME, row['id']))
def ParseFileTransfer(
        self, parser_mediator, row, cache=None, database=None, query=None,
        **unused_kwargs):
    """Produce Skype file-transfer events from a transfer row.

    Args:
      parser_mediator: mediator used to emit the events.
      row: the row resulting from the transfer query.
      cache: query cache holding the account lookup tables.
      database: database object used to populate the cache on first use.
      query: optional query string that produced the row.
    """
    # Lazily populate the cached id -> (skypeid, skypename) lookup tables.
    source_dict = cache.GetResults(u'source')
    if not source_dict:
        cursor = database.cursor
        results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER)
        cache.CacheQueryResults(
            results, 'source', 'pk_id', ('skypeid', 'skypename'))
        source_dict = cache.GetResults(u'source')

    dest_dict = cache.GetResults(u'destination')
    if not dest_dict:
        cursor = database.cursor
        results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER)
        cache.CacheQueryResults(
            results, 'destination', 'parent_id', ('skypeid', 'skypename'))
        dest_dict = cache.GetResults(u'destination')

    source = u'Unknown'
    destination = u'Unknown'

    if row['parent_id']:
        # Child entry: the partner is the destination; resolve the source
        # account through the cached lookup keyed on parent_id.
        destination = u'{0:s} <{1:s}>'.format(
            row['partner_handle'], row['partner_dispname'])
        skype_id, skype_name = source_dict.get(row['parent_id'], [None, None])
        if skype_name:
            source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    else:
        # Top-level entry: the partner is the source.
        source = u'{0:s} <{1:s}>'.format(
            row['partner_handle'], row['partner_dispname'])
        if row['pk_id']:
            skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None])
            if skype_name:
                destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)

    # NOTE(review): status 8 is handled as a completed transfer and status 2
    # as a pending request -- confirm the codes against the Skype schema.
    if row['status'] == 8:
        if row['starttime']:
            event_object = SkypeTransferFileEvent(
                row, row['starttime'], u'GETSOLICITUDE', source, destination)
            parser_mediator.ProduceEvent(event_object, query=query)
        if row['accepttime']:
            event_object = SkypeTransferFileEvent(
                row, row['accepttime'], u'ACCEPTED', source, destination)
            parser_mediator.ProduceEvent(event_object, query=query)
        if row['finishtime']:
            event_object = SkypeTransferFileEvent(
                row, row['finishtime'], u'FINISHED', source, destination)
            parser_mediator.ProduceEvent(event_object, query=query)
    elif row['status'] == 2 and row['starttime']:
        event_object = SkypeTransferFileEvent(
            row, row['starttime'], u'SENDSOLICITUDE', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
| true | true |
79002937f63cc83abc4079baace1cf6ec297c5e3 | 6,525 | py | Python | tradingview_ta/technicals.py | Chizkiyahu/python-tradingview-ta | 84777c72a3b6ef8706fc01434ce2daf1628e3027 | [
"MIT"
] | null | null | null | tradingview_ta/technicals.py | Chizkiyahu/python-tradingview-ta | 84777c72a3b6ef8706fc01434ce2daf1628e3027 | [
"MIT"
] | null | null | null | tradingview_ta/technicals.py | Chizkiyahu/python-tradingview-ta | 84777c72a3b6ef8706fc01434ce2daf1628e3027 | [
"MIT"
] | 1 | 2021-11-03T15:20:48.000Z | 2021-11-03T15:20:48.000Z | # Tradingview Technical Analysis (tradingview-ta)
# Author: deathlyface (https://github.com/deathlyface)
# Rewritten from https://www.tradingview.com/static/bundles/technicals.f2e6e6a51aebb6cd46f8.js
# License: MIT
class Recommendation:
    """String constants for the recommendation labels emitted by Compute."""
    strong_buy = "STRONG_BUY"
    buy = "BUY"
    neutral = "NEUTRAL"
    sell = "SELL"
    strong_sell = "STRONG_SELL"
    error = "ERROR"
class Compute:
    """Signal evaluators mirroring TradingView's technical-analysis rules.

    Every method maps raw indicator values onto one of the Recommendation
    labels ("BUY", "SELL", "NEUTRAL", ...). Methods are invoked through the
    class (e.g. ``Compute.MA(ma, close)``), so no ``self`` parameter is used.
    """

    def MA(ma, close):
        # Moving Average: price closing above the MA is bullish.
        if close > ma:
            return Recommendation.buy
        if close < ma:
            return Recommendation.sell
        return Recommendation.neutral

    def RSI(rsi, rsi1):
        # Relative Strength Index: oversold (<30) and rising -> buy;
        # overbought (>70) and falling -> sell.
        if rsi < 30 and rsi1 > rsi:
            return Recommendation.buy
        if rsi > 70 and rsi1 < rsi:
            return Recommendation.sell
        return Recommendation.neutral

    def Stoch(k, d, k1, d1):
        # Stochastic oscillator: %K/%D crossover inside the extreme bands.
        if k < 20 and d < 20 and k > d and k1 < d1:
            return Recommendation.buy
        if k > 80 and d > 80 and k < d and k1 > d1:
            return Recommendation.sell
        return Recommendation.neutral

    def CCI20(cci20, cci201):
        # Commodity Channel Index (20): reversal beyond the +/-100 band.
        if cci20 < -100 and cci20 > cci201:
            return Recommendation.buy
        if cci20 > 100 and cci20 < cci201:
            return Recommendation.sell
        return Recommendation.neutral

    def ADX(adx, adxpdi, adxndi, adxpdi1, adxndi1):
        # Average Directional Index: act only on a strong trend (adx > 20)
        # where the +DI/-DI lines have just crossed.
        if adx > 20 and adxpdi1 < adxndi1 and adxpdi > adxndi:
            return Recommendation.buy
        if adx > 20 and adxpdi1 > adxndi1 and adxpdi < adxndi:
            return Recommendation.sell
        return Recommendation.neutral

    def AO(ao, ao1):
        # Awesome Oscillator: zero-line cross or same-side continuation.
        if (ao > 0 and ao1 < 0) or (ao > 0 and ao1 > 0 and ao > ao1):
            return Recommendation.buy
        if (ao < 0 and ao1 > 0) or (ao < 0 and ao1 < 0 and ao < ao1):
            return Recommendation.sell
        return Recommendation.neutral

    def Mom(mom, mom1):
        # Momentum. NOTE: falling momentum maps to BUY, matching the
        # original TradingView bundle this was translated from.
        if mom < mom1:
            return Recommendation.buy
        if mom > mom1:
            return Recommendation.sell
        return Recommendation.neutral

    def MACD(macd, signal):
        # MACD line versus its signal line.
        if macd > signal:
            return Recommendation.buy
        if macd < signal:
            return Recommendation.sell
        return Recommendation.neutral

    def BBBuy(close, bblower):
        # Bollinger Bands: close under the lower band -> buy.
        if close < bblower:
            return Recommendation.buy
        return Recommendation.neutral

    def BBSell(close, bbupper):
        # Bollinger Bands: close over the upper band -> sell.
        if close > bbupper:
            return Recommendation.sell
        return Recommendation.neutral

    def PSAR(psar, open):
        # Parabolic SAR relative to the opening price.
        if psar < open:
            return Recommendation.buy
        if psar > open:
            return Recommendation.sell
        return Recommendation.neutral

    def Recommend(value):
        # Map an aggregate score in [-1, 1] onto the five-level scale.
        if -1 <= value < -.5:
            return Recommendation.strong_sell
        if -.5 <= value < 0:
            return Recommendation.sell
        if value == 0:
            return Recommendation.neutral
        if 0 < value <= .5:
            return Recommendation.buy
        if .5 < value <= 1:
            return Recommendation.strong_buy
        return Recommendation.error

    def Simple(value):
        # Map a single Rec.X vote (-1/0/1) onto SELL/NEUTRAL/BUY.
        if value == -1:
            return Recommendation.sell
        if value == 1:
            return Recommendation.buy
        return Recommendation.neutral
| 27.1875 | 94 | 0.517241 |
class Recommendation:
    """String constants for the recommendation labels emitted by Compute."""
    strong_buy = "STRONG_BUY"
    buy = "BUY"
    neutral = "NEUTRAL"
    sell = "SELL"
    strong_sell = "STRONG_SELL"
    error = "ERROR"
class Compute:
    """Signal evaluators mirroring TradingView's technical-analysis rules.

    Every method maps raw indicator values onto one of the Recommendation
    labels ("BUY", "SELL", "NEUTRAL", ...). Methods are invoked through the
    class (e.g. ``Compute.MA(ma, close)``), so no ``self`` parameter is used.
    """

    def MA(ma, close):
        # Moving Average: price closing above the MA is bullish.
        if close > ma:
            return Recommendation.buy
        if close < ma:
            return Recommendation.sell
        return Recommendation.neutral

    def RSI(rsi, rsi1):
        # Relative Strength Index: oversold (<30) and rising -> buy;
        # overbought (>70) and falling -> sell.
        if rsi < 30 and rsi1 > rsi:
            return Recommendation.buy
        if rsi > 70 and rsi1 < rsi:
            return Recommendation.sell
        return Recommendation.neutral

    def Stoch(k, d, k1, d1):
        # Stochastic oscillator: %K/%D crossover inside the extreme bands.
        if k < 20 and d < 20 and k > d and k1 < d1:
            return Recommendation.buy
        if k > 80 and d > 80 and k < d and k1 > d1:
            return Recommendation.sell
        return Recommendation.neutral

    def CCI20(cci20, cci201):
        # Commodity Channel Index (20): reversal beyond the +/-100 band.
        if cci20 < -100 and cci20 > cci201:
            return Recommendation.buy
        if cci20 > 100 and cci20 < cci201:
            return Recommendation.sell
        return Recommendation.neutral

    def ADX(adx, adxpdi, adxndi, adxpdi1, adxndi1):
        # Average Directional Index: act only on a strong trend (adx > 20)
        # where the +DI/-DI lines have just crossed.
        if adx > 20 and adxpdi1 < adxndi1 and adxpdi > adxndi:
            return Recommendation.buy
        if adx > 20 and adxpdi1 > adxndi1 and adxpdi < adxndi:
            return Recommendation.sell
        return Recommendation.neutral

    def AO(ao, ao1):
        # Awesome Oscillator: zero-line cross or same-side continuation.
        if (ao > 0 and ao1 < 0) or (ao > 0 and ao1 > 0 and ao > ao1):
            return Recommendation.buy
        if (ao < 0 and ao1 > 0) or (ao < 0 and ao1 < 0 and ao < ao1):
            return Recommendation.sell
        return Recommendation.neutral

    def Mom(mom, mom1):
        # Momentum. NOTE: falling momentum maps to BUY, matching the
        # original TradingView bundle this was translated from.
        if mom < mom1:
            return Recommendation.buy
        if mom > mom1:
            return Recommendation.sell
        return Recommendation.neutral

    def MACD(macd, signal):
        # MACD line versus its signal line.
        if macd > signal:
            return Recommendation.buy
        if macd < signal:
            return Recommendation.sell
        return Recommendation.neutral

    def BBBuy(close, bblower):
        # Bollinger Bands: close under the lower band -> buy.
        if close < bblower:
            return Recommendation.buy
        return Recommendation.neutral

    def BBSell(close, bbupper):
        # Bollinger Bands: close over the upper band -> sell.
        if close > bbupper:
            return Recommendation.sell
        return Recommendation.neutral

    def PSAR(psar, open):
        # Parabolic SAR relative to the opening price.
        if psar < open:
            return Recommendation.buy
        if psar > open:
            return Recommendation.sell
        return Recommendation.neutral

    def Recommend(value):
        # Map an aggregate score in [-1, 1] onto the five-level scale.
        if -1 <= value < -.5:
            return Recommendation.strong_sell
        if -.5 <= value < 0:
            return Recommendation.sell
        if value == 0:
            return Recommendation.neutral
        if 0 < value <= .5:
            return Recommendation.buy
        if .5 < value <= 1:
            return Recommendation.strong_buy
        return Recommendation.error

    def Simple(value):
        # Map a single Rec.X vote (-1/0/1) onto SELL/NEUTRAL/BUY.
        if value == -1:
            return Recommendation.sell
        if value == 1:
            return Recommendation.buy
        return Recommendation.neutral
| true | true |
790029c8788cdbba6b33babc7a30f43fce87fd0f | 1,460 | py | Python | examples/select.py | fossabot/questionary | de6354aeaf23d3ed65bbcb9e60aeb27305257672 | [
"MIT"
] | null | null | null | examples/select.py | fossabot/questionary | de6354aeaf23d3ed65bbcb9e60aeb27305257672 | [
"MIT"
] | null | null | null | examples/select.py | fossabot/questionary | de6354aeaf23d3ed65bbcb9e60aeb27305257672 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Example for a list question type.
Run example by typing `python -m examples.list` in your console."""
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
    """Build the select question via the questionary Python API and ask it."""
    menu_entries = [
        'Order a pizza',
        'Make a reservation',
        Separator(),
        'Ask for opening hours',
        Choice('Contact support', disabled='Unavailable at this time'),
        'Talk to the receptionist',
    ]
    # Build the question object, then prompt the user for an answer.
    return questionary.select(
        'What do you want to do?',
        qmark='😃',
        choices=menu_entries,
        style=custom_style_dope,
        **kwargs).ask()
def ask_dictstyle(**kwargs):
    """Build the same select question as a prompt() dictionary and ask it."""
    support_entry = {
        'name': 'Contact support',
        'disabled': 'Unavailable at this time',
    }
    question = {
        'type': 'select',
        'name': 'theme',
        'message': 'What do you want to do?',
        'choices': [
            'Order a pizza',
            'Make a reservation',
            Separator(),
            'Ask for opening hours',
            support_entry,
            'Talk to the receptionist',
        ],
    }
    return prompt([question], style=custom_style_dope, **kwargs)
if __name__ == '__main__':
    # Run the Python-API variant and pretty-print the selected answer.
    pprint(ask_pystyle())
| 26.071429 | 75 | 0.534247 |
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
    """Build a select question via the questionary Python API and ask it."""
    # Create the question object.
    question = questionary.select(
        'What do you want to do?',
        qmark='😃',
        choices=[
            'Order a pizza',
            'Make a reservation',
            Separator(),
            'Ask for opening hours',
            Choice('Contact support', disabled='Unavailable at this time'),
            'Talk to the receptionist'],
        style=custom_style_dope,
        **kwargs)
    # Prompt the user for an answer.
    return question.ask()
def ask_dictstyle(**kwargs):
    """Build the same select question as a prompt() dictionary and ask it."""
    questions = [
        {
            'type': 'select',
            'name': 'theme',
            'message': 'What do you want to do?',
            'choices': [
                'Order a pizza',
                'Make a reservation',
                Separator(),
                'Ask for opening hours',
                # Dict-style choices allow per-entry options such as disabling.
                {
                    'name': 'Contact support',
                    'disabled': 'Unavailable at this time'
                },
                'Talk to the receptionist'
            ]
        }
    ]
    return prompt(questions, style=custom_style_dope, **kwargs)
if __name__ == '__main__':
    # Run the Python-API variant and pretty-print the selected answer.
    pprint(ask_pystyle())
| true | true |
79002b18909275880ef250dcf32f1a84abde2b13 | 212 | py | Python | stripe/stripe/doctype/stripe_setting/test_stripe_setting.py | Hitesh1595/stripe | 251b89a44843d833c13500e339dda64d5bbd225d | [
"MIT"
] | null | null | null | stripe/stripe/doctype/stripe_setting/test_stripe_setting.py | Hitesh1595/stripe | 251b89a44843d833c13500e339dda64d5bbd225d | [
"MIT"
] | null | null | null | stripe/stripe/doctype/stripe_setting/test_stripe_setting.py | Hitesh1595/stripe | 251b89a44843d833c13500e339dda64d5bbd225d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, stripe and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestStripeSetting(unittest.TestCase):
    """Placeholder test case for the Stripe Setting doctype; no tests yet."""
    pass
| 19.272727 | 45 | 0.764151 |
from __future__ import unicode_literals
import unittest
class TestStripeSetting(unittest.TestCase):
    """Placeholder test case for the Stripe Setting doctype; no tests yet."""
    pass
| true | true |
79002c3109af890db2914a8213ecc4ee565ad635 | 14,446 | py | Python | data/data_loader.py | ShuanDeMorian/deepspeech.pytorch | 58d7a693447ead632ef9b625681790ee8b5f6b82 | [
"MIT"
] | null | null | null | data/data_loader.py | ShuanDeMorian/deepspeech.pytorch | 58d7a693447ead632ef9b625681790ee8b5f6b82 | [
"MIT"
] | null | null | null | data/data_loader.py | ShuanDeMorian/deepspeech.pytorch | 58d7a693447ead632ef9b625681790ee8b5f6b82 | [
"MIT"
] | null | null | null | import os
import subprocess
from tempfile import NamedTemporaryFile
from torch.distributed import get_rank
from torch.distributed import get_world_size
from torch.utils.data.sampler import Sampler
import librosa
import numpy as np
import scipy.signal
import torch
from scipy.io.wavfile import read
import math
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from .spec_augment import spec_augment
from hangul_utils import split_syllable_char, split_syllables, join_jamos
# Supported STFT window functions, keyed by the name used in audio_conf.
windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,
           'bartlett': scipy.signal.bartlett}
def load_audio(path):
    """Load an audio file as a mono, peak-normalized float32 waveform.

    Args:
        path: path to the audio file; decoded at a 16 kHz sample rate.

    Returns:
        1-D numpy float32 array; multi-channel input is averaged to mono.
    """
    # Fix: removed the commented-out write_wav/print debug leftovers.
    sound, sr = librosa.load(path, sr=16000)
    # Peak-normalize so the waveform lies in [-1, 1].
    sound = librosa.util.normalize(sound)
    sound = sound.astype('float32')
    if len(sound.shape) > 1:
        if sound.shape[1] == 1:
            sound = sound.squeeze()
        else:
            # Multiple channels: average down to mono.
            sound = sound.mean(axis=1)
    return sound
class AudioParser(object):
    """Interface for turning manifest entries into training/testing tensors."""

    def parse_audio(self, audio_path):
        """Parse the audio file at *audio_path*; subclasses must override."""
        raise NotImplementedError

    def parse_transcript(self, transcript_path):
        """Parse the transcript at *transcript_path*; subclasses must override."""
        raise NotImplementedError
class NoiseInjection(object):
    def __init__(self,
                 path=None,
                 sample_rate=16000,
                 noise_levels=(0, 0.5)):
        """
        Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.
        Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py
        """
        if path is not None and not os.path.exists(path):
            print("Directory doesn't exist: {}".format(path))
            raise IOError
        # Collect every audio file under the noise directory up front.
        self.paths = path is not None and librosa.util.find_files(path)
        self.sample_rate = sample_rate
        self.noise_levels = noise_levels

    def inject_noise(self, data):
        """Mix a randomly chosen noise file into *data* at a random level."""
        noise_path = np.random.choice(self.paths)
        noise_level = np.random.uniform(*self.noise_levels)
        return self.inject_noise_sample(data, noise_path, noise_level)

    def inject_noise_sample(self, data, noise_path, noise_level):
        """Mix a random window of *noise_path* into *data*, scaled by energy ratio."""
        noise_len = get_audio_length(noise_path)
        data_len = len(data) / self.sample_rate
        # Pick a random window of the noise file exactly as long as the data.
        noise_start = np.random.rand() * (noise_len - data_len)
        noise_end = noise_start + data_len
        noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
        assert len(data) == len(noise_dst)
        # Scale by the RMS-energy ratio so noise_level behaves like an SNR knob.
        noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
        data_energy = np.sqrt(data.dot(data) / data.size)
        data += noise_level * noise_dst * data_energy / noise_energy
        return data
class SpectrogramParser(AudioParser):
    def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
        """
        Parses audio file into spectrogram with optional normalization and various augmentations
        :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
        :param normalize(default False): Apply standard mean and deviation normalization to audio tensor
        :param speed_volume_perturb(default False): Apply random tempo and gain perturbations
        :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
        """
        super(SpectrogramParser, self).__init__()
        self.window_stride = audio_conf['window_stride']
        self.window_size = audio_conf['window_size']
        self.sample_rate = audio_conf['sample_rate']
        # STFT window function; falls back to Hamming for unknown names.
        self.window = windows.get(audio_conf['window'], windows['hamming'])
        self.normalize = normalize
        self.speed_volume_perturb = speed_volume_perturb
        self.spec_augment = spec_augment
        # Noise injection is only enabled when a noise directory is configured.
        self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
                                            audio_conf['noise_levels']) if audio_conf.get(
            'noise_dir') is not None else None
        self.noise_prob = audio_conf.get('noise_prob')

    def parse_audio(self, audio_path, audio=None, change_speed=None):
        """Load (or reuse) a waveform and convert it to a log-magnitude STFT.

        :param audio_path: path to the audio file on disk.
        :param audio: optional pre-loaded waveform; skips disk loading if given.
        :param change_speed: optional tempo factor applied via time stretching.
        :return: FloatTensor of shape (freq, time) holding log(1 + |STFT|).
        """
        if audio is not None:
            y = audio
        elif self.speed_volume_perturb:
            y = load_randomly_augmented_audio(audio_path, self.sample_rate)
        else:
            y = load_audio(audio_path)
        # Optionally time-stretch the waveform (tempo change, pitch preserved).
        if change_speed is not None:
            y = librosa.effects.time_stretch(y, change_speed)
        if self.noiseInjector:
            # Inject noise with probability noise_prob.
            add_noise = np.random.binomial(1, self.noise_prob)
            if add_noise:
                y = self.noiseInjector.inject_noise(y)
        n_fft = int(self.sample_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sample_rate * self.window_stride)
        # STFT
        D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
                         win_length=win_length, window=self.window)
        spect, phase = librosa.magphase(D)
        # S = log(S+1) compresses the dynamic range of the magnitudes.
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize:
            # Per-utterance mean/variance normalization.
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        if self.spec_augment:
            spect = spec_augment(spect)
        return spect

    def parse_transcript(self, transcript_path):
        # Implemented by SpectrogramDataset.
        raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
    def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
        """
        Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
        a comma. Each new line is a different sample. Example below:
        /path/to/audio.wav,/path/to/audio.txt
        ...
        :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
        :param manifest_filepath: Path to manifest csv as describe above
        :param labels: String containing all the possible characters to map to
        :param normalize: Apply standard mean and deviation normalization to audio tensor
        :param speed_volume_perturb(default False): Apply random tempo and gain perturbations
        :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
        """
        with open(manifest_filepath) as f:
            ids = f.readlines()
        ids = [x.strip().split(',') for x in ids]
        self.ids = ids
        self.size = len(ids)
        # Map each label character to its integer index.
        self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
        # Fix: the original used a bare try/except that silently swallowed
        # every error; dict.get expresses the optional flag directly.
        self.use_jamo = audio_conf.get('use_jamo', False)
        super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)

    def __getitem__(self, index):
        """Return the (spectrogram, transcript-index-list) pair for *index*."""
        sample = self.ids[index]
        audio_path, transcript_path = sample[0], sample[1]
        spect = self.parse_audio(audio_path)
        transcript = self.parse_transcript(transcript_path)
        return spect, transcript

    def parse_transcript(self, transcript_path):
        """Read a transcript file and encode it as a list of label indices."""
        with open(transcript_path, 'r', encoding='utf8') as transcript_file:
            transcript = transcript_file.read().replace('\n', '')
        if self.use_jamo:
            # Decompose Hangul syllables into jamo before mapping to labels.
            transcript = split_syllables(transcript)
        # NOTE(review): filter(None, ...) drops unmapped characters but also
        # drops label index 0 (typically the CTC blank) -- confirm intended.
        transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
        return transcript

    def __len__(self):
        return self.size
def _collate_fn(batch):
def func(p):
return p[0].size(1)
batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)
longest_sample = max(batch, key=func)[0]
freq_size = longest_sample.size(0)
minibatch_size = len(batch)
max_seqlength = longest_sample.size(1)
inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(1)
inputs[x][0].narrow(1, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
    """DataLoader whose batches are padded by ``_collate_fn``."""

    def __init__(self, *args, **kwargs):
        super(AudioDataLoader, self).__init__(*args, **kwargs)
        # Replace the default collate with the spectrogram-padding version.
        self.collate_fn = _collate_fn
class BucketingSampler(Sampler):
    """Yield contiguous index buckets so similarly sized samples batch together."""

    def __init__(self, data_source, batch_size=1):
        super(BucketingSampler, self).__init__(data_source)
        self.data_source = data_source
        indices = list(range(0, len(data_source)))
        # Chop the index list into fixed-size buckets (last one may be short).
        self.bins = [indices[start:start + batch_size]
                     for start in range(0, len(indices), batch_size)]

    def __iter__(self):
        for bucket in self.bins:
            # Shuffle within a bucket; bucket order is controlled by shuffle().
            np.random.shuffle(bucket)
            yield bucket

    def __len__(self):
        return len(self.bins)

    def shuffle(self, epoch):
        np.random.shuffle(self.bins)
class DistributedBucketingSampler(Sampler):
    """Bucketing sampler that deals every N-th bucket to one of N replicas."""

    def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
        super(DistributedBucketingSampler, self).__init__(data_source)
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.data_source = data_source
        self.ids = list(range(0, len(data_source)))
        self.batch_size = batch_size
        self.bins = [self.ids[start:start + batch_size]
                     for start in range(0, len(self.ids), batch_size)]
        self.num_replicas = num_replicas
        self.rank = rank
        # Each replica sees ceil(#buckets / #replicas) buckets per epoch.
        self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Pad with leading buckets so the count divides evenly, then take
        # every num_replicas-th bucket starting at this replica's rank.
        padded = self.bins + self.bins[:(self.total_size - len(self.bins))]
        assert len(padded) == self.total_size
        return iter(padded[self.rank::self.num_replicas])

    def __len__(self):
        return self.num_samples

    def shuffle(self, epoch):
        # Seed with the epoch so every replica applies the same permutation.
        g = torch.Generator()
        g.manual_seed(epoch)
        order = list(torch.randperm(len(self.bins), generator=g))
        self.bins = [self.bins[i] for i in order]
def get_audio_length(path):
    """Return the duration in seconds of the audio file at *path*, via soxi."""
    # soxi -D prints the duration; requires the sox package to be installed.
    output = subprocess.check_output(['soxi -D \"%s\"' % path.strip()], shell=True)
    return float(output)
def audio_with_sox(path, sample_rate, start_time, end_time):
    """
    Crop and resample the recording with sox, then load it.
    """
    with NamedTemporaryFile(suffix=".wav") as tar_file:
        tar_filename = tar_file.name
        # "trim {start} ={end}" crops an absolute time window; the output is
        # mono, 16-bit signed-integer at the requested sample rate.
        sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate,
                                                                                               tar_filename, start_time,
                                                                                               end_time)
        os.system(sox_params)
        y = load_audio(tar_filename)
        return y
def augment_audio_with_sox(path, sample_rate, tempo, gain):
    """
    Changes tempo and gain of the recording with sox and loads it.
    """
    with NamedTemporaryFile(suffix=".wav") as augmented_file:
        augmented_filename = augmented_file.name
        # Apply sox's tempo (time stretch) and gain (volume) effects.
        sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)]
        sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate,
                                                                                     augmented_filename,
                                                                                     " ".join(sox_augment_params))
        os.system(sox_params)
        y = load_audio(augmented_filename)
        return y
# original tempo_range=(0.85,1.15)
# original gain_range=(-6,8)
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),
                                  gain_range=(-6, 8)):
    """
    Picks tempo and gain uniformly, applies it to the utterance by using sox utility.
    Returns the augmented utterance.
    """
    # Sample one tempo factor and one gain value uniformly from their ranges.
    low_tempo, high_tempo = tempo_range
    tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)
    low_gain, high_gain = gain_range
    gain_value = np.random.uniform(low=low_gain, high=high_gain)
    audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,
                                   tempo=tempo_value, gain=gain_value)
    return audio
| 39.149051 | 127 | 0.631524 | import os
import subprocess
from tempfile import NamedTemporaryFile
from torch.distributed import get_rank
from torch.distributed import get_world_size
from torch.utils.data.sampler import Sampler
import librosa
import numpy as np
import scipy.signal
import torch
from scipy.io.wavfile import read
import math
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from .spec_augment import spec_augment
from hangul_utils import split_syllable_char, split_syllables, join_jamos
# Supported STFT window functions, keyed by the name used in audio_conf.
windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,
           'bartlett': scipy.signal.bartlett}
def load_audio(path):
    """Load an audio file as a mono, peak-normalized float32 waveform."""
    sound, sr = librosa.load(path, sr=16000)
    # Fix: this line was corrupted to "osa.util.normalize(sound)"; restore the
    # full assignment so the normalized waveform is actually kept.
    sound = librosa.util.normalize(sound)
    sound = sound.astype('float32')
    if len(sound.shape) > 1:
        if sound.shape[1] == 1:
            sound = sound.squeeze()
        else:
            # Multiple channels: average down to mono.
            sound = sound.mean(axis=1)
    return sound
class AudioParser(object):
    """Interface for turning manifest entries into training/testing tensors."""

    def parse_transcript(self, transcript_path):
        # Subclasses must implement transcript parsing.
        raise NotImplementedError

    def parse_audio(self, audio_path):
        # Subclasses must implement audio parsing.
        raise NotImplementedError
class NoiseInjection(object):
    """Mixes noise recordings into a signal; higher level means more noise."""

    def __init__(self,
                 path=None,
                 sample_rate=16000,
                 noise_levels=(0, 0.5)):
        if path is not None and not os.path.exists(path):
            print("Directory doesn't exist: {}".format(path))
            raise IOError
        # Collect every audio file under the noise directory up front.
        self.paths = path is not None and librosa.util.find_files(path)
        self.sample_rate = sample_rate
        self.noise_levels = noise_levels

    def inject_noise(self, data):
        """Mix a randomly chosen noise file into *data* at a random level."""
        noise_path = np.random.choice(self.paths)
        noise_level = np.random.uniform(*self.noise_levels)
        return self.inject_noise_sample(data, noise_path, noise_level)

    def inject_noise_sample(self, data, noise_path, noise_level):
        """Mix a random window of *noise_path* into *data*, scaled by energy ratio."""
        noise_len = get_audio_length(noise_path)
        data_len = len(data) / self.sample_rate
        # Pick a random window of the noise file exactly as long as the data.
        noise_start = np.random.rand() * (noise_len - data_len)
        noise_end = noise_start + data_len
        noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
        assert len(data) == len(noise_dst)
        # Scale by the RMS-energy ratio so noise_level behaves like an SNR knob.
        noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
        data_energy = np.sqrt(data.dot(data) / data.size)
        data += noise_level * noise_dst * data_energy / noise_energy
        return data
class SpectrogramParser(AudioParser):
    """Parses audio files into spectrograms with optional normalization/augmentation."""

    def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
        super(SpectrogramParser, self).__init__()
        self.window_stride = audio_conf['window_stride']
        self.window_size = audio_conf['window_size']
        self.sample_rate = audio_conf['sample_rate']
        # STFT window function; falls back to Hamming for unknown names.
        self.window = windows.get(audio_conf['window'], windows['hamming'])
        self.normalize = normalize
        self.speed_volume_perturb = speed_volume_perturb
        self.spec_augment = spec_augment
        # Noise injection is only enabled when a noise directory is configured.
        self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
                                            audio_conf['noise_levels']) if audio_conf.get(
            'noise_dir') is not None else None
        self.noise_prob = audio_conf.get('noise_prob')

    def parse_audio(self, audio_path, audio=None, change_speed=None):
        """Load (or reuse) a waveform and convert it to a log-magnitude STFT."""
        if audio is not None:
            y = audio
        elif self.speed_volume_perturb:
            y = load_randomly_augmented_audio(audio_path, self.sample_rate)
        else:
            y = load_audio(audio_path)
        # Optionally time-stretch the waveform (tempo change, pitch preserved).
        if change_speed is not None:
            y = librosa.effects.time_stretch(y, change_speed)
        if self.noiseInjector:
            # Inject noise with probability noise_prob.
            add_noise = np.random.binomial(1, self.noise_prob)
            if add_noise:
                y = self.noiseInjector.inject_noise(y)
        n_fft = int(self.sample_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sample_rate * self.window_stride)
        # STFT
        D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
                         win_length=win_length, window=self.window)
        spect, phase = librosa.magphase(D)
        # S = log(S+1) compresses the dynamic range of the magnitudes.
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize:
            # Per-utterance mean/variance normalization.
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        if self.spec_augment:
            spect = spec_augment(spect)
        return spect

    def parse_transcript(self, transcript_path):
        # Implemented by SpectrogramDataset.
        raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
    """Dataset backed by a manifest CSV of "audio_path,transcript_path" lines."""

    def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
        with open(manifest_filepath) as f:
            ids = f.readlines()
        ids = [x.strip().split(',') for x in ids]
        self.ids = ids
        self.size = len(ids)
        # Map each label character to its integer index.
        self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
        # Fix: the original used a bare try/except that silently swallowed
        # every error; dict.get expresses the optional flag directly.
        self.use_jamo = audio_conf.get('use_jamo', False)
        super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)

    def __getitem__(self, index):
        """Return the (spectrogram, transcript-index-list) pair for *index*."""
        sample = self.ids[index]
        audio_path, transcript_path = sample[0], sample[1]
        spect = self.parse_audio(audio_path)
        transcript = self.parse_transcript(transcript_path)
        return spect, transcript

    def parse_transcript(self, transcript_path):
        """Read a transcript file and encode it as a list of label indices."""
        with open(transcript_path, 'r', encoding='utf8') as transcript_file:
            transcript = transcript_file.read().replace('\n', '')
        if self.use_jamo:
            # Decompose Hangul syllables into jamo before mapping to labels.
            transcript = split_syllables(transcript)
        # NOTE(review): filter(None, ...) drops unmapped characters but also
        # drops label index 0 (typically the CTC blank) -- confirm intended.
        transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
        return transcript

    def __len__(self):
        return self.size
def _collate_fn(batch):
def func(p):
return p[0].size(1)
batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)
longest_sample = max(batch, key=func)[0]
freq_size = longest_sample.size(0)
minibatch_size = len(batch)
max_seqlength = longest_sample.size(1)
inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(1)
inputs[x][0].narrow(1, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
    """DataLoader whose batches are padded by ``_collate_fn``."""

    def __init__(self, *args, **kwargs):
        super(AudioDataLoader, self).__init__(*args, **kwargs)
        # Replace the default collate with the spectrogram-padding version.
        self.collate_fn = _collate_fn
class BucketingSampler(Sampler):
    """Yield contiguous index buckets so similarly sized samples batch together."""

    def __init__(self, data_source, batch_size=1):
        super(BucketingSampler, self).__init__(data_source)
        self.data_source = data_source
        indices = list(range(0, len(data_source)))
        # Chop the index list into fixed-size buckets (last one may be short).
        self.bins = [indices[start:start + batch_size]
                     for start in range(0, len(indices), batch_size)]

    def __iter__(self):
        for bucket in self.bins:
            # Shuffle within a bucket; bucket order is controlled by shuffle().
            np.random.shuffle(bucket)
            yield bucket

    def __len__(self):
        return len(self.bins)

    def shuffle(self, epoch):
        np.random.shuffle(self.bins)
class DistributedBucketingSampler(Sampler):
    """Sampler that deals batches of indices out across distributed replicas."""

    def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
        super(DistributedBucketingSampler, self).__init__(data_source)
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.data_source = data_source
        self.ids = list(range(0, len(data_source)))
        self.batch_size = batch_size
        self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)]
        self.num_replicas = num_replicas
        self.rank = rank
        # Round up so every replica yields the same number of batches.
        self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        offset = self.rank
        # add extra samples to make it evenly divisible
        bins = self.bins + self.bins[:(self.total_size - len(self.bins))]
        assert len(bins) == self.total_size
        samples = bins[offset::self.num_replicas]  # Get every Nth bin, starting from rank
        return iter(samples)

    def __len__(self):
        return self.num_samples

    def shuffle(self, epoch):
        # deterministically shuffle based on epoch, so that all replicas
        # (seeding from the same epoch) agree on the new bin order
        g = torch.Generator()
        g.manual_seed(epoch)
        bin_ids = list(torch.randperm(len(self.bins), generator=g))
        self.bins = [self.bins[i] for i in bin_ids]
def get_audio_length(path):
    """Return the duration in seconds of the audio file at ``path``.

    Uses ``soxi -D``, which prints the duration as a decimal number.

    Raises:
        subprocess.CalledProcessError: if soxi exits non-zero.
        ValueError: if soxi's output cannot be parsed as a float.
    """
    # Pass arguments as a list with shell=False so file names containing
    # quotes or shell metacharacters cannot break out of (or inject into)
    # a shell command line, as the previous shell=True formatting could.
    output = subprocess.check_output(['soxi', '-D', path.strip()])
    return float(output)
def audio_with_sox(path, sample_rate, start_time, end_time):
    """Crop the audio at ``path`` to [start_time, end_time] seconds via sox.

    NOTE(review): ``path`` is interpolated into a shell command line; a file
    name containing quotes or shell metacharacters could break or inject
    into the command. Consider a ``subprocess`` argument list instead.
    """
    with NamedTemporaryFile(suffix=".wav") as tar_file:
        tar_filename = tar_file.name
        # Resample to ``sample_rate``, mono, 16-bit signed, trimmed to range.
        sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate,
                                                                                               tar_filename, start_time,
                                                                                               end_time)
        os.system(sox_params)
        y = load_audio(tar_filename)
        return y
def augment_audio_with_sox(path, sample_rate, tempo, gain):
    """Load the audio at ``path`` after applying sox tempo and gain effects.

    NOTE(review): shares the shell-interpolation caveat of ``audio_with_sox``:
    ``path`` is formatted into a shell string executed via ``os.system``.
    """
    with NamedTemporaryFile(suffix=".wav") as augmented_file:
        augmented_filename = augmented_file.name
        sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)]
        sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate,
                                                                                     augmented_filename,
                                                                                     " ".join(sox_augment_params))
        os.system(sox_params)
        y = load_audio(augmented_filename)
        return y
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),
                                  gain_range=(-6, 8)):
    """Load audio after a random tempo and gain perturbation.

    Tempo and gain are drawn uniformly from ``tempo_range`` and
    ``gain_range`` (the defaults are the original values used upstream:
    tempo 0.85-1.15, gain -6..8 dB).
    """
    tempo_value = np.random.uniform(*tempo_range)
    gain_value = np.random.uniform(*gain_range)
    return augment_audio_with_sox(path=path, sample_rate=sample_rate,
                                  tempo=tempo_value, gain=gain_value)
| true | true |
79002c38c89823a9661816a747317c552a3c7324 | 9,868 | py | Python | solcast/nodes.py | danhper/py-solc-ast | 6aace525d23be835c62e36410e17a657d1b4dde2 | [
"MIT"
] | null | null | null | solcast/nodes.py | danhper/py-solc-ast | 6aace525d23be835c62e36410e17a657d1b4dde2 | [
"MIT"
] | null | null | null | solcast/nodes.py | danhper/py-solc-ast | 6aace525d23be835c62e36410e17a657d1b4dde2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import functools
from copy import deepcopy
from .grammar import BASE_NODE_TYPES
class NodeBase:
    """Represents a node within the solidity AST.

    Attributes:
        depth: Number of nodes between this node and the SourceUnit
        offset: Absolute source offsets as a (start, stop) tuple
        contract_id: Contract ID as given by the standard compiler JSON
        fields: List of attributes for this node
    """

    def __init__(self, ast, parent):
        """Recursively convert a solc AST dictionary into node objects."""
        self.depth = parent.depth + 1 if parent is not None else 0
        self._parent = parent
        self._children = set()
        # "src" has the form "start:length:contract_id"
        src = [int(i) for i in ast["src"].split(":")]
        self.offset = (src[0], src[0] + src[1])
        self.contract_id = src[2]
        self.fields = sorted(ast.keys())
        for key, value in ast.items():
            # Block nodes are collapsed into their statement lists.
            if isinstance(value, dict) and value.get("nodeType") == "Block":
                value = value["statements"]
            elif key == "body" and not value:
                value = []
            if isinstance(value, dict):
                item = node_class_factory(value, self)
                if isinstance(item, NodeBase):
                    self._children.add(item)
                setattr(self, key, item)
            elif isinstance(value, list):
                items = [node_class_factory(i, self) for i in value]
                setattr(self, key, items)
                self._children.update(i for i in items if isinstance(i, NodeBase))
            else:
                # Primitive values are attached as plain attributes.
                setattr(self, key, value)

    def __hash__(self):
        # Nodes are identified by node type, tree depth, and source offset.
        return hash(f"{self.nodeType}{self.depth}{self.offset}")

    def __repr__(self):
        repr_str = f"<{self.nodeType}"
        if hasattr(self, "nodes"):
            repr_str += " iterable"
        if hasattr(self, "type"):
            if isinstance(self.type, str):
                repr_str += f" {self.type}"
            else:
                repr_str += f" {self.type._display()}"
        if self._display():
            repr_str += f" '{self._display()}'"
        else:
            repr_str += " object"
        return f"{repr_str}>"

    def _display(self):
        """Return a short human-readable label used by __repr__."""
        if hasattr(self, "name") and hasattr(self, "value"):
            return f"{self.name} = {self.value}"
        for attr in ("name", "value", "absolutePath"):
            if hasattr(self, attr):
                return f"{getattr(self, attr)}"
        return ""

    def children(
        self,
        depth=None,
        include_self=False,
        include_parents=True,
        include_children=True,
        required_offset=None,
        offset_limits=None,
        filters=None,
        exclude_filter=None,
    ):
        """Get childen nodes of this node.

        Arguments:
            depth: Number of levels of children to traverse. 0 returns only this node.
            include_self: Includes this node in the results.
            include_parents: Includes nodes that match in the results, when they also have
                             child nodes that match.
            include_children: If True, as soon as a match is found it's children will not
                              be included in the search.
            required_offset: Only match nodes with a source offset that contains this offset.
            offset_limits: Only match nodes when their source offset is contained inside
                           this source offset.
            filters: Dictionary of {attribute: value} that children must match. Can also
                     be given as a list of dicts, children that match one of the dicts
                     will be returned.
            exclude_filter: Dictionary of {attribute:value} that children cannot match.

        Returns:
            List of node objects."""
        if filters is None:
            filters = {}
        if exclude_filter is None:
            exclude_filter = {}
        if isinstance(filters, dict):
            filters = [filters]
        # Pre-bind the match configuration; _find_children receives itself
        # (find_fn) so the recursion can carry that configuration along.
        filter_fn = functools.partial(
            _check_filters, required_offset, offset_limits, filters, exclude_filter
        )
        find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children)
        result = find_fn(find_fn, depth, self)
        # The search always considers this node first; drop it unless asked for.
        if include_self or not result or result[0] != self:
            return result
        return result[1:]

    def parents(self, depth=-1, filters=None):
        """Get parent nodes of this node.

        Arguments:
            depth: Depth limit. If given as a negative value, it will be subtracted
                   from this object's depth.
            filters: Dictionary of {attribute: value} that parents must match.

        Returns: list of nodes"""
        if filters and not isinstance(filters, dict):
            raise TypeError("Filters must be a dict")
        if depth < 0:
            depth = self.depth + depth
        if depth >= self.depth or depth < 0:
            raise IndexError("Given depth exceeds node depth")
        node_list = []
        parent = self
        while True:
            parent = parent._parent
            if not filters or _check_filter(parent, filters, {}):
                node_list.append(parent)
            if parent.depth == depth:
                return node_list

    def parent(self, depth=-1, filters=None):
        """Get a parent node of this node.

        Arguments:
            depth: Depth limit. If given as a negative value, it will be subtracted
                   from this object's depth. The parent at this exact depth is returned.
            filters: Dictionary of {attribute: value} that the parent must match.

        If a filter value is given, will return the first parent that meets the filters
        up to the given depth. If none is found, returns None.

        If no filter is given, returns the parent at the given depth."""
        if filters and not isinstance(filters, dict):
            raise TypeError("Filters must be a dict")
        if depth < 0:
            depth = self.depth + depth
        if depth >= self.depth or depth < 0:
            raise IndexError("Given depth exceeds node depth")
        parent = self
        while parent.depth > depth:
            parent = parent._parent
            if parent.depth == depth and not filters:
                return parent
            if filters and _check_filter(parent, filters, {}):
                return parent
        return None

    def is_child_of(self, node):
        """Checks if this object is a child of the given node object."""
        if node.depth >= self.depth:
            return False
        return self.parent(node.depth) == node

    def is_parent_of(self, node):
        """Checks if this object is a parent of the given node object."""
        if node.depth <= self.depth:
            return False
        return node.parent(self.depth) == self

    def get(self, key, default=None):
        """
        Gets an attribute from this node, if that attribute exists.

        Arguments:
            key: Field name to return. May contain decimals to return a value
                 from a child node.
            default: Default value to return.

        Returns: Field value if it exists. Default value if not.
        """
        if key is None:
            raise TypeError("Cannot match against None")
        obj = self
        # Dotted keys walk into nested nodes/dicts one component at a time.
        for k in key.split("."):
            if isinstance(obj, dict):
                obj = obj.get(k)
            else:
                obj = getattr(obj, k, None)
        return obj or default
class IterableNodeBase(NodeBase):
    """NodeBase subclass that exposes its child ``nodes`` like a sequence."""

    def __getitem__(self, key):
        """Index by position, or look up a child node by its ``name``."""
        if not isinstance(key, str):
            return self.nodes[key]
        for node in self.nodes:
            if getattr(node, "name", None) == key:
                return node
        raise KeyError(key)

    def __iter__(self):
        return iter(self.nodes)

    def __len__(self):
        return len(self.nodes)

    def __contains__(self, obj):
        return obj in self.nodes
def node_class_factory(ast, parent):
    """Create a node object whose class is named after its AST nodeType.

    Non-dict values (and dicts without a nodeType) pass through unchanged.
    """
    ast = deepcopy(ast)
    if not isinstance(ast, dict) or "nodeType" not in ast:
        return ast
    # "body" lists become "nodes" so the iterable base class picks them up.
    if "body" in ast:
        ast["nodes"] = ast.pop("body")
    base_class = IterableNodeBase if "nodes" in ast else NodeBase
    matched = next(
        (base for base, kinds in BASE_NODE_TYPES.items() if ast["nodeType"] in kinds),
        None,
    )
    if matched:
        ast["baseNodeType"] = matched
    node_cls = type(ast["nodeType"], (base_class,), {})
    return node_cls(ast, parent)
def _check_filters(required_offset, offset_limits, filters, exclude, node):
    """Return True when ``node`` passes the offset checks and any filter dict."""
    if required_offset and not is_inside_offset(required_offset, node.offset):
        return False
    if offset_limits and not is_inside_offset(node.offset, offset_limits):
        return False
    # A node matches if any one of the filter dicts matches it.
    return any(_check_filter(node, f, exclude) for f in filters)
def _check_filter(node, filters, exclude):
for key, value in filters.items():
if node.get(key) != value:
return False
for key, value in exclude.items():
if node.get(key) == value:
return False
return True
def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node):
    # Depth-first search over node._children. ``find_fn`` is this same
    # function partially applied with the first three arguments; it is passed
    # back in so the recursion carries its configuration without globals.
    if depth is not None:
        depth -= 1
        if depth < 0:
            # Depth budget exhausted: only this node may still match.
            return [node] if filter_fn(node) else []
    if not include_children and filter_fn(node):
        # Stop descending as soon as a match is found.
        return [node]
    node_list = []
    for child in node._children:
        node_list.extend(find_fn(find_fn, depth, child))
    # Prepend this node when it matches and either parents are wanted or no
    # descendant matched.
    if (include_parents or not node_list) and filter_fn(node):
        node_list.insert(0, node)
    return node_list
def is_inside_offset(inner, outer):
    """Return True when the ``inner`` (start, stop) pair lies within ``outer``.

    Also requires ``inner`` itself to be well-ordered (start <= stop).
    """
    inner_start, inner_stop = inner[0], inner[1]
    if inner_start > inner_stop:
        return False
    return outer[0] <= inner_start and inner_stop <= outer[1]
| 35.496403 | 97 | 0.588164 |
import functools
from copy import deepcopy
from .grammar import BASE_NODE_TYPES
class NodeBase:
def __init__(self, ast, parent):
self.depth = parent.depth + 1 if parent is not None else 0
self._parent = parent
self._children = set()
src = [int(i) for i in ast["src"].split(":")]
self.offset = (src[0], src[0] + src[1])
self.contract_id = src[2]
self.fields = sorted(ast.keys())
for key, value in ast.items():
if isinstance(value, dict) and value.get("nodeType") == "Block":
value = value["statements"]
elif key == "body" and not value:
value = []
if isinstance(value, dict):
item = node_class_factory(value, self)
if isinstance(item, NodeBase):
self._children.add(item)
setattr(self, key, item)
elif isinstance(value, list):
items = [node_class_factory(i, self) for i in value]
setattr(self, key, items)
self._children.update(i for i in items if isinstance(i, NodeBase))
else:
setattr(self, key, value)
def __hash__(self):
return hash(f"{self.nodeType}{self.depth}{self.offset}")
def __repr__(self):
repr_str = f"<{self.nodeType}"
if hasattr(self, "nodes"):
repr_str += " iterable"
if hasattr(self, "type"):
if isinstance(self.type, str):
repr_str += f" {self.type}"
else:
repr_str += f" {self.type._display()}"
if self._display():
repr_str += f" '{self._display()}'"
else:
repr_str += " object"
return f"{repr_str}>"
def _display(self):
if hasattr(self, "name") and hasattr(self, "value"):
return f"{self.name} = {self.value}"
for attr in ("name", "value", "absolutePath"):
if hasattr(self, attr):
return f"{getattr(self, attr)}"
return ""
def children(
self,
depth=None,
include_self=False,
include_parents=True,
include_children=True,
required_offset=None,
offset_limits=None,
filters=None,
exclude_filter=None,
):
if filters is None:
filters = {}
if exclude_filter is None:
exclude_filter = {}
if isinstance(filters, dict):
filters = [filters]
filter_fn = functools.partial(
_check_filters, required_offset, offset_limits, filters, exclude_filter
)
find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children)
result = find_fn(find_fn, depth, self)
if include_self or not result or result[0] != self:
return result
return result[1:]
def parents(self, depth=-1, filters=None):
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
node_list = []
parent = self
while True:
parent = parent._parent
if not filters or _check_filter(parent, filters, {}):
node_list.append(parent)
if parent.depth == depth:
return node_list
def parent(self, depth=-1, filters=None):
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
parent = self
while parent.depth > depth:
parent = parent._parent
if parent.depth == depth and not filters:
return parent
if filters and _check_filter(parent, filters, {}):
return parent
return None
def is_child_of(self, node):
if node.depth >= self.depth:
return False
return self.parent(node.depth) == node
def is_parent_of(self, node):
if node.depth <= self.depth:
return False
return node.parent(self.depth) == self
def get(self, key, default=None):
if key is None:
raise TypeError("Cannot match against None")
obj = self
for k in key.split("."):
if isinstance(obj, dict):
obj = obj.get(k)
else:
obj = getattr(obj, k, None)
return obj or default
class IterableNodeBase(NodeBase):
def __getitem__(self, key):
if isinstance(key, str):
try:
return next(i for i in self.nodes if getattr(i, "name", None) == key)
except StopIteration:
raise KeyError(key)
return self.nodes[key]
def __iter__(self):
return iter(self.nodes)
def __len__(self):
return len(self.nodes)
def __contains__(self, obj):
return obj in self.nodes
def node_class_factory(ast, parent):
ast = deepcopy(ast)
if not isinstance(ast, dict) or "nodeType" not in ast:
return ast
if "body" in ast:
ast["nodes"] = ast.pop("body")
base_class = IterableNodeBase if "nodes" in ast else NodeBase
base_type = next((k for k, v in BASE_NODE_TYPES.items() if ast["nodeType"] in v), None)
if base_type:
ast["baseNodeType"] = base_type
return type(ast["nodeType"], (base_class,), {})(ast, parent)
def _check_filters(required_offset, offset_limits, filters, exclude, node):
if required_offset and not is_inside_offset(required_offset, node.offset):
return False
if offset_limits and not is_inside_offset(node.offset, offset_limits):
return False
for f in filters:
if _check_filter(node, f, exclude):
return True
return False
def _check_filter(node, filters, exclude):
for key, value in filters.items():
if node.get(key) != value:
return False
for key, value in exclude.items():
if node.get(key) == value:
return False
return True
def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node):
if depth is not None:
depth -= 1
if depth < 0:
return [node] if filter_fn(node) else []
if not include_children and filter_fn(node):
return [node]
node_list = []
for child in node._children:
node_list.extend(find_fn(find_fn, depth, child))
if (include_parents or not node_list) and filter_fn(node):
node_list.insert(0, node)
return node_list
def is_inside_offset(inner, outer):
return outer[0] <= inner[0] <= inner[1] <= outer[1]
| true | true |
79002cbf125f8a2ca0c43c5f81dc3b744e72c14d | 4,034 | py | Python | ms/storage/backends/google_appengine.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 16 | 2015-01-02T15:39:04.000Z | 2016-03-17T06:38:46.000Z | ms/storage/backends/google_appengine.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 37 | 2015-01-28T20:58:05.000Z | 2016-03-22T04:01:32.000Z | ms/storage/backends/google_appengine.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 8 | 2015-04-08T02:26:03.000Z | 2016-03-04T05:56:24.000Z |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import google
from google.appengine.ext import ndb
import google.appengine.api.memcache as google_memcache
import google.appengine.ext.deferred as google_deferred
from google.appengine.datastore.datastore_query import Cursor as GoogleCursor
def raise_(ex):
    # Helper that raises from expression context (``raise`` is a statement,
    # so it cannot appear inside a lambda such as ``concurrent_return`` below).
    raise ex
class FutureWrapper(ndb.Future):
    """Presents an already-computed value through the ndb.Future interface.

    Used so callers can treat plain values and real futures uniformly.
    """

    # The wrapped value is complete from the start.
    state = ndb.Future.FINISHING
    _done = True

    def __init__(self, result):
        self.result = result

    def get_result(self):
        return self.result

    def done(self):
        return True

    def check_success(self):
        return None

    def get_exception(self):
        return None

    def get_traceback(self):
        return None

    def wait(self):
        pass
class FutureQueryWrapper(object):
    """Adapts a query future so get_result() yields a single item or None.

    TODO: wrap query for one item into a future.
    """

    def __init__(self, query_fut):
        self.query_fut = query_fut

    def get_result(self):
        # The underlying future resolves to a list; return its first element.
        results = self.query_fut.get_result()
        return results[0] if results else None

    def done(self):
        return self.query_fut.done()

    def wait(self):
        return self.query_fut.wait()

    def check_success(self):
        return self.query_fut.check_success()

    def get_exception(self):
        return self.query_fut.get_exception()

    def get_traceback(self):
        return self.query_fut.get_traceback()
# aliases for types
# These names abstract the App Engine datastore behind backend-neutral
# identifiers so callers do not import ndb directly.
Model = ndb.Model
Integer = ndb.IntegerProperty
Float = ndb.FloatProperty
String = ndb.StringProperty
Text = ndb.TextProperty
Key = ndb.KeyProperty
Boolean = ndb.BooleanProperty
Json = ndb.JsonProperty
Blob = ndb.BlobProperty
Computed = ndb.ComputedProperty
Pickled = ndb.PickleProperty
Cursor = GoogleCursor
# aliases for keys
make_key = ndb.Key
def wait_futures(future_list):
    """Block until every future in ``future_list`` completes.

    Non-future values are wrapped in FutureWrapper so callers can uniformly
    call get_result() on everything returned; None entries are dropped.
    """
    wrapped = []
    real_futures = []
    for item in future_list:
        if item is None:
            continue
        if isinstance(item, (ndb.Future, FutureWrapper)):
            # a future or something compatible
            real_futures.append(item)
        else:
            # not a future; wrap it so it quacks like one
            wrapped.append(FutureWrapper(item))
    ndb.Future.wait_all(real_futures)
    return real_futures + wrapped
deferred = google_deferred
concurrent = ndb.tasklet
# ndb tasklets return values by raising ndb.Return; raise_ lets that happen
# inside a lambda expression.
concurrent_return = (lambda x: (raise_(ndb.Return( x ))))
# asynchronous operations
get_multi_async = ndb.get_multi_async
put_multi_async = ndb.put_multi_async
# synchronous operations
get_multi = ndb.get_multi
put_multi = ndb.put_multi
delete_multi = ndb.delete_multi
# aliases for memcache
memcache = google_memcache
# aliases for transaction
transaction = ndb.transaction
transaction_async = ndb.transaction_async
transactional = ndb.transactional
# alises for query predicates
opAND = ndb.AND
opOR = ndb.OR
# aliases for top-level asynchronous loop
toplevel = ndb.toplevel
# aliases for common exceptions
RequestDeadlineExceededError = google.appengine.runtime.DeadlineExceededError
APIRequestDeadlineExceededError = google.appengine.runtime.apiproxy_errors.DeadlineExceededError
URLRequestDeadlineExceededError = google.appengine.api.urlfetch_errors.DeadlineExceededError
TransactionFailedError = google.appengine.ext.db.TransactionFailedError
| 25.694268 | 96 | 0.715419 |
import google
from google.appengine.ext import ndb
import google.appengine.api.memcache as google_memcache
import google.appengine.ext.deferred as google_deferred
from google.appengine.datastore.datastore_query import Cursor as GoogleCursor
def raise_(ex):
raise ex
class FutureWrapper( ndb.Future ):
state = ndb.Future.FINISHING
_done = True
def __init__( self, result ):
self.result = result
def get_result( self ):
return self.result
def done( self ):
return True
def wait( self ):
pass
def check_success( self ):
return None
def get_exception( self ):
return None
def get_traceback( self ):
return None
class FutureQueryWrapper( object ):
def __init__(self, query_fut):
self.query_fut = query_fut
def get_result( self ):
res = self.query_fut.get_result()
if res != None and len(res) > 0:
return res[0]
else:
return None
def done( self ):
return self.query_fut.done()
def wait( self):
return self.query_fut.wait()
def check_success( self ):
return self.query_fut.check_success()
def get_exception( self ):
return self.query_fut.get_exception()
def get_traceback( self ):
return self.query_fut.get_traceback()
Model = ndb.Model
Integer = ndb.IntegerProperty
Float = ndb.FloatProperty
String = ndb.StringProperty
Text = ndb.TextProperty
Key = ndb.KeyProperty
Boolean = ndb.BooleanProperty
Json = ndb.JsonProperty
Blob = ndb.BlobProperty
Computed = ndb.ComputedProperty
Pickled = ndb.PickleProperty
Cursor = GoogleCursor
make_key = ndb.Key
def wait_futures( future_list ):
ret = []
futs = []
for f in future_list:
if f is None:
continue
if not isinstance( f, ndb.Future ) and not isinstance( f, FutureWrapper ):
ret.append( FutureWrapper( f ) )
else:
futs.append( f )
ndb.Future.wait_all( futs )
return futs + ret
deferred = google_deferred
concurrent = ndb.tasklet
concurrent_return = (lambda x: (raise_(ndb.Return( x ))))
get_multi_async = ndb.get_multi_async
put_multi_async = ndb.put_multi_async
get_multi = ndb.get_multi
put_multi = ndb.put_multi
delete_multi = ndb.delete_multi
memcache = google_memcache
transaction = ndb.transaction
transaction_async = ndb.transaction_async
transactional = ndb.transactional
opAND = ndb.AND
opOR = ndb.OR
toplevel = ndb.toplevel
RequestDeadlineExceededError = google.appengine.runtime.DeadlineExceededError
APIRequestDeadlineExceededError = google.appengine.runtime.apiproxy_errors.DeadlineExceededError
URLRequestDeadlineExceededError = google.appengine.api.urlfetch_errors.DeadlineExceededError
TransactionFailedError = google.appengine.ext.db.TransactionFailedError
| true | true |
79002cd5a4e3ac20272d3ce8d02f9c6329a8853a | 150 | py | Python | pysecm/ric/__init__.py | bostonrwalker/pysecm | 76fa1d537c6f222214d7582d723ea9b9b67c87b9 | [
"MIT"
] | null | null | null | pysecm/ric/__init__.py | bostonrwalker/pysecm | 76fa1d537c6f222214d7582d723ea9b9b67c87b9 | [
"MIT"
] | null | null | null | pysecm/ric/__init__.py | bostonrwalker/pysecm | 76fa1d537c6f222214d7582d723ea9b9b67c87b9 | [
"MIT"
] | null | null | null | from .ric import RIC
import pysecm.ric.commodity
import pysecm.ric.equity
import pysecm.ric.fixed_income
import pysecm.ric.fx
import pysecm.ric.index
| 21.428571 | 30 | 0.833333 | from .ric import RIC
import pysecm.ric.commodity
import pysecm.ric.equity
import pysecm.ric.fixed_income
import pysecm.ric.fx
import pysecm.ric.index
| true | true |
79002dabc6886764e0fc25e29a3878f17d75ef03 | 35,617 | py | Python | ironic/tests/unit/db/test_nodes.py | Rachit7194/ironic | a17b20d12554133931f44c78c415f2ea0f61ac74 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/db/test_nodes.py | Rachit7194/ironic | a17b20d12554133931f44c78c415f2ea0f61ac74 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/db/test_nodes.py | Rachit7194/ironic | a17b20d12554133931f44c78c415f2ea0f61ac74 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Nodes via the DB API"""
import datetime
import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class DbNodeTestCase(base.DbTestCase):
def test_create_node(self):
node = utils.create_test_node()
self.assertEqual([], node.tags)
self.assertEqual([], node.traits)
def test_create_node_with_tags(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
tags=['tag1', 'tag2'])
def test_create_node_with_traits(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
traits=['trait1', 'trait2'])
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
utils.create_test_node)
def test_create_node_instance_already_associated(self):
instance = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
self.assertRaises(exception.InstanceAssociated,
utils.create_test_node,
uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
def test_create_node_name_duplicate(self):
node = utils.create_test_node(name='spam')
self.assertRaises(exception.DuplicateName,
utils.create_test_node,
name=node.name)
def test_get_node_by_id(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_name(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_name(node.name)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_name,
'spam-eggs-bacon-spam')
def test_get_nodeinfo_list_defaults(self):
node_id_list = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
node_id_list.append(node.id)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(node_id_list))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = uuidutils.generate_uuid()
extra = {'foo': i}
node = utils.create_test_node(extra=extra, uuid=uuid)
uuids[node.id] = uuid
extras[node.id] = extra
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
    def test_get_nodeinfo_list_with_filters(self):
        """Exercise every supported filter key of get_nodeinfo_list."""
        # node1: associated with an instance and reserved by a conductor.
        node1 = utils.create_test_node(
            driver='driver-one',
            instance_uuid=uuidutils.generate_uuid(),
            reservation='fake-host',
            uuid=uuidutils.generate_uuid())
        # node2: unreserved, in maintenance with a fault recorded.
        node2 = utils.create_test_node(
            driver='driver-two',
            uuid=uuidutils.generate_uuid(),
            maintenance=True,
            fault='boom',
            resource_class='foo',
            conductor_group='group1')
        # node3: same driver as node1, reserved by a different host.
        node3 = utils.create_test_node(
            driver='driver-one',
            uuid=uuidutils.generate_uuid(),
            reservation='another-fake-host')

        # driver filter
        res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
        self.assertEqual(sorted([node1.id, node3.id]),
                         sorted([r[0] for r in res]))

        res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
        self.assertEqual([], [r[0] for r in res])

        # instance association filter
        res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
        self.assertEqual([node1.id], [r[0] for r in res])

        res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
        self.assertEqual(sorted([node2.id, node3.id]),
                         sorted([r[0] for r in res]))

        # reservation filter
        res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
        self.assertEqual(sorted([node1.id, node3.id]),
                         sorted([r[0] for r in res]))

        res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
        self.assertEqual([node2.id], [r[0] for r in res])

        # maintenance filter
        res = self.dbapi.get_nodeinfo_list(filters={'maintenance': True})
        self.assertEqual([node2.id], [r.id for r in res])

        res = self.dbapi.get_nodeinfo_list(filters={'maintenance': False})
        self.assertEqual(sorted([node1.id, node3.id]),
                         sorted([r.id for r in res]))

        # fault filter
        res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
        self.assertEqual([node2.id], [r.id for r in res])

        res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
        self.assertEqual([], [r.id for r in res])

        # resource class filter
        res = self.dbapi.get_nodeinfo_list(filters={'resource_class': 'foo'})
        self.assertEqual([node2.id], [r.id for r in res])

        # conductor group filter
        res = self.dbapi.get_nodeinfo_list(
            filters={'conductor_group': 'group1'})
        self.assertEqual([node2.id], [r.id for r in res])

        res = self.dbapi.get_nodeinfo_list(
            filters={'conductor_group': 'group2'})
        self.assertEqual([], [r.id for r in res])

        # reservation by any of several hosts
        res = self.dbapi.get_nodeinfo_list(
            filters={'reserved_by_any_of': ['fake-host',
                                            'another-fake-host']})
        self.assertEqual(sorted([node1.id, node3.id]),
                         sorted([r.id for r in res]))

        # direct id / uuid filters
        res = self.dbapi.get_nodeinfo_list(filters={'id': node1.id})
        self.assertEqual([node1.id], [r.id for r in res])

        res = self.dbapi.get_nodeinfo_list(filters={'uuid': node1.uuid})
        self.assertEqual([node1.id], [r.id for r in res])

        # ensure unknown filters explode
        filters = {'bad_filter': 'foo'}
        self.assertRaisesRegex(ValueError,
                               'bad_filter',
                               self.dbapi.get_nodeinfo_list,
                               filters=filters)

        # even with good filters present
        filters = {'bad_filter': 'foo', 'id': node1.id}
        self.assertRaisesRegex(ValueError,
                               'bad_filter',
                               self.dbapi.get_nodeinfo_list,
                               filters=filters)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.DEPLOYWAIT)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.INSPECTING)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.INSPECTING})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_nodeinfo_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_nodeinfo_list(
filters={'description_contains': 'Hello'})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'description_contains':
'World!'})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
six.assertCountEqual(self, uuids, res_uuids)
for r in res:
self.assertEqual([], r.tags)
self.assertEqual([], r.traits)
def test_get_node_list_with_filters(self):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1',
power_state='power on')
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
uuids = [uuidutils.generate_uuid(),
node1.uuid,
uuidutils.generate_uuid()]
res = self.dbapi.get_node_list(filters={'uuid_in': uuids})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': False})
self.assertEqual([node1.id], [r.id for r in res])
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
# even with good filters present
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
def test_get_node_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_node_list(filters={
'description_contains': 'Hello'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={
'description_contains': 'World!'})
self.assertEqual([node2.id], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': uuidutils.generate_uuid()})
def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, node.id)
def test_destroy_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid, node.uuid)
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_tags_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_tags_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_volume_connector_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_connector_get_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_target_gets_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_volume_target_gets_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_traits_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_traits_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
allocation = utils.create_test_allocation(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.AllocationNotFound,
self.dbapi.get_allocation_by_id, allocation.id)
def test_update_node(self):
node = utils.create_test_node()
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
self.assertEqual([], res.tags)
self.assertEqual([], res.traits)
def test_update_node_with_tags(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([tag.tag], [t.tag for t in res.tags])
def test_update_node_with_traits(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([trait.trait], [t.trait for t in res.traits])
def test_update_node_not_found(self):
node_uuid = uuidutils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_uuid(self):
node = utils.create_test_node()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_node, node.id,
{'uuid': ''})
def test_update_node_associate_and_disassociate(self):
node = utils.create_test_node()
new_i_uuid = uuidutils.generate_uuid()
res = self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(node.id, {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
def test_update_node_instance_already_associated(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid())
new_i_uuid = uuidutils.generate_uuid()
self.dbapi.update_node(node1.id, {'instance_uuid': new_i_uuid})
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.InstanceAssociated,
self.dbapi.update_node,
node2.id,
{'instance_uuid': new_i_uuid})
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
self.assertEqual(mocked_time,
timeutils.normalize_time(res['provision_updated_at']))
def test_update_node_name_duplicate(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
name='spam')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.DuplicateName,
self.dbapi.update_node,
node2.id,
{'name': node1.name})
def test_update_node_no_provision(self):
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
self.assertIsNone(res['inspection_started_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_started_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_started_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_finished_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_finished_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_finished_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_finished_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_started_at'])
def test_reserve_node(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
uuid = node.uuid
r1 = 'fake-reservation'
# reserve the node
res = self.dbapi.reserve_node(r1, uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
# check reservation
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
self.dbapi.reserve_node(r1, uuid)
# release reservation
self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
def test_reservation_of_reserved_node_fails(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
# reserve the node
self.dbapi.reserve_node(r1, uuid)
# another host fails to reserve or release
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_node,
r2, uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.release_node,
r2, uuid)
def test_reservation_after_release(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
# another host succeeds
self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
def test_reservation_in_exception_message(self):
node = utils.create_test_node()
uuid = node.uuid
r = 'fake-reservation'
self.dbapi.reserve_node(r, uuid)
exc = self.assertRaises(exception.NodeLocked, self.dbapi.reserve_node,
'another', uuid)
self.assertIn(r, str(exc))
def test_reservation_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.uuid)
def test_release_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.uuid)
def test_release_non_locked_node(self):
node = utils.create_test_node()
self.assertIsNone(node.reservation)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.uuid)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_node_provisioning(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
node = utils.create_test_node()
# assert provision_updated_at is None
self.assertIsNone(node.provision_updated_at)
self.dbapi.touch_node_provisioning(node.uuid)
node = self.dbapi.get_node_by_uuid(node.uuid)
# assert provision_updated_at has been updated
self.assertEqual(test_time,
timeutils.normalize_time(node.provision_updated_at))
def test_touch_node_provisioning_not_found(self):
self.assertRaises(
exception.NodeNotFound,
self.dbapi.touch_node_provisioning, uuidutils.generate_uuid())
def test_get_node_by_port_addresses(self):
wrong_node = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid())
node = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid())
addresses = []
for i in (1, 2, 3):
address = '52:54:00:cf:2d:4%s' % i
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id, address=address)
if i > 1:
addresses.append(address)
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=wrong_node.id,
address='aa:bb:cc:dd:ee:ff')
res = self.dbapi.get_node_by_port_addresses(addresses)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual([], res.traits)
def test_get_node_by_port_addresses_not_found(self):
node = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id,
address='aa:bb:cc:dd:ee:ff')
self.assertRaisesRegex(exception.NodeNotFound,
'was not found',
self.dbapi.get_node_by_port_addresses,
['11:22:33:44:55:66'])
def test_get_node_by_port_addresses_multiple_found(self):
node1 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
addresses = ['52:54:00:cf:2d:4%s' % i for i in (1, 2)]
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node1.id,
address=addresses[0])
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node2.id,
address=addresses[1])
self.assertRaisesRegex(exception.NodeNotFound,
'Multiple nodes',
self.dbapi.get_node_by_port_addresses,
addresses)
| 41.608645 | 79 | 0.610411 |
import datetime
import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class DbNodeTestCase(base.DbTestCase):
def test_create_node(self):
node = utils.create_test_node()
self.assertEqual([], node.tags)
self.assertEqual([], node.traits)
def test_create_node_with_tags(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
tags=['tag1', 'tag2'])
def test_create_node_with_traits(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
traits=['trait1', 'trait2'])
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
utils.create_test_node)
def test_create_node_instance_already_associated(self):
instance = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
self.assertRaises(exception.InstanceAssociated,
utils.create_test_node,
uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
def test_create_node_name_duplicate(self):
node = utils.create_test_node(name='spam')
self.assertRaises(exception.DuplicateName,
utils.create_test_node,
name=node.name)
def test_get_node_by_id(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_name(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_name(node.name)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_name,
'spam-eggs-bacon-spam')
def test_get_nodeinfo_list_defaults(self):
node_id_list = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
node_id_list.append(node.id)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(node_id_list))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = uuidutils.generate_uuid()
extra = {'foo': i}
node = utils.create_test_node(extra=extra, uuid=uuid)
uuids[node.id] = uuid
extras[node.id] = extra
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
def test_get_nodeinfo_list_with_filters(self):
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1')
node3 = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid(),
reservation='another-fake-host')
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
self.assertEqual(sorted([node2.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
self.assertEqual([node2.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': False})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'reserved_by_any_of': ['fake-host',
'another-fake-host']})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=past)
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.DEPLOYWAIT)
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=past)
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.INSPECTING)
utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.INSPECTING})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_nodeinfo_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_nodeinfo_list(
filters={'description_contains': 'Hello'})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'description_contains':
'World!'})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
six.assertCountEqual(self, uuids, res_uuids)
for r in res:
self.assertEqual([], r.tags)
self.assertEqual([], r.traits)
def test_get_node_list_with_filters(self):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1',
power_state='power on')
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
uuids = [uuidutils.generate_uuid(),
node1.uuid,
uuidutils.generate_uuid()]
res = self.dbapi.get_node_list(filters={'uuid_in': uuids})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': False})
self.assertEqual([node1.id], [r.id for r in res])
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
def test_get_node_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_node_list(filters={
'description_contains': 'Hello'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={
'description_contains': 'World!'})
self.assertEqual([node2.id], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': uuidutils.generate_uuid()})
def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, node.id)
def test_destroy_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid, node.uuid)
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_tags_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_tags_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_volume_connector_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_connector_get_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_target_gets_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_volume_target_gets_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_traits_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_traits_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
allocation = utils.create_test_allocation(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.AllocationNotFound,
self.dbapi.get_allocation_by_id, allocation.id)
def test_update_node(self):
node = utils.create_test_node()
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
self.assertEqual([], res.tags)
self.assertEqual([], res.traits)
def test_update_node_with_tags(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([tag.tag], [t.tag for t in res.tags])
def test_update_node_with_traits(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([trait.trait], [t.trait for t in res.traits])
def test_update_node_not_found(self):
node_uuid = uuidutils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_uuid(self):
node = utils.create_test_node()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_node, node.id,
{'uuid': ''})
def test_update_node_associate_and_disassociate(self):
node = utils.create_test_node()
new_i_uuid = uuidutils.generate_uuid()
res = self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(node.id, {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
def test_update_node_instance_already_associated(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid())
new_i_uuid = uuidutils.generate_uuid()
self.dbapi.update_node(node1.id, {'instance_uuid': new_i_uuid})
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.InstanceAssociated,
self.dbapi.update_node,
node2.id,
{'instance_uuid': new_i_uuid})
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
self.assertEqual(mocked_time,
timeutils.normalize_time(res['provision_updated_at']))
def test_update_node_name_duplicate(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
name='spam')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.DuplicateName,
self.dbapi.update_node,
node2.id,
{'name': node1.name})
def test_update_node_no_provision(self):
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
self.assertIsNone(res['inspection_started_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_started_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_started_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_finished_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_finished_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_finished_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_finished_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_started_at'])
def test_reserve_node(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
uuid = node.uuid
r1 = 'fake-reservation'
res = self.dbapi.reserve_node(r1, uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
def test_reservation_of_reserved_node_fails(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_node,
r2, uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.release_node,
r2, uuid)
def test_reservation_after_release(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
def test_reservation_in_exception_message(self):
node = utils.create_test_node()
uuid = node.uuid
r = 'fake-reservation'
self.dbapi.reserve_node(r, uuid)
exc = self.assertRaises(exception.NodeLocked, self.dbapi.reserve_node,
'another', uuid)
self.assertIn(r, str(exc))
def test_reservation_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.uuid)
def test_release_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.uuid)
def test_release_non_locked_node(self):
node = utils.create_test_node()
self.assertIsNone(node.reservation)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.uuid)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_node_provisioning(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
node = utils.create_test_node()
self.assertIsNone(node.provision_updated_at)
self.dbapi.touch_node_provisioning(node.uuid)
node = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(test_time,
timeutils.normalize_time(node.provision_updated_at))
def test_touch_node_provisioning_not_found(self):
self.assertRaises(
exception.NodeNotFound,
self.dbapi.touch_node_provisioning, uuidutils.generate_uuid())
def test_get_node_by_port_addresses(self):
wrong_node = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid())
node = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid())
addresses = []
for i in (1, 2, 3):
address = '52:54:00:cf:2d:4%s' % i
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id, address=address)
if i > 1:
addresses.append(address)
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=wrong_node.id,
address='aa:bb:cc:dd:ee:ff')
res = self.dbapi.get_node_by_port_addresses(addresses)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual([], res.traits)
def test_get_node_by_port_addresses_not_found(self):
node = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id,
address='aa:bb:cc:dd:ee:ff')
self.assertRaisesRegex(exception.NodeNotFound,
'was not found',
self.dbapi.get_node_by_port_addresses,
['11:22:33:44:55:66'])
def test_get_node_by_port_addresses_multiple_found(self):
node1 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
addresses = ['52:54:00:cf:2d:4%s' % i for i in (1, 2)]
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node1.id,
address=addresses[0])
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node2.id,
address=addresses[1])
self.assertRaisesRegex(exception.NodeNotFound,
'Multiple nodes',
self.dbapi.get_node_by_port_addresses,
addresses)
| true | true |
79002f415063a696a36d82b0c3b625aaecab009c | 10,937 | py | Python | gs-scheduler/global_scheduler2/policy_dockerfile/lowlatency/GE_GSCH_low_latency.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | 13 | 2020-10-14T07:45:08.000Z | 2021-10-01T08:19:56.000Z | gs-scheduler/global_scheduler2/policy_dockerfile/lowlatency/GE_GSCH_low_latency.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | null | null | null | gs-scheduler/global_scheduler2/policy_dockerfile/lowlatency/GE_GSCH_low_latency.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | 17 | 2020-11-09T05:16:42.000Z | 2021-12-28T08:04:33.000Z | from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka import KafkaAdminClient
import json
from json import dumps
from json import loads
import time
import os
import requests
import sys
import GE_GSCH_low_define as lowDefine
'''
{'requestID': 'req-f6720a0e-e3df-455a-825d-f8c80cedc2d9',
'date': '2021-10-18 13:46:30', 'status': 'create',
'fileID': 'b469e54a-721f-4c55-b43e-d09088556031', 'failCnt': 0,
'env': {
'type': 'global',
'targetClusters': ['c1', ['c2', 'c3'], 'c4'],
'priority': 'GLowLatencyPriority',
'option': {
'sourceCluster': 'c1',
'sourceNode': 'a-worker-node01'
}
}
}
'''
class GLowLatencyPriority_Job:
    """A single dispatched 'GLowLatencyPriority' scheduling request.

    Wraps one request dict popped from the front server's dispatched
    queue and talks to cluster agents over Kafka: it can ask the source
    cluster to measure latency to a group of candidate clusters, then
    ask clusters (best latency first) to apply the request's yaml.

    Expected request layout (from the module example)::

        {'requestID': ..., 'fileID': ..., 'failCnt': ...,
         'env': {'targetClusters': ['c1', ['c2', 'c3'], 'c4'],
                 'option': {'sourceCluster': 'c1', 'sourceNode': ...}}}
    """

    def __init__(self, request_data_dic):
        self.job_name = lowDefine.SELF_POLICY_NAME
        self.requestDataDic = request_data_dic
        self.requestID = request_data_dic['requestID']
        self.fileID = request_data_dic['fileID']
        self.failCnt = request_data_dic['failCnt']
        self.env = request_data_dic['env']
        self.targetClusters = self.env['targetClusters']
        self.sourceCluster = self.env['option']['sourceCluster']
        self.sourceNode = self.env['option']['sourceNode']
        self.sharedClusters = self.get_shared_clusters()
        # acks=0: fire-and-forget; replies come back on a topic named
        # after requestID (see wait_consumer), not via producer acks.
        self.producer = KafkaProducer(
            acks=0,
            compression_type='gzip',
            bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
            value_serializer=lambda x: dumps(x).encode('utf-8'))

    def get_shared_clusters(self):
        """Return the first nested cluster group (len > 1), or None.

        BUG FIX: the original compared ``type(item).__name__`` (a str)
        against the ``list`` class itself, which is never equal, so the
        method always fell through and returned None implicitly.
        """
        for item in self.targetClusters:
            if isinstance(item, list) and len(item) > 1:
                return item
        return None

    def check_res_fail(self, res):
        """Return True when a reply is missing any mandatory field."""
        if res is None:
            return True
        if 'hcode' not in res or 'lcode' not in res:
            return True
        if 'msg' not in res or 'result' not in res['msg']:
            return True
        return False

    def request_clusters_latency_from_clusterAgent(self, clusters):
        """Ask the source cluster agent to measure latency to ``clusters``.

        Returns 'process_success' when the Kafka publish went out,
        'process_fail' otherwise.
        """
        try:
            temp_msg = {
                'source': {'type': 'none'},
                'target': {'type': 'cluster', 'object': self.sourceCluster},
                'hcode': 200,
                'lcode': 1,
                'msg': {'requestID': self.requestID,
                        'sourceNode': self.sourceNode,
                        'targetClusters': clusters}
            }
            self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,
                               value=temp_msg)
            self.producer.flush()
        except Exception:
            return 'process_fail'
        return 'process_success'

    def wait_request_clusters_latency_from_clusterAgent(self):
        """Collect the latency reply; return (status, ordered clusters).

        The reply's ``msg.result`` is a list like
        ``[{'cluster': 'c3', 'latency': 11}, ...]`` already sorted by
        latency; only the cluster names are extracted.
        """
        ordered_cluster_list = []
        res = self.wait_consumer()
        if res is None:
            print('res is None')
            return 'process_fail', ordered_cluster_list
        # BUG FIX: validate before indexing -- the original read
        # res['hcode'] etc. before check_res_fail, raising KeyError on
        # malformed replies instead of reporting 'process_fail'.
        if self.check_res_fail(res):
            print('Fail Job:', res)
            return 'process_fail', ordered_cluster_list
        if res['hcode'] == 200 and res['lcode'] == 2:
            for item in res['msg']['result']:
                ordered_cluster_list.append(item['cluster'])
            return 'process_success', ordered_cluster_list
        return 'process_fail', ordered_cluster_list

    def apply_yaml_to_ClusterAgent(self, cluster):
        """Ask ``cluster``'s agent to apply this request's yaml."""
        print('apply_yaml_to_ClusterAgent:', cluster)
        try:
            temp_msg = {
                'source': {'type': 'none'},
                'target': {'type': 'cluster', 'object': cluster},
                'hcode': 210,
                'lcode': 1,
                'msg': {'requestID': self.requestID,
                        'fileID': self.fileID,
                        'requestData': self.requestDataDic}
            }
            self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,
                               value=temp_msg)
            self.producer.flush()
        except Exception:
            return 'process_fail'
        return 'process_success'

    def wait_apply_yaml_to_ClusterAgent(self):
        """Wait for the apply reply and map it to a status string:
        'apply_success' / 'apply_fail' / 'cancel' / 'process_fail'."""
        res = self.wait_consumer()
        if res is None:
            print('res is None')
            return 'process_fail'
        # BUG FIX: validate before indexing (same as the latency wait).
        if self.check_res_fail(res):
            print('Fail Job:', res)
            return 'process_fail'
        hcode = res['hcode']
        lcode = res['lcode']
        result = res['msg']['result']
        print('hcode :hcode,result', hcode, lcode, result)
        if hcode == 210 and lcode == 2:
            if result == 'success':
                return 'apply_success'
            elif result == 'fail':
                return 'apply_fail'
            elif result == 'cancel':
                return 'cancel'
            else:
                return 'process_fail'
        return 'process_fail'

    def wait_consumer(self):
        """Consume one reply from the per-request topic (10 s timeout).

        Returns the decoded message value, or None when nothing arrives
        before the consumer timeout.
        """
        print('wait_consumer')
        consumer = KafkaConsumer(
            self.requestID,
            bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
            auto_offset_reset='earliest',
            enable_auto_commit=True,
            group_id=self.requestID,
            value_deserializer=lambda x: loads(x.decode('utf-8')),
            consumer_timeout_ms=1000 * 10
        )
        print('w-1')
        res = None
        for message in consumer:
            print("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s"
                  % (message.topic, message.partition, message.offset,
                     message.key, message.value))
            res = message.value
            break
        consumer.close()
        return res
def start_job_processor():
    """Poll the front server's dispatched queue and process each request.

    For every dequeued request: nested cluster groups are first ranked
    by measured latency and tried in that order; plain cluster entries
    are tried directly. The final outcome is reported back to the front
    server as failed / completed / canceled.
    """
    print('start_job_processor')
    while 1:
        print('1')
        try:
            res = requests.get(lowDefine.FRONT_SERVER_SERVER_URL
                               + '/ge/sch/gm/fs/dispatched-queue/policys/'
                               + lowDefine.SELF_POLICY_NAME)
        except:
            print('wait front server to run', lowDefine.FRONT_SERVER_SERVER_URL)
            time.sleep(5)
            continue
        if res.status_code != 200:
            print('despatched queue is empty')
            time.sleep(5)
            continue
        print('2')
        request_data_dic = json.loads(res.json())
        print('request_data_dic', request_data_dic)
        GE_Request_Job = GLowLatencyPriority_Job(request_data_dic)
        print('3')
        is_whole_process_status = None
        for item in GE_Request_Job.targetClusters:
            print('type(item)', type(item), item)
            if type(item).__name__ == 'list' and len(item) > 1:
                # A group of clusters: rank them by latency first.
                r = GE_Request_Job.request_clusters_latency_from_clusterAgent(item)
                if r == 'process_fail':
                    print('internal error : request_clusters_latency_from_clusterAgent')
                    continue
                r, clusters = GE_Request_Job.wait_request_clusters_latency_from_clusterAgent()
                if r == 'process_fail':
                    print('internal error : wait_request_clusters_latency_from_clusterAgent')
                    continue
                for t_cluster in clusters:
                    r = GE_Request_Job.apply_yaml_to_ClusterAgent(t_cluster)
                    if r == 'process_fail':
                        print('internal error : apply_yaml_to_ClusterAgent')
                        continue
                    r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
                    if r == 'process_fail':
                        print('internal error : wait_apply_yaml_to_ClusterAgent')
                        continue
                    elif r == 'apply_success' or r == 'cancel':
                        print('---pply_success or cancel', r)
                        is_whole_process_status = r
                        break
                    elif r == 'apply_fail':
                        is_whole_process_status = r
                        continue
                if r == 'apply_success' or r == 'cancel':
                    break
            else:
                # A single cluster entry: try it directly.
                r = GE_Request_Job.apply_yaml_to_ClusterAgent(item)
                if r == 'process_fail':
                    print('internal error : apply_yaml_to_ClusterAgent')
                    continue
                r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
                if r == 'process_fail':
                    print('internal error : wait_apply_yaml_to_ClusterAgent')
                    continue
                elif r == 'apply_success' or r == 'cancel':
                    is_whole_process_status = r
                    print('apply_success or cancel:', r)
                    break
                elif r == 'apply_fail':
                    is_whole_process_status = r
                    print('apply_fail')
                    continue
        print('==============')
        # Map the overall outcome to the queue status the front server
        # expects; anything other than success/fail counts as canceled,
        # matching the original branch structure.
        if is_whole_process_status == 'apply_fail':
            final_status = 'failed'
        elif is_whole_process_status == 'apply_success':
            final_status = 'completed'
        else:
            final_status = 'canceled'
        requests.put(lowDefine.FRONT_SERVER_SERVER_URL
                     + '/ge/sch/gm/fs/dispatched-queue/'
                     + GE_Request_Job.requestID
                     + '/status/' + final_status)
if __name__ == '__main__':
    # Run the polling loop when executed as a script.
    start_job_processor()
| 40.507407 | 161 | 0.540642 | from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka import KafkaAdminClient
import json
from json import dumps
from json import loads
import time
import os
import requests
import sys
import GE_GSCH_low_define as lowDefine
class GLowLatencyPriority_Job:
def __init__(self,request_data_dic):
self.job_name = lowDefine.SELF_POLICY_NAME
self.requestDataDic = request_data_dic
self.requestID=request_data_dic['requestID']
self.fileID=request_data_dic['fileID']
self.failCnt=request_data_dic['failCnt']
self.env=request_data_dic['env']
self.targetClusters=self.env['targetClusters']
self.sourceCluster=self.env['option']['sourceCluster']
self.sourceNode=self.env['option']['sourceNode']
self.sharedClusters = self.get_shared_clusters()
self.producer= KafkaProducer(acks=0,
compression_type='gzip',
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
value_serializer=lambda x: dumps(x).encode('utf-8'))
def get_shared_clusters(self):
for item in self.targetClusters :
if type(item).__name__ == list :
if len(item) > 1 :
return item
else :
return None
else :
print()
def check_res_fail(self, res):
if res == None:
return True
if 'hcode' not in res:
return True
if 'lcode' not in res:
return True
if 'msg' not in res:
return True
if 'result' not in res['msg']:
return True
return False
def request_clusters_latency_from_clusterAgent(self,clusters):
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':self.sourceCluster},
'hcode':200,
'lcode':1,
'msg':{'requestID': self.requestID,'sourceNode': self.sourceNode,'targetClusters': clusters }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_request_clusters_latency_from_clusterAgent(self):
ordered_cluster_list =[]
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail', ordered_cluster_list
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
if is_process_fail:
print('Fail Job:', res)
return 'process_fail', ordered_cluster_list
else:
if hcode == 200 and lcode == 2:
for item in result :
ordered_cluster_list.append(item['cluster'])
return 'process_success', ordered_cluster_list
else :
return 'process_fail', ordered_cluster_list
def apply_yaml_to_ClusterAgent(self,cluster):
print('apply_yaml_to_ClusterAgent:',cluster)
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':cluster},
'hcode':210,
'lcode':1,
'msg':{'requestID': self.requestID,'fileID':self.fileID,'requestData':self.requestDataDic }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_apply_yaml_to_ClusterAgent(self):
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail'
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
print('hcode :hcode,result',hcode,lcode,result)
if is_process_fail:
print('Fail Job:', res)
return 'process_fail'
else:
if hcode == 210 and lcode == 2:
if result == 'success' :
return 'apply_success'
elif result == 'fail' :
return 'apply_fail'
elif result == 'cancel' :
return 'cancel'
else :
return 'process_fail'
else:
return 'process_fail'
def wait_consumer(self):
print('wait_consumer')
consumer = KafkaConsumer(
self.requestID,
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=self.requestID,
value_deserializer=lambda x: loads(x.decode('utf-8')),
consumer_timeout_ms=1000*10
)
print('w-1')
res = None
for message in consumer:
print("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s" % ( message.topic, message.partition, message.offset, message.key, message.value ))
res = message.value
break
consumer.close()
return res
def start_job_processor():
print('start_job_processor')
while 1 :
print('1')
try :
res = requests.get(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/policys/'+lowDefine.SELF_POLICY_NAME)
except:
print('wait front server to run',lowDefine.FRONT_SERVER_SERVER_URL)
time.sleep(5)
continue
if res.status_code == 200 :
print('2')
request_data_dic = json.loads(res.json())
print('request_data_dic',request_data_dic)
GE_Request_Job = GLowLatencyPriority_Job(request_data_dic)
print('3')
is_whole_process_status = None
for item in GE_Request_Job.targetClusters :
print('type(item)',type(item),item)
if type(item).__name__ == 'list' and len(item) > 1 :
r = GE_Request_Job.request_clusters_latency_from_clusterAgent(item)
if r == 'process_fail' :
print('internal error : request_clusters_latency_from_clusterAgent')
continue
r,clusters = GE_Request_Job.wait_request_clusters_latency_from_clusterAgent()
if r == 'process_fail' :
print('internal error : wait_request_clusters_latency_from_clusterAgent')
continue
for t_cluster in clusters:
r = GE_Request_Job.apply_yaml_to_ClusterAgent(t_cluster)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
print('---pply_success or cancel',r)
is_whole_process_status = r
break
elif r == 'apply_fail' :
is_whole_process_status = r
continue
if r == 'apply_success' or r == 'cancel':
break
else :
r = GE_Request_Job.apply_yaml_to_ClusterAgent(item)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
is_whole_process_status = r
print('apply_success or cancel:',r)
break
elif r == 'apply_fail':
is_whole_process_status = r
print('apply_fail')
continue
print('==============')
if is_whole_process_status == 'apply_fail' :
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/failed')
elif is_whole_process_status == 'apply_success' :
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/completed')
elif is_whole_process_status == 'cancel' :
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else :
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else:
print('despatched queue is empty')
time.sleep(5)
continue
if __name__ == '__main__':
start_job_processor()
| true | true |
79002f43bf6e70842ea37699f5200d88ba408601 | 7,762 | py | Python | sdk/python/pulumi_azure_native/netapp/v20200901/account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200901/account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200901/account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), 
pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="activeDirectories")
    def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
        """Active Directories configured on this resource (output property)."""
        return pulumi.get(self, "active_directories")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resource location (output property)."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resource name (output property)."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """Azure lifecycle management provisioning state (output property)."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags as key/value pairs (output property)."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Resource type (output property)."""
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Map provider camelCase output names to Python snake_case; unknown
        # names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case input names to provider camelCase; unknown
        # names pass through unchanged.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.065789 | 2,057 | 0.675213 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), 
pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
79002f5baaebe85ae8242a63f88448cdbd57bc0a | 533 | py | Python | datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .agent import agent
from .ci import ci
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .docs import docs
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
# Every top-level CLI command group exported by this package, in the order
# the modules are imported above.
ALL_COMMANDS = (agent, ci, clean, config, create, dep, docs, env, meta, release, run, test, validate)
| 28.052632 | 101 | 0.763602 |
from .agent import agent
from .ci import ci
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .docs import docs
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
ALL_COMMANDS = (agent, ci, clean, config, create, dep, docs, env, meta, release, run, test, validate)
| true | true |
79002fc8e9f765eae20f2d0e5638eed8ec574acd | 2,701 | py | Python | tests/basics/LateClosureAssignment.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | null | null | null | tests/basics/LateClosureAssignment.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | null | null | null | tests/basics/LateClosureAssignment.py | Mortal/Nuitka | 5150eeff7ff845ed4993c773449cd81b7f127c6b | [
"Apache-2.0"
] | 1 | 2018-12-16T23:51:18.000Z | 2018-12-16T23:51:18.000Z | # Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def closureTest1():
    """Closure sees the *latest* binding of ``d``, not the value at def time."""
    d = 1

    def inner():
        return d

    # Rebinding before the call is what the closure actually observes.
    d = 22222 * 2222
    return inner()
def closureTest2():
    """A closure may reference a name that is only assigned after the def."""
    def inner():
        return d

    d = 2222 * 2222
    return inner()
def closureTest3():
    """Reading a never-assigned global from a nested function raises NameError."""
    def inner():
        return undefined_global  # resolved (and failing) only at call time

    try:
        return inner()
    except NameError:
        return 88
d = 1  # module-level ``d``; deliberately shadowed inside scopeTest4


def scopeTest4():
    try:
        # The (unreachable) assignment below makes ``d`` local to this
        # function, so reading it here raises UnboundLocalError instead of
        # falling back to the module-level ``d``.
        return d
        d = 1
    except UnboundLocalError as e:
        return repr(e)
# Exercise the closure scenarios; the printed lines are the test's output.
print("Test closure where value is overwritten:", closureTest1())
print("Test closure where value is assigned only late:", closureTest2())
print("Test function where closured value is never assigned:", closureTest3())
print("Scope test where UnboundLocalError is expected:", scopeTest4())
def function():
    pass


class ClosureLocalizerClass:
    # A class body reads the module-level ``function`` until the name is
    # assigned in the class scope, after which the class-local value wins.
    print("Function before assigned in a class:", function)

    function = 1

    print("Function after it was assigned in class:", function)


ClosureLocalizerClass()
def ClosureLocalizerFunction():
    try:
        # Unlike a class body, assigning ``function`` anywhere in a function
        # makes it local throughout, so the read on the right-hand side
        # raises UnboundLocalError.
        function = function

        print("Function didn't give unbound local error")
    except UnboundLocalError as e:
        print("Function gave unbound local error when accessing function before assignment:", repr(e))


ClosureLocalizerFunction()
class X:
    def __init__(self, x):
        self.x = x


def changingClosure():
    print("Changing a closure taken value after it was taken.")

    a = 1

    def closureTaker():
        # ``a`` is looked up when closureTaker runs, not when it is defined.
        return X(a)

    x = closureTaker()
    a = 2
    print("Closure value first time:", x.x)
    x = closureTaker()
    print("Closure value second time:", x.x)


changingClosure()
| 23.902655 | 102 | 0.675676 |
from __future__ import print_function
def closureTest1():
d = 1
def subby():
return d
d = 22222*2222
return subby()
def closureTest2():
def subby():
return d
d = 2222*2222
return subby()
def closureTest3():
def subby():
return undefined_global
try:
return subby()
except NameError:
return 88
d = 1
def scopeTest4():
try:
return d
d = 1
except UnboundLocalError as e:
return repr(e)
print("Test closure where value is overwritten:", closureTest1())
print("Test closure where value is assigned only late:", closureTest2())
print("Test function where closured value is never assigned:", closureTest3())
print("Scope test where UnboundLocalError is expected:", scopeTest4())
def function():
pass
class ClosureLocalizerClass:
print("Function before assigned in a class:", function)
function = 1
print("Function after it was assigned in class:", function)
ClosureLocalizerClass()
def ClosureLocalizerFunction():
try:
function = function
print("Function didn't give unbound local error")
except UnboundLocalError as e:
print("Function gave unbound local error when accessing function before assignment:", repr(e))
ClosureLocalizerFunction()
class X:
def __init__(self, x):
self.x = x
def changingClosure():
print("Changing a closure taken value after it was taken.")
a = 1
def closureTaker():
return X(a)
x = closureTaker()
a=2
print("Closure value first time:", x.x)
x = closureTaker()
print("Closure value second time:", x.x)
changingClosure()
| true | true |
79003124fbb1cb58aae990f0214882ce3dfac658 | 5,878 | py | Python | mdrsl/rule_models/mids/objective_function/mids_objective_function_statistics.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 3 | 2020-08-03T19:25:44.000Z | 2021-06-27T22:25:55.000Z | mdrsl/rule_models/mids/objective_function/mids_objective_function_statistics.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | null | null | null | mdrsl/rule_models/mids/objective_function/mids_objective_function_statistics.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 2 | 2020-08-07T22:54:28.000Z | 2021-02-18T06:11:01.000Z | from typing import Optional, Dict
from tabulate import tabulate
import pandas as pd
from mdrsl.utils.value_collection import ValueCollector
class MIDSObjectiveFunctionStatistics:
    """Track the values of the MIDS objective sub-functions over repeated calls.

    For each sub-function f0..f6 and the total f_total, the most recent
    value is kept in a ``last_*`` attribute and every recorded value is
    accumulated in a ``ValueCollector``.
    """

    # Reporting order of the tracked sub-functions.
    _function_names = ('f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f_total')

    def __init__(self):
        # Most recent value of each sub-function (None until add_values runs).
        # Annotated as float: get_last_f_values declares Dict[str, float].
        self.last_f0: Optional[float] = None
        self.last_f1: Optional[float] = None
        self.last_f2: Optional[float] = None
        self.last_f3: Optional[float] = None
        self.last_f4: Optional[float] = None
        self.last_f5: Optional[float] = None
        self.last_f6: Optional[float] = None
        # Kept for backward compatibility; never written by add_values.
        self.last_f7: Optional[float] = None
        self.last_f_total: Optional[float] = None

        self.value_collectors = {name: ValueCollector()
                                 for name in self._function_names}

    def add_values(self, f0, f1, f2, f3, f4, f5, f6, f_total):
        """Record one evaluation of the objective function."""
        new_values = zip(self._function_names,
                         (f0, f1, f2, f3, f4, f5, f6, f_total))
        for name, value in new_values:
            setattr(self, 'last_' + name, value)
            self.value_collectors[name].add_value(value)

    def values_to_pandas_dataframe(self) -> Optional[pd.DataFrame]:
        """Return all collected values as a (type, value) DataFrame.

        Returns None when value collection is globally disabled on
        ValueCollector.
        """
        if not ValueCollector.collect_values:
            return None
        data = [[name, value]
                for name, collector in self.value_collectors.items()
                for value in collector.values]
        return pd.DataFrame(data=data, columns=['type', 'value'])

    def values_to_pandas_dataframe2(self) -> Optional[pd.DataFrame]:
        """Like values_to_pandas_dataframe, but with the call index included."""
        if not ValueCollector.collect_values:
            return None
        data = [[call_index, name, value]
                for name, collector in self.value_collectors.items()
                for call_index, value in enumerate(collector.values)]
        return pd.DataFrame(data=data, columns=['call_index', 'type', 'value'])

    def get_last_f_values(self) -> Dict[str, float]:
        """Return the most recently recorded value of each sub-function."""
        return {name: getattr(self, 'last_' + name)
                for name in self._function_names}

    def __str__(self):
        # One column per sub-function, one row per statistic (replaces the
        # previous 8-way copy-pasted table construction).
        collectors = [self.value_collectors[name]
                      for name in self._function_names]
        rows = [
            ['count'] + [col.count for col in collectors],
            ['sum'] + [col.sum for col in collectors],
            ['min'] + [col.min for col in collectors],
            ['avg'] + [col.get_avg() for col in collectors],
            ['max'] + [col.max for col in collectors],
            ['last_val'] + [getattr(self, 'last_' + name)
                            for name in self._function_names],
        ]
        return tabulate(rows, headers=['type'] + list(self._function_names))
if __name__ == '__main__':
    # Tiny smoke test of ValueCollector on its own.
    vc = ValueCollector()
    vc.add_value(1)
    vc.add_value(2)
    vc.add_value(3)
    print(vc)
| 35.409639 | 81 | 0.525859 | from typing import Optional, Dict
from tabulate import tabulate
import pandas as pd
from mdrsl.utils.value_collection import ValueCollector
class MIDSObjectiveFunctionStatistics:
def __init__(self):
self.last_f0: Optional[int] = None
self.last_f1: Optional[int] = None
self.last_f2: Optional[int] = None
self.last_f3: Optional[int] = None
self.last_f4: Optional[int] = None
self.last_f5: Optional[int] = None
self.last_f6: Optional[int] = None
self.last_f7: Optional[int] = None
self.last_f_total: Optional[int] = None
self.value_collectors = dict(
f0=ValueCollector(),
f1=ValueCollector(),
f2=ValueCollector(),
f3=ValueCollector(),
f4=ValueCollector(),
f5=ValueCollector(),
f6=ValueCollector(),
f_total=ValueCollector()
)
def add_values(self, f0, f1, f2, f3, f4, f5, f6, f_total):
self.last_f0 = f0
self.last_f1 = f1
self.last_f2 = f2
self.last_f3 = f3
self.last_f4 = f4
self.last_f5 = f5
self.last_f6 = f6
self.last_f_total = f_total
self.value_collectors['f0'].add_value(f0)
self.value_collectors['f1'].add_value(f1)
self.value_collectors['f2'].add_value(f2)
self.value_collectors['f3'].add_value(f3)
self.value_collectors['f4'].add_value(f4)
self.value_collectors['f5'].add_value(f5)
self.value_collectors['f6'].add_value(f6)
self.value_collectors['f_total'].add_value(f_total)
def values_to_pandas_dataframe(self) -> Optional[pd.DataFrame]:
if ValueCollector.collect_values:
columns = ['type', 'value']
data = []
for function_name, value_collector in self.value_collectors.items():
for value in value_collector.values:
data.append([function_name, value])
df = pd.DataFrame(data=data, columns=columns)
return df
else:
return None
def values_to_pandas_dataframe2(self) -> Optional[pd.DataFrame]:
if ValueCollector.collect_values:
columns = ['call_index', 'type', 'value']
data = []
for function_name, value_collector in self.value_collectors.items():
for call_index, value in enumerate(value_collector.values):
data.append([call_index, function_name, value])
df = pd.DataFrame(data=data, columns=columns)
return df
else:
return None
def get_last_f_values(self) -> Dict[str, float]:
return dict(
f0=self.last_f0,
f1=self.last_f1,
f2=self.last_f2,
f3=self.last_f3,
f4=self.last_f4,
f5=self.last_f5,
f6=self.last_f6,
f_total=self.last_f_total)
def __str__(self):
table_str = tabulate(
[
['count',
self.value_collectors['f0'].count,
self.value_collectors['f1'].count,
self.value_collectors['f2'].count,
self.value_collectors['f3'].count,
self.value_collectors['f4'].count,
self.value_collectors['f5'].count,
self.value_collectors['f6'].count,
self.value_collectors['f_total'].count
],
['sum',
self.value_collectors['f0'].sum,
self.value_collectors['f1'].sum,
self.value_collectors['f2'].sum,
self.value_collectors['f3'].sum,
self.value_collectors['f4'].sum,
self.value_collectors['f5'].sum,
self.value_collectors['f6'].sum,
self.value_collectors['f_total'].sum
],
['min',
self.value_collectors['f0'].min,
self.value_collectors['f1'].min,
self.value_collectors['f2'].min,
self.value_collectors['f3'].min,
self.value_collectors['f4'].min,
self.value_collectors['f5'].min,
self.value_collectors['f6'].min,
self.value_collectors['f_total'].min
],
['avg',
self.value_collectors['f0'].get_avg(),
self.value_collectors['f1'].get_avg(),
self.value_collectors['f2'].get_avg(),
self.value_collectors['f3'].get_avg(),
self.value_collectors['f4'].get_avg(),
self.value_collectors['f5'].get_avg(),
self.value_collectors['f6'].get_avg(),
self.value_collectors['f_total'].get_avg()
],
['max',
self.value_collectors['f0'].max,
self.value_collectors['f1'].max,
self.value_collectors['f2'].max,
self.value_collectors['f3'].max,
self.value_collectors['f4'].max,
self.value_collectors['f5'].max,
self.value_collectors['f6'].max,
self.value_collectors['f_total'].max
],
['last_val',
self.last_f0,
self.last_f1,
self.last_f2,
self.last_f3,
self.last_f4,
self.last_f5,
self.last_f6,
self.last_f_total
]
],
headers=['type', 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f_total']
)
return table_str
if __name__ == '__main__':
vc = ValueCollector()
vc.add_value(1)
vc.add_value(2)
vc.add_value(3)
print(vc)
| true | true |
79003367c52efec90642fcb48d84092c47890a17 | 2,002 | py | Python | script/StockScraper-master/update_market_cap_yahoo.py | pettersoderlund/fondout | 99b14eaa8c6eb56fd862ab9bdf6acc8d537d4a31 | [
"BSD-3-Clause"
] | null | null | null | script/StockScraper-master/update_market_cap_yahoo.py | pettersoderlund/fondout | 99b14eaa8c6eb56fd862ab9bdf6acc8d537d4a31 | [
"BSD-3-Clause"
] | 4 | 2016-10-18T18:30:08.000Z | 2016-11-05T09:22:29.000Z | script/StockScraper-master/update_market_cap_yahoo.py | pettersoderlund/fondout | 99b14eaa8c6eb56fd862ab9bdf6acc8d537d4a31 | [
"BSD-3-Clause"
] | null | null | null | """ YQL out mkt cap and currency to fill out yahoo table """
""" TODO: retrieve lists of 100 symbols from database and update"""
""" Results are intented to use while matching yahoo tickers, which one has mkt cap? which ones has sector? """
import mysql.connector
import stockretriever
import sys
import time
from random import randint
# NOTE(review): Python 2 script (print statements, ``except Exc, e`` syntax).
# Connect to the local ``yahoo`` schema.
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()

sleeptime = 10  # upper bound (seconds) of the random pause between symbols

# Upsert: insert market cap/currency, or refresh them for an existing symbol.
add_market_cap = ("INSERT INTO stocks "
    "(symbol, market_cap, currency) "
    "VALUES (%s, %s, %s) "
    "ON DUPLICATE KEY UPDATE market_cap=VALUES(market_cap), currency=VALUES(currency)")

# Only symbols that have never been enriched with market cap / currency.
get_new_symbols = """SELECT symbol
                FROM yahoo.stocks
                WHERE market_cap is NULL
                and currency is NULL"""

try:
    cursor.execute(get_new_symbols)
except mysql.connector.errors.IntegrityError, e:
    print(e)

for result in cursor.fetchall():
    for symbol in result:
        data = []
        market_cap = ""
        currency = ""
        # Fetch quote data from Yahoo (YQL) for this symbol.
        try:
            data = stockretriever.get_current_info([symbol])
        except TypeError as e:
            #print "Typerror {0}: {1}".format(e.errno, e.strerror)
            print "Type error, could not fetch current info on ", symbol
        except Exception as e:
            print(e)
        try:
            currency = data['Currency']
            market_cap = data['MarketCapitalization']
        except Exception as e:
            # Missing fields (or a failed fetch above): skip this symbol.
            print "No currency or mkt cap error", e
            continue
        data_company = (symbol, market_cap, currency)
        try:
            cursor.execute(add_market_cap, data_company)
        except mysql.connector.errors.IntegrityError, e:
            print(e)
            continue
        try:
            print "Success updating", symbol, currency, market_cap
        except UnicodeEncodeError as e:
            print e
        # Commit per symbol, then pause randomly to avoid hammering the API.
        cnx.commit()
        time.sleep(randint(0,sleeptime))

cursor.close()
cnx.close()
| 27.805556 | 111 | 0.618881 | """ YQL out mkt cap and currency to fill out yahoo table """
""" TODO: retreive lists of 100 symbols from database and update"""
""" Results are intented to use while matching yahoo tickers, which one has mkt cap? which ones has sector? """
import mysql.connector
import stockretriever
import sys
import time
from random import randint
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()
sleeptime = 10
add_market_cap = ("INSERT INTO stocks "
"(symbol, market_cap, currency) "
"VALUES (%s, %s, %s) "
"ON DUPLICATE KEY UPDATE market_cap=VALUES(market_cap), currency=VALUES(currency)")
get_new_symbols = """SELECT symbol
FROM yahoo.stocks
WHERE market_cap is NULL
and currency is NULL"""
try:
cursor.execute(get_new_symbols)
except mysql.connector.errors.IntegrityError, e:
print(e)
for result in cursor.fetchall():
for symbol in result:
data = []
market_cap = ""
currency = ""
try:
data = stockretriever.get_current_info([symbol])
except TypeError as e:
print "Type error, could not fetch current info on ", symbol
except Exception as e:
print(e)
try:
currency = data['Currency']
market_cap = data['MarketCapitalization']
except Exception as e:
print "No currency or mkt cap error", e
continue
data_company = (symbol, market_cap, currency)
try:
cursor.execute(add_market_cap, data_company)
except mysql.connector.errors.IntegrityError, e:
print(e)
continue
try:
print "Success updating", symbol, currency, market_cap
except UnicodeEncodeError as e:
print e
cnx.commit()
time.sleep(randint(0,sleeptime))
cursor.close()
cnx.close()
| false | true |
790033685ccacb6d853dabbe6f28c62b0cb1fbbf | 930 | py | Python | Reduce_hessian/tests/B1.py | kuanhanl/k_aug | 5ceaccbf9e699a9dffe284de686f1b623cafbec5 | [
"BSD-3-Clause"
] | null | null | null | Reduce_hessian/tests/B1.py | kuanhanl/k_aug | 5ceaccbf9e699a9dffe284de686f1b623cafbec5 | [
"BSD-3-Clause"
] | null | null | null | Reduce_hessian/tests/B1.py | kuanhanl/k_aug | 5ceaccbf9e699a9dffe284de686f1b623cafbec5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 14:25:43 2020
@author: greg6
"""
import numpy as np
t = [i for i in range(3)]                # time points
lam = [100 + i * 10 for i in range(2)]   # wavelength-like indices
com = ["A", "B", "C"]                    # component names

# S[(l, c)]: value per (wavelength, component).
S = dict()
for l in lam:
    for u, c in enumerate(com):
        S[(l, c)] = l + 0.1 * u

# C[(i, c)]: value per (time point, component).
C = dict()
for i in t:
    for u, c in enumerate(com):
        C[(i, c)] = (i + 0.1 * u)

nt = len(t)
nw = len(lam)
nc = len(com)
nparams = 2
nd = nw * nt
ntheta = nc * (nw + nt) + nparams

# Rows [0, nc*nt) take S values, rows [nc*nt, nc*(nt+nw)) take C values;
# the final nparams rows stay zero.  Columns index the (time, wavelength)
# grid.
B_matrix = np.zeros((ntheta, nw * nt))
# FIX: the original iterated ``for i, t in enumerate(t)``, clobbering the
# module-level list ``t`` with the current scalar; use a distinct name.
for i, ti in enumerate(t):
    for j, l in enumerate(lam):
        for k, c in enumerate(com):
            r_idx1 = i * nc + k             # S row for (time i, component k)
            r_idx2 = j * nc + k + nc * nt   # C row for (wavelength j, component k)
            c_idx = i * nw + j              # column for (time i, wavelength j)
            B_matrix[r_idx1, c_idx] = S[l, c]
            B_matrix[r_idx2, c_idx] = C[ti, c]
import numpy as np
t = [i for i in range(3)]
lam = [100+i*10 for i in range(2)]
com = ["A","B","C"]
S = dict()
for l in lam:
for u,c in enumerate(com):
S[(l,c)] = l+0.1*u
C = dict()
for i in t:
for u,c in enumerate(com):
C[(i,c)] = (i+0.1*u)
nt = len(t)
nw = len(lam)
nc = len(com)
nparams = 2
nd = nw*nt
ntheta = nc*(nw+nt)+nparams
B_matrix = np.zeros((ntheta,nw*nt))
for i, t in enumerate(t):
for j, l in enumerate(lam):
for k, c in enumerate(com):
r_idx1 = i * nc + k
r_idx2 = j * nc + k + nc * nt
c_idx = i * nw + j
B_matrix[r_idx1, c_idx] = S[l, c]
B_matrix[r_idx2, c_idx] = C[t, c] | true | true |
790033a5fdaa75c3d3375d2484b4f4254fdf6bff | 41,874 | py | Python | mne/io/kit/kit.py | vpeterson/mne-python | a6e2222a7e76f5b13a371697b1b61d22ac5bf67d | [
"BSD-3-Clause"
] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | mne/io/kit/kit.py | vpeterson/mne-python | a6e2222a7e76f5b13a371697b1b61d22ac5bf67d | [
"BSD-3-Clause"
] | null | null | null | mne/io/kit/kit.py | vpeterson/mne-python | a6e2222a7e76f5b13a371697b1b61d22ac5bf67d | [
"BSD-3-Clause"
] | 2 | 2021-04-28T11:52:52.000Z | 2021-05-05T02:36:32.000Z | """Conversion tool from SQD to FIF.
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from collections import defaultdict, OrderedDict
from math import sin, cos
from os import SEEK_CUR, path as op
from struct import unpack
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...utils import (verbose, logger, warn, fill_doc, _check_option,
_stamp_to_dt)
from ...transforms import apply_trans, als_ras_trans
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ...epochs import BaseEpochs
from ..constants import FIFF
from ..meas_info import _empty_info
from .constants import KIT, LEGACY_AMP_PARAMS
from .coreg import read_mrk
from ...event import read_events
from .._digitization import _set_dig_kit
def _call_digitization(info, mrk, elp, hsp, kit_info):
# Use values from kit_info only if all others are None
if mrk is None and elp is None and hsp is None:
mrk = kit_info.get('mrk', None)
elp = kit_info.get('elp', None)
hsp = kit_info.get('hsp', None)
# prepare mrk
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, str)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
# setup digitization
if mrk is not None and elp is not None and hsp is not None:
dig_points, dev_head_t = _set_dig_kit(
mrk, elp, hsp, kit_info['eeg_dig'])
info['dig'] = dig_points
info['dev_head_t'] = dev_head_t
elif mrk is not None or elp is not None or hsp is not None:
raise ValueError("mrk, elp and hsp need to be provided as a group "
"(all or none)")
return info
class UnsupportedKITFormat(ValueError):
    """Our reader is not guaranteed to work with old files."""

    def __init__(self, sqd_version, *args, **kwargs):  # noqa: D102
        # Remember the offending SQD format version so callers can report it.
        self.sqd_version = sqd_version
        super().__init__(*args, **kwargs)
@fill_doc
class RawKIT(BaseRaw):
"""Raw object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>' | None
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes. If None, no synthesized channel is generated.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event. If None, stim must also be set to None.
%(preload)s
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' read stim
channel events as binary code, 'channel' encodes channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
refers to the points in head-space that corresponds to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
    @verbose
    def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
                 slope='-', stimthresh=1, preload=False, stim_code='binary',
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):  # noqa: D102
        logger.info('Extracting SQD Parameters from %s...' % input_fname)
        input_fname = op.abspath(input_fname)
        # Start unloaded; BaseRaw.__init__ below performs the actual preload.
        self.preload = False
        logger.info('Creating Raw.info structure...')
        info, kit_info = get_kit_info(
            input_fname, allow_unknown_format, standardize_names)
        kit_info['slope'] = slope
        kit_info['stimthresh'] = stimthresh
        if kit_info['acq_type'] != KIT.CONTINUOUS:
            raise TypeError('SQD file contains epochs, not raw data. Wrong '
                            'reader.')
        logger.info('Creating Info structure...')
        last_samps = [kit_info['n_samples'] - 1]
        self._raw_extras = [kit_info]
        # Append the synthesized stim channel to info before BaseRaw sees it.
        self._set_stimchannels(info, stim, stim_code)
        super(RawKIT, self).__init__(
            info, preload, last_samps=last_samps, filenames=[input_fname],
            raw_extras=self._raw_extras, verbose=verbose)
        # self.info exists only after BaseRaw.__init__, so digitization
        # (mrk/elp/hsp) is applied last.
        self.info = _call_digitization(
            info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
        logger.info('Ready.')
def read_stim_ch(self, buffer_size=1e5):
"""Read events from data.
Parameter
---------
buffer_size : int
The size of chunk to by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
"""
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int64)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x = self[pick, b_start:b_stop][0]
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
    def _set_stimchannels(self, info, stim, stim_code):
        """Specify how the trigger channel is synthesized from analog channels.

        Has to be done before loading data. For a RawKIT instance that has been
        created with preload=True, this method will raise a
        NotImplementedError.

        Parameters
        ----------
        info : instance of MeasInfo
            The measurement info.
        stim : list of int | '<' | '>' | None
            Can be submitted as list of trigger channels.
            If a list is not specified, the default triggers extracted from
            misc channels will be used with specified directionality.
            '<' means that largest values assigned to the first channel
            in sequence.
            '>' means the largest trigger assigned to the last channel
            in sequence.
            If None, no stim channel is appended.
        stim_code : 'binary' | 'channel'
            How to decode trigger values from stim channels. 'binary' read stim
            channel events as binary code, 'channel' encodes channel number.
        """
        if self.preload:
            raise NotImplementedError("Can't change stim channel after "
                                      "loading data")
        _check_option('stim_code', stim_code, ['binary', 'channel'])
        if stim is not None:
            if isinstance(stim, str):
                picks = _default_stim_chs(info)
                if stim == '<':
                    # reversed pick order: largest values -> first channel
                    stim = picks[::-1]
                elif stim == '>':
                    stim = picks
                else:
                    raise ValueError("stim needs to be list of int, '>' or "
                                     "'<', not %r" % str(stim))
            else:
                stim = np.asarray(stim, int)
                if stim.max() >= self._raw_extras[0]['nchan']:
                    raise ValueError(
                        'Got stim=%s, but sqd file only has %i channels' %
                        (stim, self._raw_extras[0]['nchan']))
            # modify info: append a synthetic 'STI 014' stim channel
            nchan = self._raw_extras[0]['nchan'] + 1
            info['chs'].append(dict(
                cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,
                unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE,
                ch_name='STI 014',
                coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),
                kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN))
            info._update_redundant()
        # _read_segment_file reads these back when synthesizing the channel
        self._raw_extras[0]['stim'] = stim
        self._raw_extras[0]['stim_code'] = stim_code
    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        sqd = self._raw_extras[fi]
        nchan = sqd['nchan']
        # number of scalar values (samples * channels) still to read
        data_left = (stop - start) * nchan
        conv_factor = sqd['conv_factor']
        n_bytes = sqd['dtype'].itemsize
        assert n_bytes in (2, 4)
        # Read up to 100 MB of data at a time.
        blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)
        with open(self._filenames[fi], 'rb', buffering=0) as fid:
            # extract data
            pointer = start * nchan * n_bytes
            fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)
            stim = sqd['stim']
            for blk_start in np.arange(0, data_left, blk_size) // nchan:
                blk_size = min(blk_size, data_left - blk_start * nchan)
                block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size)
                # Fortran-order reshape: the file is sample-major (all
                # channels of sample 0, then sample 1, ...), target shape
                # is (nchan, n_samples).
                block = block.reshape(nchan, -1, order='F').astype(float)
                blk_stop = blk_start + block.shape[1]
                data_view = data[:, blk_start:blk_stop]
                block *= conv_factor
                # Create a synthetic stim channel
                if stim is not None:
                    stim_ch = _make_stim_channel(
                        block[stim, :], sqd['slope'], sqd['stimthresh'],
                        sqd['stim_code'], stim)
                    block = np.vstack((block, stim_ch))
                _mult_cal_one(data_view, block, idx, cals, mult)
        # cals are all unity, so can be ignored
def _default_stim_chs(info):
    """Return the first eight misc-channel picks as default stim channels."""
    picks = pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])
    return picks[:8]
def _make_stim_channel(trigger_chs, slope, threshold, stim_code,
trigger_values):
"""Create synthetic stim channel from multiple trigger channels."""
if slope == '+':
trig_chs_bin = trigger_chs > threshold
elif slope == '-':
trig_chs_bin = trigger_chs < threshold
else:
raise ValueError("slope needs to be '+' or '-'")
# trigger value
if stim_code == 'binary':
trigger_values = 2 ** np.arange(len(trigger_chs))
elif stim_code != 'channel':
raise ValueError("stim_code must be 'binary' or 'channel', got %s" %
repr(stim_code))
trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]
return np.array(trig_chs.sum(axis=0), ndmin=2)
class EpochsKIT(BaseEpochs):
    """Epochs Array object from KIT SQD file.

    Parameters
    ----------
    input_fname : str
        Path to the sqd file.
    events : str | array, shape (n_events, 3)
        Path to events file. If array, it is the events typically returned
        by the read_events function. If some events don't match the events
        of interest as specified by event_id, they will be marked as 'IGNORED'
        in the drop log.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used
        and a dict is created with string integer names corresponding
        to the event id integers.
    tmin : float
        Start time before event.
    baseline : None or tuple of length 2 (default (None, 0))
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
        The baseline (a, b) includes both endpoints, i.e. all
        timepoints t such that a <= t <= b.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # V (EEG channels)
                          eog=250e-6 # V (EOG channels)
                          )

    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
        Marker points representing the location of the marker coils with
        respect to the MEG Sensors, or path to a marker file.
        If list, all of the markers will be averaged together.
    elp : None | str | array_like, shape = (8, 3)
        Digitizer points representing the location of the fiducials and the
        marker coils with respect to the digitized head shape, or path to a
        file containing these points.
    hsp : None | str | array, shape = (n_points, 3)
        Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
    allow_unknown_format : bool
        Force reading old data that is not officially supported. Alternatively,
        read and re-save the data with the KIT MEG Laboratory application.
    %(standardize_names)s
    %(verbose)s

    Notes
    -----
    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
    Polhemus FastScan system. hsp refers to the headshape surface points. elp
    refers to the points in head-space that corresponds to the HPI points.
    Currently, '*.elp' and '*.hsp' files are NOT supported.

    See Also
    --------
    mne.Epochs : Documentation of attribute and methods.
    """

    @verbose
    def __init__(self, input_fname, events, event_id=None, tmin=0,
                 baseline=None, reject=None, flat=None, reject_tmin=None,
                 reject_tmax=None, mrk=None, elp=None, hsp=None,
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):  # noqa: D102
        # events may be given as a path to an events file
        if isinstance(events, str):
            events = read_events(events)
        logger.info('Extracting KIT Parameters from %s...' % input_fname)
        input_fname = op.abspath(input_fname)
        self.info, kit_info = get_kit_info(
            input_fname, allow_unknown_format, standardize_names)
        kit_info.update(filename=input_fname)
        self._raw_extras = [kit_info]
        self._filenames = []
        if len(events) != self._raw_extras[0]['n_epochs']:
            raise ValueError('Event list does not match number of epochs.')
        if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:
            self._raw_extras[0]['data_length'] = KIT.INT
        else:
            raise TypeError('SQD file contains raw data, not epochs or '
                            'average. Wrong reader.')
        if event_id is None:  # convert to int to make typing-checks happy
            event_id = {str(e): int(e) for e in np.unique(events[:, 2])}
        for key, val in event_id.items():
            if val not in events[:, 2]:
                raise ValueError('No matching events found for %s '
                                 '(event id %i)' % (key, val))
        data = self._read_kit_data()
        assert data.shape == (self._raw_extras[0]['n_epochs'],
                              self.info['nchan'],
                              self._raw_extras[0]['frame_length'])
        tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
        super(EpochsKIT, self).__init__(
            self.info, data, events, event_id, tmin, tmax, baseline,
            reject=reject, flat=flat, reject_tmin=reject_tmin,
            reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)
        # digitization last: self.info is finalized by BaseEpochs.__init__
        self.info = _call_digitization(
            info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
        logger.info('Ready.')

    def _read_kit_data(self):
        """Read epochs data.

        Returns
        -------
        data : array, shape (n_epochs, nchan, frame_length)
            The epochs data, scaled by the per-channel conversion factors.
        """
        info = self._raw_extras[0]
        epoch_length = info['frame_length']
        n_epochs = info['n_epochs']
        n_samples = info['n_samples']
        filename = info['filename']
        dtype = info['dtype']
        nchan = info['nchan']
        with open(filename, 'rb', buffering=0) as fid:
            fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'])
            count = n_samples * nchan
            data = np.fromfile(fid, dtype=dtype, count=count)
        # file layout is sample-major; bring to (nchan, n_samples)
        data = data.reshape((n_samples, nchan)).T
        # ADC counts -> physical units (per-channel factors)
        data = data * info['conv_factor']
        data = data.reshape((nchan, n_epochs, epoch_length))
        data = data.transpose((1, 0, 2))
        return data
def _read_dir(fid):
return dict(offset=np.fromfile(fid, np.uint32, 1)[0],
size=np.fromfile(fid, np.int32, 1)[0],
max_count=np.fromfile(fid, np.int32, 1)[0],
count=np.fromfile(fid, np.int32, 1)[0])
@verbose
def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
                 verbose=None):
    """Extract all the information from the sqd/con file.

    Parameters
    ----------
    rawfile : str
        KIT file to be read.
    allow_unknown_format : bool
        Force reading old data that is not officially supported. Alternatively,
        read and re-save the data with the KIT MEG Laboratory application.
    %(standardize_names)s
    %(verbose)s

    Returns
    -------
    info : instance of Info
        An Info for the instance.
    sqd : dict
        A dict containing all the sqd parameter settings.
    """
    sqd = dict()
    sqd['rawfile'] = rawfile
    unsupported_format = False
    sqd['dirs'] = dirs = list()
    with open(rawfile, 'rb', buffering=0) as fid:  # buffering=0 for np bug
        #
        # directories (0)
        #
        dirs.append(_read_dir(fid))
        dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1))
        assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']

        #
        # system (1)
        #
        fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset'])
        # check file format version
        version, revision = unpack('2i', fid.read(2 * KIT.INT))
        if version < 2 or (version == 2 and revision < 3):
            version_string = "V%iR%03i" % (version, revision)
            if allow_unknown_format:
                unsupported_format = True
                logger.warning("Force loading KIT format %s", version_string)
            else:
                raise UnsupportedKITFormat(
                    version_string,
                    "SQD file format %s is not officially supported. "
                    "Set allow_unknown_format=True to load it anyways." %
                    (version_string,))

        sysid = unpack('i', fid.read(KIT.INT))[0]
        # basic info
        system_name = unpack('128s', fid.read(128))[0].decode()
        # model name
        model_name = unpack('128s', fid.read(128))[0].decode()
        # channels
        sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]
        comment = unpack('256s', fid.read(256))[0].decode()
        create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))
        fid.seek(KIT.INT * 3, SEEK_CUR)  # reserved
        dewar_style = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        fll_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        trigger_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        adboard_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 29, SEEK_CUR)  # reserved

        if version < 2 or (version == 2 and revision <= 3):
            adc_range = float(unpack('i', fid.read(KIT.INT))[0])
        else:
            adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]
        adc_polarity, adc_allocated, adc_stored = unpack('3i',
                                                         fid.read(3 * KIT.INT))
        system_name = system_name.replace('\x00', '')
        system_name = system_name.strip().replace('\n', '/')
        model_name = model_name.replace('\x00', '')
        model_name = model_name.strip().replace('\n', '/')

        full_version = f'V{version:d}R{revision:03d}'
        logger.debug("SQD file basic information:")
        logger.debug("Meg160 version = %s", full_version)
        logger.debug("System ID = %i", sysid)
        logger.debug("System name = %s", system_name)
        logger.debug("Model name = %s", model_name)
        logger.debug("Channel count = %i", channel_count)
        logger.debug("Comment = %s", comment)
        logger.debug("Dewar style = %i", dewar_style)
        logger.debug("FLL type = %i", fll_type)
        logger.debug("Trigger type = %i", trigger_type)
        logger.debug("A/D board type = %i", adboard_type)
        logger.debug("ADC range = +/-%s[V]", adc_range / 2.)
        logger.debug("ADC allocate = %i[bit]", adc_allocated)
        logger.debug("ADC bit = %i[bit]", adc_stored)
        # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH'
        description = \
            f'{system_name} ({sysid}) {full_version} {model_name}'
        sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}'))

        # check that we can read this file
        if fll_type not in KIT.FLL_SETTINGS:
            fll_types = sorted(KIT.FLL_SETTINGS.keys())
            use_fll_type = fll_types[
                np.searchsorted(fll_types, fll_type) - 1]
            warn('Unknown site filter settings (FLL) for system '
                 '"%s" model "%s" (ID %s), will assume FLL %d->%d, check '
                 'your data for correctness, including channel scales and '
                 'filter settings!'
                 % (system_name, model_name, sysid, fll_type, use_fll_type))
            fll_type = use_fll_type

        #
        # channel information (4)
        #
        chan_dir = dirs[KIT.DIR_INDEX_CHANNELS]
        chan_offset, chan_size = chan_dir['offset'], chan_dir['size']
        sqd['channels'] = channels = []
        exg_gains = list()
        for i in range(channel_count):
            fid.seek(chan_offset + chan_size * i)
            channel_type, = unpack('i', fid.read(KIT.INT))
            # System 52 mislabeled reference channels as NULL. This was fixed
            # in system 53; not sure about 51...
            if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:
                channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE

            if channel_type in KIT.CHANNELS_MEG:
                if channel_type not in KIT.CH_TO_FIFF_COIL:
                    raise NotImplementedError(
                        "KIT channel type %i can not be read. Please contact "
                        "the mne-python developers." % channel_type)
                channels.append({
                    'type': channel_type,
                    # (x, y, z, theta, phi) for all MEG channels. Some channel
                    # types have additional information which we're not using.
                    'loc': np.fromfile(fid, dtype='d', count=5),
                })
                if channel_type in KIT.CHANNEL_NAME_NCHAR:
                    fid.seek(16, SEEK_CUR)  # misc fields
                    channels[-1]['name'] = _read_name(fid, channel_type)
            elif channel_type in KIT.CHANNELS_MISC:
                channel_no, = unpack('i', fid.read(KIT.INT))
                fid.seek(4, SEEK_CUR)
                name = _read_name(fid, channel_type)
                channels.append({
                    'type': channel_type,
                    'no': channel_no,
                    'name': name,
                })
                if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):
                    offset = 6 if channel_type == KIT.CHANNEL_EEG else 8
                    fid.seek(offset, SEEK_CUR)
                    exg_gains.append(np.fromfile(fid, 'd', 1)[0])
            elif channel_type == KIT.CHANNEL_NULL:
                channels.append({'type': channel_type})
            else:
                raise IOError("Unknown KIT channel type: %i" % channel_type)
        exg_gains = np.array(exg_gains)

        #
        # Channel sensitivity information: (5)
        #
        # only sensor channels requires gain. the additional misc channels
        # (trigger channels, audio and voice channels) are passed
        # through unaffected
        fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset'])
        # (offset [Volt], gain [Tesla/Volt]) for each channel
        sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)
        sensitivity.shape = (channel_count, 2)
        channel_offset, channel_gain = sensitivity.T
        assert (channel_offset == 0).all()  # otherwise we have a problem

        #
        # amplifier gain (7)
        #
        fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset'])
        amp_data = unpack('i', fid.read(KIT.INT))[0]
        if fll_type >= 100:  # Kapper Type
            # gain: mask bit
            gain1 = (amp_data & 0x00007000) >> 12
            gain2 = (amp_data & 0x70000000) >> 28
            gain3 = (amp_data & 0x07000000) >> 24
            amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])
            # filter settings
            hpf = (amp_data & 0x00000700) >> 8
            lpf = (amp_data & 0x00070000) >> 16
            bef = (amp_data & 0x00000003) >> 0
        else:  # Hanger Type
            # gain
            input_gain = (amp_data & 0x1800) >> 11
            output_gain = (amp_data & 0x0007) >> 0
            amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]
            # filter settings
            # Bugfix: the HPF index lives in bits 4-6, so the mask must be
            # 0x0070. The previous mask 0x007 covered bits 0-2 (the output
            # gain), which the >> 4 shifted away, so hpf was always 0.
            hpf = (amp_data & 0x0070) >> 4
            lpf = (amp_data & 0x0700) >> 8
            bef = (amp_data & 0xc000) >> 14
        hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]
        sqd['highpass'] = KIT.HPFS[hpf_options][hpf]
        sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]
        sqd['notch'] = KIT.BEFS[bef_options][bef]

        #
        # Acquisition Parameters (8)
        #
        fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset'])
        sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))
        sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))
        if acq_type == KIT.CONTINUOUS:
            # samples_count, = unpack('i', fid.read(KIT.INT))
            fid.seek(KIT.INT, SEEK_CUR)
            sqd['n_samples'], = unpack('i', fid.read(KIT.INT))
        elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:
            sqd['frame_length'], = unpack('i', fid.read(KIT.INT))
            sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))
            sqd['average_count'], = unpack('i', fid.read(KIT.INT))
            sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))
            if acq_type == KIT.EVOKED:
                sqd['n_samples'] = sqd['frame_length']
            else:
                sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
        else:
            raise IOError("Invalid acquisition type: %i. Your file is neither "
                          "continuous nor epoched data." % (acq_type,))

        #
        # digitization information (12 and 26)
        #
        dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS]
        cor_dir = dirs[KIT.DIR_INDEX_COREG]
        dig = dict()
        hsp = list()
        if dig_dir['count'] > 0 and cor_dir['count'] > 0:
            # directories (0)
            fid.seek(dig_dir['offset'])
            for _ in range(dig_dir['count']):
                name = _read_name(fid, n=8).strip()
                # Sometimes there are mismatches (e.g., AFz vs AFZ) between
                # the channel name and its digitized, name, so let's be case
                # insensitive. It will also prevent collisions with HSP
                name = name.lower()
                rr = np.fromfile(fid, 'd', 3)
                if name:
                    assert name not in dig
                    dig[name] = rr
                else:
                    hsp.append(rr)

            # nasion, lpa, rpa, HPI in native space
            elp = [dig.pop(key) for key in (
                'fidnz', 'fidt9', 'fidt10',
                'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')]
            if 'hpi_5' in dig and dig['hpi_5'].any():
                elp.append(dig.pop('hpi_5'))
            elp = np.array(elp)
            hsp = np.array(hsp, float).reshape(-1, 3)
            assert elp.shape in ((7, 3), (8, 3))
            # coregistration
            fid.seek(cor_dir['offset'])
            mrk = np.zeros((elp.shape[0] - 3, 3))
            for _ in range(cor_dir['count']):
                done = np.fromfile(fid, np.int32, 1)[0]
                fid.seek(16 * KIT.DOUBLE +  # meg_to_mri
                         16 * KIT.DOUBLE,  # mri_to_meg
                         SEEK_CUR)
                marker_count = np.fromfile(fid, np.int32, 1)[0]
                if not done:
                    continue
                assert marker_count >= len(mrk)
                for mi in range(len(mrk)):
                    mri_type, meg_type, mri_done, meg_done = \
                        np.fromfile(fid, np.int32, 4)
                    assert meg_done
                    fid.seek(3 * KIT.DOUBLE, SEEK_CUR)  # mri_pos
                    mrk[mi] = np.fromfile(fid, 'd', 3)
                fid.seek(256, SEEK_CUR)  # marker_file (char)
            sqd.update(hsp=hsp, elp=elp, mrk=mrk)

    all_names = set(ch.get('name', '') for ch in channels)
    if standardize_names is None and all_names.difference({'', 'EEG'}):
        standardize_names = True
        warn('standardize_names defaults to True in 0.21 but will change '
             'to False in 0.22', DeprecationWarning)

    # precompute conversion factor for reading data
    if unsupported_format:
        if sysid not in LEGACY_AMP_PARAMS:
            raise IOError("Legacy parameters for system ID %i unavailable" %
                          (sysid,))
        adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]
    is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])
    ad_to_volt = adc_range / (2 ** adc_stored)
    ad_to_tesla = ad_to_volt / amp_gain * channel_gain
    conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)
    # XXX this is a bit of a hack. Should probably do this more cleanly at
    # some point... the 2 ** (adc_stored - 14) was emperically determined using
    # the test files with known amplitudes. The conv_factors need to be
    # replaced by these values otherwise we're off by a factor off 5000.0
    # for the EEG data.
    is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)
              for ch in channels]
    exg_gains /= 2 ** (adc_stored - 14)
    conv_factor[is_exg] = exg_gains
    sqd['conv_factor'] = conv_factor[:, np.newaxis]

    # Create raw.info dict for raw fif object with SQD data
    info = _empty_info(float(sqd['sfreq']))
    info.update(meas_date=_stamp_to_dt((create_time, 0)),
                lowpass=sqd['lowpass'],
                highpass=sqd['highpass'], kit_system_id=sysid,
                description=description)

    # Creates a list of dicts of meg channels for raw.info
    logger.info('Setting channel info structure...')
    info['chs'] = fiff_channels = []
    channel_index = defaultdict(lambda: 0)
    sqd['eeg_dig'] = OrderedDict()
    for idx, ch in enumerate(channels, 1):
        if ch['type'] in KIT.CHANNELS_MEG:
            ch_name = ch.get('name', '')
            if ch_name == '' or standardize_names:
                ch_name = 'MEG %03d' % idx
            # create three orthogonal vector
            # ch_angles[0]: theta, ch_angles[1]: phi
            theta, phi = np.radians(ch['loc'][3:])
            x = sin(theta) * cos(phi)
            y = sin(theta) * sin(phi)
            z = cos(theta)
            vec_z = np.array([x, y, z])
            vec_z /= linalg.norm(vec_z)
            vec_x = np.zeros(vec_z.size, dtype=np.float64)
            if vec_z[1] < vec_z[2]:
                if vec_z[0] < vec_z[1]:
                    vec_x[0] = 1.0
                else:
                    vec_x[1] = 1.0
            elif vec_z[0] < vec_z[2]:
                vec_x[0] = 1.0
            else:
                vec_x[2] = 1.0
            vec_x -= np.sum(vec_x * vec_z) * vec_z
            vec_x /= linalg.norm(vec_x)
            vec_y = np.cross(vec_z, vec_x)
            # transform to Neuromag like coordinate space
            vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))
            vecs = apply_trans(als_ras_trans, vecs)
            unit = FIFF.FIFF_UNIT_T
            loc = vecs.ravel()
        else:
            ch_type_label = KIT.CH_LABEL[ch['type']]
            channel_index[ch_type_label] += 1
            ch_type_index = channel_index[ch_type_label]
            ch_name = ch.get('name', '')
            eeg_name = ch_name.lower()
            # some files have all EEG labeled as EEG
            if ch_name in ('', 'EEG') or standardize_names:
                ch_name = '%s %03i' % (ch_type_label, ch_type_index)
            unit = FIFF.FIFF_UNIT_V
            loc = np.zeros(12)
            if eeg_name and eeg_name in dig:
                loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name]
        fiff_channels.append(dict(
            cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,
            unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,
            coord_frame=FIFF.FIFFV_COORD_DEVICE,
            coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],
            kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))
    info._update_redundant()
    return info, sqd
def _read_name(fid, ch_type=None, n=None):
n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type]
return fid.read(n).split(b'\x00')[0].decode('utf-8')
@fill_doc
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
                 slope='-', stimthresh=1, preload=False, stim_code='binary',
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):
    """Reader function for Ricoh/KIT conversion to FIF.

    Parameters
    ----------
    input_fname : str
        Path to the sqd file.
    mrk : None | str | array_like, shape (5, 3) | list of str or array_like
        Marker points representing the location of the marker coils with
        respect to the MEG Sensors, or path to a marker file.
        If list, all of the markers will be averaged together.
    elp : None | str | array_like, shape (8, 3)
        Digitizer points representing the location of the fiducials and the
        marker coils with respect to the digitized head shape, or path to a
        file containing these points.
    hsp : None | str | array, shape (n_points, 3)
        Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
    stim : list of int | '<' | '>' | None
        Channel-value correspondence when converting KIT trigger channels to a
        Neuromag-style stim channel. For '<', the largest values are assigned
        to the first channel (default). For '>', the largest values are
        assigned to the last channel. Can also be specified as a list of
        trigger channel indexes. If None, no synthesized channel is generated.
    slope : '+' | '-'
        How to interpret values on KIT trigger channels when synthesizing a
        Neuromag-style stim channel. With '+', a positive slope (low-to-high)
        is interpreted as an event. With '-', a negative slope (high-to-low)
        is interpreted as an event.
    stimthresh : float
        The threshold level for accepting voltage changes in KIT trigger
        channels as a trigger event.
    %(preload)s
    stim_code : 'binary' | 'channel'
        How to decode trigger values from stim channels. 'binary' read stim
        channel events as binary code, 'channel' encodes channel number.
    allow_unknown_format : bool
        Force reading old data that is not officially supported. Alternatively,
        read and re-save the data with the KIT MEG Laboratory application.
    %(standardize_names)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawKIT
        A Raw object containing KIT data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.

    Notes
    -----
    If mrk, hsp or elp are array_like inputs, then the numbers in xyz
    coordinates should be in units of meters.
    """
    return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
                  stim=stim, slope=slope, stimthresh=stimthresh,
                  preload=preload, stim_code=stim_code,
                  allow_unknown_format=allow_unknown_format,
                  standardize_names=standardize_names, verbose=verbose)
@fill_doc
def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,
                    hsp=None, allow_unknown_format=False,
                    standardize_names=None, verbose=None):
    """Reader function for Ricoh/KIT epochs files.

    Parameters
    ----------
    input_fname : str
        Path to the sqd file.
    events : str | array, shape (n_events, 3)
        Path to events file, or the events typically returned by the
        read_events function. If some events don't match the events of
        interest as specified by event_id, they will be marked as 'IGNORED'
        in the drop log.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used
        and a dict is created with string integer names corresponding
        to the event id integers.
    mrk : None | str | array_like, shape (5, 3) | list of str or array_like
        Marker points representing the location of the marker coils with
        respect to the MEG Sensors, or path to a marker file.
        If list, all of the markers will be averaged together.
    elp : None | str | array_like, shape (8, 3)
        Digitizer points representing the location of the fiducials and the
        marker coils with respect to the digitized head shape, or path to a
        file containing these points.
    hsp : None | str | array, shape (n_points, 3)
        Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
    allow_unknown_format : bool
        Force reading old data that is not officially supported. Alternatively,
        read and re-save the data with the KIT MEG Laboratory application.
    %(standardize_names)s
    %(verbose)s

    Returns
    -------
    epochs : instance of Epochs
        The epochs.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    epochs = EpochsKIT(input_fname=input_fname, events=events,
                       event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
                       allow_unknown_format=allow_unknown_format,
                       standardize_names=standardize_names,
                       verbose=verbose)
    return epochs
| 43.437759 | 79 | 0.58676 |
from collections import defaultdict, OrderedDict
from math import sin, cos
from os import SEEK_CUR, path as op
from struct import unpack
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...utils import (verbose, logger, warn, fill_doc, _check_option,
_stamp_to_dt)
from ...transforms import apply_trans, als_ras_trans
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ...epochs import BaseEpochs
from ..constants import FIFF
from ..meas_info import _empty_info
from .constants import KIT, LEGACY_AMP_PARAMS
from .coreg import read_mrk
from ...event import read_events
from .._digitization import _set_dig_kit
def _call_digitization(info, mrk, elp, hsp, kit_info):
    """Set digitization points and the device-to-head transform on *info*.

    mrk, elp and hsp must be provided as a group (all three or none); when
    none are given, points embedded in the SQD file (kit_info) are used if
    present.
    """
    # fall back to the mrk/elp/hsp stored in the SQD file itself
    if mrk is None and elp is None and hsp is None:
        mrk = kit_info.get('mrk', None)
        elp = kit_info.get('elp', None)
        hsp = kit_info.get('hsp', None)
    # a list of markers (paths or arrays) is averaged into one marker set
    if isinstance(mrk, list):
        mrk = [read_mrk(marker) if isinstance(marker, str)
               else marker for marker in mrk]
        mrk = np.mean(mrk, axis=0)
    if mrk is not None and elp is not None and hsp is not None:
        dig_points, dev_head_t = _set_dig_kit(
            mrk, elp, hsp, kit_info['eeg_dig'])
        info['dig'] = dig_points
        info['dev_head_t'] = dev_head_t
    elif mrk is not None or elp is not None or hsp is not None:
        raise ValueError("mrk, elp and hsp need to be provided as a group "
                         "(all or none)")
    return info
class UnsupportedKITFormat(ValueError):
    """Raised for SQD file format versions the reader does not support."""

    def __init__(self, sqd_version, *args, **kwargs):
        # keep the offending version string available to callers
        self.sqd_version = sqd_version
        super().__init__(*args, **kwargs)
@fill_doc
class RawKIT(BaseRaw):
    @verbose
    def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
                 slope='-', stimthresh=1, preload=False, stim_code='binary',
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):
        # Parse header first; BaseRaw.__init__ below handles the preload.
        logger.info('Extracting SQD Parameters from %s...' % input_fname)
        input_fname = op.abspath(input_fname)
        self.preload = False
        logger.info('Creating Raw.info structure...')
        info, kit_info = get_kit_info(
            input_fname, allow_unknown_format, standardize_names)
        kit_info['slope'] = slope
        kit_info['stimthresh'] = stimthresh
        if kit_info['acq_type'] != KIT.CONTINUOUS:
            raise TypeError('SQD file contains epochs, not raw data. Wrong '
                            'reader.')
        logger.info('Creating Info structure...')
        last_samps = [kit_info['n_samples'] - 1]
        self._raw_extras = [kit_info]
        # append the synthesized stim channel before BaseRaw validates info
        self._set_stimchannels(info, stim, stim_code)
        super(RawKIT, self).__init__(
            info, preload, last_samps=last_samps, filenames=[input_fname],
            raw_extras=self._raw_extras, verbose=verbose)
        # self.info exists only after BaseRaw.__init__, so dig goes last
        self.info = _call_digitization(
            info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
        logger.info('Ready.')
def read_stim_ch(self, buffer_size=1e5):
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int64)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x = self[pick, b_start:b_stop][0]
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
def _set_stimchannels(self, info, stim, stim_code):
if self.preload:
raise NotImplementedError("Can't change stim channel after "
"loading data")
_check_option('stim_code', stim_code, ['binary', 'channel'])
if stim is not None:
if isinstance(stim, str):
picks = _default_stim_chs(info)
if stim == '<':
stim = picks[::-1]
elif stim == '>':
stim = picks
else:
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
else:
stim = np.asarray(stim, int)
if stim.max() >= self._raw_extras[0]['nchan']:
raise ValueError(
'Got stim=%s, but sqd file only has %i channels' %
(stim, self._raw_extras[0]['nchan']))
# modify info
nchan = self._raw_extras[0]['nchan'] + 1
info['chs'].append(dict(
cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,
unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE,
ch_name='STI 014',
coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),
kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN))
info._update_redundant()
self._raw_extras[0]['stim'] = stim
self._raw_extras[0]['stim_code'] = stim_code
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
sqd = self._raw_extras[fi]
nchan = sqd['nchan']
data_left = (stop - start) * nchan
conv_factor = sqd['conv_factor']
n_bytes = sqd['dtype'].itemsize
assert n_bytes in (2, 4)
# Read up to 100 MB of data at a time.
blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)
with open(self._filenames[fi], 'rb', buffering=0) as fid:
# extract data
pointer = start * nchan * n_bytes
fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)
stim = sqd['stim']
for blk_start in np.arange(0, data_left, blk_size) // nchan:
blk_size = min(blk_size, data_left - blk_start * nchan)
block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size)
block = block.reshape(nchan, -1, order='F').astype(float)
blk_stop = blk_start + block.shape[1]
data_view = data[:, blk_start:blk_stop]
block *= conv_factor
# Create a synthetic stim channel
if stim is not None:
stim_ch = _make_stim_channel(
block[stim, :], sqd['slope'], sqd['stimthresh'],
sqd['stim_code'], stim)
block = np.vstack((block, stim_ch))
_mult_cal_one(data_view, block, idx, cals, mult)
# cals are all unity, so can be ignored
def _default_stim_chs(info):
    """Return the first eight miscellaneous channels as default stim picks."""
    misc_picks = pick_types(info, meg=False, ref_meg=False, misc=True,
                            exclude=[])
    return misc_picks[:8]
def _make_stim_channel(trigger_chs, slope, threshold, stim_code,
trigger_values):
if slope == '+':
trig_chs_bin = trigger_chs > threshold
elif slope == '-':
trig_chs_bin = trigger_chs < threshold
else:
raise ValueError("slope needs to be '+' or '-'")
# trigger value
if stim_code == 'binary':
trigger_values = 2 ** np.arange(len(trigger_chs))
elif stim_code != 'channel':
raise ValueError("stim_code must be 'binary' or 'channel', got %s" %
repr(stim_code))
trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]
return np.array(trig_chs.sum(axis=0), ndmin=2)
class EpochsKIT(BaseEpochs):
    """Epochs object read from an epoched KIT SQD file.

    The whole data array is read eagerly in ``_read_kit_data``; events must
    match the number of epochs stored in the file.  Optional mrk/elp/hsp
    inputs add digitization info via ``_call_digitization``.
    """

    @verbose
    def __init__(self, input_fname, events, event_id=None, tmin=0,
                 baseline=None, reject=None, flat=None, reject_tmin=None,
                 reject_tmax=None, mrk=None, elp=None, hsp=None,
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):  # noqa: D102
        if isinstance(events, str):
            events = read_events(events)

        logger.info('Extracting KIT Parameters from %s...' % input_fname)
        input_fname = op.abspath(input_fname)
        self.info, kit_info = get_kit_info(
            input_fname, allow_unknown_format, standardize_names)
        kit_info.update(filename=input_fname)
        self._raw_extras = [kit_info]
        self._filenames = []
        if len(events) != self._raw_extras[0]['n_epochs']:
            raise ValueError('Event list does not match number of epochs.')

        if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:
            self._raw_extras[0]['data_length'] = KIT.INT
        else:
            raise TypeError('SQD file contains raw data, not epochs or '
                            'average. Wrong reader.')

        # defaults to all unique event values found in the third column
        if event_id is None:  # convert to int to make typing-checks happy
            event_id = {str(e): int(e) for e in np.unique(events[:, 2])}

        for key, val in event_id.items():
            if val not in events[:, 2]:
                raise ValueError('No matching events found for %s '
                                 '(event id %i)' % (key, val))

        data = self._read_kit_data()
        assert data.shape == (self._raw_extras[0]['n_epochs'],
                              self.info['nchan'],
                              self._raw_extras[0]['frame_length'])
        tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
        super(EpochsKIT, self).__init__(
            self.info, data, events, event_id, tmin, tmax, baseline,
            reject=reject, flat=flat, reject_tmin=reject_tmin,
            reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)
        self.info = _call_digitization(
            info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
        logger.info('Ready.')

    def _read_kit_data(self):
        """Read all epoch data from the SQD file.

        Returns
        -------
        data : ndarray, shape (n_epochs, nchan, epoch_length)
            Scaled epoch data (on-disk layout is sample-major, channel
            interleaved; it is reshaped and transposed here).
        """
        info = self._raw_extras[0]
        epoch_length = info['frame_length']
        n_epochs = info['n_epochs']
        n_samples = info['n_samples']
        filename = info['filename']
        dtype = info['dtype']
        nchan = info['nchan']

        with open(filename, 'rb', buffering=0) as fid:
            fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'])
            count = n_samples * nchan
            data = np.fromfile(fid, dtype=dtype, count=count)
        data = data.reshape((n_samples, nchan)).T
        # amplifier gain / ADC scaling precomputed in get_kit_info
        data = data * info['conv_factor']
        data = data.reshape((nchan, n_epochs, epoch_length))
        data = data.transpose((1, 0, 2))
        return data
def _read_dir(fid):
return dict(offset=np.fromfile(fid, np.uint32, 1)[0],
size=np.fromfile(fid, np.int32, 1)[0],
max_count=np.fromfile(fid, np.int32, 1)[0],
count=np.fromfile(fid, np.int32, 1)[0])
@verbose
def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
                 verbose=None):
    """Extract measurement info and KIT parameters from an SQD/CON file.

    Parameters
    ----------
    rawfile : str
        Path to the SQD/CON file.
    allow_unknown_format : bool
        If True, force-load unsupported file format versions with a
        warning; otherwise raise UnsupportedKITFormat.
    standardize_names : bool or None
        If True, replace native channel names with standardized ones
        ('MEG 001', ...).  None selects a default based on whether the
        file contains named channels (with a deprecation warning).
    verbose : bool, str, int, or None
        Verbosity level (handled by the decorator).

    Returns
    -------
    info : instance of Info
        Measurement info (channels, sfreq, filters, description).
    sqd : dict
        KIT-specific parameters (directory table, dtype, gains,
        digitization, conversion factors, ...) used by the readers.
    """
    sqd = dict()
    sqd['rawfile'] = rawfile
    unsupported_format = False
    sqd['dirs'] = dirs = list()
    with open(rawfile, 'rb', buffering=0) as fid:  # buffering=0 for np bug
        #
        # directories (0)
        #
        # the first entry describes the directory table itself, including
        # how many entries follow
        dirs.append(_read_dir(fid))
        dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1))
        assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']

        #
        # system (1)
        #
        fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset'])
        # check file format version
        version, revision = unpack('2i', fid.read(2 * KIT.INT))
        if version < 2 or (version == 2 and revision < 3):
            version_string = "V%iR%03i" % (version, revision)
            if allow_unknown_format:
                unsupported_format = True
                logger.warning("Force loading KIT format %s", version_string)
            else:
                raise UnsupportedKITFormat(
                    version_string,
                    "SQD file format %s is not officially supported. "
                    "Set allow_unknown_format=True to load it anyways." %
                    (version_string,))

        sysid = unpack('i', fid.read(KIT.INT))[0]
        # basic info
        system_name = unpack('128s', fid.read(128))[0].decode()
        # model name
        model_name = unpack('128s', fid.read(128))[0].decode()
        # channels
        sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]
        comment = unpack('256s', fid.read(256))[0].decode()
        create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))
        fid.seek(KIT.INT * 3, SEEK_CUR)  # reserved
        dewar_style = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        fll_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        trigger_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 3, SEEK_CUR)  # spare
        adboard_type = unpack('i', fid.read(KIT.INT))[0]
        fid.seek(KIT.INT * 29, SEEK_CUR)  # reserved

        # older revisions store the ADC range as an int, newer as a double
        if version < 2 or (version == 2 and revision <= 3):
            adc_range = float(unpack('i', fid.read(KIT.INT))[0])
        else:
            adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]
        adc_polarity, adc_allocated, adc_stored = unpack('3i',
                                                         fid.read(3 * KIT.INT))
        system_name = system_name.replace('\x00', '')
        system_name = system_name.strip().replace('\n', '/')
        model_name = model_name.replace('\x00', '')
        model_name = model_name.strip().replace('\n', '/')

        full_version = f'V{version:d}R{revision:03d}'
        logger.debug("SQD file basic information:")
        logger.debug("Meg160 version = %s", full_version)
        logger.debug("System ID = %i", sysid)
        logger.debug("System name = %s", system_name)
        logger.debug("Model name = %s", model_name)
        logger.debug("Channel count = %i", channel_count)
        logger.debug("Comment = %s", comment)
        logger.debug("Dewar style = %i", dewar_style)
        logger.debug("FLL type = %i", fll_type)
        logger.debug("Trigger type = %i", trigger_type)
        logger.debug("A/D board type = %i", adboard_type)
        logger.debug("ADC range = +/-%s[V]", adc_range / 2.)
        logger.debug("ADC allocate = %i[bit]", adc_allocated)
        logger.debug("ADC bit = %i[bit]", adc_stored)
        # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH'
        description = \
            f'{system_name} ({sysid}) {full_version} {model_name}'
        sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}'))

        # check that we can read this file
        if fll_type not in KIT.FLL_SETTINGS:
            fll_types = sorted(KIT.FLL_SETTINGS.keys())
            use_fll_type = fll_types[
                np.searchsorted(fll_types, fll_type) - 1]
            warn('Unknown site filter settings (FLL) for system '
                 '"%s" model "%s" (ID %s), will assume FLL %d->%d, check '
                 'your data for correctness, including channel scales and '
                 'filter settings!'
                 % (system_name, model_name, sysid, fll_type, use_fll_type))
            fll_type = use_fll_type

        #
        # channel information (4)
        #
        chan_dir = dirs[KIT.DIR_INDEX_CHANNELS]
        chan_offset, chan_size = chan_dir['offset'], chan_dir['size']
        sqd['channels'] = channels = []
        exg_gains = list()
        for i in range(channel_count):
            fid.seek(chan_offset + chan_size * i)
            channel_type, = unpack('i', fid.read(KIT.INT))
            # System 52 mislabeled reference channels as NULL. This was fixed
            # in system 53; not sure about 51...
            if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:
                channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE

            if channel_type in KIT.CHANNELS_MEG:
                if channel_type not in KIT.CH_TO_FIFF_COIL:
                    raise NotImplementedError(
                        "KIT channel type %i can not be read. Please contact "
                        "the mne-python developers." % channel_type)
                channels.append({
                    'type': channel_type,
                    # (x, y, z, theta, phi) for all MEG channels. Some channel
                    # types have additional information which we're not using.
                    'loc': np.fromfile(fid, dtype='d', count=5),
                })
                if channel_type in KIT.CHANNEL_NAME_NCHAR:
                    fid.seek(16, SEEK_CUR)
                    channels[-1]['name'] = _read_name(fid, channel_type)
            elif channel_type in KIT.CHANNELS_MISC:
                channel_no, = unpack('i', fid.read(KIT.INT))
                fid.seek(4, SEEK_CUR)
                name = _read_name(fid, channel_type)
                channels.append({
                    'type': channel_type,
                    'no': channel_no,
                    'name': name,
                })
                if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):
                    offset = 6 if channel_type == KIT.CHANNEL_EEG else 8
                    fid.seek(offset, SEEK_CUR)
                    exg_gains.append(np.fromfile(fid, 'd', 1)[0])
            elif channel_type == KIT.CHANNEL_NULL:
                channels.append({'type': channel_type})
            else:
                raise IOError("Unknown KIT channel type: %i" % channel_type)
        exg_gains = np.array(exg_gains)

        # calibration: one (offset, gain) double pair per channel
        fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset'])
        sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)
        sensitivity.shape = (channel_count, 2)
        channel_offset, channel_gain = sensitivity.T
        assert (channel_offset == 0).all()

        # amplifier gain and filter settings (bit-packed int)
        fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset'])
        amp_data = unpack('i', fid.read(KIT.INT))[0]
        if fll_type >= 100:
            gain1 = (amp_data & 0x00007000) >> 12
            gain2 = (amp_data & 0x70000000) >> 28
            gain3 = (amp_data & 0x07000000) >> 24
            amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])
            hpf = (amp_data & 0x00000700) >> 8
            lpf = (amp_data & 0x00070000) >> 16
            bef = (amp_data & 0x00000003) >> 0
        else:
            input_gain = (amp_data & 0x1800) >> 11
            output_gain = (amp_data & 0x0007) >> 0
            amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]
            # NOTE(review): (amp_data & 0x007) >> 4 is always 0, so hpf is
            # pinned to index 0 here -- confirm the intended mask (0x0070?)
            # against the KIT amplifier spec
            hpf = (amp_data & 0x007) >> 4
            lpf = (amp_data & 0x0700) >> 8
            bef = (amp_data & 0xc000) >> 14
        hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]
        sqd['highpass'] = KIT.HPFS[hpf_options][hpf]
        sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]
        sqd['notch'] = KIT.BEFS[bef_options][bef]

        # acquisition parameters
        fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset'])
        sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))
        sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))
        if acq_type == KIT.CONTINUOUS:
            fid.seek(KIT.INT, SEEK_CUR)
            sqd['n_samples'], = unpack('i', fid.read(KIT.INT))
        elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:
            sqd['frame_length'], = unpack('i', fid.read(KIT.INT))
            sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))
            sqd['average_count'], = unpack('i', fid.read(KIT.INT))
            sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))
            if acq_type == KIT.EVOKED:
                sqd['n_samples'] = sqd['frame_length']
            else:
                sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
        else:
            raise IOError("Invalid acquisition type: %i. Your file is neither "
                          "continuous nor epoched data." % (acq_type,))

        # digitization points and coregistration markers (optional)
        dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS]
        cor_dir = dirs[KIT.DIR_INDEX_COREG]
        dig = dict()
        hsp = list()
        if dig_dir['count'] > 0 and cor_dir['count'] > 0:
            fid.seek(dig_dir['offset'])
            for _ in range(dig_dir['count']):
                name = _read_name(fid, n=8).strip()
                # lower-case the label so lookups below are case-
                # insensitive. It will also prevent collisions with HSP
                name = name.lower()
                rr = np.fromfile(fid, 'd', 3)
                if name:
                    assert name not in dig
                    dig[name] = rr
                else:
                    hsp.append(rr)

            # nasion, lpa, rpa, HPI in native space
            elp = [dig.pop(key) for key in (
                'fidnz', 'fidt9', 'fidt10',
                'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')]
            if 'hpi_5' in dig and dig['hpi_5'].any():
                elp.append(dig.pop('hpi_5'))
            elp = np.array(elp)
            hsp = np.array(hsp, float).reshape(-1, 3)
            assert elp.shape in ((7, 3), (8, 3))
            # coregistration
            fid.seek(cor_dir['offset'])
            mrk = np.zeros((elp.shape[0] - 3, 3))
            for _ in range(cor_dir['count']):
                done = np.fromfile(fid, np.int32, 1)[0]
                fid.seek(16 * KIT.DOUBLE +  # meg_to_mri
                         16 * KIT.DOUBLE,  # mri_to_meg
                         SEEK_CUR)
                marker_count = np.fromfile(fid, np.int32, 1)[0]
                if not done:
                    continue
                assert marker_count >= len(mrk)
                for mi in range(len(mrk)):
                    mri_type, meg_type, mri_done, meg_done = \
                        np.fromfile(fid, np.int32, 4)
                    assert meg_done
                    fid.seek(3 * KIT.DOUBLE, SEEK_CUR)  # mri_pos
                    mrk[mi] = np.fromfile(fid, 'd', 3)
                fid.seek(256, SEEK_CUR)  # marker_file (char)
            sqd.update(hsp=hsp, elp=elp, mrk=mrk)

    all_names = set(ch.get('name', '') for ch in channels)
    if standardize_names is None and all_names.difference({'', 'EEG'}):
        standardize_names = True
        warn('standardize_names defaults to True in 0.21 but will change '
             'to False in 0.22', DeprecationWarning)

    # precompute conversion factor for reading data
    if unsupported_format:
        if sysid not in LEGACY_AMP_PARAMS:
            raise IOError("Legacy parameters for system ID %i unavailable" %
                          (sysid,))
        adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]
    is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])
    ad_to_volt = adc_range / (2 ** adc_stored)
    ad_to_tesla = ad_to_volt / amp_gain * channel_gain
    conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)
    # XXX this is a bit of a hack. Should probably do this more cleanly at
    # some point... the 2 ** (adc_stored - 14) was emperically determined using
    # the test files with known amplitudes. The conv_factors need to be
    # replaced by these values otherwise we're off by a factor off 5000.0
    is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)
              for ch in channels]
    exg_gains /= 2 ** (adc_stored - 14)
    conv_factor[is_exg] = exg_gains
    sqd['conv_factor'] = conv_factor[:, np.newaxis]

    # assemble the measurement info structure
    info = _empty_info(float(sqd['sfreq']))
    info.update(meas_date=_stamp_to_dt((create_time, 0)),
                lowpass=sqd['lowpass'],
                highpass=sqd['highpass'], kit_system_id=sysid,
                description=description)

    # build one FIFF channel dict per KIT channel
    logger.info('Setting channel info structure...')
    info['chs'] = fiff_channels = []
    channel_index = defaultdict(lambda: 0)
    sqd['eeg_dig'] = OrderedDict()
    for idx, ch in enumerate(channels, 1):
        if ch['type'] in KIT.CHANNELS_MEG:
            ch_name = ch.get('name', '')
            if ch_name == '' or standardize_names:
                ch_name = 'MEG %03d' % idx
            # build an orthonormal coil frame from (theta, phi)
            theta, phi = np.radians(ch['loc'][3:])
            x = sin(theta) * cos(phi)
            y = sin(theta) * sin(phi)
            z = cos(theta)
            vec_z = np.array([x, y, z])
            vec_z /= linalg.norm(vec_z)
            vec_x = np.zeros(vec_z.size, dtype=np.float64)
            if vec_z[1] < vec_z[2]:
                if vec_z[0] < vec_z[1]:
                    vec_x[0] = 1.0
                else:
                    vec_x[1] = 1.0
            elif vec_z[0] < vec_z[2]:
                vec_x[0] = 1.0
            else:
                vec_x[2] = 1.0
            # orthogonalize vec_x against vec_z (Gram-Schmidt) and complete
            # the right-handed frame with the cross product
            vec_x -= np.sum(vec_x * vec_z) * vec_z
            vec_x /= linalg.norm(vec_x)
            vec_y = np.cross(vec_z, vec_x)
            # transform from ALS to RAS coordinates
            vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))
            vecs = apply_trans(als_ras_trans, vecs)
            unit = FIFF.FIFF_UNIT_T
            loc = vecs.ravel()
        else:
            ch_type_label = KIT.CH_LABEL[ch['type']]
            channel_index[ch_type_label] += 1
            ch_type_index = channel_index[ch_type_label]
            ch_name = ch.get('name', '')
            eeg_name = ch_name.lower()
            # some files have all EEG channels just labeled 'EEG'
            if ch_name in ('', 'EEG') or standardize_names:
                ch_name = '%s %03i' % (ch_type_label, ch_type_index)
            unit = FIFF.FIFF_UNIT_V
            loc = np.zeros(12)
            if eeg_name and eeg_name in dig:
                loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name]
        fiff_channels.append(dict(
            cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,
            unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,
            coord_frame=FIFF.FIFFV_COORD_DEVICE,
            coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],
            kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))
    info._update_redundant()
    return info, sqd
def _read_name(fid, ch_type=None, n=None):
n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type]
return fid.read(n).split(b'\x00')[0].decode('utf-8')
@fill_doc
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
                 slope='-', stimthresh=1, preload=False, stim_code='binary',
                 allow_unknown_format=False, standardize_names=None,
                 verbose=None):
    """Read a continuous KIT SQD/CON file and return a RawKIT instance."""
    kwargs = dict(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
                  stim=stim, slope=slope, stimthresh=stimthresh,
                  preload=preload, stim_code=stim_code,
                  allow_unknown_format=allow_unknown_format,
                  standardize_names=standardize_names, verbose=verbose)
    return RawKIT(**kwargs)
@fill_doc
def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,
                    hsp=None, allow_unknown_format=False,
                    standardize_names=None, verbose=None):
    """Read an epoched KIT SQD file and return an EpochsKIT instance."""
    return EpochsKIT(input_fname=input_fname, events=events,
                     event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
                     allow_unknown_format=allow_unknown_format,
                     standardize_names=standardize_names,
                     verbose=verbose)
| true | true |
7900344115555e3f7c00990bba7697ab8d2f9bac | 599 | py | Python | wireframe.py | fwidmaier/mesh_handler | bba4144f5d525feef955369ed4fd446324024e6a | [
"MIT"
] | null | null | null | wireframe.py | fwidmaier/mesh_handler | bba4144f5d525feef955369ed4fd446324024e6a | [
"MIT"
] | null | null | null | wireframe.py | fwidmaier/mesh_handler | bba4144f5d525feef955369ed4fd446324024e6a | [
"MIT"
] | null | null | null | """
Script to show the wireframe of a given mesh (read from a file) in an interactive
Viewer.
"""
from viewer import *
from mesh.obj import OBJFile
import sys
if __name__ == "__main__":
    # Launch an interactive viewer showing the wireframe of the mesh named
    # on the command line.  The previous try/except wrapper only re-raised
    # the caught exception (`raise e`), which added nothing and obscured the
    # traceback origin, so it was removed; any load error still propagates.
    app = Viewer()
    if len(sys.argv) > 1:
        obj = OBJFile.read(sys.argv[1])
        app.scene.addObject(obj)
        app.title(sys.argv[1])
        app.scene.setTarget(obj.centroid)
    else:
        print("No input file given. Nothing to render.")
        print("Try 'python3 wireframe.py yourobj.obj'")
    app.show()
| 23.038462 | 81 | 0.592654 |
from viewer import *
from mesh.obj import OBJFile
import sys
if __name__ == "__main__":
    # Show the wireframe of the mesh given as the first CLI argument in an
    # interactive viewer window.
    viewer_app = Viewer()
    if len(sys.argv) < 2:
        print("No input file given. Nothing to render.")
        print("Try 'python3 wireframe.py yourobj.obj'")
    else:
        try:
            mesh = OBJFile.read(sys.argv[1])
            viewer_app.scene.addObject(mesh)
            viewer_app.title(sys.argv[1])
            viewer_app.scene.setTarget(mesh.centroid)
        except Exception as e:
            raise e
    viewer_app.show()
| true | true |
79003460bc2034505a75076d7dacde6c8f02aca5 | 6,146 | py | Python | lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 12 | 2018-11-20T04:30:49.000Z | 2021-11-09T12:34:26.000Z | lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 1 | 2019-01-24T15:56:15.000Z | 2019-05-31T07:56:55.000Z | lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 6 | 2018-06-29T03:45:50.000Z | 2022-03-18T01:51:45.000Z | # coding: utf-8
"""
vautoscaling
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vautoscaling.model.process import Process # noqa: F401,E501
class ResumeProcessesResponse(object):
    """Model for the vautoscaling ResumeProcesses API response.

    ``swagger_types`` maps each attribute name to its swagger type string
    and ``attribute_map`` maps it to the JSON key used on the wire.
    """

    swagger_types = {
        'request_id': 'str',
        'return_code': 'str',
        'return_message': 'str',
        'total_rows': 'int',
        'process_list': 'list[Process]'
    }

    attribute_map = {
        'request_id': 'requestId',
        'return_code': 'returnCode',
        'return_message': 'returnMessage',
        'total_rows': 'totalRows',
        'process_list': 'processList'
    }

    def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, process_list=None):  # noqa: E501
        """Initialize the response model; only non-None kwargs are stored."""
        self._request_id = None
        self._return_code = None
        self._return_message = None
        self._total_rows = None
        self._process_list = None
        self.discriminator = None
        provided = (('request_id', request_id),
                    ('return_code', return_code),
                    ('return_message', return_message),
                    ('total_rows', total_rows),
                    ('process_list', process_list))
        for name, value in provided:
            if value is not None:
                setattr(self, name, value)  # routes through the property setter

    @property
    def request_id(self):
        """str: request identifier echoed back by the API."""
        return self._request_id

    @request_id.setter
    def request_id(self, request_id):
        self._request_id = request_id

    @property
    def return_code(self):
        """str: API return code."""
        return self._return_code

    @return_code.setter
    def return_code(self, return_code):
        self._return_code = return_code

    @property
    def return_message(self):
        """str: human-readable API return message."""
        return self._return_message

    @return_message.setter
    def return_message(self, return_message):
        self._return_message = return_message

    @property
    def total_rows(self):
        """int: total number of rows reported by the API."""
        return self._total_rows

    @total_rows.setter
    def total_rows(self, total_rows):
        self._total_rows = total_rows

    @property
    def process_list(self):
        """list[Process]: scaling processes affected by the request."""
        return self._process_list

    @process_list.setter
    def process_list(self, process_list):
        self._process_list = process_list

    def to_dict(self):
        """Return the model properties as a plain dict.

        Nested swagger models (anything exposing ``to_dict``) found
        directly in attributes, lists, or dict values are converted too.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict()
                                      if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal iff all their attributes are equal."""
        return (isinstance(other, ResumeProcessesResponse) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 28.453704 | 129 | 0.607062 |
import pprint
import re
import six
from ncloud_vautoscaling.model.process import Process
class ResumeProcessesResponse(object):
    """Swagger model for the vautoscaling ResumeProcesses API response."""

    # Maps attribute name -> swagger type string.
    swagger_types = {
        'request_id': 'str',
        'return_code': 'str',
        'return_message': 'str',
        'total_rows': 'int',
        'process_list': 'list[Process]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'request_id': 'requestId',
        'return_code': 'returnCode',
        'return_message': 'returnMessage',
        'total_rows': 'totalRows',
        'process_list': 'processList'
    }

    def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, process_list=None):
        """Initialize the model; only non-None kwargs are assigned."""
        self._request_id = None
        self._return_code = None
        self._return_message = None
        self._total_rows = None
        self._process_list = None
        self.discriminator = None
        if request_id is not None:
            self.request_id = request_id
        if return_code is not None:
            self.return_code = return_code
        if return_message is not None:
            self.return_message = return_message
        if total_rows is not None:
            self.total_rows = total_rows
        if process_list is not None:
            self.process_list = process_list

    @property
    def request_id(self):
        """Get the request_id of this ResumeProcessesResponse (str)."""
        return self._request_id

    @request_id.setter
    def request_id(self, request_id):
        """Set the request_id of this ResumeProcessesResponse (str)."""
        self._request_id = request_id

    @property
    def return_code(self):
        """Get the return_code of this ResumeProcessesResponse (str)."""
        return self._return_code

    @return_code.setter
    def return_code(self, return_code):
        """Set the return_code of this ResumeProcessesResponse (str)."""
        self._return_code = return_code

    @property
    def return_message(self):
        """Get the return_message of this ResumeProcessesResponse (str)."""
        return self._return_message

    @return_message.setter
    def return_message(self, return_message):
        """Set the return_message of this ResumeProcessesResponse (str)."""
        self._return_message = return_message

    @property
    def total_rows(self):
        """Get the total_rows of this ResumeProcessesResponse (int)."""
        return self._total_rows

    @total_rows.setter
    def total_rows(self, total_rows):
        """Set the total_rows of this ResumeProcessesResponse (int)."""
        self._total_rows = total_rows

    @property
    def process_list(self):
        """Get the process_list of this ResumeProcessesResponse (list[Process])."""
        return self._process_list

    @process_list.setter
    def process_list(self, process_list):
        """Set the process_list of this ResumeProcessesResponse (list[Process])."""
        self._process_list = process_list

    def to_dict(self):
        """Return the model properties as a dict, converting nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, ResumeProcessesResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
7900355bbe26186ac5dbd81b76fbdbe822cdd10a | 105,956 | py | Python | models/transformer.py | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | b43fb91cf99ee3ffaf137cd0be87b67448995c9b | [
"MIT"
] | null | null | null | models/transformer.py | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | b43fb91cf99ee3ffaf137cd0be87b67448995c9b | [
"MIT"
] | null | null | null | models/transformer.py | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | b43fb91cf99ee3ffaf137cd0be87b67448995c9b | [
"MIT"
] | 1 | 2021-06-01T17:58:43.000Z | 2021-06-01T17:58:43.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.random import uniform
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from bert import BertTokenizer
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
from bert import BertModel
@register_model('transformer')
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance.

        Fills in architecture defaults, resolves source/target dictionaries
        from *task*, builds (possibly shared) token embeddings, loads the
        pre-trained BERT encoder named by ``args.bert_model_name``, and
        assembles the full encoder-decoder model.

        Raises:
            ValueError: if ``--share-all-embeddings`` is requested with
                incompatible dictionaries, dims, or embedding paths.
        """
        # make sure all arguments are present in older models
        base_architecture(args)
        # Older checkpoints may lack these attributes; backfill defaults.
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse the tokenizer already attached to a loaded dataset when
        # available; otherwise instantiate one from the pre-trained name.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Embedding table sized to *dictionary*, optionally initialized
            # from a pre-trained embedding file at *path*.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # Sharing all embeddings requires one joint vocabulary and
            # matching encoder/decoder embedding dimensions.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            # Sharing all embeddings implies tying decoder input/output embeddings.
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        # Record BERT's hidden size on args so encoder/decoder layers can
        # size their BERT-attention projections.
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        # NOTE(review): returns TransformerModel explicitly rather than cls —
        # confirm that subclasses are expected to override build_model.
        return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
@register_model('transformers2')
class TransformerS2Model(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Variant whose encoder (:class:`TransformerS2Encoder`) additionally
    attends to the output of a pre-trained BERT encoder.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder
    The Transformer model provides the following named architectures and
    command-line arguments:
    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """
    def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
        super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        # fmt: on
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)
        # Older checkpoints may lack these attributes; backfill defaults.
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse a dataset-attached tokenizer when one is loaded; otherwise
        # build one from the pre-trained model name.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Embedding table sized to *dictionary*, optionally initialized
            # from a pre-trained embedding file at *path*.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # Sharing all embeddings requires one joint vocabulary and
            # matching encoder/decoder embedding dimensions.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            # Sharing all embeddings implies tying decoder input/output embeddings.
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        # Record BERT's hidden size so layers can size their BERT-attention
        # projections.
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        # S2 variant: encoder layers attend to BERT output.
        return TransformerS2Encoder(args, src_dict, embed_tokens)
    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        return TransformerDecoder(args, tgt_dict, embed_tokens)
    def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
        """
        Run the forward pass for an encoder-decoder model.
        First feed a batch of source tokens through the encoder. Then, feed the
        encoder output and previous decoder outputs (i.e., input feeding/teacher
        forcing) to the decoder to produce the next outputs::
            encoder_out = self.encoder(src_tokens, src_lengths)
            return self.decoder(prev_output_tokens, encoder_out)
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        # Positions equal to the BERT pad id are excluded from BERT attention.
        bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
        bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask)
        # Pick a single layer's activations out of all encoded layers.
        bert_encoder_out = bert_encoder_out[self.bert_output_layer]
        if self.mask_cls_sep:
            # Optionally also mask the special [CLS]/[SEP] positions so the
            # NMT model cannot attend to them.
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
        # B x T x C -> T x B x C, the layout expected by fairseq attention.
        bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous()
        bert_encoder_out = {
            'bert_encoder_out': bert_encoder_out,
            'bert_encoder_padding_mask': bert_encoder_padding_mask,
        }
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
        return decoder_out
@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Variant that uses :class:`TransformerDecoderStack` as its decoder.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder
    The Transformer model provides the following named architectures and
    command-line arguments:
    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """
    def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False):
        super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep)
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        # fmt: on
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)
        # Older checkpoints may lack these attributes; backfill defaults.
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse a dataset-attached tokenizer when one is loaded; otherwise
        # build one from the pre-trained model name.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Embedding table sized to *dictionary*, optionally initialized
            # from a pre-trained embedding file at *path*.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # Sharing all embeddings requires one joint vocabulary and
            # matching encoder/decoder embedding dimensions.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            # Sharing all embeddings implies tying decoder input/output embeddings.
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        # Record BERT's hidden size so layers can size their BERT-attention
        # projections.
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        # Fix: previously returned ``TransformerModel(...)``, so the
        # registered 'transformerstack' architecture never actually
        # instantiated this class. Use ``cls`` so the stack model (and any
        # subclass) is built with its own __init__ signature.
        return cls(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep)
    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return TransformerEncoder(args, src_dict, embed_tokens)
    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        # Stack variant: decoder layers are TransformerDecoderStack layers.
        return TransformerDecoderStack(args, tgt_dict, embed_tokens)
class TransformerEncoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerEncoderLayer`.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
    """
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(dictionary)
        # Version buffer lets upgrade_state_dict_named detect old checkpoints.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        # Scale embeddings by sqrt(dim), per the Transformer paper.
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(args)
            for i in range(args.encoder_layers)
        ])
        # Final LayerNorm only in the pre-norm ("normalize before") setup.
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            # No padding in the batch: pass None so layers skip masking work.
            encoder_padding_mask = None
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
        if self.layer_norm:
            x = self.layer_norm(x)
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }
    def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
        """
        Reorder encoder output according to *new_order*.

        Reorders both the NMT encoder outputs and the BERT encoder outputs
        (used during beam search).

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        # encoder_out is T x B x C -> select along batch dim 1;
        # padding masks are B x T -> select along dim 0.
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs
    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal positions are recomputed, not stored; replace any
            # stored weights with a dummy float tensor.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerS2Encoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerS2EncoderLayer` that additionally attends to the
    output of a pre-trained BERT encoder.

    Also carries an experimental token-masking sub-network (``mask_layers`` /
    ``t_layer`` / ``output_vocab_linear``) used by :meth:`mask` and
    :meth:`encodeMLM`; the related code paths in :meth:`forward` are disabled.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
    """
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(dictionary)
        # Version buffer lets upgrade_state_dict_named detect old checkpoints.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        # Masking head: softmax over the time dimension of per-token scores.
        self.output_mask = nn.Softmax(dim = 0)
        # Fix: these projections were hard-coded to 512; use the actual
        # embedding dimension so any model size works (identical at 512,
        # the only size that previously did not crash in mask()/encodeMLM()).
        self.t_layer = nn.Linear(embed_dim, 1)
        self.output_vocab_linear = nn.Linear(embed_dim, embed_tokens.num_embeddings)
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        # Scale embeddings by sqrt(dim), per the Transformer paper.
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        # Per-layer gates controlling which layers attend to BERT output.
        bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
        bert_gates = [x == 1 for x in bert_gates]
        assert len(bert_gates) == args.encoder_layers
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])
            for i in range(args.encoder_layers)
        ])
        # Final LayerNorm only in the pre-norm ("normalize before") setup.
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim))))
        # Small 2-layer transformer used to score tokens for masking.
        self.mask_layers = nn.ModuleList([])
        self.mask_layers.extend([
            TransformerEncoderLayer(args)
            for i in range(2)
        ])
        if args.encoder_normalize_before:
            self.mask_layer_norm = LayerNorm(embed_dim)
        else:
            # Fix: this branch previously re-assigned ``self.layer_norm``
            # (clobbering the value set above) and left ``mask_layer_norm``
            # undefined; set the intended attribute instead.
            self.mask_layer_norm = None
        '''
        self.x = None
        self.unmask_output = None
        self.mask_output = None
        self.encoder_vocab_output = None
        self.backwards = 0
        '''
        self.i = 0
    def forward(self, src_tokens, src_lengths, bert_encoder_out):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
            bert_encoder_out (dict): BERT activations (`T x B x C`) and the
                corresponding padding mask, as produced by the model forward.
        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Disabled experimental code: drop the lowest-scoring tokens
        # according to the masking sub-network before encoding.
        '''
        mask_output = self.mask(src_tokens , x)
        p = mask_output
        p = p.transpose(0, 1)
        t_p = torch.argsort(p,dim=1)
        ratio = 0.2
        self.ratio = ratio
        p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))
        self.p_mask = p_mask
        p_mask = p_mask.unsqueeze(-1).transpose(0,1)
        self.mask_output = p
        if self.training:
            x = x * p_mask.detach()
        else:
            x = x
        '''
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers (each additionally attends to the BERT output)
        for layer in self.layers:
            x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
        if self.layer_norm:
            x = self.layer_norm(x)
        # Disabled experimental code: auxiliary MLM pass / debug dumps.
        '''
        self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
        '''
        '''
        ##########################
        if self.i%1==0:
            import scipy.io as scio
            self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
            scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()})
        self.i+=1
        ########################
        '''
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }
    def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out):
        """Auxiliary masked-LM pass: mask low-scoring tokens, encode, and
        project encoder states back to the source vocabulary.

        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
            bert_encoder_out (dict): BERT activations and padding mask.
        Returns:
            Tensor: per-position vocabulary logits of shape
            `(src_len, batch, vocab)`.
        """
        # embed tokens and positions
        self.src_tokens = src_tokens
        x = self.embed_scale * self.embed_tokens(src_tokens)
        '''
        ratio = 0.3
        mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False)
        if mask is not None:
        '''
        '''
        if x.size(1)<10:
            mask = [4]
        else:
            mask = [7,9]
        x[:, mask] = self.mask_embedding
        '''
        # Zero out the lowest-scoring ~20% of tokens (scores detached so the
        # masking head is not trained through this path).
        mask_output = self.mask(src_tokens , x)
        p = mask_output
        p = p
        t_p = torch.argsort(p,dim=1)
        ratio = 0.2
        self.ratio = ratio
        p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))
        self.p_mask = p_mask
        p_mask = p_mask.unsqueeze(-1)
        self.mask_output = p
        x = x * p_mask.detach()
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
        if self.layer_norm:
            x = self.layer_norm(x)
        encoder_vocab_output = self.output_vocab_linear(x)
        self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1)
        self.token = src_tokens
        return encoder_vocab_output
    def mask(self, src_tokens, x):
        """Score each source position with the masking sub-network.

        Returns a `(batch, src_len)` tensor of softmax-normalized scores
        (padded positions receive probability 0 via a -inf fill).
        """
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # masking sub-network layers
        for layer in self.mask_layers:
            x = layer(x, encoder_padding_mask)
        # Fix: guard on ``mask_layer_norm`` (was mistakenly ``layer_norm``,
        # which belongs to the main encoder stack).
        if self.mask_layer_norm:
            x = self.mask_layer_norm(x)
        x = self.t_layer(x).squeeze(-1)
        if encoder_padding_mask is not None:
            x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf')))
        return self.output_mask(x).transpose(0, 1)
    def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
        """
        Reorder encoder output according to *new_order*.

        Reorders both the NMT encoder outputs and the BERT encoder outputs
        (used during beam search).

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs
    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal positions are recomputed, not stored; replace any
            # stored weights with a dummy float tensor.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        """Build the decoder: embeddings, positional embeddings, the stack of
        (BERT-aware or standard) decoder layers, and the output projection
        (tied embeddings, a learned ``embed_out`` matrix, or adaptive softmax).
        """
        super().__init__(dictionary)
        # Version buffer lets upgrade logic detect old checkpoints.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # Project input embeddings up/down when their dim differs from the
        # decoder's working dim.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        # Per-layer gates controlling which layers attend to BERT output.
        bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
        bert_gates = [x == 1 for x in bert_gates]
        assert len(bert_gates) == args.decoder_layers
        print('bert_gates', bert_gates)
        self.layers = nn.ModuleList([])
        # With --decoder-no-bert, fall back to standard decoder layers that
        # ignore the BERT stream.
        decoder_no_bert = getattr(args, 'decoder_no_bert', False)
        if decoder_no_bert:
            self.layers.extend([
                TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        else:
            self.layers.extend([
                TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        self.adaptive_softmax = None
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # Separate output projection matrix when embeddings are not tied.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        # Final LayerNorm only in the pre-norm setup (and unless disabled).
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
x = self.output_layer(x)
return x, extra
    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Similar to *forward* but only return features (no vocabulary
        projection).

        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`
            encoder_out (dict, optional): expected to hold 'encoder_out' and
                'encoder_padding_mask'; may be None
            bert_encoder_out (dict): expected to hold 'bert_encoder_out' and
                'bert_encoder_padding_mask'; indexed unconditionally below, so
                it must not be None
            incremental_state (dict, optional): cached decoding state; when
                given, only the most recent target token is processed

        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        # Positional embeddings (None when token positional embeddings are disabled).
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        # During incremental decoding, feed only the newest token (and its position).
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # Embed tokens, scale, optionally project to the model dimension, add positions.
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # Decoder layers. The causal self-attention mask is only needed in
        # non-incremental mode; the incremental cache already enforces causality.
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        # Optional final layer norm (pre-norm architectures).
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
    def buffered_future_mask(self, tensor):
        """Return a causal (upper-triangular -inf) mask of shape `(dim, dim)`.

        The mask is cached on the module and lazily rebuilt when it is
        missing, lives on a different device than *tensor*, or is smaller
        than the current sequence length.
        """
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            # Grow the cached mask in place to the new (dim, dim) size.
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        # Sinusoidal position embeddings are recomputed on the fly; drop any
        # stored weights and install the expected dummy float tensor instead.
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # Migrate legacy `layer_norms.{0,1,2}` entries to their current names.
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoderStack(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayerStack`, which applies encoder attention
    and BERT attention as separate, stacked sub-layers.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # Project token embeddings up/down only when their dim differs from the model dim.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayerStack(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        # Output projection: adaptive softmax, tied input embeddings, or a fresh matrix.
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        # Final layer norm is used only for pre-norm architectures (unless disabled).
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
        x = self.output_layer(x)
        return x, extra
    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Similar to *forward* but only return features (no vocabulary projection).
        Note: *bert_encoder_out* is indexed unconditionally, so it must not be None.
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        # Positional embeddings (None when disabled).
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        # During incremental decoding, process only the newest token.
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # Decoder layers; causal mask only needed outside incremental decoding.
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}
    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary size (no-op under adaptive softmax)."""
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            if self.share_input_output_embed:
                return F.linear(features, self.embed_tokens.weight)
            else:
                return F.linear(features, self.embed_out)
        else:
            return features
    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())
    def buffered_future_mask(self, tensor):
        # Cached causal (-inf upper triangle) mask; rebuilt when missing,
        # on the wrong device, or too small for the current length.
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        # Sinusoidal embeddings are recomputed; drop stored weights.
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerEncoderLayer(nn.Module):
    """Encoder layer block.
    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
    def upgrade_state_dict_named(self, state_dict, name):
        """
        Rename layer norm states from `...layer_norms.0.weight` to
        `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
        `...final_layer_norm.weight`
        """
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if k in state_dict:
                    state_dict[
                        '{}.{}.{}'.format(name, new, m)
                    ] = state_dict[k]
                    del state_dict[k]
    def forward(self, x, encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # Self-attention sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # Side effect: expose the most recent attention weights on the module.
        self.attn_weight = attn_weight
        # Position-wise feed-forward sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply *layer_norm* either before (pre-norm) or after (post-norm) the
        # sub-layer, depending on self.normalize_before. Exactly one of
        # *before*/*after* must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
class TransformerS2EncoderLayer(nn.Module):
    """Encoder layer block with an extra attention stream over BERT outputs.
    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        bert_gate (bool, optional): when False, the BERT branch is disabled
            (ratio forced to 0 and drop-net/mixup turned off).
    """
    def __init__(self, args, bert_gate=True):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        # Cross-attention into the (possibly differently sized) BERT outputs.
        bert_out_dim = args.bert_out_dim
        self.bert_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            kdim=bert_out_dim, vdim=bert_out_dim,
            dropout=args.attention_dropout,
        )
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
        # Mixing weights for the self-attention vs. BERT-attention branches.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        # Drop-net / mixup settings for stochastically choosing between branches.
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
        if not bert_gate:
            self.bert_ratio = 0.
            self.encoder_bert_dropout = False
            self.encoder_bert_mixup = False
    def upgrade_state_dict_named(self, state_dict, name):
        """
        Rename layer norm states from `...layer_norms.0.weight` to
        `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
        `...final_layer_norm.weight`
        """
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if k in state_dict:
                    state_dict[
                        '{}.{}.{}'.format(name, new, m)
                    ] = state_dict[k]
                    del state_dict[k]
    def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
            bert_encoder_out (Tensor): BERT encoder states attended to by the
                second attention stream
            bert_encoder_padding_mask (ByteTensor): padding mask for the BERT states
        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        # Two parallel attention branches: self-attention and BERT cross-attention.
        x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask)
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        x2 = F.dropout(x2, p=self.dropout, training=self.training)
        # Combine branches with fixed or stochastically drawn ratios (drop-net).
        ratios = self.get_ratio()
        x = residual + ratios[0] * x1 + ratios[1] * x2
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # Position-wise feed-forward sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x
    def get_ratio(self):
        # Returns [encoder_branch_weight, bert_branch_weight].
        # With drop-net enabled: mixup draws a random convex combination during
        # training; otherwise one branch is randomly dropped during training,
        # and at evaluation both branches get weight 0.5 (expected value).
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Pre-norm vs. post-norm selection; exactly one of before/after must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
class TransformerDecoderLayer(nn.Module):
    """Decoder layer block with parallel encoder- and BERT-attention branches.
    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
        bert_gate (bool, optional): when False, the BERT branch is disabled
            (ratio forced to 0 and drop-net/mixup turned off).
    """
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before
        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determint this.
        # TODO  remove this once we update apex with the fix
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            # Cross-attention into the (possibly differently sized) BERT outputs.
            bert_out_dim = args.bert_out_dim
            self.bert_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                kdim=bert_out_dim, vdim=bert_out_dim,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Mixing weights for the encoder- vs. BERT-attention branches.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        # Drop-net / mixup settings for stochastically choosing between branches.
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
        if not bert_gate:
            self.bert_ratio = 0.
            self.encoder_bert_dropout = False
            self.encoder_bert_mixup = False
    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # Causal self-attention sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental cache with externally supplied key/value state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        if self.encoder_attn is not None:
            # Two parallel cross-attention branches: source encoder and BERT.
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x1, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x2, _ = self.bert_attn(
                query=x,
                key=bert_encoder_out,
                value=bert_encoder_out,
                key_padding_mask=bert_encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x1 = F.dropout(x1, p=self.dropout, training=self.training)
            x2 = F.dropout(x2, p=self.dropout, training=self.training)
            # Combine branches with fixed or stochastically drawn ratios (drop-net).
            ratios = self.get_ratio()
            x = residual + ratios[0] * x1 + ratios[1] * x2
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
        # Position-wise feed-forward sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # ONNX export also returns the cached self-attention state.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def get_ratio(self):
        # Returns [encoder_branch_weight, bert_branch_weight].
        # With drop-net enabled: mixup draws a random convex combination during
        # training; otherwise one branch is randomly dropped during training,
        # and at evaluation both branches get weight 0.5 (expected value).
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Pre-norm vs. post-norm selection; exactly one of before/after must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        self.need_attn = need_attn
class TransformerStandardDecoderLayer(nn.Module):
    """Standard decoder layer block (no BERT cross-attention applied).
    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
        bert_gate (bool, optional): kept for interface parity with
            :class:`TransformerDecoderLayer`; only zeroes *bert_ratio* here.
    """
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before
        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determint this.
        # TODO  remove this once we update apex with the fix
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            # BERT cross-attention deliberately disabled in this "standard" variant:
            # bert_out_dim = args.bert_out_dim
            # self.bert_attn = MultiheadAttention(
            #     self.embed_dim, args.decoder_attention_heads,
            #     kdim=bert_out_dim, vdim=bert_out_dim,
            #     dropout=args.attention_dropout, encoder_decoder_attention=True
            # )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Ratio settings kept for interface parity; unused in this layer's forward.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        if not bert_gate:
            self.bert_ratio = 0.
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
            bert_encoder_out / bert_encoder_padding_mask: accepted for interface
                parity but ignored by this layer.
        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # Causal self-attention sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental cache with externally supplied key/value state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        if self.encoder_attn is not None:
            # Encoder cross-attention only (BERT branch intentionally absent).
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x1, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            # x2, _ = self.bert_attn(
            #     query=x,
            #     key=bert_encoder_out,
            #     value=bert_encoder_out,
            #     key_padding_mask=bert_encoder_padding_mask,
            #     incremental_state=incremental_state,
            #     static_kv=True,
            #     need_weights=(not self.training and self.need_attn),
            # )
            x1 = F.dropout(x1, p=self.dropout, training=self.training)
            # x2 = F.dropout(x2, p=self.dropout, training=self.training)
            # ratios = self.get_ratio()
            x = residual + x1
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
        # Position-wise feed-forward sub-layer with residual connection.
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # ONNX export also returns the cached self-attention state.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def get_ratio(self):
        # Unused by this layer's forward(); retained for interface parity
        # with TransformerDecoderLayer.
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Pre-norm vs. post-norm selection; exactly one of before/after must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        self.need_attn = need_attn
class TransformerDecoderLayerStack(nn.Module):
    """Decoder layer with two stacked (sequential) cross-attention sub-layers:
    one over the source encoder output and one over a BERT encoder output.

    Per-layer layout: masked self-attention -> encoder cross-attention and
    BERT cross-attention applied one after the other (``args.bert_first``
    decides the order) -> position-wise feed-forward.  Each sub-layer is
    wrapped in a residual connection with pre- or post-layer-norm depending
    on ``args.decoder_normalize_before``.
    """

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        # Masked self-attention over previously generated target tokens.
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before

        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determint this.
        # TODO remove this once we update apex with the fix
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)

        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            # Cross-attention over the regular source-token encoder states.
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
            # Cross-attention over the BERT states; key/value projection dims
            # follow the BERT hidden size, which may differ from embed_dim.
            bert_out_dim = args.bert_out_dim
            self.bert_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                kdim=bert_out_dim, vdim=bert_out_dim,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        # Whether BERT cross-attention runs before encoder cross-attention.
        self.bert_first = args.bert_first

        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)

        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True

        self.onnx_trace = False

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.

        Returns:
            tuple ``(x, attn)`` — the transformed input and the attention
            weights from the last cross-attention sub-layer; when
            ``onnx_trace`` is set and an incremental state is given, the
            cached self-attention ``(prev_key, prev_value)`` is appended.
        """
        # --- self-attention sub-layer (residual + layer norm) ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Restore cached key/value state for incremental decoding.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)

        if self.encoder_attn is not None:
            if prev_attn_state is not None:
                # Restore cached encoder-attention state for incremental decoding.
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)

        def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state):
            # One cross-attention sub-layer: (maybe) layer norm -> attention
            # with static key/value -> dropout -> residual -> (maybe) norm.
            residual = x
            x = self.maybe_layer_norm(layer_norm, x, before=True)
            x, attn = attnlayer(
                query=x,
                key=keyorvalue,
                value=keyorvalue,
                key_padding_mask=key_padding,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = residual + x
            x = self.maybe_layer_norm(layer_norm, x, after=True)
            return x, attn

        # Apply the two cross-attention sub-layers sequentially; bert_first
        # controls which stream is attended to first.
        if self.bert_first:
            x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                              bert_encoder_padding_mask, incremental_state)
            x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                              incremental_state)
        else:
            x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                              incremental_state)
            x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                              bert_encoder_padding_mask, incremental_state)

        # --- position-wise feed-forward sub-layer (residual + layer norm) ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # For ONNX export also surface the self-attention k/v cache.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply layer_norm only when (after XOR normalize_before) holds,
        # implementing pre-norm vs. post-norm sub-layer placement.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x

    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Toggle whether attention weights are computed during generation.
        self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Build an ``nn.Embedding`` with N(0, dim**-0.5) weights and a zeroed
    padding row — the standard fairseq token-embedding initialization."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    std = embedding_dim ** -0.5
    nn.init.normal_(table.weight, mean=0, std=std)
    nn.init.constant_(table.weight[padding_idx], 0)
    return table
def Linear(in_features, out_features, bias=True):
    """Build an ``nn.Linear`` with Xavier-uniform weights and (when present)
    a zero-initialized bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
    """Fill in any unset hyperparameters on ``args`` with the base
    transformer defaults.

    Values the user already set are left untouched.  Defaults are applied
    in order, so decoder dims can fall back to the (possibly just-set)
    encoder dims.
    """
    def fallback(name, value):
        # Keep a user-provided value if present, otherwise install the default.
        setattr(args, name, getattr(args, name, value))

    fallback('encoder_embed_path', None)
    fallback('encoder_embed_dim', 512)
    fallback('encoder_ffn_embed_dim', 2048)
    fallback('encoder_layers', 6)
    fallback('encoder_attention_heads', 8)
    fallback('encoder_normalize_before', False)
    fallback('encoder_learned_pos', False)
    fallback('decoder_embed_path', None)
    fallback('decoder_embed_dim', args.encoder_embed_dim)
    fallback('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    fallback('decoder_layers', 6)
    fallback('decoder_attention_heads', 8)
    fallback('decoder_normalize_before', False)
    fallback('decoder_learned_pos', False)
    fallback('attention_dropout', 0.)
    fallback('activation_dropout', 0.)
    fallback('activation_fn', 'relu')
    fallback('dropout', 0.1)
    fallback('adaptive_softmax_cutoff', None)
    fallback('adaptive_softmax_dropout', 0)
    fallback('share_decoder_input_output_embed', False)
    fallback('share_all_embeddings', False)
    fallback('no_token_positional_embeddings', False)
    fallback('adaptive_input', False)
    fallback('decoder_output_dim', args.decoder_embed_dim)
    fallback('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformers2', 'transformers2')
def base_architecture_s2(args):
    """Base hyperparameters for the 'transformers2' (BERT-fused) model.

    The defaults are line-for-line identical to :func:`base_architecture`
    (and ``TransformerS2Model.build_model`` already calls it), so delegate
    instead of duplicating the full list of ``setattr`` defaults — this
    keeps the two architectures from drifting apart.
    """
    base_architecture(args)
@register_model_architecture('transformerstack', 'transformerstack')
def base_stack_architecture(args):
    """Base hyperparameters for the 'transformerstack' model.

    Identical to :func:`base_architecture` (the original body was a verbatim
    copy), so delegate to it — one source of truth for the shared defaults.
    """
    base_architecture(args)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
    """IWSLT De-En preset: smaller FFN (1024) and 4 attention heads; any
    value already set by the user wins over these defaults."""
    presets = (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')
def transformer_s2_iwslt_de_en(args):
    """IWSLT De-En preset for the BERT-fused 'transformers2' model; mirrors
    transformer_iwslt_de_en but finishes with the s2 base architecture."""
    presets = (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    base_architecture_s2(args)
@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')
def transformerstack_iwslt_de_en(args):
    """IWSLT De-En preset for the stacked-attention model; mirrors
    transformer_iwslt_de_en but finishes with the stack base architecture."""
    presets = (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    base_stack_architecture(args)
@register_model_architecture('transformers2', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
    # WMT En-De uses the plain 'transformers2' base hyperparameters unchanged.
    base_architecture_s2(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
    """'Big' transformer from "Attention Is All You Need" (Vaswani et al.,
    2017): 1024-dim embeddings, 4096-dim FFN, 16 heads, dropout 0.3."""
    presets = (
        ('encoder_embed_dim', 1024),
        ('encoder_ffn_embed_dim', 4096),
        ('encoder_attention_heads', 16),
        ('encoder_normalize_before', False),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
        ('dropout', 0.3),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')
def transformer_s2_vaswani_wmt_en_de_big(args):
    """'Big' Vaswani et al. (2017) settings for the BERT-fused
    'transformers2' model; mirrors transformer_vaswani_wmt_en_de_big."""
    presets = (
        ('encoder_embed_dim', 1024),
        ('encoder_ffn_embed_dim', 4096),
        ('encoder_attention_heads', 16),
        ('encoder_normalize_before', False),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
        ('dropout', 0.3),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    base_architecture_s2(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
    # Same as the En-De big model but with lower dropout (0.1 instead of 0.3).
    args.dropout = getattr(args, 'dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
    # Big model with attention dropout enabled (0.1) on top of the Vaswani
    # et al. (2017) big defaults.
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
    """Big model with tensor2tensor-style defaults: pre-layer-norm on both
    sides plus attention/activation dropout of 0.1."""
    presets = (
        ('encoder_normalize_before', True),
        ('decoder_normalize_before', True),
        ('attention_dropout', 0.1),
        ('activation_dropout', 0.1),
    )
    for name, value in presets:
        setattr(args, name, getattr(args, name, value))
    transformer_vaswani_wmt_en_de_big(args)
| 45.987847 | 169 | 0.625363 |
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.random import uniform
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from bert import BertTokenizer
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
from bert import BertModel
@register_model('transformer')
class TransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
@staticmethod
def add_args(parser):
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
@register_model('transformers2')
class TransformerS2Model(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
@staticmethod
def add_args(parser):
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerS2Encoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask)
bert_encoder_out = bert_encoder_out[self.bert_output_layer]
if self.mask_cls_sep:
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous()
bert_encoder_out = {
'bert_encoder_out': bert_encoder_out,
'bert_encoder_padding_mask': bert_encoder_padding_mask,
}
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
return decoder_out
@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep)
@staticmethod
def add_args(parser):
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderStack(args, tgt_dict, embed_tokens)
class TransformerEncoder(FairseqEncoder):
    """Standard Transformer encoder over source tokens.

    Embeds tokens (scaled by sqrt(embed_dim)), optionally adds positional
    embeddings, applies a stack of :class:`TransformerEncoderLayer`, and
    returns the hidden states together with the source padding mask.
    """
    def __init__(self, args, dictionary, embed_tokens):
        """
        Args:
            args: parsed arguments holding model hyperparameters.
            dictionary: source vocabulary.
            embed_tokens: token embedding module (provides padding index).
        """
        super().__init__(dictionary)
        # Version buffer consumed by upgrade_state_dict_named for back-compat.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        # Scale embeddings by sqrt(d), as in the original Transformer.
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(args)
            for i in range(args.encoder_layers)
        ])
        # Final LayerNorm only in the pre-norm configuration.
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def forward(self, src_tokens, src_lengths):
        """Encode ``src_tokens``.

        Returns:
            dict with:
              - 'encoder_out': hidden states, time-major after the transpose
                below (src_len, batch, embed_dim)
              - 'encoder_padding_mask': padding positions (batch, src_len),
                or None when the batch contains no padding.
        """
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            # Drop the mask entirely when nothing is padded.
            encoder_padding_mask = None
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
        if self.layer_norm:
            x = self.layer_norm(x)
        return {
            'encoder_out': x,
            'encoder_padding_mask': encoder_padding_mask,
        }
    def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
        """Reorder encoder and BERT outputs to match ``new_order`` (beam
        search): time-major tensors select on dim 1, batch-major masks on
        dim 0."""
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs
    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old checkpoints to the current parameter layout."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal tables are recomputed, not stored; drop stale weights
            # and keep only a dtype/device placeholder.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # Let each layer rename its own legacy parameters.
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # Pre-version-2 checkpoints: disable the final layer norm so
            # outputs match what the checkpoint was trained with.
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerS2Encoder(FairseqEncoder):
    """Transformer encoder whose layers additionally attend to BERT output.

    The main stack is built from :class:`TransformerS2EncoderLayer`, each of
    which consumes both the token stream and ``bert_encoder_out``.  The class
    also carries a small two-layer scoring network (``mask_layers`` +
    ``t_layer`` + ``output_mask``) used by :meth:`encodeMLM` to rank input
    positions and zero out the lowest-scoring fraction of embeddings before
    predicting the vocabulary at every position.
    """
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        # Softmax over the time dimension: turns per-position scores into a
        # distribution used by mask() / encodeMLM().
        self.output_mask = nn.Softmax(dim=0)
        self.t_layer = nn.Linear(512, 1)
        self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings)
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        # Per-layer flags controlling whether the BERT attention branch is
        # active in each encoder layer.
        bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
        bert_gates = [x == 1 for x in bert_gates]
        assert len(bert_gates) == args.encoder_layers
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])
            for i in range(args.encoder_layers)
        ])
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim))))
        self.mask_layers = nn.ModuleList([])
        self.mask_layers.extend([
            TransformerEncoderLayer(args)
            for i in range(2)
        ])
        if args.encoder_normalize_before:
            self.mask_layer_norm = LayerNorm(embed_dim)
        else:
            # Bug fix: this branch previously reset ``self.layer_norm`` and
            # left ``mask_layer_norm`` undefined in the post-norm setting.
            self.mask_layer_norm = None
        self.i = 0
    def forward(self, src_tokens, src_lengths, bert_encoder_out):
        """Encode ``src_tokens`` while attending to ``bert_encoder_out``.

        Returns:
            dict with 'encoder_out' (src_len, batch, embed_dim) and
            'encoder_padding_mask' ((batch, src_len), or None when the batch
            has no padding).
        """
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # Bug fix: ``encoder_padding_mask`` was previously referenced without
        # being computed (and ``x`` was never transposed), so this method
        # raised NameError on every call.  Mirror the encodeMLM() path.
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        for layer in self.layers:
            x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
        if self.layer_norm:
            x = self.layer_norm(x)
        return {
            'encoder_out': x,
            'encoder_padding_mask': encoder_padding_mask,
        }
    def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out, ratio=0.2):
        """Masked-LM style pass.

        Scores every input position with the mask sub-network, zeroes the
        embeddings of the lowest-ranked ``ratio`` fraction, encodes the
        result, and predicts the vocabulary at each position.

        Args:
            ratio: fraction of positions to mask (default 0.2, matching the
                previously hard-coded value).

        Returns:
            Unnormalised vocabulary logits, shape (src_len, batch, vocab).
        """
        self.src_tokens = src_tokens
        x = self.embed_scale * self.embed_tokens(src_tokens)
        p = self.mask(src_tokens, x)
        # Rank positions by score; the lowest-ranked ``ratio`` fraction gets
        # a 0 in the mask, the rest a 1.
        t_p = torch.argsort(p, dim=1)
        self.ratio = ratio
        p_mask = torch.where(t_p < t_p.size(1) * ratio, torch.zeros_like(p), torch.ones_like(p))
        self.p_mask = p_mask
        p_mask = p_mask.unsqueeze(-1)
        self.mask_output = p
        # detach(): the scoring network is not trained through this product.
        x = x * p_mask.detach()
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        for layer in self.layers:
            x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
        if self.layer_norm:
            x = self.layer_norm(x)
        encoder_vocab_output = self.output_vocab_linear(x)
        self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output, dim=-1)
        self.token = src_tokens
        return encoder_vocab_output
    def mask(self, src_tokens, x):
        """Score each source position with the small mask sub-network.

        Returns a (batch, src_len) tensor of softmax-normalised scores;
        padded positions are scored -inf and hence get probability 0.
        """
        x = x.transpose(0, 1)
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        for layer in self.mask_layers:
            x = layer(x, encoder_padding_mask)
        # Bug fix: test and apply mask_layer_norm here, not the main stack's
        # layer_norm (the two are distinct modules).
        if self.mask_layer_norm:
            x = self.mask_layer_norm(x)
        x = self.t_layer(x).squeeze(-1)
        if encoder_padding_mask is not None:
            x = x.masked_fill(encoder_padding_mask.transpose(0, 1), value=torch.tensor(float('-inf')))
        return self.output_mask(x).transpose(0, 1)
    def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
        """Reorder encoder and BERT outputs to match ``new_order`` (beam
        search): time-major tensors select on dim 1, batch-major masks on
        dim 0."""
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs
    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old checkpoints to the current parameter layout."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal tables are recomputed, not stored; drop stale weights.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # Let each layer rename its own legacy parameters.
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # Pre-version-2 checkpoints: disable the final layer norm.
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
    """Transformer decoder whose layers attend to both the encoder output
    and a BERT encoder output (unless ``args.decoder_no_bert`` is set, in
    which case plain encoder-attention layers are used)."""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        """
        Args:
            args: model hyperparameters.
            dictionary: target vocabulary.
            embed_tokens: target token embedding module.
            no_encoder_attn: build layers without encoder attention
                (language-model style).
        """
        super().__init__(dictionary)
        # Version buffer consumed by upgrade_state_dict_named for back-compat.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)
        # Project the input embedding when its size differs from the
        # decoder's working dimension.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        # Per-layer flags controlling whether the BERT attention branch is
        # active in each decoder layer.
        bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
        bert_gates = [x == 1 for x in bert_gates]
        assert len(bert_gates) == args.decoder_layers
        print('bert_gates', bert_gates)
        self.layers = nn.ModuleList([])
        decoder_no_bert = getattr(args, 'decoder_no_bert', False)
        if decoder_no_bert:
            # Plain decoder layers: encoder attention only, no BERT branch.
            self.layers.extend([
                TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        else:
            self.layers.extend([
                TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        self.adaptive_softmax = None
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # Separate output projection when input/output embeddings are
            # not tied.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """Run the decoder and project features to vocabulary logits.

        Returns:
            (logits, extra) where ``extra`` holds attention weights and
            per-layer hidden states.
        """
        x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
        x = self.output_layer(x)
        return x, extra
    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """Compute decoder hidden states (before the output projection).

        NOTE(review): ``bert_encoder_out`` is indexed unconditionally below,
        so unlike ``encoder_out`` it must always be provided -- confirm with
        callers.
        """
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # Incremental decoding: only the newest token is fed through.
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                # Causal mask only for full-sequence (non-incremental) decoding.
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}
    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary.  With adaptive softmax the
        projection happens in the criterion, so features pass through."""
        if self.adaptive_softmax is None:
            if self.share_input_output_embed:
                return F.linear(features, self.embed_tokens.weight)
            else:
                return F.linear(features, self.embed_out)
        else:
            return features
    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())
    def buffered_future_mask(self, tensor):
        """Return a cached upper-triangular -inf mask that blocks attention
        to future positions; rebuilt when missing, on the wrong device, or
        smaller than the current sequence length."""
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old checkpoints to the current parameter layout."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal tables are recomputed, not stored; drop stale weights.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # Rename legacy ``layer_norms.{0,1,2}`` parameters per layer.
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # Pre-version-2 checkpoints: disable the final layer norm.
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoderStack(FairseqIncrementalDecoder):
    """Transformer decoder built from :class:`TransformerDecoderLayerStack`
    layers; each layer receives both the encoder output and a BERT encoder
    output."""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        """
        Args:
            args: model hyperparameters.
            dictionary: target vocabulary.
            embed_tokens: target token embedding module.
            no_encoder_attn: build layers without encoder attention
                (language-model style).
        """
        super().__init__(dictionary)
        # Version buffer consumed by upgrade_state_dict_named for back-compat.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)
        # Project the input embedding when its size differs from the
        # decoder's working dimension.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayerStack(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # Separate output projection when input/output embeddings are
            # not tied.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """Run the decoder and project features to vocabulary logits.

        Returns:
            (logits, extra) where ``extra`` holds attention weights and
            per-layer hidden states.
        """
        x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
        x = self.output_layer(x)
        return x, extra
    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """Compute decoder hidden states (before the output projection).

        NOTE(review): ``bert_encoder_out`` is indexed unconditionally below,
        so unlike ``encoder_out`` it must always be provided -- confirm with
        callers.
        """
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # Incremental decoding: only the newest token is fed through.
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                # Causal mask only for full-sequence (non-incremental) decoding.
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}
    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary.  With adaptive softmax the
        projection happens in the criterion, so features pass through."""
        if self.adaptive_softmax is None:
            if self.share_input_output_embed:
                return F.linear(features, self.embed_tokens.weight)
            else:
                return F.linear(features, self.embed_out)
        else:
            return features
    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())
    def buffered_future_mask(self, tensor):
        """Return a cached upper-triangular -inf mask that blocks attention
        to future positions; rebuilt when missing, on the wrong device, or
        smaller than the current sequence length."""
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old checkpoints to the current parameter layout."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal tables are recomputed, not stored; drop stale weights.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # Rename legacy ``layer_norms.{0,1,2}`` parameters per layer.
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # Pre-version-2 checkpoints: disable the final layer norm.
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerEncoderLayer(nn.Module):
    """One Transformer encoder layer: self-attention followed by a
    position-wise feed-forward network, each wrapped in residual + norm.

    Supports both the pre-norm (``encoder_normalize_before``) and post-norm
    layouts via :meth:`maybe_layer_norm`.
    """
    def __init__(self, args):
        super().__init__()
        embed_dim = args.encoder_embed_dim
        self.embed_dim = embed_dim
        self.self_attn = MultiheadAttention(
            embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        self.self_attn_layer_norm = LayerNorm(embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        act_dropout = getattr(args, 'activation_dropout', 0)
        if act_dropout == 0:
            # Fall back to the legacy flag name.
            act_dropout = getattr(args, 'relu_dropout', 0)
        self.activation_dropout = act_dropout
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, embed_dim)
        self.final_layer_norm = LayerNorm(embed_dim)
    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy ``layer_norms.{0,1}`` parameters to their new names."""
        renames = (('0', 'self_attn_layer_norm'), ('1', 'final_layer_norm'))
        for old, new in renames:
            for param in ('weight', 'bias'):
                old_key = '{}.layer_norms.{}.{}'.format(name, old, param)
                if old_key in state_dict:
                    new_key = '{}.{}.{}'.format(name, new, param)
                    state_dict[new_key] = state_dict.pop(old_key)
    def forward(self, x, encoder_padding_mask):
        """Run one encoder layer over ``x`` (sequence-first layout, as
        produced by the encoder's transpose) with an optional key padding
        mask; returns a tensor of the same shape."""
        # --- self-attention sub-layer ---
        shortcut = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = shortcut + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # Keep the most recent attention weights available for inspection.
        self.attn_weight = attn_weight
        # --- feed-forward sub-layer ---
        shortcut = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = shortcut + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        """Apply ``layer_norm`` either before or after the sub-layer,
        depending on ``self.normalize_before``; exactly one of ``before``
        and ``after`` must be set."""
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        return x
class TransformerS2EncoderLayer(nn.Module):
    """Encoder layer with two parallel attention branches: self-attention
    over the token stream and cross-attention into the BERT encoder output.
    The two branch outputs are mixed with weights from :meth:`get_ratio`
    before the residual add, followed by a feed-forward sub-layer.
    """
    def __init__(self, args, bert_gate=True):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        # BERT's hidden size may differ from the encoder dimension, hence
        # the explicit kdim/vdim.
        bert_out_dim = args.bert_out_dim
        self.bert_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            kdim=bert_out_dim, vdim=bert_out_dim,
            dropout=args.attention_dropout,
        )
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # Fall back to the legacy flag name.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
        # Static mixing weights for the two attention branches.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        # Optional stochastic branch dropout / mixup (training only).
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
        if not bert_gate:
            # Gate the BERT branch off entirely for this layer.
            self.bert_ratio = 0.
            self.encoder_bert_dropout = False
            self.encoder_bert_mixup = False
    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy ``layer_norms.{0,1}`` parameters to their new names."""
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if k in state_dict:
                    state_dict[
                        '{}.{}.{}'.format(name, new, m)
                    ] = state_dict[k]
                    del state_dict[k]
    def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask):
        """Run one layer; both attention branches read the same (possibly
        pre-normalised) input and their outputs are mixed per get_ratio()."""
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask)
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        x2 = F.dropout(x2, p=self.dropout, training=self.training)
        ratios = self.get_ratio()
        # Weighted combination of the two branches plus the residual.
        x = residual + ratios[0] * x1 + ratios[1] * x2
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x
    def get_ratio(self):
        """Return ``[self_attn_weight, bert_attn_weight]``.

        With ``encoder_bert_dropout`` enabled during training, one branch is
        randomly dropped (or, with mixup, weights are sampled uniformly).
        NOTE(review): with dropout enabled but not training, this returns
        [0.5, 0.5] rather than the configured static ratios -- confirm this
        is intended.
        """
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        """Apply ``layer_norm`` pre- or post-sub-layer depending on
        ``self.normalize_before``; exactly one of before/after must be set."""
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
class TransformerDecoderLayer(nn.Module):
    """Decoder layer with three attention stages: masked self-attention,
    then parallel encoder attention and BERT attention (mixed per
    :meth:`get_ratio`), then a feed-forward sub-layer."""
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # Fall back to the legacy flag name.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before
        # ONNX-export-friendly LayerNorm when char_inputs is set.
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            # BERT's hidden size may differ from the decoder dimension,
            # hence the explicit kdim/vdim.
            bert_out_dim = args.bert_out_dim
            self.bert_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                kdim=bert_out_dim, vdim=bert_out_dim,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Static mixing weights for the two cross-attention branches.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        # Optional stochastic branch dropout / mixup (training only).
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
        if not bert_gate:
            # Gate the BERT branch off entirely for this layer.
            self.bert_ratio = 0.
            self.encoder_bert_dropout = False
            self.encoder_bert_mixup = False
    def prepare_for_onnx_export_(self):
        # Switch forward() to the ONNX-trace-compatible return path.
        self.onnx_trace = True
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """Run one decoder layer.

        Returns ``(x, attn)`` -- hidden states and (encoder-)attention
        weights -- or ``(x, attn, self_attn_state)`` on the ONNX trace path.
        """
        # --- masked self-attention sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental KV cache from an externally provided state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # --- parallel encoder attention + BERT attention sub-layer ---
        if self.encoder_attn is not None:
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                # Seed the encoder-attention KV cache from provided state.
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x1, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x2, _ = self.bert_attn(
                query=x,
                key=bert_encoder_out,
                value=bert_encoder_out,
                key_padding_mask=bert_encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x1 = F.dropout(x1, p=self.dropout, training=self.training)
            x2 = F.dropout(x2, p=self.dropout, training=self.training)
            ratios = self.get_ratio()
            # Weighted combination of the two branches plus the residual.
            x = residual + ratios[0] * x1 + ratios[1] * x2
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
        # --- feed-forward sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # ONNX trace path: also surface the self-attention KV cache.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def get_ratio(self):
        """Return ``[encoder_attn_weight, bert_attn_weight]``.

        With ``encoder_bert_dropout`` enabled during training, one branch is
        randomly dropped (or, with mixup, weights are sampled uniformly).
        NOTE(review): with dropout enabled but not training, this returns
        [0.5, 0.5] rather than the configured static ratios -- confirm this
        is intended.
        """
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        """Apply ``layer_norm`` pre- or post-sub-layer depending on
        ``self.normalize_before``; exactly one of before/after must be set."""
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Toggle whether attention weights are computed during generation.
        self.need_attn = need_attn
class TransformerStandardDecoderLayer(nn.Module):
    """Standard Transformer decoder layer: self-attention, optional
    encoder-decoder attention, and a position-wise feed-forward block,
    each wrapped in a residual connection with pre- or post-layer-norm.

    The ``bert_encoder_out``/``bert_encoder_padding_mask`` arguments are
    accepted for interface parity with the BERT-fused decoder layers but
    are not used by this class's ``forward``.
    """
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        # Masked decoder self-attention over previously generated tokens.
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # Backwards compatibility: older configs named this relu_dropout.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        # Pre-norm (True) vs. post-norm (False) residual arrangement.
        self.normalize_before = args.decoder_normalize_before
        # char_inputs selects the ONNX-exportable LayerNorm implementation.
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Mixing weights consumed by get_ratio(); note this class's forward()
        # does not itself mix BERT features -- kept for interface parity.
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        if not bert_gate:
            self.bert_ratio = 0.
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
    def prepare_for_onnx_export_(self):
        # When tracing, forward() additionally returns the cached self-attn state.
        self.onnx_trace = True
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """Run one decoder layer.

        Args:
            x: layer input; presumably (seq_len, batch, embed_dim) as
                elsewhere in fairseq -- TODO confirm.
            incremental_state: per-step cache for incremental decoding.
            prev_self_attn_state / prev_attn_state: externally supplied
                cached key/value pairs injected into the attention buffers.
        Returns:
            ``(x, attn)``, or ``(x, attn, self_attn_state)`` when ONNX
            tracing with an incremental state.
        """
        # --- masked self-attention block (residual + maybe pre/post norm) ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental cache with externally provided key/value.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # --- encoder-decoder attention block (skipped when no_encoder_attn) ---
        if self.encoder_attn is not None:
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            # static_kv: encoder keys/values do not change across decode steps.
            x1, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            x1 = F.dropout(x1, p=self.dropout, training=self.training)
            x = residual + x1
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
        # --- position-wise feed-forward block ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # Expose the cached self-attention state for ONNX export.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def get_ratio(self):
        """Return ``[encoder_weight, bert_weight]`` mixing coefficients.

        With ``encoder_bert_dropout`` enabled during training, one of the
        two feature streams is stochastically dropped (weight 0/1), or the
        two are mixed with a random convex combination when
        ``encoder_bert_mixup`` is set. Not called from this class's
        forward(); presumably used by callers of the fused variant --
        TODO confirm.
        """
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply the given layer norm either before or after the sub-block,
        # depending on the pre-norm/post-norm setting; exactly one of
        # before/after must be passed.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Toggle attention-weight computation for generation; extra kwargs
        # are accepted and ignored.
        self.need_attn = need_attn
class TransformerDecoderLayerStack(nn.Module):
    """Transformer decoder layer that attends to BOTH the regular encoder
    output and a BERT encoder output, applying the two cross-attention
    blocks sequentially ("stacked"); ``args.bert_first`` selects which of
    the two runs first.
    """
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        # Masked decoder self-attention.
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # Backwards compatibility: older configs named this relu_dropout.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before
        # char_inputs selects the ONNX-exportable LayerNorm implementation.
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
            # BERT features may have a different width, so key/value dims are
            # set to the BERT output dimension.
            bert_out_dim = args.bert_out_dim
            self.bert_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                kdim=bert_out_dim, vdim=bert_out_dim,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        # Whether BERT cross-attention runs before encoder cross-attention.
        self.bert_first = args.bert_first
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
    def prepare_for_onnx_export_(self):
        # When tracing, forward() additionally returns the cached self-attn state.
        self.onnx_trace = True
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """Run one decoder layer: self-attention, then encoder and BERT
        cross-attention in the configured order, then the feed-forward
        block. Returns ``(x, attn)``, or ``(x, attn, self_attn_state)``
        when ONNX tracing with an incremental state.
        """
        # --- masked self-attention block ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental cache with externally provided key/value.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        if self.encoder_attn is not None:
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            # One generic cross-attention block (residual + maybe pre/post
            # norm), reused for both the encoder and the BERT features.
            def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state):
                residual = x
                x = self.maybe_layer_norm(layer_norm, x, before=True)
                x, attn = attnlayer(
                    query=x,
                    key=keyorvalue,
                    value=keyorvalue,
                    key_padding_mask=key_padding,
                    incremental_state=incremental_state,
                    static_kv=True,
                    need_weights=(not self.training and self.need_attn),
                )
                x = F.dropout(x, p=self.dropout, training=self.training)
                x = residual + x
                x = self.maybe_layer_norm(layer_norm, x, after=True)
                return x, attn
            if self.bert_first:
                x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                                  bert_encoder_padding_mask, incremental_state)
                x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                                  incremental_state)
            else:
                x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                                  incremental_state)
                x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                                  bert_encoder_padding_mask, incremental_state)
        # --- position-wise feed-forward block ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        if self.onnx_trace and incremental_state is not None:
            # Expose the cached self-attention state for ONNX export.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn
    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply the given layer norm before or after the sub-block depending
        # on the pre-norm/post-norm setting; exactly one flag must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Toggle attention-weight computation for generation; extra kwargs
        # are accepted and ignored.
        self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Build an ``nn.Embedding`` with transformer-style initialization.

    Weights are drawn from a normal distribution whose std shrinks with the
    embedding size (``embedding_dim ** -0.5``); the padding row is then
    zeroed so padding tokens contribute nothing.
    """
    layer = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(layer.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(layer.weight[padding_idx], 0)
    return layer
def Linear(in_features, out_features, bias=True):
    """Build an ``nn.Linear`` with Xavier-uniform weights and zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
    """Fill in defaults for the base 'transformer' architecture.

    Attributes already present on ``args`` (e.g. set on the command line)
    are left untouched; everything else receives the canonical default.
    Decoder dimensions default to the (already resolved) encoder values.
    """
    def fallback(name, value):
        # getattr keeps any existing value, even an explicit None/False/0.
        setattr(args, name, getattr(args, name, value))
    fallback('encoder_embed_path', None)
    fallback('encoder_embed_dim', 512)
    fallback('encoder_ffn_embed_dim', 2048)
    fallback('encoder_layers', 6)
    fallback('encoder_attention_heads', 8)
    fallback('encoder_normalize_before', False)
    fallback('encoder_learned_pos', False)
    fallback('decoder_embed_path', None)
    # These depend on encoder attributes resolved just above.
    fallback('decoder_embed_dim', args.encoder_embed_dim)
    fallback('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    fallback('decoder_layers', 6)
    fallback('decoder_attention_heads', 8)
    fallback('decoder_normalize_before', False)
    fallback('decoder_learned_pos', False)
    fallback('attention_dropout', 0.)
    fallback('activation_dropout', 0.)
    fallback('activation_fn', 'relu')
    fallback('dropout', 0.1)
    fallback('adaptive_softmax_cutoff', None)
    fallback('adaptive_softmax_dropout', 0)
    fallback('share_decoder_input_output_embed', False)
    fallback('share_all_embeddings', False)
    fallback('no_token_positional_embeddings', False)
    fallback('adaptive_input', False)
    fallback('decoder_output_dim', args.decoder_embed_dim)
    fallback('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformers2', 'transformers2')
def base_architecture_s2(args):
    """Fill in defaults for the 'transformers2' architecture.

    Identical defaults to ``base_architecture``; existing attributes on
    ``args`` are preserved and only missing ones are assigned.
    """
    def fallback(name, value):
        # getattr keeps any existing value, even an explicit None/False/0.
        setattr(args, name, getattr(args, name, value))
    fallback('encoder_embed_path', None)
    fallback('encoder_embed_dim', 512)
    fallback('encoder_ffn_embed_dim', 2048)
    fallback('encoder_layers', 6)
    fallback('encoder_attention_heads', 8)
    fallback('encoder_normalize_before', False)
    fallback('encoder_learned_pos', False)
    fallback('decoder_embed_path', None)
    # These depend on encoder attributes resolved just above.
    fallback('decoder_embed_dim', args.encoder_embed_dim)
    fallback('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    fallback('decoder_layers', 6)
    fallback('decoder_attention_heads', 8)
    fallback('decoder_normalize_before', False)
    fallback('decoder_learned_pos', False)
    fallback('attention_dropout', 0.)
    fallback('activation_dropout', 0.)
    fallback('activation_fn', 'relu')
    fallback('dropout', 0.1)
    fallback('adaptive_softmax_cutoff', None)
    fallback('adaptive_softmax_dropout', 0)
    fallback('share_decoder_input_output_embed', False)
    fallback('share_all_embeddings', False)
    fallback('no_token_positional_embeddings', False)
    fallback('adaptive_input', False)
    fallback('decoder_output_dim', args.decoder_embed_dim)
    fallback('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformerstack', 'transformerstack')
def base_stack_architecture(args):
    """Fill in defaults for the 'transformerstack' architecture.

    Identical defaults to ``base_architecture``; existing attributes on
    ``args`` are preserved and only missing ones are assigned.
    """
    def fallback(name, value):
        # getattr keeps any existing value, even an explicit None/False/0.
        setattr(args, name, getattr(args, name, value))
    fallback('encoder_embed_path', None)
    fallback('encoder_embed_dim', 512)
    fallback('encoder_ffn_embed_dim', 2048)
    fallback('encoder_layers', 6)
    fallback('encoder_attention_heads', 8)
    fallback('encoder_normalize_before', False)
    fallback('encoder_learned_pos', False)
    fallback('decoder_embed_path', None)
    # These depend on encoder attributes resolved just above.
    fallback('decoder_embed_dim', args.encoder_embed_dim)
    fallback('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    fallback('decoder_layers', 6)
    fallback('decoder_attention_heads', 8)
    fallback('decoder_normalize_before', False)
    fallback('decoder_learned_pos', False)
    fallback('attention_dropout', 0.)
    fallback('activation_dropout', 0.)
    fallback('activation_fn', 'relu')
    fallback('dropout', 0.1)
    fallback('adaptive_softmax_cutoff', None)
    fallback('adaptive_softmax_dropout', 0)
    fallback('share_decoder_input_output_embed', False)
    fallback('share_all_embeddings', False)
    fallback('no_token_positional_embeddings', False)
    fallback('adaptive_input', False)
    fallback('decoder_output_dim', args.decoder_embed_dim)
    fallback('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
    """IWSLT De-En preset: smaller FFN and fewer heads than the base model."""
    for name, value in (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the base defaults.
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')
def transformer_s2_iwslt_de_en(args):
    """IWSLT De-En preset for the 's2' variant (smaller FFN, fewer heads)."""
    for name, value in (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the s2 base defaults.
    base_architecture_s2(args)
@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')
def transformerstack_iwslt_de_en(args):
    """IWSLT De-En preset for the stacked variant (smaller FFN, fewer heads)."""
    for name, value in (
        ('encoder_embed_dim', 512),
        ('encoder_ffn_embed_dim', 1024),
        ('encoder_attention_heads', 4),
        ('encoder_layers', 6),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 1024),
        ('decoder_attention_heads', 4),
        ('decoder_layers', 6),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the stack base defaults.
    base_stack_architecture(args)
@register_model_architecture('transformers2', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
    # WMT En-De simply uses the plain base hyperparameters of the
    # 'transformers2' variant.
    base_architecture_s2(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
    """'Big' transformer preset (1024-dim, 16 heads, dropout 0.3)."""
    for name, value in (
        ('encoder_embed_dim', 1024),
        ('encoder_ffn_embed_dim', 4096),
        ('encoder_attention_heads', 16),
        ('encoder_normalize_before', False),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
        ('dropout', 0.3),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the base defaults.
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')
def transformer_s2_vaswani_wmt_en_de_big(args):
    """'Big' preset for the 's2' variant (1024-dim, 16 heads, dropout 0.3)."""
    for name, value in (
        ('encoder_embed_dim', 1024),
        ('encoder_ffn_embed_dim', 4096),
        ('encoder_attention_heads', 16),
        ('encoder_normalize_before', False),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
        ('dropout', 0.3),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the s2 base defaults.
    base_architecture_s2(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
    # En-Fr big model: identical to the En-De big preset but with the lower
    # default dropout of 0.1.
    args.dropout = getattr(args, 'dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
    # Same as the Vaswani big preset, plus attention dropout of 0.1.
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
    """Big preset in tensor2tensor style: pre-norm plus extra dropout."""
    for name, value in (
        ('encoder_normalize_before', True),
        ('decoder_normalize_before', True),
        ('attention_dropout', 0.1),
        ('activation_dropout', 0.1),
    ):
        # Keep any value already present on args; only fill in gaps.
        setattr(args, name, getattr(args, name, value))
    # Resolve every remaining hyperparameter from the big preset.
    transformer_vaswani_wmt_en_de_big(args)
| true | true |
79003634dbd6d860663da73bfd650b2a6c30b93e | 3,447 | py | Python | impression/tests/test_distribution.py | gregschmit/django-impression | b4d624802830d00a136c2bf40b6a8911c1269095 | [
"MIT"
] | 3 | 2019-12-11T10:04:55.000Z | 2019-12-20T22:15:52.000Z | impression/tests/test_distribution.py | gregschmit/django-impression | b4d624802830d00a136c2bf40b6a8911c1269095 | [
"MIT"
] | null | null | null | impression/tests/test_distribution.py | gregschmit/django-impression | b4d624802830d00a136c2bf40b6a8911c1269095 | [
"MIT"
] | null | null | null | """
This module tests distribution expansion. Tests should focus on ensuring that we
can expand distributions without missing any email address, collecting
duplicates, or running into infinite loops on self-referencing or cyclic
distributions.
"""
from django.test import TestCase
from ..models import EmailAddress, Distribution
class DistributionTestCase(TestCase):
    """Tests for ``Distribution.collect_email_addresses`` expansion.

    Covers flat distributions plus duplicate, self-referencing, and cyclic
    membership graphs, which must neither drop nor duplicate addresses,
    and must terminate.
    """

    def setUp(self):
        self.test1 = EmailAddress.objects.create(email_address="test1@example.org")
        self.test2 = EmailAddress.objects.create(email_address="test2@example.org")
        self.all_emails = {self.test1, self.test2}
        self.disti = Distribution.objects.create(name="Test Disti")
        self.disti.email_addresses.add(self.test1, self.test2)
        # build disti with duplicates: both addresses directly, plus a nested
        # distribution containing the same addresses
        self.dupe_disti = Distribution.objects.create(name="Dupe Disti")
        self.dupe_disti.email_addresses.add(self.test1, self.test2)
        self.dupe_disti.distributions.add(self.disti)
        # build disti that lists itself as a member (self reference)
        self.self_disti = Distribution.objects.create(name="Self Disti")
        self.self_disti.email_addresses.add(self.test1)
        self.self_disti.distributions.add(self.self_disti)
        # build two distis that reference each other (cycle)
        self.cyclic_disti1 = Distribution.objects.create(name="Cyclic Disti 1")
        self.cyclic_disti1.email_addresses.add(self.test1)
        self.cyclic_disti2 = Distribution.objects.create(name="Cyclic Disti 2")
        self.cyclic_disti2.email_addresses.add(self.test2)
        self.cyclic_disti1.distributions.add(self.cyclic_disti2)
        self.cyclic_disti2.distributions.add(self.cyclic_disti1)

    def test_constructor_properties(self):
        """The basic distribution stores its name and direct members."""
        self.assertEqual(self.disti.name, "Test Disti")
        emails = self.disti.email_addresses.all()
        self.assertIn(self.test1, emails)
        self.assertIn(self.test2, emails)

    def test_collect_distribution(self):
        """A flat distribution expands to exactly its two addresses."""
        test_emails = self.disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))

    def test_collect_distribution_with_duplicates(self):
        """Duplicate membership yields each address only once."""
        test_emails = self.dupe_disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))

    def test_collect_distribution_with_self_references(self):
        """A self-referencing distribution terminates with its one address."""
        test_emails = self.self_disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 1)
        self.assertSetEqual({self.test1}, set(test_emails))

    def test_collect_distribution_with_cyclic_references(self):
        """Cyclic references terminate and collect each address once."""
        test_emails = self.cyclic_disti1.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
        test_emails = self.cyclic_disti2.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
| 42.036585 | 88 | 0.707572 |
from django.test import TestCase
from ..models import EmailAddress, Distribution
class DistributionTestCase(TestCase):
    """Tests for Distribution email expansion (flat/duplicate/self/cyclic)."""
    def setUp(self):
        # Two plain addresses shared by every fixture below.
        self.test1 = EmailAddress.objects.create(email_address="test1@example.org")
        self.test2 = EmailAddress.objects.create(email_address="test2@example.org")
        self.all_emails = set([self.test1, self.test2])
        self.disti = Distribution.objects.create(name="Test Disti")
        self.disti.email_addresses.add(self.test1, self.test2)
        # Distribution holding both addresses directly AND via a nested disti.
        self.dupe_disti = Distribution.objects.create(name="Dupe Disti")
        self.dupe_disti.email_addresses.add(self.test1, self.test2)
        self.dupe_disti.distributions.add(self.disti)
        # Distribution that lists itself as a member.
        self.self_disti = Distribution.objects.create(name="Self Disti")
        self.self_disti.email_addresses.add(self.test1)
        self.self_disti.distributions.add(self.self_disti)
        # Two distributions referencing each other, forming a cycle.
        self.cyclic_disti1 = Distribution.objects.create(name="Cyclic Disti 1")
        self.cyclic_disti1.email_addresses.add(self.test1)
        self.cyclic_disti2 = Distribution.objects.create(name="Cyclic Disti 2")
        self.cyclic_disti2.email_addresses.add(self.test2)
        self.cyclic_disti1.distributions.add(self.cyclic_disti2)
        self.cyclic_disti2.distributions.add(self.cyclic_disti1)
    def test_constructor_properties(self):
        """Name and direct membership are stored as given."""
        self.assertEqual(self.disti.name, "Test Disti")
        emails = self.disti.email_addresses.all()
        self.assertIn(self.test1, emails)
        self.assertIn(self.test2, emails)
    def test_collect_distribution(self):
        """A flat distribution expands to exactly its two addresses."""
        test_emails = self.disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
    def test_collect_distribution_with_duplicates(self):
        """Duplicate membership yields each address only once."""
        test_emails = self.dupe_disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
    def test_collect_distribution_with_self_references(self):
        """A self reference terminates and yields the single address."""
        test_emails = self.self_disti.collect_email_addresses()
        self.assertEqual(len(test_emails), 1)
        self.assertSetEqual(set([self.test1]), set(test_emails))
    def test_collect_distribution_with_cyclic_references(self):
        """Cyclic references terminate; each address collected once."""
        test_emails = self.cyclic_disti1.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
        test_emails = self.cyclic_disti2.collect_email_addresses()
        self.assertEqual(len(test_emails), 2)
        self.assertSetEqual(self.all_emails, set(test_emails))
| true | true |
79003746d2d5deb52b2d7752d9c7346c0c83fe2d | 4,505 | py | Python | tools/make_ctocpp_header.py | toryant/cef | c80264ab117bd3f1a60dd3267ee247bd9f15c425 | [
"BSD-3-Clause"
] | 4 | 2019-10-30T10:11:34.000Z | 2021-08-24T23:04:30.000Z | tools/make_ctocpp_header.py | toryant/cef | c80264ab117bd3f1a60dd3267ee247bd9f15c425 | [
"BSD-3-Clause"
] | null | null | null | tools/make_ctocpp_header.py | toryant/cef | c80264ab117bd3f1a60dd3267ee247bd9f15c425 | [
"BSD-3-Clause"
] | 5 | 2018-10-16T09:50:06.000Z | 2020-12-07T20:12:13.000Z | # Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from cef_parser import *
def make_function_body_block(cls):
  """Return the C++ method declarations for one wrapped class.

  Client-side wrappers use the C++11 ``override`` keyword directly, while
  library-side wrappers use the ``OVERRIDE`` macro.
  """
  suffix = ' override;\n' if cls.is_client_side() else ' OVERRIDE;\n'
  lines = ['  // ' + cls.get_name() + ' methods.\n']
  for func in cls.get_virtual_funcs():
    lines.append('  ' + func.get_cpp_proto() + suffix)
  return ''.join(lines)
def make_function_body(header, cls):
  """Concatenate method declaration blocks for *cls* and all ancestors.

  Walks the parent chain until a CEF base class is reached; consecutive
  blocks are separated by a blank line. Raises Exception when a parent
  class cannot be found in *header*.
  """
  blocks = [make_function_body_block(cls)]
  parent_name = cls.get_parent_name()
  while not is_base_class(parent_name):
    parent_cls = header.get_class(parent_name)
    if parent_cls is None:
      raise Exception('Class does not exist: ' + parent_name)
    blocks.append(make_function_body_block(parent_cls))
    parent_name = parent_cls.get_parent_name()
  return '\n'.join(blocks)
def make_ctocpp_header(header, clsname):
  """Return the full contents of the CToCpp wrapper header for clsname.

  Emits the copyright banner, include guard, side-check preprocessor block,
  required includes, and the wrapper class declaration whose method body is
  produced by make_function_body().
  """
  cls = header.get_class(clsname)
  if cls is None:
    raise Exception('Class does not exist: ' + clsname)
  clientside = cls.is_client_side()
  # build the include-guard symbol, e.g. VIEWS_CEF_FOO -> VIEWS_CEFFOO
  directory = cls.get_file_directory()
  defname = ''
  if not directory is None:
    defname += directory + '_'
  defname += get_capi_name(clsname[3:], False)
  defname = defname.upper()
  capiname = cls.get_capi_name()
  result = get_copyright()
  result += '#ifndef CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'+ \
            '#define CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n' + \
            '#pragma once\n'
  # emit a compile-time check that the header is included on the right side
  if clientside:
    result += """
#if !defined(BUILDING_CEF_SHARED)
#error This file can be included DLL-side only
#endif
"""
  else:
    result += """
#if !defined(WRAPPING_CEF_SHARED)
#error This file can be included wrapper-side only
#endif
"""
  # build the function body
  func_body = make_function_body(header, cls)
  # include standard headers
  if func_body.find('std::map') > 0 or func_body.find('std::multimap') > 0:
    result += '\n#include <map>'
  if func_body.find('std::vector') > 0:
    result += '\n#include <vector>'
  # include the headers for this class
  result += '\n#include "include/'+cls.get_file_name()+'"'+ \
            '\n#include "include/capi/'+cls.get_capi_file_name()+'"\n'
  # include headers for any forward declared classes that are not in the same file
  declares = cls.get_forward_declares()
  for declare in declares:
    dcls = header.get_class(declare)
    if dcls.get_file_name() != cls.get_file_name():
      result += '#include "include/'+dcls.get_file_name()+'"\n' \
                '#include "include/capi/'+dcls.get_capi_file_name()+'"\n'
  # scoped (owned) classes use a different template than ref-counted ones
  base_class_name = header.get_base_class_name(clsname)
  base_scoped = True if base_class_name == 'CefBaseScoped' else False
  if base_scoped:
    template_file = 'ctocpp_scoped.h'
    template_class = 'CefCToCppScoped'
  else:
    template_file = 'ctocpp_ref_counted.h'
    template_class = 'CefCToCppRefCounted'
  result += '#include "libcef_dll/ctocpp/' + template_file + '"'
  result += '\n\n// Wrap a C structure with a C++ class.\n'
  if clientside:
    result += '// This class may be instantiated and accessed DLL-side only.\n'
  else:
    result += '// This class may be instantiated and accessed wrapper-side only.\n'
  result += 'class '+clsname+'CToCpp\n'+ \
      ' : public ' + template_class + '<'+clsname+'CToCpp, '+clsname+', '+capiname+'> {\n'+ \
      ' public:\n'+ \
      '  '+clsname+'CToCpp();\n\n'
  result += func_body
  result += '};\n\n'
  result += '#endif // CEF_LIBCEF_DLL_CTOCPP_' + defname + '_CTOCPP_H_'
  return result
def write_ctocpp_header(header, clsname, dir):
  """Generate the CToCpp header for *clsname* under *dir*.

  The output file mirrors the directory offset of the class's include
  file. Returns a ``(path, contents)`` tuple; nothing is written to disk.
  """
  cls = header.get_class(clsname)
  out_dir = os.path.dirname(os.path.join(dir, cls.get_file_name()))
  out_path = os.path.join(out_dir, get_capi_name(clsname[3:], False) + '_ctocpp.h')
  return (out_path, make_ctocpp_header(header, clsname))
# test the module from the command line: generate and print a single header
if __name__ == "__main__":
  import sys
  # verify that the correct number of command-line arguments are provided
  if len(sys.argv) < 3:
    sys.stderr.write('Usage: ' + sys.argv[0] + ' <infile> <classname>')
    sys.exit()
  # create the header object and parse the input file
  header = obj_header()
  header.add_file(sys.argv[1])
  # dump the result to stdout
  sys.stdout.write(make_ctocpp_header(header, sys.argv[2]))
| 29.444444 | 104 | 0.666149 |
from cef_parser import *
def make_function_body_block(cls):
  """Return the C++ method declarations for one wrapped class."""
  impl = '  // ' + cls.get_name() + ' methods.\n'
  funcs = cls.get_virtual_funcs()
  for func in funcs:
    impl += '  ' + func.get_cpp_proto()
    if cls.is_client_side():
      # Client-side wrappers use the C++11 keyword directly.
      impl += ' override;\n'
    else:
      # Library-side wrappers use the OVERRIDE macro.
      impl += ' OVERRIDE;\n'
  return impl
def make_function_body(header, cls):
  """Concatenate method blocks for cls and all non-base ancestors."""
  impl = make_function_body_block(cls)
  cur_cls = cls
  while True:
    parent_name = cur_cls.get_parent_name()
    if is_base_class(parent_name):
      # Stop once the CEF base class is reached.
      break
    else:
      parent_cls = header.get_class(parent_name)
      if parent_cls is None:
        raise Exception('Class does not exist: ' + parent_name)
      # Separate consecutive ancestor blocks with a blank line.
      if len(impl) > 0:
        impl += '\n'
      impl += make_function_body_block(parent_cls)
    cur_cls = header.get_class(parent_name)
  return impl
def make_ctocpp_header(header, clsname):
  """Return the full contents of the CToCpp wrapper header for clsname."""
  cls = header.get_class(clsname)
  if cls is None:
    raise Exception('Class does not exist: ' + clsname)
  clientside = cls.is_client_side()
  # Build the include-guard symbol from the directory and class name.
  directory = cls.get_file_directory()
  defname = ''
  if not directory is None:
    defname += directory + '_'
  defname += get_capi_name(clsname[3:], False)
  defname = defname.upper()
  capiname = cls.get_capi_name()
  result = get_copyright()
  result += '#ifndef CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'+ \
            '#define CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n' + \
            '#pragma once\n'
  # Compile-time check that the header is included on the correct side.
  if clientside:
    result += """
#if !defined(BUILDING_CEF_SHARED)
#error This file can be included DLL-side only
#endif
"""
  else:
    result += """
#if !defined(WRAPPING_CEF_SHARED)
#error This file can be included wrapper-side only
#endif
"""
  func_body = make_function_body(header, cls)
  # Pull in standard headers required by the generated method signatures.
  if func_body.find('std::map') > 0 or func_body.find('std::multimap') > 0:
    result += '\n#include <map>'
  if func_body.find('std::vector') > 0:
    result += '\n#include <vector>'
  # Include the C++ and C API headers for this class.
  result += '\n#include "include/'+cls.get_file_name()+'"'+ \
            '\n#include "include/capi/'+cls.get_capi_file_name()+'"\n'
  # Include headers for forward-declared classes living in other files.
  declares = cls.get_forward_declares()
  for declare in declares:
    dcls = header.get_class(declare)
    if dcls.get_file_name() != cls.get_file_name():
      result += '#include "include/'+dcls.get_file_name()+'"\n' \
                '#include "include/capi/'+dcls.get_capi_file_name()+'"\n'
  # Scoped (owned) classes use a different template than ref-counted ones.
  base_class_name = header.get_base_class_name(clsname)
  base_scoped = True if base_class_name == 'CefBaseScoped' else False
  if base_scoped:
    template_file = 'ctocpp_scoped.h'
    template_class = 'CefCToCppScoped'
  else:
    template_file = 'ctocpp_ref_counted.h'
    template_class = 'CefCToCppRefCounted'
  result += '#include "libcef_dll/ctocpp/' + template_file + '"'
  result += '\n\n// Wrap a C structure with a C++ class.\n'
  if clientside:
    result += '// This class may be instantiated and accessed DLL-side only.\n'
  else:
    result += '// This class may be instantiated and accessed wrapper-side only.\n'
  result += 'class '+clsname+'CToCpp\n'+ \
      ' : public ' + template_class + '<'+clsname+'CToCpp, '+clsname+', '+capiname+'> {\n'+ \
      ' public:\n'+ \
      '  '+clsname+'CToCpp();\n\n'
  result += func_body
  result += '};\n\n'
  result += '#endif // CEF_LIBCEF_DLL_CTOCPP_' + defname + '_CTOCPP_H_'
  return result
def write_ctocpp_header(header, clsname, dir):
  """Return a (path, contents) pair for the CToCpp header of |clsname|.

  The output path mirrors the class's include-file location beneath |dir|,
  with a '<capi name>_ctocpp.h' file name; the contents are produced by
  make_ctocpp_header().
  """
  target_cls = header.get_class(clsname)
  out_dir = os.path.dirname(os.path.join(dir, target_cls.get_file_name()))
  out_name = get_capi_name(clsname[3:], False) + '_ctocpp.h'
  return (os.path.join(out_dir, out_name),
          make_ctocpp_header(header, clsname))
# Command-line entry point for manual testing: generate the CToCpp header
# for a single class and print it to stdout.
if __name__ == "__main__":
  import sys
  if len(sys.argv) < 3:
    # Require both the input header file and the target class name.
    sys.stderr.write('Usage: ' + sys.argv[0] + ' <infile> <classname>')
    sys.exit()
  header = obj_header()
  header.add_file(sys.argv[1])
  sys.stdout.write(make_ctocpp_header(header, sys.argv[2]))
| true | true |
7900375d4e43f7ab0e86d95065991384240948da | 668 | py | Python | var/spack/repos/builtin/packages/py-stevedore/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | var/spack/repos/builtin/packages/py-stevedore/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/py-stevedore/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7 | 2018-09-13T18:04:56.000Z | 2020-03-18T20:52:06.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyStevedore(PythonPackage):
    """Manage Dynamic Plugins for Python Applications."""

    homepage = "https://docs.openstack.org/stevedore/latest/"
    pypi = "stevedore/stevedore-1.28.0.tar.gz"

    version('1.28.0', sha256='f1c7518e7b160336040fee272174f1f7b29a46febb3632502a8f2055f973d60b')

    # Runtime dependencies; version pins follow the upstream
    # requirements for the 1.28.0 release.
    depends_on('python@2.6:')
    depends_on('py-six@1.10.0:', type=('build', 'run'))
    depends_on('py-pbr@2.0.0:2.1.0', type=('build', 'run'))
| 31.809524 | 96 | 0.714072 |
from spack import *
class PyStevedore(PythonPackage):
homepage = "https://docs.openstack.org/stevedore/latest/"
pypi = "stevedore/stevedore-1.28.0.tar.gz"
version('1.28.0', sha256='f1c7518e7b160336040fee272174f1f7b29a46febb3632502a8f2055f973d60b')
depends_on('python@2.6:')
depends_on('py-six@1.10.0:', type=('build', 'run'))
depends_on('py-pbr@2.0.0:2.1.0', type=('build', 'run'))
| true | true |
7900399dc72f4b5bb49e6f62341fbf29453d52e2 | 16,076 | py | Python | tensorflow_probability/python/distributions/zipf_test.py | OrenBochman/probability | eb4cff2c441e52f0604236b30d422577e498349c | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/zipf_test.py | OrenBochman/probability | eb4cff2c441e52f0604236b30d422577e498349c | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/zipf_test.py | OrenBochman/probability | eb4cff2c441e52f0604236b30d422577e498349c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class ZipfTest(test_util.TestCase):
  """Tests for tfd.Zipf.

  Covers batch/event shapes, parameter validation, log_prob/prob and
  log_cdf/cdf against scipy.stats.zipf (both exact for integer arguments
  and bracketed for interpolated non-integer arguments), moments, and
  sampling statistics.
  """

  def assertBetween(self, x, minimum, maximum):
    """Asserts minimum <= x <= maximum."""
    self.assertGreaterEqual(x, minimum)
    self.assertLessEqual(x, maximum)

  def assertAllBetween(self, a, minval, maxval, atol=1e-6):
    """Asserts, elementwise, minval - atol <= a <= maxval + atol."""
    a = self._GetNdArray(a)
    minval = self._GetNdArray(minval)
    maxval = self._GetNdArray(maxval)
    self.assertEqual(a.shape, minval.shape)
    self.assertEqual(a.shape, maxval.shape)
    for idx, _ in np.ndenumerate(a):
      self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)

  def testZipfShape(self):
    power = tf.constant([3.0] * 5)
    zipf = tfd.Zipf(power=power, validate_args=True)

    self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
    self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
    self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
    self.assertEqual(zipf.event_shape, tf.TensorShape([]))

  def testInvalidPower(self):
    # Zipf requires power > 1.
    invalid_powers = [-.02, 0.5, -2., .99, 1.]
    for power in invalid_powers:
      with self.assertRaisesOpError("Condition x > y"):
        zipf = tfd.Zipf(power=power, validate_args=True)
        self.evaluate(zipf.mean())

  def testNanPower(self):
    zipf = tfd.Zipf(power=np.nan, validate_args=False)
    self.assertAllNan(self.evaluate(zipf.power))

  def testValidPower_ImplicitlyConvertsToFloat32(self):
    powers = [2, 10, 1.1]
    for power in powers:
      zipf = tfd.Zipf(power=power, validate_args=True)
      self.assertEqual(zipf.power.dtype, tf.float32)

  def testEventDtype(self):
    # Exercise every combination of power dtype and event dtype.
    # BUG FIX: the loop variables were previously overwritten with
    # tf.float32/tf.int32 constants inside the loop body, so only that
    # single combination was actually exercised.
    for power_dtype in [tf.float32, tf.float64]:
      for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
        power = tf.constant(5., dtype=power_dtype)
        zipf = tfd.Zipf(power=power, dtype=event_dtype, validate_args=True)
        self.assertEqual(zipf.dtype, event_dtype)
        self.assertEqual(
            zipf.dtype, zipf.sample(10, seed=test_util.test_seed()).dtype)
        self.assertEqual(
            zipf.dtype, zipf.sample(1, seed=test_util.test_seed()).dtype)
        self.assertEqual(zipf.dtype, zipf.mode().dtype)

  def testInvalidEventDtype(self):
    with self.assertRaisesWithPredicateMatch(
        TypeError, "power.dtype .* not a supported .* type"):
      power = tf.constant(5., dtype=tf.float16)
      zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
      self.evaluate(zipf.sample(seed=test_util.test_seed()))

  def testZipfLogPmf_InvalidArgs(self):
    power = tf.constant([4.0])
    # Non-integer samples are rejected if validate_args is True and
    # interpolate_nondiscrete is False.
    zipf = tfd.Zipf(
        power=power, interpolate_nondiscrete=False, validate_args=True)
    non_integer_samples = [0.99, 4.5, 5.001, 1e-5]
    for x in non_integer_samples:

      with self.assertRaisesOpError("cannot contain fractional components"):
        self.evaluate(zipf.log_prob(x))

      with self.assertRaisesOpError("cannot contain fractional components"):
        self.evaluate(zipf.prob(x))

    # Negative samples are rejected if validate_args is True.
    zipf = tfd.Zipf(power=power, validate_args=True)
    negative_samples = [-3, -2, -1]
    for x in negative_samples:
      with self.assertRaisesOpError("must be non-negative"):
        self.evaluate(zipf.log_prob(x))

      with self.assertRaisesOpError("must be non-negative"):
        self.evaluate(zipf.prob(x))

  def testZipfLogPmf_IntegerArgs(self):
    batch_size = 9
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
    zipf = tfd.Zipf(power=power, validate_args=False)
    log_pmf = zipf.log_prob(x)
    self.assertEqual((batch_size,), log_pmf.shape)
    self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))

    pmf = zipf.prob(x)
    self.assertEqual((batch_size,), pmf.shape)
    self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))

  def testZipfLogPmf_NonIntegerArgs(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]

    zipf = tfd.Zipf(power=power, validate_args=False)
    log_pmf = zipf.log_prob(x)
    self.assertEqual((batch_size,), log_pmf.shape)

    # Check that log_pmf(x) of tfd.Zipf is between the values of
    # stats.zipf.logpmf for ceil(x) and floor(x).
    log_pmf_values = self.evaluate(log_pmf)
    floor_x = np.floor(x)
    ceil_x = np.ceil(x)
    self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
                          stats.zipf.logpmf(floor_x, power_v))

    # Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for
    # ceil(x) and floor(x).
    pmf = zipf.prob(x)
    self.assertEqual((batch_size,), pmf.shape)

    pmf_values = self.evaluate(pmf)
    self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
                          stats.zipf.pmf(floor_x, power_v))

  def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]

    zipf = tfd.Zipf(
        power=power, interpolate_nondiscrete=False, validate_args=False)
    log_pmf = zipf.log_prob(x)
    self.assertEqual((batch_size,), log_pmf.shape)

    log_pmf_values = self.evaluate(log_pmf)
    self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))

    pmf = zipf.prob(x)
    self.assertEqual((batch_size,), pmf.shape)

    pmf_values = self.evaluate(pmf)
    self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))

  def testZipfLogPmfMultidimensional_IntegerArgs(self):
    batch_size = 6
    power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
    power_v = [2.0, 4.0, 5.0]
    # Note: the int32 dtype truncates the fractional entries, so the pmf is
    # evaluated at the integer values [2, 3, 4, 5, 6, 7].
    x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T

    zipf = tfd.Zipf(power=power, validate_args=True)
    log_pmf = zipf.log_prob(x)
    self.assertEqual((6, 3), log_pmf.shape)
    self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))

    pmf = zipf.prob(x)
    self.assertEqual((6, 3), pmf.shape)
    self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))

  def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
    batch_size = 6
    power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
    power_v = [2.0, 4.0, 5.0]
    x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
    floor_x = np.floor(x)
    ceil_x = np.ceil(x)

    zipf = tfd.Zipf(power=power, validate_args=True)
    log_pmf = zipf.log_prob(x)
    self.assertEqual((6, 3), log_pmf.shape)
    self.assertAllBetween(
        self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
        stats.zipf.logpmf(floor_x, power_v))

    pmf = zipf.prob(x)
    self.assertEqual((6, 3), pmf.shape)
    self.assertAllBetween(
        self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
        stats.zipf.pmf(floor_x, power_v))

  def testZipfCdf_IntegerArgs(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]

    zipf = tfd.Zipf(power=power, validate_args=False)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((batch_size,), log_cdf.shape)
    self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((batch_size,), cdf.shape)
    self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))

  def testZipfCdf_NonIntegerArgsNoInterpolation(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]

    zipf = tfd.Zipf(
        power=power, interpolate_nondiscrete=False, validate_args=False)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((batch_size,), log_cdf.shape)
    self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((batch_size,), cdf.shape)
    self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))

  def testZipfCdf_NonIntegerArgsInterpolated(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
    floor_x = np.floor(x)
    ceil_x = np.ceil(x)

    zipf = tfd.Zipf(power=power, validate_args=False)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((batch_size,), log_cdf.shape)
    self.assertAllBetween(
        self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
        stats.zipf.logcdf(ceil_x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((batch_size,), cdf.shape)
    self.assertAllBetween(
        self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
        stats.zipf.cdf(ceil_x, power_v))

  # NOTE(review): this test duplicates testZipfCdf_NonIntegerArgsInterpolated
  # exactly (interpolation is the default). Kept for test-discovery
  # compatibility; consider removing one of the two.
  def testZipfCdf_NonIntegerArgs(self):
    batch_size = 12
    power = tf.constant([3.0] * batch_size)
    power_v = 3.0
    x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
    floor_x = np.floor(x)
    ceil_x = np.ceil(x)

    zipf = tfd.Zipf(power=power, validate_args=False)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((batch_size,), log_cdf.shape)
    self.assertAllBetween(
        self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
        stats.zipf.logcdf(ceil_x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((batch_size,), cdf.shape)
    self.assertAllBetween(
        self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
        stats.zipf.cdf(ceil_x, power_v))

  def testZipfCdfMultidimensional_IntegerArgs(self):
    batch_size = 6
    power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
    power_v = [2.0, 4.0, 5.0]
    x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T

    zipf = tfd.Zipf(power=power, validate_args=True)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((6, 3), log_cdf.shape)
    self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((6, 3), cdf.shape)
    self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))

  def testZipfCdfMultidimensional_NonIntegerArgs(self):
    batch_size = 6
    power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
    power_v = [2.0, 4.0, 5.0]
    x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
    floor_x = np.floor(x)
    ceil_x = np.ceil(x)

    zipf = tfd.Zipf(power=power, validate_args=True)
    log_cdf = zipf.log_cdf(x)
    self.assertEqual((6, 3), log_cdf.shape)
    self.assertAllBetween(
        self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
        stats.zipf.logcdf(ceil_x, power_v))

    cdf = zipf.cdf(x)
    self.assertEqual((6, 3), cdf.shape)
    self.assertAllBetween(
        self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
        stats.zipf.cdf(ceil_x, power_v))

  def testZipfMean(self):
    power_v = [2.0, 3.0, 2.5]
    zipf = tfd.Zipf(power=power_v, validate_args=True)
    self.assertEqual((3,), zipf.mean().shape)
    self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))

  def testZipfVariance(self):
    power_v = [4.0, 3.0, 5.5]  # var is undefined for power <= 3
    zipf = tfd.Zipf(power=power_v, validate_args=True)
    self.assertEqual((3,), zipf.variance().shape)
    stat_vars = np.vectorize(stats.zipf.var)(power_v)
    self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)

  def testZipfStd(self):
    power_v = [4.0, 3.5, 4.5]
    zipf = tfd.Zipf(power=power_v, validate_args=True)
    self.assertEqual((3,), zipf.stddev().shape)
    stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
    self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)

  def testZipfMode(self):
    # The mode of Zipf is always 1, regardless of power.
    power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
    zipf = tfd.Zipf(power=power_v, validate_args=False)
    self.assertEqual((6,), zipf.mode().shape)
    self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))

  def testZipfSample(self):
    power_v = 5.
    n = int(500e4)

    for power_dtype in [tf.float32, tf.float64]:
      power = tf.constant(power_v, dtype=power_dtype)

      for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
        zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
        samples = zipf.sample(n, seed=test_util.test_seed())
        sample_values = self.evaluate(samples)
        self.assertEqual((n,), samples.shape)
        self.assertEqual((n,), sample_values.shape)
        self.assertAllClose(
            sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
        self.assertAllClose(
            sample_values.std(), stats.zipf.std(power_v), rtol=.03)

  def testZipfSample_ValidateArgs(self):
    power_v = 3.
    n = int(100e3)

    for power_dtype in [tf.float32, tf.float64]:
      power = tf.constant(power_v, dtype=power_dtype)

      for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
        zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
        samples = zipf.sample(n, seed=test_util.test_seed())
        self.evaluate(samples)

  def testZipfSampleMultidimensionalMean(self):
    power_v = np.array([np.arange(5, 15, dtype=np.float32)])  # 1 x 10
    zipf = tfd.Zipf(power=power_v, validate_args=True)
    n = int(100e3)
    samples = zipf.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual((n, 1, 10,), samples.shape)
    self.assertEqual((n, 1, 10,), sample_values.shape)

    # stats.zipf wants float64 params.
    stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
    self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)

  def testZipfSampleMultidimensionalStd(self):
    power_v = np.array([np.arange(5, 10, dtype=np.float32)])  # 1 x 5
    zipf = tfd.Zipf(power=power_v, validate_args=True)
    n = int(100e4)
    samples = zipf.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual((n, 1, 5), samples.shape)
    self.assertEqual((n, 1, 5), sample_values.shape)

    # stats.zipf wants float64 params.
    stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
    self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)

  # Test that sampling with the same seed twice gives the same results.
  def testZipfSampleMultipleTimes(self):
    n = 1000
    seed = test_util.test_seed()
    power = 1.5

    zipf1 = tfd.Zipf(power=power, name="zipf1", validate_args=True)
    tf.random.set_seed(seed)
    samples1 = self.evaluate(zipf1.sample(n, seed=seed))

    zipf2 = tfd.Zipf(power=power, name="zipf2", validate_args=True)
    tf.random.set_seed(seed)
    samples2 = self.evaluate(zipf2.sample(n, seed=seed))

    self.assertAllEqual(samples1, samples2)

  def testZipfSample_AvoidsInfiniteLoop(self):
    # power == 1 is outside the valid range; sampling must still terminate.
    zipf = tfd.Zipf(power=1., validate_args=False)
    n = 1000
    self.evaluate(zipf.sample(n, seed=test_util.test_seed()))
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  tf.test.main()
| 37.299304 | 79 | 0.669818 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class ZipfTest(test_util.TestCase):
def assertBetween(self, x, minimum, maximum):
self.assertGreaterEqual(x, minimum)
self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.mean())
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power_dtype = tf.float32
event_dtype = tf.int32
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype, validate_args=True)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(
zipf.dtype, zipf.sample(10, seed=test_util.test_seed()).dtype)
self.assertEqual(
zipf.dtype, zipf.sample(1, seed=test_util.test_seed()).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample(seed=test_util.test_seed()))
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
non_integer_samples = [0.99, 4.5, 5.001, 1e-5]
for x in non_integer_samples:
with self.assertRaisesOpError("cannot contain fractional components"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("cannot contain fractional components"):
self.evaluate(zipf.prob(x))
zipf = tfd.Zipf(power=power, validate_args=True)
negative_samples = [-3, -2, -1]
for x in negative_samples:
with self.assertRaisesOpError("must be non-negative"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("must be non-negative"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power, validate_args=True)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=True)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power, validate_args=True)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=True)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5]
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v, validate_args=False)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)])
zipf = tfd.Zipf(power=power_v, validate_args=True)
n = int(100e3)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)])
zipf = tfd.Zipf(power=power_v, validate_args=True)
n = int(100e4)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
def testZipfSampleMultipleTimes(self):
n = 1000
seed = test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1", validate_args=True)
tf.random.set_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2", validate_args=True)
tf.random.set_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1., validate_args=False)
n = 1000
self.evaluate(zipf.sample(n, seed=test_util.test_seed()))
if __name__ == "__main__":
tf.test.main()
| true | true |
79003a3a22fab3e829b2128c7adc350ce31a8348 | 1,439 | py | Python | tests/test_plots.py | rochamatcomp/python-rocha | bbf8b559f8052f8c081be29ef21d3e1f697477c3 | [
"MIT"
] | 1 | 2021-02-27T14:35:22.000Z | 2021-02-27T14:35:22.000Z | tests/test_plots.py | rochamatcomp/python-rocha | bbf8b559f8052f8c081be29ef21d3e1f697477c3 | [
"MIT"
] | null | null | null | tests/test_plots.py | rochamatcomp/python-rocha | bbf8b559f8052f8c081be29ef21d3e1f697477c3 | [
"MIT"
] | 1 | 2021-02-27T15:27:53.000Z | 2021-02-27T15:27:53.000Z | # -*- coding: utf-8 -*-
"""
:mod:`plots` -- Tests data plots
================================
.. module:: plots
:platform: Unix, Windows
:synopsis: Tests of the raster plots and processed data plots.
.. moduleauthor:: Andre Rocha <rocha.matcomp@gmail.com>
"""
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from src.rocha import plots
@image_comparison(baseline_images=['test_plot'],
                  extensions=['png'])
def test_plot():
    """
    Test the rasters plot as multiples subplots.
    """
    # The 12 input rasters form a 2 (model) x 2 (scenario) x 3 (period)
    # grid encoded in the file names: forest_<model><scenario><period>.tif.
    models = ('1', '2')        # HadGEM2, MIROC5
    scenarios = ('1', '2')     # RCP4.5, RCP8.5
    periods = ('1', '2', '3')  # 2011-2040, 2041-2070, 2071-2100
    rasters = [
        'data/relatives/forest_{}{}{}.tif'.format(model, scenario, period)
        for model in models
        for scenario in scenarios
        for period in periods
    ]

    title = 'Mean precipitation (mm/day)'
    subtitles = ['HadGEM2 RCP4.5', 'HadGEM2 RCP8.5', 'MIROC5 RCP4.5', 'MIROC5 RCP8.5']
    labels = ['2011-2040', '2041-2070', '2071-2100']
    color = 'RdYlBu_r'
    rows = 3
    cols = 4

    plots.maps(rasters, rows, cols, color, title, subtitles, labels)
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from src.rocha import plots
@image_comparison(baseline_images=['test_plot'],
extensions=['png'])
def test_plot():
rasters = ['data/relatives/forest_111.tif',
'data/relatives/forest_112.tif',
'data/relatives/forest_113.tif',
'data/relatives/forest_121.tif',
'data/relatives/forest_122.tif',
'data/relatives/forest_123.tif',
'data/relatives/forest_211.tif',
'data/relatives/forest_212.tif',
'data/relatives/forest_213.tif',
'data/relatives/forest_221.tif',
'data/relatives/forest_222.tif',
'data/relatives/forest_223.tif']
title = 'Mean precipitation (mm/day)'
subtitles = ['HadGEM2 RCP4.5', 'HadGEM2 RCP8.5', 'MIROC5 RCP4.5', 'MIROC5 RCP8.5']
labels = ['2011-2040', '2041-2070', '2071-2100']
color = 'RdYlBu_r'
rows = 3
cols = 4
plots.maps(rasters, rows, cols, color, title, subtitles, labels) | true | true |
79003c18e239271e0bc613ca9e261504d189d850 | 4,341 | py | Python | main.py | Troublor/ulauncher-numconverter | 98d5e01d82671eedc98c000053980ae7ceb4ea28 | [
"Apache-2.0"
] | 1 | 2021-08-31T12:51:45.000Z | 2021-08-31T12:51:45.000Z | main.py | Troublor/ulauncher-numconverter | 98d5e01d82671eedc98c000053980ae7ceb4ea28 | [
"Apache-2.0"
] | null | null | null | main.py | Troublor/ulauncher-numconverter | 98d5e01d82671eedc98c000053980ae7ceb4ea28 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import re
from abc import abstractmethod, ABC
from enum import Enum
from typing import List, Optional, Literal, Tuple, Union
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
import ulauncher.api.shared.event as events
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
class DemoExtension(Extension):
    """Ulauncher extension entry point.

    Wires keyword-query events (the hex/dec/bin conversion keywords) to
    a single KeywordQueryEventListener instance.
    """

    def __init__(self):
        super().__init__()
        self.subscribe(events.KeywordQueryEvent, KeywordQueryEventListener())
class Number(ABC):
    """An integer value decoded from a textual representation."""

    @classmethod
    def parse(cls, payload: str, encoding: Encoding) -> Union[Number, ExtensionResultItem]:
        """Parse *payload* as a number in *encoding*.

        Returns a Number on success.  On failure (empty or malformed
        input) returns an ExtensionResultItem describing the problem,
        suitable for rendering directly in the result list.
        """
        if not payload:
            return ExtensionResultItem(
                icon='images/icon.png',
                name='No input',
                description=f"Please input a {encoding} number",
                on_enter=DoNothingAction(),
            )
        try:
            value = encoding.decode(payload)
        except ValueError:
            return ExtensionResultItem(
                icon='images/icon.png',
                name="Failed to convert number",
                description=f"Value {payload} is not a {encoding} number.",
                on_enter=DoNothingAction(),
                on_alt_enter=DoNothingAction(),
            )
        # Construct via cls (not Number) so subclasses parse into their own type.
        return cls(value)

    def __init__(self, value: int):
        # The decoded integer value.
        self.value = value

    def result_item(self, encoding: Encoding) -> ExtensionResultItem:
        """Render this number in *encoding* as a copy-to-clipboard result item."""
        payload = encoding.encode(self.value)
        return ExtensionResultItem(
            icon=encoding.icon,
            name=payload,
            description=encoding.__str__().capitalize() + '; Copy to clipboard.',
            on_enter=CopyToClipboardAction(payload),
            on_alt_enter=CopyToClipboardAction(payload),
        )
class Encoding(ABC):
    """Abstract base for positional number encodings (binary, decimal, hex).

    Inherits ABC so that @abstractmethod is actually enforced; without
    ABCMeta the decorators below would be inert.
    """

    @abstractmethod
    def base(self) -> int:
        """Return the radix of this encoding."""

    @property
    def icon(self) -> str:
        """Icon shown next to results rendered in this encoding."""
        return 'images/icon.png'

    @abstractmethod
    def __str__(self):
        """Return the human-readable encoding name."""

    @abstractmethod
    def encode(self, value: int) -> str:
        """Format *value* as a string in this encoding (no prefix)."""

    def decode(self, value: str) -> int:
        """Parse *value* as an integer in this encoding's base.

        Raises ValueError if *value* is not a valid number in this base.
        """
        return int(value, self.base())
class Hexadecimal(Encoding):
    """Base-16 encoding."""

    def base(self) -> int:
        return 16

    @property
    def icon(self) -> str:
        return 'images/hex.png'

    def __str__(self):
        return "hexadecimal"

    def encode(self, value: int) -> str:
        # format() keeps the sign intact; hex(value)[2:] mangles negative
        # numbers ('-0xff' -> 'xff') and would not round-trip via decode().
        return format(value, 'x')
class Decimal(Encoding):
    """Base-10 (plain integer) encoding."""

    def base(self) -> int:
        return 10

    @property
    def icon(self) -> str:
        return 'images/dec.png'

    def __str__(self):
        return "decimal"

    def encode(self, value: int) -> str:
        # Plain decimal rendering, no prefix.
        return str(value)
class Binary(Encoding):
    """Base-2 encoding."""

    def base(self) -> int:
        return 2

    @property
    def icon(self) -> str:
        return 'images/bin.png'

    def __str__(self):
        return "binary"

    def encode(self, value: int) -> str:
        # format() keeps the sign intact; bin(value)[2:] mangles negative
        # numbers ('-0b11' -> 'b11') and would not round-trip via decode().
        return format(value, 'b')
class KeywordQueryEventListener(EventListener):
    """Routes each conversion keyword to its source encoding and renders
    the number in the two remaining encodings."""

    def on_event(self, event: events.KeywordQueryEvent, extension: Extension):
        query = event.get_argument() or ""
        token = re.split(r"\s+", query)[0]
        keyword = event.get_keyword()
        prefs = extension.preferences
        # Pick the source encoding from the keyword; targets are the other two.
        if keyword == prefs["kw_hex"]:
            source, targets = Hexadecimal(), [Decimal(), Binary()]
        elif keyword == prefs["kw_bin"]:
            source, targets = Binary(), [Decimal(), Hexadecimal()]
        elif keyword == prefs["kw_dec"]:
            source, targets = Decimal(), [Hexadecimal(), Binary()]
        else:
            raise RuntimeError()
        parsed = Number.parse(token, source)
        if isinstance(parsed, ExtensionResultItem):
            # Parsing failed; show the error item as-is.
            items = [parsed]
        else:
            items = [parsed.result_item(target) for target in targets]
        return RenderResultListAction(items)
if __name__ == '__main__':
DemoExtension().run()
| 28.188312 | 91 | 0.619443 | from __future__ import annotations
import re
from abc import abstractmethod, ABC
from enum import Enum
from typing import List, Optional, Literal, Tuple, Union
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
import ulauncher.api.shared.event as events
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
class DemoExtension(Extension):
def __init__(self):
super().__init__()
self.subscribe(events.KeywordQueryEvent, KeywordQueryEventListener())
class Number(ABC):
@classmethod
def parse(cls, payload: str, encoding: Encoding) -> Union[Number, ExtensionResultItem]:
if len(payload) == 0:
return ExtensionResultItem(
icon='images/icon.png',
name='No input',
description=f"Please input a {encoding} number",
on_enter=DoNothingAction(),
)
try:
value = encoding.decode(payload)
return Number(value)
except ValueError:
msg = "Failed to convert number"
description = f"Value {payload} is not a {encoding} number."
return ExtensionResultItem(
icon='images/icon.png',
name=msg,
description=description,
on_enter=DoNothingAction(),
on_alt_enter=DoNothingAction(),
)
def __init__(self, value: int):
self.value = value
def result_item(self, encoding: Encoding) -> ExtensionResultItem:
payload = encoding.encode(self.value)
return ExtensionResultItem(
icon=encoding.icon,
name=payload,
description=encoding.__str__().capitalize() + '; Copy to clipboard.',
on_enter=CopyToClipboardAction(payload),
on_alt_enter=CopyToClipboardAction(payload),
)
class Encoding:
@abstractmethod
def base(self) -> int:
pass
@property
def icon(self) -> str:
return 'images/icon.png'
@abstractmethod
def __str__(self):
pass
@abstractmethod
def encode(self, value: int) -> str:
pass
def decode(self, value: str) -> int:
return int(value, self.base())
class Hexadecimal(Encoding):
def base(self) -> int:
return 16
@property
def icon(self) -> str:
return 'images/hex.png'
def __str__(self):
return "hexadecimal"
def encode(self, value: int) -> str:
return hex(value)[2:]
class Decimal(Encoding):
def base(self) -> int:
return 10
@property
def icon(self) -> str:
return 'images/dec.png'
def __str__(self):
return "decimal"
def encode(self, value: int) -> str:
return str(value)
class Binary(Encoding):
def base(self) -> int:
return 2
@property
def icon(self) -> str:
return 'images/bin.png'
def __str__(self):
return "binary"
def encode(self, value: int) -> str:
return bin(value)[2:]
class KeywordQueryEventListener(EventListener):
def on_event(self, event: events.KeywordQueryEvent, extension: Extension):
arg = event.get_argument() or ""
value = re.split(r"\s+", arg)[0]
kw = event.get_keyword()
if kw == extension.preferences["kw_hex"]:
num = Number.parse(value, Hexadecimal())
encodings = [Decimal(), Binary()]
elif kw == extension.preferences["kw_bin"]:
num = Number.parse(value, Binary())
encodings = [Decimal(), Hexadecimal()]
elif kw == extension.preferences["kw_dec"]:
num = Number.parse(value, Decimal())
encodings = [Hexadecimal(), Binary()]
else:
raise RuntimeError()
if isinstance(num, ExtensionResultItem):
items = [num]
else:
items = list(map(lambda enc: num.result_item(enc), encodings))
return RenderResultListAction(items)
if __name__ == '__main__':
DemoExtension().run()
| true | true |
79003c56855aa81110d70841ff657542bea8dc30 | 3,095 | py | Python | madbg/communication.py | kmaork/madbg | 9f6097d510897ddf56eb9d87d3ac82b3a177344a | [
"MIT"
] | 48 | 2019-07-05T23:16:42.000Z | 2022-03-17T09:18:13.000Z | madbg/communication.py | kmaork/madbg | 9f6097d510897ddf56eb9d87d3ac82b3a177344a | [
"MIT"
] | 30 | 2020-07-07T13:48:00.000Z | 2022-03-24T09:19:39.000Z | madbg/communication.py | kmaork/madbg | 9f6097d510897ddf56eb9d87d3ac82b3a177344a | [
"MIT"
] | 2 | 2021-08-16T16:30:27.000Z | 2022-01-27T11:32:20.000Z | import pickle
import fcntl
import os
import struct
from collections import defaultdict
from functools import partial
from asyncio import new_event_loop
from io import BytesIO
from .utils import opposite_dict
MESSAGE_LENGTH_FMT = 'I'
def set_nonblocking(fd):
    """Enable O_NONBLOCK on *fd*, preserving its other status flags."""
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
def blocking_read(fd, n):
    """Read exactly *n* bytes from file descriptor *fd*.

    Loops over os.read until all bytes arrive; raises IOError if the
    descriptor reaches EOF first.
    """
    collected = BytesIO()
    remaining = n
    while remaining > 0:
        chunk = os.read(fd, remaining)
        if not chunk:
            raise IOError('FD closed before all bytes read')
        remaining -= len(chunk)
        collected.write(chunk)
    return collected.getvalue()
class Piping:
    """Shuttles bytes between file descriptors on a private asyncio loop.

    ``pipe_dict`` maps each source fd to the destination fd its data is
    forwarded to.  Reads are buffered per destination and flushed as the
    destination becomes writable; the loop stops once every source has
    been exhausted.
    """
    def __init__(self, pipe_dict):
        # Pending outgoing bytes, keyed by destination fd.
        self.buffers = defaultdict(bytes)
        self.loop = new_event_loop()
        for src_fd, dest_fd in pipe_dict.items():
            self.loop.add_reader(src_fd, partial(self._read, src_fd, dest_fd))
            self.loop.add_writer(dest_fd, partial(self._write, dest_fd))
        # reader fd -> the single writer fd it feeds.
        self.readers_to_writers = dict(pipe_dict)
        # writer fd -> set of reader fds feeding it (inverse mapping).
        self.writers_to_readers = opposite_dict(pipe_dict)
    def _remove_writer(self, writer_fd):
        # Stop watching the writer and detach every reader routed to it.
        self.loop.remove_writer(writer_fd)
        for reader_fd in self.writers_to_readers.pop(writer_fd):
            self.readers_to_writers.pop(reader_fd)
    def _remove_reader(self, reader_fd):
        # Stop watching this reader and drop it from its writer's set.
        self.loop.remove_reader(reader_fd)
        writer_fd = self.readers_to_writers.pop(reader_fd)
        writer_readers = self.writers_to_readers[writer_fd]
        writer_readers.remove(reader_fd)
        # NOTE(review): this condition looks like a typo for
        # `if not writer_readers:` (retire the writer once no readers feed
        # it).  As written it only fires when writer_fd happens to be fd 0.
        # Changing it would alter writer-teardown timing (possibly dropping
        # still-buffered data), so confirm intent before fixing.
        if not writer_fd:
            self._remove_writer(writer_fd)
    def _read(self, src_fd, dest_fd):
        try:
            data = os.read(src_fd, 1024)
        except OSError:
            # Treat read errors as EOF.  NOTE(review): sentinel is '' (str)
            # while the buffers hold bytes; both are falsy so behavior is
            # unaffected, but b'' would be type-consistent.
            data = ''
        if data:
            self.buffers[dest_fd] += data
        else:
            # Source exhausted: unregister it; if the same fd also serves
            # as a destination, unregister that side as well.
            self._remove_reader(src_fd)
            if src_fd in self.writers_to_readers:
                self._remove_writer(src_fd)
            # No sources left at all -> piping is finished.
            if not self.readers_to_writers:
                self.loop.stop()
    def _write(self, dest_fd):
        # Write as much buffered data as the fd accepts; keep the rest.
        buffer = self.buffers[dest_fd]
        if buffer:
            self.buffers[dest_fd] = buffer[os.write(dest_fd, buffer):]
    def run(self):
        """Run the piping loop until all sources are exhausted."""
        self.loop.run_forever()
        # TODO: is this needed? (would flush bytes still buffered after
        # the loop stops)
        # for dest_fd, buffer in self.buffers.items():
        #     while buffer:
        #         buffer = buffer[os.write(dest_fd, buffer):]
def send_message(sock, obj):
    """Pickle *obj* and send it over *sock* as a length-prefixed message."""
    payload = pickle.dumps(obj)
    header = struct.pack(MESSAGE_LENGTH_FMT, len(payload))
    sock.sendall(header)
    sock.sendall(payload)
def receive_message(sock):
    """Receive one length-prefixed pickled message and return the object.

    Note: *sock* is used as a raw file descriptor (blocking_read calls
    os.read on it).  pickle.loads on received data can execute arbitrary
    code -- only use this with trusted peers.
    """
    header_size = struct.calcsize(MESSAGE_LENGTH_FMT)
    header = blocking_read(sock, header_size)
    (payload_size,) = struct.unpack(MESSAGE_LENGTH_FMT, header)
    payload = blocking_read(sock, payload_size)
    return pickle.loads(payload)
| 32.239583 | 114 | 0.636187 | import pickle
import fcntl
import os
import struct
from collections import defaultdict
from functools import partial
from asyncio import new_event_loop
from io import BytesIO
from .utils import opposite_dict
MESSAGE_LENGTH_FMT = 'I'
def set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def blocking_read(fd, n):
io = BytesIO()
read_amount = 0
while read_amount < n:
data = os.read(fd, n - read_amount)
if not data:
raise IOError('FD closed before all bytes read')
read_amount += len(data)
io.write(data)
return io.getvalue()
class Piping:
def __init__(self, pipe_dict):
self.buffers = defaultdict(bytes)
self.loop = new_event_loop()
for src_fd, dest_fd in pipe_dict.items():
self.loop.add_reader(src_fd, partial(self._read, src_fd, dest_fd))
self.loop.add_writer(dest_fd, partial(self._write, dest_fd))
self.readers_to_writers = dict(pipe_dict)
self.writers_to_readers = opposite_dict(pipe_dict)
def _remove_writer(self, writer_fd):
self.loop.remove_writer(writer_fd)
for reader_fd in self.writers_to_readers.pop(writer_fd):
self.readers_to_writers.pop(reader_fd)
def _remove_reader(self, reader_fd):
self.loop.remove_reader(reader_fd)
writer_fd = self.readers_to_writers.pop(reader_fd)
writer_readers = self.writers_to_readers[writer_fd]
writer_readers.remove(reader_fd)
if not writer_fd:
self._remove_writer(writer_fd)
def _read(self, src_fd, dest_fd):
try:
data = os.read(src_fd, 1024)
except OSError:
data = ''
if data:
self.buffers[dest_fd] += data
else:
self._remove_reader(src_fd)
if src_fd in self.writers_to_readers:
self._remove_writer(src_fd)
if not self.readers_to_writers:
self.loop.stop()
def _write(self, dest_fd):
buffer = self.buffers[dest_fd]
if buffer:
self.buffers[dest_fd] = buffer[os.write(dest_fd, buffer):]
def run(self):
self.loop.run_forever()
def send_message(sock, obj):
message = pickle.dumps(obj)
message_len = struct.pack(MESSAGE_LENGTH_FMT, len(message))
sock.sendall(message_len)
sock.sendall(message)
def receive_message(sock):
len_len = struct.calcsize(MESSAGE_LENGTH_FMT)
len_bytes = blocking_read(sock, len_len)
message_len = struct.unpack(MESSAGE_LENGTH_FMT, len_bytes)[0]
message = blocking_read(sock, message_len)
return pickle.loads(message)
| true | true |
79003c9e0a4b7a3d993d44eeb52364d2e0bb6459 | 5,112 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
    """ServiceAssociationLinksOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        subnet_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ServiceAssociationLinksListResult"
        """Gets a list of service association links for a subnet.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param subnet_name: The name of the subnet.
        :type subnet_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceAssociationLinksListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceAssociationLinksListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceAssociationLinksListResult"]
        # Map auth / not-found / conflict status codes to specific exception
        # types; callers may extend or override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL by filling the path template from list.metadata.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        # Synchronous pipeline run; the body is fully buffered (stream=False).
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
        # A caller-supplied 'cls' callable may reshape the result.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'}  # type: ignore
| 46.899083 | 223 | 0.689358 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name,
virtual_network_name,
subnet_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'}
| true | true |
79003cce3b2b638be71f36428fd99325eafccaf1 | 2,387 | py | Python | aiocypher/aioneo4j/graph.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-11-09T20:48:18.000Z | 2021-11-12T07:45:39.000Z | aiocypher/aioneo4j/graph.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | aiocypher/aioneo4j/graph.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
#
# Copyright 2020-21 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..interface.graph import Graph as AbstractGraph
from typing import TypeVar, Callable, Awaitable, Set
import neo4j
R = TypeVar('R')
class Graph (AbstractGraph[neo4j.graph.Graph]):
    """A lazily-executed neo4j query producing a neo4j.graph.Graph.

    Awaiting this object executes the query and yields the raw
    neo4j.graph.Graph, which is rarely useful outside the context
    managers that created it.  Prefer the ``nodes`` and
    ``relationships`` coroutine properties.
    """
    def __init__(
        self,
        execute: Callable[[Callable[[neo4j.Transaction], neo4j.graph.Graph]], Awaitable[neo4j.graph.Graph]],
        func: Callable[[neo4j.Transaction], neo4j.graph.Graph]
    ):
        self._execute = execute
        self._func = func

    def __await__(self):
        return self._execute(self._func).__await__()

    @property
    async def nodes(self) -> Set[neo4j.graph.Node]:
        """Coroutine property: execute the query and return the matched nodes."""
        def collect_nodes(tx: neo4j.Transaction) -> Set[neo4j.graph.Node]:
            return set(self._func(tx).nodes)
        return await self._execute(collect_nodes)

    @property
    async def relationships(self) -> Set[neo4j.graph.Relationship]:
        """Coroutine property: execute the query and return the matched relationships."""
        def collect_relationships(tx: neo4j.Transaction) -> Set[neo4j.graph.Relationship]:
            return set(self._func(tx).relationships)
        return await self._execute(collect_relationships)
from ..interface.graph import Graph as AbstractGraph
from typing import TypeVar, Callable, Awaitable, Set
import neo4j
R = TypeVar('R')
class Graph (AbstractGraph[neo4j.graph.Graph]):
def __init__(
self,
execute: Callable[[Callable[[neo4j.Transaction], neo4j.graph.Graph]], Awaitable[neo4j.graph.Graph]],
func: Callable[[neo4j.Transaction], neo4j.graph.Graph]
):
self._func = func
self._execute = execute
def __await__(self):
return self._execute(self._func).__await__()
@property
async def nodes(self) -> Set[neo4j.graph.Node]:
return await self._execute(lambda tx: set(self._func(tx).nodes))
@property
async def relationships(self) -> Set[neo4j.graph.Relationship]:
return await self._execute(lambda tx: set(self._func(tx).relationships))
| true | true |
79003d95109ca39991a0d00374edc5456102de45 | 2,304 | py | Python | bin/demo_get_PubMedArticle_by_pmid.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 28 | 2019-09-09T08:12:31.000Z | 2021-12-17T00:09:14.000Z | bin/demo_get_PubMedArticle_by_pmid.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 33 | 2019-11-07T05:36:04.000Z | 2022-01-29T01:14:57.000Z | bin/demo_get_PubMedArticle_by_pmid.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 10 | 2019-09-09T10:04:05.000Z | 2021-06-08T16:00:14.000Z | from __future__ import print_function
import sys
from metapub import PubMedFetcher
from metapub import FindIt
# examples of different formats:
# 18612690: PubMedArticle with multiple AbstractText sections
# 1234567: PubMedArticle with no abstract whatsoever
# 20301546: PubMedBookArticle from GeneReviews
####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.WARNING)
ch = logging.StreamHandler()
logging.getLogger("metapub").setLevel(logging.INFO)
logging.getLogger("metapub").addHandler(ch)
####
try:
pmid = sys.argv[1]
except IndexError:
print('Supply a pubmed ID as the argument to this script.')
print('')
print('Example: python demo_pubmed.py 123456')
sys.exit()
article = PubMedFetcher().article_by_pmid(pmid)
print('')
print(article.pmid, article.title)
print('')
print('authors: %s' % ','.join(article.authors))
print('journal: %s' % article.journal)
print('')
excerpt = '(empty)' if article.abstract is None else article.abstract[:100] + '[...]'
print('abstract: %s' % excerpt)
print('')
print('pii:',str(article.pii))
print('doi:',str(article.doi))
print('pmc:',str(article.pmc))
print('volume:',str(article.volume))
print('issue:',str(article.issue))
print('pages:',str(article.pages))
print('year:',str(article.year))
print('')
print('MeSH headings: ')
for DUI in list(article.mesh.keys()):
print('\t', DUI, article.mesh[DUI]['descriptor_name'], article.mesh.get('qualifier_name', ''))
if article.publication_types:
print('\nPublication Type Information')
for pt in list(article.publication_types.keys()):
print('\t', pt, article.publication_types[pt])
if article.chemicals:
print('\nChemical List')
for DUI in list(article.chemicals.keys()):
print('\t', DUI, article.chemicals[DUI]['substance_name'])
if article.grants:
print('\nGrant Information')
for gr in grants:
print('\t', gr)
if article.history:
print('\nArticle History')
for hist in article.history:
print('\t', hist, article.history[hist])
print('')
print('FindIt results:')
source = FindIt(pmid=pmid)
print('\tdoi:', source.doi)
print('\turl:', source.url)
print('\tbackup:', source.backup_url)
print('\treason:', source.reason)
print(article.citation_html)
| 27.428571 | 98 | 0.707031 | from __future__ import print_function
import sys
from metapub import PubMedFetcher
from metapub import FindIt
t logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.WARNING)
ch = logging.StreamHandler()
logging.getLogger("metapub").setLevel(logging.INFO)
logging.getLogger("metapub").addHandler(ch)
pmid = sys.argv[1]
except IndexError:
print('Supply a pubmed ID as the argument to this script.')
print('')
print('Example: python demo_pubmed.py 123456')
sys.exit()
article = PubMedFetcher().article_by_pmid(pmid)
print('')
print(article.pmid, article.title)
print('')
print('authors: %s' % ','.join(article.authors))
print('journal: %s' % article.journal)
print('')
excerpt = '(empty)' if article.abstract is None else article.abstract[:100] + '[...]'
print('abstract: %s' % excerpt)
print('')
print('pii:',str(article.pii))
print('doi:',str(article.doi))
print('pmc:',str(article.pmc))
print('volume:',str(article.volume))
print('issue:',str(article.issue))
print('pages:',str(article.pages))
print('year:',str(article.year))
print('')
print('MeSH headings: ')
for DUI in list(article.mesh.keys()):
print('\t', DUI, article.mesh[DUI]['descriptor_name'], article.mesh.get('qualifier_name', ''))
if article.publication_types:
print('\nPublication Type Information')
for pt in list(article.publication_types.keys()):
print('\t', pt, article.publication_types[pt])
if article.chemicals:
print('\nChemical List')
for DUI in list(article.chemicals.keys()):
print('\t', DUI, article.chemicals[DUI]['substance_name'])
if article.grants:
print('\nGrant Information')
for gr in grants:
print('\t', gr)
if article.history:
print('\nArticle History')
for hist in article.history:
print('\t', hist, article.history[hist])
print('')
print('FindIt results:')
source = FindIt(pmid=pmid)
print('\tdoi:', source.doi)
print('\turl:', source.url)
print('\tbackup:', source.backup_url)
print('\treason:', source.reason)
print(article.citation_html)
| true | true |
79003e1445380720ad3a6144288375f59533a79b | 13,825 | py | Python | third_party/augment_ops.py | harshita1000/crest | 122a40518ba8c4ecf27e7460104c176e01e960d3 | [
"Apache-2.0"
] | 50 | 2021-06-10T21:25:16.000Z | 2022-03-30T03:37:53.000Z | third_party/augment_ops.py | kihyuks/crest | 64918b85d31e7939fce874431b6059c0c9cca7b7 | [
"Apache-2.0"
] | 5 | 2021-07-22T13:01:32.000Z | 2021-11-29T13:30:20.000Z | third_party/augment_ops.py | kihyuks/crest | 64918b85d31e7939fce874431b6059c0c9cca7b7 | [
"Apache-2.0"
] | 9 | 2021-06-10T22:44:39.000Z | 2022-03-22T14:55:33.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for augmentation."""
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
# Default replace value
REPLACE_VALUE = 128
def blend(image1, image2, factor):
    """Linearly interpolate (or extrapolate) between two images.

    A factor of 0.0 yields image1, 1.0 yields image2; values in between
    interpolate pixel-wise and values above 1.0 extrapolate.  The result
    is saturated back into the uint8 range.

    Args:
      image1: An image Tensor.
      image2: An image Tensor.
      factor: A floating point value above 0.0.

    Returns:
      A blended uint8 image Tensor.
    """
    start = tf.cast(image1, tf.float32)
    end = tf.cast(image2, tf.float32)
    interpolated = start + factor * (end - start)
    return tf.saturate_cast(interpolated, tf.uint8)
def wrap(image):
  """Returns 'image' with an extra all-ones channel appended."""
  dims = tf.shape(image)
  ones_channel = tf.ones([dims[0], dims[1], 1], image.dtype)
  return tf.concat([image, ones_channel], axis=2)
def unwrap(image):
  """Unwraps an image produced by wrap.

  Where there is a 0 in the last channel for every spatial position,
  the rest of the three channels in that spatial dimension are grayed
  (set to 128). Operations like translate and shear on a wrapped
  Tensor will leave 0s in empty locations. Some transformations look
  at the intensity of values to do preprocessing, and we want these
  empty pixels to assume the 'average' value, rather than pure black.

  Args:
    image: A 3D Image Tensor with 4 channels.

  Returns:
    image: A 3D image Tensor with 3 channels.
  """
  image_shape = tf.shape(image)
  # Flatten the spatial dimensions.
  flattened_image = tf.reshape(image, [-1, image_shape[2]])
  # Find all pixels where the last channel is zero.
  alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
  # Replacement pixel: gray RGB with alpha kept at 1 so the tensor still has
  # the wrapped (4-channel) layout until it is sliced below.
  replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
                        image.dtype)
  # Where they are zero, fill them in with 'replace'.
  flattened_image = tf.where(
      tf.equal(alpha_channel, 0),
      tf.ones_like(flattened_image, dtype=image.dtype) * replace,
      flattened_image)
  image = tf.reshape(flattened_image, image_shape)
  # Drop the alpha channel, leaving the original 3 channels.
  image = tf.slice(image, [0, 0, 0],
                   [image_shape[0], image_shape[1], image_shape[2] - 1])
  return image
def solarize(image, threshold=128):
  """Inverts every pixel at or above 'threshold' (pixel -> 255 - pixel)."""
  thresh = tf.saturate_cast(threshold, image.dtype)
  inverted = 255 - image
  return tf.where(image < thresh, image, inverted)
def solarize_add(image, addition=0, threshold=128):
  """Adds 'addition' to pixels below 'threshold', clipping to [0, 255].

  'addition' is expected to be between -128 and 128; pixels at or above
  'threshold' are left unchanged.
  """
  thresh = tf.saturate_cast(threshold, image.dtype)
  shifted = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
  shifted = tf.saturate_cast(shifted, tf.uint8)
  return tf.where(image < thresh, shifted, image)
def invert(image):
  """Returns the photographic negative (255 - pixel) of the image."""
  image = tf.convert_to_tensor(image)
  return 255 - image
def invert_blend(image, factor):
  """Blends the inverted image with the original by 'factor'."""
  degenerate = invert(image)
  return blend(degenerate, image, factor)
def color(image, factor):
  """Equivalent of PIL Color: blends a grayscale version with the original."""
  gray = tf.image.rgb_to_grayscale(image)
  degenerate = tf.image.grayscale_to_rgb(gray)
  return blend(degenerate, image, factor)
def contrast(image, factor):
  """Equivalent of PIL Contrast: blends a flat mean-gray image with the original."""
  gray = tf.image.rgb_to_grayscale(image)
  # Round the grayscale mean to the nearest integer before building the
  # constant image (+0.5 then saturating cast).
  mean = tf.reduce_mean(tf.cast(gray, tf.float32))
  mean = tf.saturate_cast(mean + 0.5, tf.uint8)
  flat_gray = tf.ones_like(gray, dtype=tf.uint8) * mean
  degenerate = tf.image.grayscale_to_rgb(flat_gray)
  return blend(degenerate, image, factor)
def brightness(image, factor):
  """Equivalent of PIL Brightness: blends an all-black image with the original."""
  black = tf.zeros_like(image)
  return blend(black, image, factor)
def posterize(image, bits):
  """Equivalent of PIL Posterize: keeps only the top 'bits' bits per channel."""
  shift = tf.cast(8 - bits, image.dtype)
  truncated = tf.bitwise.right_shift(image, shift)
  return tf.bitwise.left_shift(truncated, shift)
def rotate(image, degrees):
  """Equivalent of PIL Rotation; exposed pixels are filled with gray via unwrap.

  Randomly negating 'degrees' is expected to be done by the caller.
  """
  # tfa's rotate takes radians.
  radians = degrees * (math.pi / 180.0)
  rotated = tfa_image.transform_ops.rotate(wrap(image), radians)
  return unwrap(rotated)
def translate_x(image, pixels):
  """Equivalent of PIL Translate in the X dimension."""
  # tfa translate takes [dx, dy]; the offset is negated to preserve the
  # original PIL-equivalent direction.
  shifted = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0])
  return unwrap(shifted)
def translate_y(image, pixels):
  """Equivalent of PIL Translate in the Y dimension."""
  # tfa translate takes [dx, dy]; the offset is negated to preserve the
  # original PIL-equivalent direction.
  shifted = tfa_image.translate_ops.translate(wrap(image), [0, -pixels])
  return unwrap(shifted)
def shear_x(image, level):
  """Equivalent of PIL Shearing in the X dimension."""
  # Projective transform flattened from the matrix:
  #   [1  level]
  #   [0  1    ]
  transform = [1., level, 0., 0., 1., 0., 0., 0.]
  sheared = tfa_image.transform_ops.transform(wrap(image), transform)
  return unwrap(sheared)
def shear_y(image, level):
  """Equivalent of PIL Shearing in the Y dimension."""
  # Projective transform flattened from the matrix:
  #   [1      0]
  #   [level  1]
  transform = [1., 0., 0., level, 1., 0., 0., 0.]
  sheared = tfa_image.transform_ops.transform(wrap(image), transform)
  return unwrap(sheared)
def autocontrast(image):
  """Implements Autocontrast function from PIL using TF ops.

  Each channel is independently remapped so its minimum becomes 0 and its
  maximum becomes 255; channels that are already flat are left unchanged.
  Assumes a uint8 image (both tf.cond branches must share the channel dtype).

  Args:
    image: A 3D uint8 RGB image Tensor.

  Returns:
    A 3D uint8 image Tensor with per-channel contrast maximized.
  """

  def scale_channel(channel):
    """Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image.
    # to compute mins and maxes.
    lo = tf.cast(tf.reduce_min(channel), tf.float32)
    hi = tf.cast(tf.reduce_max(channel), tf.float32)

    # Scale the image, making the lowest value 0 and the highest value 255.
    def scale_values(im):
      scale = 255.0 / (hi - lo)
      offset = -lo * scale
      im = tf.cast(im, tf.float32) * scale + offset
      return tf.saturate_cast(im, tf.uint8)

    result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
    return result

  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image[:, :, 0])
  s2 = scale_channel(image[:, :, 1])
  s3 = scale_channel(image[:, :, 2])
  image = tf.stack([s1, s2, s3], 2)
  return image
def autocontrast_blend(image, factor):
  """Blends the autocontrasted image with the original by 'factor'."""
  degenerate = autocontrast(image)
  return blend(degenerate, image, factor)
def sharpness(image, factor):
  """Implements Sharpness function from PIL using TF ops.

  The image is smoothed with PIL's 3x3 SMOOTH kernel (VALID padding); the
  smoothed interior is blended with the original, while border pixels keep
  their original values.

  Args:
    image: A 3D uint8 image Tensor.
    factor: Blend factor; 0.0 yields the smoothed image, 1.0 the original,
      and values above 1.0 extrapolate (sharpen).

  Returns:
    A 3D uint8 image Tensor.
  """
  orig_im = image
  image = tf.cast(image, tf.float32)
  # Make image 4D for conv operation
  image = tf.expand_dims(image, 0)
  # SMOOTH PIL Kernel
  kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
                       dtype=tf.float32,
                       shape=[3, 3, 1, 1]) / 13.
  # Tile across channel dimension
  kernel = tf.tile(kernel, [1, 1, 3, 1])
  strides = [1, 1, 1, 1]
  degenerate = tf.nn.depthwise_conv2d(
      image, kernel, strides, padding='VALID', dilations=[1, 1])
  degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
  # For the borders of the resulting image, fill in the values of the
  # original image.
  mask = tf.ones_like(degenerate)
  padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
  padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
  result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
  # Blend the final result
  return blend(result, orig_im, factor)
def equalize(image):
  """Implements Equalize function from PIL using TF ops.

  Builds a per-channel histogram lookup table that spreads pixel intensities
  evenly across [0, 255]; channels whose computed step is zero are returned
  unchanged.

  Args:
    image: A 3D uint8 RGB image Tensor.

  Returns:
    A 3D uint8 image Tensor with equalized channels.
  """

  def scale_channel(im, c):
    """Scale the data in the channel to implement equalize."""
    im = tf.cast(im[:, :, c], tf.int32)
    # Compute the histogram of the image channel.
    histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the nonzeros.
    nonzero = tf.where(tf.not_equal(histo, 0))
    nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
    step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255

    def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalization by step.
      lut = (tf.cumsum(histo) + (step // 2)) // step
      # Shift lut, prepending with 0.
      lut = tf.concat([[0], lut[:-1]], 0)
      # Clip the counts to be in range. This is done
      # in the C code for image.point.
      return tf.clip_by_value(lut, 0, 255)

    # If step is zero, return the original image. Otherwise, build
    # lut from the full histogram and step and then index from it.
    result = tf.cond(
        tf.equal(step, 0), lambda: im,
        lambda: tf.gather(build_lut(histo, step), im))
    return tf.cast(result, tf.uint8)

  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image, 0)
  s2 = scale_channel(image, 1)
  s3 = scale_channel(image, 2)
  image = tf.stack([s1, s2, s3], 2)
  return image
def equalize_blend(image, factor):
  """Blends the equalized image with the original by 'factor'."""
  degenerate = equalize(image)
  return blend(degenerate, image, factor)
def _convolve_image_with_kernel(image, kernel):
  """Depthwise-convolves an HWC image with 'kernel' using SAME padding."""
  channels = tf.shape(image)[-1]
  kernel = tf.tile(kernel, [1, 1, channels, 1])
  batched = tf.expand_dims(image, axis=0)
  convolved_im = tf.nn.depthwise_conv2d(
      tf.cast(batched, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
  # adding 0.5 for future rounding, same as in PIL:
  # https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
  convolved_im = convolved_im + 0.5
  return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
  """Blur with the same kernel as ImageFilter.BLUR.

  Args:
    image: A 3D image Tensor.
    factor: Blend factor; 0.0 keeps the original image, 1.0 yields the fully
      blurred image.

  Returns:
    A 3D uint8 image Tensor.
  """
  # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
  # class BLUR(BuiltinFilter):
  #     name = "Blur"
  #     # fmt: off
  #     filterargs = (5, 5), 16, 0, (
  #         1, 1, 1, 1, 1,
  #         1, 0, 0, 0, 1,
  #         1, 0, 0, 0, 1,
  #         1, 0, 0, 0, 1,
  #         1, 1, 1, 1, 1,
  #     )
  #     # fmt: on
  #
  # filterargs are following:
  # (kernel_size_x, kernel_size_y), divisor, offset, kernel
  #
  blur_kernel = tf.constant(
      [[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.],
       [1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]],
      dtype=tf.float32,
      shape=[5, 5, 1, 1]) / 16.0
  blurred_im = _convolve_image_with_kernel(image, blur_kernel)
  return blend(image, blurred_im, factor)
def smooth(image, factor):
  """Smooth with the same kernel as ImageFilter.SMOOTH.

  Args:
    image: A 3D image Tensor.
    factor: Blend factor; 0.0 keeps the original image, 1.0 yields the fully
      smoothed image.

  Returns:
    A 3D uint8 image Tensor.
  """
  # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
  # class SMOOTH(BuiltinFilter):
  #     name = "Smooth"
  #     # fmt: off
  #     filterargs = (3, 3), 13, 0, (
  #         1, 1, 1,
  #         1, 5, 1,
  #         1, 1, 1,
  #     )
  #     # fmt: on
  #
  # filterargs are following:
  # (kernel_size_x, kernel_size_y), divisor, offset, kernel
  #
  smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],
                              dtype=tf.float32,
                              shape=[3, 3, 1, 1]) / 13.0
  smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
  return blend(image, smoothed_im, factor)
def rescale(image, level):
  """Crops a top-left offset and zooms into the remaining corner.

  A fraction (level * 0.25) of the height and width is cropped from the
  top-left, and the remaining bottom-right region is resized back to the
  original size with bicubic interpolation.

  Args:
    image: A 3D image Tensor with statically known height/width.
    level: Scale factor; the cropped fraction per side is level * 0.25.

  Returns:
    A 3D uint8 image Tensor of the original spatial size.
  """
  # See tf.image.ResizeMethod for full list
  size = image.shape[:2]
  scale = level * 0.25
  scale_height = tf.cast(scale * size[0], tf.int32)
  scale_width = tf.cast(scale * size[1], tf.int32)
  cropped_image = tf.image.crop_to_bounding_box(
      image,
      offset_height=scale_height,
      offset_width=scale_width,
      target_height=size[0] - scale_height,
      target_width=size[1] - scale_width)
  rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
  return tf.saturate_cast(rescaled, tf.uint8)
# Registry mapping augmentation op names (as referenced by augmentation
# policies) to the callables defined above.
NAME_TO_FUNC = {
    'Identity': tf.identity,
    'AutoContrast': autocontrast,
    'AutoContrastBlend': autocontrast_blend,
    'Equalize': equalize,
    'EqualizeBlend': equalize_blend,
    'Invert': invert,
    'InvertBlend': invert_blend,
    'Rotate': rotate,
    'Posterize': posterize,
    'Solarize': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'Contrast': contrast,
    'Brightness': brightness,
    'Sharpness': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x,
    'TranslateY': translate_y,
    'Blur': blur,
    'Smooth': smooth,
    'Rescale': rescale,
}
| 33.474576 | 151 | 0.670524 |
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
REPLACE_VALUE = 128
def blend(image1, image2, factor):
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
def wrap(image):
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image):
image_shape = tf.shape(image)
flattened_image = tf.reshape(image, [-1, image_shape[2]])
alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
image.dtype)
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0],
[image_shape[0], image_shape[1], image_shape[2] - 1])
return image
def solarize(image, threshold=128):
threshold = tf.saturate_cast(threshold, image.dtype)
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
threshold = tf.saturate_cast(threshold, image.dtype)
added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
added_im = tf.saturate_cast(added_im, tf.uint8)
return tf.where(image < threshold, added_im, image)
def invert(image):
return 255 - tf.convert_to_tensor(image)
def invert_blend(image, factor):
return blend(invert(image), image, factor)
def color(image, factor):
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast(mean + 0.5, tf.uint8)
degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor)
def brightness(image, factor):
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
shift = tf.cast(8 - bits, image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees):
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# of the function.
image = tfa_image.transform_ops.rotate(wrap(image), radians)
return unwrap(image)
def translate_x(image, pixels):
image = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image)
def translate_y(image, pixels):
image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels])
return unwrap(image)
def shear_x(image, level):
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image)
def shear_y(image, level):
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image)
def autocontrast(image):
def scale_channel(channel):
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image.
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(channel), tf.float32)
hi = tf.cast(tf.reduce_max(channel), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
return tf.saturate_cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def autocontrast_blend(image, factor):
return blend(autocontrast(image), image, factor)
def sharpness(image, factor):
orig_im = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
# Blend the final result
return blend(result, orig_im, factor)
def equalize(image):
def scale_channel(im, c):
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def equalize_blend(image, factor):
return blend(equalize(image), image, factor)
def _convolve_image_with_kernel(image, kernel):
num_channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, num_channels, 1])
image = tf.expand_dims(image, axis=0)
convolved_im = tf.nn.depthwise_conv2d(
tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
# adding 0.5 for future rounding, same as in PIL:
# https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
convolved_im = convolved_im + 0.5
return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class BLUR(BuiltinFilter):
# name = "Blur"
# # fmt: off
# filterargs = (5, 5), 16, 0, (
# 1, 1, 1, 1, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
blur_kernel = tf.constant(
[[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]],
dtype=tf.float32,
shape=[5, 5, 1, 1]) / 16.0
blurred_im = _convolve_image_with_kernel(image, blur_kernel)
return blend(image, blurred_im, factor)
def smooth(image, factor):
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class SMOOTH(BuiltinFilter):
# name = "Smooth"
# # fmt: off
# filterargs = (3, 3), 13, 0, (
# 1, 1, 1,
# 1, 5, 1,
# 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.0
smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
return blend(image, smoothed_im, factor)
def rescale(image, level):
# See tf.image.ResizeMethod for full list
size = image.shape[:2]
scale = level * 0.25
scale_height = tf.cast(scale * size[0], tf.int32)
scale_width = tf.cast(scale * size[1], tf.int32)
cropped_image = tf.image.crop_to_bounding_box(
image,
offset_height=scale_height,
offset_width=scale_width,
target_height=size[0] - scale_height,
target_width=size[1] - scale_width)
rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
return tf.saturate_cast(rescaled, tf.uint8)
NAME_TO_FUNC = {
'Identity': tf.identity,
'AutoContrast': autocontrast,
'AutoContrastBlend': autocontrast_blend,
'Equalize': equalize,
'EqualizeBlend': equalize_blend,
'Invert': invert,
'InvertBlend': invert_blend,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Blur': blur,
'Smooth': smooth,
'Rescale': rescale,
}
| true | true |
79003f65149f9cd97988838b7910e8a70bf1d084 | 42,076 | py | Python | Apps/phgsgmail/gsgmail_process_email.py | chunmanjimmyf/phantom-apps | 204d77ac1c6917ad7b363f5e8930e60e8e9aa8d2 | [
"Apache-2.0"
] | null | null | null | Apps/phgsgmail/gsgmail_process_email.py | chunmanjimmyf/phantom-apps | 204d77ac1c6917ad7b363f5e8930e60e8e9aa8d2 | [
"Apache-2.0"
] | null | null | null | Apps/phgsgmail/gsgmail_process_email.py | chunmanjimmyf/phantom-apps | 204d77ac1c6917ad7b363f5e8930e60e8e9aa8d2 | [
"Apache-2.0"
] | null | null | null | # File: gsgmail_process_email.py
# Copyright (c) 2017-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
import email
import tempfile
from collections import OrderedDict
import os
import re
from bs4 import BeautifulSoup, UnicodeDammit
import phantom.app as phantom
import phantom.utils as ph_utils
import mimetypes
import socket
from email.header import decode_header, make_header
import shutil
import hashlib
import json
import magic
import random
import string
import phantom.rules as phantom_rules
from gsgmail_consts import *
import sys
from requests.structures import CaseInsensitiveDict
# Shared fields for new containers/artifacts: disable playbook automation on
# every object this processor saves.
_container_common = {
    "run_automation": False  # Don't run any playbooks, when this artifact is added
}
_artifact_common = {
    "run_automation": False  # Don't run any playbooks, when this artifact is added
}
# Static extension -> 'contains' labels used when classifying attachments.
FILE_EXTENSIONS = {
    '.vmsn': ['os memory dump', 'vm snapshot file'],
    '.vmss': ['os memory dump', 'vm suspend file'],
    '.js': ['javascript'],
    '.doc': ['doc'],
    '.docx': ['doc'],
    '.xls': ['xls'],
    '.xlsx': ['xls'],
}
# Regexes matched against python-magic output -> 'contains' labels.
MAGIC_FORMATS = [
    (re.compile('^PE.* Windows'), ['pe file', 'hash']),
    (re.compile('^MS-DOS executable'), ['pe file', 'hash']),
    (re.compile('^PDF '), ['pdf']),
    (re.compile('^MDMP crash'), ['process dump']),
    (re.compile('^Macromedia Flash'), ['flash']),
]
EWS_DEFAULT_ARTIFACT_COUNT = 100
EWS_DEFAULT_CONTAINER_COUNT = 100
# Phantom version from which the artifact hashing behavior is fixed.
HASH_FIXED_PHANTOM_VERSION = "2.0.201"
# App IDs of the sibling email-ingestion apps.
OFFICE365_APP_ID = "a73f6d32-c9d5-4fec-b024-43876700daa6"
EXCHANGE_ONPREM_APP_ID = "badc5252-4a82-4a6d-bc53-d1e503857124"
IMAP_APP_ID = "9f2e9f72-b0e5-45d6-92a7-09ef820476c1"
# Pre-compiled IOC extraction patterns (regex strings come from gsgmail_consts).
uri_regexc = re.compile(URI_REGEX)
email_regexc = re.compile(EMAIL_REGEX, re.IGNORECASE)
email_regexc2 = re.compile(EMAIL_REGEX2, re.IGNORECASE)
hash_regexc = re.compile(HASH_REGEX)
ip_regexc = re.compile(IP_REGEX)
ipv6_regexc = re.compile(IPV6_REGEX)
class ProcessMail:
def __init__(self, base_connector, config):
self._base_connector = base_connector
self._config = config
self._email_id_contains = list()
self._container = dict()
self._artifacts = list()
self._attachments = list()
self._python_version = None
try:
self._python_version = int(sys.version_info[0])
except Exception:
raise Exception("Error occurred while getting the Phantom server's Python major version.")
    def _get_file_contains(self, file_path):
        """Build the 'contains' label list for a file.

        Combines the static FILE_EXTENSIONS lookup (by extension) with
        regex matches against the python-magic description of the file's
        content (MAGIC_FORMATS).
        """
        contains = []
        ext = os.path.splitext(file_path)[1]
        contains.extend(FILE_EXTENSIONS.get(ext, []))
        magic_str = magic.from_file(file_path)
        for regex, cur_contains in MAGIC_FORMATS:
            if regex.match(magic_str):
                contains.extend(cur_contains)
        return contains
def _is_ip(self, input_ip):
if ph_utils.is_ip(input_ip):
return True
if self.is_ipv6(input_ip):
return True
return False
def is_ipv6(self, input_ip):
try:
socket.inet_pton(socket.AF_INET6, input_ip)
except Exception:
return False
return True
def _clean_url(self, url):
url = url.strip('>),.]\r\n')
# Check before splicing, find returns -1 if not found
# _and_ you will end up splicing on -1 (incorrectly)
if '<' in url:
url = url[:url.find('<')]
elif '>' in url:
url = url[:url.find('>')]
return url
    def _extract_urls_domains(self, file_data, urls, domains):
        """Extract URLs and/or domains from an email body into the given sets.

        Parses 'file_data' as HTML (href attributes and link text), falling
        back to plain-text regex matching when no links are found, and updates
        the 'urls' and 'domains' sets in place. Honors the extract_urls /
        extract_domains config flags; mailto: links contribute domains only.
        """
        if not self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS] and not self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
            return
        # try to load the email
        try:
            soup = BeautifulSoup(file_data, "html.parser")
        except Exception as e:
            self._base_connector.debug_print(e)
            return
        uris = []
        # get all tags that have hrefs
        links = soup.find_all(href=True)
        if links:
            # it's html, so get all the urls
            uris = [x['href'] for x in links if (not x['href'].startswith('mailto:'))]
            # work on the text part of the link, they might be http links different from the href
            # and were either missed by the uri_regexc while parsing text or there was no text counterpart
            # in the email
            uri_text = [self._clean_url(x.get_text()) for x in links]
            if uri_text:
                uri_text = [x for x in uri_text if x.startswith('http')]
                if uri_text:
                    uris.extend(uri_text)
        else:
            # Parse it as a text file
            uris = re.findall(uri_regexc, file_data)
            if uris:
                uris = [self._clean_url(x) for x in uris]
        if self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
            # add the uris to the urls
            urls |= set(uris)
        if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
            for uri in uris:
                domain = phantom.get_host_from_url(uri)
                if domain and not self._is_ip(domain):
                    domains.add(domain)
            # work on any mailto urls if present
            if links:
                mailtos = [x['href'] for x in links if (x['href'].startswith('mailto:'))]
                for curr_email in mailtos:
                    domain = curr_email[curr_email.find('@') + 1:]
                    if domain and not self._is_ip(domain):
                        domains.add(domain)
        return
    def _get_ips(self, file_data, ips):
        """Find IPv4/IPv6 strings in 'file_data' and add the valid ones to 'ips'."""
        # First extract what looks like an IP from the file, this is a faster operation
        ips_in_mail = re.findall(ip_regexc, file_data)
        ip6_in_mail = re.findall(ipv6_regexc, file_data)
        if ip6_in_mail:
            for ip6_tuple in ip6_in_mail:
                # the IPv6 regex contains groups, so findall yields tuples;
                # keep only the non-empty groups
                ip6s = [x for x in ip6_tuple if x]
                ips_in_mail.extend(ip6s)
        # Now validate them
        if ips_in_mail:
            ips_in_mail = set(ips_in_mail)
            ips_in_mail = [x for x in ips_in_mail if self._is_ip(x)]
            if ips_in_mail:
                ips |= set(ips_in_mail)
    def _handle_body(self, body, parsed_mail, email_id):
        """Extract IOCs (IPs, hashes, URLs, domains) from one saved body part.

        Reads the body file from disk (text mode first, binary fallback),
        re-parses any inline forwarded-message headers, then updates the IOC
        sets stored in 'parsed_mail' in place, honoring the extraction config
        flags. Returns phantom.APP_ERROR when the file is empty/unreadable,
        phantom.APP_SUCCESS otherwise.
        """
        local_file_path = body['file_path']
        # IOC accumulator sets shared across all body parts of this email
        ips = parsed_mail[PROC_EMAIL_JSON_IPS]
        hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
        urls = parsed_mail[PROC_EMAIL_JSON_URLS]
        domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
        file_data = None
        try:
            with open(local_file_path, 'r') as f:
                file_data = f.read()
        except Exception:
            # payloads that are not valid text fail the read above; fall back
            # to bytes and let UnicodeDammit work out the encoding below
            with open(local_file_path, 'rb') as f:
                file_data = f.read()
            self._base_connector.debug_print("Reading file data using binary mode")
        if (file_data is None) or (len(file_data) == 0):
            return phantom.APP_ERROR
        # normalize the content to a clean UTF-8 string before regex matching
        file_data = UnicodeDammit(file_data).unicode_markup.encode('utf-8').decode('utf-8')
        self._parse_email_headers_as_inline(file_data, parsed_mail, email_id)
        if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
            emails = []
            emails.extend(re.findall(email_regexc, file_data))
            emails.extend(re.findall(email_regexc2, file_data))
            for curr_email in emails:
                # the domain is everything after the last '@'
                domain = curr_email[curr_email.rfind('@') + 1:]
                if domain and (not ph_utils.is_ip(domain)):
                    domains.add(domain)
        self._extract_urls_domains(file_data, urls, domains)
        if self._config[PROC_EMAIL_JSON_EXTRACT_IPS]:
            self._get_ips(file_data, ips)
        if self._config[PROC_EMAIL_JSON_EXTRACT_HASHES]:
            hashs_in_mail = re.findall(hash_regexc, file_data)
            if hashs_in_mail:
                hashes |= set(hashs_in_mail)
        return phantom.APP_SUCCESS
def _add_artifacts(self, cef_key, input_set, artifact_name, start_index, artifacts):
added_artifacts = 0
for entry in input_set:
# ignore empty entries
if not entry:
continue
artifact = {}
artifact.update(_artifact_common)
artifact['source_data_identifier'] = start_index + added_artifacts
artifact['cef'] = {cef_key: entry}
artifact['name'] = artifact_name
self._base_connector.debug_print('Artifact:', artifact)
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
    def _parse_email_headers_as_inline(self, file_data, parsed_mail, email_id):
        """Parse headers of a forwarded message embedded in the body text.

        Strips the '...Forwarded Message...' separator line, re-parses the
        remaining text as an email message and delegates header artifact
        creation to _parse_email_headers. Always returns phantom.APP_SUCCESS.
        """
        # remove the 'Forwarded Message' from the email text and parse it
        p = re.compile(r'(?<=\r\n).*Forwarded Message.*\r\n', re.IGNORECASE)
        email_text = p.sub('', file_data.strip())
        mail = email.message_from_string(email_text)
        self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
        return phantom.APP_SUCCESS
def _add_email_header_artifacts(self, email_header_artifacts, start_index, artifacts):
added_artifacts = 0
for artifact in email_header_artifacts:
artifact['source_data_identifier'] = start_index + added_artifacts
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
    def _create_artifacts(self, parsed_mail):
        """Convert the IOC sets and header dicts in 'parsed_mail' into artifacts.

        Appends IP, Hash, URL, Domain and email-header artifacts to
        self._artifacts, numbering source_data_identifier sequentially across
        all of them. Always returns phantom.APP_SUCCESS.
        """
        # get all the artifact data in their own list objects
        ips = parsed_mail[PROC_EMAIL_JSON_IPS]
        hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
        urls = parsed_mail[PROC_EMAIL_JSON_URLS]
        domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
        email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
        # set the default artifact dict
        artifact_id = 0
        # add artifacts
        added_artifacts = self._add_artifacts('sourceAddress', ips, 'IP Artifact', artifact_id, self._artifacts)
        artifact_id += added_artifacts
        added_artifacts = self._add_artifacts('fileHash', hashes, 'Hash Artifact', artifact_id, self._artifacts)
        artifact_id += added_artifacts
        added_artifacts = self._add_artifacts('requestURL', urls, 'URL Artifact', artifact_id, self._artifacts)
        artifact_id += added_artifacts
        added_artifacts = self._add_artifacts('destinationDnsDomain', domains, 'Domain Artifact', artifact_id, self._artifacts)
        artifact_id += added_artifacts
        added_artifacts = self._add_email_header_artifacts(email_headers, artifact_id, self._artifacts)
        artifact_id += added_artifacts
        return phantom.APP_SUCCESS
    def _decode_uni_string(self, input_str, def_name):
        """Best-effort decode of RFC 2047 encoded-words ('=?charset?enc?..?=').

        Returns 'input_str' unchanged when it contains no encoded words,
        'def_name' when decoding fails outright, and otherwise the input with
        a fully rebuilt string substituted when every encoded word decoded
        successfully.
        """
        # try to find all the decoded strings, we could have multiple decoded strings
        # or a single decoded string between two normal strings separated by \r\n
        # YEAH...it could get that messy
        encoded_strings = re.findall(r'=\?.*?\?=', input_str, re.I)
        # return input_str as is, no need to do any conversion
        if not encoded_strings:
            return input_str
        # get the decoded strings
        try:
            decoded_strings = [decode_header(x)[0] for x in encoded_strings]
            decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            self._base_connector.debug_print("Decoding: {0}. Error code: {1}. Error message: {2}".format(encoded_strings, error_code, error_msg))
            return def_name
        # convert to dict for safe access, if it's an empty list, the dict will be empty
        decoded_strings = dict(enumerate(decoded_strings))
        new_str = ''
        new_str_create_count = 0
        for i, encoded_string in enumerate(encoded_strings):
            decoded_string = decoded_strings.get(i)
            if not decoded_string:
                # nothing to replace with
                continue
            value = decoded_string.get('value')
            encoding = decoded_string.get('encoding')
            if not encoding or not value:
                # nothing to replace with
                continue
            try:
                if encoding != 'utf-8':
                    value = str(value, encoding)
            except Exception:
                pass
            try:
                # commenting the existing approach due to a new approach being deployed below
                # substitute the encoded string with the decoded one
                # input_str = input_str.replace(encoded_string, value)
                # build a new string instead of replacing within the input string
                # because of the issue found in PAPP-9531
                if value:
                    new_str += UnicodeDammit(value).unicode_markup
                    new_str_create_count += 1
            except Exception:
                pass
        # replace the input string with the new string only when every encoded
        # word decoded (issue found in PAPP-9531)
        if new_str and new_str_create_count == len(encoded_strings):
            self._base_connector.debug_print("Creating a new string entirely from the encoded_strings and assiging into input_str")
            input_str = new_str
        return input_str
def _get_container_name(self, parsed_mail, email_id):
# Create the default name
def_cont_name = "Email ID: {0}".format(email_id)
# get the subject from the parsed mail
subject = parsed_mail.get(PROC_EMAIL_JSON_SUBJECT)
# if no subject then return the default
if not subject:
return def_cont_name
try:
return str(make_header(decode_header(subject)))
except Exception:
return self._decode_uni_string(subject, def_cont_name)
def _handle_if_body(self, content_disp, content_type, part, bodies, file_path, parsed_mail):
process_as_body = False
# if content disposition is None then assume that it is
if content_disp is None:
process_as_body = True
# if content disposition is inline
elif content_disp.lower().strip() == 'inline':
if ('text/html' in content_type) or ('text/plain' in content_type):
process_as_body = True
if not process_as_body:
return phantom.APP_SUCCESS, True
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS, False
charset = part.get_content_charset()
with open(file_path, 'wb') as f: # noqa
f.write(part_payload)
bodies.append({'file_path': file_path, 'charset': part.get_content_charset()})
self._add_body_in_email_headers(parsed_mail, file_path, charset, content_type)
return phantom.APP_SUCCESS, False
    def _handle_part(self, part, part_index, tmp_dir, extract_attach, parsed_mail):
        """Process one MIME part: save it as a body or, optionally, as an attachment.

        :param part: email.message part from mail.walk()
        :param part_index: index of the part, used for default names
        :param tmp_dir: directory the part payload is written into
        :param extract_attach: whether attachments should be extracted
        :param parsed_mail: accumulator dict (bodies/files lists are mutated)
        """
        bodies = parsed_mail[PROC_EMAIL_JSON_BODIES]
        files = parsed_mail[PROC_EMAIL_JSON_FILES]
        # get the file_name
        file_name = part.get_filename()
        content_disp = part.get('Content-Disposition')
        content_type = part.get('Content-Type')
        content_id = part.get('Content-ID')
        if file_name is None:
            # init name and extension to default values
            name = "part_{0}".format(part_index)
            extension = ".{0}".format(part_index)
            # Try to create an extension from the content type if possible
            # NOTE(review): guess_extension() can return None, which yields
            # names like 'part_0None' — confirm this is acceptable
            if content_type is not None:
                extension = mimetypes.guess_extension(re.sub(';.*', '', content_type))
            # Try to create a name from the content id if possible
            if content_id is not None:
                name = content_id
            file_name = "{0}{1}".format(name, extension)
        else:
            # decode any RFC 2047 encoded words in the file name
            try:
                file_name = str(make_header(decode_header(file_name)))
            except Exception:
                file_name = self._decode_uni_string(file_name, file_name)
        # Remove any chars that we don't want in the name
        file_path = "{0}/{1}_{2}".format(tmp_dir, part_index,
                file_name.translate(str.maketrans("", "", ''.join(['<', '>', ' ']))))
        self._base_connector.debug_print("file_path: {0}".format(file_path))
        # is the part representing the body of the email
        status, process_further = self._handle_if_body(content_disp, content_type, part, bodies, file_path, parsed_mail)
        if not process_further:
            return phantom.APP_SUCCESS
        # is this another email as an attachment
        if (content_type is not None) and (content_type.find(PROC_EMAIL_CONTENT_TYPE_MESSAGE) != -1):
            return phantom.APP_SUCCESS
        # This is an attachment, first check if it is another email or not
        if extract_attach:
            _, file_extension = os.path.splitext(file_name)
            part_payload = part.get_payload(decode=True)
            if not part_payload:
                return phantom.APP_SUCCESS
            try:
                with open(file_path, 'wb') as f:  # noqa
                    f.write(part_payload)
                files.append({'file_name': file_name, 'file_path': file_path})
            except IOError as e:
                error_msg = str(e)
                # fall back to a short random file name when the OS rejects the path
                if "File name too long" in error_msg:
                    self.write_with_new_filename(tmp_dir, part_payload, file_extension, files, as_byte=False)
                else:
                    self._base_connector.debug_print('Failed to write file: {}'.format(e))
        return phantom.APP_SUCCESS
def _get_file_name(self, input_str):
try:
return str(make_header(decode_header(input_str)))
except Exception:
return self._decode_uni_string(input_str, input_str)
    def _parse_email_headers(self, parsed_mail, part, charset=None, add_email_id=None):
        """Build an 'Email Artifact' from the headers of *part* and append it
        to parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].

        :param add_email_id: when set, stored on the artifact as 'emailId'
        :return: number of header artifacts collected so far (0 when skipped)
        """
        email_header_artifacts = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
        email_headers = part.items()
        if not email_headers:
            return 0
        # Parse email keys first
        headers = self._get_email_headers_from_part(part, charset)
        cef_artifact = {}
        cef_types = {}
        if headers.get('From'):
            emails = headers['From']
            if emails:
                cef_artifact.update({'fromEmail': emails})
        if headers.get('To'):
            emails = headers['To']
            if emails:
                cef_artifact.update({'toEmail': emails})
        message_id = headers.get('Message-ID')
        # if the header did not contain any email addresses and message ID then ignore this artifact
        if not cef_artifact and not message_id:
            return 0
        cef_types.update({'fromEmail': ['email'], 'toEmail': ['email']})
        if headers:
            cef_artifact['emailHeaders'] = headers
        # Adding the email id as a cef artifact crashes the UI when trying to show the action dialog box
        # so not adding this right now. All the other code to process the emailId is there, but the refraining
        # from adding the emailId
        # add_email_id = False
        if add_email_id:
            cef_artifact['emailId'] = add_email_id
            if self._email_id_contains:
                cef_types.update({'emailId': self._email_id_contains})
        artifact = {}
        artifact.update(_artifact_common)
        artifact['name'] = 'Email Artifact'
        artifact['cef'] = cef_artifact
        artifact['cef_types'] = cef_types
        email_header_artifacts.append(artifact)
        return len(email_header_artifacts)
def _get_email_headers_from_part(self, part, charset=None):
email_headers = list(part.items())
# TODO: the next 2 ifs can be condensed to use 'or'
if charset is None:
charset = part.get_content_charset()
if charset is None:
charset = 'utf8'
if not email_headers:
return {}
# Convert the header tuple into a dictionary
headers = CaseInsensitiveDict()
try:
[headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers]
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while converting the header tuple into a dictionary"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
# Handle received separately
try:
received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while handling the received header tuple separately"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
if received_headers:
headers['Received'] = received_headers
# handle the subject string, if required add a new key
subject = headers.get('Subject')
if subject:
try:
headers['decodedSubject'] = str(make_header(decode_header(subject)))
except Exception:
headers['decodedSubject'] = self._decode_uni_string(subject, subject)
return dict(headers)
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _handle_mail_object(self, mail, email_id, rfc822_email, tmp_dir, start_time_epoch):
parsed_mail = OrderedDict()
# Create a tmp directory for this email, will extract all files here
tmp_dir = tmp_dir
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
extract_attach = self._config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]
charset = mail.get_content_charset()
if charset is None:
charset = 'utf-8'
# Extract fields and place it in a dictionary
parsed_mail[PROC_EMAIL_JSON_SUBJECT] = mail.get('Subject', '')
parsed_mail[PROC_EMAIL_JSON_FROM] = mail.get('From', '')
parsed_mail[PROC_EMAIL_JSON_TO] = mail.get('To', '')
parsed_mail[PROC_EMAIL_JSON_DATE] = mail.get('Date', '')
parsed_mail[PROC_EMAIL_JSON_MSG_ID] = mail.get('Message-ID', '')
parsed_mail[PROC_EMAIL_JSON_FILES] = files = []
parsed_mail[PROC_EMAIL_JSON_BODIES] = bodies = []
parsed_mail[PROC_EMAIL_JSON_START_TIME] = start_time_epoch
parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS] = []
# parse the parts of the email
if mail.is_multipart():
for i, part in enumerate(mail.walk()):
add_email_id = None
if i == 0:
add_email_id = email_id
self._parse_email_headers(parsed_mail, part, add_email_id=add_email_id)
self._base_connector.debug_print("part: {0}".format(part.__dict__))
self._base_connector.debug_print("part type", type(part))
if part.is_multipart():
self.check_and_update_eml(part)
continue
try:
ret_val = self._handle_part(part, i, tmp_dir, extract_attach, parsed_mail)
except Exception as e:
self._base_connector.debug_print("ErrorExp in _handle_part # {0}".format(i), e)
continue
if phantom.is_fail(ret_val):
continue
else:
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
# parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(mail.items())
file_path = "{0}/part_1.text".format(tmp_dir)
with open(file_path, 'wb') as f: # noqa
f.write(mail.get_payload(decode=True))
bodies.append({'file_path': file_path, 'charset': charset})
self._add_body_in_email_headers(parsed_mail, file_path, mail.get_content_charset(), 'text/plain')
# get the container name
container_name = self._get_container_name(parsed_mail, email_id)
if container_name is None:
return phantom.APP_ERROR
# Add the container
# first save the container, to do that copy things from parsed_mail to a new object
container = {}
container_data = dict(parsed_mail)
# delete the header info, we dont make it a part of the container json
del (container_data[PROC_EMAIL_JSON_EMAIL_HEADERS])
container.update(_container_common)
self._container['source_data_identifier'] = email_id
self._container['name'] = container_name
self._container['data'] = {'raw_email': rfc822_email}
# Create the sets before handling the bodies If both the bodies add the same ip
# only one artifact should be created
parsed_mail[PROC_EMAIL_JSON_IPS] = set()
parsed_mail[PROC_EMAIL_JSON_HASHES] = set()
parsed_mail[PROC_EMAIL_JSON_URLS] = set()
parsed_mail[PROC_EMAIL_JSON_DOMAINS] = set()
# For bodies
for i, body in enumerate(bodies):
if not body:
continue
try:
self._handle_body(body, parsed_mail, email_id)
except Exception as e:
self._base_connector.debug_print_debug_print("ErrorExp in _handle_body # {0}: {1}".format(i, str(e)))
continue
# Files
self._attachments.extend(files)
self._create_artifacts(parsed_mail)
return phantom.APP_SUCCESS
def _add_body_in_email_headers(self, parsed_mail, file_path, charset, content_type):
# Add email_bodies to email_headers
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
try:
with open(file_path, 'r') as f:
body_content = f.read()
except Exception:
with open(file_path, 'rb') as f:
body_content = f.read()
self._base_connector.debug_print("Reading file data using binary mode")
# Add body to the last added Email artifact
body_content = UnicodeDammit(body_content).unicode_markup.encode('utf-8').decode('utf-8')
if 'text/plain' in content_type:
try:
email_headers[-1]['cef']['bodyText'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyText'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyText'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/plain body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
elif 'text/html' in content_type:
try:
email_headers[-1]['cef']['bodyHtml'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyHtml'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyHtml'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/html body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
else:
if not email_headers[-1]['cef'].get('bodyOther'):
email_headers[-1]['cef']['bodyOther'] = {}
try:
email_headers[-1]['cef']['bodyOther'][content_type] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyOther'][content_type] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyOther'][content_type] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing bodyOther content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
def _get_string(self, input_str, charset):
try:
if input_str:
if self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset)
else:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
except Exception:
try:
input_str = str(make_header(decode_header(input_str)))
except Exception:
input_str = self._decode_uni_string(input_str, input_str)
self._base_connector.debug_print(
"Error occurred while converting to string with specific encoding {}".format(input_str))
return input_str
def _set_email_id_contains(self, email_id):
if not self._base_connector:
return
try:
email_id = self._get_string(email_id, 'utf-8')
except Exception:
email_id = str(email_id)
if self._base_connector.get_app_id() == EXCHANGE_ONPREM_APP_ID and email_id.endswith('='):
self._email_id_contains = ["exchange email id"]
elif self._base_connector.get_app_id() == OFFICE365_APP_ID and email_id.endswith('='):
self._email_id_contains = ["office 365 email id"]
elif self._base_connector.get_app_id() == IMAP_APP_ID and email_id.isdigit():
self._email_id_contains = ["imap email id"]
elif ph_utils.is_sha1(email_id):
self._email_id_contains = ["vault id"]
return
def _int_process_email(self, rfc822_email, email_id, start_time_epoch):
mail = email.message_from_string(rfc822_email)
tmp_dir = tempfile.mkdtemp(prefix='ph_email')
try:
ret_val = self._handle_mail_object(mail, email_id, rfc822_email, tmp_dir, start_time_epoch)
except Exception as e:
message = "ErrorExp in _handle_mail_object: {0}".format(e)
self._base_connector.debug_print(message)
return phantom.APP_ERROR, message, []
results = [{'container': self._container, 'artifacts': self._artifacts, 'files': self._attachments, 'temp_directory': tmp_dir}]
return ret_val, PROC_EMAIL_PARSED, results
    def check_and_update_eml(self, part):
        """If email-attachment extraction is enabled and *part* is a .eml
        attachment, write it to a temp dir and record it in self._attachments.
        """
        if self._config[PROC_EMAIL_JSON_EXTRACT_EMAIL_ATTACHMENTS]:
            tmp_dir = None
            msg = None
            file_extension = ''
            try:
                tmp_dir = tempfile.mkdtemp(prefix='ph_email')
                # NOTE(review): part.get_filename() can be None, which makes
                # _get_file_name() fall through its except path — confirm intended
                filename = self._get_file_name(part.get_filename())
                _, file_extension = os.path.splitext(filename)
                if filename.endswith('.eml'):
                    file_path = os.path.join(tmp_dir, filename)
                    # a multipart 'message' part wraps the attached email as payload[0]
                    msg = part.get_payload()[0]
                    with open(file_path, 'wb') as f:  # noqa
                        f.write(msg.as_bytes())
                    self._attachments.append({'file_name': filename, 'file_path': file_path})
            except IOError as e:
                error_msg = str(e)
                # retry with a short random name when the OS rejects the path
                if "File name too long" in error_msg:
                    self.write_with_new_filename(tmp_dir, msg, file_extension, self._attachments, as_byte=True)
                else:
                    self._base_connector.debug_print('Failed to write file: {}'.format(e))
            except Exception as e:
                self._base_connector.debug_print("Exception occurred: {}".format(e))
def write_with_new_filename(self, tmp_dir, data, file_extension, dict_to_fill, as_byte=False):
try:
random_suffix = '_' + ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(16))
new_file_name = "ph_long_file_name_{0}{1}".format(random_suffix, file_extension)
file_path = os.path.join(tmp_dir, new_file_name)
with open(file_path, 'wb') as f:
if as_byte:
f.write(data.as_bytes())
else:
f.write(data)
dict_to_fill.append({'file_name': new_file_name, 'file_path': file_path})
except Exception as e:
self._base_connector.debug_print('Exception while writing file: {}'.format(e))
def process_email(self, rfc822_email, email_id, epoch):
try:
self._set_email_id_contains(email_id)
except Exception:
pass
ret_val, message, results = self._int_process_email(rfc822_email, email_id, epoch)
if not ret_val:
return phantom.APP_ERROR, message
self._parse_results(results)
return phantom.APP_SUCCESS, PROC_EMAIL_PROCESSED
    def _parse_results(self, results):
        """Save containers, vault files and artifacts for each parsed result,
        then clean up the temp directories.
        """
        param = self._base_connector.get_current_param()
        container_count = EWS_DEFAULT_CONTAINER_COUNT
        artifact_count = EWS_DEFAULT_ARTIFACT_COUNT
        if param:
            container_count = param.get(phantom.APP_JSON_CONTAINER_COUNT, EWS_DEFAULT_CONTAINER_COUNT)
            artifact_count = param.get(phantom.APP_JSON_ARTIFACT_COUNT, EWS_DEFAULT_ARTIFACT_COUNT)
        results = results[:container_count]
        for result in results:
            container = result.get('container')
            if not container:
                continue
            container.update(_container_common)
            try:
                ret_val, message, container_id = self._base_connector.save_container(container)
            except Exception as e:
                self._base_connector.debug_print("Exception: ", e)
                continue
            self._base_connector.debug_print(PROC_EMAIL_SAVE_CONTAINER.format(ret_val, message, container_id))
            if phantom.is_fail(ret_val):
                message = PROC_EMAIL_FAILED_CONTAINER.format(container['source_data_identifier'], message)
                self._base_connector.debug_print(message)
                continue
            if not container_id:
                message = PROC_EMAIL_SAVE_CONTAINER_FAILED
                self._base_connector.debug_print(message)
                continue
            # move extracted files into the vault for this container
            files = result.get('files')
            vault_artifacts_added = 0
            for curr_file in files:
                ret_val, added_to_vault = self._handle_file(curr_file, container_id)
                if added_to_vault:
                    vault_artifacts_added += 1
            artifacts = result.get('artifacts')
            if not artifacts:
                continue
            # scheduled polling caps the artifact count; poll-now does not
            if not self._base_connector.is_poll_now():
                artifacts = artifacts[:artifact_count]
            len_artifacts = len(artifacts)
            for j, artifact in enumerate(artifacts):
                if not artifact:
                    continue
                # add the container id to the artifact
                artifact['container_id'] = container_id
                self._set_sdi(artifact)
                # if it is the last artifact of the last container
                if (j + 1) == len_artifacts:
                    # mark it such that active playbooks get executed
                    artifact['run_automation'] = True
                ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
                self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
        # delete any temp directories that were created by the email parsing function
        [shutil.rmtree(x['temp_directory'], ignore_errors=True) for x in results if x.get('temp_directory')]
        return self._base_connector.set_status(phantom.APP_SUCCESS)
def _add_vault_hashes_to_dictionary(self, cef_artifact, vault_id):
success, message, vault_info = phantom_rules.vault_info(vault_id=vault_id)
if not vault_info:
return phantom.APP_ERROR, "Vault ID not found"
# The return value is a list, each item represents an item in the vault
# matching the vault id, the info that we are looking for (the hashes)
# will be the same for every entry, so just access the first one
try:
metadata = vault_info[0].get('metadata')
except Exception:
return phantom.APP_ERROR, PROC_EMAIL_FAILED_VAULT_CONT_DATA
try:
cef_artifact['fileHashSha256'] = metadata['sha256']
except Exception:
pass
try:
cef_artifact['fileHashMd5'] = metadata['md5']
except Exception:
pass
try:
cef_artifact['fileHashSha1'] = metadata['sha1']
except Exception:
pass
return phantom.APP_SUCCESS, PROC_EMAIL_MAPPED_HASH_VAL
    def _handle_file(self, curr_file, container_id):
        """Add an extracted file to the vault and create a 'Vault Artifact' for it.

        :param curr_file: dict with 'file_name' and 'file_path'
        :param container_id: container to attach the vault item/artifact to
        :return: (status, artifact save status)
        """
        file_name = curr_file.get('file_name')
        local_file_path = curr_file['file_path']
        contains = self._get_file_contains(local_file_path)
        # lets move the data into the vault
        vault_attach_dict = {}
        if not file_name:
            file_name = os.path.basename(local_file_path)
        self._base_connector.debug_print("Vault file name: {0}".format(file_name))
        vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = self._base_connector.get_action_name()
        vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = self._base_connector.get_app_run_id()
        file_name = self._decode_uni_string(file_name, file_name)
        # success, message, vault_id = phantom_rules.vault_add(container_id, local_file_path, file_name)
        try:
            success, message, vault_id = phantom_rules.vault_add(file_location=local_file_path, container=container_id, file_name=file_name, metadata=vault_attach_dict)
        except Exception as e:
            self._base_connector.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(e))
            return phantom.APP_ERROR, phantom.APP_ERROR
        if not success:
            self._base_connector.debug_print(PROC_EMAIL_FAILED_VAULT_ADD_FILE.format(message))
            return phantom.APP_ERROR, phantom.APP_ERROR
        # add the vault id artifact to the container
        cef_artifact = {}
        if file_name:
            cef_artifact.update({'fileName': file_name})
        if vault_id:
            cef_artifact.update({'vaultId': vault_id,
                                 'cs6': vault_id,
                                 'cs6Label': 'Vault ID'})
            # now get the rest of the hashes and add them to the cef artifact
            self._add_vault_hashes_to_dictionary(cef_artifact, vault_id)
        if not cef_artifact:
            return phantom.APP_SUCCESS, phantom.APP_ERROR
        artifact = {}
        artifact.update(_artifact_common)
        artifact['container_id'] = container_id
        artifact['name'] = 'Vault Artifact'
        artifact['cef'] = cef_artifact
        if contains:
            artifact['cef_types'] = {'vaultId': contains, 'cs6': contains}
        self._set_sdi(artifact)
        ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
        self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
        return phantom.APP_SUCCESS, ret_val
def cmp2(self, a, b):
return (a > b) - (a < b)
    def _set_sdi(self, input_dict):
        """Set 'source_data_identifier' on *input_dict* to a hash of its contents."""
        if 'source_data_identifier' in input_dict:
            del input_dict['source_data_identifier']
        dict_hash = None
        # first get the phantom version
        phantom_version = self._base_connector.get_product_version()
        # NOTE(review): cmp2 compares version strings lexicographically, so
        # e.g. "2.10.x" < "2.0.201" — confirm acceptable for supported versions
        if not phantom_version:
            dict_hash = self._create_dict_hash(input_dict)
        else:
            ver_cmp = self.cmp2(phantom_version, HASH_FIXED_PHANTOM_VERSION)
            if ver_cmp == -1:
                dict_hash = self._create_dict_hash(input_dict)
        if dict_hash:
            input_dict['source_data_identifier'] = dict_hash
        else:
            # Remove this code once the backend has fixed PS-4216 _and_ it has been
            # merged into next so that 2.0 and 2.1 has the code
            input_dict['source_data_identifier'] = self._create_dict_hash(input_dict)
        return phantom.APP_SUCCESS
def _create_dict_hash(self, input_dict):
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
except Exception as e:
self._base_connector.debug_print('Exception: ', e)
return None
return hashlib.md5(input_dict_str.encode('utf-8')).hexdigest()
| 38.744015 | 168 | 0.621304 |
import email
import tempfile
from collections import OrderedDict
import os
import re
from bs4 import BeautifulSoup, UnicodeDammit
import phantom.app as phantom
import phantom.utils as ph_utils
import mimetypes
import socket
from email.header import decode_header, make_header
import shutil
import hashlib
import json
import magic
import random
import string
import phantom.rules as phantom_rules
from gsgmail_consts import *
import sys
from requests.structures import CaseInsensitiveDict
# Defaults applied to every container / artifact this module creates
_container_common = {
    "run_automation": False
}
_artifact_common = {
    "run_automation": False  # Don't run any playbooks, when this artifact is added
}
# file extension -> list of 'contains' types for vault artifacts
FILE_EXTENSIONS = {
    '.vmsn': ['os memory dump', 'vm snapshot file'],
    '.vmss': ['os memory dump', 'vm suspend file'],
    '.js': ['javascript'],
    '.doc': ['doc'],
    '.docx': ['doc'],
    '.xls': ['xls'],
    '.xlsx': ['xls'],
}
# libmagic description pattern -> 'contains' types
MAGIC_FORMATS = [
    (re.compile('^PE.* Windows'), ['pe file', 'hash']),
    (re.compile('^MS-DOS executable'), ['pe file', 'hash']),
    (re.compile('^PDF '), ['pdf']),
    (re.compile('^MDMP crash'), ['process dump']),
    (re.compile('^Macromedia Flash'), ['flash']),
]
# default limits used by _parse_results() when the action param supplies none
EWS_DEFAULT_ARTIFACT_COUNT = 100
EWS_DEFAULT_CONTAINER_COUNT = 100
# version threshold consulted by _set_sdi() when computing identifiers
HASH_FIXED_PHANTOM_VERSION = "2.0.201"
# app ids used by _set_email_id_contains() to classify the email id format
OFFICE365_APP_ID = "a73f6d32-c9d5-4fec-b024-43876700daa6"
EXCHANGE_ONPREM_APP_ID = "badc5252-4a82-4a6d-bc53-d1e503857124"
IMAP_APP_ID = "9f2e9f72-b0e5-45d6-92a7-09ef820476c1"
# pre-compiled IOC extraction patterns (regex sources come from gsgmail_consts)
uri_regexc = re.compile(URI_REGEX)
email_regexc = re.compile(EMAIL_REGEX, re.IGNORECASE)
email_regexc2 = re.compile(EMAIL_REGEX2, re.IGNORECASE)
hash_regexc = re.compile(HASH_REGEX)
ip_regexc = re.compile(IP_REGEX)
ipv6_regexc = re.compile(IPV6_REGEX)
class ProcessMail:
    def __init__(self, base_connector, config):
        """Initialize the email processor.

        :param base_connector: connector object used for logging and saving data
        :param config: asset configuration dict controlling extraction options
        """
        self._base_connector = base_connector
        self._config = config
        # 'contains' types for the email id, set by _set_email_id_contains()
        self._email_id_contains = list()
        # accumulators populated while processing a single email
        self._container = dict()
        self._artifacts = list()
        self._attachments = list()
        self._python_version = None
        try:
            self._python_version = int(sys.version_info[0])
        except Exception:
            raise Exception("Error occurred while getting the Phantom server's Python major version.")
def _get_file_contains(self, file_path):
contains = []
ext = os.path.splitext(file_path)[1]
contains.extend(FILE_EXTENSIONS.get(ext, []))
magic_str = magic.from_file(file_path)
for regex, cur_contains in MAGIC_FORMATS:
if regex.match(magic_str):
contains.extend(cur_contains)
return contains
def _is_ip(self, input_ip):
if ph_utils.is_ip(input_ip):
return True
if self.is_ipv6(input_ip):
return True
return False
def is_ipv6(self, input_ip):
try:
socket.inet_pton(socket.AF_INET6, input_ip)
except Exception:
return False
return True
def _clean_url(self, url):
url = url.strip('>),.]\r\n')
# Check before splicing, find returns -1 if not found
# _and_ you will end up splicing on -1 (incorrectly)
if '<' in url:
url = url[:url.find('<')]
elif '>' in url:
url = url[:url.find('>')]
return url
    def _extract_urls_domains(self, file_data, urls, domains):
        """Extract URLs and domains from *file_data* (HTML or plain text) into
        the *urls* and *domains* sets (mutated in place), honoring the
        extract_urls / extract_domains config flags.
        """
        if not self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS] and not self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
            return
        # try to load the email
        try:
            soup = BeautifulSoup(file_data, "html.parser")
        except Exception as e:
            self._base_connector.debug_print(e)
            return
        uris = []
        # get all tags that have hrefs
        links = soup.find_all(href=True)
        if links:
            # it's html, so get all the urls
            uris = [x['href'] for x in links if (not x['href'].startswith('mailto:'))]
            # link text can also contain urls (e.g. "click http://...")
            uri_text = [self._clean_url(x.get_text()) for x in links]
            if uri_text:
                uri_text = [x for x in uri_text if x.startswith('http')]
                if uri_text:
                    uris.extend(uri_text)
        else:
            # plain text: fall back to regex extraction
            uris = re.findall(uri_regexc, file_data)
            if uris:
                uris = [self._clean_url(x) for x in uris]
        if self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
            urls |= set(uris)
        if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
            for uri in uris:
                domain = phantom.get_host_from_url(uri)
                if domain and not self._is_ip(domain):
                    domains.add(domain)
            # mailto: links contribute the domain after the '@'
            if links:
                mailtos = [x['href'] for x in links if (x['href'].startswith('mailto:'))]
                for curr_email in mailtos:
                    domain = curr_email[curr_email.find('@') + 1:]
                    if domain and not self._is_ip(domain):
                        domains.add(domain)
        return
def _get_ips(self, file_data, ips):
ips_in_mail = re.findall(ip_regexc, file_data)
ip6_in_mail = re.findall(ipv6_regexc, file_data)
if ip6_in_mail:
for ip6_tuple in ip6_in_mail:
ip6s = [x for x in ip6_tuple if x]
ips_in_mail.extend(ip6s)
if ips_in_mail:
ips_in_mail = set(ips_in_mail)
ips_in_mail = [x for x in ips_in_mail if self._is_ip(x)]
if ips_in_mail:
ips |= set(ips_in_mail)
    def _handle_body(self, body, parsed_mail, email_id):
        """Extract IOCs (domains, urls, ips, hashes) from one saved body file
        into the accumulator sets held in *parsed_mail*.

        :param body: dict with 'file_path' (and 'charset') from _handle_if_body()
        :return: phantom.APP_SUCCESS, or phantom.APP_ERROR on an empty body
        """
        local_file_path = body['file_path']
        ips = parsed_mail[PROC_EMAIL_JSON_IPS]
        hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
        urls = parsed_mail[PROC_EMAIL_JSON_URLS]
        domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
        file_data = None
        # read as text first, fall back to binary for undecodable content
        try:
            with open(local_file_path, 'r') as f:
                file_data = f.read()
        except Exception:
            with open(local_file_path, 'rb') as f:
                file_data = f.read()
            self._base_connector.debug_print("Reading file data using binary mode")
        if (file_data is None) or (len(file_data) == 0):
            return phantom.APP_ERROR
        file_data = UnicodeDammit(file_data).unicode_markup.encode('utf-8').decode('utf-8')
        self._parse_email_headers_as_inline(file_data, parsed_mail, email_id)
        if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
            # the part after '@' of every email address found is a domain
            emails = []
            emails.extend(re.findall(email_regexc, file_data))
            emails.extend(re.findall(email_regexc2, file_data))
            for curr_email in emails:
                domain = curr_email[curr_email.rfind('@') + 1:]
                if domain and (not ph_utils.is_ip(domain)):
                    domains.add(domain)
        self._extract_urls_domains(file_data, urls, domains)
        if self._config[PROC_EMAIL_JSON_EXTRACT_IPS]:
            self._get_ips(file_data, ips)
        if self._config[PROC_EMAIL_JSON_EXTRACT_HASHES]:
            hashs_in_mail = re.findall(hash_regexc, file_data)
            if hashs_in_mail:
                hashes |= set(hashs_in_mail)
        return phantom.APP_SUCCESS
def _add_artifacts(self, cef_key, input_set, artifact_name, start_index, artifacts):
added_artifacts = 0
for entry in input_set:
if not entry:
continue
artifact = {}
artifact.update(_artifact_common)
artifact['source_data_identifier'] = start_index + added_artifacts
artifact['cef'] = {cef_key: entry}
artifact['name'] = artifact_name
self._base_connector.debug_print('Artifact:', artifact)
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _parse_email_headers_as_inline(self, file_data, parsed_mail, email_id):
p = re.compile(r'(?<=\r\n).*Forwarded Message.*\r\n', re.IGNORECASE)
email_text = p.sub('', file_data.strip())
mail = email.message_from_string(email_text)
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
return phantom.APP_SUCCESS
def _add_email_header_artifacts(self, email_header_artifacts, start_index, artifacts):
added_artifacts = 0
for artifact in email_header_artifacts:
artifact['source_data_identifier'] = start_index + added_artifacts
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _create_artifacts(self, parsed_mail):
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
artifact_id = 0
added_artifacts = self._add_artifacts('sourceAddress', ips, 'IP Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('fileHash', hashes, 'Hash Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('requestURL', urls, 'URL Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('destinationDnsDomain', domains, 'Domain Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_email_header_artifacts(email_headers, artifact_id, self._artifacts)
artifact_id += added_artifacts
return phantom.APP_SUCCESS
    def _decode_uni_string(self, input_str, def_name):
        """Decode RFC 2047 encoded-words (=?charset?enc?...?=) found in *input_str*.

        Returns the fully decoded string only when every encoded word decoded
        successfully; returns *def_name* if decoding fails outright, and the
        original *input_str* otherwise.
        """
        encoded_strings = re.findall(r'=\?.*?\?=', input_str, re.I)
        # no encoded words: nothing to convert
        if not encoded_strings:
            return input_str
        try:
            decoded_strings = [decode_header(x)[0] for x in encoded_strings]
            decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            self._base_connector.debug_print("Decoding: {0}. Error code: {1}. Error message: {2}".format(encoded_strings, error_code, error_msg))
            return def_name
        # convert to dict for safe indexed access below
        decoded_strings = dict(enumerate(decoded_strings))
        new_str = ''
        new_str_create_count = 0
        for i, encoded_string in enumerate(encoded_strings):
            decoded_string = decoded_strings.get(i)
            if not decoded_string:
                # nothing to replace with
                continue
            value = decoded_string.get('value')
            encoding = decoded_string.get('encoding')
            if not encoding or not value:
                # nothing to replace with
                continue
            try:
                # decode_header() yields bytes; convert with the declared charset
                if encoding != 'utf-8':
                    value = str(value, encoding)
            except Exception:
                pass
            try:
                # commenting the existing approach due to a new approach being deployed below
                # substitute the encoded string with the decoded one
                # input_str = input_str.replace(encoded_string, value)
                # make new string insted of replacing in the input string because issue find in PAPP-9531
                if value:
                    new_str += UnicodeDammit(value).unicode_markup
                    new_str_create_count += 1
            except Exception:
                pass
        # replace input string with new string because issue find in PAPP-9531
        if new_str and new_str_create_count == len(encoded_strings):
            self._base_connector.debug_print("Creating a new string entirely from the encoded_strings and assiging into input_str")
            input_str = new_str
        return input_str
def _get_container_name(self, parsed_mail, email_id):
    """Derive the container name from the mail subject.

    Falls back to "Email ID: <id>" when the subject is absent, and to the
    lenient unicode decoder when RFC 2047 decoding of the subject fails.
    """
    fallback = "Email ID: {0}".format(email_id)
    subject = parsed_mail.get(PROC_EMAIL_JSON_SUBJECT)
    if subject:
        try:
            return str(make_header(decode_header(subject)))
        except Exception:
            return self._decode_uni_string(subject, fallback)
    return fallback
def _handle_if_body(self, content_disp, content_type, part, bodies, file_path, parsed_mail):
    """Persist this MIME part as a message body when it qualifies as one.

    A part is treated as a body when it has no Content-Disposition at all,
    or is an inline text/plain or text/html part.

    Returns (status, process_further); process_further is True when the
    part was NOT consumed as a body and the caller should keep processing it.
    """
    treat_as_body = False
    if content_disp is None:
        # No disposition: assume it is the body.
        treat_as_body = True
    elif content_disp.lower().strip() == 'inline':
        treat_as_body = ('text/html' in content_type) or ('text/plain' in content_type)
    if not treat_as_body:
        return phantom.APP_SUCCESS, True
    payload = part.get_payload(decode=True)
    if not payload:
        return phantom.APP_SUCCESS, False
    charset = part.get_content_charset()
    with open(file_path, 'wb') as body_file:
        body_file.write(payload)
    bodies.append({'file_path': file_path, 'charset': part.get_content_charset()})
    self._add_body_in_email_headers(parsed_mail, file_path, charset, content_type)
    return phantom.APP_SUCCESS, False
def _handle_part(self, part, part_index, tmp_dir, extract_attach, parsed_mail):
        """Process one MIME part: record it as a body and/or save it as an attachment.

        Always returns phantom.APP_SUCCESS; failures are logged and skipped.
        """
        bodies = parsed_mail[PROC_EMAIL_JSON_BODIES]
        files = parsed_mail[PROC_EMAIL_JSON_FILES]
        # get the file_name
        file_name = part.get_filename()
        content_disp = part.get('Content-Disposition')
        content_type = part.get('Content-Type')
        content_id = part.get('Content-ID')
        if file_name is None:
            # init name and extension to default values
            name = "part_{0}".format(part_index)
            extension = ".{0}".format(part_index)
            # Try to create an extension from the content type if possible
            if content_type is not None:
                extension = mimetypes.guess_extension(re.sub(';.*', '', content_type))
            # Try to create a name from the content id if possible
            if content_id is not None:
                name = content_id
            file_name = "{0}{1}".format(name, extension)
        else:
            # Decode RFC 2047 encoded file names; fall back to the lenient decoder.
            try:
                file_name = str(make_header(decode_header(file_name)))
            except Exception:
                file_name = self._decode_uni_string(file_name, file_name)
        # Remove any chars that we don't want in the name
        file_path = "{0}/{1}_{2}".format(tmp_dir, part_index,
            file_name.translate(str.maketrans("", "", ''.join(['<', '>', ' ']))))
        self._base_connector.debug_print("file_path: {0}".format(file_path))
        # May consume the part as the message body; if so, stop here.
        status, process_further = self._handle_if_body(content_disp, content_type, part, bodies, file_path, parsed_mail)
        if not process_further:
            return phantom.APP_SUCCESS
        # Nested message parts are handled elsewhere, not saved as attachments here.
        if (content_type is not None) and (content_type.find(PROC_EMAIL_CONTENT_TYPE_MESSAGE) != -1):
            return phantom.APP_SUCCESS
        if extract_attach:
            _, file_extension = os.path.splitext(file_name)
            part_payload = part.get_payload(decode=True)
            if not part_payload:
                return phantom.APP_SUCCESS
            try:
                with open(file_path, 'wb') as f:
                    f.write(part_payload)
                files.append({'file_name': file_name, 'file_path': file_path})
            except IOError as e:
                error_msg = str(e)
                if "File name too long" in error_msg:
                    # Retry under a short randomized file name.
                    self.write_with_new_filename(tmp_dir, part_payload, file_extension, files, as_byte=False)
                else:
                    self._base_connector.debug_print('Failed to write file: {}'.format(e))
        return phantom.APP_SUCCESS
def _get_file_name(self, input_str):
try:
return str(make_header(decode_header(input_str)))
except Exception:
return self._decode_uni_string(input_str, input_str)
def _parse_email_headers(self, parsed_mail, part, charset=None, add_email_id=None):
    """Append an 'Email Artifact' built from the headers of *part*.

    Returns the number of header artifacts collected so far, or 0 when the
    part has no headers worth recording.
    """
    header_artifacts = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
    if not part.items():
        return 0
    headers = self._get_email_headers_from_part(part, charset)
    cef = {}
    cef_types = {}
    sender = headers.get('From')
    if sender:
        cef['fromEmail'] = sender
    recipient = headers.get('To')
    if recipient:
        cef['toEmail'] = recipient
    message_id = headers.get('Message-ID')
    # Nothing worth recording: no addresses and no message id.
    if not cef and not message_id:
        return 0
    cef_types.update({'fromEmail': ['email'], 'toEmail': ['email']})
    if headers:
        cef['emailHeaders'] = headers
    if add_email_id:
        cef['emailId'] = add_email_id
        if self._email_id_contains:
            cef_types['emailId'] = self._email_id_contains
    artifact = {}
    artifact.update(_artifact_common)
    artifact['name'] = 'Email Artifact'
    artifact['cef'] = cef
    artifact['cef_types'] = cef_types
    header_artifacts.append(artifact)
    return len(header_artifacts)
def _get_email_headers_from_part(self, part, charset=None):
    """Return the headers of a message part as a plain dict.

    Headers are stored case-insensitively, all 'Received' headers are
    collected into a list, and a 'decodedSubject' key is added when the
    subject is RFC 2047 encoded.

    :param part: an email.message.Message (or compatible) part
    :param charset: charset to decode header values with; defaults to the
        part's content charset, then 'utf8'
    :return: dict of header name -> decoded value (empty when no headers)
    """
    email_headers = list(part.items())
    if charset is None:
        charset = part.get_content_charset()
    if charset is None:
        charset = 'utf8'
    if not email_headers:
        return {}
    # Case-insensitive so lookups like headers.get('Subject') work
    # regardless of the original capitalization.
    headers = CaseInsensitiveDict()
    try:
        # A later duplicate header overwrites an earlier one here; 'Received'
        # (legitimately repeated) is re-collected as a list below.
        for name, value in email_headers:
            headers[name] = self._get_string(value, charset)
    except Exception as e:
        error_code, error_msg = self._get_error_message_from_exception(e)
        err = "Error occurred while converting the header tuple into a dictionary"
        self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
    # Bug fix: initialize before the try block so the check below cannot hit
    # a NameError when the extraction itself raises.
    received_headers = []
    try:
        received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
    except Exception as e:
        error_code, error_msg = self._get_error_message_from_exception(e)
        err = "Error occurred while handling the received header tuple separately"
        self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
    if received_headers:
        headers['Received'] = received_headers
    subject = headers.get('Subject')
    if subject:
        try:
            headers['decodedSubject'] = str(make_header(decode_header(subject)))
        except Exception:
            headers['decodedSubject'] = self._decode_uni_string(subject, subject)
    return dict(headers)
def _get_error_message_from_exception(self, e):
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _handle_mail_object(self, mail, email_id, rfc822_email, tmp_dir, start_time_epoch):
    """Parse an email.Message into container/artifact state on this object.

    Walks every MIME part (or the single payload), collects bodies,
    attachments and header artifacts, then creates artifacts from the
    parsed data.

    Returns phantom.APP_SUCCESS, or phantom.APP_ERROR when no container
    name could be determined.
    """
    parsed_mail = OrderedDict()
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    extract_attach = self._config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]
    charset = mail.get_content_charset()
    if charset is None:
        charset = 'utf-8'
    # Top-level headers of interest.
    parsed_mail[PROC_EMAIL_JSON_SUBJECT] = mail.get('Subject', '')
    parsed_mail[PROC_EMAIL_JSON_FROM] = mail.get('From', '')
    parsed_mail[PROC_EMAIL_JSON_TO] = mail.get('To', '')
    parsed_mail[PROC_EMAIL_JSON_DATE] = mail.get('Date', '')
    parsed_mail[PROC_EMAIL_JSON_MSG_ID] = mail.get('Message-ID', '')
    parsed_mail[PROC_EMAIL_JSON_FILES] = files = []
    parsed_mail[PROC_EMAIL_JSON_BODIES] = bodies = []
    parsed_mail[PROC_EMAIL_JSON_START_TIME] = start_time_epoch
    parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS] = []
    if mail.is_multipart():
        for i, part in enumerate(mail.walk()):
            add_email_id = None
            if i == 0:
                # Only the top-level part carries the email id.
                add_email_id = email_id
            self._parse_email_headers(parsed_mail, part, add_email_id=add_email_id)
            self._base_connector.debug_print("part: {0}".format(part.__dict__))
            self._base_connector.debug_print("part type", type(part))
            if part.is_multipart():
                # Multipart containers may hold attached .eml messages.
                self.check_and_update_eml(part)
                continue
            try:
                ret_val = self._handle_part(part, i, tmp_dir, extract_attach, parsed_mail)
            except Exception as e:
                self._base_connector.debug_print("ErrorExp in _handle_part # {0}".format(i), e)
                continue
            if phantom.is_fail(ret_val):
                continue
    else:
        # Single-part message: the whole payload is the body.
        self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
        file_path = "{0}/part_1.text".format(tmp_dir)
        with open(file_path, 'wb') as f:
            f.write(mail.get_payload(decode=True))
        bodies.append({'file_path': file_path, 'charset': charset})
        self._add_body_in_email_headers(parsed_mail, file_path, mail.get_content_charset(), 'text/plain')
    container_name = self._get_container_name(parsed_mail, email_id)
    if container_name is None:
        return phantom.APP_ERROR
    container = {}
    container_data = dict(parsed_mail)
    del (container_data[PROC_EMAIL_JSON_EMAIL_HEADERS])
    container.update(_container_common)
    self._container['source_data_identifier'] = email_id
    self._container['name'] = container_name
    self._container['data'] = {'raw_email': rfc822_email}
    parsed_mail[PROC_EMAIL_JSON_IPS] = set()
    parsed_mail[PROC_EMAIL_JSON_HASHES] = set()
    parsed_mail[PROC_EMAIL_JSON_URLS] = set()
    parsed_mail[PROC_EMAIL_JSON_DOMAINS] = set()
    for i, body in enumerate(bodies):
        if not body:
            continue
        try:
            self._handle_body(body, parsed_mail, email_id)
        except Exception as e:
            # Bug fix: was self._base_connector.debug_print_debug_print(...),
            # which raised AttributeError inside this handler.
            self._base_connector.debug_print("ErrorExp in _handle_body # {0}: {1}".format(i, str(e)))
            continue
    self._attachments.extend(files)
    self._create_artifacts(parsed_mail)
    return phantom.APP_SUCCESS
def _add_body_in_email_headers(self, parsed_mail, file_path, charset, content_type):
    """Attach the body saved at *file_path* to the most recent header artifact.

    text/plain -> cef['bodyText'], text/html -> cef['bodyHtml'], anything
    else -> cef['bodyOther'][content_type]. Decoding failures fall back to
    RFC 2047 decoding, then to the lenient unicode decoder.
    """
    def decoded_value(raw, what):
        # First try the charset conversion; on failure log the error and
        # fall back, mirroring the per-branch handling this replaces.
        try:
            return self._get_string(raw, charset)
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            err = "Error occurred while parsing {} for creating artifacts".format(what)
            self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
        try:
            return str(make_header(decode_header(raw)))
        except Exception:
            return self._decode_uni_string(raw, raw)

    email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
    try:
        with open(file_path, 'r') as f:
            body_content = f.read()
    except Exception:
        # Not valid text in the platform default encoding; re-read raw bytes.
        with open(file_path, 'rb') as f:
            body_content = f.read()
        self._base_connector.debug_print("Reading file data using binary mode")
    body_content = UnicodeDammit(body_content).unicode_markup.encode('utf-8').decode('utf-8')
    cef = email_headers[-1]['cef']
    if 'text/plain' in content_type:
        cef['bodyText'] = decoded_value(body_content, "text/plain body content")
    elif 'text/html' in content_type:
        cef['bodyHtml'] = decoded_value(body_content, "text/html body content")
    else:
        if not cef.get('bodyOther'):
            cef['bodyOther'] = {}
        cef['bodyOther'][content_type] = decoded_value(body_content, "bodyOther content")
def _get_string(self, input_str, charset):
try:
if input_str:
if self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset)
else:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
except Exception:
try:
input_str = str(make_header(decode_header(input_str)))
except Exception:
input_str = self._decode_uni_string(input_str, input_str)
self._base_connector.debug_print(
"Error occurred while converting to string with specific encoding {}".format(input_str))
return input_str
def _set_email_id_contains(self, email_id):
    """Derive the CEF 'contains' type for the email id from the producing app."""
    if not self._base_connector:
        return
    try:
        email_id = self._get_string(email_id, 'utf-8')
    except Exception:
        email_id = str(email_id)
    app_id = self._base_connector.get_app_id()
    # Exchange/Office365 ids are base64-ish (trailing '='); IMAP ids are
    # numeric; anything shaped like a SHA1 is treated as a vault id.
    if app_id == EXCHANGE_ONPREM_APP_ID and email_id.endswith('='):
        self._email_id_contains = ["exchange email id"]
    elif app_id == OFFICE365_APP_ID and email_id.endswith('='):
        self._email_id_contains = ["office 365 email id"]
    elif app_id == IMAP_APP_ID and email_id.isdigit():
        self._email_id_contains = ["imap email id"]
    elif ph_utils.is_sha1(email_id):
        self._email_id_contains = ["vault id"]
    return
def _int_process_email(self, rfc822_email, email_id, start_time_epoch):
    """Parse raw RFC822 text and build the container/artifact result list.

    Returns (status, message, results); results is empty on failure.
    """
    parsed_message = email.message_from_string(rfc822_email)
    tmp_dir = tempfile.mkdtemp(prefix='ph_email')
    try:
        status = self._handle_mail_object(parsed_message, email_id, rfc822_email, tmp_dir, start_time_epoch)
    except Exception as e:
        message = "ErrorExp in _handle_mail_object: {0}".format(e)
        self._base_connector.debug_print(message)
        return phantom.APP_ERROR, message, []
    results = [{
        'container': self._container,
        'artifacts': self._artifacts,
        'files': self._attachments,
        'temp_directory': tmp_dir,
    }]
    return status, PROC_EMAIL_PARSED, results
def check_and_update_eml(self, part):
    """If configured, save a .eml attachment part to a temp file and record it."""
    if not self._config[PROC_EMAIL_JSON_EXTRACT_EMAIL_ATTACHMENTS]:
        return
    tmp_dir = None
    msg = None
    file_extension = ''
    try:
        tmp_dir = tempfile.mkdtemp(prefix='ph_email')
        filename = self._get_file_name(part.get_filename())
        _, file_extension = os.path.splitext(filename)
        if filename.endswith('.eml'):
            file_path = os.path.join(tmp_dir, filename)
            # The attached message is the first payload of this part.
            msg = part.get_payload()[0]
            with open(file_path, 'wb') as f:
                f.write(msg.as_bytes())
            self._attachments.append({'file_name': filename, 'file_path': file_path})
    except IOError as e:
        if "File name too long" in str(e):
            # Retry under a short randomized file name.
            self.write_with_new_filename(tmp_dir, msg, file_extension, self._attachments, as_byte=True)
        else:
            self._base_connector.debug_print('Failed to write file: {}'.format(e))
    except Exception as e:
        self._base_connector.debug_print("Exception occurred: {}".format(e))
def write_with_new_filename(self, tmp_dir, data, file_extension, dict_to_fill, as_byte=False):
    """Write *data* under a short random name when the real name is too long.

    Appends {'file_name', 'file_path'} to *dict_to_fill* on success; any
    failure is logged and swallowed.
    """
    try:
        suffix = ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(16))
        file_name = "ph_long_file_name_{0}{1}".format('_' + suffix, file_extension)
        full_path = os.path.join(tmp_dir, file_name)
        with open(full_path, 'wb') as out_file:
            out_file.write(data.as_bytes() if as_byte else data)
        dict_to_fill.append({'file_name': file_name, 'file_path': full_path})
    except Exception as e:
        self._base_connector.debug_print('Exception while writing file: {}'.format(e))
def process_email(self, rfc822_email, email_id, epoch):
    """Entry point: parse one raw email and save its containers/artifacts.

    Returns (phantom.APP_SUCCESS, PROC_EMAIL_PROCESSED) on success, or
    (phantom.APP_ERROR, message) when parsing fails.
    """
    # Best-effort: the contains type is nice-to-have, never fatal.
    try:
        self._set_email_id_contains(email_id)
    except Exception:
        pass
    status, message, results = self._int_process_email(rfc822_email, email_id, epoch)
    if not status:
        return phantom.APP_ERROR, message
    self._parse_results(results)
    return phantom.APP_SUCCESS, PROC_EMAIL_PROCESSED
def _parse_results(self, results):
        """Save parsed containers, vault files and artifacts to the platform.

        Honors the container/artifact count limits from the current action
        parameters, then removes each result's temp directory.
        """
        param = self._base_connector.get_current_param()
        container_count = EWS_DEFAULT_CONTAINER_COUNT
        artifact_count = EWS_DEFAULT_ARTIFACT_COUNT
        if param:
            container_count = param.get(phantom.APP_JSON_CONTAINER_COUNT, EWS_DEFAULT_CONTAINER_COUNT)
            artifact_count = param.get(phantom.APP_JSON_ARTIFACT_COUNT, EWS_DEFAULT_ARTIFACT_COUNT)
        results = results[:container_count]
        for result in results:
            container = result.get('container')
            if not container:
                continue
            container.update(_container_common)
            try:
                ret_val, message, container_id = self._base_connector.save_container(container)
            except Exception as e:
                self._base_connector.debug_print("Exception: ", e)
                continue
            self._base_connector.debug_print(PROC_EMAIL_SAVE_CONTAINER.format(ret_val, message, container_id))
            if phantom.is_fail(ret_val):
                message = PROC_EMAIL_FAILED_CONTAINER.format(container['source_data_identifier'], message)
                self._base_connector.debug_print(message)
                continue
            if not container_id:
                message = PROC_EMAIL_SAVE_CONTAINER_FAILED
                self._base_connector.debug_print(message)
                continue
            # Add every extracted file to the vault under this container.
            files = result.get('files')
            vault_artifacts_added = 0
            for curr_file in files:
                ret_val, added_to_vault = self._handle_file(curr_file, container_id)
                if added_to_vault:
                    vault_artifacts_added += 1
            artifacts = result.get('artifacts')
            if not artifacts:
                continue
            # Scheduled polling respects the artifact limit; poll-now does not.
            if not self._base_connector.is_poll_now():
                artifacts = artifacts[:artifact_count]
            len_artifacts = len(artifacts)
            for j, artifact in enumerate(artifacts):
                if not artifact:
                    continue
                artifact['container_id'] = container_id
                self._set_sdi(artifact)
                # Only the last artifact triggers automation (playbooks).
                if (j + 1) == len_artifacts:
                    artifact['run_automation'] = True
                ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
                self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
        # Clean up all temp directories created during parsing.
        [shutil.rmtree(x['temp_directory'], ignore_errors=True) for x in results if x.get('temp_directory')]
        return self._base_connector.set_status(phantom.APP_SUCCESS)
def _add_vault_hashes_to_dictionary(self, cef_artifact, vault_id):
    """Look up the vault entry for *vault_id* and copy its hashes into *cef_artifact*.

    :param cef_artifact: dict mutated in place with fileHashSha256/Md5/Sha1
    :param vault_id: vault identifier to query
    :return: (status, message)
    """
    success, message, vault_info = phantom_rules.vault_info(vault_id=vault_id)
    if not vault_info:
        return phantom.APP_ERROR, "Vault ID not found"
    try:
        metadata = vault_info[0].get('metadata')
    except Exception:
        return phantom.APP_ERROR, PROC_EMAIL_FAILED_VAULT_CONT_DATA
    # Copy whichever hashes are present; a missing key (or metadata being
    # None) is silently skipped, matching the original best-effort behavior.
    for cef_key, meta_key in (('fileHashSha256', 'sha256'),
                              ('fileHashMd5', 'md5'),
                              ('fileHashSha1', 'sha1')):
        try:
            cef_artifact[cef_key] = metadata[meta_key]
        except Exception:
            pass
    return phantom.APP_SUCCESS, PROC_EMAIL_MAPPED_HASH_VAL
def _handle_file(self, curr_file, container_id):
        """Add one extracted file to the vault and create a 'Vault Artifact' for it.

        Returns (status, artifact_save_status); the second value reflects
        whether the artifact was saved.
        """
        file_name = curr_file.get('file_name')
        local_file_path = curr_file['file_path']
        contains = self._get_file_contains(local_file_path)
        vault_attach_dict = {}
        if not file_name:
            file_name = os.path.basename(local_file_path)
        self._base_connector.debug_print("Vault file name: {0}".format(file_name))
        # Record which action/run produced this vault entry.
        vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = self._base_connector.get_action_name()
        vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = self._base_connector.get_app_run_id()
        file_name = self._decode_uni_string(file_name, file_name)
        try:
            success, message, vault_id = phantom_rules.vault_add(file_location=local_file_path, container=container_id, file_name=file_name, metadata=vault_attach_dict)
        except Exception as e:
            self._base_connector.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(e))
            return phantom.APP_ERROR, phantom.APP_ERROR
        if not success:
            self._base_connector.debug_print(PROC_EMAIL_FAILED_VAULT_ADD_FILE.format(message))
            return phantom.APP_ERROR, phantom.APP_ERROR
        cef_artifact = {}
        if file_name:
            cef_artifact.update({'fileName': file_name})
        if vault_id:
            # cs6/cs6Label mirror vaultId for CEF consumers.
            cef_artifact.update({'vaultId': vault_id,
                'cs6': vault_id,
                'cs6Label': 'Vault ID'})
            self._add_vault_hashes_to_dictionary(cef_artifact, vault_id)
        if not cef_artifact:
            return phantom.APP_SUCCESS, phantom.APP_ERROR
        artifact = {}
        artifact.update(_artifact_common)
        artifact['container_id'] = container_id
        artifact['name'] = 'Vault Artifact'
        artifact['cef'] = cef_artifact
        if contains:
            artifact['cef_types'] = {'vaultId': contains, 'cs6': contains}
        self._set_sdi(artifact)
        ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
        self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
        return phantom.APP_SUCCESS, ret_val
def cmp2(self, a, b):
    """Python 3 replacement for the removed builtin cmp(): -1, 0 or 1."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def _set_sdi(self, input_dict):
    """Set 'source_data_identifier' on the artifact to a hash of its contents.

    Platforms older than HASH_FIXED_PHANTOM_VERSION need the hash computed
    client-side; newer ones fall through to the same computation here.
    """
    # Drop any stale identifier before hashing the remaining content.
    if 'source_data_identifier' in input_dict:
        del input_dict['source_data_identifier']
    phantom_version = self._base_connector.get_product_version()
    needs_client_hash = (not phantom_version
                         or self.cmp2(phantom_version, HASH_FIXED_PHANTOM_VERSION) == -1)
    dict_hash = self._create_dict_hash(input_dict) if needs_client_hash else None
    if dict_hash:
        input_dict['source_data_identifier'] = dict_hash
    else:
        input_dict['source_data_identifier'] = self._create_dict_hash(input_dict)
    return phantom.APP_SUCCESS
def _create_dict_hash(self, input_dict):
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
except Exception as e:
self._base_connector.debug_print('Exception: ', e)
return None
return hashlib.md5(input_dict_str.encode('utf-8')).hexdigest()
| true | true |
7900402e9d7be3a9e325300c7d54ac92b6f11496 | 1,002 | py | Python | kubernetes/test/test_v1alpha1_priority_class.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_priority_class.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_priority_class.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_priority_class import V1alpha1PriorityClass
class TestV1alpha1PriorityClass(unittest.TestCase):
    """Unit test stubs for the generated V1alpha1PriorityClass model."""

    def setUp(self):
        """No fixtures required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1alpha1PriorityClass(self):
        """
        Test V1alpha1PriorityClass
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1alpha1_priority_class.V1alpha1PriorityClass()
        pass


if __name__ == '__main__':
    unittest.main()
| 22.266667 | 105 | 0.720559 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_priority_class import V1alpha1PriorityClass
class TestV1alpha1PriorityClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1PriorityClass(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
7900411b618eb5dd888ef069e7cb4648e3c76211 | 818 | py | Python | var/spack/repos/builtin/packages/py-datalad-webapp/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/py-datalad-webapp/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/py-datalad-webapp/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDataladWebapp(PythonPackage):
    """DataLad extension for exposing commands via a web request API"""

    homepage = "https://github.com/datalad/datalad-webapp"
    pypi = "datalad_webapp/datalad_webapp-0.3.tar.gz"

    version('0.3', sha256='7bbb2ce58a7e0e6d1a7a2f33d7e50fe7e73cd764380e70fdc2d9f651c3d0e312')

    # Build-time only.
    depends_on('py-setuptools', type='build')
    # Runtime requirements per upstream setup.py / requirements.
    depends_on('py-datalad@0.12.5:', type=('build', 'run'))
    depends_on('py-flask@1.0:', type=('build', 'run'))
    depends_on('py-flask-restful', type=('build', 'run'))
    # NOTE(review): pytest-cov listed as a run dependency upstream — unusual
    # for a runtime requirement, but kept as declared.
    depends_on('py-pytest-cov', type=('build', 'run'))
| 37.181818 | 93 | 0.709046 |
from spack import *
class PyDataladWebapp(PythonPackage):
homepage = "https://github.com/datalad/datalad-webapp"
pypi = "datalad_webapp/datalad_webapp-0.3.tar.gz"
version('0.3', sha256='7bbb2ce58a7e0e6d1a7a2f33d7e50fe7e73cd764380e70fdc2d9f651c3d0e312')
depends_on('py-setuptools', type='build')
depends_on('py-datalad@0.12.5:', type=('build', 'run'))
depends_on('py-flask@1.0:', type=('build', 'run'))
depends_on('py-flask-restful', type=('build', 'run'))
depends_on('py-pytest-cov', type=('build', 'run'))
| true | true |
790041379749cc18b5becf495d594fcbd07f17ff | 5,794 | py | Python | bin/p3motioncor2.py | emkailu/PAT3DEM | 74e7a0f30179e49ea5c7da1bea893e21a3ed601a | [
"MIT"
] | null | null | null | bin/p3motioncor2.py | emkailu/PAT3DEM | 74e7a0f30179e49ea5c7da1bea893e21a3ed601a | [
"MIT"
] | null | null | null | bin/p3motioncor2.py | emkailu/PAT3DEM | 74e7a0f30179e49ea5c7da1bea893e21a3ed601a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import argparse
import subprocess
import glob
import math
from EMAN2 import *
def file_base(movie):
    """Return (movie_path, basename) where basename drops the extension and any '.p3' tag."""
    stem = os.path.splitext(movie)[0]
    return movie, os.path.basename(stem).replace('.p3', '')
def check(log, c_p):
    """Scan a MotionCor2 log and return indices of frames with excessive shift.

    A frame is "bad" when its shift relative to the previous frame, scaled
    by the real pixel size, exceeds the target resolution. Frame numbering
    starts at c_p['throw'].
    """
    with open(log) as handle:
        lines = handle.readlines()
    prev_x = 0
    prev_y = 0
    frame = c_p['throw']
    bad = []
    for line in lines:
        if "...... Frame (" not in line:
            continue
        fields = line.strip().split()
        x = float(fields[-2])
        y = float(fields[-1])
        if math.sqrt((x - prev_x)**2 + (y - prev_y)**2) * c_p['apixr'] > c_p['target']:
            bad.append(frame)
        frame += 1
        prev_x = x
        prev_y = y
    return bad
def run_motioncor2(movie, c_p):
    """Run MotionCor2 once for *movie*, then vet the resulting frame shifts.

    Writes a .com command file and a .log capture, runs global or patch-based
    local alignment depending on c_p['local'], and hands the detected bad
    frames to decide().
    """
    movie, basename = file_base(movie)
    out_base = basename + '_throw{:03}'.format(c_p['throw'])
    com_file = out_base + '.com'
    log_file = out_base + '.log'
    mrc_file = out_base + '.mrc'
    cmd = 'motioncor2 -InMrc {} -OutMrc {} -Iter 10 -Bft 100 -FtBin {} -Throw {} -FmRef -1 -Tilt {} {}'.format(movie, mrc_file, c_p['bin'], c_p['throw'], c_p['tilt'], c_p['gainref'])
    with open(com_file, 'w') as com_w:
        if c_p['local'] == 0:
            com_w.write('{} -Patch 0 0'.format(cmd))
        else:
            # Local (patch) alignment also needs dose weighting parameters.
            com_w.write('{} -Patch {} {} -LogFile {} -FmDose {} -PixSize {} -kV {}'.format(cmd, c_p['patch'], c_p['patch'], out_base + '_', c_p['dose'], c_p['apixr'], c_p['voltage']))
    with open(log_file, 'w') as log_w:
        subprocess.call(['sh', com_file], stdout=log_w, stderr=subprocess.STDOUT)
    # Inspect the shifts and decide whether to rerun with more frames thrown.
    bad = check(log_file, c_p)
    decide(movie, bad, c_p)
def decide(movie, bad, c_p):
	# No bad frames: either switch from global to local alignment, or we are done.
	if bad == []:
		if c_p['local'] == 0:
			print "No bad frames. Do local now."
			c_p['local'] = 1
			run_motioncor2(movie, c_p)
		else:
			print "No bad frames. Local done for {}. Throwed the first {} frames.".format(movie, c_p['throw'])
	# Still within the allowed throw budget: drop the leading bad frames and rerun.
	elif max(bad) < c_p['maxthrow']:
		c_p['throw'] = max(bad)
		print "Throw the first {} frames.".format(c_p['throw']), "Bad frames: ", bad
		run_motioncor2(movie, c_p)
	else: # if too many bad frames
		print '{} has too many bad frames: '.format(movie), bad
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + """ [options] <movies>
	Output unfiltered and filtered sum using MotionCor2.
	Automatically discard bad frames.
	Needs:
	'motioncor2' command (v1, Zheng et al., 2017)
	'EMAN2' python module (v2.11, Tang et al., 2007)
	"""
	args_def = {'apix':1.315, 'apixr':0.6575, 'bin':1, 'patch':5, 'voltage':300, 'time':200, 'rate':7, 'target':5, 'tilt':'0 0', 'gainref':''}
	parser = argparse.ArgumentParser()
	parser.add_argument("movie", nargs='*', help="specify movies (mrc, mrcs, dm4) to be processed")
	parser.add_argument("-a", "--apix", type=float, help="specify counting apix, by default {}".format(args_def['apix']))
	parser.add_argument("-ar", "--apixr", type=float, help="specify real apix of input movie, by default {}".format(args_def['apixr']))
	parser.add_argument("-b", "--bin", type=float, help="specify binning factor, by default {}".format(args_def['bin']))
	parser.add_argument("-p", "--patch", type=int, help="specify the patch, by default {}".format(args_def['patch']))
	parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
	parser.add_argument("-t", "--time", type=float, help="specify exposure time per frame in ms, by default {}".format(args_def['time']))
	parser.add_argument("-r", "--rate", type=float, help="specify dose rate in e/pix/s (counting pixel, not superresolution), by default {}".format(args_def['rate']))
	parser.add_argument("-ta", "--target", type=float, help="specify the target resolution, by default {}".format(args_def['target']))
	parser.add_argument("-ti", "--tilt", type=str, help="specify the tilt, by default {}".format(args_def['tilt']))
	parser.add_argument("-g", "--gainref", type=str, help="specify the gainref option, by default {}. e.g., '-Gain ../14sep05c_raw_196/norm-amibox05-0.mrc -RotGain 0 -FlipGain 1'".format(args_def['gainref']))
	args = parser.parse_args()
	if len(sys.argv) == 1:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options."
		sys.exit(1)
	# get default values
	for i in args_def:
		if args.__dict__[i] == None:
			args.__dict__[i] = args_def[i]
	# get common parameters
	# Dose per frame in e/A^2, derived from exposure time, rate and counting apix.
	dose = args.time/1000.0 * args.rate / args.apix ** 2
	voltage = args.voltage
	c_p = {'dose':dose, 'apix':args.apix, 'apixr':args.apixr, 'bin':args.bin, 'patch':args.patch, 'voltage':voltage, 'target':args.target, 'tilt':args.tilt, 'throw':0, 'gainref':args.gainref}
	# loop over all the input movies
	for movie in args.movie:
		# Transparently handle gzipped movies.
		if movie[-3:] == '.gz':
			subprocess.call(['gunzip', movie])
			movie = movie[:-3]
		basename = os.path.basename(os.path.splitext(movie)[0])
		suffix = os.path.basename(os.path.splitext(movie)[1])
		basename_raw = basename
		# unify mrc and mrcs to mrcs format
		m = basename+'.p3.mrcs'
		if suffix in ['.mrc','.mrcs']:
			os.symlink(movie, m)
			movie, basename = file_base(m)
		# get nimg
		c_p['nimg'] = EMUtil.get_image_count(movie)
		# convert dm4 to mrcs
		if suffix == '.dm4':
			for i in xrange(c_p['nimg']):
				d=EMData(movie, i)
				d.write_image(m, i)
			movie, basename = file_base(m)
		# here we assume 36e is the maximal dose that still contributes to visualization of protein side chains, and a total of 20e is the minimum to ensure good alignment. therefore, you can throw the first 16e at most.
		c_p['maxthrow'] = min(16/dose, c_p['nimg'] - 20/dose)
		# motioncor2
		c_p['local'] = 0 #0 means no local, only global
		c_p['throw'] = 0
		run_motioncor2(movie, c_p)
		# delete intermediate files, they contain '.p3.'
		for i in glob.glob(basename_raw + '*.p3.*'):
			os.unlink(i)

if __name__ == '__main__':
	main()
| 41.092199 | 214 | 0.654298 |
import os
import sys
import argparse
import subprocess
import glob
import math
from EMAN2 import *
def file_base(movie):
return movie, os.path.basename(os.path.splitext(movie)[0]).replace('.p3', '')
def check(log,c_p):
with open(log) as log_r:
lines = [line for line in log_r]
x0 = 0
y0 = 0
f = c_p['throw']
bad = []
while len(lines) > 0:
line1 = lines.pop(0)
if "...... Frame (" in line1:
line = line1.strip().split()
x = float(line[-2])
y = float(line[-1])
if math.sqrt((x - x0)**2 + (y - y0)**2) * c_p['apixr'] > c_p['target']:
bad += [f]
f += 1
x0 = x
y0 = y
return bad
def run_motioncor2(movie, c_p):
movie, basename = file_base(movie)
out = basename+'_throw{:03}'.format(c_p['throw'])
o_com = out + '.com'
o_log = out + '.log'
o_mrc = out + '.mrc'
common = 'motioncor2 -InMrc {} -OutMrc {} -Iter 10 -Bft 100 -FtBin {} -Throw {} -FmRef -1 -Tilt {} {}'.format(movie,o_mrc,c_p['bin'],c_p['throw'],c_p['tilt'], c_p['gainref'])
with open(o_com, 'w') as o_com_w:
if c_p['local'] == 0:
o_com_w.write('{} -Patch 0 0'.format(common))
else:
o_com_w.write('{} -Patch {} {} -LogFile {} -FmDose {} -PixSize {} -kV {}'.format(common,c_p['patch'],c_p['patch'],out+'_',c_p['dose'],c_p['apixr'],c_p['voltage']))
with open(o_log, 'w') as write_log:
subprocess.call(['sh', o_com], stdout=write_log, stderr=subprocess.STDOUT)
bad = check(o_log,c_p)
decide(movie, bad, c_p)
def decide(movie, bad, c_p):
if bad == []:
if c_p['local'] == 0:
print "No bad frames. Do local now."
c_p['local'] = 1
run_motioncor2(movie, c_p)
else:
print "No bad frames. Local done for {}. Throwed the first {} frames.".format(movie, c_p['throw'])
elif max(bad) < c_p['maxthrow']:
c_p['throw'] = max(bad)
print "Throw the first {} frames.".format(c_p['throw']), "Bad frames: ", bad
run_motioncor2(movie, c_p)
else:
print '{} has too many bad frames: '.format(movie), bad
def main():
    """Parse command-line options, normalize each input movie to an mrcs
    stack, and drive the MotionCor2 alignment loop (Python 2 / EMAN2 script)."""
    progname = os.path.basename(sys.argv[0])
    usage = progname + """ [options] <movies>
Output unfiltered and filtered sum using MotionCor2.
Automatically discard bad frames.
Needs:
'motioncor2' command (v1, Zheng et al., 2017)
'EMAN2' python module (v2.11, Tang et al., 2007)
"""
    # Defaults used both for argparse help text and to fill unset options.
    args_def = {'apix':1.315, 'apixr':0.6575, 'bin':1, 'patch':5, 'voltage':300, 'time':200, 'rate':7, 'target':5, 'tilt':'0 0', 'gainref':''}
    parser = argparse.ArgumentParser()
    parser.add_argument("movie", nargs='*', help="specify movies (mrc, mrcs, dm4) to be processed")
    parser.add_argument("-a", "--apix", type=float, help="specify counting apix, by default {}".format(args_def['apix']))
    parser.add_argument("-ar", "--apixr", type=float, help="specify real apix of input movie, by default {}".format(args_def['apixr']))
    parser.add_argument("-b", "--bin", type=float, help="specify binning factor, by default {}".format(args_def['bin']))
    parser.add_argument("-p", "--patch", type=int, help="specify the patch, by default {}".format(args_def['patch']))
    parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
    parser.add_argument("-t", "--time", type=float, help="specify exposure time per frame in ms, by default {}".format(args_def['time']))
    parser.add_argument("-r", "--rate", type=float, help="specify dose rate in e/pix/s (counting pixel, not superresolution), by default {}".format(args_def['rate']))
    parser.add_argument("-ta", "--target", type=float, help="specify the target resolution, by default {}".format(args_def['target']))
    parser.add_argument("-ti", "--tilt", type=str, help="specify the tilt, by default {}".format(args_def['tilt']))
    parser.add_argument("-g", "--gainref", type=str, help="specify the gainref option, by default {}. e.g., '-Gain ../14sep05c_raw_196/norm-amibox05-0.mrc -RotGain 0 -FlipGain 1'".format(args_def['gainref']))
    args = parser.parse_args()
    if len(sys.argv) == 1:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options."
        sys.exit(1)
    # Fill unspecified options with the defaults above.
    for i in args_def:
        if args.__dict__[i] == None:
            args.__dict__[i] = args_def[i]
    # Dose per frame in e/A^2: time is ms, rate is e/pix/s, apix is A/pix.
    dose = args.time/1000.0 * args.rate / args.apix ** 2
    voltage = args.voltage
    c_p = {'dose':dose, 'apix':args.apix, 'apixr':args.apixr, 'bin':args.bin, 'patch':args.patch, 'voltage':voltage, 'target':args.target, 'tilt':args.tilt, 'throw':0, 'gainref':args.gainref}
    for movie in args.movie:
        if movie[-3:] == '.gz':
            subprocess.call(['gunzip', movie])
            movie = movie[:-3]
        basename = os.path.basename(os.path.splitext(movie)[0])
        suffix = os.path.basename(os.path.splitext(movie)[1])
        basename_raw = basename
        # Intermediate files carry the '.p3.' marker so they can be cleaned up.
        m = basename+'.p3.mrcs'
        if suffix in ['.mrc','.mrcs']:
            os.symlink(movie, m)
            movie, basename = file_base(m)
        c_p['nimg'] = EMUtil.get_image_count(movie)
        if suffix == '.dm4':
            # Convert dm4 frame by frame to an mrcs stack via EMAN2.
            for i in xrange(c_p['nimg']):
                d=EMData(movie, i)
                d.write_image(m, i)
            movie, basename = file_base(m)
        # NOTE(review): presumably "throw at most ~16 e/A^2 worth of frames
        # while keeping at least ~20 e/A^2 of exposure" -- confirm intent.
        c_p['maxthrow'] = min(16/dose, c_p['nimg'] - 20/dose)
        c_p['local'] = 0
        c_p['throw'] = 0
        run_motioncor2(movie, c_p)
        # Delete intermediate files; they contain '.p3.' in their names.
        for i in glob.glob(basename_raw + '*.p3.*'):
            os.unlink(i)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| false | true |
790042be3e2c9b1e54c4488b33629bd6ccfbd3da | 2,974 | py | Python | tests/moduletool/test_python_dependencies.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 6 | 2021-03-09T10:24:02.000Z | 2022-01-16T03:52:11.000Z | tests/moduletool/test_python_dependencies.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 1,319 | 2020-12-18T08:52:29.000Z | 2022-03-31T18:17:32.000Z | tests/moduletool/test_python_dependencies.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 4 | 2021-03-03T15:36:50.000Z | 2022-03-11T11:41:51.000Z | """
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import common
from inmanta.loader import SourceInfo
from inmanta.module import Project
def test_collect_python_requirements(tmpdir):
    """Requirements declared by v1 modules are aggregated by the project,
    with line continuations joined and comment lines dropped."""
    common.makeproject(tmpdir, "test-project", deps=[("mod1", ""), ("mod2", "")], imports=["mod1", "mod2"])
    root = os.path.join(tmpdir, "test-project")
    libs = os.path.join(root, "libs")
    # First module: a VCS requirement plus a backslash-continued specifier.
    common.makemodule(libs, "mod1", project=False)
    mod1_dir = os.path.join(libs, "mod1")
    req_mod1 = """iplib@git+https://github.com/bartv/python3-iplib
pytest\
>=\
1.5
iplib>=0.0.1
"""
    common.add_file(mod1_dir, "requirements.txt", req_mod1, msg="initial commit")
    # Second module: requirements interleaved with comments.
    common.makemodule(libs, "mod2", project=False)
    mod2_dir = os.path.join(libs, "mod2")
    req_mod2 = """# A comment
dummy-yummy # A comment
# Another comment
"""
    common.add_file(mod2_dir, "requirements.txt", req_mod2, msg="initial commit")
    project = Project(root, venv_path=os.path.join(root, ".env"))
    Project.set(project)
    for mod_name in ("mod1", "mod2"):
        project.load_module(mod_name, allow_v1=True)
    expected = ["iplib@git+https://github.com/bartv/python3-iplib", "pytest>=1.5", "iplib>=0.0.1", "dummy-yummy"]
    assert sorted(project.collect_python_requirements()) == sorted(expected)
def test_requirements_from_source_info(tmpdir):
    """Test the code path used by the exporter"""
    common.makeproject(tmpdir, "test-project", deps=[("mod1", "")], imports=["mod1"])
    root = os.path.join(tmpdir, "test-project")
    libs = os.path.join(root, "libs")
    common.makemodule(libs, "mod1", project=False)
    mod1_dir = os.path.join(libs, "mod1")
    # Comment line plus a backslash-continued specifier.
    req_txt = """# I'm a comment
pytest\
>=\
1.5
iplib>=0.0.1
"""
    common.add_file(mod1_dir, "requirements.txt", req_txt, msg="initial commit")
    project = Project(root, venv_path=os.path.join(root, ".env"))
    Project.set(project)
    project.load_module("mod1", allow_v1=True)
    requirements = SourceInfo(mod1_dir, "inmanta_plugins.mod1").requires
    assert sorted(requirements) == sorted(["pytest>=1.5", "iplib>=0.0.1"])
    # This would fail if the comments weren't filtered out
    project.virtualenv.install_from_list(requirements)
| 34.988235 | 118 | 0.697377 | import os
import common
from inmanta.loader import SourceInfo
from inmanta.module import Project
def test_collect_python_requirements(tmpdir):
    """Requirements declared by v1 modules are aggregated by the project,
    with line continuations joined and comment lines dropped."""
    common.makeproject(tmpdir, "test-project", deps=[("mod1", ""), ("mod2", "")], imports=["mod1", "mod2"])
    root = os.path.join(tmpdir, "test-project")
    libs = os.path.join(root, "libs")
    # First module: a VCS requirement plus a backslash-continued specifier.
    common.makemodule(libs, "mod1", project=False)
    mod1_dir = os.path.join(libs, "mod1")
    req_mod1 = """iplib@git+https://github.com/bartv/python3-iplib
pytest\
>=\
1.5
iplib>=0.0.1
"""
    common.add_file(mod1_dir, "requirements.txt", req_mod1, msg="initial commit")
    # Second module: requirements interleaved with comments.
    common.makemodule(libs, "mod2", project=False)
    mod2_dir = os.path.join(libs, "mod2")
    req_mod2 = """# A comment
dummy-yummy # A comment
# Another comment
"""
    common.add_file(mod2_dir, "requirements.txt", req_mod2, msg="initial commit")
    project = Project(root, venv_path=os.path.join(root, ".env"))
    Project.set(project)
    for mod_name in ("mod1", "mod2"):
        project.load_module(mod_name, allow_v1=True)
    expected = ["iplib@git+https://github.com/bartv/python3-iplib", "pytest>=1.5", "iplib>=0.0.1", "dummy-yummy"]
    assert sorted(project.collect_python_requirements()) == sorted(expected)
def test_requirements_from_source_info(tmpdir):
    """Test the code path used by the exporter."""
    common.makeproject(tmpdir, "test-project", deps=[("mod1", "")], imports=["mod1"])
    root = os.path.join(tmpdir, "test-project")
    libs = os.path.join(root, "libs")
    common.makemodule(libs, "mod1", project=False)
    mod1_dir = os.path.join(libs, "mod1")
    # Comment line plus a backslash-continued specifier.
    req_txt = """# I'm a comment
pytest\
>=\
1.5
iplib>=0.0.1
"""
    common.add_file(mod1_dir, "requirements.txt", req_txt, msg="initial commit")
    project = Project(root, venv_path=os.path.join(root, ".env"))
    Project.set(project)
    project.load_module("mod1", allow_v1=True)
    requirements = SourceInfo(mod1_dir, "inmanta_plugins.mod1").requires
    assert sorted(requirements) == sorted(["pytest>=1.5", "iplib>=0.0.1"])
    # This would fail if the comments weren't filtered out
    project.virtualenv.install_from_list(requirements)
| true | true |
790042efbcdec0a389e086edaa634f05971d0edf | 173 | py | Python | Mundo 1/Ex003 - soma.py | FelipeDreissig/Prog-em-Py---CursoEmVideo | 59a85e228b4c7bc0738d1a213e71b0f7fb07d03a | [
"MIT"
] | null | null | null | Mundo 1/Ex003 - soma.py | FelipeDreissig/Prog-em-Py---CursoEmVideo | 59a85e228b4c7bc0738d1a213e71b0f7fb07d03a | [
"MIT"
] | null | null | null | Mundo 1/Ex003 - soma.py | FelipeDreissig/Prog-em-Py---CursoEmVideo | 59a85e228b4c7bc0738d1a213e71b0f7fb07d03a | [
"MIT"
] | null | null | null | # Exercício número 3 da lista
# Ex003: read two integers from the user and print their sum.
primeiro = int(input('DIgite um valor:'))
segundo = int(input('Digite outro valor:'))
soma = primeiro + segundo
print('A soma entre {} e {} é {}'.format(primeiro, segundo, soma))
n1 = int(input('DIgite um valor:'))
n2 = int(input('Digite outro valor:'))
soma = n1+n2
print('A soma entre {} e {} é {}'.format(n1, n2, soma)) | true | true |
79004322e1e6138ed1b408bce60ad1b602813964 | 16,753 | py | Python | python/pyspark/pandas/data_type_ops/base.py | satya323/spark | 4f825aad65f2650343e7cfbef39465ebb4e403b6 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-12-11T08:54:45.000Z | 2021-12-11T08:54:45.000Z | python/pyspark/pandas/data_type_ops/base.py | satya323/spark | 4f825aad65f2650343e7cfbef39465ebb4e403b6 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-15T04:24:15.000Z | 2020-11-15T04:31:22.000Z | python/pyspark/pandas/data_type_ops/base.py | satya323/spark | 4f825aad65f2650343e7cfbef39465ebb4e403b6 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-12-11T06:25:34.000Z | 2022-01-06T07:22:30.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
TimestampNTZType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
spark_type_to_pandas_dtype,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
    """Check whether the `operand` is valid for arithmetic operations against numerics."""
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(operand, numbers.Number):
        # Plain Python numerics are fine; bools only when explicitly allowed.
        return allow_bool or not isinstance(operand, bool)
    if not isinstance(operand, IndexOpsMixin):
        return False
    if isinstance(operand.dtype, CategoricalDtype):
        return False
    data_type = operand.spark.data_type
    return isinstance(data_type, NumericType) or (
        allow_bool and isinstance(data_type, BooleanType)
    )
def transform_boolean_operand_to_numeric(
    operand: Any, *, spark_type: Optional[DataType] = None
) -> Any:
    """Transform boolean operand to numeric.

    If the `operand` is:
        - a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
        - a boolean literal, transform to the int value.
    Otherwise, return the operand as it is.
    """
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(operand, bool):
        return int(operand)
    is_boolean_col = isinstance(operand, IndexOpsMixin) and isinstance(
        operand.spark.data_type, BooleanType
    )
    if not is_boolean_col:
        return operand
    assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
    assert isinstance(spark_type, NumericType), "spark_type must be NumericType"
    source_field = operand._internal.data_fields[0]
    new_dtype = spark_type_to_pandas_dtype(
        spark_type, use_extension_dtypes=source_field.is_extension_dtype
    )
    return operand._with_new_scol(
        operand.spark.column.cast(spark_type),
        field=source_field.copy(dtype=new_dtype, spark_type=spark_type),
    )
def _as_categorical_type(
    index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
    """Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
    assert isinstance(dtype, CategoricalDtype)
    if dtype.categories is None:
        # No categories given: derive them from the data via factorize() and
        # keep the resulting codes column as-is.
        codes, uniques = index_ops.factorize()
        return codes._with_new_scol(
            codes.spark.column,
            field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
        )
    else:
        categories = dtype.categories
        if len(categories) == 0:
            # Empty category set: every value maps to the missing code -1.
            scol = SF.lit(-1)
        else:
            # Build a literal map {category -> code} and look each value up;
            # values absent from the map fall back to -1 (missing).
            kvs = chain(
                *[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
            )
            map_scol = F.create_map(*kvs)
            scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))
        return index_ops._with_new_scol(
            scol.cast(spark_type),
            field=index_ops._internal.data_fields[0].copy(
                dtype=dtype, spark_type=spark_type, nullable=False
            ),
        )
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
    """Cast `index_ops` to BooleanType Spark type, given `dtype`."""
    spark_type = BooleanType()
    casted = index_ops.spark.column.cast(spark_type)
    if not isinstance(dtype, extension_dtypes):
        # Plain (non-nullable) bool dtype: nulls become False.
        casted = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(casted)
    new_field = index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
    return index_ops._with_new_scol(casted, field=new_field)
def _as_string_type(
    index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
    """Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
    representing null Spark column. Note that `null_str` is for non-extension dtypes only.
    """
    spark_type = StringType()
    scol = index_ops.spark.column.cast(spark_type)
    if not isinstance(dtype, extension_dtypes):
        # Non-extension dtypes render nulls as `null_str` (e.g. "None").
        scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(scol)
    new_field = index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
    return index_ops._with_new_scol(scol, field=new_field)
def _as_other_type(
    index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
    """Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.

    Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
    """
    from pyspark.pandas.internal import InternalField

    need_pre_process = isinstance(dtype, CategoricalDtype) or isinstance(
        spark_type, (BooleanType, StringType)
    )
    assert not need_pre_process, "Pre-processing is needed before the type casting."
    return index_ops._with_new_scol(
        index_ops.spark.column.cast(spark_type), field=InternalField(dtype=dtype)
    )
def _sanitize_list_like(operand: Any) -> None:
"""Raise TypeError if operand is list-like."""
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError("The operation can not be applied to %s." % type(operand).__name__)
def _is_valid_for_logical_operator(right: Any) -> bool:
    # Accept Python ints/bools, or pandas-on-Spark objects backed by a
    # boolean or integral Spark column.
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(right, (int, bool)):
        return True
    if not isinstance(right, IndexOpsMixin):
        return False
    return isinstance(right.spark.data_type, (BooleanType, IntegralType))
def _is_boolean_type(right: Any) -> bool:
    # True for Python bools and for pandas-on-Spark objects backed by a
    # boolean Spark column.
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(right, bool):
        return True
    return isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)
class DataTypeOps(object, metaclass=ABCMeta):
    """The base class for binary operations of pandas-on-Spark objects (of different data types)."""

    def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
        # Dispatch to the concrete ops subclass for this (dtype, spark_type)
        # pair. NOTE(review): branch order appears significant -- e.g.
        # CategoricalDtype is checked before any Spark-type branch, and
        # DecimalType before the generic FractionalType branch; confirm
        # against the Spark type hierarchy before reordering.
        from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
        from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
        from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
        from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
        from pyspark.pandas.data_type_ops.date_ops import DateOps
        from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps
        from pyspark.pandas.data_type_ops.null_ops import NullOps
        from pyspark.pandas.data_type_ops.num_ops import (
            DecimalOps,
            FractionalExtensionOps,
            FractionalOps,
            IntegralExtensionOps,
            IntegralOps,
        )
        from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
        from pyspark.pandas.data_type_ops.udt_ops import UDTOps

        if isinstance(dtype, CategoricalDtype):
            return object.__new__(CategoricalOps)
        elif isinstance(spark_type, DecimalType):
            return object.__new__(DecimalOps)
        elif isinstance(spark_type, FractionalType):
            # Extension (nullable) pandas float dtypes get their own ops class.
            if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
                return object.__new__(FractionalExtensionOps)
            else:
                return object.__new__(FractionalOps)
        elif isinstance(spark_type, IntegralType):
            # Extension (nullable) pandas integer dtypes get their own ops class.
            if extension_dtypes_available and type(dtype) in [
                Int8Dtype,
                Int16Dtype,
                Int32Dtype,
                Int64Dtype,
            ]:
                return object.__new__(IntegralExtensionOps)
            else:
                return object.__new__(IntegralOps)
        elif isinstance(spark_type, StringType):
            if extension_object_dtypes_available and isinstance(dtype, StringDtype):
                return object.__new__(StringExtensionOps)
            else:
                return object.__new__(StringOps)
        elif isinstance(spark_type, BooleanType):
            if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
                return object.__new__(BooleanExtensionOps)
            else:
                return object.__new__(BooleanOps)
        elif isinstance(spark_type, TimestampType):
            return object.__new__(DatetimeOps)
        elif isinstance(spark_type, TimestampNTZType):
            return object.__new__(DatetimeNTZOps)
        elif isinstance(spark_type, DateType):
            return object.__new__(DateOps)
        elif isinstance(spark_type, BinaryType):
            return object.__new__(BinaryOps)
        elif isinstance(spark_type, ArrayType):
            return object.__new__(ArrayOps)
        elif isinstance(spark_type, MapType):
            return object.__new__(MapOps)
        elif isinstance(spark_type, StructType):
            return object.__new__(StructOps)
        elif isinstance(spark_type, NullType):
            return object.__new__(NullOps)
        elif isinstance(spark_type, UserDefinedType):
            return object.__new__(UDTOps)
        else:
            raise TypeError("Type %s was not understood." % dtype)

    def __init__(self, dtype: Dtype, spark_type: DataType):
        self.dtype = dtype
        self.spark_type = spark_type

    @property
    def pretty_name(self) -> str:
        # Human-readable type name used in error messages; subclasses must override.
        raise NotImplementedError()

    # --- Arithmetic operators: unsupported by default, overridden by
    # --- subclasses whose data type supports them.
    def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Addition can not be applied to %s." % self.pretty_name)

    def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)

    def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)

    def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("True division can not be applied to %s." % self.pretty_name)

    def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Floor division can not be applied to %s." % self.pretty_name)

    def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Modulo can not be applied to %s." % self.pretty_name)

    def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)

    # --- Reflected arithmetic operators (right-hand side versions).
    def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Addition can not be applied to %s." % self.pretty_name)

    def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)

    def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)

    def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("True division can not be applied to %s." % self.pretty_name)

    def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Floor division can not be applied to %s." % self.pretty_name)

    def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Modulo can not be applied to %s." % self.pretty_name)

    def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)

    # --- Bitwise/logical operators.
    def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)

    def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Bitwise xor can not be applied to %s." % self.pretty_name)

    def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)

    # Reflected bitwise ops delegate to the forward ops on `left`.
    def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        _sanitize_list_like(right)
        return left.__and__(right)

    def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        _sanitize_list_like(right)
        return left ^ right

    def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        _sanitize_list_like(right)
        return left.__or__(right)

    # --- Unary operators.
    def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
        raise TypeError("Unary - can not be applied to %s." % self.pretty_name)

    def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
        raise TypeError("abs() can not be applied to %s." % self.pretty_name)

    # --- Comparison operators: ordering comparisons are unsupported by
    # --- default; equality works for every type via Spark Column equality.
    def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("< can not be applied to %s." % self.pretty_name)

    def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("<= can not be applied to %s." % self.pretty_name)

    def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("> can not be applied to %s." % self.pretty_name)

    def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError(">= can not be applied to %s." % self.pretty_name)

    def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op

        _sanitize_list_like(right)
        return column_op(Column.__eq__)(left, right)

    def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op

        _sanitize_list_like(right)
        return column_op(Column.__ne__)(left, right)

    def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
        raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)

    def restore(self, col: pd.Series) -> pd.Series:
        """Restore column when to_pandas."""
        return col

    def prepare(self, col: pd.Series) -> pd.Series:
        """Prepare column when from_pandas."""
        # Normalize NaN to None so Spark stores proper nulls.
        return col.replace({np.nan: None})

    def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
        # Null check on the Spark column; result is a non-nullable bool column.
        return index_ops._with_new_scol(
            index_ops.spark.column.isNull(),
            field=index_ops._internal.data_fields[0].copy(
                dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
            ),
        )

    def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
        # No NaN representation by default; subclasses with float data override.
        return index_ops.copy()

    def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
        raise TypeError("astype can not be applied to %s." % self.pretty_name)
| 40.271635 | 100 | 0.685429 |
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
TimestampNTZType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
spark_type_to_pandas_dtype,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
    """Check whether the `operand` is valid for arithmetic operations against numerics."""
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(operand, numbers.Number):
        # Plain Python numerics are fine; bools only when explicitly allowed.
        return allow_bool or not isinstance(operand, bool)
    if not isinstance(operand, IndexOpsMixin):
        return False
    if isinstance(operand.dtype, CategoricalDtype):
        return False
    data_type = operand.spark.data_type
    return isinstance(data_type, NumericType) or (
        allow_bool and isinstance(data_type, BooleanType)
    )
def transform_boolean_operand_to_numeric(
    operand: Any, *, spark_type: Optional[DataType] = None
) -> Any:
    """Transform boolean operand to numeric: a boolean IndexOpsMixin is cast to
    `spark_type`, a boolean literal becomes its int value; anything else is
    returned unchanged."""
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(operand, bool):
        return int(operand)
    is_boolean_col = isinstance(operand, IndexOpsMixin) and isinstance(
        operand.spark.data_type, BooleanType
    )
    if not is_boolean_col:
        return operand
    assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
    assert isinstance(spark_type, NumericType), "spark_type must be NumericType"
    source_field = operand._internal.data_fields[0]
    new_dtype = spark_type_to_pandas_dtype(
        spark_type, use_extension_dtypes=source_field.is_extension_dtype
    )
    return operand._with_new_scol(
        operand.spark.column.cast(spark_type),
        field=source_field.copy(dtype=new_dtype, spark_type=spark_type),
    )
def _as_categorical_type(
    index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
    """Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
    assert isinstance(dtype, CategoricalDtype)
    if dtype.categories is None:
        # No categories given: derive them from the data via factorize() and
        # keep the resulting codes column as-is.
        codes, uniques = index_ops.factorize()
        return codes._with_new_scol(
            codes.spark.column,
            field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
        )
    else:
        categories = dtype.categories
        if len(categories) == 0:
            # Empty category set: every value maps to the missing code -1.
            scol = SF.lit(-1)
        else:
            # Build a literal map {category -> code} and look each value up;
            # values absent from the map fall back to -1 (missing).
            kvs = chain(
                *[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
            )
            map_scol = F.create_map(*kvs)
            scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))
        return index_ops._with_new_scol(
            scol.cast(spark_type),
            field=index_ops._internal.data_fields[0].copy(
                dtype=dtype, spark_type=spark_type, nullable=False
            ),
        )
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
    """Cast `index_ops` to BooleanType Spark type, given `dtype`."""
    spark_type = BooleanType()
    casted = index_ops.spark.column.cast(spark_type)
    if not isinstance(dtype, extension_dtypes):
        # Plain (non-nullable) bool dtype: nulls become False.
        casted = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(casted)
    new_field = index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
    return index_ops._with_new_scol(casted, field=new_field)
def _as_string_type(
    index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
    """Cast `index_ops` to StringType Spark type; nulls are rendered as
    `null_str` for non-extension dtypes."""
    spark_type = StringType()
    scol = index_ops.spark.column.cast(spark_type)
    if not isinstance(dtype, extension_dtypes):
        # Non-extension dtypes render nulls as `null_str` (e.g. "None").
        scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(scol)
    new_field = index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
    return index_ops._with_new_scol(scol, field=new_field)
def _as_other_type(
    index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
    """Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.

    Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
    """
    from pyspark.pandas.internal import InternalField

    need_pre_process = isinstance(dtype, CategoricalDtype) or isinstance(
        spark_type, (BooleanType, StringType)
    )
    assert not need_pre_process, "Pre-processing is needed before the type casting."
    return index_ops._with_new_scol(
        index_ops.spark.column.cast(spark_type), field=InternalField(dtype=dtype)
    )
def _sanitize_list_like(operand: Any) -> None:
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError("The operation can not be applied to %s." % type(operand).__name__)
def _is_valid_for_logical_operator(right: Any) -> bool:
    # Accept Python ints/bools, or pandas-on-Spark objects backed by a
    # boolean or integral Spark column.
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(right, (int, bool)):
        return True
    if not isinstance(right, IndexOpsMixin):
        return False
    return isinstance(right.spark.data_type, (BooleanType, IntegralType))
def _is_boolean_type(right: Any) -> bool:
    # True for Python bools and for pandas-on-Spark objects backed by a
    # boolean Spark column.
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(right, bool):
        return True
    return isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)
class DataTypeOps(object, metaclass=ABCMeta):
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, TimestampNTZType):
return object.__new__(DatetimeNTZOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise xor can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__and__(right)
def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left ^ right
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__or__(right)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary - can not be applied to %s." % self.pretty_name)
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("abs() can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__eq__)(left, right)
def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__ne__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def restore(self, col: pd.Series) -> pd.Series:
return col
def prepare(self, col: pd.Series) -> pd.Series:
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops.copy()
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
| true | true |
790043b9b1a0c584ef9b0ee96ef901ad6a6ac26b | 707 | py | Python | user/migrations/0018_loginrequest.py | EncryptEx/myhackupc | 3b7c8bce8528e61aab65c976a3c9b4a700210c09 | [
"MIT"
] | 8 | 2017-11-20T09:11:37.000Z | 2020-01-26T19:23:33.000Z | user/migrations/0018_loginrequest.py | EncryptEx/myhackupc | 3b7c8bce8528e61aab65c976a3c9b4a700210c09 | [
"MIT"
] | 38 | 2018-07-11T08:03:43.000Z | 2019-10-22T09:26:36.000Z | user/migrations/0018_loginrequest.py | EncryptEx/myhackupc | 3b7c8bce8528e61aab65c976a3c9b4a700210c09 | [
"MIT"
] | 6 | 2019-01-21T18:19:17.000Z | 2020-03-09T17:42:36.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2021-10-02 20:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0017_user_email_subscribed'),
]
operations = [
migrations.CreateModel(
name='LoginRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=30)),
('latestRequest', models.DateTimeField()),
('login_tries', models.IntegerField(default=1)),
],
),
]
| 28.28 | 114 | 0.591231 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0017_user_email_subscribed'),
]
operations = [
migrations.CreateModel(
name='LoginRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=30)),
('latestRequest', models.DateTimeField()),
('login_tries', models.IntegerField(default=1)),
],
),
]
| true | true |
790044a8f078462e7191d4ab97ec292a566ea10a | 1,058 | py | Python | nnunet/utilities/file_endings.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | nnunet/utilities/file_endings.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | nnunet/utilities/file_endings.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str):
while filename.endswith('/'):
filename = filename[:-1]
return filename
def maybe_add_0000_to_all_niigz(folder):
nii_gz = subfiles(folder, suffix='.nii.gz')
for n in nii_gz:
n = remove_trailing_slash(n)
if not n.endswith('_0000.nii.gz'):
os.rename(n, n[:-7] + '_0000.nii.gz')
| 34.129032 | 111 | 0.757089 |
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str):
while filename.endswith('/'):
filename = filename[:-1]
return filename
def maybe_add_0000_to_all_niigz(folder):
nii_gz = subfiles(folder, suffix='.nii.gz')
for n in nii_gz:
n = remove_trailing_slash(n)
if not n.endswith('_0000.nii.gz'):
os.rename(n, n[:-7] + '_0000.nii.gz')
| true | true |
790044a9bd42f2aa1aad794030f8540d3b92b393 | 5,737 | py | Python | settings/production.py | CoronaCircle/coronacircles | 66963d178fe5ebd400e5f9403730ae0f8be4fb4d | [
"MIT"
] | null | null | null | settings/production.py | CoronaCircle/coronacircles | 66963d178fe5ebd400e5f9403730ae0f8be4fb4d | [
"MIT"
] | null | null | null | settings/production.py | CoronaCircle/coronacircles | 66963d178fe5ebd400e5f9403730ae0f8be4fb4d | [
"MIT"
] | null | null | null | from .base import * # noqa pylint: disable=wildcard-import, unused-wildcard-import
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
SECRET_KEY = env("DJANGO_SECRET_KEY")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["coronacircles.de"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
# 'default': {
# 'BACKEND': 'django_redis.cache.RedisCache',
# 'LOCATION': env('REDIS_URL'),
# 'OPTIONS': {
# 'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# 'IGNORE_EXCEPTIONS': True,
# }
# }
# }
# SECURITY
# ------------------------------------------------------------------------------
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
# set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = env("DJANGO_SECURE_HSTS_SECONDS", default="60")
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = "DENY"
# STORAGES
# ------------------------------------------------------------------------------
# INSTALLED_APPS += ["storages"] # noqa F405
# AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# AWS_AUTO_CREATE_BUCKET = False
# AWS_QUERYSTRING_AUTH = False
# _AWS_EXPIRY = 60 * 60 * 24 * 7
# AWS_S3_OBJECT_PARAMETERS = {
# "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
# }
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="CoronaCircles <contact@coronacircles.net>",
)
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[Coronacircles]")
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default="localhost")
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default="")
EMAIL_PORT = env("DJANGO_EMAIL_PORT", default="465")
EMAIL_USE_SSL = env.bool("DJANGO_EMAIL_USE_SSL", default=False)
EMAIL_USE_TLS = env.bool("DJANGO_EMAIL_USE_TLS", default=False)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = env("DJANGO_ADMIN_URL") # no admin in use here
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins bon every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] "
"pathname=%(pathname)s lineno=%(lineno)s "
"funcname=%(funcName)s %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["console", "mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
} | 37.496732 | 87 | 0.569984 | from .base import *
from .base import env
SECRET_KEY = env("DJANGO_SECRET_KEY")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["coronacircles.de"])
DATABASES["default"] = env.db("DATABASE_URL")
DATABASES["default"]["ATOMIC_REQUESTS"] = True
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SECURE_HSTS_SECONDS = env("DJANGO_SECURE_HSTS_SECONDS", default="60")
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = "DENY"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
[0]["OPTIONS"]["loaders"] = [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="CoronaCircles <contact@coronacircles.net>",
)
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[Coronacircles]")
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default="localhost")
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default="")
EMAIL_PORT = env("DJANGO_EMAIL_PORT", default="465")
EMAIL_USE_SSL = env.bool("DJANGO_EMAIL_USE_SSL", default=False)
EMAIL_USE_TLS = env.bool("DJANGO_EMAIL_USE_TLS", default=False)
["gunicorn"]
GGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] "
"pathname=%(pathname)s lineno=%(lineno)s "
"funcname=%(funcName)s %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["console", "mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
} | true | true |
790044ba1e8e05bb8ad573572e1ca05fdf6a418b | 24,067 | py | Python | graphene_django/tests/test_views.py | joerhodes3/graphene-django | 99892eba853bf060d25a4314c9db3ad28949c824 | [
"MIT"
] | null | null | null | graphene_django/tests/test_views.py | joerhodes3/graphene-django | 99892eba853bf060d25a4314c9db3ad28949c824 | [
"MIT"
] | null | null | null | graphene_django/tests/test_views.py | joerhodes3/graphene-django | 99892eba853bf060d25a4314c9db3ad28949c824 | [
"MIT"
] | null | null | null | import json
import pytest
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def url_string(string="/graphql", **url_params):
    """Return *string* with any keyword arguments appended as a query string."""
    if not url_params:
        return string
    return "{}?{}".format(string, urlencode(url_params))
def batch_url_string(**url_params):
    """Return the batch endpoint URL ("/graphql/batch") with optional query params."""
    return url_string(string="/graphql/batch", **url_params)
def j(**kwargs):
    """JSON-encode keyword arguments as an object (single GraphQL request body).

    Was a lambda assigned to a name (PEP 8 E731); a ``def`` gives a proper
    name in tracebacks and allows a docstring.
    """
    return json.dumps(kwargs)


def jl(**kwargs):
    """JSON-encode keyword arguments as a one-element list (batch request body)."""
    return json.dumps([kwargs])
@pytest.mark.django_db
def test_graphiql_is_enabled(client):
    """A GET accepting text/html should serve the GraphiQL HTML page.

    Fix: removed the unused ``from django.conf import settings`` import that
    was dead code inside this test.
    """
    response = client.get(url_string(), HTTP_ACCEPT="text/html")

    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_graphiql(client):
    """A GET with a query that accepts text/html renders GraphiQL, not JSON.

    Fix: ``HTTP_ACCEPT`` was previously passed into ``url_string()``, which
    URL-encoded it into the query string instead of sending it as the Accept
    request header (extra headers go as keyword arguments to ``client.get``).
    """
    response = client.get(url_string(query="{test}"), HTTP_ACCEPT="text/html")

    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_json(client):
    """A GET with a query that accepts application/json returns a JSON body.

    Fix: ``HTTP_ACCEPT`` was previously passed into ``url_string()``, which
    URL-encoded it into the query string instead of sending it as the Accept
    request header (extra headers go as keyword arguments to ``client.get``).
    """
    response = client.get(url_string(query="{test}"), HTTP_ACCEPT="application/json")

    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "application/json"
    assert response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_get_with_query_param(client):
    """A plain GET with a ?query= parameter is executed and returns JSON."""
    resp = client.get(url_string(query="{test}"))

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_get_with_variable_values(client):
    """GET requests may supply GraphQL variables via the ?variables= parameter.

    Fix: ``HTTP_ACCEPT`` was previously passed into ``url_string()`` (ending
    up as a query-string parameter) instead of being sent as the Accept
    request header via a keyword argument to ``client.get``.
    """
    response = client.get(
        url_string(
            query="query helloWho($who: String){ test(who: $who) }",
            variables=json.dumps({"who": "Dolly"}),
        ),
        HTTP_ACCEPT="application/json",
    )

    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "application/json"
    assert response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_allows_get_with_operation_name(client):
    """?operationName= selects which of several named operations to execute."""
    query = """
            query helloYou { test(who: "You"), ...shared }
            query helloWorld { test(who: "World"), ...shared }
            query helloDolly { test(who: "Dolly"), ...shared }
            fragment shared on QueryRoot {
              shared: test(who: "Everyone")
            }
            """
    resp = client.get(url_string(query=query, operationName="helloWorld"))

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
@pytest.mark.django_db
def test_reports_validation_errors(client):
    """Unknown fields yield a 400 response with one error entry per bad field."""
    resp = client.get(url_string(query="{ test, unknownOne, unknownTwo }"))

    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {
                "message": 'Cannot query field "unknownOne" on type "QueryRoot".',
                "locations": [{"line": 1, "column": 9}],
            },
            {
                "message": 'Cannot query field "unknownTwo" on type "QueryRoot".',
                "locations": [{"line": 1, "column": 21}],
            },
        ]
    }
@pytest.mark.django_db
def test_errors_when_missing_operation_name(client):
    """A multi-operation document without operationName is rejected with 400."""
    query = """
            query TestQuery { test }
            mutation TestMutation { writeTest { test } }
            """
    resp = client.get(url_string(query=query))

    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {
                "message": "Must provide operation name if query contains multiple operations."
            }
        ]
    }
@pytest.mark.django_db
def test_errors_when_sending_a_mutation_via_get(client):
    """Mutations sent over GET are rejected with 405 Method Not Allowed."""
    query = """
            mutation TestMutation { writeTest { test } }
            """
    resp = client.get(url_string(query=query))

    assert resp.status_code == 405
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request."}
        ]
    }
@pytest.mark.django_db
def test_errors_when_selecting_a_mutation_within_a_get(client):
    """Selecting a mutation via operationName on a GET is rejected with 405."""
    query = """
            query TestQuery { test }
            mutation TestMutation { writeTest { test } }
            """
    resp = client.get(url_string(query=query, operationName="TestMutation"))

    assert resp.status_code == 405
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request."}
        ]
    }
@pytest.mark.django_db
def test_allows_mutation_to_exist_within_a_get(client):
    """A GET document may contain a mutation if a query operation is selected."""
    query = """
            query TestQuery { test }
            mutation TestMutation { writeTest { test } }
            """
    resp = client.get(url_string(query=query, operationName="TestQuery"))

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_post_with_json_encoding(client):
    """A POST with a JSON-encoded body is executed and returns JSON."""
    resp = client.post(url_string(), j(query="{test}"), "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_batch_allows_post_with_json_encoding(client):
    """The batch endpoint accepts a JSON list and answers one result per entry."""
    resp = client.post(
        batch_url_string(), jl(id=1, query="{test}"), "application/json"
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # Note: the batch response body is a JSON *list* of per-request results.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello World"}, "status": 200}]
@pytest.mark.django_db
def test_batch_fails_if_is_empty(client):
    """An empty JSON list sent to the batch endpoint is rejected with 400."""
    resp = client.post(batch_url_string(), "[]", "application/json")

    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [{"message": "Received an empty list in the batch request."}]
    }
@pytest.mark.django_db
def test_allows_sending_a_mutation_via_post(client):
    """Mutations are allowed when sent through a POST request."""
    body = j(query="mutation TestMutation { writeTest { test } }")
    resp = client.post(url_string(), body, "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.django_db
def test_allows_post_with_url_encoding(client):
    """A POST with a form-urlencoded body is executed and returns JSON."""
    body = urlencode({"query": "{test}"})
    resp = client.post(url_string(), body, "application/x-www-form-urlencoded")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_supports_post_json_query_with_string_variables(client):
    """JSON POST bodies may carry variables as a JSON-encoded string."""
    body = j(
        query="query helloWho($who: String){ test(who: $who) }",
        variables=json.dumps({"who": "Dolly"}),
    )
    resp = client.post(url_string(), body, "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_string_variables(client):
    """Batch JSON POST bodies may carry variables as a JSON-encoded string."""
    body = jl(
        id=1,
        query="query helloWho($who: String){ test(who: $who) }",
        variables=json.dumps({"who": "Dolly"}),
    )
    resp = client.post(batch_url_string(), body, "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # Note: the batch response body is a JSON *list* of per-request results.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
@pytest.mark.django_db
def test_supports_post_json_query_with_json_variables(client):
    """JSON POST bodies may carry variables as an inline JSON object."""
    body = j(
        query="query helloWho($who: String){ test(who: $who) }",
        variables={"who": "Dolly"},
    )
    resp = client.post(url_string(), body, "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_json_variables(client):
    """Batch JSON POST bodies may carry variables as an inline JSON object."""
    body = jl(
        id=1,
        query="query helloWho($who: String){ test(who: $who) }",
        variables={"who": "Dolly"},
    )
    resp = client.post(batch_url_string(), body, "application/json")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # Note: the batch response body is a JSON *list* of per-request results.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
@pytest.mark.django_db
def test_supports_post_url_encoded_query_with_string_variables(client):
    """Form-urlencoded POST bodies may carry variables as a JSON string."""
    body = urlencode(
        {
            "query": "query helloWho($who: String){ test(who: $who) }",
            "variables": json.dumps({"who": "Dolly"}),
        }
    )
    resp = client.post(url_string(), body, "application/x-www-form-urlencoded")

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_supports_post_json_quey_with_get_variable_values(client):
    """Variables in the URL query string apply to a JSON-encoded POST body.

    NOTE(review): "quey" in the test name is a typo for "query"; the public
    test identifier is kept as-is to avoid a rename.
    """
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        j(query="query helloWho($who: String){ test(who: $who) }"),
        "application/json",
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_post_url_encoded_query_with_get_variable_values(client):
    """Variables in the URL query string apply to a form-urlencoded POST body."""
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        urlencode({"query": "query helloWho($who: String){ test(who: $who) }"}),
        "application/x-www-form-urlencoded",
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_supports_post_raw_text_query_with_get_variable_values(client):
    """Variables in the URL query string apply to a raw application/graphql body."""
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        "query helloWho($who: String){ test(who: $who) }",
        "application/graphql",
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_allows_post_with_operation_name(client):
    """operationName in a JSON POST body selects the operation to execute."""
    query = """
            query helloYou { test(who: "You"), ...shared }
            query helloWorld { test(who: "World"), ...shared }
            query helloDolly { test(who: "Dolly"), ...shared }
            fragment shared on QueryRoot {
              shared: test(who: "Everyone")
            }
            """
    resp = client.post(
        url_string(), j(query=query, operationName="helloWorld"), "application/json"
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
@pytest.mark.django_db
def test_batch_allows_post_with_operation_name(client):
    """operationName in a batch entry selects the operation for that entry."""
    query = """
            query helloYou { test(who: "You"), ...shared }
            query helloWorld { test(who: "World"), ...shared }
            query helloDolly { test(who: "Dolly"), ...shared }
            fragment shared on QueryRoot {
              shared: test(who: "Everyone")
            }
            """
    resp = client.post(
        batch_url_string(),
        jl(id=1, query=query, operationName="helloWorld"),
        "application/json",
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # Note: the batch response body is a JSON *list* of per-request results.
    assert resp.json() == [
        {
            "id": 1,
            "data": {"test": "Hello World", "shared": "Hello Everyone"},
            "status": 200,
        }
    ]
@pytest.mark.django_db
def test_allows_post_with_get_operation_name(client):
    """operationName in the URL query string applies to a raw graphql POST body."""
    query = """
    query helloYou { test(who: "You"), ...shared }
    query helloWorld { test(who: "World"), ...shared }
    query helloDolly { test(who: "Dolly"), ...shared }
    fragment shared on QueryRoot {
      shared: test(who: "Everyone")
    }
    """
    resp = client.post(
        url_string(operationName="helloWorld"), query, "application/graphql"
    )

    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
# Disabled: test for an inherited GraphQLView subclass with custom attributes
# (kept below as a string literal for reference; see urls_inherited).
"""
@pytest.mark.django_db
@pytest.mark.urls("graphene_django.tests.urls_inherited")
def test_inherited_class_with_attributes_works(client):
inherited_url = "/graphql/inherited/"
# Check schema and pretty attributes work
response = client.post(url_string(inherited_url, query="{test}"))
assert response.status_code == 200
# returns just json as list of __dict__
expected_dict = (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
# directly compare all key,value for __dict__
assert response.json() == expected_dict
# Check graphiql works
response = client.get(url_string(inherited_url), HTTP_ACCEPT="text/html")
assert response.status_code == 200
"""
@pytest.mark.django_db
def test_handles_field_errors_caught_by_graphql(client):
response = client.get(url_string(query="{thrower}"))
assert response.status_code == 200
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"data": None,
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"path": ["thrower"],
"message": "Throws!",
}
],
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_syntax_errors_caught_by_graphql(client):
response = client.get(url_string(query="syntaxerror"))
assert response.status_code == 400
# returns just json as list of __dict__
expected_dict = {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error GraphQL (1:1) "
'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n ^\n',
}
]
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_errors_caused_by_a_lack_of_query(client):
response = client.get(url_string())
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Must provide query string."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_not_expected_json_bodies(client):
response = client.post(url_string(), "[]", "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "The received data is not a valid JSON query."}]
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_invalid_json_bodies(client):
response = client.post(url_string(), "[oh}", "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_django_request_error(client, monkeypatch):
def mocked_read(*args):
raise IOError("foo-bar")
monkeypatch.setattr("django.http.request.HttpRequest.read", mocked_read)
valid_json = json.dumps(dict(foo="bar"))
response = client.post(url_string(), valid_json, "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "foo-bar"}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_plain_post_text(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
"query helloWho($who: String){ test(who: $who) }",
"text/plain",
)
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Must provide query string."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_poorly_formed_variables(client):
response = client.get(
url_string(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
)
)
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Variables are invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_unsupported_http_methods(client):
response = client.put(url_string(query="{test}"))
assert response.status_code == 405
assert response["Allow"] == "GET, POST"
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "GraphQL only supports GET and POST requests."}]
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_incomplete_json_bodies(client):
response = client.post(url_string(), '{"query":', "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_passes_request_into_context_request(client):
response = client.get(url_string(query="{request}", q="testing"))
assert response.status_code == 200
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"request": "testing"}}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
# pretty() -- comparing as string
@pytest.mark.django_db
@pytest.mark.urls("graphene_django.tests.urls_pretty")
def test_supports_pretty_printing(client):
response = client.get(url_string(query="{test}"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
assert response.content.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.django_db
def test_supports_pretty_printing_by_request(client):
response = client.get(url_string(query="{test}", pretty="1"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
assert response.content.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
# GraphQL SPEC:
# TODO: more mutations and somesucriptions
# TODO: fragment
# TODO: META __typename
# Additions:
# META AUTH
# ?not working? CDN not static/ for DEBUG
| 32.788828 | 95 | 0.640711 | import json
import pytest
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def url_string(string="/graphql", **url_params):
if url_params:
string += "?" + urlencode(url_params)
return string
def batch_url_string(**url_params):
return url_string("/graphql/batch", **url_params)
j = lambda **kwargs: json.dumps(kwargs)
jl = lambda **kwargs: json.dumps([kwargs])
@pytest.mark.django_db
def test_graphiql_is_enabled(client):
from django.conf import settings
response = client.get(url_string(), HTTP_ACCEPT="text/html")
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_graphiql(client):
response = client.get(url_string(query="{test}", HTTP_ACCEPT="text/html",))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_json(client):
response = client.get(url_string(query="{test}", HTTP_ACCEPT="application/json",))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_get_with_query_param(client):
response = client.get(url_string(query="{test}"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_get_with_variable_values(client):
response = client.get(
url_string(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
HTTP_ACCEPT="application/json",
)
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_get_with_operation_name(client):
response = client.get(
url_string(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_reports_validation_errors(client):
response = client.get(url_string(query="{ test, unknownOne, unknownTwo }"))
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [
{
"message": 'Cannot query field "unknownOne" on type "QueryRoot".',
"locations": [{"line": 1, "column": 9}],
},
{
"message": 'Cannot query field "unknownTwo" on type "QueryRoot".',
"locations": [{"line": 1, "column": 21}],
},
]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_errors_when_missing_operation_name(client):
response = client.get(
url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [
{
"message": "Must provide operation name if query contains multiple operations."
}
]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_errors_when_sending_a_mutation_via_get(client):
response = client.get(
url_string(
query="""
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status_code == 405
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."}
]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_errors_when_selecting_a_mutation_within_a_get(client):
response = client.get(
url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestMutation",
)
)
assert response.status_code == 405
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."}
]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_mutation_to_exist_within_a_get(client):
response = client.get(
url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestQuery",
)
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_post_with_json_encoding(client):
response = client.post(url_string(), j(query="{test}"), "application/json")
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_allows_post_with_json_encoding(client):
response = client.post(
batch_url_string(), jl(id=1, query="{test}"), "application/json"
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = [{"id": 1, "data": {"test": "Hello World"}, "status": 200}]
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_fails_if_is_empty(client):
response = client.post(batch_url_string(), "[]", "application/json")
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "Received an empty list in the batch request."}]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_sending_a_mutation_via_post(client):
response = client.post(
url_string(),
j(query="mutation TestMutation { writeTest { test } }"),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"writeTest": {"test": "Hello World"}}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_post_with_url_encoding(client):
response = client.post(
url_string(),
urlencode(dict(query="{test}")),
"application/x-www-form-urlencoded",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_supports_post_json_query_with_string_variables(client):
response = client.post(
url_string(),
j(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_string_variables(client):
response = client.post(
batch_url_string(),
jl(
id=1,
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
assert response.json() == expected_dict
@pytest.mark.django_db
def test_supports_post_json_query_with_json_variables(client):
response = client.post(
url_string(),
j(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_json_variables(client):
response = client.post(
batch_url_string(),
jl(
id=1,
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
assert response.json() == expected_dict
@pytest.mark.django_db
def test_supports_post_url_encoded_query_with_string_variables(client):
response = client.post(
url_string(),
urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
),
"application/x-www-form-urlencoded",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_supports_post_json_quey_with_get_variable_values(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
j(query="query helloWho($who: String){ test(who: $who) }"),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_post_url_encoded_query_with_get_variable_values(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
urlencode(dict(query="query helloWho($who: String){ test(who: $who) }")),
"application/x-www-form-urlencoded",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_supports_post_raw_text_query_with_get_variable_values(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
"query helloWho($who: String){ test(who: $who) }",
"application/graphql",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello Dolly"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_post_with_operation_name(client):
response = client.post(
url_string(),
j(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_allows_post_with_operation_name(client):
response = client.post(
batch_url_string(),
jl(
id=1,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
"application/json",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = [
{
"id": 1,
"data": {"test": "Hello World", "shared": "Hello Everyone"},
"status": 200,
}
]
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_post_with_get_operation_name(client):
response = client.post(
url_string(operationName="helloWorld"),
"""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
"application/graphql",
)
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_field_errors_caught_by_graphql(client):
response = client.get(url_string(query="{thrower}"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"data": None,
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"path": ["thrower"],
"message": "Throws!",
}
],
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_syntax_errors_caught_by_graphql(client):
response = client.get(url_string(query="syntaxerror"))
assert response.status_code == 400
expected_dict = {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error GraphQL (1:1) "
'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n ^\n',
}
]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_errors_caused_by_a_lack_of_query(client):
response = client.get(url_string())
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Must provide query string."}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_not_expected_json_bodies(client):
response = client.post(url_string(), "[]", "application/json")
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "The received data is not a valid JSON query."}]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_invalid_json_bodies(client):
response = client.post(url_string(), "[oh}", "application/json")
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_django_request_error(client, monkeypatch):
def mocked_read(*args):
raise IOError("foo-bar")
monkeypatch.setattr("django.http.request.HttpRequest.read", mocked_read)
valid_json = json.dumps(dict(foo="bar"))
response = client.post(url_string(), valid_json, "application/json")
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "foo-bar"}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_plain_post_text(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
"query helloWho($who: String){ test(who: $who) }",
"text/plain",
)
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Must provide query string."}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_poorly_formed_variables(client):
response = client.get(
url_string(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
)
)
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Variables are invalid JSON."}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_unsupported_http_methods(client):
response = client.put(url_string(query="{test}"))
assert response.status_code == 405
assert response["Allow"] == "GET, POST"
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "GraphQL only supports GET and POST requests."}]
}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_incomplete_json_bodies(client):
response = client.post(url_string(), '{"query":', "application/json")
assert response.status_code == 400
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
assert response.json() == expected_dict
@pytest.mark.django_db
def test_passes_request_into_context_request(client):
response = client.get(url_string(query="{request}", q="testing"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"request": "testing"}}
assert response.json() == expected_dict
@pytest.mark.django_db
@pytest.mark.urls("graphene_django.tests.urls_pretty")
def test_supports_pretty_printing(client):
response = client.get(url_string(query="{test}"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
assert response.content.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.django_db
def test_supports_pretty_printing_by_request(client):
response = client.get(url_string(query="{test}", pretty="1"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
assert response.content.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
| true | true |
790044f9018ccdaa1b1f66221dd74eee86b09efc | 1,480 | py | Python | elabjournal/elabjournal/SampleSerie.py | matthijsbrouwer/elabjournal-python | 4063b01993f0bf17ea2857009c1bedc5ace8b87b | [
"Apache-2.0"
] | 2 | 2021-06-29T11:17:27.000Z | 2022-01-11T18:41:49.000Z | elabjournal/elabjournal/SampleSerie.py | matthijsbrouwer/elabjournal-python | 4063b01993f0bf17ea2857009c1bedc5ace8b87b | [
"Apache-2.0"
] | null | null | null | elabjournal/elabjournal/SampleSerie.py | matthijsbrouwer/elabjournal-python | 4063b01993f0bf17ea2857009c1bedc5ace8b87b | [
"Apache-2.0"
] | 1 | 2019-06-06T13:23:11.000Z | 2019-06-06T13:23:11.000Z | from .eLABJournalObject import *
import json
import pandas as pd
import numbers
class SampleSerie(eLABJournalObject):
def __init__(self, api, data):
"""
Internal use only: initialize sample serie
"""
if ((data is not None) & (type(data) == dict) &
("name" in data.keys())
):
super().__init__(api, data, "seriesID", str(data["name"]))
else:
raise Exception("no (valid) sampleSerie data")
def barcode(self):
"""
Get the barcode.
"""
if "barcode" in self.data():
barcode = self.data()["barcode"]
return(barcode)
return None
def samples(self):
"""
Get a dict with the samples for this sample serie.
The sampleID is used as a key, the value is a sample object.
"""
sample_list = []
if "samples" in self.data():
samplesData = self.data()["samples"]
if isinstance(samplesData, list):
for sampleItem in samplesData:
if isinstance(sampleItem,dict) & ("sampleID" in sampleItem):
sample_list.append(sampleItem["sampleID"])
elif isinstance(sampleItem,numbers.Integral) | isinstance(sampleItem,str):
sample_list.append(sampleItem)
return(self._eLABJournalObject__api.sample(sample_list))
| 32.888889 | 94 | 0.538514 | from .eLABJournalObject import *
import json
import pandas as pd
import numbers
class SampleSerie(eLABJournalObject):
def __init__(self, api, data):
if ((data is not None) & (type(data) == dict) &
("name" in data.keys())
):
super().__init__(api, data, "seriesID", str(data["name"]))
else:
raise Exception("no (valid) sampleSerie data")
def barcode(self):
if "barcode" in self.data():
barcode = self.data()["barcode"]
return(barcode)
return None
def samples(self):
sample_list = []
if "samples" in self.data():
samplesData = self.data()["samples"]
if isinstance(samplesData, list):
for sampleItem in samplesData:
if isinstance(sampleItem,dict) & ("sampleID" in sampleItem):
sample_list.append(sampleItem["sampleID"])
elif isinstance(sampleItem,numbers.Integral) | isinstance(sampleItem,str):
sample_list.append(sampleItem)
return(self._eLABJournalObject__api.sample(sample_list))
| true | true |
790045b9940a233b7fe5b3ea902b024bfb745fc8 | 18 | py | Python | lemons/__init__.py | jakebrehm/ezpz | 42d539bc37aa0c3789030ab4a1cae960d56bd5ac | [
"MIT"
] | null | null | null | lemons/__init__.py | jakebrehm/ezpz | 42d539bc37aa0c3789030ab4a1cae960d56bd5ac | [
"MIT"
] | null | null | null | lemons/__init__.py | jakebrehm/ezpz | 42d539bc37aa0c3789030ab4a1cae960d56bd5ac | [
"MIT"
] | null | null | null | from .gui import * | 18 | 18 | 0.722222 | from .gui import * | true | true |
790045f361b08ad1c9412cfcf108d5f4078232bd | 461 | py | Python | vaas-app/src/vaas/manager/migrations/0002_auto_20210225_1216.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 251 | 2015-09-02T10:50:51.000Z | 2022-03-16T08:00:35.000Z | vaas-app/src/vaas/manager/migrations/0002_auto_20210225_1216.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 154 | 2015-09-02T14:54:08.000Z | 2022-03-16T08:34:17.000Z | vaas-app/src/vaas/manager/migrations/0002_auto_20210225_1216.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 31 | 2015-09-03T07:51:05.000Z | 2020-09-24T09:02:40.000Z | # Generated by Django 3.1.5 on 2021-02-25 11:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cluster', '0001_initial'),
('manager', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='director',
name='cluster',
field=models.ManyToManyField(related_name='directory', to='cluster.LogicalCluster'),
),
]
| 23.05 | 96 | 0.607375 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cluster', '0001_initial'),
('manager', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='director',
name='cluster',
field=models.ManyToManyField(related_name='directory', to='cluster.LogicalCluster'),
),
]
| true | true |
790046b2f5d43a3787516621c1fece4ec644016f | 3,009 | py | Python | tests/test_base.py | AverkinSergei/pyexcel-io | a611a69cf7c2fa75f226b7879aba61bcfdaceda1 | [
"BSD-3-Clause"
] | null | null | null | tests/test_base.py | AverkinSergei/pyexcel-io | a611a69cf7c2fa75f226b7879aba61bcfdaceda1 | [
"BSD-3-Clause"
] | null | null | null | tests/test_base.py | AverkinSergei/pyexcel-io | a611a69cf7c2fa75f226b7879aba61bcfdaceda1 | [
"BSD-3-Clause"
] | 1 | 2019-04-27T04:40:14.000Z | 2019-04-27T04:40:14.000Z | from pyexcel_io.sheet import (
SheetReader, SheetWriter, NamedContent
)
from pyexcel_io.book import BookWriter
from pyexcel_io.utils import is_empty_array
from nose.tools import raises
@raises(NotImplementedError)
def test_book_writer():
book = BookWriter()
book.create_sheet("test")
def test_is_empty_array():
a = ["", "", "", ""]
assert is_empty_array(a) is True
b = [1, "", "", ""]
assert is_empty_array(b) is False
class ArrayReader(SheetReader):
@property
def name(self):
SheetReader.name
return self._native_sheet.name
def number_of_columns(self):
SheetReader.number_of_columns(self)
return len(self._native_sheet.payload[0])
def number_of_rows(self):
SheetReader.number_of_rows(self)
return len(self._native_sheet.payload)
def cell_value(self, row, column):
SheetReader.cell_value(self, row, column)
return self._native_sheet.payload[row][column]
class ArrayWriter(SheetWriter):
def set_sheet_name(self, name):
self._native_sheet.name = name
def write_row(self, array):
self._native_sheet.payload.append(array)
class TestSheetReader:
@raises(NotImplementedError)
def test_abstractness(self):
reader = SheetReader("test")
reader.cell_value(1, 2)
@raises(NotImplementedError)
def test_number_of_columns(self):
reader = SheetReader("test")
reader.number_of_columns()
@raises(NotImplementedError)
def test_number_of_rows(self):
reader = SheetReader("test")
reader.number_of_rows()
def test_to_array(self):
name = "test"
class B(SheetReader):
@property
def name(self):
return self._native_sheet
def to_array(self):
pass
b = B(name)
b.to_array()
assert b.name == name
class TestSheetWriter:
@raises(NotImplementedError)
def test_abstractness(self):
writer = SheetWriter("te", "st", "abstract")
writer.write_row([])
def test_inheritance(self):
class D(SheetWriter):
def write_row(self, row):
pass
d = D('t', 'e', 's')
d.write_row([11, 11])
def test_writer(self):
native_sheet = NamedContent("test", [])
content = [
[1, 2],
[3, 4],
[5, 6]
]
writer = ArrayWriter(None, native_sheet, "test")
writer.write_row(content[0])
writer.write_array(content[1:])
assert native_sheet.payload == content
def test_writer2(self):
native_sheet = NamedContent("test", [])
content = [
[1, 2],
[3, 4],
[5, 6]
]
writer = ArrayWriter(None, native_sheet, None)
writer.write_row(content[0])
writer.write_array(content[1:])
assert native_sheet.payload == content
assert native_sheet.name == "pyexcel_sheet1"
| 25.075 | 56 | 0.606846 | from pyexcel_io.sheet import (
SheetReader, SheetWriter, NamedContent
)
from pyexcel_io.book import BookWriter
from pyexcel_io.utils import is_empty_array
from nose.tools import raises
@raises(NotImplementedError)
def test_book_writer():
book = BookWriter()
book.create_sheet("test")
def test_is_empty_array():
a = ["", "", "", ""]
assert is_empty_array(a) is True
b = [1, "", "", ""]
assert is_empty_array(b) is False
class ArrayReader(SheetReader):
@property
def name(self):
SheetReader.name
return self._native_sheet.name
def number_of_columns(self):
SheetReader.number_of_columns(self)
return len(self._native_sheet.payload[0])
def number_of_rows(self):
SheetReader.number_of_rows(self)
return len(self._native_sheet.payload)
def cell_value(self, row, column):
SheetReader.cell_value(self, row, column)
return self._native_sheet.payload[row][column]
class ArrayWriter(SheetWriter):
def set_sheet_name(self, name):
self._native_sheet.name = name
def write_row(self, array):
self._native_sheet.payload.append(array)
class TestSheetReader:
    """Exercises the abstract SheetReader contract."""

    @raises(NotImplementedError)
    def test_abstractness(self):
        # The base reader cannot serve cell values.
        reader = SheetReader("test")
        reader.cell_value(1, 2)

    @raises(NotImplementedError)
    def test_number_of_columns(self):
        reader = SheetReader("test")
        reader.number_of_columns()

    @raises(NotImplementedError)
    def test_number_of_rows(self):
        reader = SheetReader("test")
        reader.number_of_rows()

    def test_to_array(self):
        name = "test"

        # A subclass providing name/to_array is enough to instantiate.
        class B(SheetReader):
            @property
            def name(self):
                # Returns the raw constructor argument stored as the sheet.
                return self._native_sheet
            def to_array(self):
                pass
        b = B(name)
        b.to_array()
        assert b.name == name
class TestSheetWriter:
    """Exercises the SheetWriter base class and a concrete array writer."""

    @raises(NotImplementedError)
    def test_abstractness(self):
        # The base class must not be usable directly.
        writer = SheetWriter("te", "st", "abstract")
        writer.write_row([])

    def test_inheritance(self):
        # A subclass only needs to provide write_row().
        class D(SheetWriter):
            def write_row(self, row):
                pass
        d = D('t', 'e', 's')
        d.write_row([11, 11])

    def test_writer(self):
        native_sheet = NamedContent("test", [])
        content = [
            [1, 2],
            [3, 4],
            [5, 6]
        ]
        writer = ArrayWriter(None, native_sheet, "test")
        writer.write_row(content[0])
        writer.write_array(content[1:])
        # Rows written singly and in bulk land in the payload in order.
        assert native_sheet.payload == content

    def test_writer2(self):
        native_sheet = NamedContent("test", [])
        content = [
            [1, 2],
            [3, 4],
            [5, 6]
        ]
        # Passing None as the sheet name: the assert below shows the writer
        # falls back to the default name "pyexcel_sheet1".
        writer = ArrayWriter(None, native_sheet, None)
        writer.write_row(content[0])
        writer.write_array(content[1:])
        assert native_sheet.payload == content
        assert native_sheet.name == "pyexcel_sheet1"
| true | true |
790047388c9263b78ed04749687d2019273e54ec | 4,090 | py | Python | tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for linear regression example under TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
    """Return the TF device string: the first GPU when present, else CPU."""
    if tfe.num_gpus() > 0:
        return "/device:GPU:0"
    return "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
    """Unit tests for the eager linear-regression example."""

    def setUp(self):
        super(LinearRegressionTest, self).setUp()
        # Temporary directory for summary event files written by fit().
        self._tmp_logdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self._tmp_logdir)
        super(LinearRegressionTest, self).tearDown()

    def testSyntheticDataset(self):
        """synthetic_dataset yields float32 batches of the requested shape."""
        true_w = tf.random_uniform([3, 1])
        true_b = [1.0]
        batch_size = 10
        num_batches = 2
        noise_level = 0.
        dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
                                                      batch_size, num_batches)

        it = tfe.Iterator(dataset)
        for _ in range(2):
            (xs, ys) = it.next()
            self.assertEqual((batch_size, 3), xs.shape)
            self.assertEqual((batch_size, 1), ys.shape)
            self.assertEqual(tf.float32, xs.dtype)
            self.assertEqual(tf.float32, ys.dtype)
        # The dataset must be exhausted after exactly num_batches batches.
        with self.assertRaises(StopIteration):
            it.next()

    def testLinearRegression(self):
        """Fitting noiseless data recovers the true weights and bias."""
        true_w = [[1.0], [-0.5], [2.0]]
        true_b = [1.0]

        model = linear_regression.LinearModel()
        dataset = linear_regression.synthetic_dataset(
            true_w, true_b, noise_level=0., batch_size=64, num_batches=40)

        with tf.device(device()):
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
            linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)

        self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
        self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
        # fit() should have written summary event files into the log dir.
        self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
    """Benchmarks training throughput of the eager linear-regression example."""

    def benchmarkEagerLinearRegression(self):
        num_epochs = 10
        num_batches = 200
        batch_size = 64
        dataset = linear_regression.synthetic_dataset(
            w=tf.random_uniform([3, 1]),
            b=tf.random_uniform([1]),
            noise_level=0.01,
            batch_size=batch_size,
            num_batches=num_batches)
        burn_in_dataset = dataset.take(10)

        model = linear_regression.LinearModel()

        with tf.device(device()):
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

            # Perform burn-in so one-time setup cost is excluded from timing.
            linear_regression.fit(model, burn_in_dataset, optimizer)

            start_time = time.time()
            for _ in range(num_epochs):
                linear_regression.fit(model, dataset, optimizer)
            wall_time = time.time() - start_time

            examples_per_sec = num_epochs * num_batches * batch_size / wall_time
            self.report_benchmark(
                name="eager_train_%s" %
                ("gpu" if tfe.num_gpus() > 0 else "cpu"),
                iters=num_epochs * num_batches,
                extras={"examples_per_sec": examples_per_sec},
                wall_time=wall_time)
if __name__ == "__main__":
    # Eager execution must be enabled before any test constructs tensors.
    tf.enable_eager_execution()
    tf.test.main()
| 33.52459 | 89 | 0.666504 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
def setUp(self):
super(LinearRegressionTest, self).setUp()
self._tmp_logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
burn_in_dataset = dataset.take(10)
model = linear_regression.LinearModel()
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, burn_in_dataset, optimizer)
start_time = time.time()
for _ in range(num_epochs):
linear_regression.fit(model, dataset, optimizer)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="eager_train_%s" %
("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
| true | true |
790047895f82a32d171d681dabea6e5076b7abeb | 259 | py | Python | Classroom 4/Buzzer_PWM.py | lakshanthad/Wio_Terminal_Classroom_Ardupy | d97ecb3dad7160ed6df14002b7c1b71a0e111383 | [
"MIT"
] | 3 | 2020-11-01T07:06:41.000Z | 2021-11-04T05:50:31.000Z | Classroom 4/Buzzer_PWM.py | lakshanthad/Wio_Terminal_Classroom_Ardupy | d97ecb3dad7160ed6df14002b7c1b71a0e111383 | [
"MIT"
] | 3 | 2020-10-29T17:13:10.000Z | 2021-02-02T20:11:02.000Z | Classroom 4/Buzzer_PWM.py | lakshanthad/Wio_Terminal_Classroom_Ardupy | d97ecb3dad7160ed6df14002b7c1b71a0e111383 | [
"MIT"
] | null | null | null | from machine import Pin, Map, PWM # include Pin, Map and PWM functions from machine module
import time # include time module
# create PWM on WIO BUZZER with 2000Hz frequency and 250 duty cycle
BUZZER = PWM(Pin(Map.WIO_BUZZER), freq=1000, duty=250)
| 37 | 92 | 0.741313 | from machine import Pin, Map, PWM
import time
BUZZER = PWM(Pin(Map.WIO_BUZZER), freq=1000, duty=250)
| true | true |
7900493737e89ea4e37b3f31f90bdb4a41be0315 | 1,211 | py | Python | src/pkgcore/resolver/util.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | src/pkgcore/resolver/util.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | src/pkgcore/resolver/util.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | __all__ = ("group_attempts", "fails_filter", "reduce_to_failures",)
def group_attempts(sequence, filter_func=None):
    """Regroup a flat resolver event stream by the package being inspected.

    ``('inspecting', pkg)`` tuples act as group markers; every following
    event that passes *filter_func* is collected under that pkg.  Frames
    flagged ``ignored`` are inlined: their own filtered events are collected
    instead of the frame itself.  Events before the first marker, and groups
    that end up empty, are dropped.  Yields ``(pkg, events)`` pairs.
    """
    if filter_func is None:
        def filter_func(event):
            return True
    current_pkg = None
    bucket = []
    for event in sequence:
        if isinstance(event, tuple) and event[0] == 'inspecting':
            # New group marker: flush the previous non-empty group first.
            if bucket:
                yield current_pkg, bucket
            current_pkg, bucket = event[1], []
            continue
        if current_pkg is None or not filter_func(event):
            continue
        if getattr(event, 'ignored', False):
            # Inline an ignored frame's own (filtered) events.
            bucket.extend(sub for sub in event.events if filter_func(sub))
        else:
            bucket.append(event)
    if bucket:
        yield current_pkg, bucket
def fails_filter(x):
    """Return True when *x* represents a failure worth keeping.

    Tuples: ``('viable', flag)`` fails when *flag* is falsy; ``'inspecting'``
    markers never count; any other tagged tuple does.  Non-tuples are frames
    and fail when their ``succeeded`` attribute is falsy.
    """
    if isinstance(x, tuple):
        tag = x[0]
        if tag == "viable":
            return not x[1]
        return tag != "inspecting"
    return not x.succeeded
def reduce_to_failures(frame):
    """Recursively collapse a resolver frame into only its failing parts.

    Returns [] for a successful frame; otherwise a list whose first element
    is the frame itself, followed by (pkg, nested_failures) pairs built from
    the frame's failing events.
    """
    if frame.succeeded:
        return []
    l = [frame]
    for pkg, nodes in group_attempts(frame.events, fails_filter):
        l2 = []
        for x in nodes:
            if not isinstance(x, tuple):
                # Nested frame: recurse into its own failures.
                l2.append(reduce_to_failures(x))
            else:
                l2.append(x)
        l.append((pkg, l2))
    return l
| 28.162791 | 67 | 0.521883 | __all__ = ("group_attempts", "fails_filter", "reduce_to_failures",)
def group_attempts(sequence, filter_func=None):
if filter_func is None:
filter_func = lambda x:True
last, l = None, []
for x in sequence:
if isinstance(x, tuple) and x[0] == 'inspecting':
if l:
yield last, l
last, l = x[1], []
elif last is not None:
if filter_func(x):
if getattr(x, 'ignored', False):
l.extend(y for y in x.events if filter_func(y))
else:
l.append(x)
if l:
yield last, l
def fails_filter(x):
if not isinstance(x, tuple):
return not x.succeeded
if x[0] == "viable":
return not x[1]
return x[0] != "inspecting"
def reduce_to_failures(frame):
if frame.succeeded:
return []
l = [frame]
for pkg, nodes in group_attempts(frame.events, fails_filter):
l2 = []
for x in nodes:
if not isinstance(x, tuple):
l2.append(reduce_to_failures(x))
else:
l2.append(x)
l.append((pkg, l2))
return l
| true | true |
790049b345b5410136760d5ba8f60212769eb68c | 3,737 | py | Python | Code/tests/python_tests/nebulae_live.py | DaveSeidel/QB_Nebulae_V2 | 4a0218bb6a05e835e74b126729a1c3cd221fc9b5 | [
"MIT"
] | 40 | 2019-12-30T03:44:36.000Z | 2022-02-07T23:09:42.000Z | Code/tests/python_tests/nebulae_live.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
] | 11 | 2020-03-08T10:22:57.000Z | 2022-03-22T21:18:32.000Z | Code/tests/python_tests/nebulae_live.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
] | 23 | 2020-01-20T11:12:20.000Z | 2022-03-02T20:39:09.000Z | import csnd6
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from random import randint, random
import time
# For Directory Searching
import glob
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
class RandomLine(object):
def __init__(self, base, range):
self.curVal = 0.0
self.reset()
self.base = base
self.range = range
def reset(self):
self.dur = randint(256,512)
self.end = random()
self.slope = (self.end - self.curVal) / self.dur
def getValue(self):
self.dur -= 1
if(self.dur < 0):
self.reset()
retVal = self.curVal
self.curVal += self.slope
return self.base + (self.range * retVal)
def createChannel(csound, channelName):
    """Create and return a Csound control-rate input channel by name."""
    chn = csnd6.CsoundMYFLTArray(1)
    csound.GetChannelPtr(chn.GetPtr(), channelName,
                         csnd6.CSOUND_CONTROL_CHANNEL | csnd6.CSOUND_INPUT_CHANNEL)
    return chn
class ChannelUpdater(object):
    """Keeps one Csound control channel in sync with a value source.

    ``updater`` is any object exposing getValue() (e.g. RandomLine or
    InputData).
    """

    def __init__(self, csound, channelName, updater):
        self.updater = updater
        self.channel = createChannel(csound, channelName)

    def update(self):
        # Push the source's latest value into slot 0 of the channel.
        self.channel.SetValue(0, self.updater.getValue())
class InputData(object):
    """Reads a control value from one channel of the MCP3008 ADC over SPI."""

    def __init__(self, channel):
        self.curVal = 0.0
        self.channel = channel
        self.mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))

    def getValue(self):
        # Scale the 10-bit ADC reading (0..1023) into roughly 0.04..4.04.
        self.curVal = (((self.mcp.read_adc(self.channel)) / 1023.0) + 0.01) * 4;
        return self.curVal
class StoredFiles(object):
    """Collects the .wav sample files available to the patch.

    ``files`` holds the matching paths and ``numFiles`` their count.
    """

    def __init__(self, directory="../"):
        """Scan *directory* (default: parent directory) on construction."""
        self.reset()
        self.scanfiles(directory)

    def reset(self):
        """Forget any previously discovered files."""
        self.numFiles = 0
        self.files = []

    def scanfiles(self, directory="../"):
        """Populate ``files`` with every .wav found under *directory*.

        *directory* should end with a path separator (matching the original
        hard-coded "../" prefix).  Fixes two defects in the original: the
        local ``mypath`` variable was computed but never used, and
        ``numFiles`` was never updated after a scan.
        """
        self.files = glob.glob(directory + "*.wav")
        self.numFiles = len(self.files)
###############################
# Our Orchestra for our project: a pass-through instrument that echoes
# the stereo audio input straight to the output.
orc = """
sr=44100
ksmps=64
nchnls=2
0dbfs=1
instr 1
ainl, ainr inch 1, 2
outs ainl, ainr
endin"""

# Discover the .wav files shipped next to this script.
# NOTE: this file is Python 2 (``print f`` statement syntax).
inputFiles = StoredFiles()
inputFiles.reset()
inputFiles.scanfiles()
for f in inputFiles.files:
    print f

c = csnd6.Csound()  # create an instance of Csound
c.SetOption("-iadc")  # read realtime audio input
c.SetOption("-odac")  # write realtime audio output
c.SetOption("-b 64")  # small software buffer for low latency
c.SetOption("-B 128")  # hardware buffer size
c.SetOption("-+rtaudio=alsa")  # use the ALSA realtime audio backend
c.SetOption("--realtime")
c.SetOption("--sched")
c.SetOption("-m7")  # message level
c.CompileOrc(orc)  # Compile Orchestra from String

# Set the Instrument to Play for 60 seconds. Change this to infinite later.
sco = "f0 $INF\n" + "i1 0 -10\n"
# Set the ftables based on the files within the specified directory.
#fsco = "f 1 0 0 1 \"" + inputFiles.files[0] + "\" 0 0 0\n" #sco = isco + fsco
c.ReadScore(sco)  # Read in Score generated from notes
c.Start()  # When compiling from strings, this call is necessary before doing any performing

# Create a set of ChannelUpdaters
#channels = [ChannelUpdater(c, "amp", RandomLine(-2.0, 2.0)),
#            ChannelUpdater(c, "freq", RandomLine(0.6, 8.0)),
#            ChannelUpdater(c, "resonance", RandomLine(0.4, .3))]
#freq_ctrl = InputData(0)
#amp_ctrl = InputData(1)
#res_ctrl = InputData(2)
# NOTE(review): the wiring below feeds freq_ctrl (ADC 1) into "amp" and
# amp_ctrl (ADC 0) into "freq" -- looks swapped relative to the names; verify.
freq_ctrl = InputData(1)
amp_ctrl = InputData(0)
res_ctrl = RandomLine(0.6, 8.0)
channels = [ChannelUpdater(c, "amp", freq_ctrl),
            ChannelUpdater(c, "freq", amp_ctrl),
            ChannelUpdater(c, "resonance", res_ctrl)]

# Initialize all Channel Values
for chn in channels:
    chn.update()

# Main loop: render one control block, then refresh every channel.
while (c.PerformKsmps() == 0):
    for chn in channels:  # update all channel values
        chn.update()

c.Stop()
| 26.692857 | 104 | 0.628579 | import csnd6
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from random import randint, random
import time
import glob
SPI_PORT = 0
SPI_DEVICE = 0
class RandomLine(object):
def __init__(self, base, range):
self.curVal = 0.0
self.reset()
self.base = base
self.range = range
def reset(self):
self.dur = randint(256,512)
self.end = random()
self.slope = (self.end - self.curVal) / self.dur
def getValue(self):
self.dur -= 1
if(self.dur < 0):
self.reset()
retVal = self.curVal
self.curVal += self.slope
return self.base + (self.range * retVal)
def createChannel(csound, channelName):
chn = csnd6.CsoundMYFLTArray(1)
csound.GetChannelPtr(chn.GetPtr(), channelName,
csnd6.CSOUND_CONTROL_CHANNEL | csnd6.CSOUND_INPUT_CHANNEL)
return chn
class ChannelUpdater(object):
def __init__(self, csound, channelName, updater):
self.updater = updater
self.channel = createChannel(csound, channelName)
def update(self):
self.channel.SetValue(0, self.updater.getValue())
class InputData(object):
def __init__(self, channel):
self.curVal = 0.0
self.channel = channel
self.mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
def getValue(self):
self.curVal = (((self.mcp.read_adc(self.channel)) / 1023.0) + 0.01) * 4;
return self.curVal
class StoredFiles(object):
def __init__(self):
self.reset()
self.scanfiles()
def reset(self):
self.numFiles = 0
self.files = []
def scanfiles(self):
mypath = "../"
self.files = glob.glob("../*.wav")
0 -10\n"
o)
c.Start()
freq_ctrl = InputData(1)
amp_ctrl = InputData(0)
res_ctrl = RandomLine(0.6, 8.0)
channels = [ChannelUpdater(c, "amp", freq_ctrl),
ChannelUpdater(c, "freq", amp_ctrl),
ChannelUpdater(c, "resonance", res_ctrl)]
for chn in channels:
chn.update()
while (c.PerformKsmps() == 0):
for chn in channels:
chn.update()
c.Stop()
| false | true |
790049e0bc9201565c25d7b7c3d13b97466874c5 | 1,513 | py | Python | doc/argparse2rst.py | Hertin/espnet | a0f2175df08b4750a9f0305c20b8c11f6e941867 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | doc/argparse2rst.py | Hertin/espnet | a0f2175df08b4750a9f0305c20b8c11f6e941867 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | doc/argparse2rst.py | Hertin/espnet | a0f2175df08b4750a9f0305c20b8c11f6e941867 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | #!/usr/bin/env python3
import importlib.machinery as imm
import logging
import pathlib
import re
import configargparse
class ModuleInfo:
    """Loads a Python source file and records its dotted name and module.

    The loaded file must define a module-level ``get_parser()`` function;
    otherwise ValueError is raised.
    """

    def __init__(self, path):
        self.path = pathlib.Path(path)
        # Derive a dotted module name from the path, minus leading dots.
        dotted = str(self.path.parent / self.path.stem).replace("/", ".")
        self.name = re.sub(r"^[\.]+", "", dotted)
        self.module = imm.SourceFileLoader(self.name, path).load_module()
        if not hasattr(self.module, "get_parser"):
            raise ValueError(f"{path} does not have get_parser()")
def get_parser():
    """Build the command-line parser for this script."""
    parser = configargparse.ArgumentParser(
        description='generate RST from argparse options',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        'src',
        type=str,
        nargs='+',
        help='source python files that contain get_parser() func',
    )
    return parser
# Parse the list of source files from the command line.
args = get_parser().parse_args()
modinfo = []
for p in args.src:
    # Package __init__.py files have no CLI entry point; skip them.
    if "__init__.py" in p:
        continue
    modinfo.append(ModuleInfo(p))


# Print a cross-reference list, one bullet per tool.
for m in modinfo:
    logging.info(f"processing: {m.path.name}")
    d = m.module.get_parser().description
    assert d is not None
    print(f"- :ref:`{m.path.name}`: {d}")

print()


# Print one sphinx-argparse directive section per tool.
for m in modinfo:
    cmd = m.path.name
    sep = "~" * len(cmd)
    print(f"""

.. _{cmd}:

{cmd}
{sep}

.. argparse::
   :module: {m.name}
   :func: get_parser
   :prog: {cmd}

""")
| 21.927536 | 82 | 0.637145 |
import importlib.machinery as imm
import logging
import pathlib
import re
import configargparse
class ModuleInfo:
def __init__(self, path):
self.path = pathlib.Path(path)
name = str(self.path.parent / self.path.stem)
name = name.replace("/", ".")
self.name = re.sub(r"^[\.]+", "", name)
self.module = imm.SourceFileLoader(self.name, path).load_module()
if not hasattr(self.module, "get_parser"):
raise ValueError(f"{path} does not have get_parser()")
def get_parser():
parser = configargparse.ArgumentParser(
description='generate RST from argparse options',
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('src', type=str, nargs='+',
help='source python files that contain get_parser() func')
return parser
args = get_parser().parse_args()
modinfo = []
for p in args.src:
if "__init__.py" in p:
continue
modinfo.append(ModuleInfo(p))
for m in modinfo:
logging.info(f"processing: {m.path.name}")
d = m.module.get_parser().description
assert d is not None
print(f"- :ref:`{m.path.name}`: {d}")
print()
for m in modinfo:
cmd = m.path.name
sep = "~" * len(cmd)
print(f"""
.. _{cmd}:
{cmd}
{sep}
.. argparse::
:module: {m.name}
:func: get_parser
:prog: {cmd}
""")
| true | true |
79004a2650724995b9107f426de6b76162790a79 | 564 | py | Python | q2_emperor/tests/test_plugin_setup.py | mortonjt/q2-emperor | 1e2f680349eebe077246fa083103a7764670c4e4 | [
"BSD-3-Clause"
] | null | null | null | q2_emperor/tests/test_plugin_setup.py | mortonjt/q2-emperor | 1e2f680349eebe077246fa083103a7764670c4e4 | [
"BSD-3-Clause"
] | null | null | null | q2_emperor/tests/test_plugin_setup.py | mortonjt/q2-emperor | 1e2f680349eebe077246fa083103a7764670c4e4 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_emperor.plugin_setup import plugin as emperor_plugin
class PluginSetupTests(unittest.TestCase):
    """Sanity checks for the q2-emperor plugin registration."""

    def test_plugin_setup(self):
        # The QIIME 2 plugin object must register under the name 'emperor'.
        self.assertEqual(emperor_plugin.name, 'emperor')
| 31.333333 | 78 | 0.556738 |
import unittest
from q2_emperor.plugin_setup import plugin as emperor_plugin
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(emperor_plugin.name, 'emperor')
| true | true |
79004aa8b9d1be7d81c86db27d1b604a16e536ad | 17,283 | py | Python | tests/syndication/tests.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | null | null | null | tests/syndication/tests.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | null | null | null | tests/syndication/tests.py | adambrenecki/django | 28a571348bca9c5a3c137e495e7d3c9349a5bd56 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
    """Base class for feed tests: loads fixture data and adds DOM helpers."""
    fixtures = ['feeddata.json']

    def assertChildNodes(self, elem, expected):
        """Assert that *elem*'s child node names are exactly *expected*."""
        self.assertEqual({n.nodeName for n in elem.childNodes}, set(expected))

    def assertChildNodeContent(self, elem, expected):
        """Assert each tag->text mapping against the first matching child."""
        for tag, text in expected.items():
            node = elem.getElementsByTagName(tag)[0]
            self.assertEqual(node.firstChild.wholeText, text)

    def assertCategories(self, elem, expected):
        """Assert the set of <category> child texts equals *expected*."""
        actual = {
            child.firstChild.wholeText
            for child in elem.childNodes
            if child.nodeName == 'category'
        }
        self.assertEqual(actual, set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'syndication.urls'
    def test_rss2_feed(self):
        """
        Test the structure and content of feeds generated by Rss201rev2Feed.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]

        # Find the last build date from the most recently published entry.
        d = Entry.objects.latest('published').published
        ltz = tzinfo.LocalTimezone(d)
        last_build_date = rfc2822_date(d.replace(tzinfo=ltz))

        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'description': 'A more thorough description of my blog.',
            'link': 'http://example.com/blog/',
            'language': 'en',
            'lastBuildDate': last_build_date,
            #'atom:link': '',
            'ttl': '600',
            'copyright': 'Copyright (c) 2007, Sally Smith',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss2/'
        )

        # Find the pubdate of the first feed item
        d = Entry.objects.get(pk=1).published
        ltz = tzinfo.LocalTimezone(d)
        pub_date = rfc2822_date(d.replace(tzinfo=ltz))

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
            'guid': 'http://example.com/blog/1/',
            'pubDate': pub_date,
            'author': 'test@example.com (Sally Smith)',
        })
        self.assertCategories(items[0], ['python', 'testing'])
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
            # Assert that <guid> does not have any 'isPermaLink' attribute
            self.assertIsNone(item.getElementsByTagName(
                'guid')[0].attributes.get('isPermaLink'))
    def test_rss2_feed_guid_permalink_false(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'false'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_false/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            # Every item's <guid> must carry isPermaLink="false".
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "false")
    def test_rss2_feed_guid_permalink_true(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'true'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_true/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            # Every item's <guid> must carry isPermaLink="true".
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "true")
    def test_rss091_feed(self):
        """
        Test the structure and content of feeds generated by RssUserland091Feed.
        """
        response = self.client.get('/syndication/rss091/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '0.91')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]
        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss091/'
        )

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
        })
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description'])
            # RSS 0.91 items carry no <category> elements.
            self.assertCategories(item, [])
    def test_atom_feed(self):
        """
        Test the structure and content of feeds generated by Atom1Feed.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
        for link in feed.getElementsByTagName('link'):
            # The rel="self" link must point back at this feed's URL.
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'category',
                'updated',
                'published',
                'rights',
                'author',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            # Summaries are emitted as HTML, not plain text.
            self.assertEqual(summary.getAttribute('type'), 'html')
    def test_atom_feed_published_and_updated_elements(self):
        """
        Test that the published and updated elements are not
        the same and now adhere to RFC 4287.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        entries = feed.getElementsByTagName('entry')

        published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
        updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText

        # RFC 4287 distinguishes creation time from last-modification time.
        self.assertNotEqual(published, updated)
    def test_latest_post_date(self):
        """
        Test that both the published and updated dates are
        considered when determining the latest post date.
        """
        # this feed has a `published` element with the latest date
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.latest('published').published
        ltz = tzinfo.LocalTimezone(d)
        latest_published = rfc3339_date(d.replace(tzinfo=ltz))

        self.assertEqual(updated, latest_published)

        # this feed has an `updated` element with the latest date
        response = self.client.get('/syndication/latest/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        # pk=5 is excluded by the /latest/ feed's queryset -- mirror that here.
        d = Entry.objects.exclude(pk=5).latest('updated').updated
        ltz = tzinfo.LocalTimezone(d)
        latest_updated = rfc3339_date(d.replace(tzinfo=ltz))

        self.assertEqual(updated, latest_updated)
    def test_custom_feed_generator(self):
        """A custom feed generator may add extra attributes and elements."""
        response = self.client.get('/syndication/custom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        # Attributes/elements injected by the custom generator.
        self.assertEqual(feed.getAttribute('django'), 'rocks')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertEqual(entry.getAttribute('bacon'), 'yum')
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'ministry',
                'rights',
                'author',
                'updated',
                'published',
                'category',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')
    def test_title_escaping(self):
        """
        Tests that titles are escaped correctly in RSS feeds.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)
        for item in doc.getElementsByTagName('item'):
            link = item.getElementsByTagName('link')[0]
            if link.firstChild.wholeText == 'http://example.com/blog/4/':
                title = item.getElementsByTagName('title')[0]
                # XML entities must round-trip back to the raw characters.
                self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
# check the recived zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
published = doc.getElementsByTagName('published')[0].firstChild.wholeText
self.assertEqual(published[-6:], '+00:42')
def test_feed_last_modified_time(self):
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
Test that a ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_template_context_feed(self):
"""
Test that custom context data can be passed to templates for title
and description.
"""
response = self.client.get('/syndication/template_context/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'My first entry (foo is bar)',
'description': 'My first entry (foo is bar)',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
| 40.006944 | 151 | 0.602847 | from __future__ import unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
d = Entry.objects.get(pk=1).published
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
self.assertIsNone(item.getElementsByTagName(
'guid')[0].attributes.get('isPermaLink'))
def test_rss2_feed_guid_permalink_false(self):
response = self.client.get(
'/syndication/rss2/guid_ispermalink_false/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "false")
def test_rss2_feed_guid_permalink_true(self):
response = self.client.get(
'/syndication/rss2/guid_ispermalink_true/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "true")
def test_rss091_feed(self):
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'category',
'updated',
'published',
'rights',
'author',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_atom_feed_published_and_updated_elements(self):
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
entries = feed.getElementsByTagName('entry')
published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText
self.assertNotEqual(published, updated)
def test_latest_post_date(self):
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
latest_published = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest_published)
response = self.client.get('/syndication/latest/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.exclude(pk=5).latest('updated').updated
ltz = tzinfo.LocalTimezone(d)
latest_updated = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest_updated)
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'ministry',
'rights',
'author',
'updated',
'published',
'category',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
published = doc.getElementsByTagName('published')[0].firstChild.wholeText
self.assertEqual(published[-6:], '+00:42')
def test_feed_last_modified_time(self):
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_template_context_feed(self):
response = self.client.get('/syndication/template_context/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'My first entry (foo is bar)',
'description': 'My first entry (foo is bar)',
})
def test_add_domain(self):
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
| true | true |
79004abbe41e5a7062a04a2280bfef598d81361d | 3,325 | py | Python | homeassistant/components/airly/const.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 1 | 2022-01-05T16:48:58.000Z | 2022-01-05T16:48:58.000Z | homeassistant/components/airly/const.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 69 | 2020-08-04T09:03:43.000Z | 2022-03-31T06:13:01.000Z | homeassistant/components/airly/const.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 1 | 2020-12-13T08:27:33.000Z | 2020-12-13T08:27:33.000Z | """Constants for Airly integration."""
from __future__ import annotations
from typing import Final
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEVICE_CLASS_AQI,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PM1,
DEVICE_CLASS_PM10,
DEVICE_CLASS_PM25,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from .model import AirlySensorEntityDescription
ATTR_API_ADVICE: Final = "ADVICE"
ATTR_API_CAQI: Final = "CAQI"
ATTR_API_CAQI_DESCRIPTION: Final = "DESCRIPTION"
ATTR_API_CAQI_LEVEL: Final = "LEVEL"
ATTR_API_HUMIDITY: Final = "HUMIDITY"
ATTR_API_PM10: Final = "PM10"
ATTR_API_PM1: Final = "PM1"
ATTR_API_PM25: Final = "PM25"
ATTR_API_PRESSURE: Final = "PRESSURE"
ATTR_API_TEMPERATURE: Final = "TEMPERATURE"
ATTR_ADVICE: Final = "advice"
ATTR_DESCRIPTION: Final = "description"
ATTR_LEVEL: Final = "level"
ATTR_LIMIT: Final = "limit"
ATTR_PERCENT: Final = "percent"
SUFFIX_PERCENT: Final = "PERCENT"
SUFFIX_LIMIT: Final = "LIMIT"
ATTRIBUTION: Final = "Data provided by Airly"
CONF_USE_NEAREST: Final = "use_nearest"
DEFAULT_NAME: Final = "Airly"
DOMAIN: Final = "airly"
LABEL_ADVICE: Final = "advice"
MANUFACTURER: Final = "Airly sp. z o.o."
MAX_UPDATE_INTERVAL: Final = 90
MIN_UPDATE_INTERVAL: Final = 5
NO_AIRLY_SENSORS: Final = "There are no Airly sensors in this area yet."
SENSOR_TYPES: tuple[AirlySensorEntityDescription, ...] = (
AirlySensorEntityDescription(
key=ATTR_API_CAQI,
device_class=DEVICE_CLASS_AQI,
name=ATTR_API_CAQI,
native_unit_of_measurement="CAQI",
),
AirlySensorEntityDescription(
key=ATTR_API_PM1,
device_class=DEVICE_CLASS_PM1,
name=ATTR_API_PM1,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM25,
device_class=DEVICE_CLASS_PM25,
name="PM2.5",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM10,
device_class=DEVICE_CLASS_PM10,
name=ATTR_API_PM10,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_HUMIDITY,
device_class=DEVICE_CLASS_HUMIDITY,
name=ATTR_API_HUMIDITY.capitalize(),
native_unit_of_measurement=PERCENTAGE,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
AirlySensorEntityDescription(
key=ATTR_API_PRESSURE,
device_class=DEVICE_CLASS_PRESSURE,
name=ATTR_API_PRESSURE.capitalize(),
native_unit_of_measurement=PRESSURE_HPA,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_TEMPERATURE,
device_class=DEVICE_CLASS_TEMPERATURE,
name=ATTR_API_TEMPERATURE.capitalize(),
native_unit_of_measurement=TEMP_CELSIUS,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
)
| 31.666667 | 76 | 0.73985 | from __future__ import annotations
from typing import Final
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEVICE_CLASS_AQI,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PM1,
DEVICE_CLASS_PM10,
DEVICE_CLASS_PM25,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from .model import AirlySensorEntityDescription
ATTR_API_ADVICE: Final = "ADVICE"
ATTR_API_CAQI: Final = "CAQI"
ATTR_API_CAQI_DESCRIPTION: Final = "DESCRIPTION"
ATTR_API_CAQI_LEVEL: Final = "LEVEL"
ATTR_API_HUMIDITY: Final = "HUMIDITY"
ATTR_API_PM10: Final = "PM10"
ATTR_API_PM1: Final = "PM1"
ATTR_API_PM25: Final = "PM25"
ATTR_API_PRESSURE: Final = "PRESSURE"
ATTR_API_TEMPERATURE: Final = "TEMPERATURE"
ATTR_ADVICE: Final = "advice"
ATTR_DESCRIPTION: Final = "description"
ATTR_LEVEL: Final = "level"
ATTR_LIMIT: Final = "limit"
ATTR_PERCENT: Final = "percent"
SUFFIX_PERCENT: Final = "PERCENT"
SUFFIX_LIMIT: Final = "LIMIT"
ATTRIBUTION: Final = "Data provided by Airly"
CONF_USE_NEAREST: Final = "use_nearest"
DEFAULT_NAME: Final = "Airly"
DOMAIN: Final = "airly"
LABEL_ADVICE: Final = "advice"
MANUFACTURER: Final = "Airly sp. z o.o."
MAX_UPDATE_INTERVAL: Final = 90
MIN_UPDATE_INTERVAL: Final = 5
NO_AIRLY_SENSORS: Final = "There are no Airly sensors in this area yet."
SENSOR_TYPES: tuple[AirlySensorEntityDescription, ...] = (
AirlySensorEntityDescription(
key=ATTR_API_CAQI,
device_class=DEVICE_CLASS_AQI,
name=ATTR_API_CAQI,
native_unit_of_measurement="CAQI",
),
AirlySensorEntityDescription(
key=ATTR_API_PM1,
device_class=DEVICE_CLASS_PM1,
name=ATTR_API_PM1,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM25,
device_class=DEVICE_CLASS_PM25,
name="PM2.5",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM10,
device_class=DEVICE_CLASS_PM10,
name=ATTR_API_PM10,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_HUMIDITY,
device_class=DEVICE_CLASS_HUMIDITY,
name=ATTR_API_HUMIDITY.capitalize(),
native_unit_of_measurement=PERCENTAGE,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
AirlySensorEntityDescription(
key=ATTR_API_PRESSURE,
device_class=DEVICE_CLASS_PRESSURE,
name=ATTR_API_PRESSURE.capitalize(),
native_unit_of_measurement=PRESSURE_HPA,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_TEMPERATURE,
device_class=DEVICE_CLASS_TEMPERATURE,
name=ATTR_API_TEMPERATURE.capitalize(),
native_unit_of_measurement=TEMP_CELSIUS,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
)
| true | true |
79004b19d0761374b84fa505adfa67cc3d731b38 | 6,753 | py | Python | python/BayesianBlocks_python.py | fermi-lat/BayesianBlocks | 83580da7938cfb7646d659974f727cc001e550cb | [
"BSD-3-Clause"
] | 2 | 2019-11-24T13:07:40.000Z | 2021-05-17T13:25:16.000Z | python/BayesianBlocks_python.py | fermi-lat/BayesianBlocks | 83580da7938cfb7646d659974f727cc001e550cb | [
"BSD-3-Clause"
] | null | null | null | python/BayesianBlocks_python.py | fermi-lat/BayesianBlocks | 83580da7938cfb7646d659974f727cc001e550cb | [
"BSD-3-Clause"
] | 2 | 2019-11-24T13:05:46.000Z | 2022-03-06T03:54:20.000Z | """
@brief Pure python implementation of the Bayesian Blocks algorithm
described by Jackson, Scargle et al. 2005, IEEE Signal Processing
Letters, 12, 105. (http://arxiv.org/abs/math/0309285)
@author J. Chiang <jchiang@slac.stanford.edu>
"""
#
# $Id: BayesianBlocks_python.py,v 1.1.1.1 2011/09/03 00:55:59 jchiang Exp $
#
import copy
import numpy as num
def gammln(xx):
cof = [76.18009172947146, -86.50532032941677,
24.01409824083091, -1.231739572450155,
0.1208650973866179e-2, -0.5395239384953e-5]
y = xx
x = xx
tmp = x + 5.5
tmp -= (x + 0.5)*num.log(tmp)
ser = 1.000000000190015
for j in range(6):
y += 1
ser += cof[j]/y
return -tmp + num.log(2.5066282746310005*ser/x)
class BayesianBlocks(object):
"""
Unbinned mode:
>>> bb = BayesianBlocks(arrival_times)
Binned:
>>> bb = BayesianBlocks(bin_content, bin_sizes, start_time)
Point measurements:
>>> bb = BayesianBlocks(time, flux, errors)
Obtaining the piecewise constant light curve:
>>> time, rate = bb.globalOpt(ncp_prior=1)
"""
def __init__(self, *argv):
self.point_mode = False
self.use_ml = True
if len(argv) == 1:
events = list(argv[0])
events.sort()
events = num.array(events)
self.cellContent = num.ones(len(argv[0]))
self.cellSizes = self._generateCells(events)
self.binned = False
else:
try:
self._readPointData(argv)
except TypeError:
self.cellContent = copy.deepcopy(argv[0])
self.cellSizes = copy.deepcopy(argv[1])
self.tstart = argv[2]
self.binned = True
def _readPointData(self, argv):
x, y, dy = (list(copy.deepcopy(argv[0])),
list(copy.deepcopy(argv[1])),
list(copy.deepcopy(argv[2])))
if len(x) != len(y) or len(y) != len(dy):
raise RuntimeError("Point measurement mode: " +
"input array sizes do not match")
x.insert(0, x[0] - (x[1] - x[0]))
x.append(x[-1] + (x[-1] - x[-2]))
x = num.array(x)
cell_bounds = (x[1:] + x[:-1])/2.
self.tstart = cell_bounds[0]
self.cellSizes = cell_bounds[1:] - cell_bounds[:-1]
self.cellContent = y
self.fluxes = num.array(y)
self.errors = num.array(dy)
self.point_mode = True
def lightCurve(self, ncp_prior=1, use_ml=True):
return self.globalOpt(ncp_prior, use_ml)
def globalOpt(self, ncp_prior=1, use_ml=True):
if self.point_mode:
blockCost = self.blockCost_point
else:
blockCost = self.blockCost
self.use_ml = use_ml
opt, last = [], []
opt.append(blockCost(0, 0) - ncp_prior)
last.append(0)
npts = len(self.cellContent)
for nn in range(1, npts):
max_opt = blockCost(0, nn) - ncp_prior
jmax = 0
for j in range(1, nn+1):
my_opt = opt[j-1] + blockCost(j, nn) - ncp_prior
if my_opt > max_opt:
max_opt = my_opt
jmax = j
opt.append(max_opt)
last.append(jmax)
changePoints = []
indx = last[-1]
while indx > 0:
changePoints.insert(0, indx)
indx = last[indx-1]
changePoints.insert(0, 0)
changePoints.append(npts)
return self._lightCurve(changePoints)
def _lightCurve(self, changePoints):
xx = []
yy = []
cell_sizes = self.cellSizes
for imin, imax in zip(changePoints[:-1], changePoints[1:]):
try:
xx.extend([self.tstart + sum(cell_sizes[:imin]),
self.tstart + sum(cell_sizes[:imax])])
except IndexError:
xx.extend([self.tstart + imin*cell_sizes,
self.tstart + imax*cell_sizes])
if self.point_mode:
f, sig, weights = self._point_block_data(imin, imax-1)
yval = sum(weights*f)
else:
yval = (sum(self.cellContent[imin:imax])
/sum(cell_sizes[imin:imax]))
yy.extend([yval, yval])
return xx, yy
def _point_block_data(self, imin, imax):
f, sig = self.fluxes[imin:imax+1], self.errors[imin:imax+1]
weights = 1./sig**2/sum(1./sig**2)
return f, sig, weights
def blockCost_point(self, imin, imax):
f, sig, weights = self._point_block_data(imin, imax)
sigx2 = sum(weights*f**2) - (sum(weights*f))**2
return -sigx2/2*sum(1./sig**2)
def blockCost(self, imin, imax):
size = self.blockSize(imin, imax)
content = self.blockContent(imin, imax)
if content == 0:
return 0
my_cost = content*(num.log(content/size) - 1)
return my_cost
def blockSize(self, imin, imax):
try:
return sum(self.cellSizes[imin:imax+1])
except IndexError:
return self.cellSizes*(imax - imin)
def blockContent(self, imin, imax):
return sum(self.cellContent[imin:imax+1])
def _generateCells(self, events):
self.tstart = (3*events[0] - events[1])/2.
bounds = ((events[1:] + events[:-1])/2.).tolist()
bounds.insert(0, self.tstart)
bounds.append((3*events[-1] - events[-2])/2.)
bounds = num.array(bounds)
return bounds[1:] - bounds[:-1]
if __name__ == '__main__':
# import hippoplotter as plot
# import distributions as dist
# nsamp = 200
# events = dist.sample(dist.stepFunction(0.5, 0.7, amp=0.7), nsamp)
#
# output = open('events.dat', 'w')
# for event in events:
# output.write("%12.4e\n" % event)
# output.close()
class Histogram(object):
def __init__(self, xmin, xmax, nx):
self.xmin = xmin
self.dx = (xmax - xmin)/float(nx)
self.binContent = num.zeros(nx)
self.binSizes = self.dx*num.ones(nx)
def add(self, xx, wt=1):
indx = int((xx - self.xmin)/self.dx)
self.binContent[indx] += wt
events = [float(x.strip()) for x in open('events.dat', 'r')]
hist = Histogram(0, 1, 50)
for event in events:
hist.add(event)
bb = BayesianBlocks(events)
xx, yy = bb.globalOpt(ncp_prior=1)
bb2 = BayesianBlocks(hist.binContent, hist.binSizes, 0)
xx2, yy2 = bb2.globalOpt(ncp_prior=1)
# plot.histogram(events)
# plot.scatter(xx, yy, oplot=1, pointRep='Line', color='red', autoscale=1)
# plot.scatter(xx2, yy2, oplot=1, pointRep='Line', color='blue')
| 35.171875 | 77 | 0.554568 |
import copy
import numpy as num
def gammln(xx):
cof = [76.18009172947146, -86.50532032941677,
24.01409824083091, -1.231739572450155,
0.1208650973866179e-2, -0.5395239384953e-5]
y = xx
x = xx
tmp = x + 5.5
tmp -= (x + 0.5)*num.log(tmp)
ser = 1.000000000190015
for j in range(6):
y += 1
ser += cof[j]/y
return -tmp + num.log(2.5066282746310005*ser/x)
class BayesianBlocks(object):
def __init__(self, *argv):
self.point_mode = False
self.use_ml = True
if len(argv) == 1:
events = list(argv[0])
events.sort()
events = num.array(events)
self.cellContent = num.ones(len(argv[0]))
self.cellSizes = self._generateCells(events)
self.binned = False
else:
try:
self._readPointData(argv)
except TypeError:
self.cellContent = copy.deepcopy(argv[0])
self.cellSizes = copy.deepcopy(argv[1])
self.tstart = argv[2]
self.binned = True
def _readPointData(self, argv):
x, y, dy = (list(copy.deepcopy(argv[0])),
list(copy.deepcopy(argv[1])),
list(copy.deepcopy(argv[2])))
if len(x) != len(y) or len(y) != len(dy):
raise RuntimeError("Point measurement mode: " +
"input array sizes do not match")
x.insert(0, x[0] - (x[1] - x[0]))
x.append(x[-1] + (x[-1] - x[-2]))
x = num.array(x)
cell_bounds = (x[1:] + x[:-1])/2.
self.tstart = cell_bounds[0]
self.cellSizes = cell_bounds[1:] - cell_bounds[:-1]
self.cellContent = y
self.fluxes = num.array(y)
self.errors = num.array(dy)
self.point_mode = True
def lightCurve(self, ncp_prior=1, use_ml=True):
return self.globalOpt(ncp_prior, use_ml)
def globalOpt(self, ncp_prior=1, use_ml=True):
if self.point_mode:
blockCost = self.blockCost_point
else:
blockCost = self.blockCost
self.use_ml = use_ml
opt, last = [], []
opt.append(blockCost(0, 0) - ncp_prior)
last.append(0)
npts = len(self.cellContent)
for nn in range(1, npts):
max_opt = blockCost(0, nn) - ncp_prior
jmax = 0
for j in range(1, nn+1):
my_opt = opt[j-1] + blockCost(j, nn) - ncp_prior
if my_opt > max_opt:
max_opt = my_opt
jmax = j
opt.append(max_opt)
last.append(jmax)
changePoints = []
indx = last[-1]
while indx > 0:
changePoints.insert(0, indx)
indx = last[indx-1]
changePoints.insert(0, 0)
changePoints.append(npts)
return self._lightCurve(changePoints)
def _lightCurve(self, changePoints):
xx = []
yy = []
cell_sizes = self.cellSizes
for imin, imax in zip(changePoints[:-1], changePoints[1:]):
try:
xx.extend([self.tstart + sum(cell_sizes[:imin]),
self.tstart + sum(cell_sizes[:imax])])
except IndexError:
xx.extend([self.tstart + imin*cell_sizes,
self.tstart + imax*cell_sizes])
if self.point_mode:
f, sig, weights = self._point_block_data(imin, imax-1)
yval = sum(weights*f)
else:
yval = (sum(self.cellContent[imin:imax])
/sum(cell_sizes[imin:imax]))
yy.extend([yval, yval])
return xx, yy
def _point_block_data(self, imin, imax):
f, sig = self.fluxes[imin:imax+1], self.errors[imin:imax+1]
weights = 1./sig**2/sum(1./sig**2)
return f, sig, weights
def blockCost_point(self, imin, imax):
f, sig, weights = self._point_block_data(imin, imax)
sigx2 = sum(weights*f**2) - (sum(weights*f))**2
return -sigx2/2*sum(1./sig**2)
def blockCost(self, imin, imax):
size = self.blockSize(imin, imax)
content = self.blockContent(imin, imax)
if content == 0:
return 0
my_cost = content*(num.log(content/size) - 1)
return my_cost
def blockSize(self, imin, imax):
try:
return sum(self.cellSizes[imin:imax+1])
except IndexError:
return self.cellSizes*(imax - imin)
def blockContent(self, imin, imax):
return sum(self.cellContent[imin:imax+1])
def _generateCells(self, events):
self.tstart = (3*events[0] - events[1])/2.
bounds = ((events[1:] + events[:-1])/2.).tolist()
bounds.insert(0, self.tstart)
bounds.append((3*events[-1] - events[-2])/2.)
bounds = num.array(bounds)
return bounds[1:] - bounds[:-1]
if __name__ == '__main__':
class Histogram(object):
def __init__(self, xmin, xmax, nx):
self.xmin = xmin
self.dx = (xmax - xmin)/float(nx)
self.binContent = num.zeros(nx)
self.binSizes = self.dx*num.ones(nx)
def add(self, xx, wt=1):
indx = int((xx - self.xmin)/self.dx)
self.binContent[indx] += wt
events = [float(x.strip()) for x in open('events.dat', 'r')]
hist = Histogram(0, 1, 50)
for event in events:
hist.add(event)
bb = BayesianBlocks(events)
xx, yy = bb.globalOpt(ncp_prior=1)
bb2 = BayesianBlocks(hist.binContent, hist.binSizes, 0)
xx2, yy2 = bb2.globalOpt(ncp_prior=1)
| true | true |
79004c005dbef1968cc4f7951bd7124b35fed207 | 8,025 | py | Python | examples/symbolic/test_symbolic_8.py | slamavl/quantarhei | d822bc2db86152c418e330a9152e7866869776f7 | [
"MIT"
] | 14 | 2016-10-16T13:26:05.000Z | 2021-11-09T11:40:52.000Z | examples/symbolic/test_symbolic_8.py | slamavl/quantarhei | d822bc2db86152c418e330a9152e7866869776f7 | [
"MIT"
] | 61 | 2016-09-19T10:45:56.000Z | 2021-11-10T13:53:06.000Z | examples/symbolic/test_symbolic_8.py | slamavl/quantarhei | d822bc2db86152c418e330a9152e7866869776f7 | [
"MIT"
] | 21 | 2016-08-30T09:09:28.000Z | 2022-03-30T03:16:35.000Z | # -*- coding: utf-8 -*-
"""
Calculation of cumulant expressions for non-linear response functions
of the third order for a multilevel three band system.
"""
from quantarhei.symbolic.cumulant import Ugde, Uedg, Uged, Uegd #, ExpdV
from quantarhei.symbolic.cumulant import gg #, g1, g2
from quantarhei.symbolic.cumulant import CumulantExpr
from quantarhei.symbolic.abc import a, b, f, tau, tau1, tau2, tau3, c, d #, e, t, T, tau, x, y
from quantarhei.symbolic.abc import t1, t2, t3
from quantarhei.symbolic.lang import python_code
from quantarhei.symbolic.lang import fortran_code
import time
def evaluate_cumulant(cum, positive_times=(), leading_index=None,
                      lang="Python", arrays=None):
    """Evaluate a cumulant expression and render it as source code.

    Parameters
    ----------
    cum : symbolic expression
        Product of evolution operators to expand in cumulants.
    positive_times : sequence
        Time symbols assumed positive.  The default is an immutable
        tuple; the original ``[]`` default was a mutable default
        argument (shared across calls).
    leading_index : symbol or None
        Site index the result is expressed relative to, if given.
    lang : str
        Output language, "Python" or "Fortran".
    arrays : list or None
        Names to be treated as arrays by the code generator.

    Returns
    -------
    str
        The generated source code.

    Raises
    ------
    Exception
        If ``lang`` is not recognized.
    """
    t0 = time.time()
    A = cum.rewrite(gg)
    expr = CumulantExpr(A)
    expr = expr.evaluate()
    t1 = time.time()
    for tt in positive_times:
        expr = CumulantExpr(expr)._make_positive(tt)
    t2 = time.time()
    if leading_index is not None:
        D = expr._leading_index(leading_index)
        expr = D._getExpr()
    t3 = time.time()
    if lang == "Fortran":
        ss = fortran_code(expr.__str__())
    elif lang == "Python":
        ss = python_code(expr.__str__(), arrays=arrays)
    else:
        raise Exception("Unknown language")
    # Stage timings (debug output).
    print(t1-t0)
    print(t2-t1)
    print(t3-t2)
    return ss
def R1g():
    """Cumulant expression for the third-order response term R1g."""
    pathway = Ugde(b, t1)*Uedg(b, t1+t2)*Ugde(a, t1+t2+t3)
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R2g():
    """Cumulant expression for the third-order response term R2g."""
    pathway = Uedg(a, t1+t2)*Ugde(b, t1+t2+t3)*Uedg(b, t1)
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R3g():
    """Cumulant expression for the third-order response term R3g."""
    pathway = Uedg(a, t1)*Ugde(b, t1+t2+t3)*Uedg(b, t1+t2)
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R4g():
    """Cumulant expression for the third-order response term R4g."""
    pathway = Ugde(b, t1+t2+t3)*Uedg(b, t1+t2)*Ugde(a, t1)
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R1fs():
    """Cumulant expression for the f-band response term R1fs."""
    pathway = (Uedg(a, t1+t2+t3)*Ugde(f, t1+t2+t3)*Uedg(f, t1+t2)
               *Ugde(b, t1+t2)*Uedg(b, t1))
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R2fs():
    """Cumulant expression for the f-band response term R2fs."""
    pathway = (Ugde(b, t1)*Uedg(b, t1+t2+t3)*Ugde(f, t1+t2+t3)
               *Uedg(f, t1+t2)*Ugde(a, t1+t2))
    return evaluate_cumulant(pathway, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def print_R1gt():
    """Print the two factorized cumulant pieces of the R1g term."""
    for piece in (Ugde(b, t3), Ugde(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R2gt():
    """Print the two factorized cumulant pieces of the R2g term."""
    for piece in (Ugde(b, t3), Uedg(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R1fst():
    """Print the two factorized cumulant pieces of the R1fs term."""
    for piece in (Uedg(b, t3)*Ugde(f, t3), Uedg(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R2fst():
    """Print the two factorized cumulant pieces of the R2fs term."""
    for piece in (Uedg(b, t3)*Ugde(f, t3), Ugde(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_trans_R2g():
    """Print the R2g cumulant with one transfer event at time t1+tau."""
    expr = (Uedg(a, t1+tau)*Ugde(b, t1+tau)*Uedg(b, t1+t2)
            *Ugde(b, t1+t2+t3)*Uedg(b, t1+tau)*Ugde(a, t1+tau)
            *Uedg(a, t1))
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt():
    """Print an alternative (time-shifted) form of the transfer R2g."""
    expr = (Uged(a, t1)*Uedg(a, tau1)*Ugde(b, tau1)*Uedg(b, t2)
            *Ugde(b, t2+t3)*Uedg(b, tau1)*Ugde(a, tau1))
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt2():
    """Print a second, more compact form of the transfer R2g."""
    expr = Uged(a, t1+tau1)*Uedg(b, t2-tau1)*Ugde(b, t2+t3-tau1)*Uegd(a, tau1)
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def generate_nth_order_R2g(states_tuple, times_tuple):
    """Print the cumulant of an R2g-like pathway with n transfer events.

    ``states_tuple`` is the chain of states visited; ``times_tuple``
    holds the matching transfer times, its last entry being the
    population time (must be t2).

    Raises
    ------
    Exception
        If the two sequences differ in length.
    """
    if len(states_tuple) != len(times_tuple):
        raise Exception("Wrong tuple/list length")

    start_state = states_tuple[0]        # initial state
    final_state = states_tuple[-1]       # state during t3
    t_pop = times_tuple[-1]              # population time

    left = Uged(start_state, t1)
    middle = Uedg(final_state, t_pop)*Ugde(final_state, t3 + t_pop)

    chain_left = 1
    chain_right = 1
    for s_from, s_to, t_sw in zip(states_tuple, states_tuple[1:],
                                  times_tuple):
        chain_left = chain_left*Uedg(s_from, t_sw)*Ugde(s_to, t_sw)
        chain_right = Uedg(s_to, t_sw)*Ugde(s_from, t_sw)*chain_right

    A = left*chain_left*middle*chain_right
    print(A)
    print(evaluate_cumulant(A, positive_times=(t1, t_pop, t3),
                            leading_index=start_state, arrays=["gg"]))
def test():
    """Ad-hoc check of a mixed Uged/Ugde/Uegd operator product."""
    expr = Uged(a, t1+t2)*Ugde(d, t3)*Uegd(a, t2)
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def oneex_twoex():
    """Print the cumulant of the one-/two-exciton overlap factor.

    Fix: ``arrays`` was passed the bare string ``"gg"``; every other
    caller in this module passes the list ``["gg"]``, and a bare string
    handed to the code generator would be iterated character by
    character ('g', 'g').
    """
    A = Uedg(f, t1)*Ugde(a, t1)
    print(evaluate_cumulant(A, positive_times=(t1,), leading_index=a,
                            arrays=["gg"]))
# =============================================================================
# print("R1g:")
# st_R1g = "numpy.exp("+R1g()+")"
# print(st_R1g)
#
# print("")
# print("R2g:")
# print(R2g())
#
# print("")
# print("R3g:")
# print(R3g())
#
# print("")
# print("R4g:")
# print(R4g())
#
# print("")
# print("R1fs:")
# print(R1fs())
#
# print("")
# print("R2fs:")
# print(R2fs())
#
# print("")
# print("R1gt")
# print_R1gt()
#
# print("")
# print("R2gt")
# print_R2gt()
#
# print("")
# print("R1fst")
# print_R1fst()
#
# print("")
# print("R2fst")
# print_R2fst()
#
# =============================================================================
#print("")
#print("Trans_R2g")
#print_trans_R2g()
#
#print("")
#print("Trans_R2g_alt")
#print_trans_R2g_alt()
#
#print("")
#print("Trans_R2g_alt2")
#print_trans_R2g_alt2()
#print("***")
#states = (a, c, b) #(a,c,b)
#times = (tau1, tau2, t2) # (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#
#print("===")
#A = Uged(a,t1)*Uedg(a,tau1)*Ugde(c,tau1)*Uedg(c,tau2)*Ugde(b,tau2)*Uedg(b,t2)*Ugde(b,t2 + t3)*Uedg(b,tau2)*Ugde(c,tau2)*Uedg(c,tau1)*Ugde(a,tau1)
#
#print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
# leading_index=a, arrays=["gg"]))
#print("***")
#states = (a,b,c, d) #(a,c,b)
#times = (tau1, tau2, tau3, t2) # (tau1,tau2,t2)
#states = (a,c,b)
#times = (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#test()
oneex_twoex() | 24.616564 | 146 | 0.52947 |
from quantarhei.symbolic.cumulant import Ugde, Uedg, Uged, Uegd
from quantarhei.symbolic.cumulant import gg
from quantarhei.symbolic.cumulant import CumulantExpr
from quantarhei.symbolic.abc import a, b, f, tau, tau1, tau2, tau3, c, d
from quantarhei.symbolic.abc import t1, t2, t3
from quantarhei.symbolic.lang import python_code
from quantarhei.symbolic.lang import fortran_code
import time
def evaluate_cumulant(cum, positive_times=(), leading_index=None,
                      lang="Python", arrays=None):
    """Evaluate a cumulant expression and render it as source code.

    The ``positive_times`` default is an immutable tuple — the original
    ``[]`` was a mutable default argument shared across calls.
    Raises ``Exception`` when ``lang`` is neither "Python" nor "Fortran".
    """
    t0 = time.time()
    A = cum.rewrite(gg)
    expr = CumulantExpr(A)
    expr = expr.evaluate()
    t1 = time.time()
    for tt in positive_times:
        expr = CumulantExpr(expr)._make_positive(tt)
    t2 = time.time()
    if leading_index is not None:
        D = expr._leading_index(leading_index)
        expr = D._getExpr()
    t3 = time.time()
    if lang == "Fortran":
        ss = fortran_code(expr.__str__())
    elif lang == "Python":
        ss = python_code(expr.__str__(), arrays=arrays)
    else:
        raise Exception("Unknown language")
    # Stage timings (debug output).
    print(t1-t0)
    print(t2-t1)
    print(t3-t2)
    return ss
def R1g():
    """Cumulant expression for the response term R1g."""
    A = Ugde(b, t1)*Uedg(b, t1+t2)*Ugde(a, t1+t2+t3)
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R2g():
    """Cumulant expression for the response term R2g."""
    A = Uedg(a, t1+t2)*Ugde(b, t1+t2+t3)*Uedg(b, t1)
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R3g():
    """Cumulant expression for the response term R3g."""
    A = Uedg(a, t1)*Ugde(b, t1+t2+t3)*Uedg(b, t1+t2)
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R4g():
    """Cumulant expression for the response term R4g."""
    A = Ugde(b, t1+t2+t3)*Uedg(b, t1+t2)*Ugde(a, t1)
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R1fs():
    """Cumulant expression for the f-band response term R1fs."""
    A = (Uedg(a, t1+t2+t3)*Ugde(f, t1+t2+t3)*Uedg(f, t1+t2)
         *Ugde(b, t1+t2)*Uedg(b, t1))
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def R2fs():
    """Cumulant expression for the f-band response term R2fs."""
    A = (Ugde(b, t1)*Uedg(b, t1+t2+t3)*Ugde(f, t1+t2+t3)
         *Uedg(f, t1+t2)*Ugde(a, t1+t2))
    return evaluate_cumulant(A, positive_times=(t1, t2, t3),
                             leading_index=a, arrays=["gg"])
def print_R1gt():
    """Print the two factorized cumulant pieces of the R1g term."""
    for piece in (Ugde(b, t3), Ugde(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R2gt():
    """Print the two factorized cumulant pieces of the R2g term."""
    for piece in (Ugde(b, t3), Uedg(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R1fst():
    """Print the two factorized cumulant pieces of the R1fs term."""
    for piece in (Uedg(b, t3)*Ugde(f, t3), Uedg(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_R2fst():
    """Print the two factorized cumulant pieces of the R2fs term."""
    for piece in (Uedg(b, t3)*Ugde(f, t3), Ugde(a, t1)):
        print(evaluate_cumulant(piece, positive_times=(t1, t2, t3),
                                leading_index=a, arrays=["gg"]))
def print_trans_R2g():
    """Print the R2g cumulant with one transfer event at time t1+tau."""
    expr = (Uedg(a, t1+tau)*Ugde(b, t1+tau)*Uedg(b, t1+t2)
            *Ugde(b, t1+t2+t3)*Uedg(b, t1+tau)*Ugde(a, t1+tau)
            *Uedg(a, t1))
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt():
    """Print an alternative (time-shifted) form of the transfer R2g."""
    expr = (Uged(a, t1)*Uedg(a, tau1)*Ugde(b, tau1)*Uedg(b, t2)
            *Ugde(b, t2+t3)*Uedg(b, tau1)*Ugde(a, tau1))
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt2():
    """Print a second, more compact form of the transfer R2g."""
    expr = Uged(a, t1+tau1)*Uedg(b, t2-tau1)*Ugde(b, t2+t3-tau1)*Uegd(a, tau1)
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def generate_nth_order_R2g(states_tuple, times_tuple):
    """Print the cumulant of an R2g-like pathway with n transfer events.

    The last entry of ``times_tuple`` is the population time (must be
    t2); raises ``Exception`` when the two sequences differ in length.
    """
    if len(states_tuple) != len(times_tuple):
        raise Exception("Wrong tuple/list length")

    start_state = states_tuple[0]
    final_state = states_tuple[-1]
    t_pop = times_tuple[-1]

    left = Uged(start_state, t1)
    middle = Uedg(final_state, t_pop)*Ugde(final_state, t3 + t_pop)

    chain_left = 1
    chain_right = 1
    for s_from, s_to, t_sw in zip(states_tuple, states_tuple[1:],
                                  times_tuple):
        chain_left = chain_left*Uedg(s_from, t_sw)*Ugde(s_to, t_sw)
        chain_right = Uedg(s_to, t_sw)*Ugde(s_from, t_sw)*chain_right

    A = left*chain_left*middle*chain_right
    print(A)
    print(evaluate_cumulant(A, positive_times=(t1, t_pop, t3),
                            leading_index=start_state, arrays=["gg"]))
def test():
    """Ad-hoc check of a mixed Uged/Ugde/Uegd operator product."""
    expr = Uged(a, t1+t2)*Ugde(d, t3)*Uegd(a, t2)
    print(evaluate_cumulant(expr, positive_times=(t1, t2, t3),
                            leading_index=a, arrays=["gg"]))
def oneex_twoex():
    """Print the cumulant of the one-/two-exciton overlap factor.

    Fix: ``arrays`` was passed the bare string ``"gg"`` while every
    other caller passes the list ``["gg"]``; a bare string would be
    iterated character by character by the code generator.
    """
    A = Uedg(f, t1)*Ugde(a, t1)
    print(evaluate_cumulant(A, positive_times=(t1,), leading_index=a,
                            arrays=["gg"]))
| true | true |
79004d113eb31b70f4067b50139981c6b9e139c0 | 1,163 | py | Python | fairgraph/openminds/sands/miscellaneous/coordinate_point.py | HumanBrainProject/fairgraph | 6cc43ad7a6e0f8f5c533c9c8def9274ce7dc0810 | [
"Apache-2.0"
] | 8 | 2019-10-16T13:27:10.000Z | 2022-03-12T12:03:02.000Z | fairgraph/openminds/sands/miscellaneous/coordinate_point.py | HumanBrainProject/fairgraph | 6cc43ad7a6e0f8f5c533c9c8def9274ce7dc0810 | [
"Apache-2.0"
] | 26 | 2019-06-12T13:56:26.000Z | 2021-11-24T08:48:47.000Z | fairgraph/openminds/sands/miscellaneous/coordinate_point.py | HumanBrainProject/fairgraph | 6cc43ad7a6e0f8f5c533c9c8def9274ce7dc0810 | [
"Apache-2.0"
] | 8 | 2019-06-26T07:10:44.000Z | 2021-02-04T15:13:16.000Z | """
Structured information on a coordinate point.
"""
# this file was auto-generated
from datetime import date, datetime
from fairgraph.base_v3 import EmbeddedMetadata, IRI
from fairgraph.fields import Field
class CoordinatePoint(EmbeddedMetadata):
    """
    Structured information on a coordinate point.
    """
    # openMINDS schema type identifier.
    type = ["https://openminds.ebrains.eu/sands/CoordinatePoint"]
    # JSON-LD context prefixes used when serializing instances.
    context = {
        "schema": "http://schema.org/",
        "kg": "https://kg.ebrains.eu/api/instances/",
        "vocab": "https://openminds.ebrains.eu/vocab/",
        "terms": "https://openminds.ebrains.eu/controlledTerms/",
        "core": "https://openminds.ebrains.eu/core/"
    }
    # Declarative field schema consumed by the fairgraph machinery.
    fields = [
        Field(
            "coordinates",
            "openminds.core.QuantitativeValue",
            "vocab:coordinates",
            multiple=True,
            required=True,
            doc="Pair or triplet of numbers defining a location in a given coordinate space.",
        ),
        Field(
            "coordinate_space",
            ["openminds.sands.CommonCoordinateSpace",
             "openminds.sands.CustomCoordinateSpace"],
            "vocab:coordinateSpace",
            multiple=False,
            required=True,
            doc="Two or three dimensional geometric setting.",
        ),
    ]
| 34.205882 | 173 | 0.675838 |
from datetime import date, datetime
from fairgraph.base_v3 import EmbeddedMetadata, IRI
from fairgraph.fields import Field
class CoordinatePoint(EmbeddedMetadata):
    """Structured information on a coordinate point."""

    # openMINDS schema type identifier.
    type = ["https://openminds.ebrains.eu/sands/CoordinatePoint"]
    # JSON-LD context prefixes used when serializing instances.
    context = {
        "schema": "http://schema.org/",
        "kg": "https://kg.ebrains.eu/api/instances/",
        "vocab": "https://openminds.ebrains.eu/vocab/",
        "terms": "https://openminds.ebrains.eu/controlledTerms/",
        "core": "https://openminds.ebrains.eu/core/"
    }
    # Declarative field schema consumed by the fairgraph machinery.
    fields = [
        Field("coordinates", "openminds.core.QuantitativeValue",
              "vocab:coordinates", multiple=True, required=True,
              doc="Pair or triplet of numbers defining a location in a given coordinate space."),
        Field("coordinate_space",
              ["openminds.sands.CommonCoordinateSpace",
               "openminds.sands.CustomCoordinateSpace"],
              "vocab:coordinateSpace", multiple=False, required=True,
              doc="Two or three dimensional geometric setting."),
    ]
| true | true |
79004d1c2a3386e1ca2a5d90181729809dbd2cd0 | 35,078 | py | Python | rplugin/python3/denite/ui/default.py | supermomonga/denite.nvim | c55e99ec45d16fb5cce33bf78d6ddbeb8dd73176 | [
"MIT"
] | null | null | null | rplugin/python3/denite/ui/default.py | supermomonga/denite.nvim | c55e99ec45d16fb5cce33bf78d6ddbeb8dd73176 | [
"MIT"
] | null | null | null | rplugin/python3/denite/ui/default.py | supermomonga/denite.nvim | c55e99ec45d16fb5cce33bf78d6ddbeb8dd73176 | [
"MIT"
] | null | null | null | # ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
    """Whether candidate gathering is still running asynchronously."""
    return self._is_async
def __init__(self, vim: Nvim) -> None:
    """Initialize all UI state; no buffer is created yet."""
    self._vim = vim
    self._denite: typing.Optional[SyncParent] = None

    # Candidate state.
    self._selected_candidates: typing.List[int] = []
    self._candidates: Candidates = []
    self._cursor = 0
    self._entire_len = 0
    self._result: typing.List[typing.Any] = []
    self._context: UserContext = {}

    # Window/buffer bookkeeping.
    self._bufnr = -1
    self._winid = -1
    self._winrestcmd = ''
    self._initialized = False
    self._winheight = 0
    self._winwidth = 0
    self._winminheight = -1
    self._is_multi = False
    self._is_async = False
    self._matched_pattern = ''
    self._displayed_texts: typing.List[str] = []
    self._statusline_sources = ''
    self._titlestring = ''
    self._ruler = False
    self._prev_action = ''
    self._prev_status: typing.Dict[str, typing.Any] = {}
    self._prev_curpos: typing.List[typing.Any] = []
    self._save_window_options: typing.Dict[str, typing.Any] = {}
    self._sources_history: typing.List[typing.Any] = []
    self._previous_text = ''
    self._floating = False
    self._filter_floating = False
    self._updated = False
    self._timers: typing.Dict[str, int] = {}

    # Highlight match ids (-1 when no match is active).
    self._matched_range_id = -1
    self._matched_char_id = -1
    self._check_matchdelete = bool(self._vim.call(
        'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
          context: UserContext) -> typing.List[typing.Any]:
    """Run denite over *sources* and return the resulting candidates."""
    if not self._denite:
        # Lazily create the backend on first use.
        self._denite = SyncParent(self._vim)

    self._result = []
    context['sources_queue'] = [sources]
    self._start_sources_queue(context)
    return self._result
def do_action(self, action_name: str,
              command: str = '', is_manual: bool = False) -> None:
    """Execute *action_name* on the selected (or cursor) candidates."""
    if is_manual:
        targets = self._get_selected_candidates()
    else:
        cursor_candidate = self._get_cursor_candidate()
        targets = [cursor_candidate] if cursor_candidate else []

    if not self._denite or not targets or not action_name:
        return

    self._prev_action = action_name
    action = self._denite.get_action(
        self._context, action_name, targets)
    if not action:
        return

    post_action = self._context['post_action']
    is_quit = action['is_quit'] or post_action == 'quit'
    if is_quit:
        self.quit()

    self._denite.do_action(self._context, action_name, targets)
    self._result = targets
    if command != '':
        self._vim.command(command)

    if is_quit and post_action == 'open':
        # Re-open the denite buffer and try to restore the cursor.
        prev_cursor = self._cursor
        cursor_candidate = self._get_cursor_candidate()
        self._init_buffer()
        self.redraw(False)
        if cursor_candidate == self._get_candidate(prev_cursor):
            self._move_to_pos(prev_cursor)
        # The buffer is open again, so we are no longer quitting.
        is_quit = False

    if not is_quit and is_manual:
        self._selected_candidates = []
        self.redraw(action['is_redraw'])

    if is_manual and self._context['sources_queue']:
        self._context['input'] = ''
        self._context['quick_move'] = ''
        self._start_sources_queue(self._context)
def redraw(self, is_force: bool = True) -> None:
    """Refresh candidates and redraw the denite buffer.

    When *is_force* is true the sources are re-gathered first.
    """
    self._context['is_redraw'] = is_force
    if is_force:
        self._gather_candidates()
    needs_buffer_update = self._update_candidates()
    if needs_buffer_update:
        self._update_buffer()
    else:
        self._update_status()
    self._context['is_redraw'] = False
def quit(self) -> None:
    """Close the denite buffer and notify sources it is closing."""
    if self._denite:
        self._denite.on_close(self._context)
    self._quit_buffer()
    self._result = []
def _restart(self) -> None:
    """Rebuild the denite buffer from scratch with empty input."""
    self._context['input'] = ''
    self._quit_buffer()
    self._init_denite()
    self._gather_candidates()
    self._init_buffer()
    self._update_candidates()
    self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
    """Run the next queued source set and record it in the history."""
    if not context['sources_queue']:
        return

    current_sources = context['sources_queue'][0]
    self._sources_history.append({
        'sources': current_sources,
        'path': context['path'],
    })

    self._start(current_sources, context)

    # _start may have consumed the queue (e.g. via a chained action).
    if context['sources_queue']:
        context['sources_queue'].pop(0)
        context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
           context: UserContext) -> None:
    """Initialize (or resume) the denite buffer for *sources*."""
    from denite.ui.map import do_map

    self._vim.command('silent! autocmd! denite')

    # Never take over the command-line window.
    if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
        return

    resume = self._initialized and context['resume']
    if resume:
        # Reuse the previous session; only carry over volatile options.
        for key in ('immediately', 'immediately_1', 'cursor_pos',
                    'prev_winid', 'start_filter', 'quick_move'):
            self._context[key] = context[key]

        self._check_move_option()
        if self._check_do_option():
            return

        self._init_buffer()
        if context['refresh']:
            self.redraw()
        self._move_to_pos(self._cursor)
    else:
        if self._context != context:
            self._context.clear()
            self._context.update(context)
        self._context['sources'] = sources
        self._context['is_redraw'] = False
        self._is_multi = len(sources) > 1

        if not sources:
            # Nothing to display.
            error(self._vim, 'Empty sources')
            return

        self._init_denite()
        self._gather_candidates()
        self._update_candidates()
        self._init_cursor()

        self._check_move_option()
        if self._check_do_option():
            return

        self._init_buffer()
        self._update_displayed_texts()
        self._update_buffer()
        self._move_to_pos(self._cursor)

    if self._context['quick_move'] and do_map(self, 'quick_move', []):
        return

    if self._context['start_filter']:
        do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
    """Create and configure the denite window/buffer.

    Saves the previous window state so quitting can restore it.
    """
    self._prev_status = dict()
    self._displayed_texts = []

    # Remember where we came from.
    self._prev_bufnr = self._vim.current.buffer.number
    self._prev_curpos = self._vim.call('getcurpos')
    self._prev_wininfo = self._get_wininfo()
    self._prev_winid = int(self._context['prev_winid'])
    self._winrestcmd = self._vim.call('winrestcmd')
    self._ruler = self._vim.options['ruler']

    self._switch_buffer()
    self._bufnr = self._vim.current.buffer.number
    self._winid = self._vim.call('win_getid')

    self._resize_buffer(True)

    self._winheight = self._vim.current.window.height
    self._winwidth = self._vim.current.window.width

    self._bufvars = self._vim.current.buffer.vars
    self._bufvars['denite'] = {
        'buffer_name': self._context['buffer_name'],
    }
    self._bufvars['denite_statusline'] = {}
    self._vim.vars['denite#_previewed_buffers'] = {}

    # Save window-local options so _quit_buffer can restore them.
    self._save_window_options = {}
    for option_name in {
            'colorcolumn', 'concealcursor', 'conceallevel',
            'cursorcolumn', 'cursorline', 'foldcolumn', 'foldenable',
            'list', 'number', 'relativenumber', 'signcolumn', 'spell',
            'winfixheight', 'wrap',
    }:
        self._save_window_options[option_name] = \
            self._vim.current.window.options[option_name]

    # Note: setlocal is used instead of "current.window.options"
    # because the latter changes the global value (not the local one)
    # in neovim.
    for local_option in ('colorcolumn=', 'conceallevel=3',
                         'concealcursor=inv', 'nocursorcolumn',
                         'nofoldenable', 'foldcolumn=0', 'nolist',
                         'nonumber', 'norelativenumber', 'nospell',
                         'winfixheight', 'nowrap', 'signcolumn=no'):
        self._vim.command('setlocal ' + local_option)
    if self._context['cursorline']:
        self._vim.command('setlocal cursorline')

    options = self._vim.current.buffer.options
    if self._floating:
        # Disable the ruler while a floating window is shown.
        self._vim.options['ruler'] = False
    options['buftype'] = 'nofile'
    options['bufhidden'] = 'delete'
    options['swapfile'] = False
    options['buflisted'] = False
    options['modeline'] = False
    options['modifiable'] = False
    options['filetype'] = 'denite'

    if self._vim.call('exists', '#WinEnter'):
        self._vim.command('doautocmd WinEnter')
    if self._vim.call('exists', '#BufWinEnter'):
        self._vim.command('doautocmd BufWinEnter')
    if not self._vim.call('has', 'nvim'):
        # In Vim8, the FileType autocmd is not fired after setting the
        # filetype option.
        self._vim.command('silent doautocmd FileType denite')

    if self._context['auto_action']:
        self._vim.command('autocmd denite '
                          'CursorMoved <buffer> '
                          'call denite#call_map("auto_action")')

    self._init_syntax()
def _switch_buffer(self) -> None:
    """Open (or reuse) the window that will hold the denite buffer."""
    split = self._context['split']

    # Reuse an existing denite window when one is still alive.
    if (split != 'no' and self._winid > 0 and
            self._vim.call('win_gotoid', self._winid)):
        if split != 'vertical' and not self._floating:
            # Move the window to the bottom of the screen.
            self._vim.command('wincmd J')
        self._winrestcmd = ''
        return

    self._floating = split in ['floating', 'floating_relative']
    self._filter_floating = False

    command = 'edit'
    if split == 'tab':
        self._vim.command('tabnew')
    elif self._floating:
        # Use a neovim floating window.
        if self._vim.current.buffer.options['filetype'] != 'denite':
            self._titlestring = self._vim.options['titlestring']
        if split == 'floating':
            self._vim.call(
                'nvim_open_win',
                self._vim.call('bufnr', '%'), True, {
                    'relative': 'editor',
                    'row': int(self._context['winrow']),
                    'col': int(self._context['wincol']),
                    'width': int(self._context['winwidth']),
                    'height': int(self._context['winheight']),
                })
        elif split == 'floating_relative':
            opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
                          self._vim.call('winline') - 1)
            if self._context['auto_resize']:
                height = max(self._winheight, 1)
                width = max(self._winwidth, 1)
            else:
                width = int(self._context['winwidth'])
                height = int(self._context['winheight'])
            # Anchor above the cursor when the window does not fit below.
            if opened_pos + height + 3 > self._vim.eval('&lines'):
                anchor = 'SW'
                row = 0
                self._context['filter_winrow'] = row + opened_pos
            else:
                anchor = 'NW'
                row = 1
                self._context['filter_winrow'] = row + height + opened_pos
            self._vim.call(
                'nvim_open_win',
                self._vim.call('bufnr', '%'), True, {
                    'relative': 'cursor',
                    'row': row,
                    'col': 0,
                    'width': width,
                    'height': height,
                    'anchor': anchor,
                })
    elif self._context['filter_split_direction'] == 'floating':
        self._titlestring = self._vim.options['titlestring']
        self._filter_floating = True
    elif split != 'no':
        command = self._get_direction()
        command += ' vsplit' if split == 'vertical' else ' split'

    bufname = '[denite]-' + self._context['buffer_name']
    if self._vim.call('exists', '*bufadd'):
        bufnr = self._vim.call('bufadd', bufname)
        vertical = 'vertical' if split == 'vertical' else ''
        open_command = (
            'buffer' if split
            in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
        self._vim.command(
            'silent keepalt %s %s %s %s' % (
                self._get_direction(),
                vertical,
                open_command,
                bufnr,
            )
        )
    else:
        self._vim.call(
            'denite#util#execute_path',
            f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
    """Return the split direction, resolving dynamic choices by fit."""
    direction = str(self._context['direction'])
    if direction not in ('dynamictop', 'dynamicbottom'):
        return direction

    # "Fits" means no displayed line is wider than the current window.
    self._update_displayed_texts()
    winwidth = self._vim.call('winwidth', 0)
    is_fit = not [text for text in self._displayed_texts
                  if self._vim.call('strwidth', text) > winwidth]
    if direction == 'dynamictop':
        return 'aboveleft' if is_fit else 'topleft'
    return 'belowright' if is_fit else 'botright'
def _get_wininfo(self) -> typing.List[typing.Any]:
    """Snapshot of layout state used to detect window changes."""
    vim = self._vim
    return [
        vim.options['columns'],
        vim.options['lines'],
        vim.call('win_getid'),
        vim.call('tabpagebuflist'),
    ]
def _switch_prev_buffer(self) -> None:
    """Return to the buffer shown before denite opened (or a new one)."""
    prev = self._prev_bufnr
    if prev == self._bufnr or self._vim.buffers[prev].name == '':
        self._vim.command('enew')
    else:
        self._vim.command('buffer ' + str(prev))
def _init_syntax(self) -> None:
    """Define highlight groups and syntax rules for the denite buffer."""
    self._vim.command('syntax case ignore')
    self._vim.command('highlight default link deniteInput ModeMsg')
    self._vim.command('highlight link deniteMatchedRange ' +
                      self._context['highlight_matched_range'])
    self._vim.command('highlight link deniteMatchedChar ' +
                      self._context['highlight_matched_char'])
    self._vim.command('highlight default link ' +
                      'deniteStatusLinePath Comment')
    self._vim.command('highlight default link ' +
                      'deniteStatusLineNumber LineNR')
    self._vim.command('highlight default link ' +
                      'deniteSelectedLine Statement')

    if self._floating:
        self._vim.current.window.options['winhighlight'] = (
            'Normal:' + self._context['highlight_window_background']
        )

    # The selection icon column is concealed but still drives the
    # deniteSelectedLine highlight.
    icon = self._context['selected_icon']
    self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
                       ' contains=deniteConcealedMark') % icon)
    self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
                       ' conceal contained') % icon)

    if self._denite:
        self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
    """Filter candidates; return True when the buffer needs a redraw."""
    if not self._denite:
        return False

    (self._is_async, pattern, statuses,
     self._entire_len, self._candidates) = \
        self._denite.filter_candidates(self._context)

    prev_displayed_texts = self._displayed_texts
    self._update_displayed_texts()

    prev_matched_pattern = self._matched_pattern
    self._matched_pattern = pattern

    prev_statusline_sources = self._statusline_sources
    self._statusline_sources = ' '.join(statuses)

    # Keep polling while any source is still producing candidates.
    if self._is_async:
        self._start_timer('update_candidates')
    else:
        self._stop_timer('update_candidates')

    if (self._displayed_texts != prev_displayed_texts or
            self._matched_pattern != prev_matched_pattern or
            self._statusline_sources != prev_statusline_sources):
        self._updated = True
        self._start_timer('update_buffer')

    if self._context['search'] and self._context['input']:
        self._vim.call('setreg', '/', self._context['input'])
    return self._updated
def _update_displayed_texts(self) -> None:
    """Recompute window height and the rendered text of each candidate."""
    num_candidates = len(self._candidates)

    if not self._is_async and self._context['auto_resize']:
        winminheight = int(self._context['winminheight'])
        max_height = min(int(self._context['winheight']),
                         self._get_max_height())
        if winminheight != -1 and num_candidates < winminheight:
            self._winheight = winminheight
        elif num_candidates > max_height:
            self._winheight = max_height
        elif num_candidates != self._winheight:
            self._winheight = num_candidates

    # Width of the widest displayed source name, for column alignment.
    max_source_name_len = 0
    if self._candidates:
        max_source_name_len = max(
            len(self._get_display_source_name(c['source_name']))
            for c in self._candidates)
    self._context['max_source_name_len'] = max_source_name_len
    self._context['max_source_name_format'] = (
        '{:<' + str(self._context['max_source_name_len']) + '}')

    self._displayed_texts = [
        self._get_candidate_display_text(i)
        for i in range(num_candidates)
    ]
def _update_buffer(self) -> None:
    """Write candidate lines into the buffer and refresh highlights."""
    is_current_buffer = self._bufnr == self._vim.current.buffer.number

    self._update_status()

    if self._check_matchdelete and self._context['match_highlight']:
        # Clear stale match highlights before adding new ones.
        active_ids = [m['id'] for m in
                      self._vim.call('getmatches', self._winid)]
        if self._matched_range_id in active_ids:
            self._vim.call('matchdelete',
                           self._matched_range_id, self._winid)
            self._matched_range_id = -1
        if self._matched_char_id in active_ids:
            self._vim.call('matchdelete',
                           self._matched_char_id, self._winid)
            self._matched_char_id = -1

        if self._matched_pattern != '':
            self._matched_range_id = self._vim.call(
                'matchadd', 'deniteMatchedRange',
                r'\c' + regex_convert_py_vim(self._matched_pattern),
                10, -1, {'window': self._winid})
            # Escape characters special inside a Vim collection [].
            matched_char_pattern = '[{}]'.format(re.sub(
                r'([\[\]\\^-])',
                r'\\\1',
                self._context['input'].replace(' ', '')
            ))
            self._matched_char_id = self._vim.call(
                'matchadd', 'deniteMatchedChar',
                matched_char_pattern,
                10, -1, {'window': self._winid})

    prev_linenr = self._vim.call('line', '.')
    prev_candidate = self._get_cursor_candidate()

    buffer = self._vim.buffers[self._bufnr]
    buffer.options['modifiable'] = True
    self._vim.vars['denite#_candidates'] = [
        c['word'] for c in self._candidates]
    buffer[:] = self._displayed_texts
    buffer.options['modifiable'] = False
    self._previous_text = self._context['input']
    self._resize_buffer(is_current_buffer)

    is_changed = (self._context['reversed'] or
                  (is_current_buffer and
                   self._previous_text != self._context['input']))
    if self._updated and is_changed:
        if not is_current_buffer:
            save_winid = self._vim.call('win_getid')
            self._vim.call('win_gotoid', self._winid)
        self._init_cursor()
        self._move_to_pos(self._cursor)
        if not is_current_buffer:
            self._vim.call('win_gotoid', save_winid)
    elif is_current_buffer:
        self._vim.call('cursor', [prev_linenr, 0])

    if is_current_buffer:
        if (self._context['auto_action'] and
                prev_candidate != self._get_cursor_candidate()):
            self.do_action(self._context['auto_action'])

    self._updated = False
    self._stop_timer('update_buffer')
def _update_status(self) -> None:
    """Refresh the statusline/titlestring of the denite window."""
    prompt = ''
    if self._context['input']:
        prompt = self._context['input'] + ' '
    if self._context['error_messages']:
        prompt = '[ERROR] ' + prompt
    path = '[' + self._context['path'] + ']'

    status = {
        'input': prompt,
        'sources': self._statusline_sources,
        'path': path,
        # Extra information consumed by user statuslines.
        'buffer_name': self._context['buffer_name'],
        'line_total': len(self._candidates),
    }
    if status == self._prev_status:
        # Nothing changed; skip the redraw.
        return
    self._bufvars['denite_statusline'] = status
    self._prev_status = status

    linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"

    if not self._context['statusline']:
        return
    if self._floating or self._filter_floating:
        self._vim.options['titlestring'] = (
            "%{denite#get_status('input')}%* " +
            "%{denite#get_status('sources')} " +
            " %{denite#get_status('path')}%*" +
            "%{" + linenr + "}%*")
    else:
        winnr = self._vim.call('win_id2win', self._winid)
        self._vim.call('setwinvar', winnr, '&statusline', (
            "%#deniteInput#%{denite#get_status('input')}%* " +
            "%{denite#get_status('sources')} %=" +
            "%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
            "%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
    """Return the source-name prefix displayed for a candidate line."""
    mode = self._context['source_names']
    if not self._is_multi or mode == 'hide':
        return ''
    if mode != 'short':
        return name
    # Abbreviate: first letter of each alphabetic run for names
    # containing separators, otherwise the first two characters.
    if re.search(r'[^a-zA-Z]', name):
        return re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
    return name[:2]
def _get_candidate_display_text(self, index: int) -> str:
    """Render one candidate line: selection mark + source + abbr."""
    candidate = self._candidates[index]

    columns = []
    if self._is_multi and self._context['source_names'] != 'hide':
        columns.append(self._context['max_source_name_format'].format(
            self._get_display_source_name(candidate['source_name'])))

    # Round-trip through the configured encoding so undisplayable
    # characters are replaced instead of raising.
    encoding = self._context['encoding']
    abbr = candidate.get('abbr', candidate['word']).encode(
        encoding, errors='replace').decode(encoding, errors='replace')
    columns.append(abbr[:int(self._context['max_candidate_width'])])

    mark = (self._context['selected_icon']  # type: ignore
            if index in self._selected_candidates else ' ')
    return mark + ' '.join(columns).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = int(self._context['winrow'])
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += int(self._context['winheight'])
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': int(self._context['wincol']),
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = int(self._context['wincol'])
elif split == 'floating_relative':
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
# Note: Close filter window before preview window
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
# Quit filter buffer
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
# Move to denite window
self._vim.call('win_gotoid', self._winid)
# Restore the window
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
# Note: execute restcmd twice to restore layout properly
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
self._vim.command('normal! zb')
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
# Note: After timer_stop is called, self._timers may be removed
if key in self._timers:
self._timers.pop(key)
| 38.211329 | 79 | 0.54852 |
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
return self._is_async
def __init__(self, vim: Nvim) -> None:
self._vim = vim
self._denite: typing.Optional[SyncParent] = None
self._selected_candidates: typing.List[int] = []
self._candidates: Candidates = []
self._cursor = 0
self._entire_len = 0
self._result: typing.List[typing.Any] = []
self._context: UserContext = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._is_multi = False
self._is_async = False
self._matched_pattern = ''
self._displayed_texts: typing.List[str] = []
self._statusline_sources = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status: typing.Dict[str, typing.Any] = {}
self._prev_curpos: typing.List[typing.Any] = []
self._save_window_options: typing.Dict[str, typing.Any] = {}
self._sources_history: typing.List[typing.Any] = []
self._previous_text = ''
self._floating = False
self._filter_floating = False
self._updated = False
self._timers: typing.Dict[str, int] = {}
self._matched_range_id = -1
self._matched_char_id = -1
self._check_matchdelete = bool(self._vim.call(
'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
context: UserContext) -> typing.List[typing.Any]:
if not self._denite:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._start_sources_queue(context)
return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
self._move_to_pos(prev_cursor)
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
return
resume = self._initialized and context['resume']
if resume:
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = int(self._context['prev_winid'])
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
self._vim.command('setlocal signcolumn=no')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
self._vim.command('silent doautocmd FileType denite')
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in ['floating', 'floating_relative']
self._filter_floating = False
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': int(self._context['winrow']),
'col': int(self._context['wincol']),
'width': int(self._context['winwidth']),
'height': int(self._context['winheight']),
})
elif split == 'floating_relative':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = int(self._context['winwidth'])
height = int(self._context['winheight'])
if opened_pos + height + 3 > self._vim.eval('&lines'):
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif self._context['filter_split_direction'] == 'floating':
self._titlestring = self._vim.options['titlestring']
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = int(self._context['winminheight'])
max_height = min(int(self._context['winheight']),
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
self._previous_text = self._context['input']
self._resize_buffer(is_current_buffer)
is_changed = (self._context['reversed'] or
(is_current_buffer and
self._previous_text != self._context['input']))
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (self._context['selected_icon']
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = int(self._context['winrow'])
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += int(self._context['winheight'])
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': int(self._context['wincol']),
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = int(self._context['wincol'])
elif split == 'floating_relative':
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
self._vim.call('win_gotoid', self._winid)
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
self._vim.command('normal! zb')
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
if key in self._timers:
self._timers.pop(key)
| true | true |
79004d2a591ae728927e1e5bedd665bdda378dfe | 3,023 | py | Python | test/programytest/sentiment/test_extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | test/programytest/sentiment/test_extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | test/programytest/sentiment/test_extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.sentiment.extension import SentimentExtension
from programytest.client import TestClient
class SentimentExtensionTests(unittest.TestCase):
def setUp(self):
self._client = TestClient()
config = BotConfiguration()
config.sentiment_analyser._classname = "programy.sentiment.textblob_sentiment.TextBlobSentimentAnalyser"
config.sentiment_analyser._scores = "programy.sentiment.scores.SentimentScores"
self.client_context = self._client.create_client_context("testuser")
self.client_context._bot = Bot(config=config, client=self._client)
self.client_context._bot.initiate_sentiment_analyser()
def test_invalid_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "XXX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCOREX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCORES")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT CURRENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
def test_valid_scores_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "SENTIMENT ENABLED")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT ENABLED", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST 1")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING OVERALL")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT SCORE I LIKE YOU")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT SCORES POSITIVITY NEUTRAL SUBJECTIVITY COMPLETELY OBJECTIVE", result)
| 39.25974 | 112 | 0.726431 | import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.sentiment.extension import SentimentExtension
from programytest.client import TestClient
class SentimentExtensionTests(unittest.TestCase):
def setUp(self):
self._client = TestClient()
config = BotConfiguration()
config.sentiment_analyser._classname = "programy.sentiment.textblob_sentiment.TextBlobSentimentAnalyser"
config.sentiment_analyser._scores = "programy.sentiment.scores.SentimentScores"
self.client_context = self._client.create_client_context("testuser")
self.client_context._bot = Bot(config=config, client=self._client)
self.client_context._bot.initiate_sentiment_analyser()
def test_invalid_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "XXX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCOREX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCORES")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT CURRENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
def test_valid_scores_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "SENTIMENT ENABLED")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT ENABLED", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST 1")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING OVERALL")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT SCORE I LIKE YOU")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT SCORES POSITIVITY NEUTRAL SUBJECTIVITY COMPLETELY OBJECTIVE", result)
| true | true |
79004e190316c02e9268a486cbbdd2f2f3c2737a | 1,140 | py | Python | apps/currency/serializers.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | 1 | 2021-03-31T18:25:44.000Z | 2021-03-31T18:25:44.000Z | apps/currency/serializers.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | null | null | null | apps/currency/serializers.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | 1 | 2021-01-14T09:27:42.000Z | 2021-01-14T09:27:42.000Z | from rest_framework import serializers
from apps.currency.models import Currency
class CurrencyWalletSerializer(serializers.ModelSerializer):
actual_nonce = serializers.SerializerMethodField("get_nonce")
def get_nonce(self, wallet):
return wallet.nonce
class Meta:
from apps.wallet.models import Wallet
model = Wallet
fields = ["wallet_id", "public_key", "actual_nonce", "category", "state"]
class CurrencySerializer(serializers.ModelSerializer):
owner_wallet = CurrencyWalletSerializer(source="cashout_wallet")
owner_wallet_new = CurrencyWalletSerializer(source="owner_wallet")
cashout_wallet = CurrencyWalletSerializer()
class Meta:
model = Currency
fields = [
"uuid",
"name",
"symbol",
"token_id",
"decimals",
"campaign_end",
"claim_deadline",
"allow_minting",
"owner_wallet_new",
"owner_wallet",
"cashout_wallet",
"starting_capital",
"is_public",
"needs_sms_verification",
]
| 27.142857 | 81 | 0.620175 | from rest_framework import serializers
from apps.currency.models import Currency
class CurrencyWalletSerializer(serializers.ModelSerializer):
actual_nonce = serializers.SerializerMethodField("get_nonce")
def get_nonce(self, wallet):
return wallet.nonce
class Meta:
from apps.wallet.models import Wallet
model = Wallet
fields = ["wallet_id", "public_key", "actual_nonce", "category", "state"]
class CurrencySerializer(serializers.ModelSerializer):
owner_wallet = CurrencyWalletSerializer(source="cashout_wallet")
owner_wallet_new = CurrencyWalletSerializer(source="owner_wallet")
cashout_wallet = CurrencyWalletSerializer()
class Meta:
model = Currency
fields = [
"uuid",
"name",
"symbol",
"token_id",
"decimals",
"campaign_end",
"claim_deadline",
"allow_minting",
"owner_wallet_new",
"owner_wallet",
"cashout_wallet",
"starting_capital",
"is_public",
"needs_sms_verification",
]
| true | true |
79004e283baa674ec188339eb670cdd150291ba9 | 5,814 | py | Python | scripts/proteinInteractionEBI/parse_ebi_test.py | pradh/data | de42fe45a169ccfb1decce53c20f2e9f32ed71e1 | [
"Apache-2.0"
] | null | null | null | scripts/proteinInteractionEBI/parse_ebi_test.py | pradh/data | de42fe45a169ccfb1decce53c20f2e9f32ed71e1 | [
"Apache-2.0"
] | null | null | null | scripts/proteinInteractionEBI/parse_ebi_test.py | pradh/data | de42fe45a169ccfb1decce53c20f2e9f32ed71e1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test for parse_ebi.py.
Run "python3 parse_ebi_test.py"
'''
import copy
import unittest
import parse_ebi
CONST_TEST_TEXT = '''[Term]
id: MI:0001
name: interaction detection method
def: "Method to determine the interaction." [PMID:14755292]
[Term]
id: MI:0045
name: experimental interaction detection
def: "Methods based" [PMID:14755292]
is_a: MI:0001 ! interaction detection method
[Term]
id: MI:0401
name: biochemical
def: "The application" [PMID:14755292]
is_a: MI:0045 ! experimental interaction detection
[Term]
id: MI:0091
name: chromatography technology
def: "Used to separate" [PMID:14755292]
is_a: MI:0401 ! biochemical'''
CONST_ID_TO_CLASS_NAME = {'MI:0001': 'InteractionDetectionMethod',
'MI:0091': 'ChromatographyTechnology',
'MI:0045': 'ExperimentalInteractionDetection', 'MI:0401': 'Biochemical'}
CONST_ID_TO_NODE = {}
CONST_ID_TO_NODE_NO_RELATION = {}
for key in ['MI:0001', 'MI:0045', 'MI:0401', 'MI:0091']:
CONST_ID_TO_NODE[key] = parse_ebi.Node(key)
CONST_ID_TO_NODE_NO_RELATION[key] = parse_ebi.Node(key)
CONST_ID_TO_NODE['MI:0001'].child_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0045'].parent_list.append(CONST_ID_TO_NODE['MI:0001'])
CONST_ID_TO_NODE['MI:0045'].child_list.append(CONST_ID_TO_NODE['MI:0401'])
CONST_ID_TO_NODE['MI:0401'].parent_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0401'].child_list.append(CONST_ID_TO_NODE['MI:0091'])
CONST_ID_TO_NODE['MI:0091'].parent_list.append(CONST_ID_TO_NODE['MI:0401'])
CONST_SCHEMA1 = '''Node: dcid:ExperimentalInteractionDetection
typeOf: dcs:InteractionTypeEnum
name: "ExperimentalInteractionDetection"
psimiID: "MI:0045"
description: "Methods base"
pubMedID: "14755292"
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''
CONST_SCHEMA2 = '''Node: dcid:Biochemical
typeOf: dcs:InteractionTypeEnum
name: "Biochemical"
psimiID: "MI:0401"
description: "The applicatio"
pubMedID: "14755292"
specializationOf: dcs:ExperimentalInteractionDetection
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''
def get_file_terms(file):
"Ruturns a list of text blocks."
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms
if term_text.startswith('[Term]')]
return file_terms
CONST_FILE_TERMS = get_file_terms(CONST_TEST_TEXT)
CONST_INTERACTION_TYPE_ID_SET = set(['MI:0045', 'MI:0091', 'MI:0401'])
class TestParseEbi(unittest.TestCase):
"""Test the functions in parse_ebi.py"""
def test_get_id_maps(self):
"""Test function get_id_maps. Note that id_to_node here doesn't have parent_child
relation, so only map keys are tested."""
id_to_class_name, id_to_node = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqual(id_to_node.keys(), CONST_ID_TO_NODE_NO_RELATION.keys())
def test_build_child_parent_link(self):
"""Test function build_child_parent_link by checking the values of
child_list and parent_list."""
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(node_list):
value_set = set()
for node in node_list:
value_set.add(node.value)
return value_set
for id_key in id_to_node:
parent_value_set = get_node_value_set(id_to_node[id_key].parent_list)
const_parent_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].parent_list)
child_value_set = get_node_value_set(id_to_node[id_key].child_list)
const_child_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].child_list)
self.assertEqual(parent_value_set, const_parent_value_set)
self.assertEqual(child_value_set, const_child_value_set)
def test_TreeBuilder(self):
"""Test TreeBuilder class."""
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET)
def test_get_schema_from_text(self):
"""Test function get_schema_from_text by comparing the final schema."""
new_source_map = {'references':{}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE,
new_source_map, CONST_ID_TO_CLASS_NAME,
CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA1)
term = CONST_FILE_TERMS[2]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE,
new_source_map, CONST_ID_TO_CLASS_NAME,
CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA2)
if __name__ == '__main__':
unittest.main()
| 41.528571 | 98 | 0.71259 |
import copy
import unittest
import parse_ebi
CONST_TEST_TEXT = '''[Term]
id: MI:0001
name: interaction detection method
def: "Method to determine the interaction." [PMID:14755292]
[Term]
id: MI:0045
name: experimental interaction detection
def: "Methods based" [PMID:14755292]
is_a: MI:0001 ! interaction detection method
[Term]
id: MI:0401
name: biochemical
def: "The application" [PMID:14755292]
is_a: MI:0045 ! experimental interaction detection
[Term]
id: MI:0091
name: chromatography technology
def: "Used to separate" [PMID:14755292]
is_a: MI:0401 ! biochemical'''
CONST_ID_TO_CLASS_NAME = {'MI:0001': 'InteractionDetectionMethod',
'MI:0091': 'ChromatographyTechnology',
'MI:0045': 'ExperimentalInteractionDetection', 'MI:0401': 'Biochemical'}
CONST_ID_TO_NODE = {}
CONST_ID_TO_NODE_NO_RELATION = {}
for key in ['MI:0001', 'MI:0045', 'MI:0401', 'MI:0091']:
CONST_ID_TO_NODE[key] = parse_ebi.Node(key)
CONST_ID_TO_NODE_NO_RELATION[key] = parse_ebi.Node(key)
CONST_ID_TO_NODE['MI:0001'].child_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0045'].parent_list.append(CONST_ID_TO_NODE['MI:0001'])
CONST_ID_TO_NODE['MI:0045'].child_list.append(CONST_ID_TO_NODE['MI:0401'])
CONST_ID_TO_NODE['MI:0401'].parent_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0401'].child_list.append(CONST_ID_TO_NODE['MI:0091'])
CONST_ID_TO_NODE['MI:0091'].parent_list.append(CONST_ID_TO_NODE['MI:0401'])
CONST_SCHEMA1 = '''Node: dcid:ExperimentalInteractionDetection
typeOf: dcs:InteractionTypeEnum
name: "ExperimentalInteractionDetection"
psimiID: "MI:0045"
description: "Methods base"
pubMedID: "14755292"
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''
CONST_SCHEMA2 = '''Node: dcid:Biochemical
typeOf: dcs:InteractionTypeEnum
name: "Biochemical"
psimiID: "MI:0401"
description: "The applicatio"
pubMedID: "14755292"
specializationOf: dcs:ExperimentalInteractionDetection
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''
def get_file_terms(file):
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms
if term_text.startswith('[Term]')]
return file_terms
CONST_FILE_TERMS = get_file_terms(CONST_TEST_TEXT)
CONST_INTERACTION_TYPE_ID_SET = set(['MI:0045', 'MI:0091', 'MI:0401'])
class TestParseEbi(unittest.TestCase):
def test_get_id_maps(self):
id_to_class_name, id_to_node = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqual(id_to_node.keys(), CONST_ID_TO_NODE_NO_RELATION.keys())
def test_build_child_parent_link(self):
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(node_list):
value_set = set()
for node in node_list:
value_set.add(node.value)
return value_set
for id_key in id_to_node:
parent_value_set = get_node_value_set(id_to_node[id_key].parent_list)
const_parent_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].parent_list)
child_value_set = get_node_value_set(id_to_node[id_key].child_list)
const_child_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].child_list)
self.assertEqual(parent_value_set, const_parent_value_set)
self.assertEqual(child_value_set, const_child_value_set)
def test_TreeBuilder(self):
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET)
def test_get_schema_from_text(self):
new_source_map = {'references':{}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE,
new_source_map, CONST_ID_TO_CLASS_NAME,
CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA1)
term = CONST_FILE_TERMS[2]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE,
new_source_map, CONST_ID_TO_CLASS_NAME,
CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA2)
if __name__ == '__main__':
unittest.main()
| true | true |
79004f3fd5dd4da3a9d00eb59d8536856754ca47 | 52 | py | Python | uniplot/__init__.py | Sean1708/uniplot | c4a35b8f5cdbf6d9ecd5ace6a23c17ca76d876d5 | [
"MIT"
] | null | null | null | uniplot/__init__.py | Sean1708/uniplot | c4a35b8f5cdbf6d9ecd5ace6a23c17ca76d876d5 | [
"MIT"
] | 4 | 2016-03-11T10:57:48.000Z | 2016-04-02T12:34:37.000Z | uniplot/__init__.py | Sean1708/uniplot | c4a35b8f5cdbf6d9ecd5ace6a23c17ca76d876d5 | [
"MIT"
] | 2 | 2018-09-24T15:14:39.000Z | 2019-08-20T14:20:38.000Z | """Plot graphs from human-readable file formats."""
| 26 | 51 | 0.730769 | true | true | |
79004f575a433f46a6d9eec69c73cfe2b93d5a23 | 3,989 | py | Python | Codes/xiaohong2019/leetcode/4_median_of_two_sorted_arrays.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/xiaohong2019/leetcode/4_median_of_two_sorted_arrays.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/xiaohong2019/leetcode/4_median_of_two_sorted_arrays.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # -*- coding: utf-8 -*-
# URL : https://leetcode-cn.com/problems/median-of-two-sorted-arrays/
""""""
"""
problem:
给定两个大小为 m 和 n 的有序数组 nums1 和 nums2。
请你找出这两个有序数组的中位数,并且要求算法的时间复杂度为 O(log(m + n))。
你可以假设 nums1 和 nums2 不会同时为空。
示例 1:
nums1 = [1, 3]
nums2 = [2]
则中位数是 2.0
示例 2:
nums1 = [1, 2]
nums2 = [3, 4]
则中位数是 (2 + 3)/2 = 2.5
"""
"""
explain:
看清楚,复杂度是 O(log(m + n)),而不是 O(m + n),所以不能合并这两个数组,要原封不动,用下标去访问找出中位数。
中位数就是排序数组序列的中间位置的元素,奇数个元素取一个中间元素,偶数个元素取中间两个元素求平均。
要寻找的两个元素(非下标):(m + n + 1) / 2,(m + n + 2) / 2,当元素个数为奇数个时,这两个值是相等的,因此可以寻找这两个位置的元素出来求平均。
题目转变成找出第 k 个的元素,这里的 k 就是上面那两个。
这两个数组,是各自有序,要找这两个的元素,就需要进行比较淘汰。
找第 k 个元素的过程:
取出各自下标为 k / 2 - 1 的元素,也就是中间元素,这里就可以使得复杂度为 log 级别。
如果 nums1 < nums2,就表明 nums1 前面 k / 2 不可能有合并之后的 k,可以淘汰 nums1 的前 k / 2 个元素;
如果 nums1 > nums2,也表明 nums2 前面 k / 2 可以淘汰。
淘汰之后,k 变为 k - k / 2。
另外,k == 1 时,就不存在 k / 2(中间元素),此时比较 nums1、nums2 当前索引值的大小,取小的那一个,因为这里是取第 1(k) 个元素。
当索引值超出对应的 nums 长度时,表明 k 在另一个数组中,可以返回下标为 (索引值 + k - 1) 的元素,其中(k - 1)就是取下标。
演示:
nums1 = [1, 2, 3]
nums2 = [4, 5, 6]
根据 (m + n + 1) / 2,(m + n + 2) / 2,需要找出第 3,4 这两个元素,求平均值
初始索引值:index1 = index2 = 0
找 k == 3 的过程:
1. 根据 k / 2 - 1,各自取出下标为 0 的元素,分别是 1 和 4;由于 1 < 4,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 1。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 2 和 4;由于 2 < 4,所以 nums1 只剩下 3 这个元素,即 index1 == 2。
5. k 变更为 1。
6. 比较 nums1、nums2 当前索引值的大小,取小的那一个,即 3 和 4,取元素 3。
找 k == 4 的过程:
1. 根据 k / 2 - 1,各自取出下标为 1 的元素,分别是 2 和 5;由于 2 < 5,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 2。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 3 和 4;由于 3 < 4,所以 index1 == 3。
5. k 变更为 1。
6. 判断 index1 >= nums1.length,即 nums1 全部淘汰,取 nums2 中下标为 (index2 + k - 1)的元素,即元素 4。
平均值(中位数):
(3 + 4) / 2 = 3.5
"""
"""
out:
执行用时 : 88 ms, 在所有 python 提交中击败了 63.81% 的用户
内存消耗 : 11.8 MB, 在所有 python 提交中击败了 32.58% 的用户
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m = len(nums1)
n = len(nums2)
def find_kth(nums1, nums2, index1, index2, k):
# 索引值范围检查
if index1 >= len(nums1):
return nums2[index2 + k - 1]
if index2 >= len(nums2):
return nums1[index1 + k - 1]
# k == 1
if k == 1:
return nums1[index1] if nums1[index1] < nums2[index2] else nums2[index2]
# 取中间值比较淘汰
do_discard_nums1 = True
mid = k // 2 - 1
if index1 + mid >= len(nums1) or (
index2 + mid < len(nums2) and nums1[index1 + mid] > nums2[index2 + mid]
):
do_discard_nums1 = False
mid += 1
if do_discard_nums1:
# 淘汰 nums1 的 mid 前面的元素
return find_kth(nums1, nums2, index1 + mid, index2, k - mid)
else:
return find_kth(nums1, nums2, index1, index2 + mid, k - mid)
return (
find_kth(nums1, nums2, 0, 0, (m + n + 1) // 2)
+ find_kth(nums1, nums2, 0, 0, (m + n + 2) // 2)
) / 2.0
if __name__ == "__main__":
solution = Solution()
assert solution.findMedianSortedArrays([1, 3], [2]) == 2.0
assert solution.findMedianSortedArrays([2], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2], [3, 4]) == 2.5
assert solution.findMedianSortedArrays([1, 3], [2, 4]) == 2.5
assert solution.findMedianSortedArrays([], [1]) == 1.0
assert solution.findMedianSortedArrays([1], []) == 1.0
assert solution.findMedianSortedArrays([1, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 2, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3, 5], [4, 6, 7, 8, 9]) == 5.0
assert solution.findMedianSortedArrays([1], [2, 3, 4, 5, 6]) == 3.5
| 30.450382 | 91 | 0.57107 |
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
m = len(nums1)
n = len(nums2)
def find_kth(nums1, nums2, index1, index2, k):
if index1 >= len(nums1):
return nums2[index2 + k - 1]
if index2 >= len(nums2):
return nums1[index1 + k - 1]
if k == 1:
return nums1[index1] if nums1[index1] < nums2[index2] else nums2[index2]
do_discard_nums1 = True
mid = k // 2 - 1
if index1 + mid >= len(nums1) or (
index2 + mid < len(nums2) and nums1[index1 + mid] > nums2[index2 + mid]
):
do_discard_nums1 = False
mid += 1
if do_discard_nums1:
return find_kth(nums1, nums2, index1 + mid, index2, k - mid)
else:
return find_kth(nums1, nums2, index1, index2 + mid, k - mid)
return (
find_kth(nums1, nums2, 0, 0, (m + n + 1) // 2)
+ find_kth(nums1, nums2, 0, 0, (m + n + 2) // 2)
) / 2.0
if __name__ == "__main__":
solution = Solution()
assert solution.findMedianSortedArrays([1, 3], [2]) == 2.0
assert solution.findMedianSortedArrays([2], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2], [3, 4]) == 2.5
assert solution.findMedianSortedArrays([1, 3], [2, 4]) == 2.5
assert solution.findMedianSortedArrays([], [1]) == 1.0
assert solution.findMedianSortedArrays([1], []) == 1.0
assert solution.findMedianSortedArrays([1, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 2, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3, 5], [4, 6, 7, 8, 9]) == 5.0
assert solution.findMedianSortedArrays([1], [2, 3, 4, 5, 6]) == 3.5
| true | true |
7900500e6c1b27381a31bdc7c2718dc80a3dca00 | 662 | py | Python | manage.py | aidswidjaja/PotatoBoard | e4fbd09c9d086509433b519db3e38b69dccac81e | [
"MIT"
] | null | null | null | manage.py | aidswidjaja/PotatoBoard | e4fbd09c9d086509433b519db3e38b69dccac81e | [
"MIT"
] | 13 | 2021-01-04T06:53:11.000Z | 2021-07-01T00:40:00.000Z | manage.py | aidswidjaja/PotatoBoard | e4fbd09c9d086509433b519db3e38b69dccac81e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'potato.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.782609 | 73 | 0.678248 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'potato.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
7900507d0a8fa8a1002ea9d0903685236f062905 | 32,054 | py | Python | scripts/gen_kobject_list.py | shijunjing/zephyr | b0d509bc0dd2104cd69250b5798b833e9104f919 | [
"Apache-2.0"
] | null | null | null | scripts/gen_kobject_list.py | shijunjing/zephyr | b0d509bc0dd2104cd69250b5798b833e9104f919 | [
"Apache-2.0"
] | null | null | null | scripts/gen_kobject_list.py | shijunjing/zephyr | b0d509bc0dd2104cd69250b5798b833e9104f919 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the :ref:`kernelobjects` section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
"""
import sys
import argparse
import math
import os
import struct
import json
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
# The DWARF/ELF parsing below relies on pyelftools APIs introduced in 0.24.
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
    sys.exit("pyelftools is out of date, need version 0.24 or later")
from collections import OrderedDict
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values are a tuple:
#
#  - The first item is None, or the name of a Kconfig that
#    indicates the presence of this object's definition in case it is not
#    available in all configurations.
#
#  - The second item is a boolean indicating whether it is permissible for
#    the object to be located in user-accessible memory (here only
#    sys_mutex and k_futex set this to True).

# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict([
    ("k_mem_slab", (None, False)),
    ("k_msgq", (None, False)),
    ("k_mutex", (None, False)),
    ("k_pipe", (None, False)),
    ("k_queue", (None, False)),
    ("k_poll_signal", (None, False)),
    ("k_sem", (None, False)),
    ("k_stack", (None, False)),
    ("k_thread", (None, False)),
    ("k_timer", (None, False)),
    ("z_thread_stack_element", (None, False)),
    ("device", (None, False)),
    ("sys_mutex", (None, True)),
    ("k_futex", (None, True))
])
def kobject_to_enum(kobj):
    """Map a kernel object struct name to its K_OBJ_* enumeration constant.

    Zephyr-internal "k_"/"z_" prefixes are stripped before upper-casing,
    e.g. "k_sem" -> "K_OBJ_SEM", "device" -> "K_OBJ_DEVICE".
    """
    # str.startswith() accepts a tuple of prefixes; one call instead of two
    if kobj.startswith(("k_", "z_")):
        name = kobj[2:]
    else:
        name = kobj

    return "K_OBJ_%s" % name.upper()
# Names of driver API structs ("*_driver_api"); see subsystem_to_enum().
subsystems = [
    # Editing this list is deprecated; add the __subsystem sentinel to your
    # driver API declaration instead, e.g.
    #
    # __subsystem struct my_driver_api {
    #    ....
    # };
]
def subsystem_to_enum(subsys):
    """Map a driver API struct name ("foo_driver_api") to K_OBJ_DRIVER_FOO."""
    stem = subsys[:-len("_driver_api")]
    return "K_OBJ_DRIVER_" + stem.upper()
# --- debug stuff ---

# Basename of this script; used as a prefix in debug/error messages.
scr = os.path.basename(sys.argv[0])
def debug(text):
    """Emit a progress message to stdout, but only in --verbose mode.

    Assumes the module-level ``args`` (parsed command line) and ``scr``
    (script name) globals have been populated.
    """
    if args.verbose:
        sys.stdout.write(scr + ": " + text + "\n")
def error(text):
    """Abort the script, printing *text* prefixed with the script name."""
    msg = "%s ERROR: %s" % (scr, text)
    sys.exit(msg)
def debug_die(die, text):
    """debug() helper: print a DWARF DIE along with the source file and line
    where it was declared, followed by *text*."""
    # Some DIEs carry no declaration coordinates of their own; follow
    # DW_AT_abstract_origin back to the variable DIE in `variables`
    # that does.
    if 'DW_AT_decl_file' not in die.attributes:
        abs_orig_val = die.attributes["DW_AT_abstract_origin"].value
        offset = abs_orig_val + die.cu.cu_offset
        for var in variables:
            if var.offset == offset:
                die = var
                break
    lp_header = die.dwarfinfo.line_program_for_CU(die.cu).header
    files = lp_header["file_entry"]
    includes = lp_header["include_directory"]
    # DW_AT_decl_file and dir_index are 1-based indices into the line
    # program's tables, hence the - 1 adjustments.
    fileinfo = files[die.attributes["DW_AT_decl_file"].value - 1]
    filename = fileinfo.name.decode("utf-8")
    filedir = includes[fileinfo.dir_index - 1].decode("utf-8")
    path = os.path.join(filedir, filename)
    lineno = die.attributes["DW_AT_decl_line"].value
    debug(str(die))
    debug("File '%s', line %d:" % (path, lineno))
    debug("  %s" % text)
# -- ELF processing

# DWARF location expression opcodes (see the DWARF spec, "Location
# Expressions").
DW_OP_addr = 0x3
DW_OP_fbreg = 0x91

# Struct name of one stack element; an array of these is a thread stack.
STACK_TYPE = "z_thread_stack_element"

# Running counters used to assign per-instance data while scanning the ELF.
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0

# Global type environment. Populated by pass 1.
type_env = {}
extern_env = {}

# Variable DIEs; debug_die() searches this list to resolve
# DW_AT_abstract_origin references. Populated elsewhere in the scan.
variables = []
class KobjectInstance:
    """One kernel object instance discovered at a particular address."""

    def __init__(self, type_obj, addr):
        global thread_counter
        global sys_mutex_counter
        global futex_counter
        global stack_counter

        self.addr = addr
        self.type_obj = type_obj

        # Resolved later: device instances need their API struct address
        # examined before a type name can be chosen.
        self.type_name = None

        kind = type_obj.name
        if kind == "k_thread":
            # Threads get a numeric ID, used to track their permissions
            # on other kernel objects.
            self.data = thread_counter
            thread_counter += 1
        elif kind == "sys_mutex":
            self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
            sys_mutex_counter += 1
        elif kind == "k_futex":
            self.data = "&futex_data[%d]" % futex_counter
            futex_counter += 1
        elif kind == STACK_TYPE:
            # Stacks only bump the counter here; no .data is assigned.
            stack_counter += 1
        else:
            self.data = 0
class KobjectType:
    """Type-environment entry for a struct that *is* a kernel object."""

    def __init__(self, offset, name, size, api=False):
        # DIE offset, struct tag name, byte size, and whether this is a
        # driver subsystem API struct rather than a kernel object proper.
        self.offset = offset
        self.name = name
        self.size = size
        self.api = api

    def __repr__(self):
        return "<kobject {}>".format(self.name)

    @staticmethod
    def has_kobject():
        # A kernel object trivially contains a kernel object: itself.
        return True

    def get_kobjects(self, addr):
        # A single instance, keyed by its memory address.
        return {addr: KobjectInstance(self, addr)}
class ArrayType:
    """Type-environment entry for a DWARF array type.

    ``elements`` holds one element count per array dimension;
    ``member_type`` is the type_env offset of the element type.
    """

    def __init__(self, offset, elements, member_type):
        self.elements = elements
        self.member_type = member_type
        self.offset = offset

    def __repr__(self):
        return "<array of %d>" % self.member_type

    def has_kobject(self):
        # True iff the element type (still) resolves to a kernel object.
        if self.member_type not in type_env:
            return False

        return type_env[self.member_type].has_kobject()

    def get_kobjects(self, addr):
        """Instantiate kernel objects for every element of the array."""
        mt = type_env[self.member_type]

        # Stacks are arrays of _k_stack_element_t but we want to treat
        # the whole array as one kernel object (a thread stack)
        # Data value gets set to size of entire region
        if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
            # An array of stacks appears as a multi-dimensional array.
            # The last size is the size of each stack. We need to track
            # each stack within the array, not as one huge stack object.
            *dimensions, stacksize = self.elements
            num_members = 1
            for e in dimensions:
                num_members = num_members * e

            ret = {}
            for i in range(num_members):
                a = addr + (i * stacksize)
                o = mt.get_kobjects(a)
                # Record the per-stack region size on each instance.
                o[a].data = stacksize
                ret.update(o)
            return ret

        objs = {}

        # Multidimensional array flattened out
        num_members = 1
        for e in self.elements:
            num_members = num_members * e

        for i in range(num_members):
            objs.update(mt.get_kobjects(addr + (i * mt.size)))
        return objs
class AggregateTypeMember:
    """One data member of a struct tracked in the type environment."""

    def __init__(self, offset, member_name, member_type, member_offset):
        self.member_name = member_name
        self.member_type = member_type
        if isinstance(member_offset, list):
            # DWARF v2, location encoded as set of operations
            # only "DW_OP_plus_uconst" with ULEB128 argument supported
            if member_offset[0] == 0x23:
                # Decode the ULEB128 operand following the opcode byte:
                # 7 data bits per byte, high bit set means more bytes follow.
                self.member_offset = member_offset[1] & 0x7f
                for i in range(1, len(member_offset)-1):
                    if member_offset[i] & 0x80:
                        self.member_offset += (
                            member_offset[i+1] & 0x7f) << i*7
            else:
                raise Exception("not yet supported location operation (%s:%d:%d)" %
                        (self.member_name, self.member_type, member_offset[0]))
        else:
            # DWARF v3+: location is a plain byte offset within the struct.
            self.member_offset = member_offset

    def __repr__(self):
        return "<member %s, type %d, offset %d>" % (
            self.member_name, self.member_type, self.member_offset)

    def has_kobject(self):
        if self.member_type not in type_env:
            return False

        return type_env[self.member_type].has_kobject()

    def get_kobjects(self, addr):
        # Objects inside this member live at the struct base plus our offset.
        mt = type_env[self.member_type]
        return mt.get_kobjects(addr + self.member_offset)
class ConstType:
    """Type-environment entry for a DW_TAG_const_type qualifier wrapper."""

    def __init__(self, child_type):
        # type_env offset of the type this const qualifier wraps.
        self.child_type = child_type

    def __repr__(self):
        return "<const {}>".format(self.child_type)

    def has_kobject(self):
        # Delegate to the wrapped type, if it survived filtering.
        if self.child_type in type_env:
            return type_env[self.child_type].has_kobject()
        return False

    def get_kobjects(self, addr):
        return type_env[self.child_type].get_kobjects(addr)
class AggregateType:
    """Type-environment entry for a plain struct that may hold kobjects."""

    def __init__(self, offset, name, size):
        self.offset = offset
        self.name = name
        self.size = size
        self.members = []

    def add_member(self, member):
        self.members.append(member)

    def __repr__(self):
        return "<struct {}, with {}>".format(self.name, self.members)

    def has_kobject(self):
        """Return True if any member contains a kernel object.

        Side effect: members that cannot contain kernel objects are
        pruned from ``self.members`` so they never need re-checking.
        """
        surviving = [m for m in self.members if m.has_kobject()]
        self.members = surviving
        return bool(surviving)

    def get_kobjects(self, addr):
        found = {}
        for m in self.members:
            found.update(m.get_kobjects(addr))
        return found
# --- helper functions for getting data from DIEs ---
def die_get_spec(die):
    """Follow DW_AT_specification back to the extern-declaration DIE, if any."""
    attr = die.attributes.get('DW_AT_specification')
    if attr is None:
        return None
    # offset of the DW_TAG_variable for the extern declaration
    return extern_env.get(attr.value + die.cu.cu_offset)
def die_get_name(die):
    """Return the DIE's name, chasing the specification link when absent."""
    if 'DW_AT_name' not in die.attributes:
        die = die_get_spec(die)
        if die is None:
            return None

    return die.attributes["DW_AT_name"].value.decode("utf-8")
def die_get_type_offset(die):
    """Return the CU-absolute offset of the DIE's type, or None."""
    if 'DW_AT_type' not in die.attributes:
        die = die_get_spec(die)
        if die is None:
            return None

    # Attribute value is CU-relative; add the CU base to make it absolute.
    return die.attributes["DW_AT_type"].value + die.cu.cu_offset
def die_get_byte_size(die):
    """Return the DIE's DW_AT_byte_size, or 0 for incomplete types."""
    attr = die.attributes.get('DW_AT_byte_size')
    return attr.value if attr is not None else 0
def analyze_die_struct(die):
    """Classify a DW_TAG_structure_type DIE into the global type_env.

    Known kernel object structs become KobjectType; driver subsystem API
    structs become KobjectType(api=True); everything else becomes an
    AggregateType with one entry per data member.
    """
    name = die_get_name(die) or "<anon>"
    offset = die.offset
    size = die_get_byte_size(die)

    # Incomplete type
    if not size:
        return

    if name in kobjects:
        type_env[offset] = KobjectType(offset, name, size)
    elif name in subsystems:
        type_env[offset] = KobjectType(offset, name, size, api=True)
    else:
        at = AggregateType(offset, name, size)
        type_env[offset] = at

        for child in die.iter_children():
            if child.tag != "DW_TAG_member":
                continue
            # Bit-fields and static members lack a data member location;
            # they cannot hold a kernel object, skip them.
            data_member_location = child.attributes.get("DW_AT_data_member_location")
            if not data_member_location:
                continue

            child_type = die_get_type_offset(child)
            member_offset = data_member_location.value
            cname = die_get_name(child) or "<anon>"
            m = AggregateTypeMember(child.offset, cname, child_type,
                                    member_offset)
            at.add_member(m)

        return
def analyze_die_const(die):
    """Record a DW_TAG_const_type DIE as a ConstType wrapper in type_env."""
    child = die_get_type_offset(die)
    if child:
        type_env[die.offset] = ConstType(child)
def analyze_die_array(die):
    """Record a DW_TAG_array_type DIE as an ArrayType in type_env.

    Collects one element count per DW_TAG_subrange_type child (one per
    array dimension). Arrays without usable bounds are only recorded when
    their element type is the thread-stack element type, with an assumed
    count of 1; other unbounded arrays are ignored entirely.
    """
    type_offset = die_get_type_offset(die)
    elements = []

    for child in die.iter_children():
        if child.tag != "DW_TAG_subrange_type":
            continue
        if "DW_AT_upper_bound" not in child.attributes:
            continue

        ub = child.attributes["DW_AT_upper_bound"]
        if not ub.form.startswith("DW_FORM_data"):
            continue

        # DWARF stores the upper bound; the element count is bound + 1.
        elements.append(ub.value + 1)

    if not elements:
        if type_offset in type_env.keys():
            mt = type_env[type_offset]
            if mt.has_kobject():
                if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
                    elements.append(1)
                    type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
    else:
        type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
def analyze_typedef(die):
    """Alias a typedef DIE to the type_env entry it refers to, if tracked."""
    target = die_get_type_offset(die)
    if target in type_env:
        type_env[die.offset] = type_env[target]
def unpack_pointer(elf, data, offset):
    """Decode one native-sized pointer from raw section bytes at *offset*."""
    if elf.elfclass == 32:
        fmt, width = "I", 4   # 32-bit: 4-byte unsigned
    else:
        fmt, width = "Q", 8   # 64-bit: 8-byte unsigned
    prefix = "<" if elf.little_endian else ">"
    return struct.unpack(prefix + fmt, data[offset:offset + width])[0]
def addr_deref(elf, addr):
    """Read the pointer-sized value at *addr*.

    Scans the ELF's sections for the one containing the address and
    decodes from its data; returns 0 when no section covers it.
    """
    for section in elf.iter_sections():
        base = section['sh_addr']
        if base <= addr < base + section['sh_size']:
            return unpack_pointer(elf, section.data(), addr - base)
    return 0
def device_get_api_addr(elf, addr):
    """Return the API struct pointer stored in a device struct.

    See include/device.h for a description of struct device: the
    driver_api field sits at byte offset 8 (32-bit) or 16 (64-bit).
    """
    api_offset = 8 if elf.elfclass == 32 else 16
    return addr_deref(elf, addr + api_offset)
def find_kobjects(elf, syms):
    """Scan the ELF's DWARF info for statically allocated kernel objects.

    ``syms`` is the symbol-name -> value mapping from get_symbols().
    Returns an OrderedDict mapping memory address -> KobjectInstance,
    sorted by address.
    """
    if not elf.has_dwarf_info():
        sys.exit("ELF file has no DWARF information")

    # Bounds of the application shared memory partition; objects that may
    # not live in user RAM are rejected if found inside it (Step 4).
    app_smem_start = syms["_app_smem_start"]
    app_smem_end = syms["_app_smem_end"]

    di = elf.get_dwarf_info()

    # Step 1: collect all type information.
    for CU in di.iter_CUs():
        for die in CU.iter_DIEs():
            # Unions are disregarded, kernel objects should never be union
            # members since the memory is not dedicated to that object and
            # could be something else
            if die.tag == "DW_TAG_structure_type":
                analyze_die_struct(die)
            elif die.tag == "DW_TAG_const_type":
                analyze_die_const(die)
            elif die.tag == "DW_TAG_array_type":
                analyze_die_array(die)
            elif die.tag == "DW_TAG_typedef":
                analyze_typedef(die)
            elif die.tag == "DW_TAG_variable":
                variables.append(die)

    # Step 2: filter type_env to only contain kernel objects, or structs
    # and arrays of kernel objects
    bad_offsets = []
    for offset, type_object in type_env.items():
        if not type_object.has_kobject():
            bad_offsets.append(offset)
    for offset in bad_offsets:
        del type_env[offset]

    # Step 3: Now that we know all the types we are looking for, examine
    # all variables
    all_objs = {}

    for die in variables:
        name = die_get_name(die)
        if not name:
            continue

        if name.startswith("__init_sys_init"):
            # Boot-time initialization function; not an actual device
            continue

        type_offset = die_get_type_offset(die)

        # Is this a kernel object, or a structure containing kernel
        # objects?
        if type_offset not in type_env:
            continue

        if "DW_AT_declaration" in die.attributes:
            # Extern declaration, only used indirectly
            extern_env[die.offset] = die
            continue

        if "DW_AT_location" not in die.attributes:
            debug_die(die,
                      "No location information for object '%s'; possibly stack allocated"
                      % name)
            continue

        loc = die.attributes["DW_AT_location"]
        if loc.form != "DW_FORM_exprloc" and \
           loc.form != "DW_FORM_block1":
            debug_die(die, "kernel object '%s' unexpected location format" %
                      name)
            continue

        opcode = loc.value[0]
        if opcode != DW_OP_addr:

            # Check if frame pointer offset DW_OP_fbreg
            if opcode == DW_OP_fbreg:
                debug_die(die, "kernel object '%s' found on stack" % name)
            else:
                debug_die(die,
                          "kernel object '%s' unexpected exprloc opcode %s" %
                          (name, hex(opcode)))
            continue

        # Reassemble the 32-bit address operand from its little-endian bytes.
        addr = (loc.value[1] | (loc.value[2] << 8) |
                (loc.value[3] << 16) | (loc.value[4] << 24))

        if addr == 0:
            # Never linked; gc-sections deleted it
            continue

        type_obj = type_env[type_offset]
        objs = type_obj.get_kobjects(addr)
        all_objs.update(objs)

        debug("symbol '%s' at %s contains %d object(s)"
              % (name, hex(addr), len(objs)))

    # Step 4: objs is a dictionary mapping variable memory addresses to
    # their associated type objects. Now that we have seen all variables
    # and can properly look up API structs, convert this into a dictionary
    # mapping variables to the C enumeration of what kernel object type it
    # is.
    ret = {}
    for addr, ko in all_objs.items():
        # API structs don't get into the gperf table
        if ko.type_obj.api:
            continue

        _, user_ram_allowed = kobjects[ko.type_obj.name]
        if not user_ram_allowed and app_smem_start <= addr < app_smem_end:
            # NOTE(review): 'die' and 'name' here are leftovers from the
            # last iteration of the Step 3 loop, so this message can name
            # the wrong variable for this address — confirm and consider
            # tracking the originating DIE on each KobjectInstance.
            debug_die(die, "object '%s' found in invalid location %s"
                      % (name, hex(addr)))
            continue

        if ko.type_obj.name != "device":
            # Not a device struct so we immediately know its type
            ko.type_name = kobject_to_enum(ko.type_obj.name)
            ret[addr] = ko
            continue

        # Device struct. Need to get the address of its API struct,
        # if it has one.
        apiaddr = device_get_api_addr(elf, addr)
        if apiaddr not in all_objs:
            if apiaddr == 0:
                debug("device instance at 0x%x has no associated subsystem"
                      % addr)
            else:
                debug("device instance at 0x%x has unknown API 0x%x"
                      % (addr, apiaddr))
            # API struct does not correspond to a known subsystem, skip it
            continue

        apiobj = all_objs[apiaddr]
        ko.type_name = subsystem_to_enum(apiobj.type_obj.name)
        ret[addr] = ko

    debug("found %d kernel object instances total" % len(ret))

    # 1. Before python 3.7 dict order is not guaranteed. With Python
    #    3.5 it doesn't seem random with *integer* keys but can't
    #    rely on that.
    # 2. OrderedDict means _insertion_ order, so not enough because
    #    built from other (random!) dicts: need to _sort_ first.
    # 3. Sorting memory address looks good.
    return OrderedDict(sorted(ret.items()))
def get_symbols(elf):
    """Return a {symbol name: value} dict from the ELF's symbol table.

    Raises LookupError when the binary carries no symbol table section.
    """
    for section in elf.iter_sections():
        if isinstance(section, SymbolTableSection):
            return {s.name: s.entry.st_value
                    for s in section.iter_symbols()}

    raise LookupError("Could not find symbol table")
# -- GPERF generation logic

# gperf input preamble: lookup-function options plus the C includes the
# generated table code needs.
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct z_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct z_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct z_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
    """Write the gperf input describing every kernel object to *fp*.

    objs: address -> KobjectInstance mapping from find_kobjects().
    static_begin/static_end: address range of objects initialized at
    build/boot time; objects inside it get K_OBJ_FLAG_INITIALIZED.
    """
    fp.write(header)
    if sys_mutex_counter != 0:
        # Backing storage for user-mode sys_mutex objects.
        fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
                 % sys_mutex_counter)
        for i in range(sys_mutex_counter):
            fp.write("Z_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
            if i != sys_mutex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    if futex_counter != 0:
        # Backing storage for k_futex wait queues.
        fp.write("static struct z_futex_data futex_data[%d] = {\n"
                 % futex_counter)
        for i in range(futex_counter):
            fp.write("Z_FUTEX_DATA_INITIALIZER(futex_data[%d])" % i)
            if i != futex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    # Maps an object-type enum to the union member of struct z_object's
    # data field that carries that object's metadata.
    metadata_names = {
        "K_OBJ_THREAD" : "thread_id",
        "K_OBJ_SYS_MUTEX" : "mutex",
        "K_OBJ_FUTEX" : "futex_data"
    }

    if "CONFIG_GEN_PRIV_STACKS" in syms:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
        if stack_counter != 0:
            # Emit one privileged stack plus one z_stack_data record per
            # discovered thread stack object.
            fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
                     " __aligned(Z_PRIVILEGE_STACK_ALIGN)"
                     " priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
                     % stack_counter)

            fp.write("static struct z_stack_data stack_data[%d] = {\n"
                     % stack_counter)
            counter = 0
            for _, ko in objs.items():
                if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
                    continue

                # ko.data currently has the stack size. fetch the value to
                # populate the appropriate entry in stack_data, and put
                # a reference to the entry in stack_data into the data value
                # instead
                size = ko.data
                ko.data = "&stack_data[%d]" % counter
                fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
                         % (size, counter))
                if counter != (stack_counter - 1):
                    fp.write(",")
                fp.write("\n")
                counter += 1
            fp.write("};\n")
    else:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"

    fp.write("%%\n")

    # Setup variables for mapping thread indexes
    thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
    thread_idx_map = {}

    # All bits set = every thread index still available.
    for i in range(0, thread_max_bytes):
        thread_idx_map[i] = 0xFF

    for obj_addr, ko in objs.items():
        obj_type = ko.type_name
        # pre-initialized objects fall within this memory range, they are
        # either completely initialized at build time, or done automatically
        # at boot during some PRE_KERNEL_* phase
        initialized = static_begin <= obj_addr < static_end
        is_driver = obj_type.startswith("K_OBJ_DRIVER_")

        if "CONFIG_64BIT" in syms:
            format_code = "Q"
        else:
            format_code = "I"

        if little_endian:
            endian = "<"
        else:
            endian = ">"

        # The gperf "key" is the object's raw address rendered as a
        # non-NULL-terminated binary string of \xNN escapes.
        byte_str = struct.pack(endian + format_code, obj_addr)
        fp.write("\"")
        for byte in byte_str:
            val = "\\x%02x" % byte
            fp.write(val)

        flags = "0"
        if initialized:
            flags += " | K_OBJ_FLAG_INITIALIZED"
        if is_driver:
            flags += " | K_OBJ_FLAG_DRIVER"

        if ko.type_name in metadata_names:
            tname = metadata_names[ko.type_name]
        else:
            tname = "unused"

        fp.write("\", {}, %s, %s, { .%s = %s }\n" % (obj_type, flags,
                                                     tname, str(ko.data)))

        if obj_type == "K_OBJ_THREAD":
            # Clear this thread's bit: 0 = index already in use.
            idx = math.floor(ko.data / 8)
            bit = ko.data % 8
            thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)

    fp.write(footer)

    # Generate the array of already mapped thread indexes
    fp.write('\n')
    fp.write('Z_GENERIC_SECTION(.kobject_data.data) ')
    fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))

    for i in range(0, thread_max_bytes):
        fp.write(' 0x%x, ' % (thread_idx_map[i]))

    fp.write('};\n')
# Per-subsystem validation macro template; %(driver_lower)s and
# %(driver_upper)s are filled in by write_validation_output().
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
    """Write driver validation macros to *fp*.

    Emits the generic Z_SYSCALL_DRIVER_GEN macro plus one
    Z_SYSCALL_DRIVER_<SUBSYS> wrapper per known driver subsystem.
    """
    fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
    fp.write("#define DRIVER_VALIDATION_GEN_H\n")

    fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
		(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
		 Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")

    for subsystem in subsystems:
        base = subsystem.replace("_driver_api", "")
        fp.write(driver_macro_tpl % {
            "driver_lower": base.lower(),
            "driver_upper": base.upper(),
        })

    fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
    """Write the k_object type enumeration body to *fp*."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # Devices are emitted per-subsystem below, not as a core object.
        if kobj == "device":
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)

        fp.write("%s,\n" % kobject_to_enum(kobj))

        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        base = subsystem.replace("_driver_api", "").upper()
        fp.write("K_OBJ_DRIVER_%s,\n" % base)
def write_kobj_otype_output(fp):
    """Write otype_to_str() case statements to *fp*."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # Devices get per-subsystem cases below instead.
        if kobj == "device":
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)

        fp.write('case %s: ret = "%s"; break;\n' %
                 (kobject_to_enum(kobj), kobj))

        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        base = subsystem.replace("_driver_api", "")
        fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n'
                 % (base.upper(), base))
def write_kobj_size_output(fp):
    """Write obj_size_get() case statements to *fp*."""
    fp.write("/* Non device/stack objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # device handled by default case. Stacks are not currently handled,
        # if they eventually are it will be a special case.
        if kobj in {"device", STACK_TYPE}:
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)

        fp.write('case %s: ret = sizeof(struct %s); break;\n' %
                 (kobject_to_enum(kobj), kobj))

        if dep:
            fp.write("#endif\n")
def parse_subsystems_list_file(path):
    """Append the JSON-encoded list of subsystem names in *path* to the
    module-global subsystems list."""
    with open(path, "r") as fp:
        subsystems.extend(json.load(fp))
def parse_args():
    """Parse command-line arguments into the module-global ``args``.

    The VERBOSE environment variable forces --verbose on, which is
    convenient when this script runs buried inside a build system.
    """
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-k", "--kernel", required=False,
                        help="Input zephyr ELF binary")
    parser.add_argument(
        "-g", "--gperf-output", required=False,
        help="Output list of kernel object addresses for gperf use")
    parser.add_argument(
        "-V", "--validation-output", required=False,
        help="Output driver validation macros")
    parser.add_argument(
        "-K", "--kobj-types-output", required=False,
        help="Output k_object enum constants")
    parser.add_argument(
        "-S", "--kobj-otype-output", required=False,
        help="Output case statements for otype_to_str()")
    parser.add_argument(
        "-Z", "--kobj-size-output", required=False,
        help="Output case statements for obj_size_get()")
    parser.add_argument("-i", "--include-subsystem-list", required=False, action='append',
        help='''Specifies a file with a JSON encoded list of subsystem names to append to
        the driver subsystems list. Can be specified multiple times:
        -i file1 -i file2 ...''')

    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    args = parser.parse_args()
    if "VERBOSE" in os.environ:
        args.verbose = 1
def main():
    """Entry point: parse arguments and emit whichever outputs were asked for."""
    parse_args()

    if args.include_subsystem_list is not None:
        for list_file in args.include_subsystem_list:
            parse_subsystems_list_file(list_file)

    if args.gperf_output:
        assert args.kernel, "--kernel ELF required for --gperf-output"
        elf = ELFFile(open(args.kernel, "rb"))
        syms = get_symbols(elf)
        # Thread index bitfield capacity: 8 thread slots per byte.
        max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
        objs = find_kobjects(elf, syms)
        if not objs:
            sys.stderr.write("WARNING: zero kobject found in %s\n"
                             % args.kernel)

        if thread_counter > max_threads:
            # -(-x // 8) is ceiling division: smallest byte count that fits.
            sys.exit("Too many thread objects ({})\n"
                     "Increase CONFIG_MAX_THREAD_BYTES to {}"
                     .format(thread_counter, -(-thread_counter // 8)))

        with open(args.gperf_output, "w") as fp:
            write_gperf_table(fp, syms, objs, elf.little_endian,
                              syms["_static_kernel_objects_begin"],
                              syms["_static_kernel_objects_end"])

    if args.validation_output:
        with open(args.validation_output, "w") as fp:
            write_validation_output(fp)

    if args.kobj_types_output:
        with open(args.kobj_types_output, "w") as fp:
            write_kobj_types_output(fp)

    if args.kobj_otype_output:
        with open(args.kobj_otype_output, "w") as fp:
            write_kobj_otype_output(fp)

    if args.kobj_size_output:
        with open(args.kobj_size_output, "w") as fp:
            write_kobj_size_output(fp)


if __name__ == "__main__":
    main()
| 31.736634 | 116 | 0.613621 |
import sys
import argparse
import math
import os
import struct
import json
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
from collections import OrderedDict
# available in all configurations.
#
# - The second item is a boolean indicating whether it is permissible for
# the object to be located in user-accessible memory.
# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict([
("k_mem_slab", (None, False)),
("k_msgq", (None, False)),
("k_mutex", (None, False)),
("k_pipe", (None, False)),
("k_queue", (None, False)),
("k_poll_signal", (None, False)),
("k_sem", (None, False)),
("k_stack", (None, False)),
("k_thread", (None, False)),
("k_timer", (None, False)),
("z_thread_stack_element", (None, False)),
("device", (None, False)),
("sys_mutex", (None, True)),
("k_futex", (None, True))
])
def kobject_to_enum(kobj):
if kobj.startswith("k_") or kobj.startswith("z_"):
name = kobj[2:]
else:
name = kobj
return "K_OBJ_%s" % name.upper()
subsystems = [
# Editing the list is deprecated, add the __subsystem sentinal to your driver
# api declaration instead. e.x.
#
# __subsystem struct my_driver_api {
# ....
#};
]
def subsystem_to_enum(subsys):
return "K_OBJ_DRIVER_" + subsys[:-11].upper()
# --- debug stuff ---
scr = os.path.basename(sys.argv[0])
def debug(text):
if not args.verbose:
return
sys.stdout.write(scr + ": " + text + "\n")
def error(text):
sys.exit("%s ERROR: %s" % (scr, text))
def debug_die(die, text):
if 'DW_AT_decl_file' not in die.attributes:
abs_orig_val = die.attributes["DW_AT_abstract_origin"].value
offset = abs_orig_val + die.cu.cu_offset
for var in variables:
if var.offset == offset:
die = var
break
lp_header = die.dwarfinfo.line_program_for_CU(die.cu).header
files = lp_header["file_entry"]
includes = lp_header["include_directory"]
fileinfo = files[die.attributes["DW_AT_decl_file"].value - 1]
filename = fileinfo.name.decode("utf-8")
filedir = includes[fileinfo.dir_index - 1].decode("utf-8")
path = os.path.join(filedir, filename)
lineno = die.attributes["DW_AT_decl_line"].value
debug(str(die))
debug("File '%s', line %d:" % (path, lineno))
debug(" %s" % text)
# -- ELF processing
DW_OP_addr = 0x3
DW_OP_fbreg = 0x91
STACK_TYPE = "z_thread_stack_element"
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0
# Global type environment. Populated by pass 1.
type_env = {}
extern_env = {}
variables = []
class KobjectInstance:
def __init__(self, type_obj, addr):
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
self.addr = addr
self.type_obj = type_obj
# Type name determined later since drivers needs to look at the
# API struct address
self.type_name = None
if self.type_obj.name == "k_thread":
# Assign an ID for this thread object, used to track its
# permissions to other kernel objects
self.data = thread_counter
thread_counter = thread_counter + 1
elif self.type_obj.name == "sys_mutex":
self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
sys_mutex_counter += 1
elif self.type_obj.name == "k_futex":
self.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif self.type_obj.name == STACK_TYPE:
stack_counter += 1
else:
self.data = 0
class KobjectType:
def __init__(self, offset, name, size, api=False):
self.name = name
self.size = size
self.offset = offset
self.api = api
def __repr__(self):
return "<kobject %s>" % self.name
@staticmethod
def has_kobject():
return True
def get_kobjects(self, addr):
return {addr: KobjectInstance(self, addr)}
class ArrayType:
def __init__(self, offset, elements, member_type):
self.elements = elements
self.member_type = member_type
self.offset = offset
def __repr__(self):
return "<array of %d>" % self.member_type
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
# Stacks are arrays of _k_stack_element_t but we want to treat
# the whole array as one kernel object (a thread stack)
# Data value gets set to size of entire region
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
# An array of stacks appears as a multi-dimensional array.
# The last size is the size of each stack. We need to track
# each stack within the array, not as one huge stack object.
*dimensions, stacksize = self.elements
num_members = 1
for e in dimensions:
num_members = num_members * e
ret = {}
for i in range(num_members):
a = addr + (i * stacksize)
o = mt.get_kobjects(a)
o[a].data = stacksize
ret.update(o)
return ret
objs = {}
# Multidimensional array flattened out
num_members = 1
for e in self.elements:
num_members = num_members * e
for i in range(num_members):
objs.update(mt.get_kobjects(addr + (i * mt.size)))
return objs
class AggregateTypeMember:
def __init__(self, offset, member_name, member_type, member_offset):
self.member_name = member_name
self.member_type = member_type
if isinstance(member_offset, list):
# DWARF v2, location encoded as set of operations
# only "DW_OP_plus_uconst" with ULEB128 argument supported
if member_offset[0] == 0x23:
self.member_offset = member_offset[1] & 0x7f
for i in range(1, len(member_offset)-1):
if member_offset[i] & 0x80:
self.member_offset += (
member_offset[i+1] & 0x7f) << i*7
else:
raise Exception("not yet supported location operation (%s:%d:%d)" %
(self.member_name, self.member_type, member_offset[0]))
else:
self.member_offset = member_offset
def __repr__(self):
return "<member %s, type %d, offset %d>" % (
self.member_name, self.member_type, self.member_offset)
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
return mt.get_kobjects(addr + self.member_offset)
class ConstType:
def __init__(self, child_type):
self.child_type = child_type
def __repr__(self):
return "<const %d>" % self.child_type
def has_kobject(self):
if self.child_type not in type_env:
return False
return type_env[self.child_type].has_kobject()
def get_kobjects(self, addr):
return type_env[self.child_type].get_kobjects(addr)
class AggregateType:
def __init__(self, offset, name, size):
self.name = name
self.size = size
self.offset = offset
self.members = []
def add_member(self, member):
self.members.append(member)
def __repr__(self):
return "<struct %s, with %s>" % (self.name, self.members)
def has_kobject(self):
result = False
bad_members = []
for member in self.members:
if member.has_kobject():
result = True
else:
bad_members.append(member)
# Don't need to consider this again, just remove it
for bad_member in bad_members:
self.members.remove(bad_member)
return result
def get_kobjects(self, addr):
objs = {}
for member in self.members:
objs.update(member.get_kobjects(addr))
return objs
def die_get_spec(die):
if 'DW_AT_specification' not in die.attributes:
return None
spec_val = die.attributes["DW_AT_specification"].value
offset = spec_val + die.cu.cu_offset
return extern_env.get(offset)
def die_get_name(die):
if 'DW_AT_name' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_name"].value.decode("utf-8")
def die_get_type_offset(die):
if 'DW_AT_type' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_type"].value + die.cu.cu_offset
def die_get_byte_size(die):
if 'DW_AT_byte_size' not in die.attributes:
return 0
return die.attributes["DW_AT_byte_size"].value
def analyze_die_struct(die):
name = die_get_name(die) or "<anon>"
offset = die.offset
size = die_get_byte_size(die)
if not size:
return
if name in kobjects:
type_env[offset] = KobjectType(offset, name, size)
elif name in subsystems:
type_env[offset] = KobjectType(offset, name, size, api=True)
else:
at = AggregateType(offset, name, size)
type_env[offset] = at
for child in die.iter_children():
if child.tag != "DW_TAG_member":
continue
data_member_location = child.attributes.get("DW_AT_data_member_location")
if not data_member_location:
continue
child_type = die_get_type_offset(child)
member_offset = data_member_location.value
cname = die_get_name(child) or "<anon>"
m = AggregateTypeMember(child.offset, cname, child_type,
member_offset)
at.add_member(m)
return
def analyze_die_const(die):
type_offset = die_get_type_offset(die)
if not type_offset:
return
type_env[die.offset] = ConstType(type_offset)
def analyze_die_array(die):
type_offset = die_get_type_offset(die)
elements = []
for child in die.iter_children():
if child.tag != "DW_TAG_subrange_type":
continue
if "DW_AT_upper_bound" not in child.attributes:
continue
ub = child.attributes["DW_AT_upper_bound"]
if not ub.form.startswith("DW_FORM_data"):
continue
elements.append(ub.value + 1)
if not elements:
if type_offset in type_env.keys():
mt = type_env[type_offset]
if mt.has_kobject():
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
elements.append(1)
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
else:
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
def analyze_typedef(die):
type_offset = die_get_type_offset(die)
if type_offset not in type_env.keys():
return
type_env[die.offset] = type_env[type_offset]
def unpack_pointer(elf, data, offset):
endian_code = "<" if elf.little_endian else ">"
if elf.elfclass == 32:
size_code = "I"
size = 4
else:
size_code = "Q"
size = 8
return struct.unpack(endian_code + size_code,
data[offset:offset + size])[0]
def addr_deref(elf, addr):
for section in elf.iter_sections():
start = section['sh_addr']
end = start + section['sh_size']
if start <= addr < end:
data = section.data()
offset = addr - start
return unpack_pointer(elf, data, offset)
return 0
def device_get_api_addr(elf, addr):
offset = 8 if elf.elfclass == 32 else 16
return addr_deref(elf, addr + offset)
def find_kobjects(elf, syms):
    """Scan the ELF's DWARF info and return every kernel object instance.

    Returns an OrderedDict mapping object address -> kernel object record,
    sorted by address.  Exits the process if the ELF has no DWARF info.
    """
    if not elf.has_dwarf_info():
        sys.exit("ELF file has no DWARF information")

    app_smem_start = syms["_app_smem_start"]
    app_smem_end = syms["_app_smem_end"]

    di = elf.get_dwarf_info()

    # Pass 1: populate type_env from the type DIEs and collect variable DIEs.
    for CU in di.iter_CUs():
        for die in CU.iter_DIEs():
            if die.tag == "DW_TAG_structure_type":
                analyze_die_struct(die)
            elif die.tag == "DW_TAG_const_type":
                analyze_die_const(die)
            elif die.tag == "DW_TAG_array_type":
                analyze_die_array(die)
            elif die.tag == "DW_TAG_typedef":
                analyze_typedef(die)
            elif die.tag == "DW_TAG_variable":
                variables.append(die)

    # Drop type entries that contain no kernel object at all.
    bad_offsets = []
    for offset, type_object in type_env.items():
        if not type_object.has_kobject():
            bad_offsets.append(offset)
    for offset in bad_offsets:
        del type_env[offset]

    all_objs = {}

    # Pass 2: resolve each variable of a kobject-bearing type to concrete
    # addresses.
    for die in variables:
        name = die_get_name(die)
        if not name:
            continue
        if name.startswith("__init_sys_init"):
            continue
        type_offset = die_get_type_offset(die)
        if type_offset not in type_env:
            continue
        if "DW_AT_declaration" in die.attributes:
            # Extern declaration only; remember the DIE for later resolution.
            extern_env[die.offset] = die
            continue
        if "DW_AT_location" not in die.attributes:
            debug_die(die,
                      "No location information for object '%s'; possibly stack allocated"
                      % name)
            continue
        loc = die.attributes["DW_AT_location"]
        if loc.form != "DW_FORM_exprloc" and \
           loc.form != "DW_FORM_block1":
            debug_die(die, "kernel object '%s' unexpected location format" %
                      name)
            continue
        opcode = loc.value[0]
        if opcode != DW_OP_addr:
            if opcode == DW_OP_fbreg:
                debug_die(die, "kernel object '%s' found on stack" % name)
            else:
                debug_die(die,
                          "kernel object '%s' unexpected exprloc opcode %s" %
                          (name, hex(opcode)))
            continue
        # DW_OP_addr payload: 4-byte little-endian absolute address.
        addr = (loc.value[1] | (loc.value[2] << 8) |
                (loc.value[3] << 16) | (loc.value[4] << 24))
        if addr == 0:
            # Address 0 means the object was never actually placed.
            continue
        type_obj = type_env[type_offset]
        objs = type_obj.get_kobjects(addr)
        all_objs.update(objs)
        debug("symbol '%s' at %s contains %d object(s)"
              % (name, hex(addr), len(objs)))

    # Pass 3: filter and classify; device instances are resolved to their
    # subsystem via the API struct they point at.
    ret = {}
    for addr, ko in all_objs.items():
        if ko.type_obj.api:
            continue
        _, user_ram_allowed = kobjects[ko.type_obj.name]
        # NOTE(review): 'die' and 'name' here are leftovers from the loop
        # above, so this message reports the *last* scanned variable, not
        # necessarily the offending object -- confirm before relying on it.
        if not user_ram_allowed and app_smem_start <= addr < app_smem_end:
            debug_die(die, "object '%s' found in invalid location %s"
                      % (name, hex(addr)))
            continue
        if ko.type_obj.name != "device":
            # Not a device struct so we immediately know its type
            ko.type_name = kobject_to_enum(ko.type_obj.name)
            ret[addr] = ko
            continue
        # Device struct. Need to get the address of its API struct,
        # if it has one.
        apiaddr = device_get_api_addr(elf, addr)
        if apiaddr not in all_objs:
            if apiaddr == 0:
                debug("device instance at 0x%x has no associated subsystem"
                      % addr)
            else:
                debug("device instance at 0x%x has unknown API 0x%x"
                      % (addr, apiaddr))
            # API struct does not correspond to a known subsystem, skip it
            continue
        apiobj = all_objs[apiaddr]
        ko.type_name = subsystem_to_enum(apiobj.type_obj.name)
        ret[addr] = ko

    debug("found %d kernel object instances total" % len(ret))

    # 1. Before python 3.7 dict order is not guaranteed.  With Python
    #    3.5 it doesn't seem random with *integer* keys but can't
    #    rely on that.
    # 2. OrderedDict means _insertion_ order, so not enough because
    #    built from other (random!) dicts: need to _sort_ first.
    # 3. Sorting memory address looks good.
    return OrderedDict(sorted(ret.items()))
def get_symbols(elf):
    """Return a name -> st_value mapping from the ELF's first symbol table.

    Raises LookupError when the ELF carries no symbol table section.
    """
    for sec in elf.iter_sections():
        if not isinstance(sec, SymbolTableSection):
            continue
        symbols = {}
        for sym in sec.iter_symbols():
            symbols[sym.name] = sym.entry.st_value
        return symbols
    raise LookupError("Could not find symbol table")
# -- GPERF generation logic
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct z_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct z_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct z_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
    """Write the complete gperf input describing all kernel objects to *fp*.

    objs maps object address -> kernel object record; static_begin/static_end
    bound the range of build-time-initialized objects.
    """
    fp.write(header)

    # Emit backing storage for the kernel-owned sys_mutex instances.
    if sys_mutex_counter != 0:
        fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
                 % sys_mutex_counter)
        for i in range(sys_mutex_counter):
            fp.write("Z_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
            if i != sys_mutex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    # Same for futex bookkeeping data.
    if futex_counter != 0:
        fp.write("static struct z_futex_data futex_data[%d] = {\n"
                 % futex_counter)
        for i in range(futex_counter):
            fp.write("Z_FUTEX_DATA_INITIALIZER(futex_data[%d])" % i)
            if i != futex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    # Which union member of z_object's data field each object type uses.
    metadata_names = {
        "K_OBJ_THREAD" : "thread_id",
        "K_OBJ_SYS_MUTEX" : "mutex",
        "K_OBJ_FUTEX" : "futex_data"
    }

    if "CONFIG_GEN_PRIV_STACKS" in syms:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
        if stack_counter != 0:
            fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
                     " __aligned(Z_PRIVILEGE_STACK_ALIGN)"
                     " priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
                     % stack_counter)

            fp.write("static struct z_stack_data stack_data[%d] = {\n"
                     % stack_counter)
            counter = 0
            for _, ko in objs.items():
                if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
                    continue

                # ko.data currently has the stack size. fetch the value to
                # populate the appropriate entry in stack_data, and put
                # a reference to the entry in stack_data into the data value
                # instead
                size = ko.data
                ko.data = "&stack_data[%d]" % counter
                fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
                         % (size, counter))
                if counter != (stack_counter - 1):
                    fp.write(",")
                fp.write("\n")
                counter += 1
            fp.write("};\n")
    else:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"

    fp.write("%%\n")

    # Setup variables for mapping thread indexes
    thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
    thread_idx_map = {}

    # Start with every thread-index bit marked free (all ones).
    for i in range(0, thread_max_bytes):
        thread_idx_map[i] = 0xFF

    for obj_addr, ko in objs.items():
        obj_type = ko.type_name
        # pre-initialized objects fall within this memory range, they are
        # either completely initialized at build time, or done automatically
        # at boot during some PRE_KERNEL_* phase
        initialized = static_begin <= obj_addr < static_end
        is_driver = obj_type.startswith("K_OBJ_DRIVER_")

        if "CONFIG_64BIT" in syms:
            format_code = "Q"
        else:
            format_code = "I"

        if little_endian:
            endian = "<"
        else:
            endian = ">"

        # The gperf "key" is the object's address, rendered as a binary
        # string of escaped bytes.
        byte_str = struct.pack(endian + format_code, obj_addr)
        fp.write("\"")
        for byte in byte_str:
            val = "\\x%02x" % byte
            fp.write(val)

        flags = "0"
        if initialized:
            flags += " | K_OBJ_FLAG_INITIALIZED"
        if is_driver:
            flags += " | K_OBJ_FLAG_DRIVER"

        if ko.type_name in metadata_names:
            tname = metadata_names[ko.type_name]
        else:
            tname = "unused"

        fp.write("\", {}, %s, %s, { .%s = %s }\n" % (obj_type, flags,
                                                     tname, str(ko.data)))

        if obj_type == "K_OBJ_THREAD":
            # Clear this thread's bit in the free-index bitmap.
            idx = math.floor(ko.data / 8)
            bit = ko.data % 8
            thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)

    fp.write(footer)

    # Generate the array of already mapped thread indexes
    fp.write('\n')
    fp.write('Z_GENERIC_SECTION(.kobject_data.data) ')
    fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
    for i in range(0, thread_max_bytes):
        fp.write(' 0x%x, ' % (thread_idx_map[i]))
    fp.write('};\n')
# Template for the per-driver syscall validation macro written into the
# generated driver validation header.
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
    """Emit the driver validation header (Z_SYSCALL_DRIVER_* macros) to *fp*."""
    fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
    fp.write("#define DRIVER_VALIDATION_GEN_H\n")

    fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
	(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
	 Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")

    # One Z_SYSCALL_DRIVER_<NAME> macro per known driver subsystem.
    for subsys in subsystems:
        base = subsys.replace("_driver_api", "")
        fp.write(driver_macro_tpl % {
            "driver_lower": base.lower(),
            "driver_upper": base.upper(),
        })

    fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
    """Emit enum k_object member lines for core objects and driver subsystems."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        if kobj == "device":
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write("%s,\n" % kobject_to_enum(kobj))
        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        fp.write("K_OBJ_DRIVER_%s,\n"
                 % subsystem.replace("_driver_api", "").upper())
def write_kobj_otype_output(fp):
    """Emit otype_to_str() case lines mapping enum values to readable names."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        if kobj == "device":
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write('case %s: ret = "%s"; break;\n' %
                 (kobject_to_enum(kobj), kobj))
        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        base = subsystem.replace("_driver_api", "")
        fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
            base.upper(),
            base
        ))
def write_kobj_size_output(fp):
    """Emit obj_size_get() case lines for sizeable core kernel objects."""
    fp.write("/* Non device/stack objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # device handled by default case. Stacks are not currently handled,
        # if they eventually are it will be a special case.
        if kobj in {"device", STACK_TYPE}:
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write('case %s: ret = sizeof(struct %s); break;\n' %
                 (kobject_to_enum(kobj), kobj))
        if dep:
            fp.write("#endif\n")
def parse_subsystems_list_file(path):
    """Append the JSON-encoded list of subsystem names in *path* to subsystems."""
    with open(path, "r") as fp:
        subsystems.extend(json.load(fp))
def parse_args():
    """Parse command-line options into the module-global ``args``."""
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-k", "--kernel", required=False,
                        help="Input zephyr ELF binary")
    parser.add_argument(
        "-g", "--gperf-output", required=False,
        help="Output list of kernel object addresses for gperf use")
    parser.add_argument(
        "-V", "--validation-output", required=False,
        help="Output driver validation macros")
    parser.add_argument(
        "-K", "--kobj-types-output", required=False,
        help="Output k_object enum constants")
    parser.add_argument(
        "-S", "--kobj-otype-output", required=False,
        help="Output case statements for otype_to_str()")
    parser.add_argument(
        "-Z", "--kobj-size-output", required=False,
        help="Output case statements for obj_size_get()")
    parser.add_argument("-i", "--include-subsystem-list", required=False, action='append',
                        help='''Specifies a file with a JSON encoded list of subsystem names to append to
        the driver subsystems list. Can be specified multiple times:
        -i file1 -i file2 ...''')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    args = parser.parse_args()
    # Environment variable override so build systems can force verbosity.
    if "VERBOSE" in os.environ:
        args.verbose = 1
def main():
    """Entry point: parse args and emit whichever outputs were requested."""
    parse_args()

    if args.include_subsystem_list is not None:
        for list_file in args.include_subsystem_list:
            parse_subsystems_list_file(list_file)

    if args.gperf_output:
        assert args.kernel, "--kernel ELF required for --gperf-output"
        elf = ELFFile(open(args.kernel, "rb"))
        syms = get_symbols(elf)
        max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
        objs = find_kobjects(elf, syms)
        if not objs:
            sys.stderr.write("WARNING: zero kobject found in %s\n"
                             % args.kernel)

        if thread_counter > max_threads:
            # -(-x // 8) is ceiling division: bytes needed for x thread bits.
            sys.exit("Too many thread objects ({})\n"
                     "Increase CONFIG_MAX_THREAD_BYTES to {}"
                     .format(thread_counter, -(-thread_counter // 8)))

        with open(args.gperf_output, "w") as fp:
            write_gperf_table(fp, syms, objs, elf.little_endian,
                              syms["_static_kernel_objects_begin"],
                              syms["_static_kernel_objects_end"])

    if args.validation_output:
        with open(args.validation_output, "w") as fp:
            write_validation_output(fp)

    if args.kobj_types_output:
        with open(args.kobj_types_output, "w") as fp:
            write_kobj_types_output(fp)

    if args.kobj_otype_output:
        with open(args.kobj_otype_output, "w") as fp:
            write_kobj_otype_output(fp)

    if args.kobj_size_output:
        with open(args.kobj_size_output, "w") as fp:
            write_kobj_size_output(fp)
# Script entry point.
if __name__ == "__main__":
    main()
| true | true |
79005094b5d9f0d86599dc6eea29e4b5f8533ad4 | 7,204 | py | Python | bcpandas/utils.py | alon-r/bcpandas | 73ee5a2228024ec1894e8c87986360a7eea3cc14 | [
"MIT"
] | null | null | null | bcpandas/utils.py | alon-r/bcpandas | 73ee5a2228024ec1894e8c87986360a7eea3cc14 | [
"MIT"
] | null | null | null | bcpandas/utils.py | alon-r/bcpandas | 73ee5a2228024ec1894e8c87986360a7eea3cc14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
    sql_item: str,
    direction: str,
    flat_file: str,
    creds,
    sql_type: str = "table",
    schema: str = "dbo",
    format_file_path: str = None,
    batch_size: int = None,
    col_delimiter: str = None,
    row_terminator: str = None,
    bcp_path: Union[str, Path] = None,
    error_file_path: str = None
):
    """
    Run the BCP utility to move data between SQL Server and a flat file.

    See https://docs.microsoft.com/en-us/sql/tools/bcp-utility

    Raises
    ------
    BCPandasValueError
        If `direction` is invalid or incompatible with `sql_type`.
    BCPandasException
        If the bcp process exits with a non-zero return code.
    """
    combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}

    direc = direction.lower()

    # validation
    if direc not in DIRECTIONS:
        raise BCPandasValueError(
            f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
        )
    if direc not in combos[sql_type]:
        raise BCPandasValueError(
            f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
        )

    # auth
    if creds.with_krb_auth:
        auth = ["-T"]
    else:
        auth = ["-U", creds.username, "-P", creds.password]

    # prepare SQL item string
    if sql_type == QUERY:
        # remove newlines for queries, otherwise messes up BCP
        sql_item_string = quote_this("".join(sql_item.splitlines()))
    else:
        sql_item_string = f"{schema}.{sql_item}"

    # construct BCP command
    bcp_command = [
        "bcp" if bcp_path is None else quote_this(str(bcp_path)),
        sql_item_string,
        direc,
        flat_file,
        "-S",
        creds.server,
        "-d",
        creds.database,
        "-q",  # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
    ] + auth

    # BUGFIX: only pass -e when an error file path was actually supplied.
    # Previously "-e", error_file_path were appended unconditionally, so the
    # default error_file_path=None ended up inside the command list and broke
    # the subprocess invocation.
    if error_file_path:
        bcp_command += ["-e", error_file_path]

    if batch_size:
        bcp_command += ["-b", str(batch_size)]

    # formats
    if direc == IN:
        bcp_command += ["-f", format_file_path]
    elif direc in (OUT, QUERYOUT):
        bcp_command += [
            "-c",  # marking as character data, not Unicode (maybe make as param?)
            quote_this(
                f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
            ),
            quote_this(
                f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
            ),
        ]

    # execute, never logging the password
    bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
    logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
    ret_code = run_cmd(bcp_command)
    if ret_code:
        raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
    """Return the full path of a not-yet-created temporary file.

    The basename is 21 random alphanumeric characters; nothing is
    created on disk.
    """
    alphabet = string.ascii_letters + string.digits
    basename = "".join(random.choices(alphabet, k=21))
    return os.path.join(tempfile.gettempdir(), basename)
def _escape(input_string: str) -> str:
"""
Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
"""
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
def build_format_file(
    df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
    """
    Creates the non-xml SQL format file. Puts 4 spaces between each section.

    See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
    for the specification of the file.

    # TODO add params/options to control:
    #   - the char type (not just SQLCHAR),

    Parameters
    ----------
    df : pandas DataFrame
    delimiter : a valid delimiter character
    db_cols_order : dict, optional
        Dict of {database column name -> ordinal position of the column}.
        Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first columns is 1, second is 2, etc.
        Only needed if the order of the columns in the dataframe doesn't match the database.
        NOTE(review): when given, it must cover every dataframe column or the
        lookup below raises KeyError.

    Returns
    -------
    A string containing the format file
    """
    _space = " " * 4
    format_file_str = f"9.0\n{len(df.columns)}\n"  # Version and Number of columns
    for col_num, col_name in enumerate(df.columns, start=1):
        # last col gets a newline sep
        _delim = delimiter if col_num != len(df.columns) else NEWLINE
        _line = _space.join(
            [
                str(col_num),  # Host file field order
                SQLCHAR,  # Host file data type
                str(0),  # Prefix length
                str(0),  # Host file data length
                f'"{_escape(_delim)}"',  # Terminator (see note below)
                str(
                    col_num if not db_cols_order else db_cols_order[str(col_name)]
                ),  # Server column order
                str(col_name),  # Server column name, optional as long as not blank
                sql_collation,  # Column collation
                "\n",
            ]
        )
        format_file_str += _line
    # FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
    # "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
    return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
    """
    OS-safe way to quote a string.

    Returns the string with quotes around it.
    On Windows ~~it's double quotes~~ we skip quoting,
    on Linux it's single quotes.

    Non-string inputs are returned unchanged.
    """
    # NOTE(review): the 'skip' parameter is accepted but never used; confirm
    # whether any caller passes it before removing it from the signature.
    if isinstance(this, str):
        if IS_WIN32:
            return this  # TODO maybe change?
        else:
            return shlex.quote(this)
    else:
        return this
def run_cmd(cmd: List[str]) -> int:
    """
    Runs the given command.

    Prints STDOUT in real time, prints STDERR when command is complete,
    and logs both STDOUT and STDERR.

    Parameters
    ----------
    cmd : list of str
        The command to run, to be submitted to `subprocess.Popen()`

    Returns
    -------
    The exit code of the command
    """
    if IS_WIN32:
        with_shell = False
    else:
        # NOTE(review): on POSIX the list is joined into one shell string and
        # run with shell=True -- arguments containing shell metacharacters
        # must already be quoted (see quote_this); confirm callers do this.
        with_shell = True
        cmd = " ".join(cmd)  # type: ignore
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, encoding="utf-8", errors="utf-8", shell=with_shell,)
    # live stream STDOUT
    while True:
        outs = proc.stdout.readline()
        if outs:
            print(outs, end="")
            logger.info(outs)
        if proc.poll() is not None and outs == "":
            break
    # NOTE(review): stderr is only drained after the process exits; a child
    # that fills the stderr pipe buffer could deadlock here.
    errs = proc.stderr.readlines()
    if errs:
        print(errs, end="")
        logger.error(errs)
    return proc.returncode
| 29.048387 | 119 | 0.600222 |
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
sql_item: str,
direction: str,
flat_file: str,
creds,
sql_type: str = "table",
schema: str = "dbo",
format_file_path: str = None,
batch_size: int = None,
col_delimiter: str = None,
row_terminator: str = None,
bcp_path: Union[str, Path] = None,
error_file_path: str = None
):
combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
direc = direction.lower()
if direc not in DIRECTIONS:
raise BCPandasValueError(
f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
)
if direc not in combos[sql_type]:
raise BCPandasValueError(
f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
)
if creds.with_krb_auth:
auth = ["-T"]
else:
auth = ["-U", creds.username, "-P", creds.password]
if sql_type == QUERY:
sql_item_string = quote_this("".join(sql_item.splitlines()))
else:
sql_item_string = f"{schema}.{sql_item}"
bcp_command = [
"bcp" if bcp_path is None else quote_this(str(bcp_path)),
sql_item_string,
direc,
flat_file,
"-S",
creds.server,
"-d",
creds.database,
"-q",
"-e",
error_file_path
] + auth
if batch_size:
bcp_command += ["-b", str(batch_size)]
if direc == IN:
bcp_command += ["-f", format_file_path]
elif direc in (OUT, QUERYOUT):
bcp_command += [
"-c",
quote_this(
f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
),
quote_this(
f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
),
]
bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
ret_code = run_cmd(bcp_command)
if ret_code:
raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
tmp_dir = tempfile.gettempdir()
file_path = os.path.join(
tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21))
)
return file_path
def _escape(input_string: str) -> str:
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
def build_format_file(
df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
_space = " " * 4
format_file_str = f"9.0\n{len(df.columns)}\n"
for col_num, col_name in enumerate(df.columns, start=1):
_delim = delimiter if col_num != len(df.columns) else NEWLINE
_line = _space.join(
[
str(col_num),
SQLCHAR,
str(0),
str(0),
f'"{_escape(_delim)}"',
str(
col_num if not db_cols_order else db_cols_order[str(col_name)]
),
str(col_name),
sql_collation,
"\n",
]
)
format_file_str += _line
return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
if isinstance(this, str):
if IS_WIN32:
return this
else:
return shlex.quote(this)
else:
return this
def run_cmd(cmd: List[str]) -> int:
if IS_WIN32:
with_shell = False
else:
with_shell = True
cmd = " ".join(cmd)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, encoding="utf-8", errors="utf-8", shell=with_shell,)
while True:
outs = proc.stdout.readline()
if outs:
print(outs, end="")
logger.info(outs)
if proc.poll() is not None and outs == "":
break
errs = proc.stderr.readlines()
if errs:
print(errs, end="")
logger.error(errs)
return proc.returncode
| true | true |
790050f22facf78354c0fc3e85f0e3ce9c8ea649 | 4,366 | py | Python | ansys_corba/omniORB/COS/CosObjectIdentity_idl.py | pyansys/ansys_corba | 91e4e66a48143c827f56cf1113145bb48d5f4d6a | [
"MIT"
] | 6 | 2021-04-26T09:25:48.000Z | 2022-03-26T05:09:38.000Z | ansys_corba/omniORB/COS/CosObjectIdentity_idl.py | pyansys/ansys_corba | 91e4e66a48143c827f56cf1113145bb48d5f4d6a | [
"MIT"
] | 3 | 2022-03-14T08:17:21.000Z | 2022-03-17T20:07:23.000Z | ansys_corba/omniORB/COS/CosObjectIdentity_idl.py | pyansys/pymapdl-corba | 91e4e66a48143c827f56cf1113145bb48d5f4d6a | [
"MIT"
] | 1 | 2020-11-11T11:10:19.000Z | 2020-11-11T11:10:19.000Z | # Python stubs generated by omniidl from /tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
#
# Start of module "CosObjectIdentity"
#
__name__ = "CosObjectIdentity"
_0_CosObjectIdentity = omniORB.openModule("CosObjectIdentity", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
_0_CosObjectIdentity__POA = omniORB.openModule("CosObjectIdentity__POA", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
# typedef ... ObjectIdentifier
class ObjectIdentifier:
_NP_RepositoryId = "IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosObjectIdentity.ObjectIdentifier = ObjectIdentifier
_0_CosObjectIdentity._d_ObjectIdentifier = omniORB.tcInternal.tv_ulong
_0_CosObjectIdentity._ad_ObjectIdentifier = (omniORB.tcInternal.tv_alias, ObjectIdentifier._NP_RepositoryId, "ObjectIdentifier", omniORB.tcInternal.tv_ulong)
_0_CosObjectIdentity._tc_ObjectIdentifier = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._ad_ObjectIdentifier)
omniORB.registerType(ObjectIdentifier._NP_RepositoryId, _0_CosObjectIdentity._ad_ObjectIdentifier, _0_CosObjectIdentity._tc_ObjectIdentifier)
del ObjectIdentifier
# interface IdentifiableObject
_0_CosObjectIdentity._d_IdentifiableObject = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0", "IdentifiableObject")
omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"] = _0_CosObjectIdentity._d_IdentifiableObject
_0_CosObjectIdentity.IdentifiableObject = omniORB.newEmptyClass()
class IdentifiableObject :
_NP_RepositoryId = _0_CosObjectIdentity._d_IdentifiableObject[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosObjectIdentity.IdentifiableObject = IdentifiableObject
_0_CosObjectIdentity._tc_IdentifiableObject = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._d_IdentifiableObject)
omniORB.registerType(IdentifiableObject._NP_RepositoryId, _0_CosObjectIdentity._d_IdentifiableObject, _0_CosObjectIdentity._tc_IdentifiableObject)
# IdentifiableObject operations and attributes
IdentifiableObject._d__get_constant_random_id = ((),(omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"],),None)
IdentifiableObject._d_is_identical = ((omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"], ), (omniORB.tcInternal.tv_boolean, ), None)
# IdentifiableObject object reference
class _objref_IdentifiableObject (CORBA.Object):
_NP_RepositoryId = IdentifiableObject._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_constant_random_id(self, *args):
return self._obj.invoke("_get_constant_random_id", _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, args)
constant_random_id = property(_get_constant_random_id)
def is_identical(self, *args):
return self._obj.invoke("is_identical", _0_CosObjectIdentity.IdentifiableObject._d_is_identical, args)
omniORB.registerObjref(IdentifiableObject._NP_RepositoryId, _objref_IdentifiableObject)
_0_CosObjectIdentity._objref_IdentifiableObject = _objref_IdentifiableObject
del IdentifiableObject, _objref_IdentifiableObject
# IdentifiableObject skeleton
__name__ = "CosObjectIdentity__POA"
class IdentifiableObject (PortableServer.Servant):
_NP_RepositoryId = _0_CosObjectIdentity.IdentifiableObject._NP_RepositoryId
_omni_op_d = {"_get_constant_random_id": _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, "is_identical": _0_CosObjectIdentity.IdentifiableObject._d_is_identical}
IdentifiableObject._omni_skeleton = IdentifiableObject
_0_CosObjectIdentity__POA.IdentifiableObject = IdentifiableObject
omniORB.registerSkeleton(IdentifiableObject._NP_RepositoryId, IdentifiableObject)
del IdentifiableObject
__name__ = "CosObjectIdentity"
#
# End of module "CosObjectIdentity"
#
__name__ = "CosObjectIdentity_idl"
_exported_modules = ( "CosObjectIdentity", )
# The end.
| 43.227723 | 185 | 0.830508 |
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
__name__ = "CosObjectIdentity"
_0_CosObjectIdentity = omniORB.openModule("CosObjectIdentity", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
_0_CosObjectIdentity__POA = omniORB.openModule("CosObjectIdentity__POA", r"/tmp/corba/omni/share/idl/omniORB/COS/CosObjectIdentity.idl")
class ObjectIdentifier:
_NP_RepositoryId = "IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosObjectIdentity.ObjectIdentifier = ObjectIdentifier
_0_CosObjectIdentity._d_ObjectIdentifier = omniORB.tcInternal.tv_ulong
_0_CosObjectIdentity._ad_ObjectIdentifier = (omniORB.tcInternal.tv_alias, ObjectIdentifier._NP_RepositoryId, "ObjectIdentifier", omniORB.tcInternal.tv_ulong)
_0_CosObjectIdentity._tc_ObjectIdentifier = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._ad_ObjectIdentifier)
omniORB.registerType(ObjectIdentifier._NP_RepositoryId, _0_CosObjectIdentity._ad_ObjectIdentifier, _0_CosObjectIdentity._tc_ObjectIdentifier)
del ObjectIdentifier
_0_CosObjectIdentity._d_IdentifiableObject = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0", "IdentifiableObject")
omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"] = _0_CosObjectIdentity._d_IdentifiableObject
_0_CosObjectIdentity.IdentifiableObject = omniORB.newEmptyClass()
class IdentifiableObject :
_NP_RepositoryId = _0_CosObjectIdentity._d_IdentifiableObject[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosObjectIdentity.IdentifiableObject = IdentifiableObject
_0_CosObjectIdentity._tc_IdentifiableObject = omniORB.tcInternal.createTypeCode(_0_CosObjectIdentity._d_IdentifiableObject)
omniORB.registerType(IdentifiableObject._NP_RepositoryId, _0_CosObjectIdentity._d_IdentifiableObject, _0_CosObjectIdentity._tc_IdentifiableObject)
IdentifiableObject._d__get_constant_random_id = ((),(omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/ObjectIdentifier:1.0"],),None)
IdentifiableObject._d_is_identical = ((omniORB.typeMapping["IDL:omg.org/CosObjectIdentity/IdentifiableObject:1.0"], ), (omniORB.tcInternal.tv_boolean, ), None)
class _objref_IdentifiableObject (CORBA.Object):
_NP_RepositoryId = IdentifiableObject._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def _get_constant_random_id(self, *args):
return self._obj.invoke("_get_constant_random_id", _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, args)
constant_random_id = property(_get_constant_random_id)
def is_identical(self, *args):
return self._obj.invoke("is_identical", _0_CosObjectIdentity.IdentifiableObject._d_is_identical, args)
omniORB.registerObjref(IdentifiableObject._NP_RepositoryId, _objref_IdentifiableObject)
_0_CosObjectIdentity._objref_IdentifiableObject = _objref_IdentifiableObject
del IdentifiableObject, _objref_IdentifiableObject
__name__ = "CosObjectIdentity__POA"
class IdentifiableObject (PortableServer.Servant):
_NP_RepositoryId = _0_CosObjectIdentity.IdentifiableObject._NP_RepositoryId
_omni_op_d = {"_get_constant_random_id": _0_CosObjectIdentity.IdentifiableObject._d__get_constant_random_id, "is_identical": _0_CosObjectIdentity.IdentifiableObject._d_is_identical}
IdentifiableObject._omni_skeleton = IdentifiableObject
_0_CosObjectIdentity__POA.IdentifiableObject = IdentifiableObject
omniORB.registerSkeleton(IdentifiableObject._NP_RepositoryId, IdentifiableObject)
del IdentifiableObject
__name__ = "CosObjectIdentity"
__name__ = "CosObjectIdentity_idl"
_exported_modules = ( "CosObjectIdentity", )
| true | true |
7900515320c3b3319c03f61841dc3f24a082e7f3 | 12,476 | py | Python | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | src/lpb.py | RobbinBouwmeester/LIT | 0516a69fbf1b8e9976524e0c243f82de041df544 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2017 Robbin Bouwmeester
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
__author__ = "Robbin Bouwmeester"
__copyright__ = "Copyright 2017"
__credits__ = ["Robbin Bouwmeester"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Robbin Bouwmeester"
__email__ = "Robbin.bouwmeester@ugent.be"
__status__ = "nightly funzies"
import pandas as pd
from itertools import groupby
import logging
class LipidBLAST_entry():
    """A single spectral-library record: one lipid in one ionization state.

    Holds the identifying name, adduct, precursor mass, ionized chemical
    formula, the MS2 peak list and the parsed acyl composition.
    """

    def __init__(self,
                 name="",
                 ion="",
                 mw=0.0,
                 chem_form="",
                 num_ms2_peaks=0,
                 f_acyl_lengths=None,
                 unsats=None,
                 ms2=None):
        """Store the record fields.

        Parameters
        ----------
        name : str
            Full lipid name as read from the library file.
        ion : str
            Adduct / ionization label, e.g. "[M-H]-".
        mw : float
            Precursor m/z of the entry.
        chem_form : str
            Ionized chemical formula string.
        num_ms2_peaks : int
            Number of MS2 peaks declared by the library record.
        f_acyl_lengths : list of int, optional
            Acyl chain lengths; defaults to a fresh empty list.
        unsats : list of int, optional
            Unsaturation counts per chain; defaults to a fresh empty list.
        ms2 : list of [float, float, str], optional
            MS2 peaks as (m/z, intensity, annotation); defaults to a fresh
            empty list.

        Fix: the list parameters previously used mutable default arguments
        (``[]``) assigned directly to ``self``, so every instance created
        with defaults shared (and could corrupt) the same three lists.
        """
        self.name = name
        self.ion = ion
        self.mw = mw
        self.chem_form = chem_form
        self.num_ms2_peaks = num_ms2_peaks
        self.ms2 = [] if ms2 is None else ms2
        self.f_acyl_lengths = [] if f_acyl_lengths is None else f_acyl_lengths
        self.unsats = [] if unsats is None else unsats

    def __str__(self):
        """Render a human-readable summary including all MS2 peaks."""
        ret_string = []
        ret_string.append("================")
        ret_string.append("")
        ret_string.append("Lipid: %s" % (self.name))
        ret_string.append("MW: %s" % (self.mw))
        ret_string.append("Formula: %s" % (self.chem_form))
        ret_string.append("")
        for f in self.ms2:
            ret_string.append("%s\t%s\t%s" % (f[0], f[1], f[2]))
        ret_string.append("")
        ret_string.append("================")
        return("\n".join(ret_string))
class LipidBLAST():
    """In-memory LipidBLAST MS/MS spectral library.

    Parses LipidBlast ``.msp`` files into LipidBLAST_entry objects, filters
    them (adduct, lipid class, minimum acyl length, lyso species) and indexes
    them by binned precursor mass for fast precursor lookups.
    """

    def __init__(self,
                 f_names=["LipidBlast-pos.msp","LipidBlast-neg.msp"],
                 min_acyl_length=10,
                 exclude_lyso=False,
                 include_ions=["[M-H]-"],
                 include_class=["PE","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro"],
                 aggregate_acyls=False,
                 use_simplified_names=True,
                 dalt_diff_lookup_bin=1):
        """Read and index the given .msp files.

        Parameters
        ----------
        f_names : list of str
            .msp files to parse; pass ``[]`` to create an empty library.
        min_acyl_length : int
            Entries with any acyl chain shorter than this (and non-zero)
            are skipped.
        exclude_lyso : bool
            If True, skip lyso species (names containing "/0:0").
        include_ions : list of str
            Only entries with one of these adducts are kept.
        include_class : list of str
            Only entries whose name contains one of these class strings
            are kept.
        aggregate_acyls : bool
            If True, simplified names sum the chains, e.g. "PC(34:1)"
            instead of "PC(16:0/18:1)".
        use_simplified_names : bool
            Kept for API compatibility; keys are always simplified names.
        dalt_diff_lookup_bin : int
            Width (Da) of the precursor-mass bins used for lookups.

        NOTE: the list defaults are treated as read-only and never mutated.
        """
        self.f_names = f_names
        self.min_acyl_length = min_acyl_length
        self.exclude_lyso = exclude_lyso
        self.include_ions = include_ions
        self.include_class = include_class
        self.use_simplified_names = use_simplified_names
        self.dalt_diff_lookup_bin = dalt_diff_lookup_bin
        self.aggregate_acyls = aggregate_acyls

        self.lpb_dict = {}          # "simplified_name|ion" -> entry
        self.ms1_dict = {}          # unused; kept for backwards compatibility
        self.ms1_dict_lookup = {}   # mass bin -> {"simplified_name|ion": entry}
        self.tot_entr_read = 0

        if len(self.f_names) > 0:
            for f_name in f_names:
                self.read_lpb(f_name)

    def __str__(self):
        """Human-readable summary of the library and its filter settings."""
        ret_string = []
        ret_string.append("Filenames: %s" % (self.f_names))
        ret_string.append("Min acyl length: %s" % (self.min_acyl_length))
        ret_string.append("Exclude lyso: %s" % (self.exclude_lyso))
        ret_string.append("Include ions: %s" % (self.include_ions))
        ret_string.append("Include lipid classes: %s" % (self.include_class))
        ret_string.append("Use simplified names: %s" % (self.use_simplified_names))
        ret_string.append("Lookup diff: %s Da" % (self.dalt_diff_lookup_bin))
        ret_string.append("Total entries read: %s" % (self.tot_entr_read))
        return("\n".join(ret_string))

    def read_lpb(self, f_name):
        """Parse one .msp file and add the entries that pass all filters."""

        def _get_general_info(name):
            """Extract ([acyl lengths], [unsaturations], class name) from a name.

            Returns the sentinel ([0], [0], "") when the name cannot be
            parsed; the empty class name makes the caller skip the entry.
            """
            # Only the first digit after each ':' is read, so unsaturation
            # counts are currently limited to a maximum of 9.
            unsats = [n[0] for n in name.split(":")[1:]]
            class_name = name.split("(")[0]
            if "-" in class_name:
                # Ether-style class names carry a dash; drop it so the acyl
                # parsing below sees a uniform "Class(len:unsat/..." format.
                name_split = name.split("(")
                name_split[0] = name.split("(")[0].replace("-","")
                name = "(".join(name_split)

            acyl_lengths = name.split(":")
            acyl_lengths.pop()

            f_acyl_lengths = []
            for acl in acyl_lengths:
                try:
                    if "/" in acl:
                        f_acyl_lengths.append(acl.split("/")[1].replace("d","").replace("methyl-",""))
                    elif "-" in acl:
                        f_acyl_lengths.append(acl.split("-")[1].replace("d","").replace("methyl-",""))
                    else:
                        f_acyl_lengths.append(acl.split("(")[1].replace("d","").replace("methyl-",""))
                except IndexError:
                    # Was a bare "except:"; only the [1] indexing can fail here.
                    logging.warning("Could not format to get acyl lengths: %s" % (name))
                    return([0],[0],"")
            try:
                f_acyl_lengths = list(map(int,f_acyl_lengths))
                unsats = list(map(int,unsats))
            except ValueError:
                # Was a bare "except:"; int() conversion raises ValueError.
                logging.warning("Could not format to get acyl lengths: %s" % (name))
                return([0],[0],"")
            return(f_acyl_lengths,unsats,class_name)

        def _simplify_name(class_name, acyls, unsats):
            """Build the canonical key name, e.g. "PE(16:0/18:1)".

            With ``self.aggregate_acyls`` the chains are summed instead,
            e.g. "PE(34:1)".

            Fix: previously this function ignored its ``acyls`` parameter and
            read the caller's ``f_acyl_lengths`` through the closure.
            """
            simplified_name = class_name + "("
            if not self.aggregate_acyls:
                for f, u in zip(acyls, unsats):
                    simplified_name += "%s:%s/" % (f, u)
                # Drop the trailing "/" added by the loop above.
                simplified_name = simplified_name[:-1]
            else:
                simplified_name += "%s:%s" % (sum(acyls), sum(unsats))
            simplified_name += ")"
            return(simplified_name)

        def _get_chem_form(chem_form_native, ion):
            """Return the ionized chemical formula as a canonical string.

            Expands the neutral formula so every element carries an explicit
            count, applies the atom changes of the adduct named in ``ion``,
            and returns the elements alphabetically sorted, e.g.
            "C39H75N1O8P1".
            """
            # Insert an explicit "1" after every element symbol without a count.
            chem_form_ion = ""
            for i, c in enumerate(chem_form_native):
                if i + 1 >= len(chem_form_native):
                    if c.isdigit(): chem_form_ion += c
                    else:
                        chem_form_ion += c
                        chem_form_ion += "1"
                elif c.isdigit(): chem_form_ion += c
                elif c.isupper() and chem_form_native[i+1].isdigit(): chem_form_ion += c
                elif c.isupper() and chem_form_native[i+1].isupper():
                    chem_form_ion += c
                    chem_form_ion += "1"
                elif chem_form_native[i+1].isdigit(): chem_form_ion += c

            # Split into alternating element/count runs and build a dict.
            list_chem = [''.join(g) for _, g in groupby(chem_form_ion, str.isalpha)]
            chem_form_ion = dict(zip(list_chem[::2], map(int, list_chem[1::2])))

            if "+" not in ion:
                # Negative-mode adducts.
                if "[M-H]-" in ion:
                    try: chem_form_ion["H"] -= 1
                    except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
                if "[M-2H](2-)" in ion:
                    try: chem_form_ion["H"] -= 2
                    except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
                if "[M-Ac-H]-" in ion:
                    try:
                        chem_form_ion["C"] += 2
                        chem_form_ion["H"] += 3
                        chem_form_ion["O"] += 2
                    except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
            else:
                # Positive-mode adducts.
                if "[M+H]+" in ion:
                    try: chem_form_ion["H"] += 1
                    except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
                if "[M+NH4]+" in ion:
                    try:
                        # Fix: dict.has_key() was removed in Python 3.
                        chem_form_ion["N"] = chem_form_ion.get("N", 0) + 1
                        chem_form_ion["H"] += 4
                    except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
                if "[M+Na]+" in ion:
                    # Fix: dict.has_key() was removed in Python 3.
                    chem_form_ion["Na"] = chem_form_ion.get("Na", 0) + 1
                if "[M+Na2-H]+" in ion:
                    try:
                        # Fix: dict.has_key() was removed in Python 3.
                        chem_form_ion["Na"] = chem_form_ion.get("Na", 0) + 2
                        chem_form_ion["H"] -= 1
                    except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")

            return("".join([atom + str(num_atom) for atom, num_atom in sorted(chem_form_ion.items())]))

        with open(f_name) as infile:
            fragments = []
            pre_c_mass = 0.0
            name = ""
            # Fix: initialize the fields consumed below. Previously only an
            # unused `ion` variable was set, so a file starting with a blank
            # line (or with missing header lines) raised NameError on
            # `ion_type` / `chem_form_ion` / `num_peaks`.
            ion_type = ""
            chem_form_ion = ""
            num_peaks = 0
            for line in infile:
                line = line.strip()
                if len(line) == 0:
                    # Blank line terminates one spectrum record.
                    f_acyl_lengths, unsats, class_name = _get_general_info(name)
                    f_acyl_lengths_error = [a for a in f_acyl_lengths if a < self.min_acyl_length and a != 0]

                    # Skip the record when any configured filter rejects it.
                    if (len(class_name) == 0) or \
                       (ion_type not in self.include_ions) or \
                       (len([c for c in self.include_class if c in name]) == 0) or \
                       (self.exclude_lyso and "/0:0" in name) or \
                       (len(f_acyl_lengths_error) > 0):
                        fragments = []
                        pre_c_mass = 0.0
                        name = ""
                        ion_type = ""
                        continue

                    simplified_name = _simplify_name(class_name, f_acyl_lengths, unsats)
                    new_entry = LipidBLAST_entry(name=name,
                                                 ion=ion_type,
                                                 mw=pre_c_mass,
                                                 chem_form=chem_form_ion,
                                                 num_ms2_peaks=num_peaks,
                                                 ms2=fragments,
                                                 f_acyl_lengths=f_acyl_lengths,
                                                 unsats=unsats)

                    self.lpb_dict["%s|%s" % (simplified_name, ion_type)] = new_entry

                    # Also index by binned precursor mass for fast lookups.
                    loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.dalt_diff_lookup_bin
                    if loc_dict in self.ms1_dict_lookup.keys():
                        self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name, ion_type)] = new_entry
                    else:
                        self.ms1_dict_lookup[loc_dict] = {}
                        self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name, ion_type)] = new_entry

                    self.tot_entr_read += 1

                    fragments = []
                    pre_c_mass = 0.0
                    name = ""
                    ion_type = ""
                elif ":" in line:
                    # Header lines of a record.
                    if line.startswith("PRECURSORMZ"):
                        pre_c_mass = float(line.split(": ")[1])
                    if line.startswith("Name: "):
                        name = line.split("; ")[-1]
                        ion_type = line.split("; ")[1]
                    if line.startswith("Comment: "):
                        # Some of the chemical formulas contain a ";" at the end; remove
                        chem_form_native = line.split("; ")[-1].replace(";","")
                        chem_form_ion = _get_chem_form(chem_form_native, ion_type)
                    if line.startswith("Num Peaks:"):
                        num_peaks = int(line.split(": ")[-1])
                else:
                    if line == "\x1a":  # EOF marker some files carry
                        continue
                    # Peak line: 'm/z intensity "annotation"'
                    fragments.append([float(line.split(" ")[0]), float(line.split(" ")[1]), line.split(" ")[2].replace("\"","")])
class PrecursorFilter():
    """Match experimental precursor masses against a LipidBLAST database."""

    def __init__(self, db, ppm=10):
        """Keep a reference to the database and the mass tolerance in ppm."""
        self.db = db
        self.ppm = ppm

    def retrieve_entry_pre_c_mass(self, pre_c_mass):
        """Return ``[name, mass_error, entry]`` for every entry within tolerance.

        The database indexes entries by integer mass bins; because the
        tolerance window may cross a bin boundary, the neighbouring bins
        are probed as well.
        """
        tolerance = (pre_c_mass * self.ppm) / 1000000
        bin_width = self.db.dalt_diff_lookup_bin

        def _bin(mass):
            return int(mass) - int(mass) % bin_width

        hits = []
        for bin_key in set([_bin(pre_c_mass),
                            _bin(pre_c_mass - tolerance),
                            _bin(pre_c_mass + tolerance)]):
            try:
                candidates = self.db.ms1_dict_lookup[bin_key].items()
            except KeyError:
                logging.warning("Could not find an entry in the database for prec mass: %s" % (pre_c_mass))
                continue
            for entry_name, entry in candidates:
                error = abs(entry.mw - pre_c_mass)
                if error < tolerance:
                    hits.append([entry_name, error, entry])
        return hits
if __name__ == "__main__":
    # Log the batch run to file; DEBUG level so filtered entries are traceable.
    logging.basicConfig(filename="prec_filter.log",
                        level=logging.DEBUG,
                        filemode="w",
                        format="%(levelname)s:%(created)f:%(asctime)s:%(message)s")

    logging.info("Reading the LPB database ...")
    lpb = LipidBLAST()
    logging.info("Done reading the LPB database ...")
    logging.info(lpb)

    step_three_df = pd.read_csv("stepone_new.csv")

    # Fix: the class is named PrecursorFilter; the previous
    # "Precursor_filter(lpb)" raised NameError at runtime.
    precf = PrecursorFilter(lpb)

    prec_filt_result = []
    for index, row in step_three_df.iterrows():
        if (index % 10000 == 0):
            logging.info("Analyzing row number and m/z: %s - %s" % (index, row["mz"]))
        prec_hits = precf.retrieve_entry_pre_c_mass(row["mz"])
        # Each hit is [name "abbrev|ion", mass error, entry].
        for hit in prec_hits:
            prec_filt_result.append([row["mz"], hit[2].mw, hit[1],
                                     hit[0].split("|")[0], hit[2].chem_form,
                                     hit[0].split("|")[1]])

    prec_filt_result = pd.DataFrame(prec_filt_result)
    prec_filt_result.columns = ["Input Mass", "Matched Mass", "Delta",
                                "Abbreviation", "Formula", "Ion"]
    prec_filt_result.to_excel("batch_results.xlsx", index=False)
| 36.162319 | 303 | 0.655579 |
__author__ = "Robbin Bouwmeester"
__copyright__ = "Copyright 2017"
__credits__ = ["Robbin Bouwmeester"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Robbin Bouwmeester"
__email__ = "Robbin.bouwmeester@ugent.be"
__status__ = "nightly funzies"
import pandas as pd
from itertools import groupby
import logging
class LipidBLAST_entry():
def __init__(self,
name="",
ion="",
mw=0.0,
chem_form="",
num_ms2_peaks=0,
f_acyl_lengths=[],
unsats=[],
ms2=[]):
self.name = name
self.ion = ion
self.mw = mw
self.chem_form = chem_form
self.num_ms2_peaks = num_ms2_peaks
self.ms2 = ms2
self.f_acyl_lengths = f_acyl_lengths
self.unsats = unsats
def __str__(self):
ret_string = []
ret_string.append("================")
ret_string.append("")
ret_string.append("Lipid: %s" % (self.name))
ret_string.append("MW: %s" % (self.mw))
ret_string.append("Formula: %s" % (self.chem_form))
ret_string.append ("")
for f in self.ms2:
ret_string.append("%s\t%s\t%s" % (f[0],f[1],f[2]))
ret_string.append("")
ret_string.append("================")
return("\n".join(ret_string))
class LipidBLAST():
def __init__(self,
f_names=["LipidBlast-pos.msp","LipidBlast-neg.msp"],
min_acyl_length=10,
exclude_lyso=False,
include_ions=["[M-H]-"],
include_class=["PE","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro"], diff_lookup_bin=1):
self.f_names = f_names
self.min_acyl_length = min_acyl_length
self.exclude_lyso = exclude_lyso
self.include_ions = include_ions
self.include_class = include_class
self.use_simplified_names = use_simplified_names
self.dalt_diff_lookup_bin = dalt_diff_lookup_bin
self.aggregate_acyls = aggregate_acyls
self.lpb_dict = {}
self.ms1_dict = {}
self.ms1_dict_lookup = {}
self.tot_entr_read = 0
if len(self.f_names) > 0:
for f_name in f_names:
self.read_lpb(f_name)
def __str__(self):
ret_string = []
ret_string.append("Filenames: %s" % (self.f_names))
ret_string.append("Min acyl length: %s" % (self.min_acyl_length))
ret_string.append("Exclude lyso: %s" % (self.exclude_lyso))
ret_string.append("Include ions: %s" % (self.include_ions))
ret_string.append("Include lipid classes: %s" % (self.include_class))
ret_string.append("Use simplified names: %s" % (self.use_simplified_names))
ret_string.append("Lookup diff: %s Da" % (self.dalt_diff_lookup_bin))
ret_string.append("Total entries read: %s" % (self.tot_entr_read))
return("\n".join(ret_string))
def read_lpb(self,f_name):
def _get_general_info(name):
# Currently limited to max 9 unsats
unsats = [n[0] for n in name.split(":")[1:]]
class_name = name.split("(")[0]
if "-" in class_name:
name_split = name.split("(")
name_split[0] = name.split("(")[0].replace("-","")
name = "(".join(name_split)
acyl_lengths = name.split(":")
acyl_lengths.pop()
f_acyl_lengths = []
for acl in acyl_lengths:
try:
if "/" in acl:
f_acyl_lengths.append(acl.split("/")[1].replace("d","").replace("methyl-",""))
elif "-" in acl:
f_acyl_lengths.append(acl.split("-")[1].replace("d","").replace("methyl-",""))
else:
f_acyl_lengths.append(acl.split("(")[1].replace("d","").replace("methyl-",""))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
try:
f_acyl_lengths = list(map(int,f_acyl_lengths))
unsats = list(map(int,unsats))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
return(f_acyl_lengths,unsats,class_name)
def _simplify_name(class_name,acyls,unsats):
simplified_name = ""
simplified_name += class_name
simplified_name += "("
if not self.aggregate_acyls:
for f,u in zip(f_acyl_lengths,unsats):
simplified_name += str(f)
simplified_name += ":"
simplified_name += str(u)
simplified_name += "/"
simplified_name = simplified_name[:-1]
else:
simplified_name += str(sum(f_acyl_lengths))
simplified_name += ":"
simplified_name += str(sum(unsats))
simplified_name += ")"
return(simplified_name)
def _get_chem_form(chem_form_native,ion):
chem_form_ion = ""
for i,c in enumerate(chem_form_native):
if i+1 >= len(chem_form_native):
if c.isdigit(): chem_form_ion += c
else:
chem_form_ion += c
chem_form_ion += "1"
elif c.isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isupper():
chem_form_ion += c
chem_form_ion += "1"
elif chem_form_native[i+1].isdigit(): chem_form_ion += c
list_chem= [''.join(g) for _, g in groupby(chem_form_ion, str.isalpha)]
chem_form_ion = dict(zip(list_chem[::2],map(int,list_chem[1::2])))
if "+" not in ion:
if "[M-H]-" in ion:
try: chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-2H](2-)" in ion:
try: chem_form_ion["H"] -= 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-Ac-H]-" in ion:
try:
chem_form_ion["C"] += 2
chem_form_ion["H"] += 3
chem_form_ion["O"] += 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
else:
if "[M+H]+" in ion:
try: chem_form_ion["H"] += 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+NH4]+" in ion:
try:
if chem_form_ion.has_key("N"): chem_form_ion["N"] += 1
else: chem_form_ion["N"] = 1
chem_form_ion["H"] += 4
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na]+" in ion:
try:
if chem_form_ion.has_key("Na"): chem_form_ion["Na"] += 1
else: chem_form_ion["Na"] = 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na2-H]+" in ion:
try:
if chem_form_ion.has_key("Na"): chem_form_ion["Na"] += 2
else: chem_form_ion["Na"] = 2
chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
return("".join([atom+str(num_atom) for atom,num_atom in sorted(chem_form_ion.items())]))
with open(f_name) as infile:
fragments = []
pre_c_mass = 0.0
name = ""
ion = ""
for line in infile:
line = line.strip()
#print(line)
if len(line) == 0:
f_acyl_lengths,unsats,class_name = _get_general_info(name)
f_acyl_lengths_error = [a for a in f_acyl_lengths if a < self.min_acyl_length and a != 0]
if (len(class_name) == 0) or \
(ion_type not in self.include_ions) or \
(len([c for c in self.include_class if c in name]) == 0) or \
(self.exclude_lyso and "/0:0" in name) or \
(len(f_acyl_lengths_error) > 0):
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
continue
simplified_name = _simplify_name(class_name,f_acyl_lengths,unsats)
new_entry = LipidBLAST_entry(name=name,
ion=ion_type,
mw=pre_c_mass,
chem_form=chem_form_ion,
num_ms2_peaks=num_peaks,
ms2=fragments,
f_acyl_lengths=f_acyl_lengths,
unsats=unsats)
self.lpb_dict["%s|%s" % (simplified_name,ion_type)] = new_entry
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.dalt_diff_lookup_bin
if loc_dict in self.ms1_dict_lookup.keys():
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
else:
self.ms1_dict_lookup[loc_dict] = {}
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
self.tot_entr_read += 1
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
elif ":" in line:
if line.startswith("PRECURSORMZ"):
pre_c_mass = float(line.split(": ")[1])
if line.startswith("Name: "):
name = line.split("; ")[-1]
ion_type = line.split("; ")[1]
if line.startswith("Comment: "):
# Some of the chemical formulas contain a ";" at the end; remove
chem_form_native = line.split("; ")[-1].replace(";","")
#print(chem_form_native)
chem_form_ion = _get_chem_form(chem_form_native,ion_type)
if line.startswith("Num Peaks:"):
num_peaks = int(line.split(": ")[-1])
else:
if line=="\x1a": #EOF
continue
fragments.append([float(line.split(" ")[0]),float(line.split(" ")[1]),line.split(" ")[2].replace("\"","")])
class PrecursorFilter():
def __init__(self,db,ppm=10):
self.db = db
self.ppm = ppm
def retrieve_entry_pre_c_mass(self,pre_c_mass):
mass_error_threshold = (pre_c_mass*self.ppm)/1000000
ret_entries = []
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.db.dalt_diff_lookup_bin
loc_dict_lower = (int(pre_c_mass-mass_error_threshold)) - (int(pre_c_mass-mass_error_threshold)) % self.db.dalt_diff_lookup_bin
loc_dict_upper = (int(pre_c_mass+mass_error_threshold)) - (int(pre_c_mass+mass_error_threshold)) % self.db.dalt_diff_lookup_bin
locs_to_search = list(set([loc_dict,loc_dict_lower,loc_dict_upper]))
for loc in locs_to_search:
try:
for name,entr in self.db.ms1_dict_lookup[loc].items():
mass_error = abs(entr.mw-pre_c_mass)
if mass_error < mass_error_threshold:
ret_entries.append([name,mass_error,entr])
except KeyError:
logging.warning("Could not find an entry in the database for prec mass: %s" % (pre_c_mass))
continue
return(ret_entries)
if __name__ == "__main__":
logging.basicConfig(filename="prec_filter.log",
level=logging.DEBUG,
filemode="w",
format="%(levelname)s:%(created)f:%(asctime)s:%(message)s")
logging.info("Reading the LPB database ...")
lpb = LipidBLAST()
logging.info("Done reading the LPB database ...")
logging.info(lpb)
step_three_df = pd.read_csv("stepone_new.csv")
precf = Precursor_filter(lpb)
prec_filt_result = []
for index,row in step_three_df.iterrows():
if (index % 10000==0):
logging.info("Analyzing row number and m/z: %s - %s" % (index,row["mz"]))
prec_hits = precf.retrieve_entry_pre_c_mass(row["mz"])
for hit in prec_hits:
prec_filt_result.append([row["mz"],hit[2].mw,hit[1],hit[0].split("|")[0],hit[2].chem_form,hit[0].split("|")[1]])
prec_filt_result = pd.DataFrame(prec_filt_result)
prec_filt_result.columns = ["Input Mass","Matched Mass","Delta","Abbreviation","Formula","Ion"]
prec_filt_result.to_excel("batch_results.xlsx",index=False)
| true | true |
790051dad9636751beaebf2f7a3af72b9f9dd2cb | 2,275 | py | Python | homeassistant/components/blockchain/sensor.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 1 | 2021-03-12T20:46:40.000Z | 2021-03-12T20:46:40.000Z | homeassistant/components/blockchain/sensor.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 51 | 2020-08-03T07:30:44.000Z | 2022-03-22T06:02:42.000Z | homeassistant/components/blockchain/sensor.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 2 | 2021-03-22T21:42:48.000Z | 2021-04-12T12:26:39.000Z | """Support for Blockchain.com sensors."""
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Blockchain.com sensors."""
    addresses = config[CONF_ADDRESSES]
    sensor_name = config[CONF_NAME]

    # Refuse to set up when a configured address fails validation.
    for btc_address in addresses:
        if validate_address(btc_address):
            continue
        _LOGGER.error("Bitcoin address is not valid: %s", btc_address)
        return False

    add_entities([BlockchainSensor(sensor_name, addresses)], True)
class BlockchainSensor(Entity):
    """Sensor reporting the combined BTC balance of configured addresses."""

    def __init__(self, name, addresses):
        """Remember the display name and the wallet addresses to query."""
        self._name = name
        self.addresses = addresses
        self._state = None
        self._unit_of_measurement = "BTC"

    @property
    def name(self):
        """Name shown in the frontend."""
        return self._name

    @property
    def state(self):
        """Most recently fetched total balance (None until first update)."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Balances are expressed in BTC."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Bitcoin currency icon for the frontend."""
        return ICON

    @property
    def extra_state_attributes(self):
        """Attach the mandatory data-source attribution."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    def update(self):
        """Query blockchain.com for the current combined balance."""
        balance = get_balance(self.addresses)
        self._state = balance
| 26.453488 | 77 | 0.688791 | from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return False
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(Entity):
def __init__(self, name, addresses):
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = "BTC"
@property
def name(self):
return self._name
@property
def state(self):
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
@property
def icon(self):
return ICON
@property
def extra_state_attributes(self):
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
self._state = get_balance(self.addresses)
| true | true |
7900542b19f99c7d0c7def2a8f3eee27dffd51c7 | 2,047 | py | Python | python/tests/test_print.py | borglab/GTDynamics | ffdb0c5c3bc2d9b13555caee075b1b4304e1e3f1 | [
"BSD-2-Clause"
] | 6 | 2021-08-09T23:43:52.000Z | 2021-11-11T16:16:37.000Z | python/tests/test_print.py | borglab/GTDynamics | ffdb0c5c3bc2d9b13555caee075b1b4304e1e3f1 | [
"BSD-2-Clause"
] | 104 | 2021-08-03T14:15:28.000Z | 2022-03-26T08:18:09.000Z | python/tests/test_print.py | borglab/GTDynamics | ffdb0c5c3bc2d9b13555caee075b1b4304e1e3f1 | [
"BSD-2-Clause"
] | 4 | 2021-08-02T17:42:28.000Z | 2021-12-24T00:43:17.000Z | """
* GTDynamics Copyright 2021, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_print.py
* @brief Test printing with DynamicsSymbol.
* @author Gerry Chen
"""
import unittest
from io import StringIO
from unittest.mock import patch
import gtdynamics as gtd
import gtsam
class TestPrint(unittest.TestCase):
    """Verify printed output uses GTDynamics' DynamicsSymbol key formatting."""

    def test_values(self):
        """Values.__repr__ must render keys with GTDKeyFormatter, not gtsam's default."""
        values = gtd.Values()
        gtd.InsertJointAngle(values, 0, 1, 2)
        self.assertTrue('q(0)1' in values.__repr__())

    def test_nonlinear_factor_graph(self):
        """NonlinearFactorGraph.__repr__ must render keys with GTDKeyFormatter."""
        graph = gtd.NonlinearFactorGraph()
        noise = gtsam.noiseModel.Unit.Create(1)
        graph.push_back(gtd.MinTorqueFactor(gtd.TorqueKey(0, 0).key(), noise))
        self.assertTrue('T(0)0' in graph.__repr__())

    def test_key_formatter(self):
        """print() must honor both the default GTD formatter and a custom one."""
        torque_key = gtd.TorqueKey(0, 0).key()
        factor = gtd.MinTorqueFactor(torque_key,
                                     gtsam.noiseModel.Unit.Create(1))

        with patch('sys.stdout', new=StringIO()) as captured:
            factor.print('factor: ', gtd.GTDKeyFormatter)
            printed = captured.getvalue()
        self.assertTrue('factor: min torque factor' in printed)
        self.assertTrue('keys = { T(0)0 }' in printed)

        def my_key_formatter(key):
            return 'this is my key formatter {}'.format(key)

        with patch('sys.stdout', new=StringIO()) as captured:
            factor.print('factor: ', my_key_formatter)
            printed = captured.getvalue()
        self.assertTrue('factor: min torque factor' in printed)
        self.assertTrue('keys = {{ this is my key formatter {} }}'.format(
            torque_key) in printed)
# Allow running this test module directly (e.g. `python test_print.py`).
if __name__ == "__main__":
    unittest.main()
| 34.694915 | 93 | 0.633122 |
import unittest
from io import StringIO
from unittest.mock import patch
import gtdynamics as gtd
import gtsam
class TestPrint(unittest.TestCase):
def test_values(self):
v = gtd.Values()
gtd.InsertJointAngle(v, 0, 1, 2)
self.assertTrue('q(0)1' in v.__repr__())
def test_nonlinear_factor_graph(self):
fg = gtd.NonlinearFactorGraph()
fg.push_back(
gtd.MinTorqueFactor(
gtd.TorqueKey(0, 0).key(),
gtsam.noiseModel.Unit.Create(1)))
self.assertTrue('T(0)0' in fg.__repr__())
def test_key_formatter(self):
torqueKey = gtd.TorqueKey(0, 0).key()
factor = gtd.MinTorqueFactor(torqueKey,
gtsam.noiseModel.Unit.Create(1))
with patch('sys.stdout', new=StringIO()) as fake_out:
factor.print('factor: ', gtd.GTDKeyFormatter)
self.assertTrue('factor: min torque factor' in fake_out.getvalue())
self.assertTrue('keys = { T(0)0 }' in fake_out.getvalue())
def myKeyFormatter(key):
return 'this is my key formatter {}'.format(key)
with patch('sys.stdout', new=StringIO()) as fake_out:
factor.print('factor: ', myKeyFormatter)
self.assertTrue('factor: min torque factor' in fake_out.getvalue())
self.assertTrue('keys = {{ this is my key formatter {} }}'.format(
torqueKey) in fake_out.getvalue())
if __name__ == "__main__":
unittest.main()
| true | true |
79005453d0fe8a9fdd2a776edc602dc232c208ca | 3,791 | py | Python | drfs/filesystems/util.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | 2 | 2021-07-29T10:38:30.000Z | 2021-09-08T11:48:39.000Z | drfs/filesystems/util.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | 2 | 2020-10-07T07:47:31.000Z | 2021-11-15T17:52:33.000Z | drfs/filesystems/util.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | null | null | null | import urllib.parse
from functools import partial, wraps
from pathlib import Path
from drfs import config
from drfs.util import prepend_scheme, remove_scheme
def get_fs(path, opts=None, rtype="instance"):
    """Infer the filesystem implementation for ``path``.

    Filesystem options come from the ``fs_opts`` section of the settings
    and are overridden by ``opts``.

    Parameters
    ----------
    path: str
        Path for which we want to infer filesystem.
    opts: dict
        Kwargs that will be passed to inferred filesystem instance.
    rtype: str
        Either 'instance' (default) or 'class'.
    """
    from drfs.filesystems import FILESYSTEMS

    try:
        protocol = path.scheme
    except AttributeError:
        protocol = _get_protocol(path)

    if protocol not in FILESYSTEMS:
        raise KeyError(
            f"No filesystem for protocol {protocol}. Try "
            f"installing it. Available protocols are: "
            f"{set(FILESYSTEMS.keys())}"
        )
    fs_cls = FILESYSTEMS[protocol]
    if rtype == "class":
        return fs_cls

    # Layer the caller's options on top of the configured defaults.
    config_key = protocol if protocol else "file"
    fs_options = config["fs_opts"][config_key].get(dict).copy()  # type: dict
    if opts is not None:
        fs_options.update(opts)
    fs_options = _fix_opts_abfs(fs_cls, path, fs_options)
    return fs_cls(**fs_options)
def _get_protocol(path):
if "://" in str(path):
protocol = urllib.parse.urlparse(str(path)).scheme
else:
# most likely a windows path, basically if in doubt assume local
protocol = ""
return protocol
def _fix_opts_abfs(cls, path, opts: dict):
try:
from drfs.filesystems.azure_blob import AzureBlobFileSystem, extract_abfs_parts
except ImportError:
AzureBlobFileSystem = extract_abfs_parts = None
if (
AzureBlobFileSystem is not None
and cls is AzureBlobFileSystem
and "account_name" not in opts
):
opts = opts.copy()
opts["account_name"] = extract_abfs_parts(path)[0]
return opts
def allow_pathlib(func):
    """Decorator letting methods receive pathlib.Path objects.

    The wrapped callable must have the signature
    ``self, path, *args, **kwargs``; the path argument is converted to
    ``str`` before the call.

    Returns
    -------
    wrapper: callable
    """

    @wraps(func)
    def wrapper(self, path, *args, **kwargs):
        # Only works when the path is the first argument after self.
        from drfs.path import asstr

        return func(self, asstr(path), *args, **kwargs)

    return wrapper
def return_pathlib(func):
    """Decorator converting the wrapped method's return value to path objects."""

    @wraps(func)
    def wrapper(self, path, *args, **kwargs):
        from drfs.path import aspath

        return aspath(func(self, path, *args, **kwargs))

    return wrapper
def return_schemes(func):
    """Make sure method returns full path(s) with scheme."""

    @wraps(func)
    def wrapper(self, path, *args, **kwargs):
        result = func(self, path, *args, **kwargs)
        try:
            return [prepend_scheme(self.scheme, item) for item in result]
        except TypeError:
            # Single path rather than an iterable of paths.
            return prepend_scheme(self.scheme, result)

    return wrapper
def maybe_remove_scheme(func):
    """Remove scheme from args and kwargs in case underlying fs does not support it."""

    @wraps(func)
    def wrapper(self, path, *args, **kwargs):
        if not self.supports_scheme:

            def _strip(value):
                return remove_scheme(value, raise_=False)

            path = _strip(path)
            args = [_strip(arg) for arg in args]
            kwargs = {
                key: _strip(val) if isinstance(val, (Path, str)) else val
                for key, val in kwargs.items()
            }
        return func(self, path, *args, **kwargs)

    return wrapper
| 26.697183 | 87 | 0.619889 | import urllib.parse
from functools import partial, wraps
from pathlib import Path
from drfs import config
from drfs.util import prepend_scheme, remove_scheme
def get_fs(path, opts=None, rtype="instance"):
from drfs.filesystems import FILESYSTEMS
try:
protocol = path.scheme
except AttributeError:
protocol = _get_protocol(path)
try:
cls = FILESYSTEMS[protocol]
if rtype == "class":
return cls
except KeyError:
raise KeyError(
f"No filesystem for protocol {protocol}. Try "
f"installing it. Available protocols are: "
f"{set(FILESYSTEMS.keys())}"
)
config_scheme_key = protocol if protocol else "file"
opts_ = config["fs_opts"][config_scheme_key].get(dict).copy()
if opts is not None:
opts_.update(opts)
opts_ = _fix_opts_abfs(cls, path, opts_)
return cls(**opts_)
def _get_protocol(path):
if "://" in str(path):
protocol = urllib.parse.urlparse(str(path)).scheme
else:
protocol = ""
return protocol
def _fix_opts_abfs(cls, path, opts: dict):
try:
from drfs.filesystems.azure_blob import AzureBlobFileSystem, extract_abfs_parts
except ImportError:
AzureBlobFileSystem = extract_abfs_parts = None
if (
AzureBlobFileSystem is not None
and cls is AzureBlobFileSystem
and "account_name" not in opts
):
opts = opts.copy()
opts["account_name"] = extract_abfs_parts(path)[0]
return opts
def allow_pathlib(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
from drfs.path import asstr
p = asstr(path)
return func(self, p, *args, **kwargs)
return wrapper
def return_pathlib(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
from drfs.path import aspath
res = func(self, path, *args, **kwargs)
as_path = aspath(res)
return as_path
return wrapper
def return_schemes(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
res = func(self, path, *args, **kwargs)
try:
res = list(map(partial(prepend_scheme, self.scheme), res))
except TypeError:
res = prepend_scheme(self.scheme, res)
return res
return wrapper
def maybe_remove_scheme(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
if not self.supports_scheme:
path = remove_scheme(path, raise_=False)
args = [remove_scheme(a, raise_=False) for a in args]
kwargs = {
k: remove_scheme(v, raise_=False) if isinstance(v, (Path, str)) else v
for k, v in kwargs.items()
}
return func(self, path, *args, **kwargs)
return wrapper
| true | true |
7900545c0d4817fb80a8a0b55d46ac0ebdf60db0 | 3,216 | py | Python | tools/analyze_model.py | gasvn/Res2Net-detectron2 | 3677895d5d23635b67837e64a79370b9ee117c27 | [
"Apache-2.0"
] | 29 | 2020-05-11T07:22:46.000Z | 2021-09-20T12:21:26.000Z | tools/analyze_model.py | gasvn/Res2Net-detectron2 | 3677895d5d23635b67837e64a79370b9ee117c27 | [
"Apache-2.0"
] | 4 | 2021-06-08T21:22:09.000Z | 2022-03-12T00:25:40.000Z | tools/analyze_model.py | gasvn/Res2Net-detectron2 | 3677895d5d23635b67837e64a79370b9ee117c27 | [
"Apache-2.0"
] | 10 | 2020-05-11T08:28:20.000Z | 2021-08-25T08:17:41.000Z | # -*- coding: utf-8 -*-
# noqa: B950
import logging
from collections import Counter
import tqdm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
activation_count_operators,
flop_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger()
return cfg
def do_flop(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
counts += flop_count_operators(model, data)
logger.info(
"(G)Flops for Each Type of Operators:\n" + str([(k, v / idx) for k, v in counts.items()])
)
def do_activation(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
counts += activation_count_operators(model, data)
logger.info(
"(Million) Activations for Each Type of Operators:\n"
+ str([(k, v / idx) for k, v in counts.items()])
)
def do_parameter(cfg):
model = build_model(cfg)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
model = build_model(cfg)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
| 27.965217 | 97 | 0.679415 |
import logging
from collections import Counter
import tqdm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
activation_count_operators,
flop_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger()
return cfg
def do_flop(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):
counts += flop_count_operators(model, data)
logger.info(
"(G)Flops for Each Type of Operators:\n" + str([(k, v / idx) for k, v in counts.items()])
)
def do_activation(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):
counts += activation_count_operators(model, data)
logger.info(
"(Million) Activations for Each Type of Operators:\n"
+ str([(k, v / idx) for k, v in counts.items()])
)
def do_parameter(cfg):
model = build_model(cfg)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
model = build_model(cfg)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
| true | true |
790054f824f61a92d991eef2aa187ebe7e531824 | 292 | py | Python | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task6_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task6_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task6_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | 1 | 2022-03-18T04:52:10.000Z | 2022-03-18T04:52:10.000Z | #!/usr/bin/python3
# --- 001 > U5W2P1_Task6_w1
def solution( n ):
if(n > 2 and n < 7 ):
return True;
else:
return False;
if __name__ == "__main__":
print('----------start------------')
n = 10
print(solution( n ))
print('------------end------------')
| 19.466667 | 40 | 0.445205 |
def solution( n ):
if(n > 2 and n < 7 ):
return True;
else:
return False;
if __name__ == "__main__":
print('----------start------------')
n = 10
print(solution( n ))
print('------------end------------')
| true | true |
7900558fda7459a70dea4a5e3d196f7c1eebd412 | 7,993 | py | Python | test/test_init.py | matthiasdiener/mirgecom | 4fb879023ec124047be9f3001485c69a8f4660c6 | [
"MIT"
] | null | null | null | test/test_init.py | matthiasdiener/mirgecom | 4fb879023ec124047be9f3001485c69a8f4660c6 | [
"MIT"
] | null | null | null | test/test_init.py | matthiasdiener/mirgecom | 4fb879023ec124047be9f3001485c69a8f4660c6 | [
"MIT"
] | null | null | null | __copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import numpy.linalg as la # noqa
import pyopencl as cl
import pyopencl.clrandom
import pyopencl.clmath
import pytest
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from mirgecom.initializers import Vortex2D
from mirgecom.initializers import Lump
from mirgecom.euler import split_conserved
from mirgecom.initializers import SodShock1D
from mirgecom.eos import IdealSingleGas
from grudge.eager import EagerDGDiscretization
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests,
)
logger = logging.getLogger(__name__)
def test_lump_init(ctx_factory):
"""
Simple test to check that Lump initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# Init soln with Vortex
center = np.zeros(shape=(dim,))
velocity = np.zeros(shape=(dim,))
center[0] = 5
velocity[0] = 1
lump = Lump(center=center, velocity=velocity)
lump_soln = lump(0, nodes)
cv = split_conserved(dim, lump_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = 1.0
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"lump_soln = {lump_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_vortex_init(ctx_factory):
"""
Simple test to check that Vortex2D initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# Init soln with Vortex
vortex = Vortex2D()
vortex_soln = vortex(0, nodes)
gamma = 1.4
cv = split_conserved(dim, vortex_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = cv.mass ** gamma
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"vortex_soln = {vortex_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_shock_init(ctx_factory):
"""
Simple test to check that Shock1D initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
dim = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (1.0,)], b=[(-0.5,), (0.5,)], n=(nel_1d,) * dim
)
order = 3
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
initr = SodShock1D()
initsoln = initr(t=0.0, x_vec=nodes)
print("Sod Soln:", initsoln)
xpl = 1.0
xpr = 0.1
tol = 1e-15
nodes_x = nodes[0]
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
assert discr.norm(actx.np.where(nodes_x < 0.5, p-xpl, p-xpr), np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_uniform(ctx_factory, dim):
"""
Simple test to check that Uniform initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
from mirgecom.initializers import Uniform
initr = Uniform(numdim=dim)
initsoln = initr(t=0.0, x_vec=nodes)
tol = 1e-15
ssoln = split_conserved(dim, initsoln)
assert discr.norm(ssoln.mass - 1.0, np.inf) < tol
assert discr.norm(ssoln.energy - 2.5, np.inf) < tol
print(f"Uniform Soln:{initsoln}")
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
print(f"Press:{p}")
assert discr.norm(p - 1.0, np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_pulse(ctx_factory, dim):
"""
Test of Gaussian pulse generator.
If it looks, walks, and quacks like a duck, then ...
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
tol = 1e-15
from mirgecom.initializers import _make_pulse
amp = 1.0
w = .1
rms2 = w * w
r0 = np.zeros(dim)
r2 = np.dot(nodes, nodes) / rms2
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
print(f"Pulse = {pulse}")
# does it return the expected exponential?
pulse_check = actx.np.exp(-.5 * r2)
print(f"exact: {pulse_check}")
pulse_resid = pulse - pulse_check
print(f"pulse residual: {pulse_resid}")
assert(discr.norm(pulse_resid, np.inf) < tol)
# proper scaling with amplitude?
amp = 2.0
pulse = 0
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
pulse_resid = pulse - (pulse_check + pulse_check)
assert(discr.norm(pulse_resid, np.inf) < tol)
# proper scaling with r?
amp = 1.0
rcheck = np.sqrt(2.0) * nodes
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
# proper scaling with w?
w = w / np.sqrt(2.0)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
| 29.278388 | 79 | 0.671337 | __copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import numpy.linalg as la
import pyopencl as cl
import pyopencl.clrandom
import pyopencl.clmath
import pytest
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL, BTAG_NONE
from mirgecom.initializers import Vortex2D
from mirgecom.initializers import Lump
from mirgecom.euler import split_conserved
from mirgecom.initializers import SodShock1D
from mirgecom.eos import IdealSingleGas
from grudge.eager import EagerDGDiscretization
from pyopencl.tools import (
pytest_generate_tests_for_pyopencl as pytest_generate_tests,
)
logger = logging.getLogger(__name__)
def test_lump_init(ctx_factory):
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
center = np.zeros(shape=(dim,))
velocity = np.zeros(shape=(dim,))
center[0] = 5
velocity[0] = 1
lump = Lump(center=center, velocity=velocity)
lump_soln = lump(0, nodes)
cv = split_conserved(dim, lump_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = 1.0
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"lump_soln = {lump_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_vortex_init(ctx_factory):
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
vortex = Vortex2D()
vortex_soln = vortex(0, nodes)
gamma = 1.4
cv = split_conserved(dim, vortex_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = cv.mass ** gamma
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"vortex_soln = {vortex_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_shock_init(ctx_factory):
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
dim = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (1.0,)], b=[(-0.5,), (0.5,)], n=(nel_1d,) * dim
)
order = 3
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
initr = SodShock1D()
initsoln = initr(t=0.0, x_vec=nodes)
print("Sod Soln:", initsoln)
xpl = 1.0
xpr = 0.1
tol = 1e-15
nodes_x = nodes[0]
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
assert discr.norm(actx.np.where(nodes_x < 0.5, p-xpl, p-xpr), np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_uniform(ctx_factory, dim):
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
from mirgecom.initializers import Uniform
initr = Uniform(numdim=dim)
initsoln = initr(t=0.0, x_vec=nodes)
tol = 1e-15
ssoln = split_conserved(dim, initsoln)
assert discr.norm(ssoln.mass - 1.0, np.inf) < tol
assert discr.norm(ssoln.energy - 2.5, np.inf) < tol
print(f"Uniform Soln:{initsoln}")
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
print(f"Press:{p}")
assert discr.norm(p - 1.0, np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_pulse(ctx_factory, dim):
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
tol = 1e-15
from mirgecom.initializers import _make_pulse
amp = 1.0
w = .1
rms2 = w * w
r0 = np.zeros(dim)
r2 = np.dot(nodes, nodes) / rms2
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
print(f"Pulse = {pulse}")
pulse_check = actx.np.exp(-.5 * r2)
print(f"exact: {pulse_check}")
pulse_resid = pulse - pulse_check
print(f"pulse residual: {pulse_resid}")
assert(discr.norm(pulse_resid, np.inf) < tol)
amp = 2.0
pulse = 0
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
pulse_resid = pulse - (pulse_check + pulse_check)
assert(discr.norm(pulse_resid, np.inf) < tol)
amp = 1.0
rcheck = np.sqrt(2.0) * nodes
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
w = w / np.sqrt(2.0)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
| true | true |
790056483285f244869c93d2abc1dc3c276c4eff | 816 | py | Python | emonitor/modules/locations/__init__.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 21 | 2015-03-04T11:36:47.000Z | 2021-04-20T07:51:53.000Z | emonitor/modules/locations/__init__.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 79 | 2015-01-04T21:35:49.000Z | 2020-03-05T07:22:10.000Z | emonitor/modules/locations/__init__.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 27 | 2015-03-04T11:36:48.000Z | 2021-09-20T08:15:17.000Z | from emonitor.utils import Module
from emonitor.extensions import babel
from .content_frontend import getFrontendContent, getFrontendData
class LocationsModule(Module):
info = dict(area=['frontend'], name='locations', path='locations', icon='fa-code-fork', version='0.1')
def __repr__(self):
return "locations"
def __init__(self, app):
Module.__init__(self, app)
# add template path
app.jinja_loader.searchpath.append("%s/emonitor/modules/locations/templates" % app.config.get('PROJECT_ROOT'))
# translations
babel.gettext(u'module.locations')
def frontendContent(self):
return 1
def getFrontendContent(self, **params):
return getFrontendContent(**params)
def getFrontendData(self):
return getFrontendData(self)
| 29.142857 | 118 | 0.693627 | from emonitor.utils import Module
from emonitor.extensions import babel
from .content_frontend import getFrontendContent, getFrontendData
class LocationsModule(Module):
info = dict(area=['frontend'], name='locations', path='locations', icon='fa-code-fork', version='0.1')
def __repr__(self):
return "locations"
def __init__(self, app):
Module.__init__(self, app)
app.jinja_loader.searchpath.append("%s/emonitor/modules/locations/templates" % app.config.get('PROJECT_ROOT'))
babel.gettext(u'module.locations')
def frontendContent(self):
return 1
def getFrontendContent(self, **params):
return getFrontendContent(**params)
def getFrontendData(self):
return getFrontendData(self)
| true | true |
790056b7d6b9304321397dda60c9623d08f6fd60 | 17,213 | py | Python | src/transformers/adas.py | MathieuTuli/transformers | da3db8ba7a18deed492808b0d6c5d29669241fa0 | [
"Apache-2.0"
] | null | null | null | src/transformers/adas.py | MathieuTuli/transformers | da3db8ba7a18deed492808b0d6c5d29669241fa0 | [
"Apache-2.0"
] | null | null | null | src/transformers/adas.py | MathieuTuli/transformers | da3db8ba7a18deed492808b0d6c5d29669241fa0 | [
"Apache-2.0"
] | null | null | null | """
"""
from __future__ import division
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
from typing import NamedTuple, List
from dataclasses import dataclass
from enum import Enum
from typing import Union, Tuple
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
class LayerType(Enum):
    """Coarse category of a network parameter tensor."""
    CONV = 1  # 4-D convolution weight
    FC = 2  # 2-D fully-connected weight
    NON_CONV = 3  # anything else (biases, norm scales, ...)
@dataclass
class LayerMetrics:
    """Low-rank metrics of a single 2-D weight unfolding."""
    # Estimated rank, normalized by the unfolding's leading dimension.
    rank: float
    # Knowledge gain: normalized sum of the retained singular values.
    KG: float
    # Mapping condition: largest / smallest retained singular value.
    condition: float
@dataclass
class ConvLayerMetrics:
    """Pairs the metrics of a conv layer's two tensor unfoldings."""
    # Metrics of the mode-3 (input-channel) unfolding.
    input_channel: LayerMetrics
    # Metrics of the mode-4 (output-channel) unfolding.
    output_channel: LayerMetrics
class LRMetrics(NamedTuple):
    """Per-layer quantities for the Adas learning-rate schedule.

    NOTE(review): fields are not consumed within this part of the file --
    presumably rank_velocity is the epoch-to-epoch change in rank and
    r_conv its smoothed form; confirm against the optimizer step code.
    """
    rank_velocity: List[float]
    r_conv: List[float]
def EVBMF(Y, sigma2=None, H=None):
    """Analytical solution to Empirical Variational Bayes Matrix
    Factorization (EVBMF).

    Truncates and shrinks the SVD of ``Y`` at the globally optimal rank
    predicted by the fully-observed VBMF analysis of Nakajima et al.

    Notes
    -----
    If sigma2 is unspecified, it is estimated by minimizing the free
    energy.
    If H is unspecified, it is set to the smallest of the sides of the
    input Y.
    The posterior-statistics computation from the original MatLab code is
    kept below as commented-out reference; the ``post`` dictionary is NOT
    returned by this implementation.

    Parameters
    ----------
    Y : torch.Tensor
        Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
    sigma2 : int or None (default=None)
        Variance of the noise on Y.
    H : int or None (default = None)
        Maximum rank of the factorized matrices.

    Returns
    -------
    U : torch.Tensor
        Left-singular vectors, truncated to the estimated rank.
    S : torch.Tensor
        Diagonal matrix of the shrunken retained singular values.
    V : torch.Tensor
        Right-singular vectors, truncated to the estimated rank.

    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of
    fully-observed variational Bayesian matrix factorization." Journal of
    Machine Learning Research 14.Jan (2013): 1-37.
    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
    variational Bayesian PCA." Advances in Neural Information Processing
    Systems. 2012.
    """
    L, M = Y.shape  # has to be L<=M
    if H is None:
        H = L
    alpha = L / M
    # Threshold constant from the analytic solution in [1].
    tauubar = 2.5129 * np.sqrt(alpha)
    # SVD of the input matrix, max rank of H
    # U, s, V = np.linalg.svd(Y)
    U, s, V = torch.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:H].T
    # Calculate residual: energy in the singular values discarded by the
    # rank cap H (zero when H == L).
    residual = 0.
    if H < L:
        # residual = np.sum(np.sum(Y**2)-np.sum(s**2))
        residual = torch.sum(np.sum(Y**2) - np.sum(s**2))
    # Estimation of the variance when sigma2 is unspecified
    if sigma2 is None:
        xubar = (1 + tauubar) * (1 + alpha / tauubar)
        eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
        # upper_bound = (np.sum(s**2)+residual)/(L*M)
        # lower_bound = np.max(
        #     [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
        upper_bound = (torch.sum(s**2) + residual) / (L * M)
        lower_bound = torch.max(torch.stack(
            [s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))
        scale = 1.  # /lower_bound
        s = s * np.sqrt(scale)
        residual = residual * scale
        lower_bound = lower_bound * scale
        upper_bound = upper_bound * scale
        # Minimize the free energy over sigma2 within the analytic bounds.
        sigma2_opt = minimize_scalar(
            EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
            bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
            method='Bounded')
        sigma2 = sigma2_opt.x
    # Threshold gamma term: singular values at or below it are pruned.
    threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
    # pos = np.sum(s > threshold)
    pos = torch.sum(s > threshold)
    # Formula (15) from [2]: shrink the retained singular values.
    # d = torch.multiply(s[:pos]/2,
    #                    1-torch.divide(
    #                        torch.tensor((L+M)*sigma2, device=s.device),
    #                        s[:pos]**2) + torch.sqrt((1-torch.divide(
    #                            torch.tensor(
    #                                (L+M)*sigma2, device=s.device),
    #                            s[:pos]**2))**2 -
    #     4*L*M*sigma2**2/s[:pos]**4))
    # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
    #     (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
    d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2
                         + torch.sqrt((1 -
                                       (L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))
    # Computation of the posterior (kept from the reference implementation
    # but unused; see the docstring).
    # post = {}
    # post['ma'] = np.zeros(H)
    # post['mb'] = np.zeros(H)
    # post['sa2'] = np.zeros(H)
    # post['sb2'] = np.zeros(H)
    # post['cacb'] = np.zeros(H)
    # tau = np.multiply(d, s[:pos])/(M*sigma2)
    # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
    # post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
    # post['mb'][:pos] = np.sqrt(np.divide(d, delta))
    # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
    # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
    # post['sigma2'] = sigma2
    # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
    #                  (residual+np.sum(s**2))/sigma2 + np.sum(
    #     M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))
    return U[:, :pos], torch.diag(d), V[:, :pos]  # , post
def EVBsigma2(sigma2, L, M, s, residual, xubar):
    """Free energy of the EVB model as a function of the noise variance.

    Minimized by ``EVBMF`` (via ``scipy.optimize.minimize_scalar``) to
    estimate ``sigma2`` when the caller does not supply it.

    sigma2:   candidate noise variance
    L, M:     matrix dimensions (L <= M)
    s:        numpy array of the singular values of Y
    residual: energy of the singular values discarded by the rank cap
    xubar:    threshold separating the two analytic regimes
    """
    H = len(s)
    alpha = L / M
    # Normalized squared singular values, split at the regime threshold.
    scaled = s**2 / (M * sigma2)
    above = scaled[scaled > xubar]
    below = scaled[scaled <= xubar]
    tau_above = tau(above, alpha)
    # Accumulate the free-energy terms in the reference order.
    obj = np.sum(below - np.log(below))
    obj = obj + np.sum(above - tau_above)
    obj = obj + np.sum(np.log(np.divide(tau_above + 1, above)))
    obj = obj + alpha * np.sum(np.log(tau_above / alpha + 1))
    obj = obj + residual / (M * sigma2)
    obj = obj + (L - H) * np.log(sigma2)
    return obj
def phi0(x):
    """EVB free-energy term: phi0(x) = x - log(x) (element-wise)."""
    log_x = np.log(x)
    return x - log_x
def phi1(x, alpha):
    """EVB free-energy term built from tau(x, alpha).

    Evaluates ``tau`` once instead of three times; the result is
    identical since ``tau`` is deterministic.
    """
    t = tau(x, alpha)
    return np.log(t + 1) + alpha * np.log(t / alpha + 1) - t
def tau(x, alpha):
    """Larger root of t**2 - (x - (1 + alpha))*t + alpha = 0 (element-wise).

    Real only when (x - (1 + alpha))**2 >= 4*alpha; callers restrict x to
    that regime.
    """
    shifted = x - (1 + alpha)
    return 0.5 * (shifted + np.sqrt(shifted**2 - 4 * alpha))
class Metrics:
def __init__(self, params, linear: bool = False) -> None:
'''
parameters: list of torch.nn.Module.parameters()
'''
self.params = params
self.history = list()
mask = list()
for param_idx, param in enumerate(params):
param_shape = param.shape
if not linear:
if len(param_shape) != 4:
mask.append(param_idx)
else:
if len(param_shape) != 4 and len(param_shape) != 2:
mask.append(param_idx)
self.mask = set(mask)
    def compute_low_rank(self,
                         tensor: torch.Tensor,
                         normalizer: float) -> Tuple[float, float, float]:
        """Low-rank metrics (rank, KG, condition) of a 2-D weight unfolding.

        Runs EVBMF on ``tensor`` (transposed first if rows > cols, since
        EVBMF expects L <= M) and derives:
          - rank: retained singular-value count / tensor_size[0]
          - KG: sum of retained singular values normalized by their max,
            then by tensor_size[0]
          - condition: largest / smallest retained singular value
        Returns (None, None, None) if the SVD inside EVBMF fails; the
        caller then falls back to the previous epoch's values.

        NOTE(review): the ``normalizer`` argument is never used -- the
        code divides by ``tensor_size[0]`` instead (see the inline
        "# normalizer" markers). Confirm which normalization is intended.
        """
        if tensor.requires_grad:
            # Metrics are read-only; drop the autograd graph.
            tensor = tensor.detach()
        try:
            # tensor_size is captured BEFORE any transpose below.
            tensor_size = tensor.shape
            if tensor_size[0] > tensor_size[1]:
                tensor = tensor.T
            U_approx, S_approx, V_approx = EVBMF(tensor)
        except RuntimeError:
            # SVD failure (e.g. non-convergence); signal "no metrics".
            return None, None, None
        rank = S_approx.shape[0] / tensor_size[0]  # normalizer
        low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
        if len(low_rank_eigen) != 0:
            condition = low_rank_eigen[0] / low_rank_eigen[-1]
            sum_low_rank_eigen = low_rank_eigen / \
                max(low_rank_eigen)
            sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
        else:
            # No singular value survived the EVB threshold.
            condition = 0
            sum_low_rank_eigen = 0
        KG = sum_low_rank_eigen / tensor_size[0]  # normalizer
        return rank, KG, condition
def KG(self, epoch: int) -> np.ndarray:
KG_list = list()
for i, (index, metric) in enumerate(self.history[epoch]):
if isinstance(metric, ConvLayerMetrics):
KG_list.append((metric.input_channel.KG
+ metric.output_channel.KG) / 2)
elif isinstance(metric, LayerMetrics):
KG_list.append(metric.KG)
return np.array(KG_list)
def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]]:
'''
Computes the knowledge gain (S) and mapping condition (condition)
'''
metrics: List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]] = list()
for layer_index, layer in enumerate(self.params):
if layer_index in self.mask:
metrics.append((layer_index, None))
continue
# if np.less(np.prod(layer.shape), 10_000):
# metrics.append((layer_index, None))
if len(layer.shape) == 4:
layer_tensor = layer.data
tensor_size = layer_tensor.shape
mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
mode_3_unfold = torch.reshape(
mode_3_unfold, [tensor_size[1], tensor_size[0]
* tensor_size[2] * tensor_size[3]])
mode_4_unfold = layer_tensor
mode_4_unfold = torch.reshape(
mode_4_unfold, [tensor_size[0], tensor_size[1]
* tensor_size[2] * tensor_size[3]])
in_rank, in_KG, in_condition = self.compute_low_rank(
mode_3_unfold, tensor_size[1])
if in_rank is None and in_KG is None and in_condition is None:
if len(self.history) > 0:
in_rank = self.history[-1][
layer_index][1].input_channel.rank
in_KG = self.history[-1][
layer_index][1].input_channel.KG
in_condition = self.history[-1][
layer_index][1].input_channel.condition
else:
in_rank = in_KG = in_condition = 0.
out_rank, out_KG, out_condition = self.compute_low_rank(
mode_4_unfold, tensor_size[0])
if out_rank is None and out_KG is None and out_condition is None:
if len(self.history) > 0:
out_rank = self.history[-1][
layer_index][1].output_channel.rank
out_KG = self.history[-1][
layer_index][1].output_channel.KG
out_condition = self.history[-1][
layer_index][1].output_channel.condition
else:
out_rank = out_KG = out_condition = 0.
metrics.append((layer_index, ConvLayerMetrics(
input_channel=LayerMetrics(
rank=in_rank,
KG=in_KG,
condition=in_condition),
output_channel=LayerMetrics(
rank=out_rank,
KG=out_KG,
condition=out_condition))))
elif len(layer.shape) == 2:
rank, KG, condition = self.compute_low_rank(
layer, layer.shape[0])
if rank is None and KG is None and condition is None:
if len(self.history) > 0:
rank = self.history[-1][layer_index][1].rank
KG = self.history[-1][layer_index][1].KG
condition = self.history[-1][layer_index][1].condition
else:
rank = KG = condition = 0.
metrics.append((layer_index, LayerMetrics(
rank=rank,
KG=KG,
condition=condition)))
else:
metrics.append((layer_index, None))
self.history.append(metrics)
return metrics
class Adas(Optimizer):
    """
    Vectorized SGD from torch.optim.SGD, extended with per-layer
    learning rates driven by the knowledge-gain metrics (AdaS).

    Args:
        params: a sequence whose first two items are SGD parameter
            groups (passed to the base Optimizer) and whose third is a
            dict with key 'all_params' holding every parameter for
            metric tracking.
        lr: initial learning rate for every layer (required).
        beta: AdaS momentum on the lr velocity, must be in [0, 1).
        step_size / gamma: optional step decay applied to the lr vector
            every ``step_size`` epochs.
        linear: forwarded to Metrics (track FC layers as well).
        momentum / dampening / weight_decay / nesterov: standard SGD
            hyperparameters.
    """
    def __init__(self,
                 params,
                 lr: float = required,
                 beta: float = 0.8,
                 step_size: int = None,
                 linear: bool = True,
                 gamma: float = 1,
                 momentum: float = 0,
                 dampening: float = 0,
                 weight_decay: float = 0,
                 nesterov: bool = False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening")
        # Only the first two entries of ``params`` are real param groups.
        super(Adas, self).__init__(params[:2], defaults)
        # Adas Specific stuff (not SGD)
        if np.less(beta, 0) or np.greater_equal(beta, 1):
            raise ValueError(f'Invalid beta: {beta}')
        if np.less(gamma, 0):
            raise ValueError(f'Invalid gamma: {gamma}')
        if step_size is not None:
            if np.less_equal(step_size, 0):
                raise ValueError(f'Invalid step_size: {step_size}')
        self.step_size = step_size
        self.gamma = gamma
        self.beta = beta
        self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear)
        # One learning rate per tracked parameter (masked entries reuse
        # a neighbour's rate; see epoch_step).
        self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
        self.velocity = np.zeros(
            len(self.metrics.params) - len(self.metrics.mask))
        # Layers whose KG has not yet moved away from zero.
        self.not_ready = list(range(len(self.velocity)))
        self.init_lr = lr
        self.zeta = 1.
        self.KG = 0.
    def __setstate__(self, state):
        super(Adas, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def epoch_step(self, epoch: int) -> None:
        """Recompute metrics and update the per-layer lr vector.

        Must be called once per epoch before the epoch's step() calls.
        """
        self.metrics()
        if epoch == 0:
            # Bootstrap: start every layer at the initial learning rate.
            velocity = self.init_lr * np.ones(len(self.velocity))
            self.KG = self.metrics.KG(epoch)
        else:
            KG = self.metrics.KG(epoch)
            velocity = KG - self.KG
            self.KG = KG
            # Iterate over a copy: the original iterated ``self.not_ready``
            # itself while calling .remove() on it, which silently skips
            # the element following each removal.
            for idx in list(self.not_ready):
                if np.isclose(KG[idx], 0.):
                    # Layer has not started learning yet: keep pushing it
                    # towards the initial learning rate.
                    velocity[idx] = self.init_lr - \
                        self.beta * self.velocity[idx]
                else:
                    self.not_ready.remove(idx)
        if self.step_size is not None:
            # Step decay of both the lr vector and the velocity scale.
            if epoch % self.step_size == 0 and epoch > 0:
                self.lr_vector *= self.gamma
                self.zeta *= self.gamma
        # lr velocities are clamped at zero (learning rates never negative).
        self.velocity = np.maximum(
            self.beta * self.velocity + self.zeta * velocity, 0.)
        count = 0
        for i in range(len(self.metrics.params)):
            if i in self.metrics.mask:
                # Untracked layer: inherit the previous layer's rate.
                self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
            else:
                self.lr_vector[i] = self.velocity[count]
                count += 1
    def step(self, closure: callable = None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        iteration_group = 0
        for group in self.param_groups:
            iteration_group += 1
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            # NOTE(review): p_index restarts at 0 for every group, so
            # lr_vector is indexed per-group position -- verify the lr
            # vector ordering matches the groups' parameter ordering.
            for p_index, p in enumerate(group['params']):
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(
                            d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        # Same value as the deprecated positional form
                        # ``d_p.add(momentum, buf)``: d_p + momentum * buf.
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                # p.data.add_(-group['lr'], d_p)
                p.data.add_(d_p, alpha=-self.lr_vector[p_index])
        return loss
| 36.314346 | 111 | 0.521815 | from __future__ import division
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
from typing import NamedTuple, List
from dataclasses import dataclass
from enum import Enum
from typing import Union, Tuple
from scipy.optimize import minimize_scalar
class LayerType(Enum):
CONV = 1
FC = 2
NON_CONV = 3
@dataclass
class LayerMetrics:
rank: float
KG: float
condition: float
@dataclass
class ConvLayerMetrics:
input_channel: LayerMetrics
output_channel: LayerMetrics
class LRMetrics(NamedTuple):
rank_velocity: List[float]
r_conv: List[float]
def EVBMF(Y, sigma2=None, H=None):
L, M = Y.shape
if H is None:
H = L
alpha = L / M
tauubar = 2.5129 * np.sqrt(alpha)
U, s, V = torch.svd(Y)
U = U[:, :H]
s = s[:H]
V = V[:H].T
residual = 0.
if H < L:
residual = torch.sum(np.sum(Y**2) - np.sum(s**2))
if sigma2 is None:
xubar = (1 + tauubar) * (1 + alpha / tauubar)
eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
upper_bound = (torch.sum(s**2) + residual) / (L * M)
lower_bound = torch.max(torch.stack(
[s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))
scale = 1.
s = s * np.sqrt(scale)
residual = residual * scale
lower_bound = lower_bound * scale
upper_bound = upper_bound * scale
sigma2_opt = minimize_scalar(
EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
method='Bounded')
sigma2 = sigma2_opt.x
threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
pos = torch.sum(s > threshold)
d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2
+ torch.sqrt((1 -
(L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))
return U[:, :pos], torch.diag(d), V[:, :pos]
def EVBsigma2(sigma2, L, M, s, residual, xubar):
H = len(s)
alpha = L / M
x = s**2 / (M * sigma2)
z1 = x[x > xubar]
z2 = x[x <= xubar]
tau_z1 = tau(z1, alpha)
term1 = np.sum(z2 - np.log(z2))
term2 = np.sum(z1 - tau_z1)
term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1)))
term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1))
obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2)
return obj
def phi0(x):
return x - np.log(x)
def phi1(x, alpha):
return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1
) - tau(x, alpha)
def tau(x, alpha):
return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha))
class Metrics:
def __init__(self, params, linear: bool = False) -> None:
self.params = params
self.history = list()
mask = list()
for param_idx, param in enumerate(params):
param_shape = param.shape
if not linear:
if len(param_shape) != 4:
mask.append(param_idx)
else:
if len(param_shape) != 4 and len(param_shape) != 2:
mask.append(param_idx)
self.mask = set(mask)
def compute_low_rank(self,
tensor: torch.Tensor,
normalizer: float) -> torch.Tensor:
if tensor.requires_grad:
tensor = tensor.detach()
try:
tensor_size = tensor.shape
if tensor_size[0] > tensor_size[1]:
tensor = tensor.T
U_approx, S_approx, V_approx = EVBMF(tensor)
except RuntimeError:
return None, None, None
rank = S_approx.shape[0] / tensor_size[0]
low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
if len(low_rank_eigen) != 0:
condition = low_rank_eigen[0] / low_rank_eigen[-1]
sum_low_rank_eigen = low_rank_eigen / \
max(low_rank_eigen)
sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
else:
condition = 0
sum_low_rank_eigen = 0
KG = sum_low_rank_eigen / tensor_size[0]
return rank, KG, condition
def KG(self, epoch: int) -> np.ndarray:
KG_list = list()
for i, (index, metric) in enumerate(self.history[epoch]):
if isinstance(metric, ConvLayerMetrics):
KG_list.append((metric.input_channel.KG
+ metric.output_channel.KG) / 2)
elif isinstance(metric, LayerMetrics):
KG_list.append(metric.KG)
return np.array(KG_list)
def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]]:
metrics: List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]] = list()
for layer_index, layer in enumerate(self.params):
if layer_index in self.mask:
metrics.append((layer_index, None))
continue
if len(layer.shape) == 4:
layer_tensor = layer.data
tensor_size = layer_tensor.shape
mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
mode_3_unfold = torch.reshape(
mode_3_unfold, [tensor_size[1], tensor_size[0]
* tensor_size[2] * tensor_size[3]])
mode_4_unfold = layer_tensor
mode_4_unfold = torch.reshape(
mode_4_unfold, [tensor_size[0], tensor_size[1]
* tensor_size[2] * tensor_size[3]])
in_rank, in_KG, in_condition = self.compute_low_rank(
mode_3_unfold, tensor_size[1])
if in_rank is None and in_KG is None and in_condition is None:
if len(self.history) > 0:
in_rank = self.history[-1][
layer_index][1].input_channel.rank
in_KG = self.history[-1][
layer_index][1].input_channel.KG
in_condition = self.history[-1][
layer_index][1].input_channel.condition
else:
in_rank = in_KG = in_condition = 0.
out_rank, out_KG, out_condition = self.compute_low_rank(
mode_4_unfold, tensor_size[0])
if out_rank is None and out_KG is None and out_condition is None:
if len(self.history) > 0:
out_rank = self.history[-1][
layer_index][1].output_channel.rank
out_KG = self.history[-1][
layer_index][1].output_channel.KG
out_condition = self.history[-1][
layer_index][1].output_channel.condition
else:
out_rank = out_KG = out_condition = 0.
metrics.append((layer_index, ConvLayerMetrics(
input_channel=LayerMetrics(
rank=in_rank,
KG=in_KG,
condition=in_condition),
output_channel=LayerMetrics(
rank=out_rank,
KG=out_KG,
condition=out_condition))))
elif len(layer.shape) == 2:
rank, KG, condition = self.compute_low_rank(
layer, layer.shape[0])
if rank is None and KG is None and condition is None:
if len(self.history) > 0:
rank = self.history[-1][layer_index][1].rank
KG = self.history[-1][layer_index][1].KG
condition = self.history[-1][layer_index][1].condition
else:
rank = KG = condition = 0.
metrics.append((layer_index, LayerMetrics(
rank=rank,
KG=KG,
condition=condition)))
else:
metrics.append((layer_index, None))
self.history.append(metrics)
return metrics
class Adas(Optimizer):
def __init__(self,
params,
lr: float = required,
beta: float = 0.8,
step_size: int = None,
linear: bool = True,
gamma: float = 1,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(Adas, self).__init__(params[:2], defaults)
if np.less(beta, 0) or np.greater_equal(beta, 1):
raise ValueError(f'Invalid beta: {beta}')
if np.less(gamma, 0):
raise ValueError(f'Invalid gamma: {gamma}')
if step_size is not None:
if np.less_equal(step_size, 0):
raise ValueError(f'Invalid step_size: {step_size}')
self.step_size = step_size
self.gamma = gamma
self.beta = beta
self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear)
self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
self.velocity = np.zeros(
len(self.metrics.params) - len(self.metrics.mask))
self.not_ready = list(range(len(self.velocity)))
self.init_lr = lr
self.zeta = 1.
self.KG = 0.
def __setstate__(self, state):
super(Adas, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def epoch_step(self, epoch: int) -> None:
self.metrics()
if epoch == 0:
velocity = self.init_lr * np.ones(len(self.velocity))
self.KG = self.metrics.KG(epoch)
else:
KG = self.metrics.KG(epoch)
velocity = KG - self.KG
self.KG = KG
for idx in self.not_ready:
if np.isclose(KG[idx], 0.):
velocity[idx] = self.init_lr - \
self.beta * self.velocity[idx]
else:
self.not_ready.remove(idx)
if self.step_size is not None:
if epoch % self.step_size == 0 and epoch > 0:
self.lr_vector *= self.gamma
self.zeta *= self.gamma
self.velocity = np.maximum(
self.beta * self.velocity + self.zeta * velocity, 0.)
count = 0
for i in range(len(self.metrics.params)):
if i in self.metrics.mask:
self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
else:
self.lr_vector[i] = self.velocity[count]
count += 1
def step(self, closure: callable = None):
loss = None
if closure is not None:
loss = closure()
iteration_group = 0
for group in self.param_groups:
iteration_group += 1
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p_index, p in enumerate(group['params']):
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(
d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(d_p, alpha=-self.lr_vector[p_index])
return loss
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.