text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """Tell whether the peer has dropped this connection.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    On platforms such as AppEngine (no usable ``poll``/``select``) this
    always returns ``False`` so the platform can recycle connections
    transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine has no .sock attribute
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True

    if poll:
        # Preferred implementation on platforms that provide poll().
        watcher = poll()
        watcher.register(sock, POLLIN)
        for descriptor, _event in watcher.poll(0.0):
            if descriptor == sock.fileno():
                # Either data is buffered (bad), or the connection is dropped.
                return True
        return None  # nothing readable: connection looks alive

    if not select:  # Platform-specific: AppEngine
        return False
    try:
        # A zero-timeout select: a readable socket here means EOF or
        # unexpected buffered data -- either way, treat as dropped.
        return select([sock], [], [], 0.0)[0]
    except socket.error:
        return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function.  *address* is a 2-tuple ``(host, port)``.  The
    optional *timeout* sets the socket timeout before connecting; when
    omitted, the global default from :func:`getdefaulttimeout` applies.
    *source_address*, when given, must be a ``(host, port)`` tuple the
    socket binds to before connecting; a host of ``''`` or port ``0``
    tells the OS to use the default.  *socket_options* is an iterable of
    argument tuples applied via ``setsockopt`` before connecting.
    """
    host, port = address
    if host.startswith('['):
        # Bare IPv6 literals arrive bracketed, e.g. '[::1]'.
        host = host.strip('[]')
    err = None

    # Try every resolved address family/sockaddr until one connects.
    for family, socktype, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)

            # If provided, set socket level options before connecting.
            # This is the only addition urllib3 makes to this function.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sockaddr)
            return sock
        except socket.error as exc:
            # Remember the last failure and keep trying other addresses.
            err = exc
            if sock is not None:
                sock.close()
                sock = None

    if err is not None:
        raise err
    raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
    """Apply an iterable of ``setsockopt`` argument tuples to *sock*.

    ``None`` (the default from :func:`create_connection`) means "no
    options"; each entry is splatted unchanged into ``setsockopt``.
    """
    if options is None:
        return
    for opt_args in options:
        sock.setsockopt(*opt_args)
|
{
"content_hash": "631d499262db7e26a81104233b021f74",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 33.54,
"alnum_prop": 0.6243291592128801,
"repo_name": "angela278/UPDream",
"id": "9ed5a64c1d5bcc8aba69ba3da9e868385b66aa3c",
"size": "3354",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/urllib3/util/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "316823"
},
{
"name": "HTML",
"bytes": "29817"
},
{
"name": "JavaScript",
"bytes": "185876"
},
{
"name": "Python",
"bytes": "3205458"
}
],
"symlink_target": ""
}
|
import pkg_resources
import re
import optparse
import os
import stat
import sys
import imp
from paste.script import command
from paste.script import create_distro
# Used to derive a legal Python package name from a project name:
# first strip any leading characters until a lowercase letter,
# then drop every character that is not [a-z0-9_].
beginning_letter = re.compile(r"^[^a-z]*")
valid_only = re.compile(r"[^a-z0-9_]")
class MokshaQuickstartCommand(command.Command):
    """Create new Moksha components.

    Create a new Moksha App with this command.

    Example usage::

        $ paster moksha yourproject
    """
    # NOTE: the docstring doubles as CLI help via `summary`/`usage` below.
    version = pkg_resources.get_distribution('moksha').version
    max_args = 3
    min_args = 0
    summary = __doc__.splitlines()[0]
    usage = '\n' + __doc__
    group_name = "Moksha"

    # Template-variable defaults; overridden by CLI options and prompts.
    name = None
    package = None
    svn_repository = None
    templates = "moksha.master"
    #dry_run = False
    #no_input = False
    livewidget = False
    connector = False
    consumer = False
    stream = False
    topic = 'moksha.test'

    parser = command.Command.standard_parser(quiet=True)
    # Rebound to a plain OptionParser so usage/version text is fully
    # controlled here (the standard parser above is discarded).
    parser = optparse.OptionParser(
        usage="%prog moksha [options] [project name]",
        version="%prog " + version)
    parser.add_option("-l", "--livewidget",
                      help='Create an example Moksha LiveWidget',
                      action="store_true", dest="livewidget")
    parser.add_option("-c", "--connector",
                      help='Create an example Moksha Connector',
                      action="store_true", dest="connector")
    parser.add_option("-u", "--consumer",
                      help='Create an example Moksha Consumer',
                      action="store_true", dest="consumer")
    parser.add_option("-C", "--controller",
                      help='Create an example Moksha Controller',
                      action="store_true", dest="controller")
    parser.add_option("-P", "--producer",
                      help='Create an example Moksha Producer',
                      action="store_true", dest="stream")
    parser.add_option("-p", "--package",
                      help="package name for the code",
                      dest="package")
    parser.add_option("-t", "--topic",
                      help="The Moksha topic to utilize",
                      dest="topic")
    #parser.add_option("--dry-run",
    #                  help="dry run (don't actually do anything)",
    #                  action="store_true", dest="dry_run")
    #parser.add_option("--noinput",
    #                  help="no input (don't ask any questions)",
    #                  action="store_true", dest="no_input")

    def command(self):
        """Quickstarts the new project."""
        # Copy parsed option values (livewidget, package, topic, ...)
        # onto the instance so the template variables below see them.
        self.__dict__.update(self.options.__dict__)

        # Project name: first positional argument, else prompt until set.
        if self.args:
            self.name = self.args[0]
        while not self.name:
            self.name = raw_input("Enter project name: ")

        # Derive a legal package name from the project name (see the
        # module-level regexes), then let the user confirm/override it.
        package = self.name.lower()
        package = beginning_letter.sub("", package)
        package = valid_only.sub("", package)
        if package:
            self.package = package
        else:
            self.package = None
        while not self.package:
            self.package = raw_input(
                "Enter package name [%s]: " % package).strip() or package

        # Normalize: setuptools-safe project name with underscores, and an
        # RPM-friendly name with dots replaced by dashes.
        self.name = pkg_resources.safe_name(self.name).replace('-', '_')
        self.rpm_name = self.package.replace('.', '-')

        # Refuse to collide with an installed distribution of the same name.
        env = pkg_resources.Environment()
        if self.name.lower() in env:
            print 'The name "%s" is already in use by' % self.name,
            for dist in env[self.name]:
                print dist
            return

        # Refuse to shadow an importable package.
        try:
            if imp.find_module(self.package):
                print 'The package name "%s" is already in use' % self.package
                return
        except ImportError:
            pass

        if os.path.exists(self.name):
            print 'A directory called "%s" already exists. Exiting.' % self.name
            return

        # Build the 'paster create' invocation: the base template plus any
        # optional component templates, followed by template variables.
        command = create_distro.CreateDistroCommand("create")
        cmd_args = ['--template=moksha.master']
        if self.livewidget:
            cmd_args.append('--template=moksha.livewidget')
        if self.stream:
            cmd_args.append('--template=moksha.stream')
        if self.consumer:
            cmd_args.append('--template=moksha.consumer')
        if self.connector:
            cmd_args.append('--template=moksha.connector')
        if self.controller:
            cmd_args.append('--template=moksha.controller')
        #if self.dry_run:
        #    cmd_args.append("--simulate")
        #    cmd_args.append("-q")
        cmd_args.append(self.name)
        cmd_args.append("livewidget=%s" % self.livewidget)
        cmd_args.append("connector=%s" % self.connector)
        cmd_args.append("consumer=%s" % self.consumer)
        cmd_args.append("controller=%s" % self.controller)
        cmd_args.append("stream=%s" % self.stream)
        cmd_args.append("package=%s" % self.package)
        # e.g. package 'foo' -> widget_name 'FooWidget' (the '%s' is
        # interpolated first, then the suffix is concatenated).
        cmd_args.append("widget_name=%s" % self.package.title() + 'Widget')
        cmd_args.append("stream_name=%s" % self.package.title() + 'Stream')
        cmd_args.append("consumer_name=%s" % self.package.title() + 'Consumer')
        cmd_args.append("connector_name=%s" % self.package.title() + 'Connector')
        cmd_args.append("controller_name=%s" % self.package.title() + 'Controller')
        command.run(cmd_args)
        #if not self.dry_run:
        #    os.chdir(self.name)
        #    startscript = "start-%s.py" % self.package
        #    if os.path.exists(startscript):
        #        oldmode = os.stat(startscript).st_mode
        #        os.chmod(startscript, oldmode | stat.S_IXUSR)
        #    os.system('paver egg_info')
        #    # dirty hack to allow "empty" dirs
        #    for base, path, files in os.walk("./"):
        #        for file in files:
        #            if file == "empty":
        #                os.remove(os.path.join(base, file))
|
{
"content_hash": "10e722287b8c29b6bde040462d0132e0",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 83,
"avg_line_length": 34.8719512195122,
"alnum_prop": 0.5679314565483476,
"repo_name": "ralphbean/moksha",
"id": "45afa902a85e9efc79557bc78ec4c094d0231e45",
"size": "5719",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "moksha/commands/quickstart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
}
|
import hashlib
import hmac
import posixpath
import httplib2
from oslo_cache import core as cache_core
from oslo_config import cfg
from oslo_log import log as logging
import six
import six.moves.urllib.parse as urlparse
import webob
from ec2api import context as ec2_context
from ec2api import exception
from ec2api.i18n import _
from ec2api.metadata import api
from ec2api import utils
from ec2api import wsgi
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'ec2api.api.auth')

# Options controlling how metadata requests are proxied to the Nova
# metadata service; registered under the [metadata] group below.
metadata_opts = [
    cfg.StrOpt('nova_metadata_ip',
               default='127.0.0.1',
               help=_("IP address used by Nova metadata server.")),
    cfg.IntOpt('nova_metadata_port',
               default=8775,
               help=_("TCP Port used by Nova metadata server.")),
    cfg.StrOpt('nova_metadata_protocol',
               default='http',
               choices=['http', 'https'],
               help=_("Protocol to access nova metadata, http or https")),
    cfg.BoolOpt('nova_metadata_insecure',
                default=False,
                help=_("Allow to perform insecure SSL (https) requests to "
                       "nova metadata")),
    cfg.StrOpt('auth_ca_cert',
               help=_("Certificate Authority public key (CA cert) "
                      "file for ssl")),
    cfg.StrOpt('nova_client_cert',
               default='',
               help=_("Client certificate for nova metadata api server.")),
    cfg.StrOpt('nova_client_priv_key',
               default='',
               help=_("Private key of client certificate.")),
    cfg.StrOpt('metadata_proxy_shared_secret',
               default='',
               help=_('Shared secret to sign instance-id request'),
               secret=True),
    cfg.IntOpt("cache_expiration",
               default=15,
               min=0,
               help=_('This option is the time (in seconds) to cache metadata. '
                      'Increasing this setting should improve response times of the '
                      'metadata API when under heavy load. Higher values may '
                      'increase memory usage, and result in longer times for host '
                      'metadata changes to take effect.'))
]

CONF.register_opts(metadata_opts, group='metadata')
cache_core.configure(CONF)
class MetadataRequestHandler(wsgi.Application):
    """Serve metadata.

    WSGI application that answers EC2-style metadata requests: it
    resolves the requesting instance from metadata-proxy headers
    (Neutron or NSX format), serves EC2 metadata items itself, and
    proxies '/openstack' paths to the Nova metadata service.
    """

    def __init__(self):
        # Cache is optional; without it every request hits the backends.
        if not CONF.cache.enabled:
            LOG.warning("Metadata doesn't use cache. "
                        "Configure cache options to use cache.")
        self.cache_region = cache_core.create_region()
        cache_core.configure_cache_region(CONF, self.cache_region)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Dispatch one metadata request and return a webob response."""
        LOG.debug('Request: %s', req)

        # Normalize the path and split it into tokens; a leading 'ec2'
        # component is an alias for the root and is dropped.
        path = req.path_info
        if path == '' or path[0] != '/':
            path = '/' + path
        path = posixpath.normpath(path)
        path_tokens = path.split('/')[1:]
        if path_tokens[0] == 'ec2':
            path_tokens = path_tokens[1:]

        if path_tokens == ['']:
            # Bare '/' (or '/ec2'): list the supported API versions.
            resp = api.get_version_list()
            return self._add_response_data(req.response, resp)

        try:
            requester = self._get_requester(req)
            if path_tokens[0] == 'openstack':
                # OpenStack-format metadata is served by Nova; proxy it.
                return self._proxy_request(req, requester)

            resp = self._get_metadata(path_tokens, requester)
            return self._add_response_data(req.response, resp)

        except exception.EC2MetadataNotFound:
            return webob.exc.HTTPNotFound()
        except Exception:
            # Catch-all boundary: log the traceback, hide details from
            # the client.
            LOG.exception("Unexpected error.")
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))

    def _proxy_request(self, req, requester):
        """Forward the request to the Nova metadata service and map the
        upstream HTTP status onto a webob response."""
        headers = self._build_proxy_request_headers(requester)
        nova_ip_port = '%s:%s' % (CONF.metadata.nova_metadata_ip,
                                  CONF.metadata.nova_metadata_port)
        url = urlparse.urlunsplit((
            CONF.metadata.nova_metadata_protocol,
            nova_ip_port,
            req.path_info,
            req.query_string,
            ''))

        h = httplib2.Http(
            ca_certs=CONF.metadata.auth_ca_cert,
            disable_ssl_certificate_validation=(
                CONF.metadata.nova_metadata_insecure)
        )
        # Optional TLS client certificate towards Nova.
        if (CONF.metadata.nova_client_cert and
                CONF.metadata.nova_client_priv_key):
            h.add_certificate(CONF.metadata.nova_client_priv_key,
                              CONF.metadata.nova_client_cert,
                              nova_ip_port)
        resp, content = h.request(url, method=req.method, headers=headers,
                                  body=req.body)

        if resp.status == 200:
            LOG.debug(str(resp))
            req.response.content_type = resp['content-type']
            req.response.body = content
            return req.response
        elif resp.status == 403:
            LOG.warning(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.'
            )
            return webob.exc.HTTPForbidden()
        elif resp.status == 400:
            return webob.exc.HTTPBadRequest()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
            )
            LOG.warning(msg)
            return webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)

    def _build_proxy_request_headers(self, requester):
        """Build the headers Nova uses to identify/authorize the instance."""
        signature = self._sign_instance_id(requester['os_instance_id'])
        return {
            'X-Forwarded-For': requester['private_ip'],
            'X-Instance-ID': requester['os_instance_id'],
            'X-Tenant-ID': requester['project_id'],
            'X-Instance-ID-Signature': signature,
        }

    def _sign_instance_id(self, instance_id):
        """Return HMAC-SHA256(shared proxy secret, instance_id) as hex."""
        return hmac.new(
            CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"),
            instance_id.encode(),
            hashlib.sha256).hexdigest()

    def _get_requester(self, req):
        """Identify the requesting instance from metadata-proxy headers.

        Supports both the NSX format (X-Metadata-Provider) and the
        Neutron metadata-proxy format; returns a dict with
        'os_instance_id', 'project_id' and 'private_ip'.
        """
        if req.headers.get('X-Metadata-Provider'):
            provider_id, remote_ip = self._unpack_nsx_request(req)
            context = ec2_context.get_os_admin_context()
            os_instance_id, project_id = (
                api.get_os_instance_and_project_id_by_provider_id(
                    context, provider_id, remote_ip))
        else:
            os_instance_id, project_id, remote_ip = (
                self._unpack_neutron_request(req))
        return {'os_instance_id': os_instance_id,
                'project_id': project_id,
                'private_ip': remote_ip}

    def _unpack_neutron_request(self, req):
        """Validate Neutron metadata-proxy headers and their signature."""
        os_instance_id = req.headers.get('X-Instance-ID')
        project_id = req.headers.get('X-Tenant-ID')
        signature = req.headers.get('X-Instance-ID-Signature')
        remote_ip = req.headers.get('X-Forwarded-For')

        if not remote_ip:
            raise exception.EC2MetadataInvalidAddress()

        # A non-string value here means the header appeared more than once.
        if os_instance_id is None:
            msg = _('X-Instance-ID header is missing from request.')
        elif project_id is None:
            msg = _('X-Tenant-ID header is missing from request.')
        elif not isinstance(os_instance_id, six.string_types):
            msg = _('Multiple X-Instance-ID headers found within request.')
        elif not isinstance(project_id, six.string_types):
            msg = _('Multiple X-Tenant-ID headers found within request.')
        else:
            msg = None

        if msg:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        self._validate_signature(signature, os_instance_id, remote_ip)
        return os_instance_id, project_id, remote_ip

    def _unpack_nsx_request(self, req):
        """Validate NSX metadata-proxy headers; the signature is only
        checked when a shared secret is configured."""
        remote_address = req.headers.get('X-Forwarded-For')
        if remote_address is None:
            msg = _('X-Forwarded-For is missing from request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        provider_id = req.headers.get('X-Metadata-Provider')
        if provider_id is None:
            msg = _('X-Metadata-Provider is missing from request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # X-Forwarded-For may be a comma-separated chain; the first entry
        # is the original (instance) address.
        remote_ip = remote_address.split(',')[0]

        if CONF.metadata.metadata_proxy_shared_secret:
            signature = req.headers.get('X-Metadata-Provider-Signature')
            self._validate_signature(signature, provider_id, remote_ip)

        return provider_id, remote_ip

    def _validate_signature(self, signature, requester_id, requester_ip):
        """Reject the request unless *signature* matches the expected
        HMAC-SHA256 of *requester_id* under the shared secret."""
        expected_signature = hmac.new(
            CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"),
            requester_id.encode(),
            hashlib.sha256).hexdigest()

        # constant_time_compare avoids leaking the signature via timing.
        if not (signature and
                utils.constant_time_compare(expected_signature, signature)):
            LOG.warning('X-Instance-ID-Signature: %(signature)s does '
                        'not match the expected value: '
                        '%(expected_signature)s for id: '
                        '%(requester_id)s. Request From: '
                        '%(requester_ip)s',
                        {'signature': signature,
                         'expected_signature': expected_signature,
                         'requester_id': requester_id,
                         'requester_ip': requester_ip})
            msg = _('Invalid proxy request signature.')
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _get_metadata(self, path_tokens, requester):
        """Fetch an EC2 metadata item for the requesting instance."""
        context = ec2_context.get_os_admin_context()
        # NOTE(ft): substitute project_id for context to instance's one.
        # It's needed for correct describe and auto update DB operations.
        # It doesn't affect operations via OpenStack's clients because
        # these clients use auth_token field only
        context.project_id = requester['project_id']
        return api.get_metadata_item(context, path_tokens,
                                     requester['os_instance_id'],
                                     requester['private_ip'],
                                     self.cache_region)

    def _add_response_data(self, response, data):
        """Attach *data* to *response* as text or raw bytes and mark it
        text/plain."""
        if isinstance(data, six.text_type):
            response.text = data
        else:
            response.body = data
        response.content_type = 'text/plain'
        return response
|
{
"content_hash": "67cacfe9570788697e7a82722abcd576",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 78,
"avg_line_length": 40.210332103321036,
"alnum_prop": 0.5747453427548866,
"repo_name": "stackforge/ec2-api",
"id": "aea8b9d8631469756be01ecb53faa3f1f3407d24",
"size": "11486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ec2api/metadata/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1702647"
},
{
"name": "Shell",
"bytes": "29508"
}
],
"symlink_target": ""
}
|
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git keyword strings substituted by 'git archive'.

    These placeholders are replaced by git during git-archive.
    setup.py/versioneer.py grep for the variable names, so each must be
    defined on a line of its own.  _version.py just calls this function.
    """
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Bare container for versioneer configuration; attributes are
    assigned by get_config()."""
    pass
def get_config():
    """Build the VersioneerConfig for this project.

    These values are filled in when 'setup.py versioneer' creates
    _version.py.
    """
    cfg = VersioneerConfig()
    for attr, value in [("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", ""),
                        ("parentdir_prefix", ""),
                        ("versionfile_source", "pysdn/_version.py"),
                        ("verbose", False)]:
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-extraction method does not apply,
    so the caller should fall through to the next method."""
    pass
# Mapping of VCS name -> full _version.py source template; appears unused
# in this embedded copy but is part of the standard versioneer layout.
LONG_VERSION_PY = {}
# HANDLERS[vcs][method] -> handler function, populated by the
# @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory: file the decorated function in the module-level
    HANDLERS table under HANDLERS[vcs][method]."""
    def decorate(f):
        # Register f and hand it back unchanged.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable executable from *commands* with *args*.

    Each candidate in *commands* is tried in order; a candidate that is
    simply not installed (ENOENT) is skipped.  Returns the stripped
    stdout text on success, or None when no candidate could be started,
    launching failed for another reason, or the command exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            launch_err = sys.exc_info()[1]
            if launch_err.errno == errno.ENOENT:
                # Candidate not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % display)
                print(launch_err)
            return None
        break
    else:
        # Every candidate was missing.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string, e.g. 'proj-1.2'; strip
    the prefix and report the remainder as the version.

    :raises NotThisMethod: when the directory name lacks the prefix.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the expansion keywords from a _version.py file by regexp.

    The code embedded in _version.py can just fetch the value of these
    keywords.  When used from setup.py, we don't want to import
    _version.py, so we do it with a regexp instead.  This function is not
    used from _version.py.

    :param versionfile_abs: absolute path of the _version.py to scan
    :return: dict containing whichever of "refnames"/"full" were found
        (empty when the file is missing or unreadable)
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if reading or the
        # regex scan raises (the previous version leaked it on that path).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: return whatever was collected (nothing).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive version information from expanded git-archive keywords.

    :param keywords: dict from get_keywords()/git_get_keywords()
    :param tag_prefix: prefix identifying version tags (may be "")
    :param verbose: print progress diagnostics when True
    :raises NotThisMethod: when keywords are absent or still unexpanded
        (i.e. we are not inside a git-archive tarball)
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Still the literal placeholder: not an exported tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Collect version 'pieces' by running git in the source checkout.

    This runs 'git' from the root of the source tree.  This only gets
    called if the git-archive 'subst' keywords were *not* expanded, and
    _version.py hasn't already been rewritten with a short version
    string, meaning we're inside a checked out source tree.

    :return: dict with keys "long", "short", "closest-tag", "distance",
        "dirty" and "error" -- consumed by the render_* functions
    :raises NotThisMethod: when there is no .git directory or git fails
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")

    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False is used when launching, so the actual executable
        # names must be given on Windows.
        GITS = ["git.cmd", "git.exe"]

    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    return pieces
def plus_or_dot(pieces):
    """Return the separator for the next PEP 440 local-version segment.

    '+' opens the local version; when the closest tag already contains a
    '+', additional segments are joined with '.'.
    """
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local-version style).

    A tagged build that is later dirtied renders as TAG+0.gHEX.dirty.
    Exception: with no tags at all (git describe produced only HEX), the
    result is 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    moved = pieces["distance"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: no tags
        version = "0+untagged.%d.g%s" % (moved, pieces["short"])
        if dirty:
            version += ".dirty"
        return version
    version = tag
    if moved or dirty:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (moved, pieces["short"])
        if dirty:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] -- no -dirty marker.

    Exception: with no tags at all, renders 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree.  Note that .dev0 sorts backwards (a
    dirty tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyway.

    Exception: with no tags at all, renders 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1: no tags
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    version += "+g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; the ".dev0" marks a dirty tree.

    Exception: with no tags at all, renders 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], matching
    'git describe --tags --dirty --always'.

    Exception: with no tags at all, renders HEX[-dirty] (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], matching
    'git describe --tags --dirty --always --long'.

    The distance/hash portion is unconditional here.
    Exception: with no tags at all, renders HEX[-dirty] (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag + "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render *pieces* into the final version dict using *style*.

    An error recorded in pieces short-circuits to an "unknown" version.
    An empty or "default" style means "pep440"; an unrecognized style
    raises ValueError.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Compute version information, trying each extraction method in turn.

    I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE.  If we
    have __file__, we can work backwards from there to the root.  Some
    py2exe/bbfreeze/non-CPython implementations don't do __file__, in
    which case we can only use expanded keywords.
    """
    cfg = get_config()
    verbose = cfg.verbose

    # Method 1: git-archive keyword expansion (works in exported tarballs).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__: frozen interpreter and keywords were unexpanded.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}

    # Method 2: ask git directly (works in a checked-out source tree).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Method 3: parse the version out of the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
|
{
"content_hash": "f50bf827ea2c6d603bda3fd073cc7a1c",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 79,
"avg_line_length": 33.94444444444444,
"alnum_prop": 0.5711292962356792,
"repo_name": "brocade/pysdn",
"id": "0e6cf35524f1aaa014e3d1b521d307936641058d",
"size": "15750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysdn/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "529708"
}
],
"symlink_target": ""
}
|
import urllib2
from bs4 import BeautifulSoup
def crawl(url):
    """Scrape an episode-resource listing page into (title, episodes).

    :param url: page URL; the page is expected to contain
        'dl.resource-list' blocks of 'dd.resource-item' entries --
        presumably the yayaxz.com markup; confirm against the live site.
    :return: tuple (page title, list of dicts with keys 'title',
        'ed2k' and 'magnet'; link values may be None when absent)
    """
    soup = BeautifulSoup(urllib2.urlopen(url).read())
    # Page titles look like "Show Name | site suffix"; keep the first part.
    title = soup.title.text.split('|', 1)[0].strip()
    episodes = []
    resources = soup.select('dl.resource-list')
    for resource in resources:
        resource_items = resource.select('dd.resource-item')
        for resource_item in resource_items:
            infos = resource_item.select('a')
            # The first two anchors carry the episode title text; the
            # rest are download links.
            title_infos = infos[:2]
            episode_title = ''.join([a.text.strip()
                                     for a in title_infos])
            link_infos = infos[2:]
            ed2k = None
            magnet = None
            for link_info in link_infos:
                # data-download-type: '1' -> ed2k link, '2' -> magnet link.
                type = link_info.get('data-download-type', None)
                link = link_info.get('href', None)
                if type == '1':
                    ed2k = link
                elif type == '2':
                    magnet = link
            episodes.append({
                'title': episode_title,
                'ed2k': ed2k,
                'magnet': magnet
            })
    return title, episodes
|
{
"content_hash": "cafe0044d295f01fd30d208dd336ef9c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 64,
"avg_line_length": 34.875,
"alnum_prop": 0.49372759856630827,
"repo_name": "exherb/tvee",
"id": "4b40eec41cf67f6e96fedddefaa524630d0f56e4",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvee/crawler/yayaxz_com.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "11586"
},
{
"name": "JavaScript",
"bytes": "15440"
},
{
"name": "Python",
"bytes": "29449"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from rest_framework import status as http_status
import itertools
from flask import request
from framework import status
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_signed
from website.archiver import ARCHIVER_SUCCESS, ARCHIVER_FAILURE
from addons.base.views import DOWNLOAD_ACTIONS
from website import settings
from osf.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project, must_be_contributor_or_public,
must_have_permission, must_be_contributor_and_not_group_member,
must_not_be_registration, must_be_registration,
must_not_be_retracted_registration
)
from osf import features
from osf.models import Identifier, RegistrationSchema
from website.project.utils import serialize_node
from osf.utils.permissions import ADMIN
from website import language
from website.ember_osf_web.decorators import ember_flag_is_active
from website.project import signals as project_signals
from website.project.metadata.schemas import _id_to_name
from website import util
from website.project.metadata.utils import serialize_meta_schema
from website.project.model import has_anonymous_link
from website.archiver.decorators import fail_archive_on_error
from .node import _view_project
from api.waffle.utils import flag_is_active
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
def node_register_page(auth, node, **kwargs):
    """Display the registration metadata for a registration.

    Non-registrations are bounced to the project's registrations page.
    :return: serialized Node
    """
    if not node.is_registration:
        status.push_status_message(
            'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
            trust=False,
            id='redirected_to_registrations',
        )
        return redirect(node.web_url_for('node_registrations', view='draft', _guid=True))
    return serialize_node(node, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_redirect(auth, node, **kwargs):
    """Redirect to the GET view for this node's retraction page."""
    target_url = node.web_url_for('node_registration_retraction_get', _guid=True)
    return redirect(target_url)
@must_be_valid_project
@must_not_be_retracted_registration
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_get(auth, node, **kwargs):
    """Prepares node object for registration retraction page.

    :return: serialized Node to be retracted
    :raises: 400: BAD_REQUEST if registration already pending retraction
    """
    def _bad_request(reason):
        # Standard 400 payload used by both validation failures below.
        return HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
            'message_short': 'Invalid Request',
            'message_long': reason
        })

    if not node.is_registration:
        raise _bad_request('Withdrawal of non-registrations is not permitted.')
    if node.is_pending_retraction:
        raise _bad_request('This registration is already pending withdrawal.')
    return serialize_node(node, auth, primary=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_post(auth, node, **kwargs):
    """Handles retraction of public registrations

    :param auth: Authentication object for User
    :return: Redirect URL for successful POST
    :raises: HTTPError 400 if the node is not a parent registration or is
        already pending withdrawal; 403 if the model layer rejects the
        retraction.
    """
    if node.is_pending_retraction:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
            'message_short': 'Invalid Request',
            'message_long': 'This registration is already pending withdrawal'
        })
    if not node.is_registration:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
            'message_short': 'Invalid Request',
            'message_long': 'Withdrawal of non-registrations is not permitted.'
        })
    # Only top-level (parent) registrations may be withdrawn.
    if node.root_id != node.id:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
            'message_short': 'Invalid Request',
            'message_long': 'Withdrawal of non-parent registrations is not permitted.'
        })
    # Guard against a missing or non-JSON request body: get_json() may
    # return None, which would make data.get(...) raise AttributeError.
    data = request.get_json() or {}
    try:
        node.retract_registration(auth.user, data.get('justification', None))
        node.save()
        node.retraction.ask(node.get_active_contributors_recursive(unique_users=True))
    except NodeStateError as err:
        raise HTTPError(http_status.HTTP_403_FORBIDDEN, data=dict(message_long=str(err)))
    return {'redirectUrl': node.web_url_for('view_project')}
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
@ember_flag_is_active(features.EMBER_REGISTRATION_FORM_DETAIL)
def node_register_template_page(auth, node, metaschema_id, **kwargs):
    """Render a registration's metadata under the requested schema.

    :param metaschema_id: RegistrationSchema ``_id``; legacy URLs may pass a
        schema name instead, which is resolved to the newest matching schema.
    :raises: HTTPError 404 if no schema matches `metaschema_id`.
    """
    if flag_is_active(request, features.EMBER_REGISTRIES_DETAIL_PAGE):
        # Registration meta page obviated during redesign
        return redirect(node.url)
    if node.is_registration and bool(node.registered_schema):
        try:
            meta_schema = RegistrationSchema.objects.get(_id=metaschema_id)
        except RegistrationSchema.DoesNotExist:
            # backwards compatability for old urls, lookup by name
            meta_schema = RegistrationSchema.objects.filter(name=_id_to_name(metaschema_id)).order_by('-schema_version').first()
            if not meta_schema:
                raise HTTPError(http_status.HTTP_404_NOT_FOUND, data={
                    'message_short': 'Invalid schema name',
                    'message_long': 'No registration schema with that name could be found.'
                })
        ret = _view_project(node, auth, primary=True)
        my_meta = serialize_meta_schema(meta_schema)
        if has_anonymous_link(node, auth):
            # Strip anonymized questions. Rebuild each page's question list
            # instead of deleting while enumerating: a `del` during
            # enumeration shifts indices and skips the element that follows
            # every deletion, so consecutive anonymized questions survived.
            for schema_page in my_meta['schema']['pages']:
                schema_page['questions'] = [
                    question for question in schema_page['questions']
                    if question['title'] not in settings.ANONYMIZED_TITLES
                ]
        # Return the pruned serialization. The previous code re-serialized
        # the schema here, which discarded the anonymization done above.
        ret['node']['registered_schema'] = my_meta
        return ret
    else:
        status.push_status_message(
            'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
            trust=False,
            id='redirected_to_registrations',
        )
        return redirect(node.web_url_for('node_registrations', view=kwargs.get('template'), _guid=True))
@must_be_valid_project  # returns project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_not_be_registration
def project_before_register(auth, node, **kwargs):
    """Returns prompt informing user that addons, if any, won't be registered."""
    # TODO: Avoid generating HTML code in Python; all HTML should be in display layer
    messages = {
        'full': {
            'addons': set(),
            'message': 'The content and version history of <strong>{0}</strong> will be copied to the registration.',
        },
        'partial': {
            'addons': set(),
            'message': 'The current version of the content in <strong>{0}</strong> will be copied to the registration, but version history will be lost.'
        },
        'none': {
            'addons': set(),
            'message': 'The contents of <strong>{0}</strong> cannot be registered at this time, and will not be included as part of this registration.',
        },
    }
    errors = {}
    # Flatten the addons of this node plus all primary descendants.
    all_nodes = itertools.chain([node], node.get_descendants_recursive(primary_only=True))
    per_node_addons = [n.get_addons() for n in all_nodes]
    for addon in itertools.chain.from_iterable(per_node_addons):
        if not addon.complete:
            continue
        archive_errors = getattr(addon, 'archive_errors', None)
        error = archive_errors() if archive_errors else None
        if error:
            errors[addon.config.short_name] = error
            continue
        short_name = addon.config.short_name
        # Bucket the addon by how well it can be archived.
        if short_name in settings.ADDONS_ARCHIVABLE:
            bucket = settings.ADDONS_ARCHIVABLE[short_name]
        else:
            bucket = 'none'
        messages[bucket]['addons'].add(addon.config.full_name)
    prompts = [
        entry['message'].format(util.conjunct(entry['addons']))
        for entry in messages.values() if entry['addons']
    ]
    if node.has_pointers_recursive:
        prompts.append(
            language.BEFORE_REGISTER_HAS_POINTERS.format(
                category=node.project_or_component
            )
        )
    return {
        'prompts': prompts,
        'errors': list(errors.values())
    }
def osf_admin_change_status_identifier(node):
    """Request a DOI metadata refresh, but only if the node has a DOI."""
    doi_value = node.get_identifier_value('doi')
    if doi_value:
        node.request_identifier_update(category='doi')
def get_referent_by_identifier(category, value):
    """Look up identifier by `category` and `value` and redirect to its referent
    if found.

    :raises: HTTPError 404 if no identifier matches or its referent has no URL.
    """
    try:
        identifier = Identifier.objects.get(category=category, value=value)
    except Identifier.DoesNotExist:
        raise HTTPError(http_status.HTTP_404_NOT_FOUND)
    referent_url = identifier.referent.url
    if not referent_url:
        raise HTTPError(http_status.HTTP_404_NOT_FOUND)
    return redirect(referent_url)
@fail_archive_on_error
@must_be_signed
@must_be_registration
def registration_callbacks(node, payload, *args, **kwargs):
    """Record archiver progress for a provider from a storage callback."""
    if payload.get('action', None) in DOWNLOAD_ACTIONS:
        # Download events do not change archive state.
        return {'status': 'success'}
    errors = payload.get('errors')
    src_provider = payload['source']['provider']
    if errors:
        node.archive_job.update_target(
            src_provider,
            ARCHIVER_FAILURE,
            errors=errors,
        )
    else:
        if src_provider == 'dataverse':
            # Dataverse requires two separate targets, one for draft files
            # and one for published files; disambiguate using the trailing
            # "(...)"-wrapped label in the destination name.
            label = payload['destination']['name'].split(' ')[-1]
            src_provider += '-' + label.lstrip('(').rstrip(')').strip()
        node.archive_job.update_target(
            src_provider,
            ARCHIVER_SUCCESS,
        )
    project_signals.archive_callback.send(node)
|
{
"content_hash": "b4c894c3a1ef2ff6f3dcfa45dac9ce9b",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 166,
"avg_line_length": 39.77307692307692,
"alnum_prop": 0.6703413596363988,
"repo_name": "baylee-d/osf.io",
"id": "812ecc66d8b93021a5bc90cad161a2a52f302584",
"size": "10365",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "website/project/views/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from Exporter import Exporter
from ClassExporter import ClassExporter
from FunctionExporter import FunctionExporter
from EnumExporter import EnumExporter
from VarExporter import VarExporter
from infos import *
from declarations import *
import os.path
import exporters
import MultipleCodeUnit
#==============================================================================
# HeaderExporter
#==============================================================================
class HeaderExporter(Exporter):
    '''Export every declaration found in a given header file.'''

    def __init__(self, info, parser_tail=None):
        Exporter.__init__(self, info, parser_tail)

    def WriteInclude(self, codeunit):
        # A header exporter emits no include of its own.
        pass

    def IsInternalName(self, name):
        '''Return True if *name* looks like an internal compiler symbol.'''
        return name.startswith('_')

    def Export(self, codeunit, exported_names):
        header = os.path.normpath(self.parser_header)
        for decl in self.declarations:
            # Only handle declarations that actually live in this header
            # and do not look compiler-generated.
            if os.path.abspath(decl.location[0]) != header:
                continue
            if self.IsInternalName(decl.name):
                continue
            self.HandleDeclaration(decl, codeunit, exported_names)

    def HandleDeclaration(self, decl, codeunit, exported_names):
        '''Route *decl* to the exporter class that knows how to emit it.

        Creates a suitable info object, instantiates the exporter, sets its
        declarations and appends it to the list of exporters.
        '''
        exporter_class = {
            Class: ClassExporter,
            Enumeration: EnumExporter,
            Function: FunctionExporter,
            Variable: VarExporter,
        }.get(type(decl))
        if exporter_class is not None:
            self.HandleExporter(decl, exporter_class, codeunit, exported_names)

    def HandleExporter(self, decl, exporter_type, codeunit, exported_names):
        if decl.incomplete:
            # Only complete declarations can be exported.
            return
        info = self.info[decl.name]
        info.name = decl.FullName()
        info.include = self.info.include
        exporter = exporter_type(info)
        exporter.SetDeclarations(self.declarations)
        exporter.SetParsedHeader(self.parser_header)
        # Multiple-code-unit mode needs the interface file as well.
        if isinstance(codeunit, MultipleCodeUnit.MultipleCodeUnit):
            codeunit.SetCurrent(self.interface_file, exporter.Name())
        else:
            codeunit.SetCurrent(exporter.Name())
        exporter.GenerateCode(codeunit, exported_names)

    def Name(self):
        return self.info.include
|
{
"content_hash": "762175e173ab26b6601c3a862f36aa61",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 37.8051948051948,
"alnum_prop": 0.5984197870147716,
"repo_name": "DD-L/deel.boost.python",
"id": "9b3cce2f33672b98068fc84143a151c5e64343d6",
"size": "3146",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "origin/libs/python/pyste/src/Pyste/HeaderExporter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8496"
},
{
"name": "C++",
"bytes": "2095256"
},
{
"name": "CSS",
"bytes": "8530"
},
{
"name": "HTML",
"bytes": "2181446"
},
{
"name": "Python",
"bytes": "629736"
}
],
"symlink_target": ""
}
|
# Sphinx build configuration for the python-openstackclient documentation.
import os
import sys
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# NOTE(blk-u): Path for our Sphinx extension, remove when
# https://launchpad.net/bugs/1260495 is fixed.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'oslosphinx',
              'ext.apidoc',
              ]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack Command Line Client'
copyright = u'2012-2013 OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version_info = pbr.version.VersionInfo('python-openstackclient')
#
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['openstackclient.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme_path = ["."]
#html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCommandLineClientdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
# .
latex_documents = [
    ('index', 'OpenStackCommandLineClient.tex',
     u'OpenStack Command Line Client Documentation',
     u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        'man/openstack',
        'openstack',
        u'OpenStack Command Line Client',
        [u'OpenStack contributors'],
        1,
    ),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'OpenStackCommandLineClient',
     u'OpenStack Command Line Client Documentation',
     u'OpenStack', 'OpenStackCommandLineClient',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "fc8cc148c470c5cfc672106e416d0e5b",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 88,
"avg_line_length": 31.96062992125984,
"alnum_prop": 0.6957378664695738,
"repo_name": "BjoernT/python-openstackclient",
"id": "f4434ec1547cc9ab847ac3f6ee102b44b71db9af",
"size": "8560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1588452"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import *
from direct.directnotify.DirectNotifyGlobal import *
from direct.showbase import Loader
from toontown.toontowngui import ToontownLoadingScreen
from toontown.dna.DNAParser import *
class ToontownLoader(Loader.Loader):
    """Loader that drives the Toontown loading screen while assets load.

    Wraps the various load* calls so the loading screen ticks as work
    happens, and tracks "bulk load" blocks (begin/end/abort) for progress
    reporting.
    """

    # Minimum wall-clock seconds between loading-screen ticks.
    TickPeriod = 0.2

    def __init__(self, base):
        Loader.Loader.__init__(self, base)
        # Name of the bulk block currently in progress, or None.
        self.inBulkBlock = None
        self.blockName = None
        self.loadingScreen = ToontownLoadingScreen.ToontownLoadingScreen()
        return

    def destroy(self):
        self.loadingScreen.destroy()
        del self.loadingScreen
        Loader.Loader.destroy(self)

    def loadDNAFile(self, dnastore, filename):
        # Delegates to the module-level DNAParser loadDNAFile.
        return loadDNAFile(dnastore, filename)

    def beginBulkLoad(self, name, label, range, gui, tipCategory, zoneId):
        """Start a named bulk-load block and show the loading screen.

        Note: `range` shadows the builtin, but the name is part of the
        public signature and kept for callers using keywords.
        """
        self._loadStartT = globalClock.getRealTime()
        Loader.Loader.notify.info("starting bulk load of block '%s'" % name)
        if self.inBulkBlock:
            # Nested blocks are not supported; keep the current one.
            Loader.Loader.notify.warning("Tried to start a block ('%s'), but am already in a block ('%s')" % (name, self.blockName))
            return None
        self.inBulkBlock = 1
        self._lastTickT = globalClock.getRealTime()
        self.blockName = name
        self.loadingScreen.begin(range, label, gui, tipCategory, zoneId)
        return None

    def endBulkLoad(self, name):
        """Finish the named bulk-load block and log load statistics."""
        if not self.inBulkBlock:
            Loader.Loader.notify.warning("Tried to end a block ('%s'), but not in one" % name)
            return None
        if name != self.blockName:
            Loader.Loader.notify.warning("Tried to end a block ('%s'), other then the current one ('%s')" % (name, self.blockName))
            return None
        self.inBulkBlock = None
        expectedCount, loadedCount = self.loadingScreen.end()
        now = globalClock.getRealTime()
        Loader.Loader.notify.info("At end of block '%s', expected %s, loaded %s, duration=%s" % (self.blockName,
                                                                                                 expectedCount,
                                                                                                 loadedCount,
                                                                                                 now - self._loadStartT))
        return

    def abortBulkLoad(self):
        """Cancel the current bulk-load block, if any."""
        if self.inBulkBlock:
            Loader.Loader.notify.info("Aborting block ('%s')" % self.blockName)
            self.inBulkBlock = None
            self.loadingScreen.abort()
        return

    def tick(self):
        """Advance the loading screen at most once per TickPeriod."""
        if self.inBulkBlock:
            now = globalClock.getRealTime()
            if now - self._lastTickT > self.TickPeriod:
                self._lastTickT += self.TickPeriod
                self.loadingScreen.tick()
                try:
                    base.cr.considerHeartbeat()
                except Exception:
                    # Best-effort: base.cr may not exist yet; never let a
                    # heartbeat failure kill a load. Narrowed from a bare
                    # `except:`, which also swallowed SystemExit and
                    # KeyboardInterrupt.
                    pass

    def loadModel(self, *args, **kw):
        ret = Loader.Loader.loadModel(self, *args, **kw)
        if ret:
            # Pre-upload the model to the GSG so it doesn't hitch later.
            gsg = base.win.getGsg()
            if gsg:
                ret.prepareScene(gsg)
            self.tick()
        return ret

    def loadFont(self, *args, **kw):
        ret = Loader.Loader.loadFont(self, *args, **kw)
        self.tick()
        return ret

    def loadTexture(self, texturePath, alphaPath = None, okMissing = False):
        ret = Loader.Loader.loadTexture(self, texturePath, alphaPath, okMissing=okMissing)
        self.tick()
        if alphaPath:
            # Loading an alpha channel counts as a second unit of work.
            self.tick()
        return ret

    def loadSfx(self, soundPath):
        ret = Loader.Loader.loadSfx(self, soundPath)
        self.tick()
        return ret

    def loadMusic(self, soundPath):
        ret = Loader.Loader.loadMusic(self, soundPath)
        self.tick()
        return ret
|
{
"content_hash": "c9beba664e44e9d84223407a435a7101",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 132,
"avg_line_length": 35.17,
"alnum_prop": 0.6044924651691783,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "253219f1ee4a7a46ecdd595ddbd510b7e725783e",
"size": "3517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/toonbase/ToontownLoader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
import collections
from collections import OrderedDict, abc
import functools
from io import StringIO
import itertools
import sys
from textwrap import dedent
from typing import FrozenSet, List, Optional, Set, Tuple, Type, Union
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib
from pandas.compat import PY36, raise_with_traceback
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
rewrite_axis_style_signature,
)
from pandas.util._validators import validate_axis_style_args, validate_bool_kwarg
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_extension_type,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_nested_list_like,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (
Index,
MultiIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.multi import maybe_droplevels
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.series import Series
from pandas.io.formats import console, format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes="index, columns",
klass="DataFrame",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel="",
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
    """
    Two-dimensional size-mutable, potentially heterogeneous tabular data
    structure with labeled axes (rows and columns). Arithmetic operations
    align on both row and column labels. Can be thought of as a dict-like
    container for Series objects. The primary pandas data structure.

    Parameters
    ----------
    data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
        Dict can contain Series, arrays, constants, or list-like objects.

        .. versionchanged:: 0.23.0
           If data is a dict, column order follows insertion-order for
           Python 3.6 and later.

        .. versionchanged:: 0.25.0
           If data is a list of dicts, column order follows insertion-order
           Python 3.6 and later.

    index : Index or array-like
        Index to use for resulting frame. Will default to RangeIndex if
        no indexing information part of input data and no index provided.
    columns : Index or array-like
        Column labels to use for resulting frame. Will default to
        RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
    dtype : dtype, default None
        Data type to force. Only a single dtype is allowed. If None, infer.
    copy : boolean, default False
        Copy data from inputs. Only affects DataFrame / 2d ndarray input.

    See Also
    --------
    DataFrame.from_records : Constructor from tuples, also record arrays.
    DataFrame.from_dict : From dicts of Series, arrays, or dicts.
    DataFrame.from_items : From sequence of (key, value) pairs.
    read_csv, pandas.read_table, pandas.read_clipboard.

    Examples
    --------
    Constructing DataFrame from a dictionary.

    >>> d = {'col1': [1, 2], 'col2': [3, 4]}
    >>> df = pd.DataFrame(data=d)
    >>> df
       col1  col2
    0     1     3
    1     2     4

    Notice that the inferred dtype is int64.

    >>> df.dtypes
    col1    int64
    col2    int64
    dtype: object

    To enforce a single dtype:

    >>> df = pd.DataFrame(data=d, dtype=np.int8)
    >>> df.dtypes
    col1    int8
    col2    int8
    dtype: object

    Constructing DataFrame from numpy ndarray:

    >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
    ...                    columns=['a', 'b', 'c'])
    >>> df2
       a  b  c
    0  1  2  3
    1  4  5  6
    2  7  8  9
    """
    @property
    def _constructor(self):
        # Class used to build new DataFrame results internally; subclasses
        # override this so that operations return the subclass.
        return DataFrame
_constructor_sliced = Series # type: Type[Series]
_deprecations = NDFrame._deprecations | frozenset(
["from_items"]
) # type: FrozenSet[str]
_accessors = set() # type: Set[str]
@property
def _constructor_expanddim(self):
raise NotImplementedError("Not supported for DataFrames!")
# ----------------------------------------------------------------------
# Constructors
    def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
        """
        Construct a DataFrame; parameters are documented on the class.

        The constructor dispatches on the type of ``data`` (BlockManager,
        dict, masked array, ndarray/Series/Index, generic iterable, or
        scalar) to the appropriate low-level init helper, then hands the
        resulting manager to ``NDFrame.__init__``.
        """
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)
        # Unwrap an existing DataFrame to its manager so it is handled by
        # the BlockManager branch below.
        if isinstance(data, DataFrame):
            data = data._data
        if isinstance(data, BlockManager):
            mgr = self._init_mgr(
                data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
            )
        elif isinstance(data, dict):
            mgr = init_dict(data, index, columns, dtype=dtype)
        elif isinstance(data, ma.MaskedArray):
            # Imported lazily: only needed for masked-array input.
            import numpy.ma.mrecords as mrecords

            # masked recarray
            if isinstance(data, mrecords.MaskedRecords):
                mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
            # a masked array
            else:
                mask = ma.getmaskarray(data)
                if mask.any():
                    # Upcast so masked slots can hold the fill value.
                    data, fill_value = maybe_upcast(data, copy=True)
                    data.soften_mask()  # set hardmask False if it was True
                    data[mask] = fill_value
                else:
                    data = data.copy()
            mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
        elif isinstance(data, (np.ndarray, Series, Index)):
            if data.dtype.names:
                # Structured/record array: each named field becomes a column.
                data_columns = list(data.dtype.names)
                data = {k: data[k] for k in data_columns}
                if columns is None:
                    columns = data_columns
                mgr = init_dict(data, index, columns, dtype=dtype)
            elif getattr(data, "name", None) is not None:
                # Named Series/Index: a single column keyed by its name.
                mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
            else:
                mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
        # For data is list-like, or Iterable (will consume into list)
        elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
            if not isinstance(data, abc.Sequence):
                data = list(data)
            if len(data) > 0:
                if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
                    # Row-oriented input: each element is one row.
                    if is_named_tuple(data[0]) and columns is None:
                        columns = data[0]._fields
                    arrays, columns = to_arrays(data, columns, dtype=dtype)
                    columns = ensure_index(columns)
                    # set the index
                    if index is None:
                        if isinstance(data[0], Series):
                            index = get_names_from_index(data)
                        elif isinstance(data[0], Categorical):
                            index = ibase.default_index(len(data[0]))
                        else:
                            index = ibase.default_index(len(data))
                    mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
                else:
                    mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
            else:
                # Empty iterable behaves like an empty dict.
                mgr = init_dict({}, index, columns, dtype=dtype)
        else:
            # Fallback: try to coerce to an ndarray; a 0-d result with both
            # axes supplied is broadcast to the full (len(index), len(columns))
            # shape, anything else is a constructor-usage error.
            try:
                arr = np.array(data, dtype=dtype, copy=copy)
            except (ValueError, TypeError) as e:
                exc = TypeError(
                    "DataFrame constructor called with "
                    "incompatible data and dtype: {e}".format(e=e)
                )
                raise_with_traceback(exc)
            if arr.ndim == 0 and index is not None and columns is not None:
                values = cast_scalar_to_array(
                    (len(index), len(columns)), data, dtype=dtype
                )
                mgr = init_ndarray(
                    values, index, columns, dtype=values.dtype, copy=False
                )
            else:
                raise ValueError("DataFrame constructor not properly called!")
        NDFrame.__init__(self, mgr, fastpath=True)
# ----------------------------------------------------------------------
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
    def _repr_fits_horizontal_(self, ignore_width=False):
        """
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns.

        In case of a non-interactive session, no boundaries apply.

        `ignore_width` is here so ipynb+HTML output can behave the way
        users expect. display.max_columns remains in effect.
        GH3541, GH3573
        """
        width, height = console.get_console_size()
        max_columns = get_option("display.max_columns")
        nb_columns = len(self.columns)
        # exceed max columns
        if (max_columns and nb_columns > max_columns) or (
            (not ignore_width) and width and nb_columns > (width // 2)
        ):
            return False
        # used by repr_html under IPython notebook or scripts ignore terminal
        # dims
        if ignore_width or not console.in_interactive_session():
            return True
        if get_option("display.width") is not None or console.in_ipython_frontend():
            # check at least the column row for excessive width
            max_rows = 1
        else:
            max_rows = get_option("display.max_rows")
        # when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
        # the width of the rendered repr
        buf = StringIO()
        # only care about the stuff we'll actually print out
        # and to_string on entire frame may be expensive
        d = self
        if not (max_rows is None):  # bounded rows: render only a prefix
            # min of two, where one may be None
            d = d.iloc[: min(max_rows, len(d))]
        else:
            # unlimited rows: rendering everything would be too expensive,
            # so assume it fits
            return True
        d.to_string(buf=buf)
        value = buf.getvalue()
        # widest rendered line decides whether the repr fits
        repr_width = max(len(l) for l in value.split("\n"))
        return repr_width < width
def _info_repr(self):
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self):
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"<", 1)
val = val.replace(">", r">", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
notebook=True,
)
else:
return None
    @Substitution(
        header="Write out the column names. If a list of strings "
        "is given, it is assumed to be aliases for the "
        "column names",
        col_space_type="int",
        col_space="The minimum width of each column",
    )
    @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
    def to_string(
        self,
        buf=None,
        columns=None,
        col_space=None,
        header=True,
        index=True,
        na_rep="NaN",
        formatters=None,
        float_format=None,
        sparsify=None,
        index_names=True,
        justify=None,
        max_rows=None,
        min_rows=None,
        max_cols=None,
        show_dimensions=False,
        decimal=".",
        line_width=None,
    ):
        """
        Render a DataFrame to a console-friendly tabular output.

        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.

        %(returns)s

        See Also
        --------
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
        >>> df = pd.DataFrame(d)
        >>> print(df.to_string())
           col1  col2
        0     1     4
        1     2     5
        2     3     6
        """
        # All formatting work is delegated to the DataFrameFormatter.
        formatter = fmt.DataFrameFormatter(
            self,
            buf=buf,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            formatters=formatters,
            float_format=float_format,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            header=header,
            index=index,
            min_rows=min_rows,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
            decimal=decimal,
            line_width=line_width,
        )
        formatter.to_string()
        # With no caller-supplied buffer, the formatter wrote to its own;
        # return the rendered text instead of writing it out.
        if buf is None:
            result = formatter.buf.getvalue()
            return result
# ----------------------------------------------------------------------
    @property
    def style(self):
        """
        Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.

        See Also
        --------
        io.formats.style.Styler
        """
        # Imported lazily to avoid the cost unless styling is used.
        from pandas.io.formats.style import Styler

        return Styler(self)
_shared_docs[
"items"
] = r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
%s
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"] % "Yields\n ------")
def items(self):
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
    @Appender(_shared_docs["items"] % "Returns\n    -------")
    def iteritems(self):
        # Alias of ``items`` kept for backwards compatibility.
        return self.items()
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
    def __len__(self):
        """
        Return the number of rows, i.e. the length of the index.
        """
        return len(self.index)
    def dot(self, other):
        """
        Compute the matrix multiplication between the DataFrame and other.

        This method computes the matrix product between the DataFrame and the
        values of an other Series, DataFrame or a numpy array.

        It can also be called using ``self @ other`` in Python >= 3.5.

        Parameters
        ----------
        other : Series, DataFrame or array-like
            The other object to compute the matrix product with.

        Returns
        -------
        Series or DataFrame
            If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other in a DataFrame or a np.array.

        See Also
        --------
        Series.dot: Similar method for Series.

        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order to
        compute the matrix multiplication. In addition, the column names of
        DataFrame and the index of other must contain the same values, as they
        will be aligned prior to the multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        Here we multiply a DataFrame with a Series.

        >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> s = pd.Series([1, 1, 2, 1])
        >>> df.dot(s)
        0    -4
        1     5
        dtype: int64

        Here we multiply a DataFrame with another DataFrame.

        >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(other)
            0   1
        0   1   4
        1   2   2

        Note that the dot method give the same result as @

        >>> df @ other
            0   1
        0   1   4
        1   2   2

        The dot method works also if other is an np.array.

        >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(arr)
            0   1
        0   1   4
        1   2   2

        Note how shuffling of the objects does not change the result.

        >>> s2 = s.reindex([1, 0, 2, 3])
        >>> df.dot(s2)
        0    -4
        1     5
        dtype: int64
        """
        if isinstance(other, (Series, DataFrame)):
            # Align self's columns with other's index before multiplying;
            # any label missing on either side is an alignment error.
            common = self.columns.union(other.index)
            if len(common) > len(self.columns) or len(common) > len(other.index):
                raise ValueError("matrices are not aligned")
            left = self.reindex(columns=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            # Array-like input is used positionally, without alignment.
            left = self
            lvals = self.values
            rvals = np.asarray(other)
        if lvals.shape[1] != rvals.shape[0]:
            raise ValueError(
                "Dot product shape mismatch, "
                "{s} vs {r}".format(s=lvals.shape, r=rvals.shape)
            )
        # Wrap the raw np.dot result in a type matching the input.
        if isinstance(other, DataFrame):
            return self._constructor(
                np.dot(lvals, rvals), index=left.index, columns=other.columns
            )
        elif isinstance(other, Series):
            return Series(np.dot(lvals, rvals), index=left.index)
        elif isinstance(rvals, (np.ndarray, Index)):
            result = np.dot(lvals, rvals)
            if result.ndim == 2:
                return self._constructor(result, index=left.index)
            else:
                return Series(result, index=left.index)
        else:  # pragma: no cover
            raise TypeError("unsupported type: {oth}".format(oth=type(other)))
    def __matmul__(self, other):
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        # Delegates entirely to DataFrame.dot.
        return self.dot(other)
    def __rmatmul__(self, other):
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        # other @ self is computed as (self.T @ other.T).T, reusing dot().
        return self.T.dot(np.transpose(other)).T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError(
"cannot use columns parameter with " "orient='columns'"
)
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy=False):
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogenous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
    def to_dict(self, orient="dict", into=dict):
        """
        Convert the DataFrame to a dictionary.

        The type of the key-value pairs can be customized with the parameters
        (see below).

        Parameters
        ----------
        orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
            Determines the type of the values of the dictionary.

            - 'dict' (default) : dict like {column -> {index -> value}}
            - 'list' : dict like {column -> [values]}
            - 'series' : dict like {column -> Series(values)}
            - 'split' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
            - 'records' : list like
              [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}

            Abbreviations are allowed. `s` indicates `series` and `sp`
            indicates `split`.

        into : class, default dict
            The collections.abc.Mapping subclass used for all Mappings
            in the return value. Can be the actual class or an empty
            instance of the mapping type you want. If you want a
            collections.defaultdict, you must pass it initialized.

            .. versionadded:: 0.21.0

        Returns
        -------
        dict, list or collections.abc.Mapping
            Return a collections.abc.Mapping object representing the DataFrame.
            The resulting transformation depends on the `orient` parameter.

        See Also
        --------
        DataFrame.from_dict: Create a DataFrame from a dictionary.
        DataFrame.to_json: Convert a DataFrame to JSON format.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2],
        ...                    'col2': [0.5, 0.75]},
        ...                   index=['row1', 'row2'])
        >>> df
              col1  col2
        row1     1  0.50
        row2     2  0.75
        >>> df.to_dict()
        {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}

        You can specify the return orientation.

        >>> df.to_dict('series')
        {'col1': row1    1
                 row2    2
        Name: col1, dtype: int64,
        'col2': row1    0.50
                row2    0.75
        Name: col2, dtype: float64}

        >>> df.to_dict('split')
        {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
         'data': [[1, 0.5], [2, 0.75]]}

        >>> df.to_dict('records')
        [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]

        >>> df.to_dict('index')
        {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}

        You can also specify the mapping type.

        >>> from collections import OrderedDict, defaultdict
        >>> df.to_dict(into=OrderedDict)
        OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
                     ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])

        If you want a `defaultdict`, you need to initialize it:

        >>> dd = defaultdict(list)
        >>> df.to_dict('records', into=dd)
        [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
         defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
        """
        # Duplicate column labels collide as dict keys, so later duplicates
        # silently overwrite earlier ones — warn up front.
        if not self.columns.is_unique:
            warnings.warn(
                "DataFrame columns are not unique, some " "columns will be omitted.",
                UserWarning,
                stacklevel=2,
            )
        # GH16122
        into_c = com.standardize_mapping(into)
        # Dispatch on a case-insensitive prefix of `orient`; note "sp" must
        # be tested before "s" so 'split' is not swallowed by 'series'.
        if orient.lower().startswith("d"):
            return into_c((k, v.to_dict(into)) for k, v in self.items())
        elif orient.lower().startswith("l"):
            return into_c((k, v.tolist()) for k, v in self.items())
        elif orient.lower().startswith("sp"):
            return into_c(
                (
                    ("index", self.index.tolist()),
                    ("columns", self.columns.tolist()),
                    (
                        "data",
                        [
                            list(map(com.maybe_box_datetimelike, t))
                            for t in self.itertuples(index=False, name=None)
                        ],
                    ),
                )
            )
        elif orient.lower().startswith("s"):
            return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items())
        elif orient.lower().startswith("r"):
            columns = self.columns.tolist()
            rows = (
                dict(zip(columns, row))
                for row in self.itertuples(index=False, name=None)
            )
            return [
                into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items())
                for row in rows
            ]
        elif orient.lower().startswith("i"):
            # Duplicate index labels would collide as outer keys.
            if not self.index.is_unique:
                raise ValueError("DataFrame index must be unique for orient='index'.")
            return into_c(
                (t[0], dict(zip(self.columns, t[1:])))
                for t in self.itertuples(name=None)
            )
        else:
            raise ValueError("orient '{o}' not understood".format(o=orient))
    def to_gbq(
        self,
        destination_table,
        project_id=None,
        chunksize=None,
        reauth=False,
        if_exists="fail",
        auth_local_webserver=False,
        table_schema=None,
        location=None,
        progress_bar=True,
        credentials=None,
        verbose=None,
        private_key=None,
    ):
        """
        Write a DataFrame to a Google BigQuery table.

        This function requires the `pandas-gbq package
        <https://pandas-gbq.readthedocs.io>`__.

        See the `How to authenticate with Google BigQuery
        <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
        guide for authentication instructions.

        Parameters
        ----------
        destination_table : str
            Name of table to be written, in the form ``dataset.tablename``.
        project_id : str, optional
            Google BigQuery Account project ID. Optional when available from
            the environment.
        chunksize : int, optional
            Number of rows to be inserted in each chunk from the dataframe.
            Set to ``None`` to load the whole dataframe at once.
        reauth : bool, default False
            Force Google BigQuery to re-authenticate the user. This is useful
            if multiple accounts are used.
        if_exists : str, default 'fail'
            Behavior when the destination table exists. Value can be one of:

            ``'fail'``
                If table exists, do nothing.
            ``'replace'``
                If table exists, drop it, recreate it, and insert data.
            ``'append'``
                If table exists, insert data. Create if does not exist.
        auth_local_webserver : bool, default False
            Use the `local webserver flow`_ instead of the `console flow`_
            when getting user credentials.

            .. _local webserver flow:
                http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
            .. _console flow:
                http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

            *New in version 0.2.0 of pandas-gbq*.
        table_schema : list of dicts, optional
            List of BigQuery table fields to which according DataFrame
            columns conform to, e.g. ``[{'name': 'col1', 'type':
            'STRING'},...]``. If schema is not provided, it will be
            generated according to dtypes of DataFrame columns. See
            BigQuery API documentation on available names of a field.

            *New in version 0.3.1 of pandas-gbq*.
        location : str, optional
            Location where the load job should run. See the `BigQuery locations
            documentation
            <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
            list of available locations. The location must match that of the
            target dataset.

            *New in version 0.5.0 of pandas-gbq*.
        progress_bar : bool, default True
            Use the library `tqdm` to show the progress bar for the upload,
            chunk by chunk.

            *New in version 0.5.0 of pandas-gbq*.
        credentials : google.auth.credentials.Credentials, optional
            Credentials for accessing Google APIs. Use this parameter to
            override default credentials, such as to use Compute Engine
            :class:`google.auth.compute_engine.Credentials` or Service
            Account :class:`google.oauth2.service_account.Credentials`
            directly.

            *New in version 0.8.0 of pandas-gbq*.

            .. versionadded:: 0.24.0
        verbose : bool, deprecated
            Deprecated in pandas-gbq version 0.4.0. Use the `logging module
            to adjust verbosity instead
            <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
        private_key : str, deprecated
            Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
            parameter and
            :func:`google.oauth2.service_account.Credentials.from_service_account_info`
            or
            :func:`google.oauth2.service_account.Credentials.from_service_account_file`
            instead.

            Service account private key in JSON format. Can be file path
            or string contents. This is useful for remote server
            authentication (eg. Jupyter/IPython notebook on remote host).

        See Also
        --------
        pandas_gbq.to_gbq : This function in the pandas-gbq library.
        read_gbq : Read a DataFrame from Google BigQuery.
        """
        from pandas.io import gbq

        # Thin delegation to pandas-gbq; the deprecated ``verbose`` and
        # ``private_key`` arguments are forwarded unchanged so pandas-gbq
        # can handle its own deprecation behavior.
        gbq.to_gbq(
            self,
            destination_table,
            project_id=project_id,
            chunksize=chunksize,
            reauth=reauth,
            if_exists=if_exists,
            auth_local_webserver=auth_local_webserver,
            table_schema=table_schema,
            location=location,
            progress_bar=progress_bar,
            credentials=credentials,
            verbose=verbose,
            private_key=private_key,
        )
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
    def to_records(
        self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None
    ):
        """
        Convert DataFrame to a NumPy record array.

        Index will be included as the first field of the record array if
        requested.

        Parameters
        ----------
        index : bool, default True
            Include index in resulting record array, stored in 'index'
            field or using the index label, if set.
        convert_datetime64 : bool, default None
            .. deprecated:: 0.23.0

            Whether to convert the index to datetime.datetime if it is a
            DatetimeIndex.
        column_dtypes : str, type, dict, default None
            .. versionadded:: 0.24.0

            If a string or type, the data type to store all columns. If
            a dictionary, a mapping of column names and indices (zero-indexed)
            to specific data types.
        index_dtypes : str, type, dict, default None
            .. versionadded:: 0.24.0

            If a string or type, the data type to store all index levels. If
            a dictionary, a mapping of index level names and indices
            (zero-indexed) to specific data types.

            This mapping is applied only if `index=True`.

        Returns
        -------
        numpy.recarray
            NumPy ndarray with the DataFrame labels as fields and each row
            of the DataFrame as entries.

        See Also
        --------
        DataFrame.from_records: Convert structured or record ndarray
            to DataFrame.
        numpy.recarray: An ndarray that allows field access using
            attributes, analogous to typed columns in a
            spreadsheet.

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
        ...                   index=['a', 'b'])
        >>> df
           A     B
        a  1  0.50
        b  2  0.75
        >>> df.to_records()
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])

        If the DataFrame index has no label then the recarray field name
        is set to 'index'. If the index has a label then this is used as the
        field name:

        >>> df.index = df.index.rename("I")
        >>> df.to_records()
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])

        The index can be excluded from the record array:

        >>> df.to_records(index=False)
        rec.array([(1, 0.5 ), (2, 0.75)],
                  dtype=[('A', '<i8'), ('B', '<f8')])

        Data types can be specified for the columns:

        >>> df.to_records(column_dtypes={"A": "int32"})
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])

        As well as for the index:

        >>> df.to_records(index_dtypes="<S2")
        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
                  dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])

        >>> index_dtypes = "<S{}".format(df.index.str.len().max())
        >>> df.to_records(index_dtypes=index_dtypes)
        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
                  dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
        """
        if convert_datetime64 is not None:
            warnings.warn(
                "The 'convert_datetime64' parameter is "
                "deprecated and will be removed in a future "
                "version",
                FutureWarning,
                stacklevel=2,
            )

        if index:
            if is_datetime64_any_dtype(self.index) and convert_datetime64:
                ix_vals = [self.index.to_pydatetime()]
            else:
                if isinstance(self.index, MultiIndex):
                    # array of tuples to numpy cols. copy copy copy
                    ix_vals = list(map(np.array, zip(*self.index.values)))
                else:
                    ix_vals = [self.index.values]

            # index level arrays go first, then one array per column
            arrays = ix_vals + [self[c]._internal_get_values() for c in self.columns]

            count = 0
            index_names = list(self.index.names)

            if isinstance(self.index, MultiIndex):
                # unnamed MultiIndex levels become level_0, level_1, ...
                for i, n in enumerate(index_names):
                    if n is None:
                        index_names[i] = "level_%d" % count
                        count += 1
            elif index_names[0] is None:
                # unnamed flat index is stored in the 'index' field
                index_names = ["index"]

            names = [str(name) for name in itertools.chain(index_names, self.columns)]
        else:
            arrays = [self[c]._internal_get_values() for c in self.columns]
            names = [str(c) for c in self.columns]
            index_names = []

        index_len = len(index_names)
        formats = []

        for i, v in enumerate(arrays):
            # NOTE: `index` (the bool parameter) is deliberately reused here
            # as the position within the combined index+columns array list;
            # the parameter is not needed past this point.
            index = i

            # When the names and arrays are collected, we
            # first collect those in the DataFrame's index,
            # followed by those in its columns.
            #
            # Thus, the total length of the array is:
            # len(index_names) + len(DataFrame.columns).
            #
            # This check allows us to see whether we are
            # handling a name / array in the index or column.
            if index < index_len:
                dtype_mapping = index_dtypes
                name = index_names[index]
            else:
                index -= index_len
                dtype_mapping = column_dtypes
                name = self.columns[index]

            # We have a dictionary, so we get the data type
            # associated with the index or column (which can
            # be denoted by its name in the DataFrame or its
            # position in DataFrame's array of indices or
            # columns, whichever is applicable.
            if is_dict_like(dtype_mapping):
                if name in dtype_mapping:
                    dtype_mapping = dtype_mapping[name]
                elif index in dtype_mapping:
                    dtype_mapping = dtype_mapping[index]
                else:
                    dtype_mapping = None

            # If no mapping can be found, use the array's
            # dtype attribute for formatting.
            #
            # A valid dtype must either be a type or
            # string naming a type.
            if dtype_mapping is None:
                formats.append(v.dtype)
            elif isinstance(dtype_mapping, (type, np.dtype, str)):
                formats.append(dtype_mapping)
            else:
                element = "row" if i < index_len else "column"
                msg = (
                    "Invalid dtype {dtype} specified for " "{element} {name}"
                ).format(dtype=dtype_mapping, element=element, name=name)
                raise ValueError(msg)

        return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
"""
Construct a DataFrame from a list of tuples.
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
DataFrame
"""
warnings.warn(
"from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning,
stacklevel=2,
)
keys, values = zip(*items)
if orient == "columns":
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError(
"With non-unique item names, passed "
"columns must be identical"
)
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError(
"The value in each (key, value) pair "
"must be an array, Series, or dict"
)
elif orient == "index":
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError(
"The value in each (key, value) pair "
"must be an array, Series, or dict"
)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
def to_sparse(self, fill_value=None, kind="block"):
"""
Convert to SparseDataFrame.
.. deprecated:: 0.25.0
Implement the sparse version of the DataFrame meaning that any data
matching a specific value it's omitted in the representation.
The sparse DataFrame allows for a more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to the its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse() # doctest: +SKIP
>>> sdf # doctest: +SKIP
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf) # doctest: +SKIP
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
warnings.warn(
"DataFrame.to_sparse is deprecated and will be removed "
"in a future version",
FutureWarning,
stacklevel=2,
)
from pandas.core.sparse.api import SparseDataFrame
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="SparseDataFrame")
return SparseDataFrame(
self._series,
index=self.index,
columns=self.columns,
default_kind=kind,
default_fill_value=fill_value,
)
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
def to_stata(
self,
fname,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
version=114,
convert_strl=None,
):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be used
read by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
kwargs = {}
if version not in (114, 117):
raise ValueError("Only formats 114 and 117 supported.")
if version == 114:
if convert_strl is not None:
raise ValueError(
"strl support is only available when using " "format 117"
)
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs["convert_strl"] = convert_strl
writer = statawriter(
fname,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs
)
writer.write_file()
def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(
self,
fname,
engine="auto",
compression="snappy",
index=None,
partition_cols=None,
**kwargs
):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset
Columns are partitioned in the order they are given
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(
self,
fname,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs
)
@Substitution(
header="Whether to print column labels, default True",
col_space_type="str or int",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
buf=buf,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
table_id=table_id,
render_links=render_links,
)
# TODO: a generic formatter wld b in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
# ----------------------------------------------------------------------
    def info(
        self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
    ):
        """
        Print a concise summary of a DataFrame.

        This method prints information about a DataFrame including
        the index dtype and column dtypes, non-null values and memory usage.

        Parameters
        ----------
        verbose : bool, optional
            Whether to print the full summary. By default, the setting in
            ``pandas.options.display.max_info_columns`` is followed.
        buf : writable buffer, defaults to sys.stdout
            Where to send the output. By default, the output is printed to
            sys.stdout. Pass a writable buffer if you need to further process
            the output.
        max_cols : int, optional
            When to switch from the verbose to the truncated output. If the
            DataFrame has more than `max_cols` columns, the truncated output
            is used. By default, the setting in
            ``pandas.options.display.max_info_columns`` is used.
        memory_usage : bool, str, optional
            Specifies whether total memory usage of the DataFrame
            elements (including the index) should be displayed. By default,
            this follows the ``pandas.options.display.memory_usage`` setting.

            True always show memory usage. False never shows memory usage.
            A value of 'deep' is equivalent to "True with deep introspection".
            Memory usage is shown in human-readable units (base-2
            representation). Without deep introspection a memory estimation is
            made based in column dtype and number of rows assuming values
            consume the same memory amount for corresponding dtypes. With deep
            memory introspection, a real memory usage calculation is performed
            at the cost of computational resources.
        null_counts : bool, optional
            Whether to show the non-null counts. By default, this is shown
            only if the frame is smaller than
            ``pandas.options.display.max_info_rows`` and
            ``pandas.options.display.max_info_columns``. A value of True always
            shows the counts, and False never shows the counts.

        Returns
        -------
        None
            This method prints a summary of a DataFrame and returns None.

        See Also
        --------
        DataFrame.describe: Generate descriptive statistics of DataFrame
            columns.
        DataFrame.memory_usage: Memory usage of DataFrame columns.

        Examples
        --------
        >>> int_values = [1, 2, 3, 4, 5]
        >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
        >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
        >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
        ...                    "float_col": float_values})
        >>> df
           int_col text_col  float_col
        0        1    alpha       0.00
        1        2     beta       0.25
        2        3    gamma       0.50
        3        4    delta       0.75
        4        5  epsilon       1.00

        Prints information of all columns:

        >>> df.info(verbose=True)
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 5 entries, 0 to 4
        Data columns (total 3 columns):
        int_col      5 non-null int64
        text_col     5 non-null object
        float_col    5 non-null float64
        dtypes: float64(1), int64(1), object(1)
        memory usage: 248.0+ bytes

        Prints a summary of columns count and its dtypes but not per column
        information:

        >>> df.info(verbose=False)
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 5 entries, 0 to 4
        Columns: 3 entries, int_col to float_col
        dtypes: float64(1), int64(1), object(1)
        memory usage: 248.0+ bytes

        Pipe output of DataFrame.info to buffer instead of sys.stdout, get
        buffer content and writes to a text file:

        >>> import io
        >>> buffer = io.StringIO()
        >>> df.info(buf=buffer)
        >>> s = buffer.getvalue()
        >>> with open("df_info.txt", "w",
        ...           encoding="utf-8") as f:  # doctest: +SKIP
        ...     f.write(s)
        260

        The `memory_usage` parameter allows deep introspection mode, specially
        useful for big DataFrames and fine-tune memory optimization:

        >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
        >>> df = pd.DataFrame({
        ...     'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
        ...     'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
        ...     'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
        ... })
        >>> df.info()
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 1000000 entries, 0 to 999999
        Data columns (total 3 columns):
        column_1    1000000 non-null object
        column_2    1000000 non-null object
        column_3    1000000 non-null object
        dtypes: object(3)
        memory usage: 22.9+ MB

        >>> df.info(memory_usage='deep')
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 1000000 entries, 0 to 999999
        Data columns (total 3 columns):
        column_1    1000000 non-null object
        column_2    1000000 non-null object
        column_3    1000000 non-null object
        dtypes: object(3)
        memory usage: 188.8 MB
        """
        if buf is None:  # pragma: no cover
            buf = sys.stdout

        # All output lines are accumulated here and flushed once at the end.
        lines = []

        lines.append(str(type(self)))
        lines.append(self.index._summary())

        if len(self.columns) == 0:
            # Nothing more to describe for an empty frame.
            lines.append("Empty {name}".format(name=type(self).__name__))
            fmt.buffer_put_lines(buf, lines)
            return

        cols = self.columns

        # hack
        if max_cols is None:
            max_cols = get_option("display.max_info_columns", len(self.columns) + 1)

        max_rows = get_option("display.max_info_rows", len(self) + 1)

        if null_counts is None:
            # Counting non-nulls is expensive, so only do it for small frames
            # unless explicitly requested.
            show_counts = (len(self.columns) <= max_cols) and (len(self) < max_rows)
        else:
            show_counts = null_counts
        exceeds_info_cols = len(self.columns) > max_cols

        def _verbose_repr():
            # One line per column: name, optional non-null count, dtype.
            lines.append("Data columns (total %d columns):" % len(self.columns))
            space = max(len(pprint_thing(k)) for k in self.columns) + 4
            counts = None

            tmpl = "{count}{dtype}"
            if show_counts:
                counts = self.count()
                if len(cols) != len(counts):  # pragma: no cover
                    raise AssertionError(
                        "Columns must equal counts "
                        "({cols:d} != {counts:d})".format(
                            cols=len(cols), counts=len(counts)
                        )
                    )
                tmpl = "{count} non-null {dtype}"

            dtypes = self.dtypes
            for i, col in enumerate(self.columns):
                dtype = dtypes.iloc[i]
                col = pprint_thing(col)

                count = ""
                if show_counts:
                    count = counts.iloc[i]

                lines.append(
                    _put_str(col, space) + tmpl.format(count=count, dtype=dtype)
                )

        def _non_verbose_repr():
            # Truncated output: just a one-line summary of the columns.
            lines.append(self.columns._summary(name="Columns"))

        def _sizeof_fmt(num, size_qualifier):
            # returns size in human readable format
            for x in ["bytes", "KB", "MB", "GB", "TB"]:
                if num < 1024.0:
                    return "{num:3.1f}{size_q} " "{x}".format(
                        num=num, size_q=size_qualifier, x=x
                    )
                num /= 1024.0
            return "{num:3.1f}{size_q} {pb}".format(
                num=num, size_q=size_qualifier, pb="PB"
            )

        # Choose verbose vs. truncated output: explicit `verbose` wins,
        # otherwise fall back to the column-count threshold.
        if verbose:
            _verbose_repr()
        elif verbose is False:  # specifically set to False, not nesc None
            _non_verbose_repr()
        else:
            if exceeds_info_cols:
                _non_verbose_repr()
            else:
                _verbose_repr()

        counts = self._data.get_dtype_counts()
        dtypes = ["{k}({kk:d})".format(k=k[0], kk=k[1]) for k in sorted(counts.items())]
        lines.append("dtypes: {types}".format(types=", ".join(dtypes)))

        if memory_usage is None:
            memory_usage = get_option("display.memory_usage")
        if memory_usage:
            # append memory usage of df to display
            size_qualifier = ""
            if memory_usage == "deep":
                deep = True
            else:
                # size_qualifier is just a best effort; not guaranteed to catch
                # all cases (e.g., it misses categorical data even with object
                # categories)
                deep = False
                if "object" in counts or self.index._is_memory_usage_qualified():
                    size_qualifier = "+"
            mem_usage = self.memory_usage(index=True, deep=deep).sum()
            lines.append(
                "memory usage: {mem}\n".format(
                    mem=_sizeof_fmt(mem_usage, size_qualifier)
                )
            )
        fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.000000+0.000000j 1 True
1 1 1.0 1.000000+0.000000j 1 True
2 1 1.0 1.000000+0.000000j 1 True
3 1 1.0 1.000000+0.000000j 1 True
4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5216
"""
result = Series(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = Series(self.index.memory_usage(deep=deep), index=["Index"]).append(
result
)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super().transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(
ovals, index=index, columns=com._unpickle_array(ocols), copy=False
)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Indexing Methods
    def _ixs(self, i: int, axis: int = 0):
        """
        Return the i-th row (axis=0) or i-th column (axis=1) as a Series.
        Parameters
        ----------
        i : int
            Positional index along ``axis``.
        axis : int
            0 for a row, otherwise a column.
        Notes
        -----
        If slice passed, the resulting data will be a view.
        """
        # irow
        if axis == 0:
            label = self.index[i]
            new_values = self._data.fast_xs(i)
            # if we are a copy, mark as such
            # NOTE(review): ``base is None`` means fast_xs allocated a fresh
            # array (no view on the original data) — presumably the
            # mixed-dtype case; confirm against fast_xs.
            copy = isinstance(new_values, np.ndarray) and new_values.base is None
            result = self._constructor_sliced(
                new_values,
                index=self.columns,
                name=self.index[i],
                dtype=new_values.dtype,
            )
            result._set_is_copy(self, copy=copy)
            return result
        # icol
        else:
            label = self.columns[i]
            # if the values returned are not the same length
            # as the index (iow a not found value), iget returns
            # a 0-len ndarray. This is effectively catching
            # a numpy error (as numpy should really raise)
            values = self._data.iget(i)
            if len(self.index) and not len(values):
                # Pad a missing column with NaN so the Series aligns with
                # the frame's index.
                values = np.array([np.nan] * len(self.index), dtype=object)
            result = self._box_col_values(values, label)
            # this is a cached value, mark it so
            result._set_as_cached(label, self)
            return result
    def __getitem__(self, key):
        """Select columns or rows for ``df[key]``, dispatching on key type."""
        key = lib.item_from_zerodim(key)
        key = com.apply_if_callable(key, self)
        if is_hashable(key):
            # shortcut if the key is in columns
            if self.columns.is_unique and key in self.columns:
                if self.columns.nlevels > 1:
                    return self._getitem_multilevel(key)
                return self._get_item_cache(key)
        # Do we have a slicer (on rows)?
        indexer = convert_to_index_sliceable(self, key)
        if indexer is not None:
            # either we have a slice or we have a string that can be converted
            # to a slice for partial-string date indexing
            return self._slice(indexer, axis=0)
        # Do we have a (boolean) DataFrame?
        if isinstance(key, DataFrame):
            return self.where(key)
        # Do we have a (boolean) 1d indexer?
        if com.is_bool_indexer(key):
            return self._getitem_bool_array(key)
        # We are left with two options: a single key, and a collection of keys,
        # We interpret tuples as collections only for non-MultiIndex
        is_single_key = isinstance(key, tuple) or not is_list_like(key)
        if is_single_key:
            if self.columns.nlevels > 1:
                return self._getitem_multilevel(key)
            indexer = self.columns.get_loc(key)
            if is_integer(indexer):
                # Wrap so take() below always receives a list-like indexer.
                indexer = [indexer]
        else:
            if is_iterator(key):
                key = list(key)
            indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
        # take() does not accept boolean indexers
        if getattr(indexer, "dtype", None) == bool:
            indexer = np.where(indexer)[0]
        data = self.take(indexer, axis=1)
        if is_single_key:
            # What does looking for a single key in a non-unique index return?
            # The behavior is inconsistent. It returns a Series, except when
            # - the key itself is repeated (test on data.shape, #9519), or
            # - we have a MultiIndex on columns (test on self.columns, #21309)
            if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
                data = data[key]
        return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match " "DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
"Item wrong length %d instead of %d." % (len(key), len(self.index))
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0)
    def _getitem_multilevel(self, key):
        """Select ``key`` from a MultiIndex-columned frame (possibly a sub-frame)."""
        # self.columns is a MultiIndex
        loc = self.columns.get_loc(key)
        if isinstance(loc, (slice, Series, np.ndarray, Index)):
            # Key matched multiple columns: return a sub-frame with the
            # matched level(s) dropped from the column labels.
            new_columns = self.columns[loc]
            result_columns = maybe_droplevels(new_columns, key)
            if self._is_mixed_type:
                result = self.reindex(columns=new_columns)
                result.columns = result_columns
            else:
                new_values = self.values[:, loc]
                result = self._constructor(
                    new_values, index=self.index, columns=result_columns
                )
                result = result.__finalize__(self)
            # If there is only one column being returned, and its name is
            # either an empty string, or a tuple with an empty string as its
            # first element, then treat the empty string as a placeholder
            # and return the column as if the user had provided that empty
            # string in the key. If the result is a Series, exclude the
            # implied empty string from its name.
            if len(result.columns) == 1:
                top = result.columns[0]
                if isinstance(top, tuple):
                    top = top[0]
                if top == "":
                    result = result[""]
                    if isinstance(result, Series):
                        result = self._constructor_sliced(
                            result, index=self.index, name=key
                        )
            result._set_is_copy(self)
            return result
        else:
            # Scalar location: defer to the regular cached-item path.
            return self._get_item_cache(key)
    def _get_value(self, index, col, takeable: bool = False):
        """
        Quickly retrieve single value at passed column and index.
        Parameters
        ----------
        index : row label
        col : column label
        takeable : interpret the index/col as indexers, default False
        Returns
        -------
        scalar
        """
        if takeable:
            series = self._iget_item_cache(col)
            return com.maybe_box_datetimelike(series._values[index])
        series = self._get_item_cache(col)
        engine = self.index._engine
        try:
            return engine.get_value(series._values, index)
        except KeyError:
            # GH 20629
            if self.index.nlevels > 1:
                # partial indexing forbidden
                raise
        except (TypeError, ValueError):
            pass
        # we cannot handle direct indexing
        # use positional
        # Fall back: translate both labels to positions and recurse once
        # with takeable=True (the positional fast path above).
        col = self.columns.get_loc(col)
        index = self.index.get_loc(index)
        return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
    def _setitem_slice(self, key, value):
        # Assign to a row slice via .loc, after the chained-assignment check.
        self._check_setitem_copy()
        self.loc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
"Item wrong length %d instead of %d!" % (len(key), len(self.index))
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
    def _setitem_frame(self, key, value):
        # support boolean setting with DataFrame input, e.g.
        # df[df > df2] = 0
        if isinstance(key, np.ndarray):
            if key.shape != self.shape:
                raise ValueError("Array conditional must be same shape as self")
            # Wrap the raw mask in a frame aligned to our axes.
            key = self._constructor(key, **self._construct_axes_dict())
        if key.values.size and not is_bool_dtype(key.values):
            raise TypeError(
                "Must pass DataFrame or 2-d ndarray with boolean values only"
            )
        self._check_inplace_setting(value)
        self._check_setitem_copy()
        # _where keeps existing values where its condition is True, so the
        # mask is negated (-key) to replace exactly the True positions.
        self._where(-key, value, inplace=True)
    def _set_item(self, key, value):
        """
        Add series to DataFrame in specified column.
        If series is a numpy-array (not a Series/TimeSeries), it must be the
        same length as the DataFrames index or an error will be thrown.
        Series/TimeSeries will be conformed to the DataFrames index to
        ensure homogeneity.
        """
        # May create the index from `value` when the frame has none yet.
        self._ensure_valid_index(value)
        value = self._sanitize_column(key, value)
        NDFrame._set_item(self, key, value)
        # check if we are modifying a copy
        # try to set first as we want an invalid
        # value exception to occur first
        if len(self):
            self._check_setitem_copy()
    def _set_value(self, index, col, value, takeable: bool = False):
        """
        Put single value at passed column and index.
        Parameters
        ----------
        index : row label
        col : column label
        value : scalar
        takeable : interpret the index/col as indexers, default False
        Returns
        -------
        DataFrame
            If label pair is contained, will be reference to calling DataFrame,
            otherwise a new object.
        """
        try:
            if takeable is True:
                # Positional fast path straight into the cached column.
                series = self._iget_item_cache(col)
                return series._set_value(index, value, takeable=True)
            series = self._get_item_cache(col)
            engine = self.index._engine
            engine.set_value(series._values, index, value)
            return self
        except (KeyError, TypeError):
            # set using a non-recursive method & reset the cache
            # Fall back to loc/iloc (may enlarge the frame); drop the stale
            # cached column afterwards.
            if takeable:
                self.iloc[index, col] = value
            else:
                self.loc[index, col] = value
            self._item_cache.pop(col, None)
            return self
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
self._data = self._data.reindex_axis(
value.index.copy(), axis=1, fill_value=np.nan
)
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
    def query(self, expr, inplace=False, **kwargs):
        """
        Query the columns of a DataFrame with a boolean expression.
        Parameters
        ----------
        expr : str
            The query string to evaluate. You can refer to variables
            in the environment by prefixing them with an '@' character like
            ``@a + b``.
            .. versionadded:: 0.25.0
            You can refer to column names that contain spaces by surrounding
            them in backticks.
            For example, if one of your columns is called ``a a`` and you want
            to sum it with ``b``, your query should be ```a a` + b``.
        inplace : bool
            Whether the query should modify the data in place or return
            a modified copy.
        **kwargs
            See the documentation for :func:`eval` for complete details
            on the keyword arguments accepted by :meth:`DataFrame.query`.
        Returns
        -------
        DataFrame
            DataFrame resulting from the provided query expression.
        See Also
        --------
        eval : Evaluate a string describing operations on
            DataFrame columns.
        DataFrame.eval : Evaluate a string describing operations on
            DataFrame columns.
        Notes
        -----
        The result of the evaluation of this expression is first passed to
        :attr:`DataFrame.loc` and if that fails because of a
        multidimensional key (e.g., a DataFrame) then the result will be passed
        to :meth:`DataFrame.__getitem__`.
        This method uses the top-level :func:`eval` function to
        evaluate the passed query.
        The :meth:`~pandas.DataFrame.query` method uses a slightly
        modified Python syntax by default. For example, the ``&`` and ``|``
        (bitwise) operators have the precedence of their boolean cousins,
        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
        however the semantics are different.
        You can change the semantics of the expression by passing the keyword
        argument ``parser='python'``. This enforces the same semantics as
        evaluation in Python space. Likewise, you can pass ``engine='python'``
        to evaluate an expression using Python itself as a backend. This is not
        recommended as it is inefficient compared to using ``numexpr`` as the
        engine.
        The :attr:`DataFrame.index` and
        :attr:`DataFrame.columns` attributes of the
        :class:`~pandas.DataFrame` instance are placed in the query namespace
        by default, which allows you to treat both the index and columns of the
        frame as a column in the frame.
        The identifier ``index`` is used for the frame index; you can also
        use the name of the index to identify it in a query. Please note that
        Python keywords may not be used as identifiers.
        For further details and examples see the ``query`` documentation in
        :ref:`indexing <indexing.query>`.
        Examples
        --------
        >>> df = pd.DataFrame({'A': range(1, 6),
        ...                    'B': range(10, 0, -2),
        ...                    'C C': range(10, 5, -1)})
        >>> df
           A   B  C C
        0  1  10   10
        1  2   8    9
        2  3   6    8
        3  4   4    7
        4  5   2    6
        >>> df.query('A > B')
           A  B  C C
        4  5  2    6
        The previous expression is equivalent to
        >>> df[df.A > df.B]
           A  B  C C
        4  5  2    6
        For columns with spaces in their name, you can use backtick quoting.
        >>> df.query('B == `C C`')
           A   B  C C
        0  1  10   10
        The previous expression is equivalent to
        >>> df[df.B == df['C C']]
           A   B  C C
        0  1  10   10
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if not isinstance(expr, str):
            msg = "expr must be a string to be evaluated, {0} given"
            raise ValueError(msg.format(type(expr)))
        # Bump eval's stack level by one so '@variable' resolution looks in
        # the caller's frame rather than this method's.
        kwargs["level"] = kwargs.pop("level", 0) + 1
        kwargs["target"] = None
        res = self.eval(expr, **kwargs)
        try:
            new_data = self.loc[res]
        except ValueError:
            # when res is multi-dimensional loc raises, but this is sometimes a
            # valid query
            new_data = self[res]
        if inplace:
            self._update_inplace(new_data)
        else:
            return new_data
    def eval(self, expr, inplace=False, **kwargs):
        """
        Evaluate a string describing operations on DataFrame columns.
        Operates on columns only, not specific rows or elements. This allows
        `eval` to run arbitrary code, which can make you vulnerable to code
        injection if you pass user input to this function.
        Parameters
        ----------
        expr : str
            The expression string to evaluate.
        inplace : bool, default False
            If the expression contains an assignment, whether to perform the
            operation inplace and mutate the existing DataFrame. Otherwise,
            a new DataFrame is returned.
        kwargs : dict
            See the documentation for :func:`eval` for complete details
            on the keyword arguments accepted by
            :meth:`~pandas.DataFrame.query`.
        Returns
        -------
        ndarray, scalar, or pandas object
            The result of the evaluation.
        See Also
        --------
        DataFrame.query : Evaluates a boolean expression to query the columns
            of a frame.
        DataFrame.assign : Can evaluate an expression or function to create new
            values for a column.
        eval : Evaluate a Python expression as a string using various
            backends.
        Notes
        -----
        For more details see the API documentation for :func:`~eval`.
        For detailed examples see :ref:`enhancing performance with eval
        <enhancingperf.eval>`.
        Examples
        --------
        >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        >>> df.eval('A + B')
        0    11
        1    10
        2     9
        3     8
        4     7
        dtype: int64
        Assignment is allowed though by default the original DataFrame is not
        modified.
        >>> df.eval('C = A + B')
           A   B   C
        0  1  10  11
        1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        Use ``inplace=True`` to modify the original DataFrame.
        >>> df.eval('C = A + B', inplace=True)
        >>> df
           A   B   C
        0  1  10  11
1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        """
        from pandas.core.computation.eval import eval as _eval
        inplace = validate_bool_kwarg(inplace, "inplace")
        resolvers = kwargs.pop("resolvers", None)
        # Bump the stack level so '@variable' lookups see the caller's frame.
        kwargs["level"] = kwargs.pop("level", 0) + 1
        if resolvers is None:
            # Default resolvers let expressions reference index levels and
            # (space-character-free forms of) column names directly.
            index_resolvers = self._get_index_resolvers()
            column_resolvers = self._get_space_character_free_column_resolvers()
            resolvers = column_resolvers, index_resolvers
        if "target" not in kwargs:
            kwargs["target"] = self
        kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
        return _eval(expr, inplace=inplace, **kwargs)
    def select_dtypes(self, include=None, exclude=None):
        """
        Return a subset of the DataFrame's columns based on the column dtypes.
        Parameters
        ----------
        include, exclude : scalar or list-like
            A selection of dtypes or strings to be included/excluded. At least
            one of these parameters must be supplied.
        Returns
        -------
        DataFrame
            The subset of the frame including the dtypes in ``include`` and
            excluding the dtypes in ``exclude``.
        Raises
        ------
        ValueError
            * If both of ``include`` and ``exclude`` are empty
            * If ``include`` and ``exclude`` have overlapping elements
            * If any kind of string dtype is passed in.
        Notes
        -----
        * To select all *numeric* types, use ``np.number`` or ``'number'``
        * To select strings you must use the ``object`` dtype, but note that
          this will return *all* object dtype columns
        * See the `numpy dtype hierarchy
          <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
        * To select datetimes, use ``np.datetime64``, ``'datetime'`` or
          ``'datetime64'``
        * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
          ``'timedelta64'``
        * To select Pandas categorical dtypes, use ``'category'``
        * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
          0.20.0) or ``'datetime64[ns, tz]'``
        Examples
        --------
        >>> df = pd.DataFrame({'a': [1, 2] * 3,
        ...                    'b': [True, False] * 3,
        ...                    'c': [1.0, 2.0] * 3})
        >>> df
                a      b  c
        0       1   True  1.0
        1       2  False  2.0
        2       1   True  1.0
        3       2  False  2.0
        4       1   True  1.0
        5       2  False  2.0
        >>> df.select_dtypes(include='bool')
           b
        0  True
        1  False
        2  True
        3  False
        4  True
        5  False
        >>> df.select_dtypes(include=['float64'])
           c
        0  1.0
        1  2.0
        2  1.0
        3  2.0
        4  1.0
        5  2.0
        >>> df.select_dtypes(exclude=['int'])
               b    c
        0   True  1.0
        1  False  2.0
        2   True  1.0
        3  False  2.0
        4   True  1.0
        5  False  2.0
        """
        def _get_info_slice(obj, indexer):
            """Slice the info axis of `obj` with `indexer`."""
            if not hasattr(obj, "_info_axis_number"):
                msg = "object of type {typ!r} has no info axis"
                raise TypeError(msg.format(typ=type(obj).__name__))
            slices = [slice(None)] * obj.ndim
            slices[obj._info_axis_number] = indexer
            return tuple(slices)
        # Normalize scalar include/exclude into tuples.
        if not is_list_like(include):
            include = (include,) if include is not None else ()
        if not is_list_like(exclude):
            exclude = (exclude,) if exclude is not None else ()
        selection = tuple(map(frozenset, (include, exclude)))
        if not any(selection):
            raise ValueError("at least one of include or exclude must be " "nonempty")
        # convert the myriad valid dtypes object to a single representation
        include, exclude = map(
            lambda x: frozenset(map(infer_dtype_from_object, x)), selection
        )
        for dtypes in (include, exclude):
            invalidate_string_dtypes(dtypes)
        # can't both include AND exclude!
        if not include.isdisjoint(exclude):
            raise ValueError(
                "include and exclude overlap on {inc_ex}".format(
                    inc_ex=(include & exclude)
                )
            )
        # empty include/exclude -> defaults to True
        # three cases (we've already raised if both are empty)
        # case 1: empty include, nonempty exclude
        # we have True, True, ... True for include, same for exclude
        # in the loop below we get the excluded
        # and when we call '&' below we get only the excluded
        # case 2: nonempty include, empty exclude
        # same as case 1, but with include
        # case 3: both nonempty
        # the "union" of the logic of case 1 and case 2:
        # we get the included and excluded, and return their logical and
        include_these = Series(not bool(include), index=self.columns)
        exclude_these = Series(not bool(exclude), index=self.columns)
        def is_dtype_instance_mapper(idx, dtype):
            # Pair each column position with a predicate testing its dtype.
            return idx, functools.partial(issubclass, dtype.type)
        for idx, f in itertools.starmap(
            is_dtype_instance_mapper, enumerate(self.dtypes)
        ):
            if include:  # checks for the case of empty include or exclude
                include_these.iloc[idx] = any(map(f, include))
            if exclude:
                exclude_these.iloc[idx] = not any(map(f, exclude))
        dtype_indexer = include_these & exclude_these
        return self.loc[_get_info_slice(self, dtype_indexer)]
    def insert(self, loc, column, value, allow_duplicates=False):
        """
        Insert column into DataFrame at specified location.
        Raises a ValueError if `column` is already contained in the DataFrame,
        unless `allow_duplicates` is set to True.
        Parameters
        ----------
        loc : int
            Insertion index. Must verify 0 <= loc <= len(columns)
        column : string, number, or hashable object
            label of the inserted column
        value : int, Series, or array-like
            Scalar or array-like values for the new column; conformed to
            this frame's index before insertion.
        allow_duplicates : bool, optional
            If True, allow inserting a column label that already exists.
        """
        # Creating the index from `value` (when the frame has none yet) must
        # happen before sanitizing, which assumes a valid index.
        self._ensure_valid_index(value)
        # broadcast=False: do not tile the value across duplicate labels.
        value = self._sanitize_column(column, value, broadcast=False)
        self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
    def _sanitize_column(self, key, value, broadcast=True):
        """
        Ensures new columns (which go into the BlockManager as new blocks) are
        always copied and converted into an array.
        Parameters
        ----------
        key : object
        value : scalar, Series, or array-like
        broadcast : bool, default True
            If ``key`` matches multiple duplicate column names in the
            DataFrame, this parameter indicates whether ``value`` should be
            tiled so that the returned array contains a (duplicated) column for
            each occurrence of the key. If False, ``value`` will not be tiled.
        Returns
        -------
        numpy.ndarray
        """
        def reindexer(value):
            # reindex if necessary
            if value.index.equals(self.index) or not len(self.index):
                value = value._values.copy()
            else:
                # GH 4107
                try:
                    value = value.reindex(self.index)._values
                except Exception as e:
                    # duplicate axis
                    if not value.index.is_unique:
                        raise e
                    # other
                    raise TypeError(
                        "incompatible index of inserted column " "with frame index"
                    )
            return value
        if isinstance(value, Series):
            value = reindexer(value)
        elif isinstance(value, DataFrame):
            # align right-hand-side columns if self.columns
            # is multi-index and self[key] is a sub-frame
            if isinstance(self.columns, MultiIndex) and key in self.columns:
                loc = self.columns.get_loc(key)
                if isinstance(loc, (slice, Series, np.ndarray, Index)):
                    cols = maybe_droplevels(self.columns[loc], key)
                    if len(cols) and not cols.equals(value.columns):
                        value = value.reindex(cols, axis=1)
            # now align rows
            # reindexer works row-wise, so transpose after aligning columns.
            value = reindexer(value).T
        elif isinstance(value, ExtensionArray):
            # Explicitly copy here, instead of in sanitize_index,
            # as sanitize_index won't copy an EA, even with copy=True
            value = value.copy()
            value = sanitize_index(value, self.index, copy=False)
        elif isinstance(value, Index) or is_sequence(value):
            # turn me into an ndarray
            value = sanitize_index(value, self.index, copy=False)
            if not isinstance(value, (np.ndarray, Index)):
                if isinstance(value, list) and len(value) > 0:
                    value = maybe_convert_platform(value)
                else:
                    value = com.asarray_tuplesafe(value)
            elif value.ndim == 2:
                value = value.copy().T
            elif isinstance(value, Index):
                value = value.copy(deep=True)
            else:
                value = value.copy()
            # possibly infer to datetimelike
            if is_object_dtype(value.dtype):
                value = maybe_infer_to_datetimelike(value)
        else:
            # Scalar: broadcast it to a full column.
            # cast ignores pandas dtypes. so save the dtype first
            infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
            # upcast
            value = cast_scalar_to_array(len(self.index), value)
            value = maybe_cast_to_datetime(value, infer_dtype)
        # return internal types directly
        if is_extension_type(value) or is_extension_array_dtype(value):
            return value
        # broadcast across multiple columns if necessary
        if broadcast and key in self.columns and value.ndim == 1:
            if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
                existing_piece = self[key]
                if isinstance(existing_piece, DataFrame):
                    value = np.tile(value, (len(existing_piece.columns), 1))
        return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {
item: Series(self._data.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)
}
    def lookup(self, row_labels, col_labels):
        """
        Label-based "fancy indexing" function for DataFrame.
        Given equal-length arrays of row and column labels, return an
        array of the values corresponding to each (row, col) pair.
        Parameters
        ----------
        row_labels : sequence
            The row labels to use for lookup
        col_labels : sequence
            The column labels to use for lookup
        Returns
        -------
        numpy.ndarray
            The found values.
        Raises
        ------
        ValueError
            If `row_labels` and `col_labels` differ in length.
        KeyError
            If any label is not found in the corresponding axis.
        Notes
        -----
        Akin to::
            result = [df.get_value(row, col)
                      for row, col in zip(row_labels, col_labels)]
        """
        n = len(row_labels)
        if n != len(col_labels):
            raise ValueError("Row labels must have same size as column labels")
        thresh = 1000
        # Fast path: flat positional indexing into the materialized values
        # array; used for homogeneous frames, or for mixed frames when the
        # lookup count is large enough that per-cell access would be slow.
        if not self._is_mixed_type or n > thresh:
            values = self.values
            ridx = self.index.get_indexer(row_labels)
            cidx = self.columns.get_indexer(col_labels)
            if (ridx == -1).any():
                raise KeyError("One or more row labels was not found")
            if (cidx == -1).any():
                raise KeyError("One or more column labels was not found")
            flat_index = ridx * len(self.columns) + cidx
            result = values.flat[flat_index]
        else:
            # Small mixed-dtype frame: per-cell lookup preserves dtypes.
            result = np.empty(n, dtype="O")
            for i, (r, c) in enumerate(zip(row_labels, col_labels)):
                result[i] = self._get_value(r, c)
        if is_object_dtype(result):
            result = lib.maybe_convert_objects(result)
        return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value):
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
    @Substitution(**_shared_doc_kwargs)
    @Appender(NDFrame.reindex.__doc__)
    @rewrite_axis_style_signature(
        "labels",
        [
            ("method", None),
            ("copy", True),
            ("level", None),
            ("fill_value", np.nan),
            ("limit", None),
            ("tolerance", None),
        ],
    )
    def reindex(self, *args, **kwargs):
        # Accept both axis-style (index=..., columns=...) and positional
        # (labels, axis=...) call forms; normalize to per-axis kwargs.
        axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
        kwargs.update(axes)
        # Pop these, since the values are in `kwargs` under different names
        kwargs.pop("axis", None)
        kwargs.pop("labels", None)
        return super().reindex(**kwargs)
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
         inplace=False, errors="raise"):
    """
    Drop specified labels from rows or columns.

    Remove rows or columns by specifying label names and corresponding
    axis, or by specifying directly index or column names. When using a
    multi-index, labels on different levels can be removed by specifying
    the level.

    Parameters
    ----------
    labels : single label or list-like
        Index or column labels to drop.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Whether to drop labels from the index (0 or 'index') or
        columns (1 or 'columns').
    index : single label or list-like
        Alternative to specifying axis (``labels, axis=0``
        is equivalent to ``index=labels``).

        .. versionadded:: 0.21.0
    columns : single label or list-like
        Alternative to specifying axis (``labels, axis=1``
        is equivalent to ``columns=labels``).

        .. versionadded:: 0.21.0
    level : int or level name, optional
        For MultiIndex, level from which the labels will be removed.
    inplace : bool, default False
        If True, do operation inplace and return None.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and only existing labels are
        dropped.

    Returns
    -------
    DataFrame
        DataFrame without the removed index or column labels.

    Raises
    ------
    KeyError
        If any of the labels is not found in the selected axis.

    See Also
    --------
    DataFrame.loc : Label-location based indexer for selection by label.
    DataFrame.dropna : Return DataFrame with labels on given axis omitted
        where (all or any) data are missing.
    DataFrame.drop_duplicates : Return DataFrame with duplicate rows
        removed, optionally only considering certain columns.
    Series.drop : Return Series with specified index labels removed.

    Examples
    --------
    >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
    ...                   columns=['A', 'B', 'C', 'D'])

    Drop columns:

    >>> df.drop(['B', 'C'], axis=1)
       A   D
    0  0   3
    1  4   7
    2  8  11

    Drop a row by index:

    >>> df.drop([0, 1])
       A  B   C   D
    2  8  9  10  11
    """
    # All of the heavy lifting is shared with Series in NDFrame.drop.
    return super().drop(labels=labels, axis=axis, index=index,
                        columns=columns, level=level, inplace=inplace,
                        errors=errors)
@rewrite_axis_style_signature(
    "mapper",
    [("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(self, *args, **kwargs):
    """
    Alter axes labels.

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error. See the :ref:`user guide <basics.rename>` for more.

    Parameters
    ----------
    mapper : dict-like or function
        Dict-like or functions transformations to apply to
        that axis' values. Use either ``mapper`` and ``axis`` to
        specify the axis to target with ``mapper``, or ``index`` and
        ``columns``.
    index : dict-like or function
        Alternative to specifying axis (``mapper, axis=0``
        is equivalent to ``index=mapper``).
    columns : dict-like or function
        Alternative to specifying axis (``mapper, axis=1``
        is equivalent to ``columns=mapper``).
    axis : int or str
        Axis to target with ``mapper``. Can be either the axis name
        ('index', 'columns') or number (0, 1). The default is 'index'.
    copy : bool, default True
        Also copy underlying data.
    inplace : bool, default False
        Whether to return a new DataFrame. If True then value of copy is
        ignored.
    level : int or level name, default None
        In case of a MultiIndex, only rename labels in the specified
        level.
    errors : {'ignore', 'raise'}, default 'ignore'
        If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
        or `columns` contains labels that are not present in the Index
        being transformed. If 'ignore', existing keys will be renamed and
        extra keys will be ignored.

    Returns
    -------
    DataFrame
        DataFrame with the renamed axis labels.

    Raises
    ------
    KeyError
        If any of the labels is not found in the selected axis and
        "errors='raise'".

    See Also
    --------
    DataFrame.rename_axis : Set the name of the axis.

    Examples
    --------
    ``DataFrame.rename`` supports two calling conventions:

    * ``(index=index_mapper, columns=columns_mapper, ...)``
    * ``(mapper, axis={'index', 'columns'}, ...)``

    We *highly* recommend using keyword arguments to clarify your intent.

    >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    >>> df.rename(columns={"A": "a", "B": "c"})
       a  c
    0  1  4
    1  2  5
    2  3  6
    >>> df.rename(str.lower, axis='columns')
       a  b
    0  1  4
    1  2  5
    2  3  6
    """
    # Normalize the two calling conventions into canonical keywords.
    axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename")
    merged = {**kwargs, **axes}
    # Already represented in ``merged`` under different names.
    for redundant in ("axis", "mapper"):
        merged.pop(redundant, None)
    return super().rename(**merged)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
           limit=None, downcast=None, **kwargs):
    # Thin delegation; the docstring comes from NDFrame via the decorators.
    return super().fillna(value=value, method=method, axis=axis,
                          inplace=inplace, limit=limit, downcast=downcast,
                          **kwargs)
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
            regex=False, method="pad"):
    # Thin delegation; the docstring comes from the shared template.
    return super().replace(to_replace=to_replace, value=value,
                           inplace=inplace, limit=limit, regex=regex,
                           method=method)
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
    # Delegate straight to the shared NDFrame implementation.
    return super().shift(periods=periods, freq=freq, axis=axis,
                         fill_value=fill_value)
def set_index(
    self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
    """
    Set the DataFrame index using existing columns.

    Set the DataFrame index (row labels) using one or more existing
    columns or arrays (of the correct length). The index can replace the
    existing index or expand on it.

    Parameters
    ----------
    keys : label or array-like or list of labels/arrays
        This parameter can be either a single column key, a single array of
        the same length as the calling DataFrame, or a list containing an
        arbitrary combination of column keys and arrays. Here, "array"
        encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
        instances of :class:`~collections.abc.Iterator`.
    drop : bool, default True
        Delete columns to be used as the new index.
    append : bool, default False
        Whether to append columns to existing index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).
    verify_integrity : bool, default False
        Check the new index for duplicates. Otherwise defer the check until
        necessary. Setting to False will improve the performance of this
        method.

    Returns
    -------
    DataFrame
        Changed row labels.

    See Also
    --------
    DataFrame.reset_index : Opposite of set_index.
    DataFrame.reindex : Change to new indices or expand indices.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
    ...                    'year': [2012, 2014, 2013, 2014],
    ...                    'sale': [55, 40, 84, 31]})
    >>> df
       month  year  sale
    0      1  2012    55
    1      4  2014    40
    2      7  2013    84
    3     10  2014    31

    Set the index to become the 'month' column:

    >>> df.set_index('month')
           year  sale
    month
    1      2012    55
    4      2014    40
    7      2013    84
    10     2014    31

    Create a MultiIndex using columns 'year' and 'month':

    >>> df.set_index(['year', 'month'])
                sale
    year  month
    2012  1     55
    2014  4     40
    2013  7     84
    2014  10    31

    Create a MultiIndex using an Index and a column:

    >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
             month  sale
       year
    1  2012  1      55
    2  2014  4      40
    3  2013  7      84
    4  2014  10     31

    Create a MultiIndex using two Series:

    >>> s = pd.Series([1, 2, 3, 4])
    >>> df.set_index([s, s**2])
          month  year  sale
    1 1       1  2012    55
    2 4       4  2014    40
    3 9       7  2013    84
    4 16     10  2014    31
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Normalize to a list so a single key and a list of keys share one path.
    if not isinstance(keys, list):
        keys = [keys]

    err_msg = (
        'The parameter "keys" may be a column key, one-dimensional '
        "array, or a list containing only valid column keys and "
        "one-dimensional arrays."
    )

    # First pass: validate every element of ``keys`` before mutating
    # anything, collecting column labels that don't exist.
    missing = []
    for col in keys:
        if isinstance(
            col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)
        ):
            # arrays are fine as long as they are one-dimensional
            # iterators get converted to list below
            if getattr(col, "ndim", 1) != 1:
                raise ValueError(err_msg)
        else:
            # everything else gets tried as a key; see GH 24969
            try:
                found = col in self.columns
            except TypeError:
                raise TypeError(
                    err_msg + " Received column of " "type {}".format(type(col))
                )
            else:
                if not found:
                    missing.append(col)

    if missing:
        raise KeyError("None of {} are in the columns".format(missing))

    if inplace:
        frame = self
    else:
        frame = self.copy()

    # Second pass: build the level arrays (and their names) that make up
    # the new index.
    arrays = []
    names = []
    if append:
        # keep the existing index as the leading level(s)
        names = [x for x in self.index.names]
        if isinstance(self.index, ABCMultiIndex):
            for i in range(self.index.nlevels):
                arrays.append(self.index._get_level_values(i))
        else:
            arrays.append(self.index)

    to_remove = []
    for col in keys:
        if isinstance(col, ABCMultiIndex):
            for n in range(col.nlevels):
                arrays.append(col._get_level_values(n))
            names.extend(col.names)
        elif isinstance(col, (ABCIndexClass, ABCSeries)):
            # if Index then not MultiIndex (treated above)
            arrays.append(col)
            names.append(col.name)
        elif isinstance(col, (list, np.ndarray)):
            arrays.append(col)
            names.append(None)
        elif isinstance(col, abc.Iterator):
            arrays.append(list(col))
            names.append(None)
        # from here, col can only be a column label
        else:
            arrays.append(frame[col]._values)
            names.append(col)
            if drop:
                to_remove.append(col)

        if len(arrays[-1]) != len(self):
            # check newest element against length of calling frame, since
            # ensure_index_from_sequences would not raise for append=False.
            raise ValueError(
                "Length mismatch: Expected {len_self} rows, "
                "received array of length {len_col}".format(
                    len_self=len(self), len_col=len(arrays[-1])
                )
            )

    index = ensure_index_from_sequences(arrays, names)

    if verify_integrity and not index.is_unique:
        duplicates = index[index.duplicated()].unique()
        raise ValueError("Index has duplicate keys: {dup}".format(dup=duplicates))

    # use set to handle duplicate column names gracefully in case of drop
    for c in set(to_remove):
        del frame[c]

    # clear up memory usage
    index._cleanup()

    frame.index = index

    if not inplace:
        return frame
def reset_index(
    self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
    """
    Reset the index, or a level of it.

    Reset the index of the DataFrame, and use the default one instead.
    If the DataFrame has a MultiIndex, this method can remove one or more
    levels.

    Parameters
    ----------
    level : int, str, tuple, or list, default None
        Only remove the given levels from the index. Removes all levels by
        default.
    drop : bool, default False
        Do not try to insert index into dataframe columns. This resets
        the index to the default integer index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).
    col_level : int or str, default 0
        If the columns have multiple levels, determines which level the
        labels are inserted into. By default it is inserted into the first
        level.
    col_fill : object, default ''
        If the columns have multiple levels, determines how the other
        levels are named. If None then the index name is repeated.

    Returns
    -------
    DataFrame
        DataFrame with the new index.

    See Also
    --------
    DataFrame.set_index : Opposite of reset_index.
    DataFrame.reindex : Change to new indices or expand indices.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame([('bird', 389.0),
    ...                    ('bird', 24.0),
    ...                    ('mammal', 80.5),
    ...                    ('mammal', np.nan)],
    ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
    ...                   columns=('class', 'max_speed'))
    >>> df
             class  max_speed
    falcon    bird      389.0
    parrot    bird       24.0
    lion    mammal       80.5
    monkey  mammal        NaN

    When we reset the index, the old index is added as a column, and a
    new sequential index is used:

    >>> df.reset_index()
        index   class  max_speed
    0  falcon    bird      389.0
    1  parrot    bird       24.0
    2    lion  mammal       80.5
    3  monkey  mammal        NaN

    We can use the `drop` parameter to avoid the old index being added as
    a column:

    >>> df.reset_index(drop=True)
        class  max_speed
    0    bird      389.0
    1    bird       24.0
    2  mammal       80.5
    3  mammal        NaN

    You can also use `reset_index` with `MultiIndex`.

    >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
    ...                                    ('bird', 'parrot'),
    ...                                    ('mammal', 'lion'),
    ...                                    ('mammal', 'monkey')],
    ...                                   names=['class', 'name'])
    >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
    ...                                      ('species', 'type')])
    >>> df = pd.DataFrame([(389.0, 'fly'),
    ...                    ( 24.0, 'fly'),
    ...                    ( 80.5, 'run'),
    ...                    (np.nan, 'jump')],
    ...                   index=index,
    ...                   columns=columns)
    >>> df
                   speed species
                     max    type
    class  name
    bird   falcon  389.0     fly
           parrot   24.0     fly
    mammal lion     80.5     run
           monkey    NaN    jump

    If the index has multiple levels, we can reset a subset of them:

    >>> df.reset_index(level='class')
             class  speed species
                      max    type
    name
    falcon    bird  389.0     fly
    parrot    bird   24.0     fly
    lion    mammal   80.5     run
    monkey  mammal    NaN    jump

    If we are not dropping the index, by default, it is placed in the top
    level. We can place it in another level:

    >>> df.reset_index(level='class', col_level=1)
                    speed species
             class    max    type
    name
    falcon    bird  389.0     fly
    parrot    bird   24.0     fly
    lion    mammal   80.5     run
    monkey  mammal    NaN    jump

    When the index is inserted under another level, we can specify under
    which one with the parameter `col_fill`:

    >>> df.reset_index(level='class', col_level=1, col_fill='species')
                  species  speed species
                    class    max    type
    name
    falcon           bird  389.0     fly
    parrot           bird   24.0     fly
    lion           mammal   80.5     run
    monkey         mammal    NaN    jump

    If we specify a nonexistent level for `col_fill`, it is created:

    >>> df.reset_index(level='class', col_level=1, col_fill='genus')
                    genus  speed species
                    class    max    type
    name
    falcon           bird  389.0     fly
    parrot           bird   24.0     fly
    lion           mammal   80.5     run
    monkey         mammal    NaN    jump
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if inplace:
        new_obj = self
    else:
        new_obj = self.copy()

    def _maybe_casted_values(index, labels=None):
        # Turn an index level into a column of values, taking rows by
        # ``labels`` and writing NaN where labels == -1 (missing).
        values = index._values
        if not isinstance(index, (PeriodIndex, DatetimeIndex)):
            if values.dtype == np.object_:
                values = lib.maybe_convert_objects(values)

        # if we have the labels, extract the values with a mask
        if labels is not None:
            mask = labels == -1

            # we can have situations where the whole mask is -1,
            # meaning there is nothing found in labels, so make all nan's
            if mask.all():
                values = np.empty(len(mask))
                values.fill(np.nan)
            else:
                values = values.take(labels)

                # TODO(https://github.com/pandas-dev/pandas/issues/24206)
                # Push this into maybe_upcast_putmask?
                # We can't pass EAs there right now. Looks a bit
                # complicated.
                # So we unbox the ndarray_values, op, re-box.
                values_type = type(values)
                values_dtype = values.dtype

                if issubclass(values_type, DatetimeLikeArray):
                    values = values._data

                if mask.any():
                    values, changed = maybe_upcast_putmask(values, mask, np.nan)

                if issubclass(values_type, DatetimeLikeArray):
                    values = values_type(values, dtype=values_dtype)

        return values

    new_index = ibase.default_index(len(new_obj))
    if level is not None:
        if not isinstance(level, (tuple, list)):
            level = [level]
        level = [self.index._get_level_number(lev) for lev in level]
        if len(level) < self.index.nlevels:
            # only some levels are removed; the remainder stays as the index
            new_index = self.index.droplevel(level)

    if not drop:
        if isinstance(self.index, MultiIndex):
            names = [
                n if n is not None else ("level_%d" % i)
                for (i, n) in enumerate(self.index.names)
            ]
            to_insert = zip(self.index.levels, self.index.codes)
        else:
            default = "index" if "index" not in self else "level_0"
            names = [default] if self.index.name is None else [self.index.name]
            to_insert = ((self.index, None),)

        multi_col = isinstance(self.columns, MultiIndex)
        # insert in reverse so the final column order matches level order
        for i, (lev, lab) in reversed(list(enumerate(to_insert))):
            if not (level is None or i in level):
                continue
            name = names[i]
            if multi_col:
                col_name = list(name) if isinstance(name, tuple) else [name]
                if col_fill is None:
                    if len(col_name) not in (1, self.columns.nlevels):
                        raise ValueError(
                            "col_fill=None is incompatible "
                            "with incomplete column name "
                            "{}".format(name)
                        )
                    col_fill = col_name[0]

                # pad the new column label out to the full number of
                # column levels, using ``col_fill`` for the other levels
                lev_num = self.columns._get_level_number(col_level)
                name_lst = [col_fill] * lev_num + col_name
                missing = self.columns.nlevels - len(name_lst)
                name_lst += [col_fill] * missing
                name = tuple(name_lst)

            # to ndarray and maybe infer different dtype
            level_values = _maybe_casted_values(lev, lab)
            new_obj.insert(0, name, level_values)

    new_obj.index = new_index
    if not inplace:
        return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self):
    # Element-wise NA check; implementation lives in NDFrame and the
    # shared docstring is attached by @Appender.
    return super().isna()
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self):
    # Alias of ``isna``; both delegate to the NDFrame implementation.
    return super().isnull()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self):
    # Element-wise inverse of ``isna``; implementation lives in NDFrame.
    return super().notna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self):
    # Alias of ``notna``; both delegate to the NDFrame implementation.
    return super().notnull()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
    """
    Remove missing values.

    See the :ref:`User Guide <missing_data>` for more on which values are
    considered missing, and how to work with missing data.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Determine if rows or columns which contain missing values are
        removed.

        * 0, or 'index' : Drop rows which contain missing values.
        * 1, or 'columns' : Drop columns which contain missing value.

        .. deprecated:: 0.23.0

           Pass tuple or list to drop on multiple axes.
           Only a single axis is allowed.

    how : {'any', 'all'}, default 'any'
        Determine if row or column is removed from DataFrame, when we have
        at least one NA or all NA.

        * 'any' : If any NA values are present, drop that row or column.
        * 'all' : If all values are NA, drop that row or column.

    thresh : int, optional
        Require that many non-NA values.
    subset : array-like, optional
        Labels along other axis to consider, e.g. if you are dropping rows
        these would be a list of columns to include.
    inplace : bool, default False
        If True, do operation inplace and return None.

    Returns
    -------
    DataFrame
        DataFrame with NA entries dropped from it.

    See Also
    --------
    DataFrame.isna: Indicate missing values.
    DataFrame.notna : Indicate existing (non-missing) values.
    DataFrame.fillna : Replace missing values.
    Series.dropna : Drop missing values.
    Index.dropna : Drop missing indices.

    Examples
    --------
    >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
    ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
    ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
    ...                             pd.NaT]})
    >>> df
           name        toy       born
    0    Alfred        NaN        NaT
    1    Batman  Batmobile 1940-04-25
    2  Catwoman   Bullwhip        NaT

    Drop the rows where at least one element is missing.

    >>> df.dropna()
         name        toy       born
    1  Batman  Batmobile 1940-04-25

    Drop the columns where at least one element is missing.

    >>> df.dropna(axis='columns')
           name
    0    Alfred
    1    Batman
    2  Catwoman

    Drop the rows where all elements are missing.

    >>> df.dropna(how='all')
           name        toy       born
    0    Alfred        NaN        NaT
    1    Batman  Batmobile 1940-04-25
    2  Catwoman   Bullwhip        NaT

    Keep only the rows with at least 2 non-NA values.

    >>> df.dropna(thresh=2)
           name        toy       born
    1    Batman  Batmobile 1940-04-25
    2  Catwoman   Bullwhip        NaT

    Define in which columns to look for missing values.

    >>> df.dropna(subset=['name', 'born'])
         name        toy       born
    1  Batman  Batmobile 1940-04-25

    Keep the DataFrame with valid entries in the same variable.

    >>> df.dropna(inplace=True)
    >>> df
         name        toy       born
    1  Batman  Batmobile 1940-04-25
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if isinstance(axis, (tuple, list)):
        # GH20987
        msg = (
            "supplying multiple axes to axis is deprecated and "
            "will be removed in a future version."
        )
        warnings.warn(msg, FutureWarning, stacklevel=2)

        # deprecated multi-axis path: apply dropna once per axis
        result = self
        for ax in axis:
            result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax)
    else:
        axis = self._get_axis_number(axis)
        # non-NA values are counted along the *other* axis
        agg_axis = 1 - axis

        agg_obj = self
        if subset is not None:
            # restrict the NA-count to the given labels on the other axis
            ax = self._get_axis(agg_axis)
            indices = ax.get_indexer_for(subset)
            check = indices == -1
            if check.any():
                raise KeyError(list(np.compress(check, subset)))
            agg_obj = self.take(indices, axis=agg_axis)

        count = agg_obj.count(axis=agg_axis)

        if thresh is not None:
            # thresh takes precedence over how
            mask = count >= thresh
        elif how == "any":
            mask = count == len(agg_obj._get_axis(agg_axis))
        elif how == "all":
            mask = count > 0
        else:
            if how is not None:
                raise ValueError("invalid how option: {h}".format(h=how))
            else:
                raise TypeError("must specify how or thresh")

        result = self.loc(axis=axis)[mask]

    if inplace:
        self._update_inplace(result)
    else:
        return result
def drop_duplicates(self, subset=None, keep="first", inplace=False):
    """
    Return DataFrame with duplicate rows removed, optionally only
    considering certain columns. Indexes, including time indexes
    are ignored.

    Parameters
    ----------
    subset : column label or sequence of labels, optional
        Only consider certain columns for identifying duplicates, by
        default use all of the columns
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Drop duplicates except for the first occurrence.
        - ``last`` : Drop duplicates except for the last occurrence.
        - False : Drop all duplicates.
    inplace : boolean, default False
        Whether to drop duplicates in place or to return a copy

    Returns
    -------
    DataFrame
        Frame with duplicates removed, or None when ``inplace=True``.
    """
    # Validate ``inplace`` *before* the empty-frame shortcut so that an
    # invalid value raises, and honor the inplace contract for empty
    # frames (previously a copy was returned even when inplace=True).
    inplace = validate_bool_kwarg(inplace, "inplace")
    if self.empty:
        if inplace:
            return None
        return self.copy()

    duplicated = self.duplicated(subset, keep=keep)

    if inplace:
        # ``~`` is the boolean-inversion operator for boolean Series;
        # unary ``-`` on booleans is deprecated.
        inds, = (~duplicated)._ndarray_values.nonzero()
        new_data = self._data.take(inds)
        self._update_inplace(new_data)
    else:
        return self[~duplicated]
def duplicated(self, subset=None, keep="first"):
    """
    Return boolean Series denoting duplicate rows, optionally only
    considering certain columns.

    Parameters
    ----------
    subset : column label or sequence of labels, optional
        Only consider certain columns for identifying duplicates, by
        default use all of the columns
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the
          first occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the
          last occurrence.
        - False : Mark all duplicates as ``True``.

    Returns
    -------
    Series
    """
    from pandas.core.sorting import get_group_index
    from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT

    if self.empty:
        return Series(dtype=bool)

    def f(vals):
        # Factorize one column into integer labels, returning the labels
        # plus the number of distinct values along that column.
        labels, shape = algorithms.factorize(
            vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
        )
        return labels.astype("i8", copy=False), len(shape)

    if subset is None:
        subset = self.columns
    elif (
        not np.iterable(subset)
        or isinstance(subset, str)
        or isinstance(subset, tuple)
        and subset in self.columns
    ):
        # NOTE: ``and`` binds tighter than ``or`` above -- a tuple is only
        # treated as a single key when it is itself a column label.
        subset = (subset,)

    # Verify all columns in subset exist in the queried dataframe
    # Otherwise, raise a KeyError, same as if you try to __getitem__ with a
    # key that doesn't exist.
    diff = Index(subset).difference(self.columns)
    if not diff.empty:
        raise KeyError(diff)

    # Combine the per-column label arrays into a single group id per row;
    # rows sharing a group id are duplicates.
    vals = (col.values for name, col in self.items() if name in subset)
    labels, shape = map(list, zip(*map(f, vals)))

    ids = get_group_index(labels, shape, sort=False, xnull=False)
    return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(
    self,
    by,
    axis=0,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
):
    inplace = validate_bool_kwarg(inplace, "inplace")
    axis = self._get_axis_number(axis)

    # Normalize ``by`` to a list so single- and multi-key calls share code.
    if not isinstance(by, list):
        by = [by]
    if is_sequence(ascending) and len(by) != len(ascending):
        raise ValueError(
            "Length of ascending (%d) != length of by (%d)"
            % (len(ascending), len(by))
        )
    if len(by) > 1:
        from pandas.core.sorting import lexsort_indexer

        # multi-key sort: build a lexicographic indexer over all keys
        keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
        indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position)
        indexer = ensure_platform_int(indexer)
    else:
        from pandas.core.sorting import nargsort

        # single key: a plain NA-aware argsort suffices
        by = by[0]
        k = self._get_label_or_level_values(by, axis=axis)

        if isinstance(ascending, (tuple, list)):
            ascending = ascending[0]

        indexer = nargsort(
            k, kind=kind, ascending=ascending, na_position=na_position
        )

    # Apply the indexer to the underlying block manager.
    new_data = self._data.take(
        indexer, axis=self._get_block_manager_axis(axis), verify=False
    )

    if inplace:
        return self._update_inplace(new_data)
    else:
        return self._constructor(new_data).__finalize__(self)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_index.__doc__)
def sort_index(
    self,
    axis=0,
    level=None,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
    sort_remaining=True,
    by=None,
):
    # TODO: this can be combined with Series.sort_index impl as
    # almost identical
    inplace = validate_bool_kwarg(inplace, "inplace")

    # 10726
    if by is not None:
        # deprecated alias for sort_values
        warnings.warn(
            "by argument to sort_index is deprecated, "
            "please use .sort_values(by=...)",
            FutureWarning,
            stacklevel=2,
        )
        if level is not None:
            raise ValueError("unable to simultaneously sort by and level")
        return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)

    axis = self._get_axis_number(axis)
    labels = self._get_axis(axis)

    # make sure that the axis is lexsorted to start
    # if not we need to reconstruct to get the correct indexer
    labels = labels._sort_levels_monotonic()
    if level is not None:
        new_axis, indexer = labels.sortlevel(
            level, ascending=ascending, sort_remaining=sort_remaining
        )
    elif isinstance(labels, MultiIndex):
        from pandas.core.sorting import lexsort_indexer

        indexer = lexsort_indexer(
            labels._get_codes_for_sorting(),
            orders=ascending,
            na_position=na_position,
        )
    else:
        from pandas.core.sorting import nargsort

        # Check monotonic-ness before sort an index
        # GH11080
        if (ascending and labels.is_monotonic_increasing) or (
            not ascending and labels.is_monotonic_decreasing
        ):
            # already sorted: short-circuit with no work
            if inplace:
                return
            else:
                return self.copy()

        indexer = nargsort(
            labels, kind=kind, ascending=ascending, na_position=na_position
        )

    baxis = self._get_block_manager_axis(axis)
    new_data = self._data.take(indexer, axis=baxis, verify=False)

    # reconstruct axis if needed
    new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()

    if inplace:
        return self._update_inplace(new_data)
    else:
        return self._constructor(new_data).__finalize__(self)
def nlargest(self, n, columns, keep="first"):
    """
    Return the first `n` rows ordered by `columns` in descending order.

    Return the first `n` rows with the largest values in `columns`, in
    descending order. The columns that are not specified are returned as
    well, but not used for ordering. This method is equivalent to
    ``df.sort_values(columns, ascending=False).head(n)``, but more
    performant.

    Parameters
    ----------
    n : int
        Number of rows to return.
    columns : label or list of labels
        Column label(s) to order by.
    keep : {'first', 'last', 'all'}, default 'first'
        Where there are duplicate values:

        - `first` : prioritize the first occurrence(s)
        - `last` : prioritize the last occurrence(s)
        - ``all`` : do not drop any duplicates, even it means
          selecting more than `n` items.

        .. versionadded:: 0.24.0

    Returns
    -------
    DataFrame
        The first `n` rows ordered by the given columns in descending
        order.

    See Also
    --------
    DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
        ascending order.
    DataFrame.sort_values : Sort DataFrame by the values.
    DataFrame.head : Return the first `n` rows without re-ordering.

    Notes
    -----
    This function cannot be used with all column types. For example, when
    specifying columns with `object` or `category` dtypes, ``TypeError`` is
    raised.

    Examples
    --------
    >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000],
    ...                    'GDP': [1937894, 2583560, 12011]},
    ...                   index=["Italy", "France", "Malta"])
    >>> df.nlargest(2, 'population')
            population      GDP
    France    65000000  2583560
    Italy     59000000  1937894
    """
    # SelectNFrame implements the partial-sort based selection.
    selector = algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns)
    return selector.nlargest()
def nsmallest(self, n, columns, keep="first"):
    """
    Return the first `n` rows ordered by `columns` in ascending order.

    Return the first `n` rows with the smallest values in `columns`, in
    ascending order. The columns that are not specified are returned as
    well, but not used for ordering. This method is equivalent to
    ``df.sort_values(columns, ascending=True).head(n)``, but more
    performant.

    Parameters
    ----------
    n : int
        Number of items to retrieve.
    columns : list or str
        Column name or names to order by.
    keep : {'first', 'last', 'all'}, default 'first'
        Where there are duplicate values:

        - ``first`` : take the first occurrence.
        - ``last`` : take the last occurrence.
        - ``all`` : do not drop any duplicates, even it means
          selecting more than `n` items.

        .. versionadded:: 0.24.0

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
        descending order.
    DataFrame.sort_values : Sort DataFrame by the values.
    DataFrame.head : Return the first `n` rows without re-ordering.

    Examples
    --------
    >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000],
    ...                    'GDP': [1937894, 2583560, 12011]},
    ...                   index=["Italy", "France", "Malta"])
    >>> df.nsmallest(2, 'population')
           population    GDP
    Malta      434000  12011
    Italy    59000000  1937894
    """
    # SelectNFrame implements the partial-sort based selection.
    selector = algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns)
    return selector.nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
    """
    Swap levels i and j in a MultiIndex on a particular axis.

    Parameters
    ----------
    i, j : int, string (can be mixed)
        Level of index to be swapped. Can pass level name as string.
    axis : int, default 0
        Axis whose MultiIndex levels are swapped (0 = index, 1 = columns).

    Returns
    -------
    DataFrame
    """
    # Work on a copy so the caller's frame is left untouched.
    swapped = self.copy()
    if self._get_axis_number(axis) == 0:
        swapped.index = swapped.index.swaplevel(i, j)
    else:
        swapped.columns = swapped.columns.swaplevel(i, j)
    return swapped
def reorder_levels(self, order, axis=0):
    """
    Rearrange index levels using input order. May not drop or
    duplicate levels.

    Parameters
    ----------
    order : list of int or list of str
        List representing new level order. Reference level by number
        (position) or by key (label).
    axis : int
        Where to reorder levels.

    Returns
    -------
    type of caller (new object)
    """
    axis_num = self._get_axis_number(axis)
    # Only MultiIndex axes have levels to permute.
    if not isinstance(self._get_axis(axis_num), MultiIndex):  # pragma: no cover
        raise TypeError("Can only reorder levels on a hierarchical axis.")
    reordered = self.copy()
    if axis_num == 0:
        reordered.index = reordered.index.reorder_levels(order)
    else:
        reordered.columns = reordered.columns.reorder_levels(order)
    return reordered
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
    """Combine ``self`` with another DataFrame element-wise via ``func``."""
    # Align both frames on an outer join before combining.
    left_frame, right_frame = self.align(other, join="outer", level=level, copy=False)
    out_index, out_columns = left_frame.index, left_frame.columns

    def _arith_op(lvals, rvals):
        # For the mixed-type case where we iterate over columns,
        # _arith_op(lvals, rvals) is equivalent to
        # lvals._binop(rvals, func, fill_value=fill_value).
        lvals, rvals = ops.fill_binop(lvals, rvals, fill_value)
        return func(lvals, rvals)

    if ops.should_series_dispatch(left_frame, right_frame, func):
        # Mixed dtypes: operate column by column via Series ops.
        return ops.dispatch_to_series(left_frame, right_frame, _arith_op)
    # Homogeneous fastpath: operate on the underlying ndarrays directly.
    result = _arith_op(left_frame.values, right_frame.values)
    return self._constructor(
        result, index=out_index, columns=out_columns, copy=False
    )
def _combine_match_index(self, other, func, level=None):
    """Combine with ``other`` (a Series) aligned against the row index."""
    frame, ser = self.align(other, join="outer", axis=0, level=level, copy=False)
    assert frame.index.equals(ser.index)
    if frame._is_mixed_type or ser._is_mixed_type:
        # Mixed dtypes: go column-wise to avoid the costly object cast
        # that ``.values`` would force.
        return ops.dispatch_to_series(frame, ser, func)
    # Homogeneous fastpath: broadcast func over the transposed values.
    with np.errstate(all="ignore"):
        combined = func(frame.values.T, ser.values).T
    return self._constructor(
        combined, index=frame.index, columns=self.columns, copy=False
    )
def _combine_match_columns(self, other, func, level=None):
    """Combine with ``other`` (a Series) aligned against the columns."""
    assert isinstance(other, Series)
    aligned, ser = self.align(other, join="outer", axis=1, level=level, copy=False)
    assert aligned.columns.equals(ser.index)
    # Always dispatch column-wise, broadcasting ``ser`` across rows.
    return ops.dispatch_to_series(aligned, ser, func, axis="columns")
def _combine_const(self, other, func):
    """Combine every element of ``self`` with the scalar ``other`` via ``func``."""
    # ``other`` must be a true scalar (or a 0-d array).
    assert np.ndim(other) == 0 or lib.is_scalar(other)
    return ops.dispatch_to_series(self, other, func)
def combine(self, other, func, fill_value=None, overwrite=True):
    """
    Perform column-wise combine with another DataFrame.

    Combines a DataFrame with `other` DataFrame using `func`
    to element-wise combine columns. The row and column indexes of the
    resulting DataFrame will be the union of the two.

    Parameters
    ----------
    other : DataFrame
        The DataFrame to merge column-wise.
    func : function
        Function that takes two series as inputs and return a Series or a
        scalar. Used to merge the two dataframes column by columns.
    fill_value : scalar value, default None
        The value to fill NaNs with prior to passing any column to the
        merge func.
    overwrite : bool, default True
        If True, columns in `self` that do not exist in `other` will be
        overwritten with NaNs.

    Returns
    -------
    DataFrame
        Combination of the provided DataFrames.

    See Also
    --------
    DataFrame.combine_first : Combine two DataFrame objects and default to
        non-null values in frame calling the method.

    Examples
    --------
    >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
       A  B
    0  0  3
    1  0  3
    """
    other_rows = len(other.index)  # pre-alignment length, saved for compare
    this, other = self.align(other, copy=False)
    new_index = this.index

    # Short-circuit when one side contributes nothing at all.
    if other.empty and len(new_index) == len(self.index):
        return self.copy()
    if self.empty and len(other) == other_rows:
        return other.copy()

    # union() sorts the combined columns when possible.
    new_columns = this.columns.union(other.columns)
    do_fill = fill_value is not None
    combined = {}
    for col in new_columns:
        left_ser = this[col]
        right_ser = other[col]
        left_dtype = left_ser.dtype
        right_dtype = right_ser.dtype
        left_na = isna(left_ser)
        right_na = isna(right_ser)

        # Keep self's column untouched when other has nothing non-NA to
        # offer and we are not allowed to overwrite; columns absent from
        # the intersection still propagate normally below.
        if not overwrite and right_na.all():
            combined[col] = this[col].copy()
            continue

        if do_fill:
            # Copy before filling so the aligned frames are not mutated.
            left_ser = left_ser.copy()
            right_ser = right_ser.copy()
            left_ser[left_na] = fill_value
            right_ser[right_na] = fill_value

        if col not in self.columns:
            # Column only exists in other: the aligned self column is all
            # NaN, so try to adopt other's dtype for it.
            try:
                left_ser = left_ser.astype(right_dtype, copy=False)
            except ValueError:
                # e.g. right_dtype is an integer type that cannot hold NaN
                pass
        else:
            # Shared column: promote both sides to a common dtype first.
            common = find_common_type([left_dtype, right_dtype])
            if not is_dtype_equal(left_dtype, common):
                left_ser = left_ser.astype(common)
            if not is_dtype_equal(right_dtype, common):
                right_ser = right_ser.astype(common)

        arr = func(left_ser, right_ser)
        # Try to restore the original dtype of self's column.
        arr = maybe_downcast_to_dtype(arr, left_dtype)
        combined[col] = arr

    return self._constructor(combined, index=new_index, columns=new_columns)
def combine_first(self, other):
    """
    Update null elements with value in the same location in `other`.

    Combine two DataFrame objects by filling null values in one DataFrame
    with non-null values from other DataFrame. The row and column indexes
    of the resulting DataFrame will be the union of the two.

    Parameters
    ----------
    other : DataFrame
        Provided DataFrame to use to fill null values.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.combine : Perform series-wise operation on two DataFrames
        using a given function.

    Examples
    --------
    >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> df1.combine_first(df2)
         A    B
    0  1.0  3.0
    1  0.0  4.0
    """
    import pandas.core.computation.expressions as expressions

    def _comparable_values(arr):
        # Unwrap Series/Index to raw values and view datetimelike data as
        # i8 so ``expressions.where`` can operate on it.
        if isinstance(arr, (ABCIndexClass, ABCSeries)):
            arr = arr._values
        if needs_i8_conversion(arr):
            if is_extension_array_dtype(arr.dtype):
                arr = arr.asi8
            else:
                arr = arr.view("i8")
        return arr

    def _prefer_non_null(x, y):
        null_mask = isna(x)
        if isinstance(null_mask, (ABCIndexClass, ABCSeries)):
            null_mask = null_mask._values
        x_values = _comparable_values(x)
        y_values = _comparable_values(y)
        # Columns present only in ``other`` are taken wholesale.
        if y.name not in self.columns:
            return y_values
        return expressions.where(null_mask, y_values, x_values)

    return self.combine(other, _prefer_non_null, overwrite=False)
@deprecate_kwarg(
    old_arg_name="raise_conflict",
    new_arg_name="errors",
    mapping={False: "ignore", True: "raise"},
)
def update(
    self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
):
    """
    Modify in place using non-NA values from another DataFrame.

    Aligns on indices. There is no return value.

    Parameters
    ----------
    other : DataFrame, or object coercible into a DataFrame
        Should have at least one matching index/column label with the
        original DataFrame. If a Series is passed, its name attribute
        must be set, and that will be used as the column name to align
        with the original DataFrame.
    join : {'left'}, default 'left'
        Only left join is implemented, keeping the index and columns of
        the original object.
    overwrite : bool, default True
        How to handle non-NA values for overlapping keys:

        * True: overwrite original DataFrame's values with values from
          `other`.
        * False: only update values that are NA in the original
          DataFrame.
    filter_func : callable(1d-array) -> bool 1d-array, optional
        Can choose to replace values other than NA. Return True for
        values that should be updated.
    errors : {'raise', 'ignore'}, default 'ignore'
        If 'raise', will raise a ValueError if the DataFrame and `other`
        both contain non-NA data in the same place.

        .. versionchanged:: 0.24.0
           Changed from `raise_conflict=False|True`
           to `errors='ignore'|'raise'`.

    Returns
    -------
    None : method directly changes calling object

    Raises
    ------
    ValueError
        * When `errors='raise'` and there's overlapping non-NA data.
        * When `errors` is not either `'ignore'` or `'raise'`
    NotImplementedError
        * If `join != 'left'`

    See Also
    --------
    dict.update : Similar method for dictionaries.
    DataFrame.merge : For column(s)-on-columns(s) operations.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
    >>> df.update(pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}))
    >>> df
       A  B
    0  1  4
    1  2  5
    2  3  6
    """
    import pandas.core.computation.expressions as expressions

    # TODO: Support other joins
    if join != "left":  # pragma: no cover
        raise NotImplementedError("Only left join is supported")
    if errors not in ["ignore", "raise"]:
        raise ValueError(
            "The parameter errors must be either 'ignore' or 'raise'"
        )

    if not isinstance(other, DataFrame):
        other = DataFrame(other)

    # Left join: reshape ``other`` onto self's index and columns.
    other = other.reindex_like(self)

    for col in self.columns:
        existing = self[col]._values
        incoming = other[col]._values

        if filter_func is not None:
            # Keep positions the filter rejects, or where other is NA.
            with np.errstate(all="ignore"):
                keep_mask = ~filter_func(existing) | isna(incoming)
        else:
            if errors == "raise":
                if any(notna(incoming) & notna(existing)):
                    raise ValueError("Data overlaps.")
            # True -> keep self's value; False -> take other's value.
            keep_mask = isna(incoming) if overwrite else notna(existing)

        # don't overwrite columns unnecessarily
        if keep_mask.all():
            continue

        self[col] = expressions.where(keep_mask, existing, incoming)
# ----------------------------------------------------------------------
# Data reshaping
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None):
    # Delegate to the reshape implementation; imported lazily (and aliased,
    # since it shares this method's name) to avoid a circular import.
    from pandas.core.reshape.pivot import pivot as pivot_impl

    return pivot_impl(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is being used as the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is being used as the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : boolean, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
    self,
    values=None,
    index=None,
    columns=None,
    aggfunc="mean",
    fill_value=None,
    margins=False,
    dropna=True,
    margins_name="All",
    observed=False,
):
    # Delegate to the reshape implementation; imported lazily (and aliased,
    # since it shares this method's name) to avoid a circular import.
    from pandas.core.reshape.pivot import pivot_table as pivot_table_impl

    return pivot_table_impl(
        self,
        values=values,
        index=index,
        columns=columns,
        aggfunc=aggfunc,
        fill_value=fill_value,
        margins=margins,
        dropna=dropna,
        margins_name=margins_name,
        observed=observed,
    )
def stack(self, level=-1, dropna=True):
    """
    Stack the prescribed level(s) from columns to index.

    Return a reshaped DataFrame or Series having a multi-level index
    with one or more new inner-most levels compared to the current
    DataFrame. The new inner-most levels are created by pivoting the
    columns of the current dataframe:

    - if the columns have a single level, the output is a Series;
    - if the columns have multiple levels, the new index level(s) is
      (are) taken from the prescribed level(s) and the output is a
      DataFrame.

    The new index levels are sorted.

    Parameters
    ----------
    level : int, str, list, default -1
        Level(s) to stack from the column axis onto the index axis,
        defined as one index or label, or a list of indices or labels.
    dropna : bool, default True
        Whether to drop rows in the resulting Frame/Series with missing
        values. Stacking a column level onto the index axis can create
        combinations of index and column values that are missing from
        the original dataframe.

    Returns
    -------
    DataFrame or Series
        Stacked dataframe or series.

    See Also
    --------
    DataFrame.unstack : Unstack prescribed level(s) from index axis
        onto column axis.
    DataFrame.pivot : Reshape dataframe from long format to wide format.
    DataFrame.pivot_table : Create a spreadsheet-style pivot table
        as a DataFrame.

    Notes
    -----
    The function is named by analogy with a collection of books being
    reorganized from being side by side on a horizontal position (the
    columns of the dataframe) to being stacked vertically on top of each
    other (in the index of the dataframe).

    Examples
    --------
    >>> df = pd.DataFrame([[0, 1], [2, 3]],
    ...                   index=['cat', 'dog'],
    ...                   columns=['weight', 'height'])
    >>> df.stack()
    cat  weight    0
         height    1
    dog  weight    2
         height    3
    dtype: int64
    """
    from pandas.core.reshape.reshape import stack as stack_one
    from pandas.core.reshape.reshape import stack_multiple

    if isinstance(level, (tuple, list)):
        # Several levels at once -> dedicated multi-level implementation.
        return stack_multiple(self, level, dropna=dropna)
    return stack_one(self, level, dropna=dropna)
def explode(self, column: Union[str, Tuple]) -> "DataFrame":
    """
    Transform each element of a list-like to a row, replicating the
    index values.

    .. versionadded:: 0.25.0

    Parameters
    ----------
    column : str or tuple
        Column to explode.

    Returns
    -------
    DataFrame
        Exploded lists to rows of the subset columns;
        index will be duplicated for these rows.

    Raises
    ------
    ValueError :
        if columns of the frame are not unique.

    See Also
    --------
    DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
        index labels
    DataFrame.melt : Unpivot a DataFrame from wide format to long format
    Series.explode : Explode a DataFrame from list-like columns to long format.

    Notes
    -----
    This routine will explode list-likes including lists, tuples, Series,
    and np.ndarray. The result dtype of the subset rows will be object.
    Scalars will be returned unchanged. Empty list-likes will result in a
    np.nan for that row.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
    >>> df.explode('A')
         A  B
    0    1  1
    0    2  1
    0    3  1
    1  foo  1
    2  NaN  1
    3    3  1
    3    4  1
    """
    if not (is_scalar(column) or isinstance(column, tuple)):
        raise ValueError("column must be a scalar")
    if not self.columns.is_unique:
        raise ValueError("columns must be unique")

    # Explode just the one column, then re-attach the remaining columns,
    # restoring the original column order.
    exploded = self[column].explode()
    remainder = self.drop([column], axis=1)
    joined = remainder.join(exploded)
    return joined.reindex(columns=self.columns, copy=False)
def unstack(self, level=-1, fill_value=None):
    """
    Pivot a level of the (necessarily hierarchical) index labels, returning
    a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels.

    If the index is not a MultiIndex, the output will be a Series
    (the analogue of stack when the columns are not a MultiIndex).
    The level involved will automatically get sorted.

    Parameters
    ----------
    level : int, string, or list of these, default -1 (last level)
        Level(s) of index to unstack, can pass level name
    fill_value : replace NaN with this value if the unstack produces
        missing values

    Returns
    -------
    Series or DataFrame

    See Also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation
        from `unstack`).

    Examples
    --------
    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
    ...                                    ('two', 'a'), ('two', 'b')])
    >>> s = pd.Series(np.arange(1.0, 5.0), index=index)
    >>> s.unstack(level=-1)
         a    b
    one  1.0  2.0
    two  3.0  4.0
    """
    # Delegate to the reshape implementation; imported lazily (and aliased,
    # since it shares this method's name) to avoid a circular import.
    from pandas.core.reshape.reshape import unstack as unstack_impl

    return unstack_impl(self, level, fill_value)
_shared_docs[
"melt"
] = """
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Series.explode
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
@Appender(
    _shared_docs["melt"]
    % {
        "caller": "df.melt(",
        "versionadded": ".. versionadded:: 0.20.0\n",
        "other": "melt",
    }
)
def melt(
    self,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    # Delegate to the reshape implementation; imported lazily (and aliased,
    # since it shares this method's name) to avoid a circular import.
    from pandas.core.reshape.melt import melt as melt_impl

    return melt_impl(
        self,
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
    )
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
    """
    First discrete difference of element.

    Calculates the difference of a DataFrame element compared with another
    element in the DataFrame (default is the element in the same column
    of the previous row).

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for calculating difference, accepts negative
        values.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Take difference over rows (0) or columns (1).

    Returns
    -------
    DataFrame

    See Also
    --------
    Series.diff: First discrete difference for a Series.
    DataFrame.pct_change: Percent change over given number of periods.
    DataFrame.shift: Shift index by desired number of periods with an
        optional time freq.

    Examples
    --------
    >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 4, 9]})
    >>> df.diff()
         a    b
    0  NaN  NaN
    1  1.0  3.0
    2  1.0  5.0
    """
    # Translate the user-facing axis to the block manager's layout
    # (the manager stores data transposed relative to the frame).
    mgr_axis = self._get_block_manager_axis(axis)
    diffed = self._data.diff(n=periods, axis=mgr_axis)
    return self._constructor(diffed)
# ----------------------------------------------------------------------
# Function application
def _gotitem(
    self,
    key: Union[str, List[str]],
    ndim: int,
    subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
    """
    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : string / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    if subset is not None and subset.ndim == 1:
        # Already a Series; there is nothing further to select from it.
        return subset
    # Fall back to the whole frame when no subset was supplied.
    target = self if subset is None else subset
    # TODO: _shallow_copy(subset)?
    return target[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.EWM : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@Substitution(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = self.T._aggregate(arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return super()._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs["transform"] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
if axis == 1:
return self.T.transform(func, *args, **kwargs).T
return super().transform(func, *args, **kwargs)
    def apply(
        self,
        func,
        axis=0,
        broadcast=None,
        raw=False,
        reduce=None,
        result_type=None,
        args=(),
        **kwds
    ):
        """
        Apply a function along an axis of the DataFrame.
        Objects passed to the function are Series objects whose index is
        either the DataFrame's index (``axis=0``) or the DataFrame's columns
        (``axis=1``). By default (``result_type=None``), the final return type
        is inferred from the return type of the applied function. Otherwise,
        it depends on the `result_type` argument.
        Parameters
        ----------
        func : function
            Function to apply to each column or row.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Axis along which the function is applied:
            * 0 or 'index': apply function to each column.
            * 1 or 'columns': apply function to each row.
        broadcast : bool, optional
            Only relevant for aggregation functions:
            * ``False`` or ``None`` : returns a Series whose length is the
              length of the index or the number of columns (based on the
              `axis` parameter)
            * ``True`` : results will be broadcast to the original shape
              of the frame, the original index and columns will be retained.
            .. deprecated:: 0.23.0
               This argument will be removed in a future version, replaced
               by result_type='broadcast'.
        raw : bool, default False
            * ``False`` : passes each row or column as a Series to the
              function.
            * ``True`` : the passed function will receive ndarray objects
              instead.
              If you are just applying a NumPy reduction function this will
              achieve much better performance.
        reduce : bool or None, default None
            Try to apply reduction procedures. If the DataFrame is empty,
            `apply` will use `reduce` to determine whether the result
            should be a Series or a DataFrame. If ``reduce=None`` (the
            default), `apply`'s return value will be guessed by calling
            `func` on an empty Series
            (note: while guessing, exceptions raised by `func` will be
            ignored).
            If ``reduce=True`` a Series will always be returned, and if
            ``reduce=False`` a DataFrame will always be returned.
            .. deprecated:: 0.23.0
               This argument will be removed in a future version, replaced
               by ``result_type='reduce'``.
        result_type : {'expand', 'reduce', 'broadcast', None}, default None
            These only act when ``axis=1`` (columns):
            * 'expand' : list-like results will be turned into columns.
            * 'reduce' : returns a Series if possible rather than expanding
              list-like results. This is the opposite of 'expand'.
            * 'broadcast' : results will be broadcast to the original shape
              of the DataFrame, the original index and columns will be
              retained.
            The default behaviour (None) depends on the return value of the
            applied function: list-like results will be returned as a Series
            of those. However if the apply function returns a Series these
            are expanded to columns.
            .. versionadded:: 0.23.0
        args : tuple
            Positional arguments to pass to `func` in addition to the
            array/series.
        **kwds
            Additional keyword arguments to pass as keywords arguments to
            `func`.
        Returns
        -------
        Series or DataFrame
            Result of applying ``func`` along the given axis of the
            DataFrame.
        See Also
        --------
        DataFrame.applymap: For elementwise operations.
        DataFrame.aggregate: Only perform aggregating type operations.
        DataFrame.transform: Only perform transforming type operations.
        Notes
        -----
        In the current implementation apply calls `func` twice on the
        first column/row to decide whether it can take a fast or slow
        code path. This can lead to unexpected behavior if `func` has
        side-effects, as they will take effect twice for the first
        column/row.
        Examples
        --------
        >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
        >>> df
           A  B
        0  4  9
        1  4  9
        2  4  9
        Using a numpy universal function (in this case the same as
        ``np.sqrt(df)``):
        >>> df.apply(np.sqrt)
             A    B
        0  2.0  3.0
        1  2.0  3.0
        2  2.0  3.0
        Using a reducing function on either axis
        >>> df.apply(np.sum, axis=0)
        A    12
        B    27
        dtype: int64
        >>> df.apply(np.sum, axis=1)
        0    13
        1    13
        2    13
        dtype: int64
        Returning a list-like will result in a Series
        >>> df.apply(lambda x: [1, 2], axis=1)
        0    [1, 2]
        1    [1, 2]
        2    [1, 2]
        dtype: object
        Passing result_type='expand' will expand list-like results
        to columns of a Dataframe
        >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
           0  1
        0  1  2
        1  1  2
        2  1  2
        Returning a Series inside the function is similar to passing
        ``result_type='expand'``. The resulting column names
        will be the Series index.
        >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
           foo  bar
        0    1    2
        1    1    2
        2    1    2
        Passing ``result_type='broadcast'`` will ensure the same shape
        result, whether list-like or scalar is returned by the function,
        and broadcast it along the axis. The resulting column names will
        be the originals.
        >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
           A  B
        0  1  2
        1  1  2
        2  1  2
        """
        from pandas.core.apply import frame_apply
        # All dispatch logic (axis handling, result_type, and the deprecated
        # broadcast/reduce paths) lives in frame_apply; this method is only
        # the public entry point.
        op = frame_apply(
            self,
            func=func,
            axis=axis,
            broadcast=broadcast,
            raw=raw,
            reduce=reduce,
            result_type=result_type,
            args=args,
            kwds=kwds,
        )
        return op.get_result()
    def applymap(self, func):
        """
        Apply a function to a Dataframe elementwise.
        This method applies a function that accepts and returns a scalar
        to every element of a DataFrame.
        Parameters
        ----------
        func : callable
            Python function, returns a single value from a single value.
        Returns
        -------
        DataFrame
            Transformed DataFrame.
        See Also
        --------
        DataFrame.apply : Apply a function along input axis of DataFrame.
        Notes
        -----
        In the current implementation applymap calls `func` twice on the
        first column/row to decide whether it can take a fast or slow
        code path. This can lead to unexpected behavior if `func` has
        side-effects, as they will take effect twice for the first
        column/row.
        Examples
        --------
        >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
        >>> df
               0      1
        0  1.000  2.120
        1  3.356  4.567
        >>> df.applymap(lambda x: len(str(x)))
           0  1
        0  3  4
        1  5  5
        Note that a vectorized version of `func` often exists, which will
        be much faster. You could square each number elementwise.
        >>> df.applymap(lambda x: x**2)
                   0          1
        0   1.000000   4.494400
        1  11.262736  20.857489
        But it's better to avoid applymap in that case.
        >>> df ** 2
                   0          1
        0   1.000000   4.494400
        1  11.262736  20.857489
        """
        # if we have a dtype == 'M8[ns]', provide boxed values
        def infer(x):
            # Per-column mapper handed to ``apply``.  Casting to object first
            # boxes datetime-like values (so ``func`` sees scalars rather than
            # raw i8 values); empty columns can be mapped as-is.
            if x.empty:
                return lib.map_infer(x, func)
            return lib.map_infer(x.astype(object).values, func)
        return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(
other.values.reshape((1, len(other))),
index=index,
columns=combined_columns,
)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it.
lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
# For SparseDataFrame's benefit
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
    def _join_compat(
        self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
    ):
        # Shared implementation behind DataFrame.join; see join() for the
        # parameter documentation.
        from pandas.core.reshape.merge import merge
        from pandas.core.reshape.concat import concat
        if isinstance(other, Series):
            # A Series must be named so it can become a column label.
            if other.name is None:
                raise ValueError("Other Series must have a name")
            other = DataFrame({other.name: other})
        if isinstance(other, DataFrame):
            # Single frame: a plain merge of our index (or `on` columns)
            # against other's index.
            return merge(
                self,
                other,
                left_on=on,
                how=how,
                left_index=on is None,
                right_index=True,
                suffixes=(lsuffix, rsuffix),
                sort=sort,
            )
        else:
            # List/tuple of frames: only index-on-index joins are supported.
            if on is not None:
                raise ValueError(
                    "Joining multiple DataFrames only supported" " for joining on index"
                )
            frames = [self] + list(other)
            can_concat = all(df.index.is_unique for df in frames)
            # join indexes only using concat
            if can_concat:
                if how == "left":
                    # Outer-concat, then restrict to our own index to get
                    # left-join semantics.
                    res = concat(frames, axis=1, join="outer", verify_integrity=True)
                    return res.reindex(self.index, copy=False)
                else:
                    return concat(frames, axis=1, join=how, verify_integrity=True)
            # Non-unique indexes: fall back to chained pairwise merges.
            joined = frames[0]
            for frame in frames[1:]:
                joined = merge(
                    joined, frame, how=how, left_index=True, right_index=True
                )
            return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a " "Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == "pearson":
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
"'{method}' was supplied".format(method=method)
)
return self._constructor(correl, index=idx, columns=cols)
    def cov(self, min_periods=None):
        """
        Compute pairwise covariance of columns, excluding NA/null values.
        Compute the pairwise covariance among the series of a DataFrame.
        The returned data frame is the `covariance matrix
        <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
        of the DataFrame.
        Both NA and null values are automatically excluded from the
        calculation. (See the note below about bias from missing values.)
        A threshold can be set for the minimum number of
        observations for each value created. Comparisons with observations
        below this threshold will be returned as ``NaN``.
        This method is generally used for the analysis of time series data to
        understand the relationship between different measures
        across time.
        Parameters
        ----------
        min_periods : int, optional
            Minimum number of observations required per pair of columns
            to have a valid result.
        Returns
        -------
        DataFrame
            The covariance matrix of the series of the DataFrame.
        See Also
        --------
        Series.cov : Compute covariance with another Series.
        core.window.EWM.cov: Exponential weighted sample covariance.
        core.window.Expanding.cov : Expanding sample covariance.
        core.window.Rolling.cov : Rolling sample covariance.
        Notes
        -----
        Returns the covariance matrix of the DataFrame's time series.
        The covariance is normalized by N-1.
        For DataFrames that have Series that are missing data (assuming that
        data is `missing at random
        <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
        the returned covariance matrix will be an unbiased estimate
        of the variance and covariance between the member Series.
        However, for many applications this estimate may not be acceptable
        because the estimate covariance matrix is not guaranteed to be positive
        semi-definite. This could lead to estimate correlations having
        absolute values which are greater than one, and/or a non-invertible
        covariance matrix. See `Estimation of covariance matrices
        <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
        matrices>`__ for more details.
        Examples
        --------
        >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
        ...                   columns=['dogs', 'cats'])
        >>> df.cov()
                  dogs      cats
        dogs  0.666667 -1.000000
        cats -1.000000  1.666667
        >>> np.random.seed(42)
        >>> df = pd.DataFrame(np.random.randn(1000, 5),
        ...                   columns=['a', 'b', 'c', 'd', 'e'])
        >>> df.cov()
                  a         b         c         d         e
        a  0.998438 -0.020161  0.059277 -0.008943  0.014144
        b -0.020161  1.059352 -0.008543 -0.024738  0.009826
        c  0.059277 -0.008543  1.010670 -0.001486 -0.000271
        d -0.008943 -0.024738 -0.001486  0.921297 -0.013692
        e  0.014144  0.009826 -0.000271 -0.013692  0.977795
        **Minimum number of periods**
        This method also supports an optional ``min_periods`` keyword
        that specifies the required minimum number of non-NA observations for
        each column pair in order to have a valid result:
        >>> np.random.seed(42)
        >>> df = pd.DataFrame(np.random.randn(20, 3),
        ...                   columns=['a', 'b', 'c'])
        >>> df.loc[df.index[:5], 'a'] = np.nan
        >>> df.loc[df.index[5:10], 'b'] = np.nan
        >>> df.cov(min_periods=12)
                  a         b         c
        a  0.316741       NaN -0.150812
        b       NaN  1.248003  0.191417
        c -0.150812  0.191417  0.895202
        """
        # Only numeric columns participate in the covariance matrix.
        numeric_df = self._get_numeric_data()
        cols = numeric_df.columns
        idx = cols.copy()
        mat = numeric_df.values
        if notna(mat).all():
            # No missing data: delegate to numpy's covariance.
            if min_periods is not None and min_periods > len(mat):
                # Fewer rows than required observations: all-NaN matrix.
                baseCov = np.empty((mat.shape[1], mat.shape[1]))
                baseCov.fill(np.nan)
            else:
                baseCov = np.cov(mat.T)
                # reshape guards the degenerate 0/1-column case, where
                # np.cov returns a scalar-like result.
                baseCov = baseCov.reshape((len(cols), len(cols)))
        else:
            # Missing data present: use the pairwise NA-aware C routine.
            baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods)
        return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson"):
"""
Compute pairwise correlation between rows or columns of DataFrame
with rows or columns of Series or DataFrame. DataFrames are first
aligned along both axes before computing the correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable".format(method=method)
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
    def count(self, axis=0, level=None, numeric_only=False):
        """
        Count non-NA cells for each column or row.
        The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
        on `pandas.options.mode.use_inf_as_na`) are considered NA.
        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            If 0 or 'index' counts are generated for each column.
            If 1 or 'columns' counts are generated for each **row**.
        level : int or str, optional
            If the axis is a `MultiIndex` (hierarchical), count along a
            particular `level`, collapsing into a `DataFrame`.
            A `str` specifies the level name.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.
        Returns
        -------
        Series or DataFrame
            For each column/row the number of non-NA/null entries.
            If `level` is specified returns a `DataFrame`.
        See Also
        --------
        Series.count: Number of non-NA elements in a Series.
        DataFrame.shape: Number of DataFrame rows and columns (including NA
            elements).
        DataFrame.isna: Boolean same-sized DataFrame showing places of NA
            elements.
        Examples
        --------
        Constructing DataFrame from a dictionary:
        >>> df = pd.DataFrame({"Person":
        ...                    ["John", "Myla", "Lewis", "John", "Myla"],
        ...                    "Age": [24., np.nan, 21., 33, 26],
        ...                    "Single": [False, True, True, True, False]})
        >>> df
           Person   Age  Single
        0    John  24.0   False
        1    Myla   NaN    True
        2   Lewis  21.0    True
        3    John  33.0    True
        4    Myla  26.0   False
        Notice the uncounted NA values:
        >>> df.count()
        Person    5
        Age       4
        Single    5
        dtype: int64
        Counts for each **row**:
        >>> df.count(axis='columns')
        0    3
        1    2
        2    3
        3    3
        4    3
        dtype: int64
        Counts for one level of a `MultiIndex`:
        >>> df.set_index(["Person", "Single"]).count(level="Person")
                Age
        Person
        John      2
        Lewis     1
        Myla      1
        """
        axis = self._get_axis_number(axis)
        if level is not None:
            # MultiIndex level counting is handled separately.
            return self._count_level(level, axis=axis, numeric_only=numeric_only)
        if numeric_only:
            frame = self._get_numeric_data()
        else:
            frame = self
        # GH #423
        if len(frame._get_axis(axis)) == 0:
            # Empty along the counting axis: every count is zero.
            result = Series(0, index=frame._get_agg_axis(axis))
        else:
            if frame._is_mixed_type or frame._data.any_extension_types:
                # the or any_extension_types is really only hit for single-
                # column frames with an extension array
                result = notna(frame).sum(axis=axis)
            else:
                # GH13407
                series_counts = notna(frame).sum(axis=axis)
                counts = series_counts.values
                result = Series(counts, index=frame._get_agg_axis(axis))
        return result.astype("int64")
    def _count_level(self, level, axis=0, numeric_only=False):
        # Count non-NA cells grouped by one level of a MultiIndex axis;
        # backs DataFrame.count(level=...).
        if numeric_only:
            frame = self._get_numeric_data()
        else:
            frame = self
        count_axis = frame._get_axis(axis)
        agg_axis = frame._get_agg_axis(axis)
        if not isinstance(count_axis, MultiIndex):
            raise TypeError(
                "Can only count levels on hierarchical "
                "{ax}.".format(ax=self._get_axis_name(axis))
            )
        if frame._is_mixed_type:
            # Since we have mixed types, calling notna(frame.values) might
            # upcast everything to object
            mask = notna(frame).values
        else:
            # But use the speedup when we have homogeneous dtypes
            mask = notna(frame.values)
        if axis == 1:
            # We're transposing the mask rather than frame to avoid potential
            # upcasts to object, which induces a ~20x slowdown
            mask = mask.T
        if isinstance(level, str):
            # Resolve a level name to its positional number.
            level = count_axis._get_level_number(level)
        level_index = count_axis.levels[level]
        level_codes = ensure_int64(count_axis.codes[level])
        counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=0)
        result = DataFrame(counts, index=level_index, columns=agg_axis)
        if axis == 1:
            # Undo our earlier transpose
            return result.T
        else:
            return result
    def _reduce(
        self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
    ):
        """
        Perform a reduction (sum, mean, any, ...) over the requested axis.

        Parameters
        ----------
        op : callable
            The underlying reduction function (applied to an ndarray).
        name : str
            Name of the reduction (unused here beyond the signature).
        axis : int or None
            Axis to reduce over; ``None`` together with
            ``filter_type == 'bool'`` reduces over the whole frame.
        skipna : bool
            Forwarded to ``op``.
        numeric_only : bool or None
            If None, first try all columns and fall back to restricted data
            when the full reduction raises.
        filter_type : {None, 'numeric', 'bool'}
            Which column subset to use when restricting the data.
        """
        if axis is None and filter_type == "bool":
            # Full-frame reduction: no labels, return a scalar-like result.
            labels = None
            constructor = None
        else:
            # TODO: Make other agg func handle axis=None properly
            axis = self._get_axis_number(axis)
            labels = self._get_agg_axis(axis)
            constructor = self._constructor
        def f(x):
            # Bind axis/skipna/extra kwds so the reduction can be applied to
            # a bare ndarray.
            return op(x, axis=axis, skipna=skipna, **kwds)
        # exclude timedelta/datetime unless we are uniform types
        if (
            axis == 1
            and self._is_datelike_mixed_type
            and (
                not self._is_homogeneous_type
                and not is_datetime64tz_dtype(self.dtypes[0])
            )
        ):
            numeric_only = True
        if numeric_only is None:
            # Optimistic path: try reducing the raw values of all columns.
            try:
                values = self.values
                result = f(values)
                if filter_type == "bool" and is_object_dtype(values) and axis is None:
                    # work around https://github.com/numpy/numpy/issues/10489
                    # TODO: combine with hasattr(result, 'dtype') further down
                    # hard since we don't have `values` down there.
                    result = np.bool_(result)
            except Exception as e:
                # try by-column first
                if filter_type is None and axis == 0:
                    try:
                        # this can end up with a non-reduction
                        # but not always. if the types are mixed
                        # with datelike then need to make sure a series
                        # we only end up here if we have not specified
                        # numeric_only and yet we have tried a
                        # column-by-column reduction, where we have mixed type.
                        # So let's just do what we can
                        from pandas.core.apply import frame_apply
                        opa = frame_apply(
                            self, func=f, result_type="expand", ignore_failures=True
                        )
                        result = opa.get_result()
                        if result.ndim == self.ndim:
                            result = result.iloc[0]
                        return result
                    except Exception:
                        pass
                # Fallback: retry on the numeric/bool subset of columns.
                if filter_type is None or filter_type == "numeric":
                    data = self._get_numeric_data()
                elif filter_type == "bool":
                    data = self._get_bool_data()
                else:  # pragma: no cover
                    e = NotImplementedError(
                        "Handling exception with filter_type {f} not"
                        "implemented.".format(f=filter_type)
                    )
                    raise_with_traceback(e)
                with np.errstate(all="ignore"):
                    result = f(data.values)
                labels = data._get_agg_axis(axis)
        else:
            if numeric_only:
                if filter_type is None or filter_type == "numeric":
                    data = self._get_numeric_data()
                elif filter_type == "bool":
                    # GH 25101, # GH 24434
                    data = self._get_bool_data() if axis == 0 else self
                else:  # pragma: no cover
                    msg = (
                        "Generating numeric_only data with filter_type {f}"
                        "not supported.".format(f=filter_type)
                    )
                    raise NotImplementedError(msg)
                values = data.values
                labels = data._get_agg_axis(axis)
            else:
                values = self.values
            result = f(values)
        if hasattr(result, "dtype") and is_object_dtype(result.dtype):
            # Try to tighten an object-dtype result to float/bool.
            try:
                if filter_type is None or filter_type == "numeric":
                    result = result.astype(np.float64)
                elif filter_type == "bool" and notna(result).all():
                    result = result.astype(np.bool_)
            except (ValueError, TypeError):
                # try to coerce to the original dtypes item by item if we can
                if axis == 0:
                    result = coerce_to_dtypes(result, self.dtypes)
        if constructor is not None:
            # Wrap the ndarray result with the appropriate axis labels.
            result = Series(result, index=labels)
        return result
def nunique(self, axis=0, dropna=True):
    """
    Count distinct observations over requested axis.

    Return Series with number of distinct observations. Can ignore NaN
    values.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
        column-wise.
    dropna : bool, default True
        Don't include NaN in the counts.

    Returns
    -------
    Series

    See Also
    --------
    Series.nunique: Method nunique for Series.
    DataFrame.count: Count non-NA cells for each column or row.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
    >>> df.nunique()
    A    3
    B    1
    dtype: int64
    """
    # Delegate to Series.nunique, applied column-wise (axis=0) or
    # row-wise (axis=1); ``dropna`` is forwarded to each call.
    counter = Series.nunique
    return self.apply(counter, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
    """
    Return index of first occurrence of minimum over requested axis.

    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.

    Returns
    -------
    Series
        Indexes of minima along the specified axis.

    Raises
    ------
    ValueError
        * If the row/column is empty

    See Also
    --------
    Series.idxmin

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmin``.
    """
    axis_num = self._get_axis_number(axis)
    positions = nanops.nanargmin(self.values, axis=axis_num, skipna=skipna)
    labels = self._get_axis(axis_num)
    # nanargmin returns -1 for an all-NA slice; translate that to NaN.
    picked = [labels[pos] if pos >= 0 else np.nan for pos in positions]
    return Series(picked, index=self._get_agg_axis(axis_num))
def idxmax(self, axis=0, skipna=True):
    """
    Return index of first occurrence of maximum over requested axis.

    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.

    Returns
    -------
    Series
        Indexes of maxima along the specified axis.

    Raises
    ------
    ValueError
        * If the row/column is empty

    See Also
    --------
    Series.idxmax

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmax``.
    """
    axis_num = self._get_axis_number(axis)
    positions = nanops.nanargmax(self.values, axis=axis_num, skipna=skipna)
    labels = self._get_axis(axis_num)
    # nanargmax returns -1 for an all-NA slice; translate that to NaN.
    picked = [labels[pos] if pos >= 0 else np.nan for pos in positions]
    return Series(picked, index=self._get_agg_axis(axis_num))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError("Axis must be 0 or 1 (got %r)" % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
    """
    Get the mode(s) of each element along the selected axis.

    The mode of a set of values is the value that appears most often.
    It can be multiple values.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to iterate over while searching for the mode:

        * 0 or 'index' : get mode of each column
        * 1 or 'columns' : get mode of each row

    numeric_only : bool, default False
        If True, only apply to numeric columns.
    dropna : bool, default True
        Don't consider counts of NaN/NaT.

        .. versionadded:: 0.24.0

    Returns
    -------
    DataFrame
        The modes of each column or row.

    See Also
    --------
    Series.mode : Return the highest frequency value in a Series.
    Series.value_counts : Return the counts of values in a Series.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 2], 'B': [3, 3, 4]})
    >>> df.mode()
       A  B
    0  2  3
    """
    # Optionally restrict to numeric columns, then reduce each
    # column (or row) to its mode(s) via Series.mode.
    if numeric_only:
        data = self._get_numeric_data()
    else:
        data = self

    def _mode_of(series):
        return series.mode(dropna=dropna)

    return data.apply(_mode_of, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
    """
    Return values at the given quantile over requested axis.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        Value between 0 <= q <= 1, the quantile(s) to compute.
    axis : {0, 1, 'index', 'columns'} (default 0)
        Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
    numeric_only : bool, default True
        If False, the quantile of datetime and timedelta data will be
        computed as well.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`.

    Returns
    -------
    Series or DataFrame
        If ``q`` is an array, a DataFrame will be returned where the
        index is ``q``, the columns are the columns of self, and the
        values are the quantiles.
        If ``q`` is a float, a Series will be returned where the
        index is the columns of self and the values are the quantiles.

    See Also
    --------
    core.window.Rolling.quantile: Rolling quantile.
    numpy.percentile: Numpy function to compute the percentile.
    """
    self._check_percentile(q)

    data = self if not numeric_only else self._get_numeric_data()
    axis_num = self._get_axis_number(axis)
    needs_transpose = axis_num == 1
    if needs_transpose:
        data = data.T

    if len(data.columns) == 0:
        # GH#23925 _get_numeric_data may have dropped all columns
        empty_cols = Index([], name=self.columns.name)
        if is_list_like(q):
            return self._constructor([], index=q, columns=empty_cols)
        return self._constructor_sliced([], index=empty_cols, name=q)

    # The block manager computes quantiles block-by-block; ``transposed``
    # tells it the caller already flipped the data for axis=1.
    raw = data._data.quantile(
        qs=q, axis=1, interpolation=interpolation, transposed=needs_transpose
    )

    if raw.ndim == 2:
        out = self._constructor(raw)
    else:
        out = self._constructor_sliced(raw, name=q)

    if needs_transpose:
        out = out.T
    return out
def to_timestamp(self, freq=None, how="start", axis=0, copy=True):
    """
    Cast to DatetimeIndex of timestamps, at *beginning* of period.

    Parameters
    ----------
    freq : str, default frequency of PeriodIndex
        Desired frequency.
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default).
    copy : bool, default True
        If False then underlying input data is not copied.

    Returns
    -------
    DataFrame with DatetimeIndex
    """
    mgr = self._data.copy() if copy else self._data

    axis_num = self._get_axis_number(axis)
    # Manager axes are reversed relative to DataFrame axes: manager axis 1
    # holds the row index, manager axis 0 holds the columns.
    if axis_num == 0:
        mgr.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
    elif axis_num == 1:
        mgr.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
    else:  # pragma: no cover
        raise AssertionError("Axis must be 0 or 1. Got {ax!s}".format(ax=axis_num))
    return self._constructor(mgr)
def to_period(self, freq=None, axis=0, copy=True):
    """
    Convert DataFrame from DatetimeIndex to PeriodIndex with desired
    frequency (inferred from index if not passed).

    Parameters
    ----------
    freq : str, default
        Frequency of the PeriodIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default).
    copy : bool, default True
        If False then underlying input data is not copied.

    Returns
    -------
    TimeSeries with PeriodIndex
    """
    mgr = self._data.copy() if copy else self._data

    axis_num = self._get_axis_number(axis)
    # Manager axes are reversed relative to DataFrame axes: manager axis 1
    # holds the row index, manager axis 0 holds the columns.
    if axis_num == 0:
        mgr.set_axis(1, self.index.to_period(freq=freq))
    elif axis_num == 1:
        mgr.set_axis(0, self.columns.to_period(freq=freq))
    else:  # pragma: no cover
        raise AssertionError("Axis must be 0 or 1. Got {ax!s}".format(ax=axis_num))
    return self._constructor(mgr)
def isin(self, values):
    """
    Whether each element in the DataFrame is contained in values.

    Parameters
    ----------
    values : iterable, Series, DataFrame or dict
        The result will only be true at a location if all the
        labels match. If `values` is a Series, that's the index. If
        `values` is a dict, the keys must be the column names,
        which must match. If `values` is a DataFrame,
        then both the index and column labels must match.

    Returns
    -------
    DataFrame
        DataFrame of booleans showing whether each element in the DataFrame
        is contained in values.

    See Also
    --------
    DataFrame.eq: Equality test for DataFrame.
    Series.isin: Equivalent method on Series.
    Series.str.contains: Test if pattern or regex is contained within a
        string of a Series or Index.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
    ...                   index=['falcon', 'dog'])
    >>> df.isin([0, 2])
            num_legs  num_wings
    falcon      True       True
    dog        False       True
    """
    if isinstance(values, dict):
        from pandas.core.reshape.concat import concat

        # Missing columns fall back to an empty list, i.e. all-False.
        per_column = collections.defaultdict(list, values)
        pieces = (
            self.iloc[:, [pos]].isin(per_column[label])
            for pos, label in enumerate(self.columns)
        )
        return concat(pieces, axis=1)
    if isinstance(values, Series):
        if not values.index.is_unique:
            raise ValueError("cannot compute isin with a duplicate axis.")
        return self.eq(values.reindex_like(self), axis="index")
    if isinstance(values, DataFrame):
        if not (values.columns.is_unique and values.index.is_unique):
            raise ValueError("cannot compute isin with a duplicate axis.")
        return self.eq(values.reindex_like(self))
    if not is_list_like(values):
        raise TypeError(
            "only list-like or dict-like objects are "
            "allowed to be passed to DataFrame.isin(), "
            "you passed a "
            "{0!r}".format(type(values).__name__)
        )
    # Flatten, test membership element-wise, then restore the shape.
    flat_mask = algorithms.isin(self.values.ravel(), values)
    return DataFrame(flat_mask.reshape(self.shape), self.index, self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame

# ``df.plot`` is a cached accessor exposing the PlotAccessor API
# (``df.plot.line``, ``df.plot.bar``, ...).
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
# Plain function-valued attributes: ``df.hist(...)`` and ``df.boxplot(...)``.
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
# ``df.sparse`` accessor for frames backed by sparse dtypes.
sparse = CachedAccessor("sparse", SparseFrameAccessor)
# Wire up the DataFrame axis machinery: axis 0 is "index" (rows) and axis 1
# is "columns"; "rows" is accepted as an alias for axis 0.
DataFrame._setup_axes(
    ["index", "columns"],
    info_axis=1,
    stat_axis=0,
    axes_are_reversed=True,
    aliases={"rows": 0},
    docs={
        "index": "The index (row labels) of the DataFrame.",
        "columns": "The column labels of the DataFrame.",
    },
)

# Attach the shared numeric reductions and the methods common to
# Series and DataFrame.
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()

# Register flexible (``add``, ``sub``, ...) and special (``__add__``, ...)
# arithmetic methods on the class.
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in data.items():
for col, v in s.items():
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return "{s}".format(s=s)[:space].ljust(space)
|
{
"content_hash": "8c9558e6e06024576d8bb16788bd17dd",
"timestamp": "",
"source": "github",
"line_count": 8436,
"max_line_length": 169,
"avg_line_length": 34.63442389758179,
"alnum_prop": 0.5217677016592739,
"repo_name": "toobaz/pandas",
"id": "33066ccef0687e0ca1345f3c6fbac72844717024",
"size": "292176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/frame.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""9-3. Users:
Make a class called User. Create two attributes called first_name
and last_name, and then create several other attributes that are typically stored
in a user profile . Make a method called describe_user() that prints a summary
of the user’s information . Make another method called greet_user() that prints
a personalized greeting to the user .
Create several instances representing different users, and call both methods
for each user ."""
class User:
    """A site user with basic profile data and a login-attempt counter."""

    def __init__(self, first_name, last_name, username, email, number):
        # Names (and the number field, harmlessly for digit strings) are
        # normalized to title case regardless of input casing.
        self.first_name = first_name.title()
        self.last_name = last_name.title()
        self.username = username
        self.email = email
        self.number = number.title()
        self.login_attempts = 0

    def describe_user(self):
        """Print a short profile summary."""
        summary = (
            f"\n{self.first_name} {self.last_name}\n"
            f" Username: {self.username}\n"
            f" Email: {self.email}\n"
            f" Number: {self.number}"
        )
        print(summary)

    def greet_user(self):
        """Print a personalized greeting."""
        print(f"\n Hola {self.username} que tengas un excelente dia!")

    def increment_login_attempts(self):
        """Record one more login attempt."""
        self.login_attempts += 1

    def reset_login_attempts(self):
        """Reset the login-attempt counter to zero."""
        self.login_attempts = 0
class Admin(User):
    # A privileged user: same profile fields as User, plus a composed
    # Privileges object describing what the admin is allowed to do.
    def __init__(self, first_name, last_name, username, email, number):
        super().__init__(first_name, last_name, username, email, number)
        # Composition: privileges live in their own object.
        self.privileges = Privileges()
class Privileges:
    """A list of admin privileges, displayable on demand."""

    def __init__(self, privileges=None):
        """
        Parameters
        ----------
        privileges : list of str, optional
            Initial privileges; defaults to an empty list.
        """
        # BUG FIX: the original signature was ``privileges=[]``, a mutable
        # default argument — every instance created without arguments shared
        # one single list. A None sentinel gives each instance its own list.
        self.privileges = [] if privileges is None else privileges

    def show_privileges(self):
        """Print each privilege, or a notice when there are none."""
        print("\nPrivilegios:")
        if self.privileges:
            for privilege in self.privileges:
                print(f"- {privilege}")
        else:
            print("Este usuario no cuenta con privilegios.")
"""Alexa = User('Alexa', 'Cazarez', 'Alex', 'Alex@hotmail.com', '844631126')
Alexa.describe_user()
Alexa.greet_user()
Raul = User('Raúl', 'Alvarado', 'Cachohuate', 'cachohuate80@hotmail.com', '8441264364')
Raul.describe_user()
Raul.greet_user()"""
|
{
"content_hash": "498187c7af348ff55f685ace905d3e4c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 87,
"avg_line_length": 35.775862068965516,
"alnum_prop": 0.6318072289156627,
"repo_name": "AnhellO/DAS_Sistemas",
"id": "946298632bf2455270dbaa3fb86c600accc425b2",
"size": "2078",
"binary": false,
"copies": "1",
"ref": "refs/heads/ene-jun-2022",
"path": "Ene-Jun-2022/jesus-raul-alvarado-torres/práctica-2/capítulo-9/Users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8515"
},
{
"name": "Go",
"bytes": "25845"
},
{
"name": "HTML",
"bytes": "36671"
},
{
"name": "Python",
"bytes": "716604"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.framework.python_api_parameter_converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python import _pywrap_python_api_info
from tensorflow.python._pywrap_python_api_parameter_converter import Convert
from tensorflow.python._pywrap_python_tensor_converter import PythonTensorConverter
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.util import nest
# pylint: disable=g-long-lambda


# Helper function to make expected output in examples more compact:
def Const(x):
  """Shorthand for ``constant_op.constant(x)`` used in expected outputs."""
  return constant_op.constant(x)
@test_util.run_all_in_graph_and_eager_modes
class PythonAPIWrapperTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Tests for the C++ Python-API parameter converter (``Convert``)."""

  def setUp(self):
    # An initialized eager context is required before building converters.
    context.ensure_initialized()
    super(PythonAPIWrapperTest, self).setUp()

  def makeTensorConverter(self):
    """Returns a new PythonTensorConverter with the current context."""
    return PythonTensorConverter(context.context())

  def makeApiInfoForGenOp(self, op_name, op_func):
    """Returns a PythonAPIParameterConverter for the given gen_op."""
    api_info = _pywrap_python_api_info.PythonAPIInfo(op_name)
    api_info.InitializeFromRegisteredOp(op_name)
    return api_info

  def makeApiInfoFromParamSpecs(self,
                                api_name,
                                param_names,
                                input_specs,
                                attr_specs,
                                defaults=()):
    """Returns a PythonAPIParameterConverter built from the given specs."""
    api_info = _pywrap_python_api_info.PythonAPIInfo(api_name)
    api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
                                      defaults)
    return api_info

  def assertParamsEqual(self, actual_params, expected_params):
    """Asserts that converted parameters have the expected values & types."""
    self.assertLen(actual_params, len(expected_params))
    for actual, expected in zip(actual_params, expected_params):
      if isinstance(expected, list):
        self.assertIsInstance(actual, list)
        self.assertLen(actual, len(expected))
        for actual_item, expected_item in zip(actual, expected):
          self.assertParamEqual(actual_item, expected_item)
      else:
        self.assertParamEqual(actual, expected)

  def assertParamEqual(self, actual, expected):
    """Asserts one converted parameter matches `expected` in value and type."""
    if isinstance(actual, ops.Tensor):
      self.assertAllEqual(actual, expected)
    else:
      self.assertEqual(actual, expected)
      self.assertIs(type(actual), type(expected))

  def assertInferredEqual(self, api_info, inferred, expected):
    """Asserts that inferred attributes have the expected values."""
    inferred_type_attrs = api_info.InferredTypeAttrs()
    inferred_type_list_attrs = api_info.InferredTypeListAttrs()
    inferred_length_attrs = api_info.InferredLengthAttrs()
    self.assertLen(inferred.types, len(inferred_type_attrs))
    self.assertLen(inferred.type_lists, len(inferred_type_list_attrs))
    self.assertLen(inferred.lengths, len(inferred_length_attrs))
    # Fold the three parallel inferred lists into one {attr_name: value}
    # dict so it can be compared against `expected` in one assertion.
    actual = {}
    for i, val in enumerate(inferred.types):
      if val._type_enum == types_pb2.DT_INVALID:
        val = types_pb2.DT_INVALID
      actual[inferred_type_attrs[i]] = val
    for i, val in enumerate(inferred.type_lists):
      actual[inferred_type_list_attrs[i]] = val
    for i, val in enumerate(inferred.lengths):
      actual[inferred_length_attrs[i]] = val
    self.assertEqual(actual, expected)

  # This test constructs a PythonAPIParameterConverter for an op that expects
  # a single argument, whose value is an attribute with a specified type; and
  # then uses that converter to convert parameters and checks that the result
  # is the expected value.
  @parameterized.named_parameters([
      ("FloatFromFloat", "float", 5.0, 5.0),
      ("FloatFromInt", "float", 5, 5.0),
      ("FloatFromNumpyScalar", "float", np.array(5.0), 5.0),
      ("IntFromInt", "int", 5, 5),
      ("IntFromFloat", "int", 5.0, 5),
      ("IntFromNumpyScalar", "int", np.array(5.0), 5),
      ("StringFromBytes", "string", b"foo", b"foo"),
      ("StringFromUnicode", "string", u"foo", "foo"),
      ("BoolFromBool", "bool", True, True),
      ("TypeFromInt", "type", 1, dtypes.float32),
      ("TypeFromDType", "type", dtypes.int32, dtypes.int32),
      ("TypeFromNumpyType", "type", np.int32, dtypes.int32),
      ("ShapeFromShape", "shape", tensor_shape.as_shape([1, 2]),
       tensor_shape.as_shape([1, 2])),
      ("ShapeFromInt", "shape", 1, tensor_shape.as_shape(1)),
      ("ShapeFromNone", "shape", None, tensor_shape.as_shape(None)),
      ("ShapeFromList", "shape", [1, 2, 3], tensor_shape.as_shape([1, 2, 3])),
      ("ListOfFloat", "list(float)", [1, 2.0, np.array(3)], [1.0, 2.0, 3.0]),
      ("ListOfInt", "list(int)", [1, 2.0, np.array(3)], [1, 2, 3]),
      ("ListOfString", "list(string)", [b"foo", u"bar"], [b"foo", u"bar"]),
      ("ListOfBool", "list(bool)", [True, False, True], [True, False, True]),
      ("ListOfType", "list(type)", [1, dtypes.int32, np.int64],
       [dtypes.float32, dtypes.int32, dtypes.int64]),
      ("ListOfShape", "list(shape)", [1, None, [2, 3]], [
          tensor_shape.as_shape(1),
          tensor_shape.as_shape(None),
          tensor_shape.as_shape([2, 3])
      ]),
  ])
  def testConvertAttribute(self, attr_type, attr_val, expected):
    """Converts a single attribute value and checks value and type."""
    api_info = self.makeApiInfoFromParamSpecs("ConvertAttributes", ["x"], {},
                                              {"x": attr_type})
    tensor_converter = self.makeTensorConverter()
    params = [attr_val]
    # Convert mutates `params` in place.
    inferred = Convert(api_info, tensor_converter, params)
    self.assertEqual(inferred.types, [])
    self.assertEqual(inferred.type_lists, [])
    self.assertEqual(inferred.lengths, [])
    self.assertLen(params, 1)
    actual = params[0]
    self.assertEqual(actual, expected)
    # Check that we got the actual types we expected. (Note that in Python,
    # two values may be equal even if they have different types.)
    self.assertIs(type(actual), type(expected))
    if isinstance(expected, list):
      self.assertLen(actual, len(expected))
      for (actual_item, expected_item) in zip(actual, expected):
        self.assertIs(type(actual_item), type(expected_item))

  def testConvertMultipleAttributes(self):
    """Converts several attributes of different types in one call."""
    attr_specs = {"x": "list(int)", "y": "shape", "z": "float"}
    api_info = self.makeApiInfoFromParamSpecs("ConvertAttributes",
                                              ["x", "y", "z"], {}, attr_specs)
    tensor_converter = self.makeTensorConverter()
    params = [[1, 2.0, np.array(3.0)], [1, 2], 10]
    inferred = Convert(api_info, tensor_converter, params)
    self.assertEqual(inferred.types, [])
    self.assertEqual(inferred.type_lists, [])
    self.assertEqual(inferred.lengths, [])
    self.assertLen(params, 3)
    self.assertEqual(params, [[1, 2, 3], tensor_shape.as_shape([1, 2]), 10.0])
    self.assertIsInstance(params[0][0], int)
    self.assertIsInstance(params[1], tensor_shape.TensorShape)
    self.assertIsInstance(params[2], float)

  @parameterized.named_parameters([
      ("StringFromInt", "string", 5, "Foo argument x: Failed to convert value "
       "of type 'int' to type 'string'."),
      ("IntFromNone", "int", None, "Foo argument x: Failed to convert value "
       "of type 'NoneType' to type 'int'."),
      ("BoolFromInt", "bool", 0,
       "Foo argument x: Failed to convert value of type 'int' to type 'bool'."),
  ])
  def testConvertAttributeError(self, attr_type, attr_val, message):
    """Checks the TypeError message for unconvertible attribute values."""
    api_info = self.makeApiInfoFromParamSpecs("Foo", ["x"], {},
                                              {"x": attr_type})
    tensor_converter = self.makeTensorConverter()
    with self.assertRaisesRegex(TypeError, message):
      Convert(api_info, tensor_converter, [attr_val])

  @parameterized.named_parameters([
      dict(
          testcase_name="FixedDTypeInputs",
          param_names=["x", "y"],
          input_specs=dict(x="int32", y="float32"),
          attr_specs={},
          inputs=lambda: [1, 2],
          outputs=lambda: [Const(1), Const(2.0)],
          inferred={}),
      dict(
          testcase_name="UnconstrainedTypeInput",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="type"),
          inputs=lambda: [np.array("foo")],
          outputs=lambda: [Const("foo")],
          inferred=dict(T=dtypes.string)),
      dict(
          testcase_name="ConstrainedTypeInput",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="{int32, float, string}"),
          inputs=lambda: [np.array("foo")],
          outputs=lambda: [Const("foo")],
          inferred=dict(T=dtypes.string)),
      dict(
          testcase_name="SharedTypeInputs",
          param_names=["x", "y"],
          input_specs=dict(x="T", y="T"),
          attr_specs=dict(T="{float, int32, int64}"),
          inputs=lambda: [1, np.array(2)],
          outputs=lambda: [Const(1), Const(2)],
          inferred=dict(T=dtypes.int32)),
      dict(
          testcase_name="SharedTypeInferredFromTensor",
          param_names=["x", "y"],
          input_specs=dict(x="T", y="T"),
          attr_specs=dict(T="{float, int32, int64}"),
          inputs=lambda: [1, Const(2.0)],
          outputs=lambda: [Const(1.0), Const(2.0)],
          inferred=dict(T=dtypes.float32)),
      dict(
          # If the native converted type for an input isn't in the ok_dtypes
          # list, then we try the default dtype instead.
          testcase_name="FallbackToDefaultDtype",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="{float, string} = DT_FLOAT"),
          inputs=lambda: [1],
          outputs=lambda: [Const(1.0)],
          inferred=dict(T=dtypes.float32)),
      dict(
          testcase_name="RepeatedInput",
          param_names=["x", "y"],
          input_specs=dict(x="N * T", y="T"),
          attr_specs=dict(T="{float, int32}", N="int"),
          inputs=lambda: [[1, 2, 3], 4],
          outputs=lambda: [[Const(1), Const(2), Const(3)],
                           Const(4)],
          inferred=dict(T=dtypes.int32, N=3)),
      dict(
          testcase_name="RepeatedInputInferDTypeFromRepeated",
          param_names=["x", "y"],
          input_specs=dict(x="N * T", y="T"),
          attr_specs=dict(T="{float, int32}", N="int"),
          inputs=lambda: [[1, 2, Const(3.0)], 4],
          outputs=lambda: [[Const(1.0), Const(2.0),
                            Const(3.0)],
                           Const(4.0)],
          inferred=dict(T=dtypes.float32, N=3)),
      dict(
          testcase_name="RepeatedInputInferDTypeFromSingleton",
          param_names=["x", "y"],
          input_specs=dict(x="N * T", y="T"),
          attr_specs=dict(T="{float, int32}", N="int"),
          inputs=lambda: [[1, 2, 3], Const(4.0)],
          outputs=lambda: [[Const(1.0), Const(2.0),
                            Const(3.0)],
                           Const(4.0)],
          inferred=dict(T=dtypes.float32, N=3)),
      dict(
          testcase_name="EmptyRepeatedInput",
          param_names=["x"],
          input_specs=dict(x="N * T"),
          attr_specs=dict(T="{float, int32} = DT_INT32", N="int"),
          inputs=lambda: [[]],
          outputs=lambda: [[]],
          inferred=dict(T=dtypes.int32, N=0)),
      dict(
          testcase_name="EmptyRepeatedInputWithNoDefaultDtype",
          param_names=["x"],
          input_specs=dict(x="N * T"),
          attr_specs=dict(T="{float, int32}", N="int"),
          inputs=lambda: [[]],
          outputs=lambda: [[]],
          inferred=dict(T=types_pb2.DT_INVALID, N=0)),
      dict(
          testcase_name="RepeatedInputWithExplicitCountAndType",
          param_names=["N", "T", "x", "y"],
          input_specs=dict(x="N * T", y="T"),
          attr_specs=dict(T="{float, int32}", N="int"),
          inputs=lambda: [3, np.float32, [1, 2, 3], 4],
          outputs=lambda:
          [3, dtypes.float32, [Const(1.0), Const(2.0),
                               Const(3.0)],
           Const(4.0)],
          inferred={}),
      dict(
          testcase_name="ListOfTypes",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="list({int32, float32})"),
          inputs=lambda: [[1, 2, Const(3.0)]],
          outputs=lambda: [[Const(1), Const(2), Const(3.0)]],
          inferred=dict(T=[dtypes.int32, dtypes.int32, dtypes.float32])),
      dict(
          testcase_name="EmptyListOfTypes",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="list({int32, float32}) >= 0"),
          inputs=lambda: [[]],
          outputs=lambda: [[]],
          inferred=dict(T=[])),
      dict(
          testcase_name="MatchingListsOfTypes",
          param_names=["x", "y", "z"],
          input_specs=dict(x="T", y="T", z="T"),
          attr_specs=dict(T="list({int32, float32})"),
          inputs=lambda: [
              [1, 2, constant_op.constant(3.0)],  # x
              [constant_op.constant(4.0), 5, 6],  # y
              [7, constant_op.constant(8), 9],  # z
          ],
          outputs=lambda: nest.map_structure(
              constant_op.constant,  #
              [[1.0, 2, 3.0], [4.0, 5, 6.0], [7.0, 8, 9.0]]),
          inferred=dict(T=[dtypes.float32, dtypes.int32, dtypes.float32])),
      dict(
          testcase_name="ExplicitListOfTypes",
          param_names=["x", "T"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="list({int32, float32})"),
          inputs=lambda: [[1, 2, constant_op.constant(3.0)],
                          [dtypes.int32, dtypes.float32, dtypes.float32]],
          outputs=lambda: [[
              constant_op.constant(1, dtypes.int32),
              constant_op.constant(2, dtypes.float32),
              constant_op.constant(3.0, dtypes.float32)
          ], [dtypes.int32, dtypes.float32, dtypes.float32]],
          inferred={}),
      dict(
          testcase_name="NameParam",
          param_names=["x", "y", "name"],
          input_specs=dict(x="int32", y="float32"),
          attr_specs={},
          inputs=lambda: [1, 2, "bob"],
          outputs=lambda: [
              constant_op.constant(1, dtypes.int32),
              constant_op.constant(2, dtypes.float32), "bob"
          ],
          inferred={}),
      dict(
          testcase_name="NameParamInNonstandardPosition",
          param_names=["x", "name", "y"],
          input_specs=dict(x="int32", y="float32"),
          attr_specs={},
          inputs=lambda: [1, "bob", 2],
          outputs=lambda: [
              constant_op.constant(1, dtypes.int32), "bob",
              constant_op.constant(2, dtypes.float32)
          ],
          inferred={}),
      dict(
          testcase_name="NameParamIsNotConvertedOrModified",
          param_names=["x", "y", "name"],
          input_specs=dict(x="int32", y="float32"),
          attr_specs={},
          inputs=lambda: [1, 2, {
              "foo": ["bar", "baz"]
          }],
          outputs=lambda: [
              constant_op.constant(1, dtypes.int32),
              constant_op.constant(2, dtypes.float32), {
                  "foo": ["bar", "baz"]
              }
          ],
          inferred={}),
      dict(
          # Note: there don't appear to be any real-world ops that have a
          # type(list) attr whose default value is anything other than `[]`.
          # But we test this case anyway.
          testcase_name="ListOfTypesFallbackToDefault",
          param_names=["x"],
          input_specs=dict(x="T"),
          attr_specs=dict(T="list({string, float32}) = [DT_FLOAT, DT_FLOAT]"),
          inputs=lambda: [[1, 2.0]],
          outputs=lambda: [[
              constant_op.constant(1.0, dtypes.float32),
              constant_op.constant(2.0, dtypes.float32)
          ]],
          inferred=dict(T=[dtypes.float32, dtypes.float32])),
      dict(
          testcase_name="ComplexOp",
          param_names=["a", "b", "c", "d", "e", "f", "name"],
          input_specs=dict(a="X", b="N * X", e="Y", f="Y"),
          attr_specs=dict(
              c="list(int)",
              d="string",
              N="int",
              X="type",
              Y="list({int32, string})"),
          inputs=lambda: [
              [[1, 2, 3], [4, 5, 6]],  # a
              [[1, 2], [3, 4, 5], [6]],  # b
              [1, 2, 3],  # c
              "Foo",  # d
              [[1, 2], [["three"]], [4], "five"],  # e
              [1, "two", [[3, 4], [5, 6]], [["7"]]],  # f
          ],
          outputs=lambda: [
              Const([[1, 2, 3], [4, 5, 6]]),
              [Const([1, 2]), Const([3, 4, 5]),
               Const([6])],
              [1, 2, 3],
              "Foo",
              [Const([1, 2]),
               Const([["three"]]),
               Const([4]),
               Const("five")],
              [Const(1),
               Const("two"),
               Const([[3, 4], [5, 6]]),
               Const([["7"]])],
          ],
          inferred=dict(
              N=3,
              X=dtypes.int32,
              Y=[dtypes.int32, dtypes.string, dtypes.int32, dtypes.string])),
  ])
  def testConvert(self, param_names, input_specs, attr_specs, inputs, outputs,
                  inferred):
    """End-to-end conversion: inputs -> tensors, with attribute inference."""
    api_info = self.makeApiInfoFromParamSpecs("TestFunc", param_names,
                                              input_specs, attr_specs)
    tensor_converter = self.makeTensorConverter()
    param_values = inputs()
    actual_inferred = Convert(api_info, tensor_converter, param_values)
    self.assertInferredEqual(api_info, actual_inferred, inferred)
    self.assertParamsEqual(param_values, outputs())

  @parameterized.named_parameters([
      dict(
          testcase_name="WrongDTypeForFixedDTypeInput",
          param_names=["x"],
          input_specs=dict(x="float"),
          attr_specs={},
          inputs=lambda: [constant_op.constant(1)],
          message="TestFunc argument x: Expected DT_FLOAT but got DT_INT32"),
      dict(
          testcase_name="AddIntTensorAndFloatTensor",
          param_names=["x", "y"],
          input_specs=dict(x="T", y="T"),
          attr_specs=dict(T="{float, int32, int64}"),
          inputs=lambda: [constant_op.constant(1),
                          constant_op.constant(2.0)],
          message="TestFunc argument y: Expected DT_INT32 but got DT_FLOAT"),
  ])
  def testConvertError(self,
                       param_names,
                       input_specs,
                       attr_specs,
                       inputs,
                       message,
                       exception=TypeError):
    """Checks the error raised when inputs cannot be converted."""
    api_info = self.makeApiInfoFromParamSpecs("TestFunc", param_names,
                                              input_specs, attr_specs)
    tensor_converter = self.makeTensorConverter()
    param_values = inputs()
    with self.assertRaisesRegex(exception, message):
      Convert(api_info, tensor_converter, param_values)
# Run the test suite when executed directly as a script.
if __name__ == "__main__":
  googletest.main()
|
{
"content_hash": "a18fe815e411fd6ad5b1451aa8ef79d9",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 83,
"avg_line_length": 41.46822033898305,
"alnum_prop": 0.55765595463138,
"repo_name": "petewarden/tensorflow",
"id": "6c182163b98168be2c636b3f039ac279e2869668",
"size": "20262",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/python_api_parameter_converter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the limits app: per-user pools of named limits.
    # Depends only on the (swappable) user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Limit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('curvalue', models.IntegerField(default=0)),
                # NOTE(review): -2 looks like a sentinel default (perhaps
                # "unlimited") — confirm against application code.
                ('maxvalue', models.IntegerField(default=-2)),
            ],
            options={
                'db_table': 'limits_limit',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LimitsPool',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # One pool per user.
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'limits_limitspool',
            },
            bases=(models.Model,),
        ),
        # Added after both models exist so the FK target is defined.
        migrations.AddField(
            model_name='limit',
            name='pool',
            field=models.ForeignKey(to='modoboa_admin_limits.LimitsPool'),
            preserve_default=True,
        ),
        # A limit name may appear at most once within a given pool.
        migrations.AlterUniqueTogether(
            name='limit',
            unique_together=set([('name', 'pool')]),
        ),
    ]
|
{
"content_hash": "4b38483a67f3702eb86460cd7be01251",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 114,
"avg_line_length": 32.458333333333336,
"alnum_prop": 0.5301668806161746,
"repo_name": "disko/modoboa-admin-limits",
"id": "ccc3b0b8e1c3b5fafe5c46149908baaab1028973",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modoboa_admin_limits/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "429"
},
{
"name": "Python",
"bytes": "42164"
}
],
"symlink_target": ""
}
|
from __future__ import division
import warnings
import numpy as np
from .widget import Widget
from ...util.np_backport import nanmean
class Grid(Widget):
    """
    Widget that automatically sets the position and size of child Widgets to
    proportionally divide its internal area into a grid.

    Parameters
    ----------
    spacing : int
        Spacing between widgets.
    **kwargs : dict
        Keyword arguments to pass to `Widget`.
    """

    def __init__(self, spacing=6, **kwargs):
        # Local import — presumably to avoid a circular import with viewbox.
        from .viewbox import ViewBox
        self._next_cell = [0, 0]  # row, col
        self._cells = {}
        # key -> (row, col, row_span, col_span, widget); key is the order
        # of insertion (see _n_added below).
        self._grid_widgets = {}
        self.spacing = spacing
        self._n_added = 0
        self._default_class = ViewBox  # what to add when __getitem__ is used
        Widget.__init__(self, **kwargs)

    def __getitem__(self, idxs):
        """Return an item or create it if the location is available"""
        # Normalize the index into a (row-slice, col-slice) pair.
        if not isinstance(idxs, tuple):
            idxs = (idxs,)
        if len(idxs) == 1:
            idxs = idxs + (slice(None),)
        elif len(idxs) != 2:
            raise ValueError('Incorrect index: %s' % (idxs,))
        # lims[axis] holds the [start, stop) cell range along that axis.
        lims = np.empty((2, 2), int)
        for ii, idx in enumerate(idxs):
            if isinstance(idx, int):
                idx = slice(idx, idx + 1, None)
            if not isinstance(idx, slice):
                raise ValueError('indices must be slices or integers, not %s'
                                 % (type(idx),))
            if idx.step is not None and idx.step != 1:
                raise ValueError('step must be one or None, not %s' % idx.step)
            start = 0 if idx.start is None else idx.start
            end = self.grid_size[ii] if idx.stop is None else idx.stop
            lims[ii] = [start, end]
        layout = self.layout_array
        # layout holds widget keys with -1 meaning "empty"; adding 1 makes
        # empty cells zero, so any() detects occupancy in the span.
        existing = layout[lims[0, 0]:lims[0, 1], lims[1, 0]:lims[1, 1]] + 1
        if existing.any():
            # Occupied: accept only if exactly one widget covers exactly the
            # requested span; otherwise this is a collision.
            existing = set(list(existing.ravel()))
            ii = list(existing)[0] - 1
            if len(existing) != 1 or ((layout == ii).sum() !=
                                      np.prod(np.diff(lims))):
                raise ValueError('Cannot add widget (collision)')
            # Return the already-present widget (last element of the tuple).
            return self._grid_widgets[ii][-1]
        # Free span: create a default widget (ViewBox) covering it.
        spans = np.diff(lims)[:, 0]
        item = self.add_widget(self._default_class(),
                               row=lims[0, 0], col=lims[1, 0],
                               row_span=spans[0], col_span=spans[1])
        return item

    def add_widget(self, widget=None, row=None, col=None, row_span=1,
                   col_span=1):
        """
        Add a new widget to this grid. This will cause other widgets in the
        grid to be resized to make room for the new widget.

        Parameters
        ----------
        widget : Widget
            The Widget to add
        row : int
            The row in which to add the widget (0 is the topmost row)
        col : int
            The column in which to add the widget (0 is the leftmost column)
        row_span : int
            The number of rows to be occupied by this widget. Default is 1.
        col_span : int
            The number of columns to be occupied by this widget. Default is 1.

        Notes
        -----
        The widget's parent is automatically set to this grid, and all other
        parent(s) are removed.
        """
        # Default placement: the cell immediately after the last addition.
        if row is None:
            row = self._next_cell[0]
        if col is None:
            col = self._next_cell[1]
        if widget is None:
            widget = Widget()
        _row = self._cells.setdefault(row, {})
        _row[col] = widget
        self._grid_widgets[self._n_added] = (row, col, row_span, col_span,
                                             widget)
        self._n_added += 1
        widget.parent = self
        self._next_cell = [row, col+col_span]
        # update stretch based on colspan/rowspan
        stretch = list(widget.stretch)
        stretch[0] = col_span if stretch[0] is None else stretch[0]
        stretch[1] = row_span if stretch[1] is None else stretch[1]
        widget.stretch = stretch
        # Re-layout all children to make room for the new widget.
        self._update_child_widgets()
        return widget

    def add_grid(self, row=None, col=None, row_span=1, col_span=1,
                 **kwargs):
        """
        Create a new Grid and add it as a child widget.

        Parameters
        ----------
        row : int
            The row in which to add the widget (0 is the topmost row)
        col : int
            The column in which to add the widget (0 is the leftmost column)
        row_span : int
            The number of rows to be occupied by this widget. Default is 1.
        col_span : int
            The number of columns to be occupied by this widget. Default is 1.
        **kwargs : dict
            Keyword arguments to pass to the new `Grid`.
        """
        from .grid import Grid
        grid = Grid(**kwargs)
        return self.add_widget(grid, row, col, row_span, col_span)

    def add_view(self, row=None, col=None, row_span=1, col_span=1,
                 **kwargs):
        """
        Create a new ViewBox and add it as a child widget.

        Parameters
        ----------
        row : int
            The row in which to add the widget (0 is the topmost row)
        col : int
            The column in which to add the widget (0 is the leftmost column)
        row_span : int
            The number of rows to be occupied by this widget. Default is 1.
        col_span : int
            The number of columns to be occupied by this widget. Default is 1.
        **kwargs : dict
            Keyword arguments to pass to `ViewBox`.
        """
        from .viewbox import ViewBox
        view = ViewBox(**kwargs)
        return self.add_widget(view, row, col, row_span, col_span)

    def next_row(self):
        """Advance the default insertion point to the start of the next row."""
        self._next_cell = [self._next_cell[0] + 1, 0]

    @property
    def grid_size(self):
        """Current (n_rows, n_cols) extent implied by the added widgets."""
        rvals = [widget[0]+widget[2] for widget in self._grid_widgets.values()]
        cvals = [widget[1]+widget[3] for widget in self._grid_widgets.values()]
        return max(rvals + [0]), max(cvals + [0])

    @property
    def layout_array(self):
        """2D int array mapping each cell to its widget key (-1 if empty)."""
        locs = -1 * np.ones(self.grid_size, int)
        for key in self._grid_widgets.keys():
            r, c, rs, cs = self._grid_widgets[key][:4]
            locs[r:r + rs, c:c + cs] = key
        return locs

    def __repr__(self):
        return (('<Grid at %s:\n' % hex(id(self))) +
                str(self.layout_array + 1) + '>')

    def _update_child_widgets(self):
        # Resize all widgets in this grid to share space.
        n_rows, n_cols = self.grid_size
        if n_rows == 0 or n_cols == 0:
            return
        # 1. Collect information about occupied cells and their contents
        occupied = np.zeros((n_rows, n_cols), dtype=bool)
        stretch = np.zeros((n_rows, n_cols, 2), dtype=float)
        stretch[:] = np.nan
        #minsize = np.zeros((n_rows, n_cols, 2), dtype=float)
        for key, val in self._grid_widgets.items():
            w = val[4]  # the widget itself (same object as ch below)
            row, col, rspan, cspan, ch = self._grid_widgets[key]
            occupied[row:row+rspan, col:col+cspan] = True
            # A widget's (x, y) stretch is divided evenly over the cells it
            # spans; unassigned cells stay NaN and are ignored by nanmean.
            stretch[row:row+rspan, col:col+cspan] = (np.array(w.stretch) /
                                                     [cspan, rspan])
        row_occ = occupied.sum(axis=1) > 0
        col_occ = occupied.sum(axis=0) > 0
        with warnings.catch_warnings(record=True):  # mean of empty slice
            row_stretch = nanmean(stretch[..., 1], axis=1)
            col_stretch = nanmean(stretch[..., 0], axis=0)
        # Fully-empty rows/cols produce NaN means; treat them as zero stretch.
        row_stretch[np.isnan(row_stretch)] = 0
        col_stretch[np.isnan(col_stretch)] = 0
        # 2. Decide width of each row/col
        rect = self.rect.padded(self.padding + self.margin)
        n_cols = col_occ.sum()
        colspace = rect.width - (n_cols-1) * self.spacing
        colsizes = col_stretch * colspace / col_stretch.sum()
        # Zero-stretch columns get -spacing so they also cancel the spacing
        # that would otherwise be reserved around them.
        colsizes[colsizes == 0] = -self.spacing
        n_rows = row_occ.sum()
        rowspace = rect.height - (n_rows-1) * self.spacing
        rowsizes = row_stretch * rowspace / row_stretch.sum()
        rowsizes[rowsizes == 0] = -self.spacing
        # 3. Decide placement of row/col edges
        colend = np.cumsum(colsizes) + self.spacing * np.arange(len(colsizes))
        colstart = colend - colsizes
        rowend = np.cumsum(rowsizes) + self.spacing * np.arange(len(rowsizes))
        rowstart = rowend - rowsizes
        # snap to pixel boundaries to avoid drawing artifacts
        colstart = np.round(colstart) + self.margin
        colend = np.round(colend) + self.margin
        rowstart = np.round(rowstart) + self.margin
        rowend = np.round(rowend) + self.margin
        for key in self._grid_widgets.keys():
            row, col, rspan, cspan, ch = self._grid_widgets[key]
            # Translate the origin of the node to the corner of the area
            # ch.transform.reset()
            # ch.transform.translate((colstart[col], rowstart[row]))
            ch.pos = colstart[col], rowstart[row]
            # ..and set the size to match.
            w = colend[col+cspan-1]-colstart[col]
            h = rowend[row+rspan-1]-rowstart[row]
            ch.size = w, h
|
{
"content_hash": "3ed5ba4703f0d16add53d08ad81fce77",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 38.551867219917014,
"alnum_prop": 0.5431062318372619,
"repo_name": "sh4wn/vispy",
"id": "6bec17c9eedf8be3ed494c59af29e4aa7bd7bfa7",
"size": "9436",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vispy/scene/widgets/grid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "171513"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2957955"
}
],
"symlink_target": ""
}
|
from __future__ import print_function

import glob
import os.path
import subprocess
import sys

import eagle_util_funcs

try:
    import yaml
except ImportError:
    print('Please install PyYaml')
    sys.exit(1)
def generate_gerber(brdFile, outFile, layers):
    """Run the EAGLE CAM processor to export `layers` of `brdFile` as a
    Gerber (RS-274X) file at `outFile`.  Exits the process on failure."""
    args = [
        '-X',               # Run the command line CAM processor
        '-N',               # No output messages
        '-dGERBER_RS274X',  # Output in gerber format
        '-o' + outFile,     # Output path
        brdFile,            # Input board
    ]
    status = eagle_util_funcs.run_eagle(args + layers)
    if status != 0:
        print("Eagle returned error!")
        sys.exit(status)
def generate_drill(brdFile, outFile, layers):
    """Run the EAGLE CAM processor to export `layers` of `brdFile` as an
    Excellon drill file at `outFile`.  Exits the process on failure."""
    args = [
        '-X',          # Run the command line CAM processor
        '-N',          # No output messages
        '-dEXCELLON',  # Output in Excellon drill format
        '-o' + outFile,  # Output path
        brdFile,         # Input board
    ]
    status = eagle_util_funcs.run_eagle(args + layers)
    if status != 0:
        print("Eagle returned error!")
        sys.exit(status)
def zip_up_results(outfile):
    """Zip every (non-hidden) file in the current directory into `outfile`.

    The file list is expanded in Python and `zip` is invoked without a
    shell: the previous implementation interpolated `outfile` into a shell
    command string, which broke on paths containing spaces and allowed
    shell injection through the output filename.  `sorted(glob.glob('*'))`
    reproduces the shell's sorted, dotfile-excluding `*` expansion.
    """
    subprocess.call(['zip', outfile] + sorted(glob.glob('*')))
    print("Made zip!")
def main():
    """Entry point: run each CAM section from a YAML config over an EAGLE
    board file and zip the generated outputs.

    Usage: run-eagle-cam-v2.py config.yaml in.brd out.zip
    """
    if len(sys.argv) < 4:
        print("Usage: %s config.yaml in.brd out.zip" % (sys.argv[0]))
        sys.exit(1)
    configFileName = os.path.abspath(sys.argv[1])
    inputFileName = os.path.abspath(sys.argv[2])
    outputFileName = os.path.abspath(sys.argv[3])
    # Get the "base" (no path, no extension) part of the input name
    inputBaseName = os.path.splitext(os.path.basename(inputFileName))[0]
    # Read configuration.  safe_load avoids constructing arbitrary Python
    # objects from the YAML file (plain yaml.load without a Loader is
    # unsafe and deprecated); the file is closed by the context manager.
    with open(configFileName, 'r') as configFile:
        configData = yaml.safe_load(configFile)
    # Create temporary directory
    tempdir = eagle_util_funcs.setup_tmp_dir()
    # Process each section
    for configSection in configData:
        print("Running section %s..." % configSection['description'])
        sectionOutputFilePath = ("%s/%s.%s" %
                                 (tempdir, inputBaseName,
                                  configSection['output_extension']))
        layers = configSection['layers']
        # A single numeric layer parses from YAML as an int; normalize to a
        # whitespace-separated string before splitting into a layer list.
        if isinstance(layers, int):
            layers = str(layers)
        layers = layers.split()
        if configSection['type'] == 'gerber':
            generate_gerber(inputFileName, sectionOutputFilePath, layers)
        elif configSection['type'] == 'excellon':
            generate_drill(inputFileName, sectionOutputFilePath, layers)
        else:
            print("Section ignored, unknown type %s" % configSection['type'])
    # Zip outputs
    zip_up_results(outputFileName)
    # Clean up
    eagle_util_funcs.remove_tmp_dir(tempdir)
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "a21ff3a4d58f28e0e83eb0a67b856cd8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 29.810526315789474,
"alnum_prop": 0.5787429378531074,
"repo_name": "SamboyKirk/tenshi",
"id": "5827bfc2428307d4ea6f0312f6c468d310f2421c",
"size": "3645",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tools/run-eagle-cam-v2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "9351"
},
{
"name": "Batchfile",
"bytes": "936"
},
{
"name": "C",
"bytes": "3472923"
},
{
"name": "C++",
"bytes": "442800"
},
{
"name": "CSS",
"bytes": "33648"
},
{
"name": "Eagle",
"bytes": "4250414"
},
{
"name": "Groff",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "365988"
},
{
"name": "Java",
"bytes": "9770"
},
{
"name": "JavaScript",
"bytes": "9235684"
},
{
"name": "Lua",
"bytes": "26268"
},
{
"name": "Makefile",
"bytes": "9665"
},
{
"name": "Objective-C",
"bytes": "4499"
},
{
"name": "Python",
"bytes": "93006"
},
{
"name": "Shell",
"bytes": "22065"
}
],
"symlink_target": ""
}
|
"""Utilities to normalize a stream of tensors (typically observations)."""
import tensorflow as tf
class Normalizer(tf.Module):
  """Normalizes tensors by tracking their element-wise mean and variance."""

  def __init__(self, eps=0.001, clip_range=(-5, 5)):
    """Initialize the normalizer.

    Args:
      eps: A constant added to the standard deviation of data before
        normalization.
      clip_range: Normalized values are clipped to this range.
    """
    super(Normalizer, self).__init__()
    self.eps = eps
    self.clip_range = clip_range
    # Variables are created lazily (see build) because the feature size is
    # only known once the first input is seen.
    self.initialized = False

  def build(self, input_shape):
    """Create the statistic variables for inputs with last dim input_shape[-1]."""
    assert not self.initialized
    self.initialized = True
    size = input_shape[-1]
    def get_variable(name, initial_value, local):
      if local:
        # ON_READ causes the replicated variable to act as independent variables
        # for each replica. The variable only gets aggregated if it is read
        # in cross-replica context, which may happen e.g. when the normalizer
        # is checkpointed.
        return tf.Variable(name=name,
                           initial_value=initial_value,
                           trainable=False,
                           dtype=tf.float32,
                           synchronization=tf.VariableSynchronization.ON_READ,
                           aggregation=tf.VariableAggregation.MEAN)
      else:  # mirrored variable, same value on each replica
        return tf.Variable(name=name,
                           initial_value=initial_value,
                           trainable=False,
                           dtype=tf.float32)
    # local accumulators: per-replica running sums, folded into the global
    # statistics by finish_update()
    self.steps_acc = get_variable('steps_acc', 0, local=True)
    self.sum_acc = get_variable('sum_acc', tf.zeros(shape=[size]), local=True)
    self.sumsq_acc = get_variable('sumsq_acc', tf.zeros(shape=[size]),
                                  local=True)
    # mirrored variables
    self.steps = get_variable('steps', 0, local=False)
    self.sum = get_variable('sum', tf.zeros(shape=[size]), local=False)
    self.sumsq = get_variable('sumsq', tf.zeros(shape=[size]), local=False)
    self.mean = get_variable('mean', tf.zeros(shape=[size]), local=False)
    self.std = get_variable('std', tf.zeros(shape=[size]), local=False)

  def update(self, input_, only_accumulate=False):
    """Update normalization statistics.

    Args:
      input_: A tensor. All dimensions apart from the last one are treated
        as batch dimensions.
      only_accumulate: If True, only local accumulators are updated and the
        normalization is not affected. Use this option if running on TPU.
        In this case, you need to call `finish_update` method in cross-replica
        context later to update the normalization.
    """
    if not self.initialized:
      self.build(input_.shape)
    # reshape to 2 dimensions
    shape = input_.shape
    input_ = tf.reshape(input_, [tf.reduce_prod(shape[:-1]), shape[-1]])
    assert len(input_.shape) == 2
    # update local accumulators
    self.steps_acc.assign_add(float(input_.shape[0]))
    self.sum_acc.assign_add(tf.reduce_sum(input_, axis=0))
    self.sumsq_acc.assign_add(tf.reduce_sum(tf.square(input_), axis=0))
    if not only_accumulate:
      self.finish_update()

  def finish_update(self):
    """Update the normalization (mean and std) based on local accumulators.

    You only need to call this method manually if `update` was called with
    `only_accumulate=True` (usually on a TPU). This method needs to be called
    in cross-replica context (i.e. not inside Strategy.run).
    """
    # sum the accumulators across all replicas
    step_increment, sum_increment, sumsq_increment = (
        tf.distribute.get_replica_context().all_reduce(
            tf.distribute.ReduceOp.SUM,
            [self.steps_acc, self.sum_acc, self.sumsq_acc]))
    # zero the accumulators
    self.steps_acc.assign(tf.zeros_like(self.steps_acc))
    self.sum_acc.assign(tf.zeros_like(self.sum_acc))
    self.sumsq_acc.assign(tf.zeros_like(self.sumsq_acc))
    # update the normalization; std is derived from the running sum of
    # squares: Var[x] = E[x^2] - E[x]^2, clamped at 0 for numerical safety.
    self.steps.assign_add(step_increment)
    self.sum.assign_add(sum_increment)
    self.sumsq.assign_add(sumsq_increment)
    self.mean.assign(self.sum / self.steps)
    self.std.assign(tf.sqrt(tf.maximum(
        0., (self.sumsq / self.steps) - tf.square(self.mean))))

  def __call__(self, input_):
    """Normalize the tensor.

    Args:
      input_: tensor to be normalized. All dimensions apart from the last one
        are treated as batch dimensions.

    Returns:
      a tensor of the same shape and dtype as input_.
    """
    if not self.initialized:
      self.build(input_.shape)
    # reshape to 2 dimensions
    shape = input_.shape
    input_ = tf.reshape(input_, [tf.reduce_prod(shape[:-1]), shape[-1]])
    assert len(input_.shape) == 2
    # normalize: (x - mean) / (std + eps), then clip to clip_range
    input_ -= self.mean[tf.newaxis, :]
    input_ /= self.std[tf.newaxis, :] + self.eps
    input_ = tf.clip_by_value(input_, *self.clip_range)
    # reshape to the original shape
    return tf.reshape(input_, shape)

  def get_logs(self):
    """Return a dict of per-feature mean/std scalars for logging."""
    logs = dict()
    for key, var in [('mean', self.mean), ('std', self.std)]:
      for i in range(var.shape[0]):
        logs['%s/%d' % (key, i)] = var[i]
    return logs
class NormalizeObservationsWrapper(tf.Module):
  """Wrapper which adds observation normalization to a policy.

  Works for V-trace and SAC policies.
  """

  def __init__(self, policy, normalizer):
    # policy: the wrapped policy object; normalizer: a Normalizer instance
    # applied to the flattened, concatenated observations.
    self.policy = policy
    self.normalizer = normalizer

  def _norm_env_output(self, env_outputs):
    """Return env_outputs with its observation replaced by a normalized copy."""
    # Flatten the (possibly nested) observation, normalize everything as one
    # concatenated tensor, then split and repack into the original structure.
    flat = tf.nest.flatten(env_outputs.observation)
    normalized = self.normalizer(tf.concat(values=flat, axis=-1))
    normalized = tf.split(normalized, [t.shape[-1] for t in flat], axis=-1)
    normalized = tf.nest.pack_sequence_as(env_outputs.observation, normalized)
    # Sanity check: normalization must not change any component's shape.
    for a, b in zip(tf.nest.flatten(flat), tf.nest.flatten(normalized)):
      assert a.shape == b.shape
    return env_outputs._replace(observation=normalized)

  @tf.function
  def initial_state(self, *args, **kwargs):
    return self.policy.initial_state(*args, **kwargs)

  # Not clear why, but if "@tf.function" declarator is placed directly onto
  # __call__, training fails with "uninitialized variable *baseline".
  # when running on multiple learning tpu cores.
  @tf.function
  def get_action(self, *args, **kwargs):
    return self.__call__(*args, **kwargs)

  def __call__(self, prev_actions, env_outputs, *args,
               is_training=False, **kwargs):
    # During training, fold the raw observations into the normalizer's local
    # accumulators; finish_update() is applied later via
    # end_of_training_step_callback (cross-replica context).
    if is_training:
      flat = tf.nest.flatten(env_outputs.observation)
      self.normalizer.update(tf.concat(values=flat, axis=-1),
                             only_accumulate=True)
    return self.policy(prev_actions, self._norm_env_output(env_outputs), *args,
                       is_training=is_training, **kwargs)

  def end_of_training_step_callback(self):
    # Merge per-replica accumulators into the global mean/std.
    self.normalizer.finish_update()

  # The methods below are only used by SAC policies.
  def get_Q(self, prev_action, env_output, *args, **kwargs):
    return self.policy.get_Q(
        prev_action, self._norm_env_output(env_output), *args, **kwargs)

  def get_V(self, prev_action, env_output, *args, **kwargs):
    return self.policy.get_V(
        prev_action, self._norm_env_output(env_output), *args, **kwargs)

  def get_action_params(self, prev_action, env_output, *args, **kwargs):
    return self.policy.get_action_params(
        prev_action, self._norm_env_output(env_output), *args, **kwargs)
|
{
"content_hash": "b3d1756811bb5efca2f282cbccac5a0f",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 80,
"avg_line_length": 38.234693877551024,
"alnum_prop": 0.6453162530024019,
"repo_name": "google-research/seed_rl",
"id": "b7543a69ce9daca79cbc2ddb487a117366377c0a",
"size": "8089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/normalizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41131"
},
{
"name": "Jupyter Notebook",
"bytes": "72883"
},
{
"name": "Python",
"bytes": "614110"
},
{
"name": "Shell",
"bytes": "31284"
},
{
"name": "Starlark",
"bytes": "932"
}
],
"symlink_target": ""
}
|
"""Config flow to configure forked-daapd devices."""
from contextlib import suppress
import logging
from pyforked_daapd import ForkedDaapdAPI
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DEFAULT_PORT,
DEFAULT_TTS_PAUSE_TIME,
DEFAULT_TTS_VOLUME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)

# Base schema for manual setup of a forked-daapd server.
# Can't use all vol types: https://github.com/home-assistant/core/issues/32819
DATA_SCHEMA_DICT = {
    vol.Required(CONF_HOST): str,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
    vol.Optional(CONF_PASSWORD, default=""): str,
}

# Identity map acting as a whitelist of known ForkedDaapdAPI.test_connection
# results; any other result is reported as "unknown_error" (see
# ForkedDaapdFlowHandler.validate_input).
TEST_CONNECTION_ERROR_DICT = {
    "forbidden": "forbidden",
    "ok": "ok",
    "websocket_not_enabled": "websocket_not_enabled",
    "wrong_host_or_port": "wrong_host_or_port",
    "wrong_password": "wrong_password",
    "wrong_server_type": "wrong_server_type",
}
class ForkedDaapdOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a forked-daapd options flow."""

    def __init__(self, config_entry):
        """Initialize."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            return self.async_create_entry(title="options", data=user_input)
        current = self.config_entry.options
        # (option key, fallback default, value type) for each editable option;
        # defaults come from the currently stored options when present.
        option_fields = (
            (CONF_TTS_PAUSE_TIME, DEFAULT_TTS_PAUSE_TIME, float),
            (CONF_TTS_VOLUME, DEFAULT_TTS_VOLUME, float),
            (CONF_LIBRESPOT_JAVA_PORT, 24879, int),
            (CONF_MAX_PLAYLISTS, 10, int),
        )
        options_schema = vol.Schema(
            {
                vol.Optional(key, default=current.get(key, fallback)): value_type
                for key, fallback, value_type in option_fields
            }
        )
        return self.async_show_form(step_id="init", data_schema=options_schema)
def fill_in_schema_dict(some_input):
    """Fill in schema dict defaults from user_input."""
    filled = {}
    for field, _type in DATA_SCHEMA_DICT.items():
        key = str(field)  # marker (vol.Required/Optional) -> plain key name
        provided = some_input.get(key)
        if provided:
            # Pre-populate the form field with the previously supplied value.
            filled[vol.Optional(key, default=provided)] = _type
        else:
            filled[field] = _type
    return filled
class ForkedDaapdFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a forked-daapd config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH

    def __init__(self):
        """Initialize."""
        # Schema pre-filled from zeroconf discovery; when set, the user step
        # shows it instead of the blank DATA_SCHEMA_DICT form.
        self.discovery_schema = None

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Return options flow handler."""
        return ForkedDaapdOptionsFlowHandler(config_entry)

    async def validate_input(self, user_input):
        """Validate the user input.

        Returns the test_connection result list with its first element mapped
        through TEST_CONNECTION_ERROR_DICT ("unknown_error" if unrecognized).
        """
        websession = async_get_clientsession(self.hass)
        validate_result = await ForkedDaapdAPI.test_connection(
            websession=websession,
            host=user_input[CONF_HOST],
            port=user_input[CONF_PORT],
            password=user_input[CONF_PASSWORD],
        )
        validate_result[0] = TEST_CONNECTION_ERROR_DICT.get(
            validate_result[0], "unknown_error"
        )
        return validate_result

    async def async_step_user(self, user_input=None):
        """Handle a forked-daapd config flow start.

        Manage device specific parameters.
        """
        if user_input is not None:
            # check for any entries with same host, abort if found
            for entry in self._async_current_entries():
                if entry.data.get(CONF_HOST) == user_input[CONF_HOST]:
                    return self.async_abort(reason="already_configured")
            validate_result = await self.validate_input(user_input)
            if validate_result[0] == "ok":  # success
                _LOGGER.debug("Connected successfully. Creating entry")
                return self.async_create_entry(
                    title=validate_result[1], data=user_input
                )
            # Validation failed: re-show the form with the user's values
            # filled in and the mapped error key.
            return self.async_show_form(
                step_id="user",
                data_schema=vol.Schema(fill_in_schema_dict(user_input)),
                errors={"base": validate_result[0]},
            )
        if self.discovery_schema:  # stop at form to allow user to set up manually
            return self.async_show_form(
                step_id="user", data_schema=self.discovery_schema, errors={}
            )
        return self.async_show_form(
            step_id="user", data_schema=vol.Schema(DATA_SCHEMA_DICT), errors={}
        )

    async def async_step_zeroconf(self, discovery_info):
        """Prepare configuration for a discovered forked-daapd device."""
        # Only accept servers advertising a Machine Name and an mtd-version
        # whose major component is >= 27 (older/other daapd servers are not
        # forked-daapd).
        version_num = 0
        if discovery_info.get("properties") and discovery_info["properties"].get(
            "Machine Name"
        ):
            with suppress(ValueError):
                version_num = int(
                    discovery_info["properties"].get("mtd-version", "0").split(".")[0]
                )
        if version_num < 27:
            return self.async_abort(reason="not_forked_daapd")
        # The advertised Machine Name serves as the unique id.
        await self.async_set_unique_id(discovery_info["properties"]["Machine Name"])
        self._abort_if_unique_id_configured()
        # Update title and abort if we already have an entry for this host
        for entry in self._async_current_entries():
            if entry.data.get(CONF_HOST) != discovery_info["host"]:
                continue
            self.hass.config_entries.async_update_entry(
                entry,
                title=discovery_info["properties"]["Machine Name"],
            )
            return self.async_abort(reason="already_configured")
        # New device: pre-fill the user form with the discovered values.
        zeroconf_data = {
            CONF_HOST: discovery_info["host"],
            CONF_PORT: int(discovery_info["port"]),
            CONF_NAME: discovery_info["properties"]["Machine Name"],
        }
        self.discovery_schema = vol.Schema(fill_in_schema_dict(zeroconf_data))
        self.context.update({"title_placeholders": zeroconf_data})
        return await self.async_step_user()
|
{
"content_hash": "871db8c63ab83b0c8185aafbd3f39fc7",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 86,
"avg_line_length": 36.58115183246073,
"alnum_prop": 0.5740661227994848,
"repo_name": "adrienbrault/home-assistant",
"id": "6bcc35f0a525a3807381ae31b9d00cdc7f3eba6f",
"size": "6987",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/forked_daapd/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import os
import yaml
import unittest
from validator import Required, validate, InstanceOf
class TestAllYaml(unittest.TestCase):
    def test_recursive_yaml(self):
        """Every YAML file under the working directory must parse cleanly."""
        yaml_files = [
            os.path.join(root, fname)
            for root, _, fnames in os.walk(os.getcwd())
            for fname in fnames
            if fname.endswith(('.yaml', '.yml'))
        ]
        for fullname in yaml_files:
            with open(fullname, 'r') as yfile:
                try:
                    yaml.safe_load(yfile)
                except Exception as e:
                    msg = "File {name} is broken: {exc}"
                    self.fail(msg.format(name=fullname, exc=str(e)))

    def test_student_yaml(self):
        """Every student file under people/ must match the expected schema."""
        is_str = InstanceOf(type(""))
        spec = {
            'blog': [Required, is_str],
            'feed': [Required, is_str],
            'forges': [Required, InstanceOf(list)],
            'hw': [Required, InstanceOf(dict)],
            'irc': [Required, is_str],
            'name': [Required, is_str],
            'rit_dce': [Required, is_str],
            # optional fields
            'bio': [is_str],
            'twitter': [is_str],
            'coderwall': [is_str],
        }
        people_dir = os.path.join(os.getcwd(), "people")
        student_files = [
            os.path.join(root, fname)
            for root, _, fnames in os.walk(people_dir)
            for fname in fnames
            if fname.endswith(('.yaml', '.yml'))
        ]
        for fullname in student_files:
            with open(fullname, 'r') as student:
                content = yaml.safe_load(student)
            is_valid, errors = validate(spec, content)
            if not is_valid:
                out = ""
                for k, v in errors.items():
                    out += ("File: {f} Key: {key} "
                            "{check}\n\n".format(key=k,
                                                 check=v,
                                                 f=fullname))
                self.fail(out)
|
{
"content_hash": "84c4a464cf7e43206d006c1d3bb1346f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 71,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.4502304147465438,
"repo_name": "ritjoe/ofCourse",
"id": "061c9f140e7184f73c63c9c9a85b72de5744f4e1",
"size": "2170",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "ofcourse/tests/test_yaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5941"
},
{
"name": "HTML",
"bytes": "56305"
},
{
"name": "Python",
"bytes": "34935"
}
],
"symlink_target": ""
}
|
"""Support for Blockchain.com sensors."""
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

ATTRIBUTION = "Data provided by blockchain.com"

CONF_ADDRESSES = "addresses"

DEFAULT_NAME = "Bitcoin Balance"

ICON = "mdi:currency-btc"

# Polling interval for BlockchainSensor.update().
SCAN_INTERVAL = timedelta(minutes=5)

# Platform config: a required list of address strings plus an optional name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADDRESSES): [cv.string],
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Blockchain.com sensors."""
    addresses = config[CONF_ADDRESSES]
    name = config[CONF_NAME]
    # Log and abort on the first address that fails validation.
    for address in addresses:
        if validate_address(address):
            continue
        _LOGGER.error("Bitcoin address is not valid: %s", address)
        return False
    add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(SensorEntity):
    """Representation of a Blockchain.com sensor."""

    # Static entity attributes shared by every instance.
    _attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
    _attr_icon = ICON
    _attr_native_unit_of_measurement = "BTC"

    def __init__(self, name, addresses):
        """Initialize the sensor."""
        self._attr_name = name
        # Addresses passed to get_balance in update(); presumably the balance
        # is the combined total across them — confirm in pyblockchain.
        self.addresses = addresses

    def update(self):
        """Get the latest state of the sensor."""
        self._attr_native_value = get_balance(self.addresses)
|
{
"content_hash": "d748a05d13c5a52e278fa57cf4a9d1c9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 27.85,
"alnum_prop": 0.698982645122681,
"repo_name": "Danielhiversen/home-assistant",
"id": "9d31d4c05838b015ac0654753a5d621021ed2b38",
"size": "1671",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/blockchain/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""
This module contains the go to line dialog.
"""
from pyqode.core._forms import dlg_goto_line_ui
from pyqode.qt import QtWidgets
class DlgGotoLine(QtWidgets.QDialog, dlg_goto_line_ui.Ui_Dialog):
    """
    Modal dialog that lets the user pick a line number to jump to.
    """
    def __init__(self, parent, current_line, line_count):
        QtWidgets.QDialog.__init__(self, parent)
        dlg_goto_line_ui.Ui_Dialog.__init__(self)
        self.setupUi(self)
        # Informational labels show the caret line and the document length.
        self.lblCurrentLine.setText("%d" % current_line)
        self.lblLineCount.setText("%d" % line_count)
        # The spin box is clamped to the document and starts at the caret.
        self.spinBox.setMaximum(line_count)
        self.spinBox.setValue(current_line)
        # Wire the standard buttons and relabel them.
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.buttonBox.button(self.buttonBox.Ok).setText(_("Go"))
        self.buttonBox.button(self.buttonBox.Cancel).setText(
            "I'm going nowhere")
        self.spinBox.setFocus()

    @classmethod
    def get_line(cls, parent, current_line, line_count):
        """
        Prompts the user for a line number.

        :param parent: Parent widget
        :param current_line: Current (0-based) line number
        :param line_count: Number of lines in the current text document.
        :returns: tuple(line, status); status is False if the dialog was
            rejected, in which case the original line is returned.
        """
        dialog = DlgGotoLine(parent, current_line + 1, line_count)
        accepted = dialog.exec_() == dialog.Accepted
        if not accepted:
            return current_line, False
        return dialog.spinBox.value() - 1, True
|
{
"content_hash": "1f49e84980e09702fab5e4524f775fa8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 35.69047619047619,
"alnum_prop": 0.6350900600400267,
"repo_name": "tommo/gii",
"id": "c462d9177170ed3bd3faf8f84f852ea141774e33",
"size": "1499",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/3rdparty/common/pyqode/core/dialogs/goto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
}
|
from convert_api_to_pypredef import Convert, Contains
try:
    import unittest
except ImportError:  # narrowed from a bare `except:` — only import failure is expected
    # Dump sys.path to help diagnose a broken PYTHONPATH, then re-raise.
    import sys
    sys.stderr.write('--- PYTHONPATH FOUND:\n')
    sys.stderr.write('\n'.join(sys.path))
    sys.stderr.write('\n--- END PYTHONPATH\n')
    raise
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
#===================================================================================================
# Test
#===================================================================================================
class Test(unittest.TestCase):
    """End-to-end checks for Convert() from convert_api_to_pypredef."""

    def setUp(self):
        unittest.TestCase.setUp(self)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def Check(self, sub, full):
        # Thin wrapper over Contains() that gives a readable failure message.
        if not Contains(sub, full):
            raise AssertionError('%s not in %s.' % (sub, full))

    def testConvert(self):
        import convert_api_to_pypredef
        # Representative .api signature lines fed through Convert().
        lines = [
            "PyQt4.QtCore.QObject.disconnect?4(QObject, SIGNAL(), QObject, SLOT()) -> object",
            "PyQt4.QtCore.QObject.connect?4(QObject, SIGNAL(), QObject, SLOT(), Qt.ConnectionType=Qt.AutoConnection) -> object",
            "PyQt4.QtCore.QAbstractEventDispatcher.__init__?1(self, QObject parent=None)",
            "PyQt4.QtCore.QByteArray.leftJustified?4(int width, char fill=' ', bool truncate=False) -> QByteArray"
        ]
        cancel_monitor = convert_api_to_pypredef.CancelMonitor()
        output_stream = StringIO()
        # NOTE(review): the `2` is presumably the number of dotted name parts
        # forming the module ("PyQt4.QtCore") — see the commented
        # `parts_for_module = 2` below; confirm against Convert()'s signature.
        Convert('test_passed_lines', 2, cancel_monitor, lines, output_stream=output_stream)
        # print output_stream.getvalue()
        self.Check("def disconnect(QObject, SIGNAL, QObject, SLOT):", output_stream.getvalue())
        self.Check("def connect(QObject, SIGNAL, QObject, SLOT, Qt_ConnectionType=Qt.AutoConnection):", output_stream.getvalue())
        self.Check("def leftJustified(width, fill=' ', truncate=False):", output_stream.getvalue())
        self.Check("def __init__(self, parent=None):", output_stream.getvalue())
#    api_file = r'C:\Documents and Settings\Fabio\Desktop\pydev_temp\PyQt4.api'
#    parts_for_module = 2
#    Convert(api_file, parts_for_module, cancel_monitor)
#===================================================================================================
# main
#===================================================================================================
if __name__ == '__main__':
    # unittest.makeSuite() is deprecated and removed in Python 3.13; load the
    # suite through TestLoader instead (same behavior, supported API).
    suite = unittest.TestLoader().loadTestsFromTestCase(Test)
    unittest.TextTestRunner(verbosity=1).run(suite)
|
{
"content_hash": "4b36f77918d4e0697d812ae366f60c37",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 129,
"avg_line_length": 43.3448275862069,
"alnum_prop": 0.5318217979315831,
"repo_name": "ArcherSys/ArcherSys",
"id": "af58671a49278263cbc6724c4de81ced437ce12b",
"size": "2588",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "eclipse/plugins/org.python.pydev.jython_4.5.5.201603221110/jysrc/tests/test_convert.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import setuptools
# Packaging metadata for the nova directory-service hook plugin.
setuptools.setup(
    name="nova_dirsvc_plugin",
    # Version must be a string; setuptools warns on (and newer releases
    # reject) a bare int. "1" parses to the same version.
    version="1",
    packages=['nova_dirsvc_plugin'],
    # Register create/delete instance hooks under nova's hook entry point.
    entry_points={
        'nova.hooks': [
            'create_instance=nova_dirsvc_plugin.hooks:SaveToDirSvc',
            'delete_instance=nova_dirsvc_plugin.hooks:DeleteFromDirSvc',
        ]
    },
)
|
{
"content_hash": "0e657a4d222cb6b9b44a6abc04593673",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.6203703703703703,
"repo_name": "fultonj/nova_dirsvc_plugin",
"id": "f6460aed369b85ae3e15fb2225481a1b3d8d5909",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5583"
}
],
"symlink_target": ""
}
|
"""
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import lldb
def fuzz_obj(obj):
    """Poke a default-constructed SBValueList's API to check it doesn't crash."""
    obj.Append(lldb.SBValue())
    obj.GetSize()
    obj.GetValueAtIndex(100)
    obj.FindValueObjectByUID(200)
    # Stringify every element to exercise iteration and __str__.
    for element in obj:
        _ = str(element)
|
{
"content_hash": "2638c24e39206c9c8fb42c616be993e1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 20.285714285714285,
"alnum_prop": 0.6690140845070423,
"repo_name": "apple/swift-lldb",
"id": "f20c87752499e4d5bf4a9386b707ede49a706d7a",
"size": "284",
"binary": false,
"copies": "13",
"ref": "refs/heads/stable",
"path": "packages/Python/lldbsuite/test/python_api/default-constructor/sb_valuelist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "130449"
},
{
"name": "C",
"bytes": "198536"
},
{
"name": "C++",
"bytes": "27687071"
},
{
"name": "CMake",
"bytes": "172176"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "106804"
},
{
"name": "Objective-C",
"bytes": "106821"
},
{
"name": "Objective-C++",
"bytes": "25658"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "4680483"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Swift",
"bytes": "260786"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
}
|
import shutil
import os
import tempfile
import subprocess
import json
def run_test(code, test):
    """Build a throw-away .NET xunit project, inject *code* + *test*, run it.

    Returns a tuple ``(json_string, exit_code)`` where *json_string* carries
    'successes', 'failures', 'errors', 'stdout' and 'result' keys, and
    *exit_code* is 0 on success.

    BUG FIX vs. original: the temp directory was only removed around the
    `dotnet test` phase, so the early ProcessError return (project creation
    failure) and any exception raised in between leaked the directory. The
    cleanup now covers every path after mkdtemp succeeds.
    """
    try:
        tmp_dir = tempfile.mkdtemp()
        print("Folder Created:", tmp_dir)
        try:
            # Scaffold an xunit project in the temp folder.
            print("Creating Project", tmp_dir)
            try:
                out = subprocess.check_output(
                    ['dotnet', 'new', 'xunit', '--output', tmp_dir],
                    stderr=subprocess.STDOUT)
                print("out:", out)
                print("Project Created")
            except subprocess.CalledProcessError as e:
                print("out Exception:", e)
                return (json.dumps(
                    {'successes': [], 'failures': [],
                     'errors': e.output.decode('utf8').split('\n'),
                     'stdout': "",
                     'result': 'ProcessError'}), e.returncode)
            print("Project Created:")
            for entry in os.listdir(tmp_dir):
                print(entry)
            # Write the combined source: Xunit using-directive, code under
            # test, then the tests themselves.
            source = "using Xunit;\n" + code + test
            with open(os.path.join(tmp_dir, "UnitTest1.cs"), 'w') as handle:
                handle.write(source)
            print("File Created")
            # Build and run; `dotnet test` exits non-zero on failing tests.
            try:
                out = subprocess.check_output(
                    ['dotnet', 'test', tmp_dir, '--nologo'],
                    stderr=subprocess.STDOUT)
                print("Pass TEST:", out)
                return (json.dumps({
                    'successes': out.decode('utf8').split("\n"),
                    'failures': [],
                    'errors': [],
                    'stdout': '',
                    'result': 'Success'
                }), 0)
            except subprocess.CalledProcessError as e:
                print("Fail TEST:", e.output.decode('utf8'))
                return (json.dumps({
                    'successes': [],
                    'failures': [e.output.decode('utf8')],
                    'errors': [],
                    'stdout': 'outer:' + str(e),
                    'result': "Failure"
                }), 1)
        finally:
            # Runs on every return/raise above, including the early returns.
            shutil.rmtree(tmp_dir)
    except Exception as e:
        return ["Error, could not evaluate"], e
def _result():
import xml.etree.ElementTree as ET
tree = ET.parse('TestResult.xml')
a = open('out.txt')
r = {
'successes': [e.attrib['description'] for e in tree.findall(".//test-case[@result='Success']")],
'failures': [e.attrib['description'] for e in tree.findall(".//test-case[@result='Failure']")],
'errors': [],
'stdout': a.read(),
'result': tree.findall("test-suite")[0].attrib['result']
}
return json.dumps(r)
if __name__ == "__main__":
code = """using System.IO;
using System;
public class Product
{
public int code;
public string desc;
public Product(int c, string d)
{
code=c;
desc=d;
}
public void Print()
{
Console.WriteLine("Producto {0}: {1}", code,desc);
}
}"""
test = """
public class ProductTest
{
[Fact]
public void Constructor()
{
Product p = new Product(1,"hola");
// Constraint Syntax
Assert.Equal(1,p.code);
}
[Fact]
public void PrintTest()
{
Product p = new Product(1,"hola");
p.Print();
using (StringWriter sw = new StringWriter())
{
Console.SetOut(sw);
p.Print();
string expected = "Producto 1: hola";
Assert.StartsWith(expected, sw.ToString());
}
}
}"""
print((run_test(code, test)))
|
{
"content_hash": "5b5f770edc0852cd3cad09855a52ac76",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 110,
"avg_line_length": 25.56756756756757,
"alnum_prop": 0.47436575052854124,
"repo_name": "mariosky/protoboard",
"id": "d9ac6c00d37606340d15f10fd3d49ec06e62d2db",
"size": "3808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker_compose_sandbox/sandbox/tester/test_csharp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7881"
},
{
"name": "Dockerfile",
"bytes": "611"
},
{
"name": "HTML",
"bytes": "322054"
},
{
"name": "JavaScript",
"bytes": "59836"
},
{
"name": "Mustache",
"bytes": "1985"
},
{
"name": "Python",
"bytes": "190989"
},
{
"name": "SCSS",
"bytes": "1634"
},
{
"name": "Shell",
"bytes": "482"
}
],
"symlink_target": ""
}
|
import imp
import os.path
import sys
import unittest
from mojom.generate import module as mojom
from mojom.generate import translate
from mojom.parse import ast
class TranslateTest(unittest.TestCase):
  """Tests |parser.Parse()|.

  NOTE: assertEquals calls were migrated to assertEqual; the deprecated
  alias was removed in Python 3.12.
  """

  def testSimpleArray(self):
    """Tests a simple int32[]."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("int32[]"), "a:i32")

  def testAssociativeArray(self):
    """Tests a simple uint8{string}."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8{string}"), "m[s][u8]")

  def testLeftToRightAssociativeArray(self):
    """Makes sure that parsing is done from right to left on the internal kinds
    in the presence of an associative array."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8[]{string}"), "m[s][a:u8]")

  def testTranslateSimpleUnions(self):
    """Makes sure that a simple union is translated correctly."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([
                ast.UnionField("a", None, None, "int32"),
                ast.UnionField("b", None, None, "string")
            ]))
    ])
    translation = translate.OrderedModule(tree, "mojom_tree", [])
    self.assertEqual(1, len(translation.unions))
    union = translation.unions[0]
    self.assertTrue(isinstance(union, mojom.Union))
    self.assertEqual("SomeUnion", union.mojom_name)
    self.assertEqual(2, len(union.fields))
    self.assertEqual("a", union.fields[0].mojom_name)
    self.assertEqual(mojom.INT32.spec, union.fields[0].kind.spec)
    self.assertEqual("b", union.fields[1].mojom_name)
    self.assertEqual(mojom.STRING.spec, union.fields[1].kind.spec)

  def testMapKindRaisesWithDuplicate(self):
    """Verifies _MapTreeForType() raises when passed two values with the same
    name."""
    methods = [
        ast.Method('dup', None, None, ast.ParameterList(), None),
        ast.Method('dup', None, None, ast.ParameterList(), None)
    ]
    with self.assertRaises(Exception):
      translate._ElemsOfType(methods, ast.Method, 'scope')

  def testAssociatedKinds(self):
    """Tests type spec translation of associated interfaces and requests."""
    # pylint: disable=W0212
    self.assertEqual(
        translate._MapKind("asso<SomeInterface>?"), "?asso:x:SomeInterface")
    self.assertEqual(translate._MapKind("rca<SomeInterface>?"),
                     "?rca:x:SomeInterface")

  def testSelfRecursiveUnions(self):
    """Verifies _UnionField() raises when a union is self-recursive."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union("SomeUnion", None,
                  ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion?")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

  def testDuplicateAttributesException(self):
    """Duplicate attribute keys on a union must be rejected."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "FakeUnion",
            ast.AttributeList([
                ast.Attribute("key1", "value"),
                ast.Attribute("key1", "value")
            ]),
            ast.UnionBody([
                ast.UnionField("a", None, None, "int32"),
                ast.UnionField("b", None, None, "string")
            ]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])
|
{
"content_hash": "8e5f49e248970a79235eb4ee3289bfda",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 80,
"avg_line_length": 36.1078431372549,
"alnum_prop": 0.6326364376866684,
"repo_name": "nwjs/chromium.src",
"id": "4259374513f2bf7146e2f6ab6b6be958aa85f4ca",
"size": "3824",
"binary": false,
"copies": "8",
"ref": "refs/heads/nw70",
"path": "mojo/public/tools/mojom/mojom/generate/translate_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Generates YAML configuration files for distributed Tensorflow workers.
The workers will be run in a Kubernetes (k8s) container cluster.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# Note: It is intentional that we do not import tensorflow in this script. The
# machine that launches a TensorFlow k8s cluster does not have to have the
# Python package of TensorFlow installed on it.
# Docker image used for the TF gRPC server pods unless overridden via flag.
DEFAULT_DOCKER_IMAGE = 'tensorflow/tf_grpc_test_server'
# Default gRPC port exposed by each worker / parameter server.
DEFAULT_PORT = 2222
# TODO(cais): Consider adding resource requests/limits to the pods.
WORKER_RC = (
"""apiVersion: v1
kind: ReplicationController
metadata:
name: tf-worker{worker_id}
spec:
replicas: 1
template:
metadata:
labels:
tf-worker: "{worker_id}"
spec:
containers:
- name: tf-worker{worker_id}
image: {docker_image}
args:
- --cluster_spec={cluster_spec}
- --job_name=worker
- --task_id={worker_id}
ports:
- containerPort: {port}
""")
WORKER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
ports:
- port: {port}
targetPort: {port}
selector:
tf-worker: "{worker_id}"
""")
WORKER_LB_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
type: LoadBalancer
ports:
- port: {port}
selector:
tf-worker: "{worker_id}"
""")
PARAM_SERVER_RC = (
"""apiVersion: v1
kind: ReplicationController
metadata:
name: tf-ps{param_server_id}
spec:
replicas: 1
template:
metadata:
labels:
tf-ps: "{param_server_id}"
spec:
containers:
- name: tf-ps{param_server_id}
image: {docker_image}
args:
- --cluster_spec={cluster_spec}
- --job_name=ps
- --task_id={param_server_id}
ports:
- containerPort: {port}
""")
PARAM_SERVER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-ps{param_server_id}
labels:
tf-ps: "{param_server_id}"
spec:
ports:
- port: {port}
selector:
tf-ps: "{param_server_id}"
""")
def main():
  """Parse CLI flags and print the generated k8s YAML config to stdout."""

  def str_to_bool(value):
    # BUG FIX: the original used type=bool, but bool('False') is True because
    # any non-empty string is truthy, so --request_load_balancer False
    # silently *enabled* the load balancer. Parse the spelling explicitly;
    # `--request_load_balancer True` keeps working as before.
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', '1'):
      return True
    if lowered in ('false', 'f', 'no', '0'):
      return False
    raise argparse.ArgumentTypeError('Expected a boolean; got %r' % value)

  parser = argparse.ArgumentParser()
  parser.add_argument('--num_workers',
                      type=int,
                      default=2,
                      help='How many worker pods to run')
  parser.add_argument('--num_parameter_servers',
                      type=int,
                      default=1,
                      help='How many paramater server pods to run')
  parser.add_argument('--grpc_port',
                      type=int,
                      default=DEFAULT_PORT,
                      help='GRPC server port (Default: %d)' % DEFAULT_PORT)
  parser.add_argument('--request_load_balancer',
                      type=str_to_bool,
                      default=False,
                      help='To request worker0 to be exposed on a public IP '
                      'address via an external load balancer, enabling you to '
                      'run client processes from outside the cluster')
  parser.add_argument('--docker_image',
                      type=str,
                      default=DEFAULT_DOCKER_IMAGE,
                      help='Override default docker image for the TensorFlow '
                      'GRPC server')
  args = parser.parse_args()

  # Both pod counts must be strictly positive.
  if args.num_workers <= 0:
    sys.stderr.write('--num_workers must be greater than 0; received %d\n'
                     % args.num_workers)
    sys.exit(1)
  if args.num_parameter_servers <= 0:
    sys.stderr.write(
        '--num_parameter_servers must be greater than 0; received %d\n'
        % args.num_parameter_servers)
    sys.exit(1)

  # Generate contents of yaml config
  yaml_config = GenerateConfig(args.num_workers,
                               args.num_parameter_servers,
                               args.grpc_port,
                               args.request_load_balancer,
                               args.docker_image)
  print(yaml_config)  # pylint: disable=superfluous-parens
def GenerateConfig(num_workers,
                   num_param_servers,
                   port,
                   request_load_balancer,
                   docker_image):
  """Build the full YAML for all worker and parameter-server pods/services.

  Every section is terminated by the YAML document delimiter '---',
  matching the original's per-section trailing separator.
  """
  sections = []
  for worker_id in range(num_workers):
    cluster_spec = WorkerClusterSpec(num_workers, num_param_servers, port)
    sections.append(WORKER_RC.format(
        port=port,
        worker_id=worker_id,
        docker_image=docker_image,
        cluster_spec=cluster_spec))
    # Worker 0 optionally gets a public LoadBalancer service instead of the
    # cluster-internal one.
    if worker_id == 0 and request_load_balancer:
      sections.append(WORKER_LB_SVC.format(port=port, worker_id=worker_id))
    else:
      sections.append(WORKER_SVC.format(port=port, worker_id=worker_id))
  for ps_id in range(num_param_servers):
    cluster_spec = ParamServerClusterSpec(num_workers, num_param_servers, port)
    sections.append(PARAM_SERVER_RC.format(
        port=port,
        param_server_id=ps_id,
        docker_image=docker_image,
        cluster_spec=cluster_spec))
    sections.append(PARAM_SERVER_SVC.format(port=port, param_server_id=ps_id))
  return ''.join(section + '---\n' for section in sections)
def WorkerClusterSpec(num_workers,
                      num_param_servers,
                      port):
  """Cluster spec string as seen by worker tasks (delegates to ClusterSpec)."""
  return ClusterSpec(num_workers, num_param_servers, port)
def ParamServerClusterSpec(num_workers,
                           num_param_servers,
                           port):
  """Cluster spec string as seen by ps tasks (delegates to ClusterSpec)."""
  return ClusterSpec(num_workers, num_param_servers, port)
def ClusterSpec(num_workers,
                num_param_servers,
                port):
  """Build the cluster spec string.

  Format: 'worker|tf-worker0:P;tf-worker1:P,ps|tf-ps0:P;...' — job name,
  pipe, then semicolon-separated host:port entries, jobs joined by commas.
  """
  workers = ';'.join('tf-worker%d:%d' % (i, port)
                     for i in range(num_workers))
  param_servers = ';'.join('tf-ps%d:%d' % (i, port)
                           for i in range(num_param_servers))
  return 'worker|' + workers + ',ps|' + param_servers
# Entry point: print the generated YAML config to stdout.
if __name__ == '__main__':
  main()
|
{
"content_hash": "9e22938d62fa726d4fd1348aa35d2ce3",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 28.275109170305676,
"alnum_prop": 0.5536679536679536,
"repo_name": "panmari/tensorflow",
"id": "e3fde2180acce87403f9364da94da65d9f96cd44",
"size": "7171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/tools/dist_test/scripts/k8s_tensorflow.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "153226"
},
{
"name": "C++",
"bytes": "7360924"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "683163"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771416"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "102168"
},
{
"name": "Python",
"bytes": "4526515"
},
{
"name": "Shell",
"bytes": "117381"
},
{
"name": "TypeScript",
"bytes": "340911"
}
],
"symlink_target": ""
}
|
import random
import math
class Neuron(object):
    """Single neuron with a scaled sigmoid activation.

    The activation function is f(x) = 2 / (1 + exp(-lamda * x)) - 1,
    mapping the weighted input sum into (-1, 1).
    """

    def __init__(self, prev_n_neurons):
        # One weight per neuron of the previous layer, uniform in [-0.5, 0.5).
        self._synapticWeights = []
        for i in range(prev_n_neurons):
            self._synapticWeights.append(random.random() - 0.5)
        # Sigmoid steepness.
        self.lamda = 1.5
        # Weighted input sum from the most recent activate() call.
        self._activation = 0.0

    def activate(self, inputs):
        """Return the neuron's output for *inputs* (one value per weight).

        BUG FIX: the original referenced the bare names `_synapticWeights`,
        `_activation` and `lamda`, which raise NameError; they are instance
        attributes and must be accessed through `self`.
        """
        self._activation = 0.0
        for i in range(len(inputs)):
            self._activation += inputs[i] * self._synapticWeights[i]
        return 2.0 / (1.0 + float(math.exp(-self._activation * self.lamda))) - 1.0

    def getActivationDerivative(self):
        """Derivative of the activation function at the last activation."""
        # BUG FIX: same missing-`self.` NameErrors as activate().
        expmlx = float(math.exp(self.lamda * self._activation))
        return 2 * self.lamda * expmlx / ((1 + expmlx) * (1 + expmlx))

    def getSynapticWeights(self):
        return self._synapticWeights

    def getSynapticWeight(self, i):
        return self._synapticWeights[i]

    def setSynapticWeight(self, i, v):
        self._synapticWeights[i] = v
class Layer(object):
    """A fully connected layer of Neurons; index 0 is a constant bias unit."""

    def __init__(self, prev_n_neurons, n_neurons):
        # +1 on both sides accounts for the bias units.
        self._n_neurons = n_neurons + 1
        self._prev_n_neurons = prev_n_neurons + 1
        self._neurons = []
        # BUG FIX: _outputs was left empty, so evaluate()'s indexed writes
        # raised IndexError; pre-size one slot per neuron.
        self._outputs = [0.0] * self._n_neurons
        for i in range(self._n_neurons):
            self._neurons.append(Neuron(self._prev_n_neurons))

    @staticmethod
    def add_bias(inp):
        """Return a copy of *inp* with a constant 1.0 bias prepended."""
        outp = [1.0]
        for i in range(len(inp)):
            outp.append(inp[i])
        return outp

    def evaluate(self, inp):
        """Feed *inp* through every non-bias neuron; returns the output list.

        BUG FIX: the original's bare `_neurons` / `_outputs` / `_n_neurons` /
        `add_bias` references raised NameError; all are attribute or
        class-level accesses.
        """
        # Prepend the bias input only when the caller did not already.
        if len(inp) != len(self._neurons[0].getSynapticWeights()):
            inputs = Layer.add_bias(inp)
        else:
            inputs = inp
        for i in range(self._n_neurons):
            if i > 0:
                self._outputs[i] = self._neurons[i].activate(inputs)
        self._outputs[0] = 1.0  # bias output is always 1
        return self._outputs

    def size(self):
        """Number of neurons in this layer, bias unit included."""
        return self._n_neurons

    def __len__(self):
        # Allows len(layer); the Mlp code calls len() on layers.
        return self._n_neurons

    def getOutput(self, i):
        return self._outputs[i]

    def getActivationDerivative(self, i):
        return self._neurons[i].getActivationDerivative()

    def getWeight(self, i, j):
        return self._neurons[i].getSynapticWeight(j)

    def getWeights(self, i):
        return self._neurons[i].getSynapticWeights()

    def setWeight(self, i, j, v):
        self._neurons[i].setSynapticWeight(j, v)
class Mlp(object):
    """Multi-layer perceptron trained with batch gradient descent.

    ``nn_neurons`` lists the (non-bias) neuron count of each layer; each
    Layer adds its own bias unit internally.
    """

    def __init__(self, nn_neurons):
        # BUG FIX: throughout this class the original referenced the bare
        # names `_layers`, `_delta_w` and `_grad_ex` (NameError); they are
        # instance attributes and must be accessed through `self`.
        self._layers = []
        for i in range(len(nn_neurons)):
            self._layers.append(
                Layer(nn_neurons[i] if i == 0 else nn_neurons[i - 1],
                      nn_neurons[i]))
        # BUG FIX: `[[None] * w] * n` aliases ONE row object n times, so all
        # rows of a layer's delta matrix shared storage; build distinct rows.
        self._delta_w = []
        for i in range(len(nn_neurons)):
            row_len = len(self._layers[i].getWeights(0))
            self._delta_w.append(
                [[0.0] * row_len for _ in range(self._layers[i].size())])
        self._grad_ex = []
        for i in range(len(nn_neurons)):
            self._grad_ex.append([0.0] * self._layers[i].size())

    def evaluate(self, inputs):
        """Forward-propagate *inputs*; returns the final layer's outputs."""
        outputs = [None] * len(inputs)
        for i in range(len(self._layers)):
            outputs = self._layers[i].evaluate(inputs)
            inputs = outputs
        return outputs

    def evaluateError(self, nn_output, desired_output):
        """Squared error between a network output and the desired output."""
        if len(desired_output) != len(nn_output):
            d = Layer.add_bias(desired_output)
        else:
            d = desired_output
        e = 0.0
        for i in range(len(nn_output)):
            e += (nn_output[i] - d[i]) * (nn_output[i] - d[i])
        return e

    def evaluateQuadraticError(self, examples, results):
        """Total squared error over the whole training set."""
        e = 0.0
        for i in range(len(examples)):
            e += self.evaluateError(self.evaluate(examples[i]), results[i])
        return e

    def evaluateGradients(self, results):
        """Backpropagate the output error, filling self._grad_ex per layer."""
        c = len(self._layers) - 1
        while c >= 0:
            for i in range(self._layers[c].size()):
                if c == len(self._layers) - 1:
                    # NOTE(review): results[0] assumes an indexable target,
                    # but batchBackPropagation passes a scalar results[l];
                    # confirm the intended target format.
                    self._grad_ex[c][i] = (
                        2 * (self._layers[c].getOutput(i) - results[0])
                        * self._layers[c].getActivationDerivative(i))
                else:
                    # Renamed `sum` -> `total` to avoid shadowing the builtin.
                    total = 0.0
                    for k in range(self._layers[c + 1].size()):
                        if k > 0:
                            total += (self._layers[c + 1].getWeight(k, i)
                                      * self._grad_ex[c + 1][k])
                    self._grad_ex[c][i] = (
                        self._layers[c].getActivationDerivative(i) * total)
            c = c - 1

    def resetWeightsDelta(self):
        """Zero the accumulated weight deltas before a new batch."""
        for c in range(len(self._layers)):
            for i in range(self._layers[c].size()):
                # BUG FIX: the original wrote `getWeights[i]` — indexing the
                # bound method instead of calling it.
                weights = self._layers[c].getWeights(i)
                for j in range(len(weights)):
                    self._delta_w[c][i][j] = 0

    def evaluateWeightsDelta(self):
        """Accumulate gradient x upstream-output into the delta matrices."""
        for c in range(len(self._layers)):
            if c > 0:
                for i in range(self._layers[c].size()):
                    weights = self._layers[c].getWeights(i)
                    for j in range(len(weights)):
                        self._delta_w[c][i][j] += (
                            self._grad_ex[c][i]
                            * self._layers[c - 1].getOutput(j))

    def updateWeights(self, learning_rate):
        """Apply the accumulated deltas scaled by the learning rate."""
        for c in range(len(self._layers)):
            for i in range(self._layers[c].size()):
                weights = self._layers[c].getWeights(i)
                for j in range(len(weights)):
                    self._layers[c].setWeight(
                        i, j,
                        self._layers[c].getWeight(i, j)
                        - (learning_rate * self._delta_w[c][i][j]))

    def batchBackPropagation(self, examples, results, learning_rate):
        """One full batch: reset deltas, accumulate per example, update."""
        self.resetWeightsDelta()
        for l in range(len(examples)):
            self.evaluate(examples[l])
            self.evaluateGradients(results[l])
            self.evaluateWeightsDelta()
        self.updateWeights(learning_rate)

    def learn(self, examples, results, learning_rate):
        """Run batches until the total squared error drops below 0.001."""
        e = 10000000000.0
        while e > 0.001:
            self.batchBackPropagation(examples, results, learning_rate)
            e = self.evaluateQuadraticError(examples, results)
# XOR-style training data: 2-feature examples with +/-1 targets.
ex = [[-1,1], [1,1], [1,-1], [-1,-1]]
res = [1, -1, 1, -1]
# Network shape: input width, then one hidden layer three times as wide.
nn = [len(ex[0]), len(ex[0]) * 3]
mlp = Mlp(nn)
# NOTE(review): learn() itself already loops until the error is < 0.001, so
# re-invoking it 40000 times mostly re-checks convergence — confirm intent.
for i in range(40000):
    mlp.learn(ex, res, 0.3)
    err = mlp.evaluateQuadraticError(ex, res)
    print(i, "->error: ", err)
|
{
"content_hash": "336184eed7f88c6b9a07d0e99787978d",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 106,
"avg_line_length": 28.532544378698226,
"alnum_prop": 0.638946495230195,
"repo_name": "vigor95/vigor95.github.io",
"id": "800da8e6f8696272681916943219a03e3342b6d6",
"size": "4822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_posts/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20363"
},
{
"name": "HTML",
"bytes": "112738"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import numpy as np
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_cut():
    """
    Python API test: h2o.frame.H2OFrame.cut(breaks, labels=None, include_lowest=False, right=True, dig_lab=3)[source]
    """
    # 100 uniform samples in [-2, 2), loaded into a single-column H2OFrame.
    python_lists = np.random.uniform(-2,2, (100,1))
    h2oframe = h2o.H2OFrame(python_obj=python_lists)
    # NOTE(review): these breaks are not monotonically increasing (1 appears
    # before 0); this looks like a typo for [-2, -1, 0, 1, 2]. Confirm against
    # H2OFrame.cut(), which expects sorted cut points.
    breaks = [-2, 1, 0, 1, 2]
    newframe = h2oframe.cut(breaks, labels=None, include_lowest=False, right=True, dig_lab=3)
    assert_is_type(newframe, H2OFrame) # check return type as H2OFrame
    # check returned frame content is correct
    assert newframe.types["C1"]=="enum", "h2o.H2OFrame.cut() command is not working."
    assert len(newframe.levels()) <= len(breaks), "h2o.H2OFrame.cut() command is not working."
# NOTE(review): this call runs at import time; h2o pyunit tests normally guard
# it with `if __name__ == "__main__":` — confirm whether that was intended.
pyunit_utils.standalone_test(h2o_H2OFrame_cut)
|
{
"content_hash": "127389e070976ec2d0feb0c212fb1444",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 117,
"avg_line_length": 39.833333333333336,
"alnum_prop": 0.7039748953974896,
"repo_name": "h2oai/h2o-3",
"id": "a08bc671a639f6572b000d00f16c9c51cddd8070",
"size": "956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_cut.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
}
|
"""
Library to make simple python functions as robot keywords
"""
import ipaddress
def get_ip_version(ip_string):
    """Return the IP version (4 or 6) of *ip_string* as an integer.

    BUG FIX: the original called the Python-2-only ``unicode()`` builtin,
    which raises NameError on Python 3. ``u"{0}".format(...)`` produces
    unicode on Python 2 and str on Python 3 — exactly the text type that
    ``ipaddress.ip_address()`` requires on both.

    :param ip_string: textual IPv4 or IPv6 address, e.g. "10.0.0.1".
    :returns: 4 or 6.
    :raises ValueError: if *ip_string* is not a valid IP address.
    """
    ip = ipaddress.ip_address(u"{0}".format(ip_string))
    return ip.version
|
{
"content_hash": "b4809710ab8145464b1538930e85ad3e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "rastislavszabo/vpp",
"id": "09f406d31723256a977351c0eaafbe58b0638a0b",
"size": "230",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/robot/libraries/help_py_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7753"
},
{
"name": "Dockerfile",
"bytes": "8549"
},
{
"name": "Go",
"bytes": "2219611"
},
{
"name": "HTML",
"bytes": "33270"
},
{
"name": "JavaScript",
"bytes": "1716"
},
{
"name": "Makefile",
"bytes": "12395"
},
{
"name": "Python",
"bytes": "4797"
},
{
"name": "RobotFramework",
"bytes": "141402"
},
{
"name": "Ruby",
"bytes": "13223"
},
{
"name": "Shell",
"bytes": "172620"
},
{
"name": "TypeScript",
"bytes": "221104"
}
],
"symlink_target": ""
}
|
import unittest
import warnings
import mock
from brew.exceptions import DataLoaderException
from brew.exceptions import GrainException
from brew.exceptions import HopException
from brew.exceptions import YeastException
from brew.parsers import DataLoader
from brew.parsers import JSONDataLoader
from brew.parsers import parse_cereals
from brew.parsers import parse_hops
from brew.parsers import parse_recipe
from brew.parsers import parse_yeast
from brew.recipes import Recipe
from fixtures import cascade_add
from fixtures import pale_add
from fixtures import yeast
class CerealsLoader(DataLoader):
    """Test double that always serves the pale-malt fixture, flattened."""

    def get_item(self, dir_suffix, item_name):
        flattened = pale_add.to_dict()
        flattened.update(flattened.pop(u"data"))
        return flattened
class HopsLoader(DataLoader):
    """Test double that always serves the cascade hop fixture, flattened."""

    def get_item(self, dir_suffix, item_name):
        flattened = cascade_add.to_dict()
        flattened.update(flattened.pop(u"data"))
        return flattened
class YeastLoader(DataLoader):
    """Test double that always serves the yeast fixture, flattened."""

    def get_item(self, dir_suffix, item_name):
        flattened = yeast.to_dict()
        flattened.update(flattened.pop(u"data"))
        return flattened
class TestDataLoader(unittest.TestCase):
    """Unit tests for the DataLoader base class.

    NOTE: assertEquals was migrated to assertEqual throughout; the
    deprecated alias was removed in Python 3.12.
    """

    def setUp(self):
        self.loader = DataLoader("./")
        self.loader.DATA = {}
        self.loader.EXT = "json"

    def test_data_dir_does_not_exist(self):
        """Constructing against a missing directory raises DataLoaderException."""
        with self.assertRaises(DataLoaderException) as ctx:
            DataLoader("./baddirectory")
        self.assertEqual(
            str(ctx.exception), u"Directory './baddirectory' does not exist"
        )

    def test_read_data_raises(self):
        """The base class leaves read_data() abstract."""
        with self.assertRaises(NotImplementedError):
            self.loader.read_data("filename")

    @mock.patch("glob.glob")
    def test_get_item(self, mock_glob):
        """get_item() returns whatever read_data parses for a matching file."""
        def read_data(item_filename):
            return "data"

        self.loader.read_data = read_data
        mock_glob.return_value = ["cereals/crystal_20.json"]
        out = self.loader.get_item("/", "crystal 20")
        expected = "data"
        self.assertEqual(out, expected)

    @mock.patch("glob.glob")
    def test_get_item_dir_does_not_exist(self, mock_glob):
        """Looking up an item in a missing directory raises DataLoaderException."""
        with self.assertRaises(DataLoaderException) as ctx:
            self.loader.get_item("baditemdir/", "crystal 20")
        self.assertEqual(
            str(ctx.exception), u"Item directory './baditemdir/' does not exist"
        )

    @mock.patch("glob.glob")
    def test_get_item_warns(self, mock_glob):
        """A lookup matching no file emits a Warning instead of raising."""
        def read_data(item_filename):
            return "data"

        self.loader.read_data = read_data
        mock_glob.return_value = ["cereals/crystal_40.json"]
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.loader.get_item("/", "crystal 20")
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, Warning))
            self.assertTrue("dir not found" in str(w[-1].message))
class TestJSONDataLoader(unittest.TestCase):
    """Tests for JSONDataLoader name normalization.

    ``assertEquals`` replaced with ``assertEqual`` (deprecated alias,
    removed in Python 3.12).
    """
    def setUp(self):
        self.loader = JSONDataLoader("./")
    def test_format_name(self):
        # (input, expected slug) pairs covering spaces, dashes and case.
        name_list = [
            (u"pale malt 2-row us", u"pale_malt_2_row_us"),
            (u"caramel crystal malt 20l", u"caramel_crystal_malt_20l"),  # noqa
            (u"centennial", u"centennial"),
            (u"cascade us", u"cascade_us"),
            (u"Wyeast 1056", u"wyeast_1056"),
        ]
        for name, expected in name_list:
            out = self.loader.format_name(name)
            self.assertEqual(out, expected)
class TestCerealParser(unittest.TestCase):
    """Tests for parse_cereals() against the pale malt fixture.

    ``assertEquals`` replaced with ``assertEqual`` (deprecated alias,
    removed in Python 3.12).
    """
    def setUp(self):
        self.grain_add = pale_add.to_dict()
        self.loader = CerealsLoader("./")
    def test_parse_cereals(self):
        out = parse_cereals(self.grain_add, self.loader)
        self.assertEqual(out, pale_add)
    def test_parse_cereals_loader_returns_no_data(self):
        # The recipe dict alone must be enough when the loader is empty.
        def get_item(dir_suffix, item_name):
            return {}
        self.loader.get_item = get_item
        out = parse_cereals(self.grain_add, self.loader)
        self.assertEqual(out, pale_add)
    def test_parse_cereals_no_color(self):
        # Missing color in the recipe is backfilled from the loader.
        grain_add = pale_add.to_dict()
        grain_add[u"data"].pop(u"color")
        grain_add.update(grain_add.pop(u"data"))
        out = parse_cereals(grain_add, self.loader)
        self.assertEqual(out, pale_add)
    def test_parse_cereals_no_color_data(self):
        # Color missing from both recipe and loader must raise.
        class Loader(DataLoader):
            def get_item(self, dir_suffix, item_name):
                grain_add = pale_add.to_dict()
                grain_add[u"data"].pop(u"color")
                grain_add.update(grain_add.pop(u"data"))
                return grain_add
        grain_add = pale_add.to_dict()
        grain_add[u"data"].pop(u"color")
        grain_add.update(grain_add.pop(u"data"))
        with self.assertRaises(GrainException) as ctx:
            parse_cereals(grain_add, Loader("./"))
        self.assertEqual(str(ctx.exception), u"pale 2-row: Must provide color value")
    def test_parse_cereals_no_ppg(self):
        # Missing ppg in the recipe is backfilled from the loader.
        grain_add = pale_add.to_dict()
        grain_add[u"data"].pop(u"ppg")
        grain_add.update(grain_add.pop(u"data"))
        out = parse_cereals(grain_add, self.loader)
        self.assertEqual(out, pale_add)
    def test_parse_cereals_no_ppg_data(self):
        # ppg missing from both recipe and loader must raise.
        class Loader(DataLoader):
            def get_item(self, dir_suffix, item_name):
                grain_add = pale_add.to_dict()
                grain_add[u"data"].pop(u"ppg")
                grain_add.update(grain_add.pop(u"data"))
                return grain_add
        grain_add = pale_add.to_dict()
        grain_add[u"data"].pop(u"ppg")
        grain_add.update(grain_add.pop(u"data"))
        with self.assertRaises(GrainException) as ctx:
            parse_cereals(grain_add, Loader("./"))
        self.assertEqual(str(ctx.exception), u"pale 2-row: Must provide ppg or hwe")
class TestHopsParser(unittest.TestCase):
    """Tests for parse_hops() against the cascade fixture.

    ``assertEquals`` replaced with ``assertEqual`` (deprecated alias,
    removed in Python 3.12).
    """
    def setUp(self):
        self.hop_add = cascade_add.to_dict()
        self.loader = HopsLoader("./")
    def test_parse_hops(self):
        out = parse_hops(self.hop_add, self.loader)
        self.assertEqual(out, cascade_add)
    def test_parse_hops_loader_returns_no_data(self):
        # The recipe dict alone must be enough when the loader is empty.
        def get_item(dir_suffix, item_name):
            return {}
        self.loader.get_item = get_item
        out = parse_hops(self.hop_add, self.loader)
        self.assertEqual(out, cascade_add)
    def test_parse_hops_no_percent_alpha_acids(self):
        # Missing alpha acids in the recipe is backfilled from the loader.
        hop_add = cascade_add.to_dict()
        hop_add[u"data"].pop(u"percent_alpha_acids")
        hop_add.update(hop_add.pop(u"data"))
        out = parse_hops(hop_add, self.loader)
        self.assertEqual(out, cascade_add)
    def test_parse_hops_no_percent_alpha_acids_data(self):
        # Alpha acids missing from both recipe and loader must raise.
        class Loader(DataLoader):
            def get_item(self, dir_suffix, item_name):
                hop_add = cascade_add.to_dict()
                hop_add[u"data"].pop(u"percent_alpha_acids")
                hop_add.update(hop_add.pop(u"data"))
                return hop_add
        hop_add = cascade_add.to_dict()
        hop_add[u"data"].pop(u"percent_alpha_acids")
        hop_add.update(hop_add.pop(u"data"))
        with self.assertRaises(HopException) as ctx:
            parse_hops(hop_add, Loader("./"))
        self.assertEqual(
            str(ctx.exception), u"cascade: Must provide percent alpha acids"
        )
class TestYeastParser(unittest.TestCase):
    """Tests for parse_yeast() against the yeast fixture.

    ``assertEquals`` replaced with ``assertEqual`` (deprecated alias,
    removed in Python 3.12).
    """
    def setUp(self):
        self.yeast = yeast.to_dict()
        self.loader = YeastLoader("./")
    def test_parse_yeast(self):
        out = parse_yeast(self.yeast, self.loader)
        self.assertEqual(out, yeast)
    def test_parse_yeast_loader_returns_no_data(self):
        # The recipe dict alone must be enough when the loader is empty.
        def get_item(dir_suffix, item_name):
            return {}
        self.loader.get_item = get_item
        out = parse_yeast(self.yeast, self.loader)
        self.assertEqual(out, yeast)
    def test_parse_yeast_no_percent_attenuation(self):
        # Missing attenuation in the recipe is backfilled from the loader.
        yst = yeast.to_dict()
        yst[u"data"].pop(u"percent_attenuation")
        yst.update(yst.pop(u"data"))
        out = parse_yeast(yst, self.loader)
        self.assertEqual(out, yeast)
    def test_parse_yeast_no_percent_attenuation_data(self):
        # Attenuation missing from both recipe and loader must raise.
        class Loader(DataLoader):
            def get_item(self, dir_suffix, item_name):
                yst = yeast.to_dict()
                yst[u"data"].pop(u"percent_attenuation")
                yst.update(yst.pop(u"data"))
                return yst
        yst = yeast.to_dict()
        yst[u"data"].pop(u"percent_attenuation")
        yst.update(yst.pop(u"data"))
        with self.assertRaises(YeastException) as ctx:
            parse_yeast(yst, Loader("./"))
        self.assertEqual(
            str(ctx.exception), u"Wyeast 1056: Must provide percent attenuation"
        )
class TestRecipeParser(unittest.TestCase):
    """Round-trip tests for parse_recipe() with custom and default loaders.

    ``assertEquals`` replaced with ``assertEqual`` (deprecated alias,
    removed in Python 3.12).
    """
    def setUp(self):
        # A special recipe is needed since the loaders only return
        # pre-chosen additions
        self.recipe = Recipe(
            name=u"pale ale",
            grain_additions=[pale_add, pale_add],
            hop_additions=[cascade_add, cascade_add],
            yeast=yeast,
            brew_house_yield=0.70,  # %
            start_volume=7.0,  # G
            final_volume=5.0,  # G
        )
        self.recipe_data = self.recipe.to_dict()
        self.cereals_loader = CerealsLoader("./")
        self.hops_loader = HopsLoader("./")
        self.yeast_loader = YeastLoader("./")
    def test_parse_recipe(self):
        # Per-ingredient loaders take precedence over the main loader (None).
        out = parse_recipe(
            self.recipe_data,
            None,
            cereals_loader=self.cereals_loader,
            hops_loader=self.hops_loader,
            yeast_loader=self.yeast_loader,
        )
        self.assertEqual(out, self.recipe)
    def test_parse_recipe_default_loader(self):
        # A single shared loader with per-ingredient directory suffixes.
        out = parse_recipe(
            self.recipe_data,
            DataLoader("./"),
            cereals_dir_suffix="/",
            hops_dir_suffix="/",
            yeast_dir_suffix="/",
        )
        self.assertEqual(out, self.recipe)
|
{
"content_hash": "4e3e87e0d1ef1cd7de54b61139198381",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 86,
"avg_line_length": 34.48972602739726,
"alnum_prop": 0.6021249131168702,
"repo_name": "chrisgilmerproj/brewday",
"id": "f2ebff6033f10fe2eda20dbf5901bd076504394a",
"size": "10095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1528"
},
{
"name": "Python",
"bytes": "313918"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
}
|
"""Packaging configuration for the ``google-cloud-bigquery`` distribution."""
import os
from setuptools import find_packages
from setuptools import setup
# Directory containing this setup.py; used to locate README.rst next to it.
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
# The README becomes the long_description shown on PyPI.
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
    README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
# Keyword arguments shared by every google-cloud-* sub-package setup.py.
SETUP_BASE = {
    'author': 'Google Cloud Platform',
    'author_email': 'jjg+google-cloud-python@google.com',
    'scripts': [],
    'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
    'license': 'Apache 2.0',
    'platforms': 'Posix; MacOS X; Windows',
    'include_package_data': True,
    'zip_safe': False,
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
}
# Pin core to a compatible pre-1.0 range; both bounds matter for API stability.
REQUIREMENTS = [
    'google-cloud-core >= 0.22.1, < 0.23dev',
]
setup(
    name='google-cloud-bigquery',
    version='0.22.1',
    description='Python Client for Google BigQuery',
    long_description=README,
    # Declare the shared 'google' / 'google.cloud' namespace packages so
    # multiple google-cloud-* distributions can install side by side.
    namespace_packages=[
        'google',
        'google.cloud',
    ],
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    **SETUP_BASE
)
|
{
"content_hash": "8dd4831ccebd8fb1b7f60b05f14851cf",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 72,
"avg_line_length": 27.98148148148148,
"alnum_prop": 0.6194573130377233,
"repo_name": "Fkawala/gcloud-python",
"id": "98aea8dbe15b0ac807d0369380139b14dea6f2b3",
"size": "2087",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bigquery/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "89702"
},
{
"name": "Python",
"bytes": "3403274"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import requests
import json
import wget
import sys
import os
__author__ = "Vesselin Bontchev <vbontchev@yahoo.com>"
__license__ = "GPL"
__VERSION__ = "1.02"
def error(e):
    """Report *e* on stderr and abort the program with exit status -1."""
    message = "Error: %s." % e
    print(message, file=sys.stderr)
    sys.exit(-1)
def makeOutputDir(pageNum):
    """Ensure a zero-padded directory for *pageNum* exists; return its name.

    The directory is created in the current working directory and named
    with three digits (e.g. page 7 -> "007"). Any failure aborts via error().
    """
    dirName = str(pageNum).zfill(3)
    try:
        if not os.path.exists(dirName):
            os.mkdir(dirName)
    except Exception as e:
        error(e)
    return dirName
def downloadTheFiles(jsonData, hashes, elementsPerDir):
    """Download every unique file from *jsonData* whose MD5 is in *hashes*.

    :param jsonData: iterable of dicts with "url", "ext" and "md5" keys.
    :param hashes: collection of upper-case MD5 digests to fetch.
    :param elementsPerDir: when > 0 and jsonData has more entries than this,
        downloads are paginated into zero-padded sub-directories holding at
        most this many files each.
    """
    seen = set()  # MD5s already downloaded, so duplicates are skipped
    i = 0  # running count of downloads (for progress output only)
    paginate = False
    outputDir = ""
    elementNum = 1  # position within the current page directory
    if elementsPerDir > 0 and len(jsonData) > elementsPerDir:
        paginate = True
        pageNum = 1
        outputDir = makeOutputDir(pageNum)
    for element in jsonData:
        url = element["url"]
        ext = element["ext"]
        # Renamed from `hash`, which shadowed the builtin hash().
        md5Hash = element["md5"].upper()
        if md5Hash in hashes and md5Hash not in seen:
            seen.add(md5Hash)
            i += 1
            fileName = md5Hash + "." + ext
            if paginate:
                if elementNum > elementsPerDir:
                    # Current page is full; roll over to the next directory.
                    elementNum = 1
                    pageNum += 1
                    outputDir = makeOutputDir(pageNum)
                fileName = os.path.join(outputDir, fileName)
            print("[%d] %s -> %s" % (i, url, fileName), file=sys.stderr)
            try:
                wget.download(url, out=fileName)
            except Exception as e:
                error(e)
            # wget's progress bar leaves the cursor mid-line.
            print("")
            elementNum += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Downloads suspected malware from Wikileaks.")
    # ArgumentParser(version=...) was removed in Python 3; the supported
    # spelling is a --version argument with action="version".
    parser.add_argument("--version", action="version",
                        version="%(prog)s version " + __VERSION__)
    parser.add_argument("-e", "--elements", type=int, help="elements per page")
    parser.add_argument("jsonfile", help="JSON data file")
    parser.add_argument("notfoundhashes", help="file with MD5 hashes of unknown files")
    args = parser.parse_args()
    # args.elements is None when -e is omitted; comparing None < 1 raises
    # TypeError on Python 3, so treat missing/non-positive as "no pagination".
    elements = args.elements
    if elements is None or elements < 1:
        elements = 0
    try:
        with open(args.jsonfile, "r") as contentFile:
            content = contentFile.read()
        jsonData = json.loads(content)
        with open(args.notfoundhashes, "r") as hashFile:
            # A set gives O(1) membership tests in downloadTheFiles();
            # skipping blank lines keeps line.split()[0] from raising.
            hashes = {line.split()[0].upper()
                      for line in hashFile if line.strip()}
    except Exception as e:
        error(e)
    downloadTheFiles(jsonData, hashes, elements)
    sys.exit(0)
|
{
"content_hash": "0cb606778e20f9e2dde92def6671d3eb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 87,
"avg_line_length": 32,
"alnum_prop": 0.5775162337662337,
"repo_name": "bontchev/wlscrape",
"id": "c48fc243c7945d357c40eb7041023119119c8563",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/getnotfound.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6175"
}
],
"symlink_target": ""
}
|
import re
import string
from csmpe.plugins import CSMPlugin
from csmpe.core_plugins.csm_install_operations.ios_xe.utils import number_of_rsp
class Plugin(CSMPlugin):
    """
    This plugin checks if the node redundancy is sufficient to proceed the upgrade.
    """
    name = "Node Redundancy Check Plugin"
    platforms = {'ASR900'}
    phases = {'Pre-Upgrade', 'Post-Upgrade'}

    # (regex pattern, substring required in the captured group, bitmask flag)
    # for the five conditions that together indicate SSO readiness.
    # Raw strings avoid Py3 invalid-escape warnings for \( and \s.
    _SSO_CHECKS = [
        (r'my state = .* -(.*)', 'ACTIVE', 1),
        (r'peer state = .* -(.*)', 'STANDBY HOT', 2),
        (r'Redundancy Mode \(Operational\)\s+= (.*)', 'sso', 4),
        (r'Redundancy Mode \(Configured\)\s+= (.*)', 'sso', 8),
        (r'Redundancy State \s+ = (.*)', 'sso', 16),
    ]

    def run(self):
        """
        PAN-5201-ASR903#show redundancy states
        my state = 13 -ACTIVE
        peer state = 8 -STANDBY HOT
        Mode = Duplex
        Unit = Secondary
        Unit ID = 49
        Redundancy Mode (Operational) = sso
        Redundancy Mode (Configured) = sso
        Redundancy State = sso
        Maintenance Mode = Disabled
        Manual Swact = enabled
        Communications = Up
        client count = 107
        client_notification_TMR = 30000 milliseconds
        RF debug mask = 0x0
        """
        sub_platforms = ['ASR-903', 'ASR-907']
        if self.ctx._connection.platform not in sub_platforms:
            self.ctx.info("Node redundancy not supported by "
                          "{}".format(self.ctx._connection.platform))
            return
        rsp_count = number_of_rsp(self.ctx)
        if rsp_count != 2:
            # Redundancy needs exactly two RSPs.
            self.ctx.info("Node redundancy not supported by "
                          " number of RSP: {}".format(rsp_count))
            return
        cmd = "show redundancy states"
        output = self.ctx.send(cmd)
        if not output:
            self.ctx.error("Show redundancy output is insufficient.")
            return
        sso_ready = 0
        # str.split replaces the Python-2-only string.split() helper;
        # drop empty lines.
        lines = [x for x in output.split('\n') if x]
        for line in lines:
            for pattern, required, flag in self._SSO_CHECKS:
                m = re.search(pattern, line)
                if m:
                    # Only set the flag (and log) when the captured state
                    # contains the expected value; either way this line is
                    # consumed by the first matching pattern.
                    if required in m.group(1):
                        sso_ready |= flag
                        self.ctx.info('{}'.format(line.lstrip()))
                    break
        self.ctx.info("sso_ready = {}".format(sso_ready))
        # 31 == all five flags set (1|2|4|8|16).
        if sso_ready == 31:
            self.ctx.info("Router redundancy has reached SSO state.")
        else:
            self.ctx.warning("Router redundancy has not reached SSO state.")
|
{
"content_hash": "b7783c1e39acbbe9b19abc8df59fe146",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 83,
"avg_line_length": 32.77358490566038,
"alnum_prop": 0.4925158318940702,
"repo_name": "anushreejangid/csmpe-main",
"id": "7fd428f023372e4122ff56a73f7ece211faf81db",
"size": "4973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "csmpe/core_plugins/csm_redundancy_check/ios_xe/plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "451672"
},
{
"name": "Shell",
"bytes": "699"
}
],
"symlink_target": ""
}
|
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class EmergencyContactsPlugin(WillPlugin):
    # Stores and displays per-user emergency contact information.
    # NOTE: method docstrings double as the bot's help text, so they are
    # kept exactly as shipped.
    @respond_to("^set my contact info to (?P<contact_info>.*)", multiline=True)
    def set_my_info(self, message, contact_info=""):
        """set my contact info to ____: Set your emergency contact info."""
        stored = self.load("contact_info", {})
        sender = message.sender
        stored[sender.handle] = {
            "name": sender.name,
            "info": contact_info,
        }
        self.save("contact_info", stored)
        self.say("Got it.", message=message)
    @respond_to("^contact info$")
    def respond_to_contact_info(self, message):
        """contact info: Show everyone's emergency contact info."""
        context = {"contacts": self.load("contact_info", {})}
        contact_html = rendered_template("contact_info.html", context)
        self.say(contact_html, message=message)
|
{
"content_hash": "bcb6e0289bf86e8fe09fdefc752ffc80",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 108,
"avg_line_length": 40.34615384615385,
"alnum_prop": 0.6291706387035272,
"repo_name": "woohgit/will",
"id": "d890c61d2b1c88d8a9eec277d52d4f4e6ca20d84",
"size": "1049",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "will/plugins/devops/emergency_contacts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2008"
},
{
"name": "Python",
"bytes": "312884"
},
{
"name": "Shell",
"bytes": "1940"
}
],
"symlink_target": ""
}
|
import os
import sys
import tempfile
import time
import numpy as np
import pytest
import ray
import ray._private.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.test_utils import (wait_for_condition, SignalActor, init_error_pubsub,
get_error_message)
def test_unhandled_errors(ray_start_regular):
    """Unhandled task/actor errors trigger the unhandled-error hook exactly
    once each; handled (ray.get) and env-var-suppressed errors do not."""
    @ray.remote
    def f():
        raise ValueError()
    @ray.remote
    class Actor:
        def f(self):
            raise ValueError()
    a = Actor.remote()
    num_exceptions = 0
    def interceptor(e):
        # Counts invocations of the unhandled-error hook.
        nonlocal num_exceptions
        num_exceptions += 1
    # Test we report unhandled exceptions.
    ray.worker._unhandled_error_handler = interceptor
    x1 = f.remote()
    x2 = a.f.remote()
    del x1
    del x2
    wait_for_condition(lambda: num_exceptions == 2)
    # Test we don't report handled exceptions.
    x1 = f.remote()
    x2 = a.f.remote()
    with pytest.raises(ray.exceptions.RayError) as err:  # noqa
        ray.get([x1, x2])
    del x1
    del x2
    time.sleep(1)
    assert num_exceptions == 2, num_exceptions
    # Test suppression with env var works.
    try:
        os.environ["RAY_IGNORE_UNHANDLED_ERRORS"] = "1"
        x1 = f.remote()
        del x1
        time.sleep(1)
        assert num_exceptions == 2, num_exceptions
    finally:
        del os.environ["RAY_IGNORE_UNHANDLED_ERRORS"]
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
    """An error pushed through Redis surfaces on the driver's error pubsub
    with the original type and message."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    redis_client = ray._private.services.create_redis_client(
        address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    error_message = "Test error message"
    ray._private.utils.push_error_to_driver_through_redis(
        redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
    errors = get_error_message(error_pubsub, 1,
                               ray_constants.DASHBOARD_AGENT_DIED_ERROR)
    assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
    assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
    """ray.get over a batch raises as soon as one object errors, without
    waiting for the still-running task."""
    # We use an actor instead of functions here. If we use functions, it's
    # very likely that two normal tasks are submitted before the first worker
    # is registered to Raylet. Since `maximum_startup_concurrency` is 1,
    # the worker pool will wait for the registration of the first worker
    # and skip starting new workers. The result is, the two tasks will be
    # executed sequentially, which breaks an assumption of this test case -
    # the two tasks run in parallel.
    @ray.remote
    class Actor(object):
        def bad_func1(self):
            raise Exception("Test function intentionally failed.")
        def bad_func2(self):
            # Hard process exit -> RayActorError rather than RayTaskError.
            os._exit(0)
        def slow_func(self, signal):
            ray.get(signal.wait.remote())
    def expect_exception(objects, exception):
        with pytest.raises(ray.exceptions.RayError) as err:
            ray.get(objects)
        assert err.type is exception
    signal1 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func1.remote(),
         actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
    ray.get(signal1.send.remote())
    signal2 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func2.remote(),
         actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
    ray.get(signal2.send.remote())
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
    """A failing run_function_on_all_workers pushes one error per worker."""
    p = error_pubsub
    def f(worker):
        # Only fail on real workers, not on the driver.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")
    ray.worker.global_worker.run_function_on_all_workers(f)
    # Check that the error message is in the task info.
    errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
    assert "Function to run failed." in errors[0].error_message
    assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
    """An actor closing over a module workers cannot import pushes both a
    registration error and a method-execution error, and calls raise."""
    p = error_pubsub
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""
    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    # Only the driver can import this module; workers never see it.
    module = __import__(module_name)
    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()
        def get_val(self, arg1, arg2=3):
            return 1
    # There should be no errors yet.
    errors = get_error_message(p, 2)
    assert len(errors) == 0
    # Create an actor.
    foo = Foo.remote(3, arg2=0)
    errors = get_error_message(p, 2)
    assert len(errors) == 2
    for error in errors:
        # Wait for the error to arrive.
        if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
            assert "No module named" in error.error_message
        else:
            # Wait for the error from when the __init__ tries to run.
            assert ("failed to be imported, and so cannot execute this method"
                    in error.error_message)
    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))
    f.close()
    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
    """A raising constructor pushes a TASK_PUSH_ERROR, and later method
    calls surface the creation-task failure as RayActorError."""
    p = error_pubsub
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"
    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)
        def fail_method(self):
            raise Exception(error_message2)
    a = FailedActor.remote()
    # Make sure that we get errors from a failed constructor.
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert error_message1 in errors[0].error_message
    # Incoming methods will get the exception in creation task
    with pytest.raises(ray.exceptions.RayActorError) as e:
        ray.get(a.fail_method.remote())
    assert error_message1 in str(e.value)
def test_failed_actor_method(ray_start_regular, error_pubsub):
    """A raising actor method pushes a TASK_PUSH_ERROR with its message."""
    p = error_pubsub
    error_message2 = "actor method failed"
    @ray.remote
    class FailedActor:
        def __init__(self):
            pass
        def fail_method(self):
            raise Exception(error_message2)
    a = FailedActor.remote()
    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
    """Wrong arity and nonexistent methods raise instead of hanging."""
    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass
        def get_val(self, x):
            pass
    # Make sure that we get errors if we call the constructor incorrectly.
    # Create an actor with too few arguments.
    with pytest.raises(Exception):
        a = Actor.remote()
    # Create an actor with too many arguments.
    with pytest.raises(Exception):
        a = Actor.remote(1, 2)
    # Create an actor the correct number of arguments.
    a = Actor.remote(1)
    # Call a method with too few arguments.
    with pytest.raises(Exception):
        a.get_val.remote()
    # Call a method with too many arguments.
    with pytest.raises(Exception):
        a.get_val.remote(1, 2)
    # Call a method that doesn't exist.
    with pytest.raises(AttributeError):
        a.nonexistent_method()
    with pytest.raises(AttributeError):
        a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
    """An exception in the worker's post-task bookkeeping (not in the task
    body itself) is reported as WORKER_CRASH_PUSH_ERROR."""
    p = error_pubsub
    @ray.remote(max_calls=2)
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        worker = ray.worker.global_worker
        worker.function_actor_manager.increase_task_counter = None
    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()
    errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
    """A task that kills its own worker raises WorkerCrashedError and
    pushes a WORKER_DIED_PUSH_ERROR."""
    p = error_pubsub
    # Define a remote function that will kill the worker that runs it.
    @ray.remote(max_retries=0)
    def f():
        eval("exit()")
    with pytest.raises(ray.exceptions.WorkerCrashedError):
        ray.get(f.remote())
    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
    assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
    """Killing an actor's worker fails the pending call with RayActorError
    and fails downstream tasks that consume its output."""
    p = error_pubsub
    @ray.remote
    class Actor:
        def kill(self):
            eval("exit()")
    @ray.remote
    def consume(x):
        pass
    a = Actor.remote()
    [obj], _ = ray.wait([a.kill.remote()], timeout=5)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
    """Tasks queued both before and after the actor process is killed all
    fail, and exactly one worker-died error is pushed."""
    p = error_pubsub
    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()
        def sleep(self):
            time.sleep(1)
    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    tasks1 = [a.sleep.remote() for _ in range(10)]
    # SIGKILL the actor process while its queue is non-empty.
    os.kill(pid, 9)
    time.sleep(0.1)
    tasks2 = [a.sleep.remote() for _ in range(10)]
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)
    errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Calls submitted after the idle actor process died fail fast."""
    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()
    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    os.kill(pid, 9)
    time.sleep(0.1)
    task2 = a.getpid.remote()
    with pytest.raises(Exception):
        ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
                                                     error_pubsub):
    """Actors going out of scope or terminated via __ray_terminate__ must
    not push spurious errors."""
    p = error_pubsub
    @ray.remote
    class Actor:
        def __init__(self):
            # This log is added to debug a flaky test issue.
            print(os.getpid())
        def ping(self):
            pass
    a = Actor.remote()
    # Without this waiting, there seems to be race condition happening
    # in the CI. This is not a fundamental fix for that, but it at least
    # makes the test less flaky.
    ray.get(a.ping.remote())
    # Rebinding drops the first actor; the second is terminated explicitly.
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    errors = get_error_message(p, 1)
    assert len(errors) == 0, "Should not have propogated an error - {}".format(
        errors)
def test_exception_chain(ray_start_regular):
    """A nested remote failure reaches the driver as a RayTaskError that is
    also an instance of the original exception type (here ZeroDivisionError)."""
    @ray.remote
    def bar():
        return 1 / 0
    @ray.remote
    def foo():
        return ray.get(bar.remote())
    r = foo.remote()
    try:
        ray.get(r)
    except ZeroDivisionError as ex:
        assert isinstance(ex, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
    """(Skipped) Reconstructing an evicted task-argument object should push
    PUT_RECONSTRUCTION_PUSH_ERROR to the driver."""
    p = error_pubsub
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_arg_task.remote()
    # Make sure we receive the correct error message.
    errors = get_error_message(p, 1,
                               ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """(Skipped) Same scenario as test_put_error1 but seeding the chain
    with an explicit ray.put instead of a task output."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_task.remote()
    # Make sure we receive the correct error message.
    # get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happeds before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
    """(Skipped) Faking ray.__version__ should produce a version-mismatch
    error at ray.init time."""
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"
    ray.init(num_cpus=1)
    p = error_pubsub
    errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
    assert False, errors
    assert len(errors) == 1
    assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
    # Reset the version.
    ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
    """Closing over an object above the pickle warning threshold warns for
    both remote functions and actor definitions."""
    p = error_pubsub
    import ray.ray_constants as ray_constants
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
    @ray.remote
    def f():
        large_object
    # Invoke the function so that the definition is exported.
    f.remote()
    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
    @ray.remote
    class Foo:
        def __init__(self):
            large_object
    Foo.remote()
    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
def test_warning_all_tasks_blocked(shutdown_only):
    """A task blocked on actors that can never be scheduled triggers a
    RESOURCE_DEADLOCK_ERROR warning."""
    ray.init(
        num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
    p = init_error_pubsub()
    @ray.remote(num_cpus=1)
    class Foo:
        def f(self):
            return 0
    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(3)]
        for a in actors:
            ray.get(a.f.remote())
    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_actor_waiting_on_actor(shutdown_only):
    """Two num_cpus=1 actors on a single-CPU cluster cannot both be placed;
    a deadlock warning is pushed."""
    ray.init(
        num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
    p = init_error_pubsub()
    @ray.remote(num_cpus=1)
    class Actor:
        pass
    a = Actor.remote()  # noqa
    b = Actor.remote()  # noqa
    errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_task_waiting_on_actor(shutdown_only):
    """A task requesting the CPU already held by an actor deadlocks and
    pushes a RESOURCE_DEADLOCK_ERROR warning."""
    ray.init(
        num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
    p = init_error_pubsub()
    @ray.remote(num_cpus=1)
    class Actor:
        pass
    a = Actor.remote()  # noqa
    @ray.remote(num_cpus=1)
    def f():
        print("f running")
        time.sleep(999)
    ids = [f.remote()]  # noqa
    errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
if __name__ == "__main__":
    # Run this file's tests directly. `pytest` and `sys` are already
    # imported at module scope, so no local re-import is needed.
    sys.exit(pytest.main(["-v", __file__]))
|
{
"content_hash": "7c4737538edde7838dc8feb376e1066f",
"timestamp": "",
"source": "github",
"line_count": 622,
"max_line_length": 79,
"avg_line_length": 30.726688102893892,
"alnum_prop": 0.6381331100879029,
"repo_name": "pcmoritz/ray-1",
"id": "e0407a728994dafeae086ca89720b7e04a3e3611",
"size": "19112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_failure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Hashable,
Literal,
Mapping,
Sequence,
cast,
overload,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import lib
from pandas._libs.tslibs import (
Period,
Tick,
Timestamp,
to_offset,
)
from pandas._typing import (
Axis,
CompressionOptions,
Dtype,
DtypeArg,
DtypeObj,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
IndexLabel,
JSONSerializable,
Level,
Manager,
RandomState,
Renamer,
StorageOptions,
T,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
final,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError,
InvalidIndexError,
)
from pandas.util._decorators import (
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_ascending,
validate_bool_kwarg,
validate_fillna_kwargs,
)
from pandas.core.dtypes.common import (
ensure_object,
ensure_platform_int,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
arraylike,
indexing,
missing,
nanops,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
)
from pandas.core.describe import describe_ndframe
from pandas.core.flags import Flags
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
RangeIndex,
ensure_index,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
)
from pandas.core.internals.construction import mgr_to_mgr
from pandas.core.missing import find_valid_index
from pandas.core.ops import align_method_FRAME
from pandas.core.reshape.concat import concat
import pandas.core.sample as sample
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import get_indexer_indexer
from pandas.core.window import (
Expanding,
ExponentialMovingWindow,
Rolling,
Window,
)
from pandas.io.formats import format as fmt
from pandas.io.formats.format import (
DataFrameFormatter,
DataFrameRenderer,
)
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
from pandas.core.frame import DataFrame
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.indexers import BaseIndexer
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = {**_shared_docs}
_shared_doc_kwargs = {
"axes": "keywords for axes",
"klass": "Series/DataFrame",
"axes_single_arg": "int or labels for object",
"args_transpose": "axes to permute (int or label for object)",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, indexing.IndexingMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: list[str] = [
"_mgr",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
"_flags",
]
_internal_names_set: set[str] = set(_internal_names)
_accessors: set[str] = set()
_hidden_attrs: frozenset[str] = frozenset(
["_AXIS_NAMES", "_AXIS_NUMBERS", "get_values", "tshift"]
)
_metadata: list[str] = []
_is_copy: weakref.ReferenceType[NDFrame] | None = None
_mgr: Manager
_attrs: dict[Hashable, Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: Manager,
copy: bool_t = False,
attrs: Mapping[Hashable, Any] | None = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
    @classmethod
    def _init_mgr(
        cls,
        mgr: Manager,
        axes,
        dtype: Dtype | None = None,
        copy: bool_t = False,
    ) -> Manager:
        """
        Reindex, copy, and cast a manager per the given axes/dtype/copy args.

        Parameters
        ----------
        mgr : Manager
            The manager to adapt.
        axes : dict
            Mapping of axis name -> labels (or None to leave that axis alone).
        dtype : Dtype, optional
            Target dtype; skipped when the manager already holds it.
        copy : bool, default False
            Whether to copy the manager before returning.
        """
        for a, axe in axes.items():
            if axe is not None:
                axe = ensure_index(axe)
                bm_axis = cls._get_block_manager_axis(a)
                mgr = mgr.reindex_axis(axe, axis=bm_axis)
        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if (
                isinstance(mgr, BlockManager)
                and len(mgr.blocks) == 1
                and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
            ):
                # Single block already in the target dtype: nothing to do.
                pass
            else:
                mgr = mgr.astype(dtype=dtype)
        return mgr
@classmethod
def _from_mgr(cls, mgr: Manager):
"""
Fastpath to create a new DataFrame/Series from just a BlockManager/ArrayManager.
Notes
-----
Skips setting `_flags` attribute; caller is responsible for doing so.
"""
obj = cls.__new__(cls)
object.__setattr__(obj, "_is_copy", None)
object.__setattr__(obj, "_mgr", mgr)
object.__setattr__(obj, "_item_cache", {})
object.__setattr__(obj, "_attrs", {})
return obj
def _as_manager(
self: FrameOrSeries, typ: str, copy: bool_t = True
) -> FrameOrSeries:
"""
Private helper function to create a DataFrame with specific manager.
Parameters
----------
typ : {"block", "array"}
copy : bool, default True
Only controls whether the conversion from Block->ArrayManager
copies the 1D arrays (to ensure proper/contiguous memory layout).
Returns
-------
DataFrame
New DataFrame using specified manager type. Is not guaranteed
to be a copy or not.
"""
new_mgr: Manager
new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
# fastpath of passing a manager doesn't check the option/manager class
return self._constructor(new_mgr).__finalize__(self)
# ----------------------------------------------------------------------
# attrs and flags
@property
def attrs(self) -> dict[Hashable, Any]:
"""
Dictionary of global attributes of this dataset.
.. warning::
attrs is experimental and may change without warning.
See Also
--------
DataFrame.flags : Global flags applying to this object.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
    @attrs.setter
    def attrs(self, value: Mapping[Hashable, Any]) -> None:
        # Copy into a plain dict so later mutation of *value* has no effect here.
        self._attrs = dict(value)
    @final
    @property
    def flags(self) -> Flags:
        """
        Get the properties associated with this pandas object.

        The available flags are

        * :attr:`Flags.allows_duplicate_labels`

        See Also
        --------
        Flags : Flags that apply to pandas objects.
        DataFrame.attrs : Global metadata applying to this dataset.

        Notes
        -----
        "Flags" differ from "metadata". Flags reflect properties of the
        pandas object (the Series or DataFrame). Metadata refer to properties
        of the dataset, and should be stored in :attr:`DataFrame.attrs`.

        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2]})
        >>> df.flags
        <Flags(allows_duplicate_labels=True)>

        Flags can be get or set using ``.``

        >>> df.flags.allows_duplicate_labels
        True
        >>> df.flags.allows_duplicate_labels = False

        Or by slicing with a key

        >>> df.flags["allows_duplicate_labels"]
        False
        >>> df.flags["allows_duplicate_labels"] = True
        """
        # _flags is set once in __init__ / set by _from_mgr callers.
        return self._flags
@final
def set_flags(
self: FrameOrSeries,
*,
copy: bool_t = False,
allows_duplicate_labels: bool_t | None = None,
) -> FrameOrSeries:
"""
Return a new object with updated flags.
Parameters
----------
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
Returns
-------
Series or DataFrame
The same type as the caller.
See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.
Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.
This method is intended to be used in method chains.
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
"""
df = self.copy(deep=copy)
if allows_duplicate_labels is not None:
df.flags["allows_duplicate_labels"] = allows_duplicate_labels
return df
@final
@classmethod
def _validate_dtype(cls, dtype) -> DtypeObj | None:
"""validate the passed dtype"""
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
    @property
    def _constructor(self: FrameOrSeries) -> type[FrameOrSeries]:
        """
        Used when a manipulation result has the same dimensions as the
        original.
        """
        # Abstract: concrete subclasses (DataFrame/Series) return their own type.
        raise AbstractMethodError(self)
# ----------------------------------------------------------------------
# Internals
    @final
    @property
    def _data(self):
        # GH#33054 retained because some downstream packages uses this,
        # e.g. fastparquet
        # Deprecated alias for the internal manager; prefer _mgr internally.
        return self._mgr
# ----------------------------------------------------------------------
# Axis
_stat_axis_number = 0
_stat_axis_name = "index"
_AXIS_ORDERS: list[str]
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0}
_AXIS_REVERSED: bool_t
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
@property
def _AXIS_NUMBERS(self) -> dict[str, int]:
""".. deprecated:: 1.1.0"""
level = self.ndim + 1
warnings.warn(
"_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=level
)
return {"index": 0}
@property
def _AXIS_NAMES(self) -> dict[int, str]:
""".. deprecated:: 1.1.0"""
level = self.ndim + 1
warnings.warn(
"_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=level
)
return {0: "index"}
@final
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
    @final
    @classmethod
    def _construct_axes_from_arguments(
        cls, args, kwargs, require_all: bool_t = False, sentinel=None
    ):
        """
        Construct and returns axes if supplied in args/kwargs.

        If require_all, raise if all axis arguments are not supplied
        return a tuple of (axes, kwargs).

        sentinel specifies the default parameter when an axis is not
        supplied; useful to distinguish when a user explicitly passes None
        in scenarios where None has special meaning.
        """
        # construct the args
        args = list(args)
        for a in cls._AXIS_ORDERS:
            # look for a argument by position
            if a not in kwargs:
                try:
                    # Consume positional args in axis order.
                    kwargs[a] = args.pop(0)
                except IndexError as err:
                    if require_all:
                        raise TypeError(
                            "not enough/duplicate arguments specified!"
                        ) from err
        # Missing axes get the sentinel; remaining kwargs pass through untouched.
        axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
        return axes, kwargs
@final
@classmethod
def _get_axis_number(cls, axis: Axis) -> int:
try:
return cls._AXIS_TO_AXIS_NUMBER[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@final
@classmethod
def _get_axis_name(cls, axis: Axis) -> str:
axis_number = cls._get_axis_number(axis)
return cls._AXIS_ORDERS[axis_number]
@final
def _get_axis(self, axis: Axis) -> Index:
axis_number = self._get_axis_number(axis)
assert axis_number in {0, 1}
return self.index if axis_number == 0 else self.columns
@final
@classmethod
def _get_block_manager_axis(cls, axis: Axis) -> int:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
    @final
    def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]:
        """
        Map level names of the given axis ('index' or 'columns') to Series of
        their values; unnamed levels get 'ilevel_N'/'clevel_N' placeholders.
        """
        # index or columns
        axis_index = getattr(self, axis)
        d = {}
        prefix = axis[0]
        for i, name in enumerate(axis_index.names):
            if name is not None:
                key = level = name
            else:
                # prefix with 'i' or 'c' depending on the input axis
                # e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiiindex
                key = f"{prefix}level_{i}"
                level = i
            level_values = axis_index.get_level_values(level)
            s = level_values.to_series()
            # Re-key the level values by the full axis so they align row-wise.
            s.index = axis_index
            d[key] = s
        # put the index/columns itself in the dict
        if isinstance(axis_index, MultiIndex):
            dindex = axis_index
        else:
            dindex = axis_index.to_series()
        d[axis] = dindex
        return d
@final
def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]:
from pandas.core.computation.parsing import clean_column_name
d: dict[str, Series | MultiIndex] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
@final
def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
    @property
    def _info_axis(self) -> Index:
        # The "info" axis: the axis holding item labels (columns for DataFrame).
        return getattr(self, self._info_axis_name)
    @property
    def _stat_axis(self) -> Index:
        # Default axis for statistical reductions (the row index).
        return getattr(self, self._stat_axis_name)
    @property
    def shape(self) -> tuple[int, ...]:
        """
        Return a tuple of axis dimensions
        """
        # One length per axis, in _AXIS_ORDERS order: (nrows,) or (nrows, ncols).
        return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
    @property
    def axes(self) -> list[Index]:
        """
        Return index label(s) of the internal NDFrame
        """
        # we do it this way because if we have reversed axes, then
        # the block manager shows then reversed
        return [self._get_axis(a) for a in self._AXIS_ORDERS]
    @property
    def ndim(self) -> int:
        """
        Return an int representing the number of axes / array dimensions.

        Return 1 if Series. Otherwise return 2 if DataFrame.

        See Also
        --------
        ndarray.ndim : Number of array dimensions.

        Examples
        --------
        >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
        >>> s.ndim
        1

        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.ndim
        2
        """
        # Delegate to the underlying manager's dimensionality.
        return self._mgr.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
    # Typing-only @overload stubs for set_axis: the return type is the object
    # when inplace=False and None when inplace=True. The implementation follows.
    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis = ..., inplace: Literal[False] = ...
    ) -> FrameOrSeries:
        ...

    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis, inplace: Literal[True]
    ) -> None:
        ...

    @overload
    def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None:
        ...

    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ...
    ) -> FrameOrSeries | None:
        ...
    def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False):
        """
        Assign desired index to given axis.

        Indexes for%(extended_summary_sub)s row labels can be changed by assigning
        a list-like or Index.

        Parameters
        ----------
        labels : list-like, Index
            The values for the new index.

        axis : %(axes_single_arg)s, default 0
            The axis to update. The value 0 identifies the rows%(axis_description_sub)s.

        inplace : bool, default False
            Whether to return a new %(klass)s instance.

        Returns
        -------
        renamed : %(klass)s or None
            An object of type %(klass)s or None if ``inplace=True``.

        See Also
        --------
        %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
        """
        # Validate inplace against allows_duplicate_labels, then delegate.
        self._check_inplace_and_allows_duplicate_labels(inplace)
        return self._set_axis_nocheck(labels, axis, inplace)
@final
def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
    def _set_axis(self, axis: int, labels: Index) -> None:
        """Set axis *axis* on the manager and drop the stale item cache."""
        labels = ensure_index(labels)
        self._mgr.set_axis(axis, labels)
        self._clear_item_cache()
    @final
    def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
        """
        Interchange axes and swap values axes appropriately.

        Parameters
        ----------
        axis1, axis2 : int or str
            The two axes to swap.
        copy : bool, default True
            Whether to copy the underlying values.

        Returns
        -------
        y : same as input
        """
        i = self._get_axis_number(axis1)
        j = self._get_axis_number(axis2)
        # Swapping an axis with itself: return self (or a copy of it).
        if i == j:
            if copy:
                return self.copy()
            return self
        mapping = {i: j, j: i}
        new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
        new_values = self.values.swapaxes(i, j)
        if copy:
            new_values = new_values.copy()
        # ignore needed because of NDFrame constructor is different than
        # DataFrame/Series constructors.
        return self._constructor(
            # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected
            # "Union[ArrayManager, BlockManager]"
            # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index,
            # None, None]"; expected "bool" [arg-type]
            # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index,
            # None, None]"; expected "Optional[Mapping[Hashable, Any]]"
            new_values,  # type: ignore[arg-type]
            *new_axes,  # type: ignore[arg-type]
        ).__finalize__(self, method="swapaxes")
@final
@doc(klass=_shared_doc_kwargs["klass"])
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return {klass} with requested index / column level(s) removed.
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
Axis along which the level(s) is removed:
* 0 or 'index': remove level(s) in column.
* 1 or 'columns': remove level(s) in row.
Returns
-------
{klass}
{klass} with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
return self.set_axis(new_labels, axis=axis, inplace=False)
    def pop(self, item: Hashable) -> Series | Any:
        """Return *item* and remove it from this object."""
        result = self[item]
        del self[item]
        return result
@final
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
    def rename(
        self: FrameOrSeries,
        mapper: Renamer | None = None,
        *,
        index: Renamer | None = None,
        columns: Renamer | None = None,
        axis: Axis | None = None,
        copy: bool_t = True,
        inplace: bool_t = False,
        level: Level | None = None,
        errors: str = "ignore",
    ) -> FrameOrSeries | None:
        """
        Alter axes input function or functions. Function / dict values must be
        unique (1-to-1). Labels not contained in a dict / Series will be left
        as-is. Extra labels listed don't throw an error. Alternatively, change
        ``Series.name`` with a scalar value (Series only).

        Parameters
        ----------
        %(axes)s : scalar, list-like, dict-like or function, optional
            Scalar or list-like will alter the ``Series.name`` attribute,
            and raise on DataFrame.
            dict-like or functions are transformations to apply to
            that axis' values
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Whether to return a new {klass}. If True then value of copy is
            ignored.
        level : int or level name, default None
            In case of a MultiIndex, only rename labels in the specified
            level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
            or `columns` contains labels that are not present in the Index
            being transformed.
            If 'ignore', existing keys will be renamed and extra keys will be
            ignored.

        Returns
        -------
        renamed : {klass} (new object)

        Raises
        ------
        KeyError
            If any of the labels is not found in the selected axis and
            "errors='raise'".

        See Also
        --------
        NDFrame.rename_axis

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64

        Since ``DataFrame`` doesn't have a ``.name`` attribute,
        only mapping-type arguments are allowed.

        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        >>> df.rename(2)
        Traceback (most recent call last):
        ...
        TypeError: 'int' object is not callable

        ``DataFrame.rename`` supports two calling conventions

        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``

        We *highly* recommend using keyword arguments to clarify your
        intent.

        >>> df.rename(index=str, columns={"A": "a", "B": "c"})
           a  c
        0  1  4
        1  2  5
        2  3  6

        >>> df.rename(index=str, columns={"A": "a", "C": "c"})
           a  B
        0  1  4
        1  2  5
        2  3  6

        Using axis-style parameters

        >>> df.rename(str.lower, axis='columns')
           a  b
        0  1  4
        1  2  5
        2  3  6

        >>> df.rename({1: 2, 2: 4}, axis='index')
           A  B
        0  1  4
        2  2  5
        4  3  6

        See the :ref:`user guide <basics.rename>` for more.
        """
        # Exactly one calling convention is allowed: either mapper(+axis),
        # or explicit index=/columns= keywords — never both.
        if mapper is None and index is None and columns is None:
            raise TypeError("must pass an index to rename")

        if index is not None or columns is not None:
            if axis is not None:
                raise TypeError(
                    "Cannot specify both 'axis' and any of 'index' or 'columns'"
                )
            elif mapper is not None:
                raise TypeError(
                    "Cannot specify both 'mapper' and any of 'index' or 'columns'"
                )
        else:
            # use the mapper argument
            if axis and self._get_axis_number(axis) == 1:
                columns = mapper
            else:
                index = mapper

        self._check_inplace_and_allows_duplicate_labels(inplace)
        result = self if inplace else self.copy(deep=copy)

        # Apply replacements axis by axis: axis_no 0 -> index, 1 -> columns.
        for axis_no, replacements in enumerate((index, columns)):
            if replacements is None:
                continue

            ax = self._get_axis(axis_no)
            f = com.get_rename_function(replacements)

            if level is not None:
                level = ax._get_level_number(level)

            # GH 13473
            if not callable(replacements):
                indexer = ax.get_indexer_for(replacements)
                if errors == "raise" and len(indexer[indexer == -1]):
                    missing_labels = [
                        label
                        for index, label in enumerate(replacements)
                        if indexer[index] == -1
                    ]
                    raise KeyError(f"{missing_labels} not found in axis")

            new_index = ax._transform_index(f, level=level)
            result._set_axis_nocheck(new_index, axis=axis_no, inplace=True)
            result._clear_item_cache()

        if inplace:
            self._update_inplace(result)
            return None
        else:
            return result.__finalize__(self, method="rename")
    @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
    def rename_axis(self, mapper=lib.no_default, **kwargs):
        """
        Set the name of the axis for the index or columns.

        Parameters
        ----------
        mapper : scalar, list-like, optional
            Value to set the axis name attribute.
        index, columns : scalar, list-like, dict-like or function, optional
            A scalar, list-like, dict-like or functions transformations to
            apply to that axis' values.
            Note that the ``columns`` parameter is not allowed if the
            object is a Series. This parameter only apply for DataFrame
            type objects.

            Use either ``mapper`` and ``axis`` to
            specify the axis to target with ``mapper``, or ``index``
            and/or ``columns``.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to rename.
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Series
            or DataFrame.

        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or None if ``inplace=True``.

        See Also
        --------
        Series.rename : Alter Series index labels or name.
        DataFrame.rename : Alter DataFrame index labels or name.
        Index.rename : Set new names on index.

        Notes
        -----
        ``DataFrame.rename_axis`` supports two calling conventions

        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``

        The first calling convention will only modify the names of
        the index and/or the names of the Index object that is the columns.
        In this case, the parameter ``copy`` is ignored.

        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
        However, if mapper is dict-like or a function, it will use the
        deprecated behavior of modifying the axis *labels*.

        We *highly* recommend using keyword arguments to clarify your
        intent.

        Examples
        --------
        **Series**

        >>> s = pd.Series(["dog", "cat", "monkey"])
        >>> s
        0       dog
        1       cat
        2    monkey
        dtype: object
        >>> s.rename_axis("animal")
        animal
        0    dog
        1    cat
        2    monkey
        dtype: object

        **DataFrame**

        >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
        ...                    "num_arms": [0, 0, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs  num_arms
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("animal")
        >>> df
                num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("limbs", axis="columns")
        >>> df
        limbs   num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2

        **MultiIndex**

        >>> df.index = pd.MultiIndex.from_product([['mammal'],
        ...                                        ['dog', 'cat', 'monkey']],
        ...                                       names=['type', 'name'])
        >>> df
        limbs          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        >>> df.rename_axis(index={'type': 'class'})
        limbs          num_legs  num_arms
        class  name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        >>> df.rename_axis(columns=str.upper)
        LIMBS          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        """
        # Pull index=/columns= keywords out of kwargs into an axes dict;
        # lib.no_default distinguishes "not passed" from an explicit None.
        axes, kwargs = self._construct_axes_from_arguments(
            (), kwargs, sentinel=lib.no_default
        )
        copy = kwargs.pop("copy", True)
        inplace = kwargs.pop("inplace", False)
        axis = kwargs.pop("axis", 0)
        if axis is not None:
            axis = self._get_axis_number(axis)

        if kwargs:
            raise TypeError(
                "rename_axis() got an unexpected keyword "
                f'argument "{list(kwargs.keys())[0]}"'
            )

        inplace = validate_bool_kwarg(inplace, "inplace")

        if mapper is not lib.no_default:
            # Use v0.23 behavior if a scalar or list
            non_mapper = is_scalar(mapper) or (
                is_list_like(mapper) and not is_dict_like(mapper)
            )
            if non_mapper:
                return self._set_axis_name(mapper, axis=axis, inplace=inplace)
            else:
                raise ValueError("Use `.rename` to alter labels with a mapper.")
        else:
            # Use new behavior. Means that index and/or columns
            # is specified
            result = self if inplace else self.copy(deep=copy)

            for axis in range(self._AXIS_LEN):
                v = axes.get(self._get_axis_name(axis))
                if v is lib.no_default:
                    continue
                # Scalars/lists set names directly; dicts/functions transform
                # the existing names one by one.
                non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
                if non_mapper:
                    newnames = v
                else:
                    f = com.get_rename_function(v)
                    curnames = self._get_axis(axis).names
                    newnames = [f(name) for name in curnames]
                result._set_axis_name(newnames, axis=axis, inplace=True)
            if not inplace:
                return result
@final
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
@final
def _indexed_same(self, other) -> bool_t:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
@final
def equals(self, other: object) -> bool_t:
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal.
The row/column index do not need to have the same type, as long
as the values are considered equal. Corresponding columns must be of
the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not (isinstance(other, type(self)) or isinstance(self, type(other))):
return False
other = cast(NDFrame, other)
return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
# Unary Methods
@final
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
@final
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(
"Unary plus expects bool, numeric, timedelta, "
f"or object dtype, not {values.dtype}"
)
return self.__array_wrap__(arr)
@final
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._mgr.apply(operator.invert)
return self._constructor(new_data).__finalize__(self, method="__invert__")
@final
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
@final
def bool(self):
"""
Return the bool of a single element Series or DataFrame.
This must be a boolean scalar value, either True or False. It will raise a
ValueError if the Series or DataFrame does not have exactly 1 element, or that
element is not boolean (integer values 0 and 1 will also raise an exception).
Returns
-------
bool
The value in the Series or DataFrame.
See Also
--------
Series.astype : Change the data type of a Series, including to boolean.
DataFrame.astype : Change the data type of a DataFrame, including to boolean.
numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.
Examples
--------
The method will only work for single element objects with a boolean value:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False
>>> pd.DataFrame({'col': [True]}).bool()
True
>>> pd.DataFrame({'col': [False]}).bool()
False
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
@final
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
@final
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
@final
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
@final
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key : str
Potential label name
axis : int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
@final
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key : str
Potential label or level name
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
@final
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key : str or object
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
@final
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key : str
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values : np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
)
return values
@final
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys : str or list of str
labels or levels to drop
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: None # type: ignore[assignment]
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
    @doc(items)
    def iteritems(self):
        # Thin alias: delegates to ``items`` (docstring shared via @doc).
        return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
@final
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
    def __array_wrap__(
        self,
        result: np.ndarray,
        context: tuple[Callable, tuple[Any, ...], int] | None = None,
    ):
        """
        Gets called after a ufunc and other functions.

        Parameters
        ----------
        result: np.ndarray
            The result of the ufunc or other function called on the NumPy array
            returned by __array__
        context: tuple of (func, tuple, int)
            This parameter is returned by ufuncs as a 3-element tuple: (name of the
            ufunc, arguments of the ufunc, domain of the ufunc), but is not set by
            other numpy functions.

        Notes
        -----
        Series implements __array_ufunc__ so this is not called for ufuncs
        on Series.
        """
        # 0-d arrays are unwrapped to a plain scalar before the dispatch.
        res = lib.item_from_zerodim(result)
        if is_scalar(res):
            # e.g. we get here with np.ptp(series)
            # ptp also requires the item_from_zerodim
            return res
        # Re-wrap the array with this object's axes so labels are preserved.
        d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
        # error: Argument 1 to "NDFrame" has incompatible type "ndarray";
        # expected "BlockManager"
        return self._constructor(res, **d).__finalize__(  # type: ignore[arg-type]
            self, method="__array_wrap__"
        )
    def __array_ufunc__(
        self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any
    ):
        # Delegate all NumPy ufunc handling to the shared pandas
        # ``arraylike.array_ufunc`` helper.
        return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
@final
def __getstate__(self) -> dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return {
"_mgr": self._mgr,
"_typ": self._typ,
"_metadata": self._metadata,
"attrs": self.attrs,
"_flags": {k: self.flags[k] for k in self.flags._keys},
**meta,
}
    @final
    def __setstate__(self, state):
        """Pickle support: restore state produced by ``__getstate__``."""
        if isinstance(state, BlockManager):
            # Oldest supported format: the manager was pickled directly.
            self._mgr = state
        elif isinstance(state, dict):
            if "_data" in state and "_mgr" not in state:
                # compat for older pickles
                state["_mgr"] = state.pop("_data")
            typ = state.get("_typ")
            if typ is not None:
                # ``_attrs``/``_flags`` may be absent in older pickles;
                # fall back to empty attrs and default flags.
                attrs = state.get("_attrs", {})
                object.__setattr__(self, "_attrs", attrs)
                flags = state.get("_flags", {"allows_duplicate_labels": True})
                object.__setattr__(self, "_flags", Flags(self, **flags))

                # set in the order of internal names
                # to avoid definitional recursion
                # e.g. say fill_value needing _mgr to be
                # defined
                meta = set(self._internal_names + self._metadata)
                for k in list(meta):
                    if k in state and k != "_flags":
                        v = state[k]
                        # ``object.__setattr__`` bypasses pandas' attribute
                        # interception during reconstruction.
                        object.__setattr__(self, k, v)

                # Any remaining (non-meta) entries are set afterwards.
                for k, v in state.items():
                    if k not in meta:
                        object.__setattr__(self, k, v)

            else:
                raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        elif len(state) == 2:
            raise NotImplementedError("Pre-0.12 pickles are no longer supported")

        # Any cached item references are stale after unpickling.
        self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
@final
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
@final
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
as_json = data.to_json(orient="table")
as_json = cast(str, as_json)
return json.loads(as_json, object_pairs_hook=collections.OrderedDict)
# ----------------------------------------------------------------------
# I/O Methods
    @final
    @doc(klass="object", storage_options=_shared_docs["storage_options"])
    def to_excel(
        self,
        excel_writer,
        sheet_name: str = "Sheet1",
        na_rep: str = "",
        float_format: str | None = None,
        columns=None,
        header=True,
        index=True,
        index_label=None,
        startrow=0,
        startcol=0,
        engine=None,
        merge_cells=True,
        encoding=None,
        inf_rep="inf",
        verbose=True,
        freeze_panes=None,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Write {klass} to an Excel sheet.
        To write a single {klass} to an Excel .xlsx file it is only necessary to
        specify a target file name. To write to multiple sheets it is necessary to
        create an `ExcelWriter` object with a target file name, and specify a sheet
        in the file to write to.
        Multiple sheets may be written to by specifying unique `sheet_name`.
        With all data written to the file it is necessary to save the changes.
        Note that creating an `ExcelWriter` object with a file name that already
        exists will result in the contents of the existing file being erased.
        Parameters
        ----------
        excel_writer : path-like, file-like, or ExcelWriter object
            File path or existing ExcelWriter.
        sheet_name : str, default 'Sheet1'
            Name of sheet which will contain DataFrame.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, optional
            Format string for floating point numbers. For example
            ``float_format="%.2f"`` will format 0.1234 to 0.12.
        columns : sequence or list of str, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of string is given it is
            assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        index_label : str or sequence, optional
            Column label for index column(s) if desired. If not specified, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the DataFrame uses MultiIndex.
        startrow : int, default 0
            Upper left cell row to dump data frame.
        startcol : int, default 0
            Upper left cell column to dump data frame.
        engine : str, optional
            Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
            ``io.excel.xlsm.writer``.
            .. deprecated:: 1.2.0
                As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer
                maintained, the ``xlwt`` engine will be removed in a future version
                of pandas.
        merge_cells : bool, default True
            Write MultiIndex and Hierarchical Rows as merged cells.
        encoding : str, optional
            Encoding of the resulting excel file. Only necessary for xlwt,
            other writers support unicode natively.
        inf_rep : str, default 'inf'
            Representation for infinity (there is no native representation for
            infinity in Excel).
        verbose : bool, default True
            Display more information in the error logs.
        freeze_panes : tuple of int (length 2), optional
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen.
        {storage_options}
            .. versionadded:: 1.2.0
        See Also
        --------
        to_csv : Write DataFrame to a comma-separated values (csv) file.
        ExcelWriter : Class for writing DataFrame objects into excel sheets.
        read_excel : Read an Excel file into a pandas DataFrame.
        read_csv : Read a comma-separated values (csv) file into DataFrame.
        Notes
        -----
        For compatibility with :meth:`~DataFrame.to_csv`,
        to_excel serializes lists and dicts to strings before writing.
        Once a workbook has been saved it is not possible to write further
        data without rewriting the whole workbook.
        Examples
        --------
        Create, write to and save a workbook:
        >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
        ...                    index=['row 1', 'row 2'],
        ...                    columns=['col 1', 'col 2'])
        >>> df1.to_excel("output.xlsx")  # doctest: +SKIP
        To specify the sheet name:
        >>> df1.to_excel("output.xlsx",
        ...              sheet_name='Sheet_name_1')  # doctest: +SKIP
        If you wish to write to more than one sheet in the workbook, it is
        necessary to specify an ExcelWriter object:
        >>> df2 = df1.copy()
        >>> with pd.ExcelWriter('output.xlsx') as writer:  # doctest: +SKIP
        ...     df1.to_excel(writer, sheet_name='Sheet_name_1')
        ...     df2.to_excel(writer, sheet_name='Sheet_name_2')
        ExcelWriter can also be used to append to an existing Excel file:
        >>> with pd.ExcelWriter('output.xlsx',
        ...                     mode='a') as writer:  # doctest: +SKIP
        ...     df.to_excel(writer, sheet_name='Sheet_name_3')
        To set the library that is used to write the Excel file,
        you can pass the `engine` keyword (the default engine is
        automatically chosen depending on the file extension):
        >>> df1.to_excel('output1.xlsx', engine='xlsxwriter')  # doctest: +SKIP
        """
        # Series have no columns; promote to a one-column DataFrame so a
        # single ExcelFormatter code path handles both types.
        df = self if isinstance(self, ABCDataFrame) else self.to_frame()

        # Deferred import: pandas.io.formats.excel is only needed here.
        from pandas.io.formats.excel import ExcelFormatter

        # NOTE(review): ``encoding`` and ``verbose`` are accepted but not
        # used in this body — presumably consumed elsewhere or deprecated;
        # confirm before relying on them.
        # Cell formatting (NaN/inf representation, floats, header/index
        # layout) is handled by ExcelFormatter...
        formatter = ExcelFormatter(
            df,
            na_rep=na_rep,
            cols=columns,
            header=header,
            float_format=float_format,
            index=index,
            index_label=index_label,
            merge_cells=merge_cells,
            inf_rep=inf_rep,
        )
        # ...while writer/engine selection and the actual output happen in
        # ``formatter.write``.
        formatter.write(
            excel_writer,
            sheet_name=sheet_name,
            startrow=startrow,
            startcol=startcol,
            freeze_panes=freeze_panes,
            engine=engine,
            storage_options=storage_options,
        )
    @final
    @doc(storage_options=_shared_docs["storage_options"])
    def to_json(
        self,
        path_or_buf: FilePathOrBuffer | None = None,
        orient: str | None = None,
        date_format: str | None = None,
        double_precision: int = 10,
        force_ascii: bool_t = True,
        date_unit: str = "ms",
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        lines: bool_t = False,
        compression: CompressionOptions = "infer",
        index: bool_t = True,
        indent: int | None = None,
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Convert the object to a JSON string.
        Note NaN's and None will be converted to null and datetime objects
        will be converted to UNIX timestamps.
        Parameters
        ----------
        path_or_buf : str or file handle, optional
            File path or object. If not specified, the result is returned as
            a string.
        orient : str
            Indication of expected JSON string format.
            * Series:
                - default is 'index'
                - allowed values are: {{'split', 'records', 'index', 'table'}}.
            * DataFrame:
                - default is 'columns'
                - allowed values are: {{'split', 'records', 'index', 'columns',
                  'values', 'table'}}.
            * The format of the JSON string:
                - 'split' : dict like {{'index' -> [index], 'columns' -> [columns],
                  'data' -> [values]}}
                - 'records' : list like [{{column -> value}}, ... , {{column -> value}}]
                - 'index' : dict like {{index -> {{column -> value}}}}
                - 'columns' : dict like {{column -> {{index -> value}}}}
                - 'values' : just the values array
                - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}}
                Describing the data, where data component is like ``orient='records'``.
        date_format : {{None, 'epoch', 'iso'}}
            Type of date conversion. 'epoch' = epoch milliseconds,
            'iso' = ISO8601. The default depends on the `orient`. For
            ``orient='table'``, the default is 'iso'. For all other orients,
            the default is 'epoch'.
        double_precision : int, default 10
            The number of decimal places to use when encoding
            floating point values.
        force_ascii : bool, default True
            Force encoded string to be ASCII.
        date_unit : str, default 'ms' (milliseconds)
            The time unit to encode to, governs timestamp and ISO8601
            precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
            microsecond, and nanosecond respectively.
        default_handler : callable, default None
            Handler to call if object cannot otherwise be converted to a
            suitable format for JSON. Should receive a single argument which is
            the object to convert and return a serialisable object.
        lines : bool, default False
            If 'orient' is 'records' write out line-delimited json format. Will
            throw ValueError if incorrect 'orient' since others are not
            list-like.
        compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}
            A string representing the compression to use in the output file,
            only used when the first argument is a filename. By default, the
            compression is inferred from the filename.
        index : bool, default True
            Whether to include the index values in the JSON string. Not
            including the index (``index=False``) is only supported when
            orient is 'split' or 'table'.
        indent : int, optional
            Length of whitespace used to indent each record.
            .. versionadded:: 1.0.0
        {storage_options}
            .. versionadded:: 1.2.0
        Returns
        -------
        None or str
            If path_or_buf is None, returns the resulting json format as a
            string. Otherwise returns None.
        See Also
        --------
        read_json : Convert a JSON string to pandas object.
        Notes
        -----
        The behavior of ``indent=0`` varies from the stdlib, which does not
        indent the output but does insert newlines. Currently, ``indent=0``
        and the default ``indent=None`` are equivalent in pandas, though this
        may change in a future release.
        ``orient='table'`` contains a 'pandas_version' field under 'schema'.
        This stores the version of `pandas` used in the latest revision of the
        schema.
        Examples
        --------
        >>> import json
        >>> df = pd.DataFrame(
        ...     [["a", "b"], ["c", "d"]],
        ...     index=["row 1", "row 2"],
        ...     columns=["col 1", "col 2"],
        ... )
        >>> result = df.to_json(orient="split")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "columns": [
                "col 1",
                "col 2"
            ],
            "index": [
                "row 1",
                "row 2"
            ],
            "data": [
                [
                    "a",
                    "b"
                ],
                [
                    "c",
                    "d"
                ]
            ]
        }}
        Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
        Note that index labels are not preserved with this encoding.
        >>> result = df.to_json(orient="records")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        [
            {{
                "col 1": "a",
                "col 2": "b"
            }},
            {{
                "col 1": "c",
                "col 2": "d"
            }}
        ]
        Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
        >>> result = df.to_json(orient="index")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "row 1": {{
                "col 1": "a",
                "col 2": "b"
            }},
            "row 2": {{
                "col 1": "c",
                "col 2": "d"
            }}
        }}
        Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
        >>> result = df.to_json(orient="columns")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "col 1": {{
                "row 1": "a",
                "row 2": "c"
            }},
            "col 2": {{
                "row 1": "b",
                "row 2": "d"
            }}
        }}
        Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
        >>> result = df.to_json(orient="values")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        [
            [
                "a",
                "b"
            ],
            [
                "c",
                "d"
            ]
        ]
        Encoding with Table Schema:
        >>> result = df.to_json(orient="table")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "schema": {{
                "fields": [
                    {{
                        "name": "index",
                        "type": "string"
                    }},
                    {{
                        "name": "col 1",
                        "type": "string"
                    }},
                    {{
                        "name": "col 2",
                        "type": "string"
                    }}
                ],
                "primaryKey": [
                    "index"
                ],
                "pandas_version": "0.20.0"
            }},
            "data": [
                {{
                    "index": "row 1",
                    "col 1": "a",
                    "col 2": "b"
                }},
                {{
                    "index": "row 2",
                    "col 1": "c",
                    "col 2": "d"
                }}
            ]
        }}
        """
        from pandas.io import json

        # Per the docstring, ``orient='table'`` defaults dates to ISO8601;
        # all other orients default to epoch milliseconds.
        if date_format is None and orient == "table":
            date_format = "iso"
        elif date_format is None:
            date_format = "epoch"

        # Validate ``indent`` via config's non-negative-int validator, then
        # normalize None to 0 for the writer.
        config.is_nonnegative_int(indent)
        indent = indent or 0

        return json.to_json(
            path_or_buf=path_or_buf,
            obj=self,
            orient=orient,
            date_format=date_format,
            double_precision=double_precision,
            force_ascii=force_ascii,
            date_unit=date_unit,
            default_handler=default_handler,
            lines=lines,
            compression=compression,
            index=index,
            indent=indent,
            storage_options=storage_options,
        )
    @final
    def to_hdf(
        self,
        path_or_buf,
        key: str,
        mode: str = "a",
        complevel: int | None = None,
        complib: str | None = None,
        append: bool_t = False,
        format: str | None = None,
        index: bool_t = True,
        min_itemsize: int | dict[str, int] | None = None,
        nan_rep=None,
        dropna: bool_t | None = None,
        data_columns: bool_t | list[str] | None = None,
        errors: str = "strict",
        encoding: str = "UTF-8",
    ) -> None:
        """
        Write the contained data to an HDF5 file using HDFStore.
        Hierarchical Data Format (HDF) is self-describing, allowing an
        application to interpret the structure and contents of a file with
        no outside information. One HDF file can hold a mix of related objects
        which can be accessed as a group or as individual objects.
        In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different a key.
        .. warning::
           One can store a subclass of ``DataFrame`` or ``Series`` to HDF5,
           but the type of the subclass is lost upon storing.
        For more information see the :ref:`user guide <io.hdf5>`.
        Parameters
        ----------
        path_or_buf : str or pandas.HDFStore
            File path or HDFStore object.
        key : str
            Identifier for the group in the store.
        mode : {'a', 'w', 'r+'}, default 'a'
            Mode to open file:
            - 'w': write, a new file is created (an existing file with
              the same name would be deleted).
            - 'a': append, an existing file is opened for reading and
              writing, and if the file does not exist it is created.
            - 'r+': similar to 'a', but the file must already exist.
        complevel : {0-9}, optional
            Specifies a compression level for data.
            A value of 0 disables compression.
        complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
            Specifies the compression library to be used.
            As of v0.20.2 these additional compressors for Blosc are supported
            (default if no compressor specified: 'blosc:blosclz'):
            {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
            'blosc:zlib', 'blosc:zstd'}.
            Specifying a compression library which is not available issues
            a ValueError.
        append : bool, default False
            For Table formats, append the input data to the existing.
        format : {'fixed', 'table', None}, default 'fixed'
            Possible values:
            - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
              nor searchable.
            - 'table': Table format. Write as a PyTables Table structure
              which may perform worse but allow more flexible operations
              like searching / selecting subsets of the data.
            - If None, pd.get_option('io.hdf.default_format') is checked,
              followed by fallback to "fixed"
        errors : str, default 'strict'
            Specifies how encoding and decoding errors are to be handled.
            See the errors argument for :func:`open` for a full list
            of options.
        encoding : str, default "UTF-8"
        min_itemsize : dict or int, optional
            Map column names to minimum string sizes for columns.
        nan_rep : Any, optional
            How to represent null values as str.
            Not allowed with append=True.
        data_columns : list of columns or True, optional
            List of columns to create as indexed data columns for on-disk
            queries, or True to use all columns. By default only the axes
            of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
            Applicable only to format='table'.
        dropna : bool, optional
            Forwarded verbatim to :func:`pandas.io.pytables.to_hdf`;
            presumably controls dropping of all-missing rows when
            format='table' -- TODO(review): confirm against the pytables
            writer.
        See Also
        --------
        read_hdf : Read from HDF file.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
        DataFrame.to_sql : Write to a SQL table.
        DataFrame.to_feather : Write out feather-format for DataFrames.
        DataFrame.to_csv : Write out to a csv file.
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
        ...                   index=['a', 'b', 'c'])
        >>> df.to_hdf('data.h5', key='df', mode='w')
        We can add another object to the same file:
        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.to_hdf('data.h5', key='s')
        Reading from HDF file:
        >>> pd.read_hdf('data.h5', 'df')
        A  B
        a  1  4
        b  2  5
        c  3  6
        >>> pd.read_hdf('data.h5', 's')
        0    1
        1    2
        2    3
        3    4
        dtype: int64
        Deleting file with data:
        >>> import os
        >>> os.remove('data.h5')
        """
        # Function-local import keeps the PyTables-backed I/O layer lazy.
        from pandas.io import pytables
        # All real work is delegated to the pytables writer; this method
        # itself only forwards the arguments unchanged.
        pytables.to_hdf(
            path_or_buf,
            key,
            self,
            mode=mode,
            complevel=complevel,
            complib=complib,
            append=append,
            format=format,
            index=index,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            dropna=dropna,
            data_columns=data_columns,
            errors=errors,
            encoding=encoding,
        )
    @final
    def to_sql(
        self,
        name: str,
        con,
        schema=None,
        if_exists: str = "fail",
        index: bool_t = True,
        index_label=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
    ) -> None:
        """
        Write records stored in a DataFrame to a SQL database.
        Databases supported by SQLAlchemy [1]_ are supported. Tables can be
        newly created, appended to, or overwritten.
        Parameters
        ----------
        name : str
            Name of SQL table.
        con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
            Using SQLAlchemy makes it possible to use any DB supported by that
            library. Legacy support is provided for sqlite3.Connection objects. The user
            is responsible for engine disposal and connection closure for the SQLAlchemy
            connectable See `here \
                <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
        schema : str, optional
            Specify the schema (if database flavor supports this). If None, use
            default schema.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            How to behave if the table already exists.
            * fail: Raise a ValueError.
            * replace: Drop the table before inserting new values.
            * append: Insert new values to the existing table.
        index : bool, default True
            Write DataFrame index as a column. Uses `index_label` as the column
            name in the table.
        index_label : str or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        chunksize : int, optional
            Specify the number of rows in each batch to be written at a time.
            By default, all rows will be written at once.
        dtype : dict or scalar, optional
            Specifying the datatype for columns. If a dictionary is used, the
            keys should be the column names and the values should be the
            SQLAlchemy types or strings for the sqlite3 legacy mode. If a
            scalar is provided, it will be applied to all columns.
        method : {None, 'multi', callable}, optional
            Controls the SQL insertion clause used:
            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.
            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        Raises
        ------
        ValueError
            When the table already exists and `if_exists` is 'fail' (the
            default).
        See Also
        --------
        read_sql : Read a DataFrame from a table.
        Notes
        -----
        Timezone aware datetime columns will be written as
        ``Timestamp with timezone`` type with SQLAlchemy if supported by the
        database. Otherwise, the datetimes will be stored as timezone unaware
        timestamps local to the original timezone.
        References
        ----------
        .. [1] https://docs.sqlalchemy.org
        .. [2] https://www.python.org/dev/peps/pep-0249/
        Examples
        --------
        Create an in-memory SQLite database.
        >>> from sqlalchemy import create_engine
        >>> engine = create_engine('sqlite://', echo=False)
        Create a table from scratch with 3 rows.
        >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
        >>> df
             name
        0  User 1
        1  User 2
        2  User 3
        >>> df.to_sql('users', con=engine)
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
        An `sqlalchemy.engine.Connection` can also be passed to `con`:
        >>> with engine.begin() as connection:
        ...     df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
        ...     df1.to_sql('users', con=connection, if_exists='append')
        This is allowed to support operations that require that the same
        DBAPI connection is used for the entire operation.
        >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
        >>> df2.to_sql('users', con=engine, if_exists='append')
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
         (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
         (1, 'User 7')]
        Overwrite the table with just ``df2``.
        >>> df2.to_sql('users', con=engine, if_exists='replace',
        ...            index_label='id')
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 6'), (1, 'User 7')]
        Specify the dtype (especially useful for integers with missing values).
        Notice that while pandas is forced to store the data as floating point,
        the database supports nullable integers. When fetching the data with
        Python, we get back integer scalars.
        >>> df = pd.DataFrame({"A": [1, None, 2]})
        >>> df
             A
        0  1.0
        1  NaN
        2  2.0
        >>> from sqlalchemy.types import Integer
        >>> df.to_sql('integers', con=engine, index=False,
        ...           dtype={"A": Integer()})
        >>> engine.execute("SELECT * FROM integers").fetchall()
        [(1,), (None,), (2,)]
        """
        # Function-local import defers loading the SQL I/O layer (and its
        # optional SQLAlchemy dependency) until to_sql is actually called.
        from pandas.io import sql
        # Pure delegation: all arguments are forwarded unchanged, and the
        # method returns None.
        sql.to_sql(
            self,
            name,
            con,
            schema=schema,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            chunksize=chunksize,
            dtype=dtype,
            method=method,
        )
    @final
    @doc(storage_options=_shared_docs["storage_options"])
    def to_pickle(
        self,
        path,
        compression: CompressionOptions = "infer",
        protocol: int = pickle.HIGHEST_PROTOCOL,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Pickle (serialize) object to file.
        Parameters
        ----------
        path : str
            File path where the pickled object will be stored.
        compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, \
        default 'infer'
            A string representing the compression to use in the output file. By
            default, infers from the file extension in specified path.
            Compression mode may be any of the following possible
            values: {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}. If compression
            mode is 'infer' and path_or_buf is path-like, then detect
            compression mode from the following extensions:
            '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression).
            If dict given and mode is 'zip' or inferred as 'zip', other entries
            passed as additional compression options.
        protocol : int
            Int which indicates which protocol should be used by the pickler,
            default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
            values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
            parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
            .. [1] https://docs.python.org/3/library/pickle.html.
        {storage_options}
            .. versionadded:: 1.2.0
        See Also
        --------
        read_pickle : Load pickled pandas object (or any object) from file.
        DataFrame.to_hdf : Write DataFrame to an HDF5 file.
        DataFrame.to_sql : Write DataFrame to a SQL database.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
        Examples
        --------
        >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})
        >>> original_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9
        >>> original_df.to_pickle("./dummy.pkl")
        >>> unpickled_df = pd.read_pickle("./dummy.pkl")
        >>> unpickled_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9
        >>> import os
        >>> os.remove("./dummy.pkl")
        """
        # Function-local import keeps the pickle I/O helper lazy.
        from pandas.io.pickle import to_pickle
        # Pure delegation; arguments are forwarded unchanged.
        to_pickle(
            self,
            path,
            compression=compression,
            protocol=protocol,
            storage_options=storage_options,
        )
    @final
    def to_clipboard(
        self, excel: bool_t = True, sep: str | None = None, **kwargs
    ) -> None:
        r"""
        Copy object to the system clipboard.
        Write a text representation of object to the system clipboard.
        This can be pasted into Excel, for example.
        Parameters
        ----------
        excel : bool, default True
            Produce output in a csv format for easy pasting into excel.
            - True, use the provided separator for csv pasting.
            - False, write a string representation of the object to the clipboard.
        sep : str, default ``'\t'``
            Field delimiter.
        **kwargs
            These parameters will be passed to DataFrame.to_csv.
        See Also
        --------
        DataFrame.to_csv : Write a DataFrame to a comma-separated values
            (csv) file.
        read_clipboard : Read text from clipboard and pass to read_table.
        Notes
        -----
        Requirements for your platform.
          - Linux : `xclip`, or `xsel` (with `PyQt4` modules)
          - Windows : none
          - OS X : none
        Examples
        --------
        Copy the contents of a DataFrame to the clipboard.
        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
        >>> df.to_clipboard(sep=',')  # doctest: +SKIP
        ... # Wrote the following to the system clipboard:
        ... # ,A,B,C
        ... # 0,1,2,3
        ... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to false.
        >>> df.to_clipboard(sep=',', index=False)  # doctest: +SKIP
        ... # Wrote the following to the system clipboard:
        ... # A,B,C
        ... # 1,2,3
        ... # 4,5,6
        """
        # Deferred import; clipboard support is platform dependent (see Notes).
        from pandas.io import clipboards
        # Pure delegation to the clipboard I/O helpers.
        clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
@final
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
    @final
    @doc(returns=fmt.return_docstring)
    def to_latex(
        self,
        buf=None,
        columns=None,
        col_space=None,
        header=True,
        index=True,
        na_rep="NaN",
        formatters=None,
        float_format=None,
        sparsify=None,
        index_names=True,
        bold_rows=False,
        column_format=None,
        longtable=None,
        escape=None,
        encoding=None,
        decimal=".",
        multicolumn=None,
        multicolumn_format=None,
        multirow=None,
        caption=None,
        label=None,
        position=None,
    ):
        r"""
        Render object to a LaTeX tabular, longtable, or nested table/tabular.
        Requires ``\usepackage{{booktabs}}``.  The output can be copy/pasted
        into a main LaTeX document or read from an external file
        with ``\input{{table.tex}}``.
        .. versionchanged:: 1.0.0
           Added caption and label arguments.
        .. versionchanged:: 1.2.0
           Added position argument, changed meaning of caption argument.
        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        columns : list of label, optional
            The subset of columns to write. Writes all columns by default.
        col_space : int, optional
            The minimum width of each column.
        header : bool or list of str, default True
            Write out the column names. If a list of strings is given,
            it is assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        na_rep : str, default 'NaN'
            Missing data representation.
        formatters : list of functions or dict of {{str: function}}, optional
            Formatter functions to apply to columns' elements by position or
            name. The result of each function must be a unicode string.
            List must be of length equal to the number of columns.
        float_format : one-parameter function or str, optional, default None
            Formatter for floating point numbers. For example
            ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
            both result in 0.1234 being formatted as 0.12.
        sparsify : bool, optional
            Set to False for a DataFrame with a hierarchical index to print
            every multiindex key at each row. By default, the value will be
            read from the config module.
        index_names : bool, default True
            Prints the names of the indexes.
        bold_rows : bool, default False
            Make the row labels bold in the output.
        column_format : str, optional
            The columns format as specified in `LaTeX table format
            <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
            columns. By default, 'l' will be used for all columns except
            columns of numbers, which default to 'r'.
        longtable : bool, optional
            By default, the value will be read from the pandas config
            module. Use a longtable environment instead of tabular. Requires
            adding a \usepackage{{longtable}} to your LaTeX preamble.
        escape : bool, optional
            By default, the value will be read from the pandas config
            module. When set to False prevents from escaping latex special
            characters in column names.
        encoding : str, optional
            A string representing the encoding to use in the output file,
            defaults to 'utf-8'.
        decimal : str, default '.'
            Character recognized as decimal separator, e.g. ',' in Europe.
        multicolumn : bool, default True
            Use \multicolumn to enhance MultiIndex columns.
            The default will be read from the config module.
        multicolumn_format : str, default 'l'
            The alignment for multicolumns, similar to `column_format`
            The default will be read from the config module.
        multirow : bool, default False
            Use \multirow to enhance MultiIndex rows. Requires adding a
            \usepackage{{multirow}} to your LaTeX preamble. Will print
            centered labels (instead of top-aligned) across the contained
            rows, separating groups via clines. The default will be read
            from the pandas config module.
        caption : str or tuple, optional
            Tuple (full_caption, short_caption),
            which results in ``\caption[short_caption]{{full_caption}}``;
            if a single string is passed, no short caption will be set.
            .. versionadded:: 1.0.0
            .. versionchanged:: 1.2.0
               Optionally allow caption to be a tuple ``(full_caption, short_caption)``.
        label : str, optional
            The LaTeX label to be placed inside ``\label{{}}`` in the output.
            This is used with ``\ref{{}}`` in the main ``.tex`` file.
            .. versionadded:: 1.0.0
        position : str, optional
            The LaTeX positional argument for tables, to be placed after
            ``\begin{{}}`` in the output.
            .. versionadded:: 1.2.0
        {returns}
        See Also
        --------
        DataFrame.to_string : Render a DataFrame to a console-friendly
            tabular output.
        DataFrame.to_html : Render a DataFrame as an HTML table.
        Examples
        --------
        >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
        ...                   mask=['red', 'purple'],
        ...                   weapon=['sai', 'bo staff']))
        >>> print(df.to_latex(index=False))  # doctest: +NORMALIZE_WHITESPACE
        \begin{{tabular}}{{lll}}
         \toprule
               name &    mask &    weapon \\
         \midrule
            Raphael &     red &       sai \\
          Donatello &  purple &  bo staff \\
        \bottomrule
        \end{{tabular}}
        """
        # Get defaults from the pandas config
        # A Series is promoted to a one-column DataFrame so the rest of the
        # method needs only a single (DataFrame) code path.
        if self.ndim == 1:
            self = self.to_frame()
        # Each None-valued option falls back to its "display.latex.*"
        # config setting, as documented in the parameter descriptions above.
        if longtable is None:
            longtable = config.get_option("display.latex.longtable")
        if escape is None:
            escape = config.get_option("display.latex.escape")
        if multicolumn is None:
            multicolumn = config.get_option("display.latex.multicolumn")
        if multicolumn_format is None:
            multicolumn_format = config.get_option("display.latex.multicolumn_format")
        if multirow is None:
            multirow = config.get_option("display.latex.multirow")
        # After the promotion above, self is guaranteed to be a DataFrame;
        # the cast only informs the type checker.
        self = cast("DataFrame", self)
        # Cell-level formatting options go to the formatter; LaTeX-specific
        # layout options go to the renderer below.
        formatter = DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
        )
        return DataFrameRenderer(formatter).to_latex(
            buf=buf,
            column_format=column_format,
            longtable=longtable,
            encoding=encoding,
            multicolumn=multicolumn,
            multicolumn_format=multicolumn_format,
            multirow=multirow,
            caption=caption,
            label=label,
            position=position,
        )
    @final
    @doc(storage_options=_shared_docs["storage_options"])
    def to_csv(
        self,
        path_or_buf: FilePathOrBuffer[AnyStr] | None = None,
        sep: str = ",",
        na_rep: str = "",
        float_format: str | None = None,
        columns: Sequence[Hashable] | None = None,
        header: bool_t | list[str] = True,
        index: bool_t = True,
        index_label: IndexLabel | None = None,
        mode: str = "w",
        encoding: str | None = None,
        compression: CompressionOptions = "infer",
        quoting: int | None = None,
        quotechar: str = '"',
        line_terminator: str | None = None,
        chunksize: int | None = None,
        date_format: str | None = None,
        doublequote: bool_t = True,
        escapechar: str | None = None,
        decimal: str = ".",
        errors: str = "strict",
        storage_options: StorageOptions = None,
    ) -> str | None:
        r"""
        Write object to a comma-separated values (csv) file.
        Parameters
        ----------
        path_or_buf : str or file handle, default None
            File path or object, if None is provided the result is returned as
            a string.  If a non-binary file object is passed, it should be opened
            with `newline=''`, disabling universal newlines. If a binary
            file object is passed, `mode` might need to contain a `'b'`.
            .. versionchanged:: 1.2.0
               Support for binary file objects was introduced.
        sep : str, default ','
            String of length 1. Field delimiter for the output file.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, default None
            Format string for floating point numbers.
        columns : sequence, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of strings is given it is
            assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        index_label : str or sequence, or False, default None
            Column label for index column(s) if desired. If None is given, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the object uses MultiIndex. If
            False do not print fields for index names. Use index_label=False
            for easier importing in R.
        mode : str
            Python write mode, default 'w'.
        encoding : str, optional
            A string representing the encoding to use in the output file,
            defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
            is a non-binary file object.
        compression : str or dict, default 'infer'
            If str, represents compression mode. If dict, value at 'method' is
            the compression mode. Compression mode may be any of the following
            possible values: {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}. If
            compression mode is 'infer' and `path_or_buf` is path-like, then
            detect compression mode from the following extensions: '.gz',
            '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
            and mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as
            one of the above, other entries passed as
            additional compression options.
            .. versionchanged:: 1.0.0
               May now be a dict with key 'method' as compression mode
               and other entries as additional compression options if
               compression mode is 'zip'.
            .. versionchanged:: 1.1.0
               Passing compression options as keys in dict is
               supported for compression modes 'gzip' and 'bz2'
               as well as 'zip'.
            .. versionchanged:: 1.2.0
                Compression is supported for binary file objects.
            .. versionchanged:: 1.2.0
                Previous versions forwarded dict entries for 'gzip' to
                `gzip.open` instead of `gzip.GzipFile` which prevented
                setting `mtime`.
        quoting : optional constant from csv module
            Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
            then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
            will treat them as non-numeric.
        quotechar : str, default '\"'
            String of length 1. Character used to quote fields.
        line_terminator : str, optional
            The newline character or character sequence to use in the output
            file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).
        chunksize : int or None
            Rows to write at a time.
        date_format : str, default None
            Format string for datetime objects.
        doublequote : bool, default True
            Control quoting of `quotechar` inside a field.
        escapechar : str, default None
            String of length 1. Character used to escape `sep` and `quotechar`
            when appropriate.
        decimal : str, default '.'
            Character recognized as decimal separator. E.g. use ',' for
            European data.
        errors : str, default 'strict'
            Specifies how encoding and decoding errors are to be handled.
            See the errors argument for :func:`open` for a full list
            of options.
            .. versionadded:: 1.1.0
        {storage_options}
            .. versionadded:: 1.2.0
        Returns
        -------
        None or str
            If path_or_buf is None, returns the resulting csv format as a
            string. Otherwise returns None.
        See Also
        --------
        read_csv : Load a CSV file into a DataFrame.
        to_excel : Write DataFrame to an Excel file.
        Examples
        --------
        >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'],
        ...                    'mask': ['red', 'purple'],
        ...                    'weapon': ['sai', 'bo staff']}})
        >>> df.to_csv(index=False)
        'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
        Create 'out.zip' containing 'out.csv'
        >>> compression_opts = dict(method='zip',
        ...                         archive_name='out.csv')  # doctest: +SKIP
        >>> df.to_csv('out.zip', index=False,
        ...           compression=compression_opts)  # doctest: +SKIP
        """
        # CSV output always goes through the DataFrame path: promote a
        # Series to a frame first.
        df = self if isinstance(self, ABCDataFrame) else self.to_frame()
        # Value-formatting options are handled by the formatter ...
        formatter = DataFrameFormatter(
            frame=df,
            header=header,
            index=index,
            na_rep=na_rep,
            float_format=float_format,
            decimal=decimal,
        )
        # ... while file/stream-level options (path, encoding, compression,
        # quoting, chunking) are handled by the renderer.
        return DataFrameRenderer(formatter).to_csv(
            path_or_buf,
            line_terminator=line_terminator,
            sep=sep,
            encoding=encoding,
            errors=errors,
            compression=compression,
            quoting=quoting,
            columns=columns,
            index_label=index_label,
            mode=mode,
            chunksize=chunksize,
            quotechar=quotechar,
            date_format=date_format,
            doublequote=doublequote,
            escapechar=escapechar,
            storage_options=storage_options,
        )
# ----------------------------------------------------------------------
# Lookup Caching
    def _reset_cacher(self) -> None:
        """
        Reset the cacher.
        """
        # Abstract: concrete subclasses must override this method.
        raise AbstractMethodError(self)
    def _maybe_update_cacher(
        self, clear: bool_t = False, verify_is_copy: bool_t = True
    ) -> None:
        """
        See if we need to update our parent cacher if clear, then clear our
        cache.
        Parameters
        ----------
        clear : bool, default False
            Clear the item cache.
        verify_is_copy : bool, default True
            Provide is_copy checks.
        """
        if verify_is_copy:
            # Chained-assignment check on a "referent" view; stacklevel=5
            # presumably points the warning at the user's call site -- confirm.
            self._check_setitem_copy(stacklevel=5, t="referent")
        if clear:
            # Drop cached items only after the copy check above has run.
            self._clear_item_cache()
    def _clear_item_cache(self) -> None:
        """Clear the item cache; concrete subclasses must override."""
        raise AbstractMethodError(self)
# ----------------------------------------------------------------------
# Indexing Methods
    def take(
        self: FrameOrSeries, indices, axis=0, is_copy: bool_t | None = None, **kwargs
    ) -> FrameOrSeries:
        """
        Return the elements in the given *positional* indices along an axis.
        This means that we are not indexing according to actual values in
        the index attribute of the object. We are indexing according to the
        actual position of the element in the object.
        Parameters
        ----------
        indices : array-like
            An array of ints indicating which positions to take.
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            The axis on which to select elements. ``0`` means that we are
            selecting rows, ``1`` means that we are selecting columns.
        is_copy : bool
            Before pandas 1.0, ``is_copy=False`` can be specified to ensure
            that the return value is an actual copy. Starting with pandas 1.0,
            ``take`` always returns a copy, and the keyword is therefore
            deprecated.
            .. deprecated:: 1.0.0
        **kwargs
            For compatibility with :meth:`numpy.take`. Has no effect on the
            output.
        Returns
        -------
        taken : same type as caller
            An array-like containing the elements taken from the object.
        See Also
        --------
        DataFrame.loc : Select a subset of a DataFrame by labels.
        DataFrame.iloc : Select a subset of a DataFrame by positions.
        numpy.take : Take elements from an array along an axis.
        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
        ...                    ('parrot', 'bird', 24.0),
        ...                    ('lion', 'mammal', 80.5),
        ...                    ('monkey', 'mammal', np.nan)],
        ...                   columns=['name', 'class', 'max_speed'],
        ...                   index=[0, 2, 3, 1])
        >>> df
             name   class  max_speed
        0  falcon    bird      389.0
        2  parrot    bird       24.0
        3    lion  mammal       80.5
        1  monkey  mammal        NaN
        Take elements at positions 0 and 3 along the axis 0 (default).
        Note how the actual indices selected (0 and 1) do not correspond to
        our selected indices 0 and 3. That's because we are selecting the 0th
        and 3rd rows, not rows whose indices equal 0 and 3.
        >>> df.take([0, 3])
             name   class  max_speed
        0  falcon    bird      389.0
        1  monkey  mammal        NaN
        Take elements at indices 1 and 2 along the axis 1 (column selection).
        >>> df.take([1, 2], axis=1)
            class  max_speed
        0    bird      389.0
        2    bird       24.0
        3  mammal       80.5
        1  mammal        NaN
        We may take elements using negative integers for positive indices,
        starting from the end of the object, just like with Python lists.
        >>> df.take([-1, -2])
             name   class  max_speed
        1  monkey  mammal        NaN
        3    lion  mammal       80.5
        """
        if is_copy is not None:
            # is_copy is deprecated and otherwise ignored: warn only.
            warnings.warn(
                "is_copy is deprecated and will be removed in a future version. "
                "'take' always returns a copy, so there is no need to specify this.",
                FutureWarning,
                stacklevel=2,
            )
        # kwargs are accepted only for numpy.take signature compatibility;
        # validate that nothing unsupported was passed.
        nv.validate_take((), kwargs)
        self._consolidate_inplace()
        # Translate the user-facing axis to the block manager's axis layout
        # before delegating the positional take to the manager.
        new_data = self._mgr.take(
            indices, axis=self._get_block_manager_axis(axis), verify=True
        )
        return self._constructor(new_data).__finalize__(self, method="take")
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
@final
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
    """
    Return cross-section from the Series/DataFrame.

    This method takes a `key` argument to select data at a particular
    level of a MultiIndex.

    Parameters
    ----------
    key : label or tuple of label
        Label contained in the index, or partially in a MultiIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to retrieve cross-section on.
    level : object, defaults to first n levels (n=1 or len(key))
        In case of a key partially contained in a MultiIndex, indicate
        which levels are used. Levels can be referred by label or position.
    drop_level : bool, default True
        If False, returns object with same levels as self.

    Returns
    -------
    Series or DataFrame
        Cross-section from the original Series or DataFrame
        corresponding to the selected index levels.

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.
    DataFrame.iloc : Purely integer-location based indexing
        for selection by position.

    Notes
    -----
    `xs` can not be used to set values.

    MultiIndex Slicers is a generic way to get/set values on
    any level or levels.
    It is a superset of `xs` functionality, see
    :ref:`MultiIndex Slicers <advanced.mi_slicers>`.

    Examples
    --------
    >>> d = {'num_legs': [4, 4, 2, 2],
    ...      'num_wings': [0, 0, 2, 2],
    ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
    ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
    ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
    >>> df = pd.DataFrame(data=d)
    >>> df = df.set_index(['class', 'animal', 'locomotion'])
    >>> df
                               num_legs  num_wings
    class  animal  locomotion
    mammal cat     walks              4          0
           dog     walks              4          0
           bat     flies              2          2
    bird   penguin walks              2          2

    Get values at specified index

    >>> df.xs('mammal')
                       num_legs  num_wings
    animal locomotion
    cat    walks              4          0
    dog    walks              4          0
    bat    flies              2          2

    Get values at several indexes

    >>> df.xs(('mammal', 'dog'))
                num_legs  num_wings
    locomotion
    walks              4          0

    Get values at specified index and level

    >>> df.xs('cat', level=1)
                       num_legs  num_wings
    class  locomotion
    mammal walks              4          0

    Get values at several indexes and levels

    >>> df.xs(('bird', 'walks'),
    ...       level=[0, 'locomotion'])
             num_legs  num_wings
    animal
    penguin         2          2

    Get values at specified column and axis

    >>> df.xs('num_wings', axis=1)
    class   animal   locomotion
    mammal  cat      walks         0
            dog      walks         0
            bat      flies         2
    bird    penguin  walks         2
    Name: num_wings, dtype: int64
    """
    axis = self._get_axis_number(axis)
    labels = self._get_axis(axis)
    if isinstance(key, list):
        # list keys are deprecated; only labels or tuples of labels are
        # supported going forward
        warnings.warn(
            "Passing lists as key for xs is deprecated and will be removed in a "
            "future version. Pass key as a tuple instead.",
            FutureWarning,
            stacklevel=2,
        )

    if level is not None:
        # Explicit level(s): resolve the key against those levels only.
        if not isinstance(labels, MultiIndex):
            raise TypeError("Index must be a MultiIndex")
        loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)

        # create the tuple of the indexer
        _indexer = [slice(None)] * self.ndim
        _indexer[axis] = loc
        indexer = tuple(_indexer)

        result = self.iloc[indexer]
        # Overwrite the sliced axis with the (possibly level-dropped) axis
        # returned by get_loc_level.
        setattr(result, result._get_axis_name(axis), new_ax)
        return result

    if axis == 1:
        if drop_level:
            # Column selection with drop_level is plain __getitem__.
            return self[key]
        index = self.columns
    else:
        index = self.index

    self._consolidate_inplace()

    if isinstance(index, MultiIndex):
        loc, new_index = index._get_loc_level(key, level=0)
        if not drop_level:
            # Keep all levels: re-derive the new axis from the original
            # index using the location(s) found above.
            if lib.is_integer(loc):
                new_index = index[loc : loc + 1]
            else:
                new_index = index[loc]
    else:
        loc = index.get_loc(key)

        if isinstance(loc, np.ndarray):
            # Non-unique index: loc is a boolean mask or integer positions.
            if loc.dtype == np.bool_:
                (inds,) = loc.nonzero()
                return self._take_with_is_copy(inds, axis=axis)
            else:
                return self._take_with_is_copy(loc, axis=axis)

        if not is_scalar(loc):
            new_index = index[loc]

    if is_scalar(loc) and axis == 0:
        # In this case loc should be an integer
        if self.ndim == 1:
            # if we encounter an array-like and we only have 1 dim
            # that means that their are list/ndarrays inside the Series!
            # so just return them (GH 6394)
            return self._values[loc]

        # DataFrame row -> Series of that row's values.
        new_values = self._mgr.fast_xs(loc)

        result = self._constructor_sliced(
            new_values,
            index=self.columns,
            name=self.index[loc],
            dtype=new_values.dtype,
        )
    elif is_scalar(loc):
        result = self.iloc[:, slice(loc, loc + 1)]
    elif axis == 1:
        result = self.iloc[:, loc]
    else:
        result = self.iloc[loc]
        result.index = new_index

    # this could be a view
    # but only in a single-dtyped view sliceable case
    result._set_is_copy(self, copy=not result._is_view)
    return result
def __getitem__(self, item):
    """
    Abstract indexing hook; always raises on this base class.

    Concrete subclasses provide the real ``[]`` semantics.
    """
    raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
    """
    Construct a positional slice of this container along ``axis``.

    Slicing with this method is *always* positional, never label-based.
    """
    assert isinstance(slobj, slice), type(slobj)

    mgr_axis = self._get_block_manager_axis(axis)
    sliced = self._constructor(self._mgr.get_slice(slobj, axis=mgr_axis))
    sliced = sliced.__finalize__(self)

    # The result can only be a true view when slicing along block-manager
    # axis 0 of a single-dtyped container; otherwise mark it as a copy for
    # SettingWithCopy tracking.
    sliced._set_is_copy(self, copy=mgr_axis != 0 or sliced._is_view)
    return sliced
@final
def _set_is_copy(self, ref: FrameOrSeries, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
    """
    Check if we are a view, have a cacher, and are of mixed type.
    If so, then force a setitem_copy check.

    Should be called just near setting a value.

    Returns
    -------
    bool
        NOTE(review): this base implementation always returns False; the
        original docstring's claim that it "will return a boolean if we
        are a view and are cached" appears to describe overriding
        implementations — confirm against subclasses.
    """
    if self._is_copy:
        # t="referent" selects the "copy of a slice" message variant.
        self._check_setitem_copy(stacklevel=4, t="referent")
    return False
@final
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
    """
    Validate if we are doing a setitem on a chained copy.

    Parameters
    ----------
    stacklevel : int, default 4
        the level to show of the stack when the error is output
    t : str, the type of setting error
    force : bool, default False
        If True, then force showing an error.

    If you call this function, be sure to set the stacklevel such that the
    user will see the error *at the level of setting*

    It is technically possible to figure out that we are setting on
    a copy even WITH a multi-dtyped pandas object. In other words, some
    blocks may be views while other are not. Currently _is_view will ALWAYS
    return False for multi-blocks to avoid having to handle this case.

    df = DataFrame(np.arange(0,9), columns=['count'])
    df['group'] = 'b'

    # This technically need not raise SettingWithCopy if both are view
    # (which is not # generally guaranteed but is usually True.  However,
    # this is in general not a good practice and we recommend using .loc.
    df.iloc[0:5]['group'] = 'a'
    """
    # return early if the check is not needed
    if not (force or self._is_copy):
        return

    value = config.get_option("mode.chained_assignment")
    if value is None:
        # the user has disabled the chained-assignment check entirely
        return

    # see if the copy is not actually referred; if so, then dissolve
    # the copy weakref
    if self._is_copy is not None and not isinstance(self._is_copy, str):
        r = self._is_copy()
        # A dead weakref (no referents) or a same-shape referent means the
        # "copy" relationship is no longer meaningful — drop the marker.
        if not gc.get_referents(r) or (r is not None and r.shape == self.shape):
            self._is_copy = None
            return

    # a custom message
    if isinstance(self._is_copy, str):
        # _is_copy may hold a custom message string instead of a weakref
        t = self._is_copy
    elif t == "referent":
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame\n\n"
            "See the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )
    else:
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame.\n"
            "Try using .loc[row_indexer,col_indexer] = value "
            "instead\n\nSee the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )

    # honor the mode.chained_assignment option: "raise", "warn", or None
    if value == "raise":
        raise com.SettingWithCopyError(t)
    elif value == "warn":
        warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
    """
    Delete item.

    For a DataFrame with a MultiIndex on the columns, a key that is not a
    complete column label is treated as a prefix: every column whose first
    ``len(key)`` elements match is deleted.
    """
    deleted = False

    maybe_shortcut = False
    if self.ndim == 2 and isinstance(self.columns, MultiIndex):
        try:
            # If the key is not a full column label in the engine, the
            # prefix shorthand below may apply.
            maybe_shortcut = key not in self.columns._engine
        except TypeError:
            # unhashable key cannot be in the engine; fall through
            pass

    if maybe_shortcut:
        # Allow shorthand to delete all columns whose first len(key)
        # elements match key:
        if not isinstance(key, tuple):
            key = (key,)
        for col in self.columns:
            if isinstance(col, tuple) and col[: len(key)] == key:
                del self[col]
                deleted = True

    if not deleted:
        # If the above loop ran and didn't delete anything because
        # there was no match, this call should raise the appropriate
        # exception:
        loc = self.axes[-1].get_loc(key)
        self._mgr = self._mgr.idelete(loc)

    # delete from the caches
    try:
        del self._item_cache[key]
    except KeyError:
        pass
# ----------------------------------------------------------------------
# Unsorted
@final
def _check_inplace_and_allows_duplicate_labels(self, inplace):
if inplace and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'inplace=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
@final
def get(self, key, default=None):
    """
    Get item from object for given key (ex: DataFrame column).

    Returns default value if not found.

    Parameters
    ----------
    key : object
    default : object, optional
        Value to return when the key is not found.

    Returns
    -------
    value : same type as items contained in object
    """
    result = default
    try:
        result = self[key]
    except (KeyError, ValueError, IndexError):
        # missing / unresolvable key -> fall back to the default
        pass
    return result
@final
@property
def _is_view(self) -> bool_t:
    """Return boolean indicating if self is view of another array"""
    # Delegates entirely to the block manager's view bookkeeping.
    return self._mgr.is_view
@final
def reindex_like(
    self: FrameOrSeries,
    other,
    method: str | None = None,
    copy: bool_t = True,
    limit=None,
    tolerance=None,
) -> FrameOrSeries:
    """
    Return an object with matching indices as other object.

    Conform the object to the same index on all axes. Optional filling
    logic places NaN in locations having no value in the previous index.
    A new object is produced unless the new index is equivalent to the
    current one and ``copy=False``. Equivalent to calling
    ``.reindex(index=other.index, columns=other.columns, ...)``.

    Parameters
    ----------
    other : Object of the same data type
        Its row and column indices are used to define the new indices
        of this object.
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in the reindexed object; only
        applicable with a monotonically increasing/decreasing index.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    limit : int, default None
        Maximum number of consecutive labels to fill for inexact matches.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches; scalar or list-like (per-element tolerance).

    Returns
    -------
    Series or DataFrame
        Same type as caller, but with changed indices on each axis.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex : Change to new indices or expand indices.
    """
    # Build one reindex kwarg per axis of *other* and delegate.
    return self.reindex(
        **other._construct_axes_dict(
            axes=self._AXIS_ORDERS,
            method=method,
            copy=copy,
            limit=limit,
            tolerance=tolerance,
        )
    )
def drop(
    self,
    labels=None,
    axis=0,
    index=None,
    columns=None,
    level=None,
    inplace: bool_t = False,
    errors: str = "raise",
):
    """
    Drop the given labels from the requested axis (or axes).

    Labels may be given either positionally via ``labels``/``axis`` or by
    keyword via ``index``/``columns`` — but not both. With
    ``inplace=True`` the object is modified and None is returned;
    otherwise a new object is returned.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    if labels is not None:
        if index is not None or columns is not None:
            raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
        axes = {self._get_axis_name(axis): labels}
    elif index is not None or columns is not None:
        axes, _ = self._construct_axes_from_arguments((index, columns), {})
    else:
        raise ValueError(
            "Need to specify at least one of 'labels', 'index' or 'columns'"
        )

    obj = self
    for axis_name, axis_labels in axes.items():
        if axis_labels is None:
            continue
        obj = obj._drop_axis(axis_labels, axis_name, level=level, errors=errors)

    if inplace:
        self._update_inplace(obj)
        return None
    return obj
@final
def _drop_axis(
    self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
    """
    Drop labels from specified axis. Used in the ``drop`` method
    internally.

    Parameters
    ----------
    labels : single label or list-like
    axis : int or axis name
    level : int or level name, default None
        For MultiIndex
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and existing labels are dropped.
    """
    axis = self._get_axis_number(axis)
    axis_name = self._get_axis_name(axis)
    # NOTE: `axis` is rebound from the axis number to the Index object here.
    axis = self._get_axis(axis)

    if axis.is_unique:
        # Fast path: a unique axis can delegate to Index.drop + reindex.
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            new_axis = axis.drop(labels, level=level, errors=errors)
        else:
            new_axis = axis.drop(labels, errors=errors)
        result = self.reindex(**{axis_name: new_axis})

    # Case for non-unique axis
    else:
        # Build a boolean keep-mask over positions, then select with .loc.
        labels = ensure_object(com.index_labels_to_array(labels))
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            indexer = ~axis.get_level_values(level).isin(labels)

            # GH 18561 MultiIndex.drop should raise if label is absent
            if errors == "raise" and indexer.all():
                raise KeyError(f"{labels} not found in axis")
        elif isinstance(axis, MultiIndex) and labels.dtype == "object":
            # Set level to zero in case of MultiIndex and label is string,
            # because isin can't handle strings for MultiIndexes GH#36293
            indexer = ~axis.get_level_values(0).isin(labels)
        else:
            indexer = ~axis.isin(labels)
            # Check if label doesn't exist along axis
            labels_missing = (axis.get_indexer_for(labels) == -1).any()
            if errors == "raise" and labels_missing:
                raise KeyError(f"{labels} not found in axis")

        slicer = [slice(None)] * self.ndim
        slicer[self._get_axis_number(axis_name)] = indexer

        result = self.loc[tuple(slicer)]

    return result
@final
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
    """
    Replace self internals with result.

    Parameters
    ----------
    result : same type as self
    verify_is_copy : bool, default True
        Provide is_copy checks.
    """
    # NOTE: This does *not* call __finalize__ and that's an explicit
    # decision that we may revisit in the future.
    # Invalidate caches first so nothing stale survives the swap.
    self._reset_cache()
    self._clear_item_cache()
    # Adopt the new object's block manager wholesale.
    self._mgr = result._mgr
    self._maybe_update_cacher(verify_is_copy=verify_is_copy)
@final
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
    """
    Prefix labels with string `prefix`.

    For Series, the row labels are prefixed; for DataFrame, the column
    labels are prefixed.

    Parameters
    ----------
    prefix : str
        The string to add before each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_suffix: Suffix row labels with string `suffix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.

    Examples
    --------
    >>> pd.Series([1, 2]).add_prefix('item_')
    item_0    1
    item_1    2
    dtype: int64
    """
    # Mapper that stringifies each label with the prefix prepended.
    rename_label = functools.partial("{prefix}{}".format, prefix=prefix)

    # error: Incompatible return value type (got "Optional[FrameOrSeries]",
    # expected "FrameOrSeries")
    # error: Argument 1 to "rename" of "NDFrame" has incompatible type
    # "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
    return self.rename(
        **{self._info_axis_name: rename_label}
    )  # type: ignore[return-value, arg-type]
@final
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
    """
    Suffix labels with string `suffix`.

    For Series, the row labels are suffixed; for DataFrame, the column
    labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.

    Examples
    --------
    >>> pd.Series([1, 2]).add_suffix('_item')
    0_item    1
    1_item    2
    dtype: int64
    """
    # Mapper that stringifies each label with the suffix appended.
    rename_label = functools.partial("{}{suffix}".format, suffix=suffix)

    # error: Incompatible return value type (got "Optional[FrameOrSeries]",
    # expected "FrameOrSeries")
    # error: Argument 1 to "rename" of "NDFrame" has incompatible type
    # "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
    return self.rename(
        **{self._info_axis_name: rename_label}
    )  # type: ignore[return-value, arg-type]
def sort_values(
    self,
    axis=0,
    ascending=True,
    inplace: bool_t = False,
    kind: str = "quicksort",
    na_position: str = "last",
    ignore_index: bool_t = False,
    key: ValueKeyFunc = None,
):
    """
    Sort by the values along either axis.

    This is an abstract stub on the shared base class; Series and
    DataFrame supply the concrete implementations.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to be sorted.
    ascending : bool or list of bool, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders; a list of bools must match the length of the sort keys.
    inplace : bool, default False
        If True, perform operation in-place.
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
        Choice of sorting algorithm. See also :func:`numpy.sort`;
        `mergesort` and `stable` are the only stable algorithms. For
        DataFrames, applied only when sorting on a single column or label.
    na_position : {'first', 'last'}, default 'last'
        Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
    ignore_index : bool, default False
        If True, the resulting axis will be labeled 0, 1, …, n - 1.

        .. versionadded:: 1.0.0
    key : callable, optional
        Apply the key function to the values before sorting. Similar to
        the `key` argument in the builtin :meth:`sorted` function, but it
        should be *vectorized*: it receives a ``Series`` and must return
        a Series with the same shape, applied to each sort column
        independently.

        .. versionadded:: 1.1.0

    Returns
    -------
    DataFrame or None
        DataFrame with sorted values or None if ``inplace=True``.

    See Also
    --------
    DataFrame.sort_index : Sort a DataFrame by the index.
    Series.sort_values : Similar method for a Series.
    """
    raise AbstractMethodError(self)
def sort_index(
    self,
    axis=0,
    level=None,
    ascending: bool_t | int | Sequence[bool_t | int] = True,
    inplace: bool_t = False,
    kind: str = "quicksort",
    na_position: str = "last",
    sort_remaining: bool_t = True,
    ignore_index: bool_t = False,
    key: IndexKeyFunc = None,
):
    """
    Sort object by labels along the given axis.

    Parameters mirror ``sort_values`` where applicable; ``level`` and
    ``sort_remaining`` apply to MultiIndex axes, and ``key`` is a
    vectorized callable applied to the index before sorting.

    Returns
    -------
    Same type as caller (or None if ``inplace=True``).
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    axis = self._get_axis_number(axis)
    ascending = validate_ascending(ascending)

    target = self._get_axis(axis)

    # get_indexer_indexer returns None when the axis is already in the
    # requested order — nothing to reorder.
    indexer = get_indexer_indexer(
        target, level, ascending, kind, na_position, sort_remaining, key
    )

    if indexer is None:
        if inplace:
            return
        else:
            return self.copy()

    # Reorder the data through the block manager (block-manager axis
    # numbering differs from the user-facing axis numbering).
    baxis = self._get_block_manager_axis(axis)
    new_data = self._mgr.take(indexer, axis=baxis, verify=False)

    # reconstruct axis if needed
    new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())

    if ignore_index:
        # Replace the sorted labels with a fresh RangeIndex-style default.
        axis = 1 if isinstance(self, ABCDataFrame) else 0
        new_data.set_axis(axis, ibase.default_index(len(indexer)))

    result = self._constructor(new_data)

    if inplace:
        return self._update_inplace(result)
    else:
        return result.__finalize__(self, method="sort_index")
@doc(
    klass=_shared_doc_kwargs["klass"],
    axes=_shared_doc_kwargs["axes"],
    optional_labels="",
    optional_axis="",
)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
    """
    Conform {klass} to new index with optional filling logic.

    Places NA/NaN in locations having no value in the previous index.
    A new object is produced unless the new index is equivalent to the
    current one and ``copy=False``.

    Parameters
    ----------
    {optional_labels}
    {axes} : array-like, optional
        New labels / index to conform to, should be specified using
        keywords. Preferably an Index object to avoid duplicating data.
    {optional_axis}
    method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
        Method to use for filling holes in reindexed DataFrame. Only
        applicable to DataFrames/Series with a monotonically
        increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: Propagate last valid observation forward to next
          valid.
        * backfill / bfill: Use next valid observation to fill gap.
        * nearest: Use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. May be a scalar (same tolerance for all values) or
        list-like (per-element tolerance, same size/dtype as the index).

    Returns
    -------
    {klass} with changed index.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Notes
    -----
    Filling while reindexing does not look at the values, only at the
    original and desired indexes: NaNs already present in the data are
    never filled by ``method``. See the :ref:`user guide
    <basics.reindexing>` for more.
    """
    # Normalize positional/keyword axis arguments into a dict of
    # axis-name -> requested labels.
    axes, kwargs = self._construct_axes_from_arguments(args, kwargs)

    pop = kwargs.pop
    method = missing.clean_reindex_fill_method(pop("method", None))
    level = pop("level", None)
    copy = pop("copy", True)
    limit = pop("limit", None)
    tolerance = pop("tolerance", None)
    fill_value = pop("fill_value", None)

    # Series.reindex doesn't use / need the axis kwarg; pop and ignore it
    # so generic Series/Frame code can always pass it.
    pop("axis", None)

    if kwargs:
        unexpected = next(iter(kwargs))
        raise TypeError(
            f'reindex() got an unexpected keyword argument "{unexpected}"'
        )

    self._consolidate_inplace()

    # Fast path: every requested axis is identical (names and values) to
    # the current one, so only copy if asked to.
    if all(
        self._get_axis(axis_no).identical(ax)
        for axis_no, ax in axes.items()
        if ax is not None
    ):
        return self.copy() if copy else self

    # check if we are a multi reindex
    if self._needs_reindex_multi(axes, method, level):
        return self._reindex_multi(axes, copy, fill_value)

    # perform the reindex on the axes
    return self._reindex_axes(
        axes, level, limit, tolerance, method, fill_value, copy
    ).__finalize__(self, method="reindex")
@final
def _reindex_axes(
    self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
    """
    Perform the reindex for all the axes, one axis at a time.

    ``axes`` maps axis name -> requested labels (None means skip).
    """
    obj = self
    for axis_name in self._AXIS_ORDERS:
        target_labels = axes[axis_name]
        if target_labels is None:
            continue

        current = self._get_axis(axis_name)
        new_index, indexer = current.reindex(
            target_labels,
            level=level,
            limit=limit,
            tolerance=tolerance,
            method=method,
        )

        axis_number = self._get_axis_number(axis_name)
        obj = obj._reindex_with_indexers(
            {axis_number: [new_index, indexer]},
            fill_value=fill_value,
            copy=copy,
            allow_dups=False,
        )

    return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
    """
    Check if we do need a multi reindex: every axis requested at once,
    with no fill method, no level, and a homogeneous dtype.
    """
    if method is not None or level is not None:
        return False
    if self._is_mixed_type:
        return False
    return com.count_not_none(*axes.values()) == self._AXIS_LEN
def _reindex_multi(self, axes, copy, fill_value):
    """
    Reindex all axes in one operation; abstract on this base class.
    """
    raise AbstractMethodError(self)
@final
def _reindex_with_indexers(
    self: FrameOrSeries,
    reindexers,
    fill_value=None,
    copy: bool_t = False,
    allow_dups: bool_t = False,
) -> FrameOrSeries:
    """
    Reindex the underlying block manager along one or more axes.

    ``reindexers`` maps axis number -> [new_index, indexer];
    allow_dups indicates an internal call here
    """
    # reindex doing multiple operations on different axes if indicated
    new_data = self._mgr
    for axis in sorted(reindexers.keys()):
        index, indexer = reindexers[axis]
        baxis = self._get_block_manager_axis(axis)

        if index is None:
            continue

        index = ensure_index(index)
        if indexer is not None:
            indexer = ensure_platform_int(indexer)

        # TODO: speed up on homogeneous DataFrame objects
        new_data = new_data.reindex_indexer(
            index,
            indexer,
            axis=baxis,
            fill_value=fill_value,
            allow_dups=allow_dups,
            copy=copy,
        )
        # If we've made a copy once, no need to make another one
        copy = False

    # Honor copy=True even when no axis actually changed the manager.
    if copy and new_data is self._mgr:
        new_data = new_data.copy()

    return self._constructor(new_data).__finalize__(self)
def filter(
    self: FrameOrSeries,
    items=None,
    like: str | None = None,
    regex: str | None = None,
    axis=None,
) -> FrameOrSeries:
    """
    Subset the dataframe rows or columns according to the specified index
    labels.

    Note that this routine does not filter a dataframe on its contents;
    the filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : str
        Keep labels from axis for which "like in label == True".
    regex : str (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        The axis to filter on, expressed either as an index (int) or axis
        name (str). By default this is the info axis, 'index' for Series,
        'columns' for DataFrame.

    Returns
    -------
    same type as input object

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are enforced to be
    mutually exclusive. ``axis`` defaults to the info axis that is used
    when indexing with ``[]``.
    """
    if com.count_not_none(items, like, regex) > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` "
            "are mutually exclusive"
        )

    if axis is None:
        axis = self._info_axis_name
    labels = self._get_axis(axis)

    if items is not None:
        # Preserve the order of `items`, keeping only labels that exist.
        axis_name = self._get_axis_name(axis)
        keep = [label for label in items if label in labels]
        return self.reindex(**{axis_name: keep})

    if like:
        mask = labels.map(lambda label: like in ensure_str(label))
        return self.loc(axis=axis)[mask]

    if regex:
        matcher = re.compile(regex)
        mask = labels.map(
            lambda label: matcher.search(ensure_str(label)) is not None
        )
        return self.loc(axis=axis)[mask]

    raise TypeError("Must pass either `items`, `like`, or `regex`")
@final
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the first `n` rows.

    Selects the first `n` rows by position — useful for quickly testing
    whether your object has the right type of data in it. For negative
    values of `n`, returns all rows except the last `n` rows, equivalent
    to ``df[:-n]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    same type as caller
        The first `n` rows of the caller object.

    See Also
    --------
    DataFrame.tail: Returns the last `n` rows.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon']})
    >>> df.head(2)
        animal
    0  alligator
    1        bee
    """
    # Positional prefix selection via iloc.
    return self.iloc[:n]
@final
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the last `n` rows.

    Rows are selected purely by position, which is handy for verifying
    data after sorting or appending. For negative values of `n`, all rows
    except the first ``|n|`` rows are returned, equivalent to ``df[n:]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    type of caller
        The last `n` rows of the caller object.

    See Also
    --------
    DataFrame.head : The first `n` rows of the caller object.
    """
    # ``iloc[-0:]`` would select everything, so ``n == 0`` needs an
    # explicit empty slice.
    return self.iloc[0:0] if n == 0 else self.iloc[-n:]
@final
def sample(
    self: FrameOrSeries,
    n: int | None = None,
    frac: float | None = None,
    replace: bool_t = False,
    weights=None,
    random_state: RandomState | None = None,
    axis: Axis | None = None,
    ignore_index: bool_t = False,
) -> FrameOrSeries:
    """
    Return a random sample of items from an axis of object.

    You can use `random_state` for reproducibility.

    Parameters
    ----------
    n : int, optional
        Number of items from axis to return. Cannot be used with `frac`.
        Default = 1 if `frac` = None.
    frac : float, optional
        Fraction of axis items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : str or ndarray-like, optional
        Default 'None' results in equal probability weighting.
        If passed a Series, will align with target object on index; index
        values in weights not found in the sampled object are ignored and
        index values in the sampled object not in weights get weight zero.
        If called on a DataFrame, will accept the name of a column when
        axis = 0. Unless weights are a Series, weights must be the same
        length as the axis being sampled. Weights that do not sum to 1 are
        normalized; missing values are treated as zero; infinite values
        are not allowed.
    random_state : int, array-like, BitGenerator, np.random.RandomState,
        np.random.Generator, optional. If int, array-like, or BitGenerator,
        seed for random number generator. If np.random.RandomState or
        np.random.Generator, use as given.

        .. versionchanged:: 1.1.0

            array-like and BitGenerator object now passed to
            np.random.RandomState() as seed

        .. versionchanged:: 1.4.0

            np.random.Generator objects now accepted
    axis : {0 or 'index', 1 or 'columns', None}, default None
        Axis to sample. Accepts axis number or name. Default is stat axis
        for given data type (0 for Series and DataFrames).
    ignore_index : bool, default False
        If True, the resulting index will be labeled 0, 1, ..., n - 1.

        .. versionadded:: 1.3.0

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing `n` items randomly
        sampled from the caller object.

    See Also
    --------
    DataFrameGroupBy.sample: Generates random samples from each group of a
        DataFrame object.
    SeriesGroupBy.sample: Generates random samples from each group of a
        Series object.
    numpy.random.choice: Generates a random sample from a given 1-D numpy
        array.

    Notes
    -----
    If `frac` > 1, `replacement` should be set to `True`.
    """
    if axis is None:
        axis = self._stat_axis_number

    axis = self._get_axis_number(axis)
    axis_len = self.shape[axis]

    # Normalise ``random_state`` into a usable RNG object.
    rng = com.random_state(random_state)

    sample_size = sample.process_sampling_size(n, frac, replace)
    if sample_size is None:
        # ``frac`` was given instead of an explicit count.
        assert frac is not None
        sample_size = round(frac * axis_len)

    if weights is not None:
        weights = sample.preprocess_weights(self, weights, axis)

    locs = sample.sample(axis_len, sample_size, replace, weights, rng)
    sampled = self.take(locs, axis=axis)

    if ignore_index:
        sampled.index = ibase.default_index(len(sampled))

    return sampled
@final
@doc(klass=_shared_doc_kwargs["klass"])
def pipe(
    self,
    func: Callable[..., T] | tuple[Callable[..., T], str],
    *args,
    **kwargs,
) -> T:
    r"""
    Apply ``func(self, \*args, \*\*kwargs)``.

    Parameters
    ----------
    func : function
        Function to apply to the {klass}. ``args`` and ``kwargs`` are
        passed into ``func``. Alternatively a ``(callable, data_keyword)``
        tuple where ``data_keyword`` is a string indicating the keyword
        of ``callable`` that expects the {klass}.
    args : iterable, optional
        Positional arguments passed into ``func``.
    kwargs : mapping, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.

    See Also
    --------
    DataFrame.apply : Apply a function along input axis of DataFrame.
    DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
    Series.map : Apply a mapping correspondence on a
        :class:`~pandas.Series`.

    Notes
    -----
    Use ``.pipe`` when chaining together functions that expect Series,
    DataFrames or GroupBy objects. Instead of writing

    >>> func(g(h(df), arg1=a), arg2=b, arg3=c)  # doctest: +SKIP

    you can write

    >>> (df.pipe(h)
    ...    .pipe(g, arg1=a)
    ...    .pipe(func, arg2=b, arg3=c)
    ... )  # doctest: +SKIP

    If ``func`` takes the data as, say, its second argument, pass a tuple
    indicating which keyword expects the data:

    >>> (df.pipe(h)
    ...    .pipe(g, arg1=a)
    ...    .pipe((func, 'arg2'), arg1=a, arg3=c)
    ... )  # doctest: +SKIP
    """
    # The ``(callable, data_keyword)`` tuple form is handled by com.pipe.
    return com.pipe(self, func, *args, **kwargs)
# ----------------------------------------------------------------------
# Attribute access
@final
def __finalize__(
    self: FrameOrSeries, other, method: str | None = None, **kwargs
) -> FrameOrSeries:
    """
    Propagate metadata from other to self.

    Parameters
    ----------
    other : the object from which to get the attributes that we are going
        to propagate
    method : str, optional
        A passed method name providing context on where ``__finalize__``
        was called.

        .. warning::

           The value passed as `method` are not currently considered
           stable across pandas releases.
    """
    if isinstance(other, NDFrame):
        # Copy over user-defined attrs and the duplicate-labels flag.
        for key in other.attrs:
            self.attrs[key] = other.attrs[key]

        self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels

        # For subclasses using _metadata: propagate only the names both
        # classes declare.
        for key in set(self._metadata) & set(other._metadata):
            assert isinstance(key, str)
            object.__setattr__(self, key, getattr(other, key, None))

    if method == "concat":
        # Result allows duplicate labels only if every input did.
        dup_ok = all(x.flags.allows_duplicate_labels for x in other.objs)
        self.flags.allows_duplicate_labels = dup_ok

    return self
def __getattr__(self, name: str):
    """
    After regular attribute access, try looking up the name.

    This allows simpler access to columns for interactive use.
    """
    # Note: obj.x always goes through obj.__getattribute__('x') first;
    # we only land here when regular attribute lookup has failed.
    is_reserved = (
        name in self._internal_names_set
        or name in self._metadata
        or name in self._accessors
    )
    if not is_reserved and self._info_axis._can_hold_identifiers_and_holds_name(
        name
    ):
        # Treat the attribute as a column/label lookup.
        return self[name]
    return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
    """
    After regular attribute access, try setting the name

    This allows simpler access to columns for interactive use.

    Parameters
    ----------
    name : str
        Attribute name being assigned.
    value : object
        Value to assign; routed to column assignment (``self[name]``)
        when ``name`` matches an existing label on the info axis.
    """
    # first try regular attribute access via __getattribute__, so that
    # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
    # the same attribute.
    try:
        object.__getattribute__(self, name)
        return object.__setattr__(self, name, value)
    except AttributeError:
        pass

    # if this fails, go on to more involved attribute setting
    # (note that this matches __getattr__, above).
    if name in self._internal_names_set:
        object.__setattr__(self, name, value)
    elif name in self._metadata:
        object.__setattr__(self, name, value)
    else:
        try:
            existing = getattr(self, name)
            if isinstance(existing, Index):
                # Axis objects (index/columns) are replaced directly.
                object.__setattr__(self, name, value)
            elif name in self._info_axis:
                # Name matches an existing label: assign the data instead.
                self[name] = value
            else:
                object.__setattr__(self, name, value)
        except (AttributeError, TypeError):
            # Attribute assignment does NOT create a new column; warn a
            # DataFrame user who appears to be attempting exactly that.
            if isinstance(self, ABCDataFrame) and (is_list_like(value)):
                warnings.warn(
                    "Pandas doesn't allow columns to be "
                    "created via a new attribute name - see "
                    "https://pandas.pydata.org/pandas-docs/"
                    "stable/indexing.html#attribute-access",
                    stacklevel=2,
                )
            object.__setattr__(self, name, value)
@final
def _dir_additions(self) -> set[str]:
    """
    Add the string-like attributes from the info_axis.

    If info_axis is a MultiIndex, its first level values are used.
    """
    extra = super()._dir_additions()
    info_axis = self._info_axis
    if info_axis._can_hold_strings:
        extra.update(info_axis._dir_additions_for_owner)
    return extra
# ----------------------------------------------------------------------
# Consolidation of internals
@final
def _protect_consolidate(self, f):
    """
    Run ``f`` (which may consolidate ``_mgr``) and clear the item cache
    if the number of blocks changed as a result.
    """
    # ArrayManager-backed objects have no block structure to track.
    if isinstance(self._mgr, (ArrayManager, SingleArrayManager)):
        return f()
    n_blocks_before = len(self._mgr.blocks)
    result = f()
    if len(self._mgr.blocks) != n_blocks_before:
        self._clear_item_cache()
    return result
@final
def _consolidate_inplace(self) -> None:
    """Consolidate data in place and return None"""

    def _do_consolidate() -> None:
        # Rebind the manager to its consolidated form.
        self._mgr = self._mgr.consolidate()

    self._protect_consolidate(_do_consolidate)
@final
def _consolidate(self):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Returns
    -------
    consolidated : same type as caller
    """
    consolidated_mgr = self._protect_consolidate(lambda: self._mgr.consolidate())
    return self._constructor(consolidated_mgr).__finalize__(self)
@final
@property
def _is_mixed_type(self) -> bool_t:
    """Whether the underlying data holds more than one dtype/block."""
    # A single consolidated block can never be mixed.
    if self._mgr.is_single_block:
        return False

    # Extension arrays never consolidate, so even same-dtype EA columns
    # are treated as "mixed".
    if self._mgr.any_extension_types:
        return True

    return self.dtypes.nunique() > 1
@final
def _check_inplace_setting(self, value) -> bool_t:
    """check whether we allow in-place setting with this type of value"""
    # Homogeneous or numeric-mixed data always allows in-place setting.
    if not self._is_mixed_type or self._mgr.is_numeric_mixed_type:
        return True

    # Otherwise only an actual np.nan is permitted.
    if is_float(value) and np.isnan(value):
        return True

    raise TypeError(
        "Cannot do inplace boolean setting on "
        "mixed-types with a non np.nan value"
    )
@final
def _get_numeric_data(self):
    """Return an object of the same type restricted to numeric data."""
    numeric_mgr = self._mgr.get_numeric_data()
    return self._constructor(numeric_mgr).__finalize__(self)
@final
def _get_bool_data(self):
    """Return an object of the same type restricted to boolean data."""
    bool_mgr = self._mgr.get_bool_data()
    return self._constructor(bool_mgr).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
    # Abstract on NDFrame; concrete subclasses (Series/DataFrame)
    # provide the implementation.
    raise AbstractMethodError(self)
@property
def _values(self) -> np.ndarray:
    """internal implementation"""
    # Abstract here; subclasses return the underlying array representation.
    raise AbstractMethodError(self)
@property
def dtypes(self):
    """
    Return the dtypes in the DataFrame.

    This returns a Series with the data type of each column. The
    result's index is the original DataFrame's columns. Columns with
    mixed types are stored with the ``object`` dtype. See
    :ref:`the User Guide <basics.dtypes>` for more.

    Returns
    -------
    pandas.Series
        The data type of each column.

    Examples
    --------
    >>> df = pd.DataFrame({'float': [1.0],
    ...                    'int': [1],
    ...                    'datetime': [pd.Timestamp('20180310')],
    ...                    'string': ['foo']})
    >>> df.dtypes
    float              float64
    int                  int64
    datetime    datetime64[ns]
    string              object
    dtype: object
    """
    dtype_values = self._mgr.get_dtypes()
    # object dtype so the Series can hold arbitrary dtype objects.
    return self._constructor_sliced(
        dtype_values, index=self._info_axis, dtype=np.object_
    )
def astype(
    self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
    """
    Cast a pandas object to a specified dtype ``dtype``.

    Parameters
    ----------
    dtype : data type, or dict of column name -> data type
        Use a numpy.dtype or Python type to cast the entire pandas object
        to the same type. Alternatively, use {col: dtype, ...}, where col
        is a column label and dtype is a numpy.dtype or Python type, to
        cast one or more of the DataFrame's columns to column-specific
        types.
    copy : bool, default True
        Return a copy when ``copy=True`` (be very careful setting
        ``copy=False`` as changes to values then may propagate to other
        pandas objects).
    errors : {'raise', 'ignore'}, default 'raise'
        Control raising of exceptions on invalid data for provided dtype.

        - ``raise`` : allow exceptions to be raised
        - ``ignore`` : suppress exceptions. On error return original object.

    Returns
    -------
    casted : same type as caller

    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to a numeric type.
    numpy.ndarray.astype : Cast a numpy array to a specified type.

    Notes
    -----
    .. deprecated:: 1.3.0

        Using ``astype`` to convert from timezone-naive dtype to
        timezone-aware dtype is deprecated and will raise in a
        future version. Use :meth:`Series.dt.tz_localize` instead.
    """
    if is_dict_like(dtype):
        if self.ndim == 1:  # i.e. Series
            # A Series mapping may only key on the Series' own name.
            if len(dtype) > 1 or self.name not in dtype:
                raise KeyError(
                    "Only the Series name can be used for "
                    "the key in Series dtype mappings."
                )
            return self.astype(dtype[self.name], copy, errors)

        # Validate every key up front so no partial work is done.
        for label in dtype.keys():
            if label not in self:
                raise KeyError(
                    "Only a column name can be used for the "
                    "key in a dtype mappings argument."
                )

        casted_cols = [
            col.astype(dtype=dtype[label], copy=copy, errors=errors)
            if label in dtype
            else (col.copy() if copy else col)
            for label, col in self.items()
        ]

    elif is_extension_array_dtype(dtype) and self.ndim > 1:
        # Column-wise conversion to an extension dtype; positional
        # access (iloc) keeps duplicate column names distinct.
        casted_cols = [
            self.iloc[:, i].astype(dtype, copy=copy)
            for i in range(len(self.columns))
        ]

    else:
        # A single dtype: delegate to the manager in one shot.
        casted_mgr = self._mgr.astype(dtype=dtype, copy=copy, errors=errors)
        return self._constructor(casted_mgr).__finalize__(self, method="astype")

    # Empty frame or series: nothing to cast.
    if not casted_cols:
        return self.copy()

    # Reassemble, preserving the original column metadata.
    out = concat(casted_cols, axis=1, copy=False)
    out.columns = self.columns
    return out
@final
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
    """
    Make a copy of this object's indices and data.

    When ``deep=True`` (default), a new object will be created with a
    copy of the calling object's data and indices. Modifications to
    the data or indices of the copy will not be reflected in the
    original object (see notes below).

    When ``deep=False``, a new object will be created without copying
    the calling object's data or index (only references to the data
    and index are copied). Any changes to the data of the original
    will be reflected in the shallow copy (and vice versa).

    Parameters
    ----------
    deep : bool, default True
        Make a deep copy, including a copy of the data and the indices.
        With ``deep=False`` neither the indices nor the data are copied.

    Returns
    -------
    copy : Series or DataFrame
        Object type matches caller.

    Notes
    -----
    When ``deep=True``, data is copied but actual Python objects
    will not be copied recursively, only the reference to the object.
    This is in contrast to `copy.deepcopy` in the Standard Library,
    which recursively copies object data.

    While ``Index`` objects are copied when ``deep=True``, the underlying
    numpy array is not copied for performance reasons. Since ``Index`` is
    immutable, the underlying data can be safely shared and a copy
    is not needed.
    """
    copied_mgr = self._mgr.copy(deep=deep)
    # Cached item references may point at the old data; drop them.
    self._clear_item_cache()
    return self._constructor(copied_mgr).__finalize__(self, method="copy")
@final
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
    # Delegate to ``copy`` so copy.copy() behaves like obj.copy().
    return self.copy(deep=deep)
@final
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
    """
    Parameters
    ----------
    memo, default None
        Standard signature. Unused
    """
    # Always a deep copy; ``memo`` is ignored by design.
    return self.copy(deep=True)
@final
def _convert(
    self: FrameOrSeries,
    datetime: bool_t = False,
    numeric: bool_t = False,
    timedelta: bool_t = False,
) -> FrameOrSeries:
    """
    Attempt to infer better dtype for object columns

    Parameters
    ----------
    datetime : bool, default False
        If True, convert to date where possible.
    numeric : bool, default False
        If True, attempt to convert to numbers (including strings), with
        unconvertible values becoming NaN.
    timedelta : bool, default False
        If True, convert to timedelta where possible.

    Returns
    -------
    converted : same as input object
    """
    # Each flag must be a genuine bool, not merely truthy.
    validate_bool_kwarg(datetime, "datetime")
    validate_bool_kwarg(numeric, "numeric")
    validate_bool_kwarg(timedelta, "timedelta")
    converted_mgr = self._mgr.convert(
        datetime=datetime,
        numeric=numeric,
        timedelta=timedelta,
        copy=True,
    )
    return self._constructor(converted_mgr).__finalize__(self)
@final
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
    """
    Attempt to infer better dtypes for object columns.

    Attempts soft conversion of object-dtyped columns, leaving
    non-object and unconvertible columns unchanged. The inference rules
    are the same as during normal Series/DataFrame construction.

    Returns
    -------
    converted : same type as input object

    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to numeric type.
    convert_dtypes : Convert argument to best possible dtype.

    Examples
    --------
    >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
    >>> df = df.iloc[1:]
    >>> df.dtypes
    A    object
    dtype: object
    >>> df.infer_objects().dtypes
    A    int64
    dtype: object
    """
    # numeric=False keeps this a soft conversion: Python objects are
    # still promoted to native numpy numeric types, but no string
    # parsing is attempted.
    inferred_mgr = self._mgr.convert(
        datetime=True, numeric=False, timedelta=True, copy=True
    )
    return self._constructor(inferred_mgr).__finalize__(
        self, method="infer_objects"
    )
@final
def convert_dtypes(
    self: FrameOrSeries,
    infer_objects: bool_t = True,
    convert_string: bool_t = True,
    convert_integer: bool_t = True,
    convert_boolean: bool_t = True,
    convert_floating: bool_t = True,
) -> FrameOrSeries:
    """
    Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    infer_objects : bool, default True
        Whether object dtypes should be converted to the best possible types.
    convert_string : bool, default True
        Whether object dtypes should be converted to ``StringDtype()``.
    convert_integer : bool, default True
        Whether, if possible, conversion can be done to integer extension
        types.
    convert_boolean : bool, default True
        Whether object dtypes should be converted to ``BooleanDtypes()``.
    convert_floating : bool, default True
        Whether, if possible, conversion can be done to floating extension
        types. If `convert_integer` is also True, preference will be given
        to integer dtypes if the floats can be faithfully casted to
        integers.

        .. versionadded:: 1.2.0

    Returns
    -------
    Series or DataFrame
        Copy of input object with new dtype.

    See Also
    --------
    infer_objects : Infer dtypes of objects.
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to a numeric type.

    Notes
    -----
    By default, ``convert_dtypes`` will attempt to convert a Series (or each
    Series in a DataFrame) to dtypes that support ``pd.NA``. By using the
    options ``convert_string``, ``convert_integer``, ``convert_boolean`` and
    ``convert_floating``, it is possible to turn off individual conversions
    to ``StringDtype``, the integer extension types, ``BooleanDtype``
    or floating extension types, respectively.

    For object-dtyped columns, if ``infer_objects`` is ``True``, use the
    inference rules as during normal Series/DataFrame construction. Then, if
    possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate
    integer or floating extension type, otherwise leave as ``object``.

    If the dtype is integer, convert to an appropriate integer extension
    type.

    If the dtype is numeric, and consists of all integers, convert to an
    appropriate integer extension type. Otherwise, convert to an
    appropriate floating extension type.

    .. versionchanged:: 1.2
        Starting with pandas 1.2, this method also converts float columns
        to the nullable floating extension type.

    In the future, as new dtypes are added that support ``pd.NA``, the
    results of this method will change to support those new dtypes.
    """
    if self.ndim == 1:
        # Series: a single conversion call suffices.
        return self._convert_dtypes(
            infer_objects,
            convert_string,
            convert_integer,
            convert_boolean,
            convert_floating,
        )

    # DataFrame: convert column-by-column and reassemble, preserving
    # column metadata via concat.
    results = [
        col._convert_dtypes(
            infer_objects,
            convert_string,
            convert_integer,
            convert_boolean,
            convert_floating,
        )
        for _, col in self.items()
    ]
    if results:
        return concat(results, axis=1, copy=False)
    # Empty frame: nothing to convert; return a copy for consistency.
    return self.copy()
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
    self: FrameOrSeries,
    value=None,
    method=None,
    axis=None,
    inplace: bool_t = False,
    limit=None,
    downcast=None,
) -> FrameOrSeries | None:
    """
    Fill NA/NaN values using the specified method.

    Parameters
    ----------
    value : scalar, dict, Series, or DataFrame
        Value to use to fill holes (e.g. 0), alternately a
        dict/Series/DataFrame of values specifying which value to use for
        each index (for a Series) or column (for a DataFrame). Values not
        in the dict/Series/DataFrame will not be filled. This value cannot
        be a list.
    method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
        Method to use for filling holes in reindexed Series:
        pad / ffill propagate the last valid observation forward to the
        next valid one; backfill / bfill use the next valid observation
        to fill the gap.
    axis : {axes_single_arg}
        Axis along which to fill missing values.
    inplace : bool, default False
        If True, fill in-place. Note: this will modify any
        other views on this object (e.g., a no-copy slice for a column in a
        DataFrame).
    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled. Must be greater than 0 if not None.
    downcast : dict, default is None
        A dict of item->dtype of what to downcast if possible,
        or the string 'infer' which will try to downcast to an appropriate
        equal type (e.g. float64 to int64 if possible).

    Returns
    -------
    {klass} or None
        Object with missing values filled or None if ``inplace=True``.

    See Also
    --------
    interpolate : Fill NaN values using interpolation.
    reindex : Conform object to new index.
    asfreq : Convert TimeSeries to specified frequency.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    value, method = validate_fillna_kwargs(value, method)

    self._consolidate_inplace()

    # Default the axis here rather than in the signature, so functions
    # examining the signature can detect whether it was explicitly set
    # (e.g. in groupby) (GH9221).
    if axis is None:
        axis = 0
    axis = self._get_axis_number(axis)

    if value is None:
        # Method-based fill (ffill/bfill).
        if not self._mgr.is_single_block and axis == 1:
            if inplace:
                raise NotImplementedError()
            filled = self.T.fillna(method=method, limit=limit).T

            # Need to downcast here because of all of the transposes.
            filled._mgr = filled._mgr.downcast()

            return filled

        filled_mgr = self._mgr.interpolate(
            method=method,
            axis=axis,
            limit=limit,
            inplace=inplace,
            coerce=True,
            downcast=downcast,
        )
    else:
        if self.ndim == 1:
            if isinstance(value, (dict, ABCSeries)):
                # Align the fill values with our own index first.
                value = create_series_with_explicit_dtype(
                    value, dtype_if_empty=object
                )
                value = value.reindex(self.index, copy=False)
                value = value._values
            elif is_list_like(value):
                raise TypeError(
                    '"value" parameter must be a scalar, dict '
                    "or Series, but you passed a "
                    f'"{type(value).__name__}"'
                )

            filled_mgr = self._mgr.fillna(
                value=value, limit=limit, inplace=inplace, downcast=downcast
            )

        elif isinstance(value, (dict, ABCSeries)):
            if axis == 1:
                raise NotImplementedError(
                    "Currently only can fill "
                    "with dict/Series column "
                    "by column"
                )

            out = self if inplace else self.copy()
            downcast_is_dict = isinstance(downcast, dict)
            for col_label, fill_val in value.items():
                if col_label not in out:
                    continue
                target = out[col_label]
                col_downcast = (
                    downcast.get(col_label) if downcast_is_dict else downcast
                )
                target.fillna(
                    fill_val, limit=limit, inplace=True, downcast=col_downcast
                )
            return out if not inplace else None

        elif not is_list_like(value):
            filled_mgr = self._mgr.fillna(
                value=value, limit=limit, inplace=inplace, downcast=downcast
            )
        elif isinstance(value, ABCDataFrame) and self.ndim == 2:
            # Frame-valued fill: keep non-NA entries, take from ``value``
            # elsewhere (aligned on labels).
            filled_mgr = self.where(self.notna(), value)._data
        else:
            raise ValueError(f"invalid fill value with a {type(value)}")

    out = self._constructor(filled_mgr)
    if inplace:
        return self._update_inplace(out)
    return out.__finalize__(self, method="fillna")
@doc(klass=_shared_doc_kwargs["klass"])
def ffill(
    self: FrameOrSeries,
    axis: None | Axis = None,
    inplace: bool_t = False,
    limit: None | int = None,
    downcast=None,
) -> FrameOrSeries | None:
    """
    Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.

    Returns
    -------
    {klass} or None
        Object with missing values filled or None if ``inplace=True``.
    """
    # Thin wrapper: all parameters are forwarded to fillna unchanged.
    return self.fillna(
        method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
    )

# Legacy alias kept for backwards compatibility.
pad = ffill
@doc(klass=_shared_doc_kwargs["klass"])
def bfill(
self: FrameOrSeries,
axis: None | Axis = None,
inplace: bool_t = False,
limit: None | int = None,
downcast=None,
) -> FrameOrSeries | None:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
backfill = bfill
    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self,
        to_replace=None,
        value=None,
        inplace: bool_t = False,
        limit: int | None = None,
        regex=False,
        method="pad",
    ):
        # Validate `to_replace` up front: only scalars, compilable regex
        # patterns, and list-likes (which includes dicts) are supported.
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        # `regex` may itself carry the patterns (dict/list/str), but only when
        # `to_replace` is None; otherwise it must be a plain bool flag.
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")
        self._consolidate_inplace()
        if value is None:
            # passing a single value that is scalar like
            # when value is None (GH5319), for compat
            if not is_dict_like(to_replace) and not is_dict_like(regex):
                to_replace = [to_replace]
            if isinstance(to_replace, (tuple, list)):
                # With no replacement value, fall back to fill-based
                # replacement (`method`), column-by-column for DataFrames.
                if isinstance(self, ABCDataFrame):
                    return self.apply(
                        self._constructor_sliced._replace_single,
                        args=(to_replace, method, inplace, limit),
                    )
                self = cast("Series", self)
                return self._replace_single(to_replace, method, inplace, limit)
            if not is_dict_like(to_replace):
                if not is_dict_like(regex):
                    raise TypeError(
                        'If "to_replace" and "value" are both None '
                        'and "to_replace" is not a list, then '
                        "regex must be a mapping"
                    )
                # Patterns were passed through `regex`; treat them as
                # `to_replace` with regex matching enabled.
                to_replace = regex
                regex = True
            items = list(to_replace.items())
            if items:
                keys, values = zip(*items)
            else:
                keys, values = ([], [])
            are_mappings = [is_dict_like(v) for v in values]
            if any(are_mappings):
                if not all(are_mappings):
                    raise TypeError(
                        "If a nested mapping is passed, all values "
                        "of the top level mapping must be mappings"
                    )
                # passed a nested dict/Series
                # Flatten {col: {old: new}} into parallel per-column lists
                # {col: [old, ...]} and {col: [new, ...]}.
                to_rep_dict = {}
                value_dict = {}
                for k, v in items:
                    keys, values = list(zip(*v.items())) or ([], [])
                    to_rep_dict[k] = list(keys)
                    value_dict[k] = list(values)
                to_replace, value = to_rep_dict, value_dict
            else:
                to_replace, value = keys, values
            # Re-enter with the normalized (to_replace, value) pair.
            return self.replace(
                to_replace, value, inplace=inplace, limit=limit, regex=regex
            )
        else:
            # need a non-zero len on all axes
            if not self.size:
                if inplace:
                    return
                return self.copy()
            if is_dict_like(to_replace):
                if is_dict_like(value):  # {'A' : NA} -> {'A' : 0}
                    # Note: Checking below for `in foo.keys()` instead of
                    # `in foo` is needed for when we have a Series and not dict
                    mapping = {
                        col: (to_replace[col], value[col])
                        for col in to_replace.keys()
                        if col in value.keys() and col in self
                    }
                    return self._replace_columnwise(mapping, inplace, regex)
                # {'A': NA} -> 0
                elif not is_list_like(value):
                    # Operate column-wise
                    if self.ndim == 1:
                        raise ValueError(
                            "Series.replace cannot use dict-like to_replace "
                            "and non-None value"
                        )
                    mapping = {
                        col: (to_rep, value) for col, to_rep in to_replace.items()
                    }
                    return self._replace_columnwise(mapping, inplace, regex)
                else:
                    raise TypeError("value argument must be scalar, dict, or Series")
            elif is_list_like(to_replace):
                if not is_list_like(value):
                    # e.g. to_replace = [NA, ''] and value is 0,
                    # so we replace NA with 0 and then replace '' with 0
                    value = [value] * len(to_replace)
                # e.g. we have to_replace = [NA, ''] and value = [0, 'missing']
                if len(to_replace) != len(value):
                    raise ValueError(
                        f"Replacement lists must match in length. "
                        f"Expecting {len(to_replace)} got {len(value)} "
                    )
                new_data = self._mgr.replace_list(
                    src_list=to_replace,
                    dest_list=value,
                    inplace=inplace,
                    regex=regex,
                )
            elif to_replace is None:
                if not (
                    is_re_compilable(regex)
                    or is_list_like(regex)
                    or is_dict_like(regex)
                ):
                    raise TypeError(
                        f"'regex' must be a string or a compiled regular expression "
                        f"or a list or dict of strings or regular expressions, "
                        f"you passed a {repr(type(regex).__name__)}"
                    )
                # Patterns came in through `regex`; re-enter with regex=True.
                return self.replace(
                    regex, value, inplace=inplace, limit=limit, regex=True
                )
            else:
                # dest iterable dict-like
                if is_dict_like(value):  # NA -> {'A' : 0, 'B' : -1}
                    # Operate column-wise
                    if self.ndim == 1:
                        raise ValueError(
                            "Series.replace cannot use dict-value and "
                            "non-None to_replace"
                        )
                    mapping = {col: (to_replace, val) for col, val in value.items()}
                    return self._replace_columnwise(mapping, inplace, regex)
                elif not is_list_like(value):  # NA -> 0
                    new_data = self._mgr.replace(
                        to_replace=to_replace, value=value, inplace=inplace, regex=regex
                    )
                else:
                    raise TypeError(
                        f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
                    )
        # Branches that did not return above produced a new block manager.
        result = self._constructor(new_data)
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="replace")
def interpolate(
self: FrameOrSeries,
method: str = "linear",
axis: Axis = 0,
limit: int | None = None,
inplace: bool_t = False,
limit_direction: str | None = None,
limit_area: str | None = None,
downcast: str | None = None,
**kwargs,
) -> FrameOrSeries | None:
"""
Fill NaN values using an interpolation method.
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
'cubicspline': Wrappers around the SciPy interpolation methods of
similar names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {{'forward', 'backward', 'both'}}, Optional
Consecutive NaNs will be filled in this direction.
If limit is specified:
* If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
'backwards'.
If 'limit' is not specified:
* If 'method' is 'backfill' or 'bfill', the default is 'backward'
* else the default is 'forward'
.. versionchanged:: 1.1.0
raises ValueError if `limit_direction` is 'forward' or 'both' and
method is 'backfill' or 'bfill'.
raises ValueError if `limit_direction` is 'backward' or 'both' and
method is 'pad' or 'ffill'.
limit_area : {{`None`, 'inside', 'outside'}}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
``**kwargs`` : optional
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame or None
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values or None if ``inplace=True``.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
fillna_methods = ["ffill", "bfill", "pad", "backfill"]
should_transpose = axis == 1 and method not in fillna_methods
obj = self.T if should_transpose else self
if obj.empty:
return self.copy()
if method not in fillna_methods:
axis = self._info_axis_number
if isinstance(obj.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
# Set `limit_direction` depending on `method`
if limit_direction is None:
limit_direction = (
"backward" if method in ("backfill", "bfill") else "forward"
)
else:
if method in ("pad", "ffill") and limit_direction != "forward":
raise ValueError(
f"`limit_direction` must be 'forward' for method `{method}`"
)
if method in ("backfill", "bfill") and limit_direction != "backward":
raise ValueError(
f"`limit_direction` must be 'backward' for method `{method}`"
)
if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(obj.index))
index = Index(index)
else:
index = obj.index
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index.dtype)
or is_datetime64_any_dtype(index.dtype)
or is_timedelta64_dtype(index.dtype)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
new_data = obj._mgr.interpolate(
method=method,
axis=axis,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
result = self._constructor(new_data)
if should_transpose:
result = result.T
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="interpolate")
# ----------------------------------------------------------------------
    # Timeseries methods
@final
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`)
If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
return self._constructor_sliced(
index=self.columns, name=where, dtype=np.float64
)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
self = cast("Series", self)
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
self = cast("DataFrame", self)
return self._constructor(np.nan, index=where, columns=self.columns)
else:
self = cast("DataFrame", self)
return self._constructor_sliced(
np.nan, index=self.columns, name=where[0]
)
locs = self.index.asof_locs(where, ~(nulls._values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
@doc(klass=_shared_doc_kwargs["klass"])
def isna(self: FrameOrSeries) -> FrameOrSeries:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, gets mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is an NA value.
See Also
--------
{klass}.isnull : Alias of isna.
{klass}.notna : Boolean inverse of isna.
{klass}.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
return isna(self).__finalize__(self, method="isna")
@doc(isna, klass=_shared_doc_kwargs["klass"])
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self, method="isnull")
@doc(klass=_shared_doc_kwargs["klass"])
def notna(self: FrameOrSeries) -> FrameOrSeries:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is not an NA value.
See Also
--------
{klass}.notnull : Alias of notna.
{klass}.isna : Boolean inverse of notna.
{klass}.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
return notna(self).__finalize__(self, method="notna")
@doc(notna, klass=_shared_doc_kwargs["klass"])
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notnull")
@final
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self._values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
return self._update_inplace(result)
else:
return result
    @final
    def _clip_with_one_bound(self, threshold, method, axis, inplace):
        # Clip against a single (possibly array-like) bound. `method` is the
        # bound comparison: self.le for an upper bound, self.ge for a lower
        # bound, so `method(threshold)` marks the values that are kept as-is.
        if axis is not None:
            axis = self._get_axis_number(axis)
        # method is self.le for upper bound and self.ge for lower bound
        if is_scalar(threshold) and is_number(threshold):
            # Numeric scalar bound: use the simpler scalar fast path.
            if method.__name__ == "le":
                return self._clip_with_scalar(None, threshold, inplace=inplace)
            return self._clip_with_scalar(threshold, None, inplace=inplace)
        # GH #15390
        # In order for where method to work, the threshold must
        # be transformed to NDFrame from other array like structure.
        if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
            if isinstance(self, ABCSeries):
                threshold = self._constructor(threshold, index=self.index)
            else:
                threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
        # GH 40420
        # Treat missing thresholds as no bounds, not clipping the values
        if is_list_like(threshold):
            # +/-inf never clips, so NA thresholds become "no bound" in the
            # comparison, while the original threshold is still the fill value.
            fill_value = np.inf if method.__name__ == "le" else -np.inf
            threshold_inf = threshold.fillna(fill_value)
        else:
            threshold_inf = threshold
        # Keep values that satisfy the bound or are NA themselves.
        subset = method(threshold_inf, axis=axis) | isna(self)
        # GH 40420
        return self.where(subset, threshold, axis=axis, inplace=inplace)
    def clip(
        self: FrameOrSeries,
        lower=None,
        upper=None,
        axis: Axis | None = None,
        inplace: bool_t = False,
        *args,
        **kwargs,
    ) -> FrameOrSeries | None:
        """
        Trim values at input threshold(s).
        Assigns values outside boundary to boundary values. Thresholds
        can be singular values or array like, and in the latter case
        the clipping is performed element-wise in the specified axis.
        Parameters
        ----------
        lower : float or array-like, default None
            Minimum threshold value. All values below this
            threshold will be set to it. A missing
            threshold (e.g `NA`) will not clip the value.
        upper : float or array-like, default None
            Maximum threshold value. All values above this
            threshold will be set to it. A missing
            threshold (e.g `NA`) will not clip the value.
        axis : int or str axis name, optional
            Align object with lower and upper along the given axis.
        inplace : bool, default False
            Whether to perform the operation in place on the data.
        *args, **kwargs
            Additional keywords have no effect but might be accepted
            for compatibility with numpy.
        Returns
        -------
        Series or DataFrame or None
            Same type as calling object with the values outside the
            clip boundaries replaced or None if ``inplace=True``.
        See Also
        --------
        Series.clip : Trim values at input threshold in series.
        DataFrame.clip : Trim values at input threshold in dataframe.
        numpy.clip : Clip (limit) the values in an array.
        Examples
        --------
        >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
        >>> df = pd.DataFrame(data)
        >>> df
           col_0  col_1
        0      9     -2
        1     -3     -7
        2      0      6
        3     -1      8
        4      5     -5
        Clips per column using lower and upper thresholds:
        >>> df.clip(-4, 6)
           col_0  col_1
        0      6     -2
        1     -3     -4
        2      0      6
        3     -1      6
        4      5     -4
        Clips using specific lower and upper thresholds per column element:
        >>> t = pd.Series([2, -4, -1, 6, 3])
        >>> t
        0    2
        1   -4
        2   -1
        3    6
        4    3
        dtype: int64
        >>> df.clip(t, t + 4, axis=0)
           col_0  col_1
        0      6      2
        1     -3     -4
        2      0      3
        3      6      8
        4      5      3
        Clips using specific lower threshold per column element, with missing values:
        >>> t = pd.Series([2, -4, np.NaN, 6, 3])
        >>> t
        0    2.0
        1   -4.0
        2    NaN
        3    6.0
        4    3.0
        dtype: float64
        >>> df.clip(t, axis=0)
           col_0  col_1
        0      9      2
        1     -3     -4
        2      0      6
        3      6      8
        4      5      3
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = nv.validate_clip_with_axis(axis, args, kwargs)
        if axis is not None:
            axis = self._get_axis_number(axis)
        # GH 17276
        # numpy doesn't like NaN as a clip value
        # so ignore
        # GH 19992
        # numpy doesn't drop a list-like bound containing NaN
        # An NA scalar bound, or a list-like bound of all NA, means "no bound";
        # a partially-NA list-like bound is kept (handled per-element later).
        isna_lower = isna(lower)
        if not is_list_like(lower):
            if np.any(isna_lower):
                lower = None
        elif np.all(isna_lower):
            lower = None
        isna_upper = isna(upper)
        if not is_list_like(upper):
            if np.any(isna_upper):
                upper = None
        elif np.all(isna_upper):
            upper = None
        # GH 2747 (arguments were reversed)
        if (
            lower is not None
            and upper is not None
            and is_scalar(lower)
            and is_scalar(upper)
        ):
            lower, upper = min(lower, upper), max(lower, upper)
        # fast-path for scalars
        if (lower is None or (is_scalar(lower) and is_number(lower))) and (
            upper is None or (is_scalar(upper) and is_number(upper))
        ):
            return self._clip_with_scalar(lower, upper, inplace=inplace)
        result = self
        if lower is not None:
            result = result._clip_with_one_bound(
                lower, method=self.ge, axis=axis, inplace=inplace
            )
        if upper is not None:
            if inplace:
                # the lower-bound step mutated self and returned None;
                # restart from self so the upper bound also applies in place
                result = self
            result = result._clip_with_one_bound(
                upper, method=self.le, axis=axis, inplace=inplace
            )
        return result
@doc(**_shared_doc_kwargs)
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: str | None = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert time series to specified frequency.
Returns the original data conformed to a new index with the specified
frequency.
If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index
is the result of transforming the original index with
:meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index
will map one-to-one to the new index).
Otherwise, the new index will be equivalent to ``pd.date_range(start, end,
freq=freq)`` where ``start`` and ``end`` are, respectively, the first and
last entries in the original index (see :func:`pandas.date_range`). The
values corresponding to any timesteps in the new index which were not present
in the original index will be null (``NaN``), unless a method for filling
such unknowns is provided (see the ``method`` parameter below).
The :meth:`resample` method is more appropriate if an operation on each group of
timesteps (such as an aggregate) is necessary to represent the data at the new
frequency.
Parameters
----------
freq : DateOffset or str
Frequency DateOffset or string.
method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {{'start', 'end'}}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
{klass}
{klass} object reindexed to the specified frequency.
See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({{'s': series}})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
@final
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
@final
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_between_time(
start_time, end_time, include_start=include_start, include_end=include_end
)
return self._take_with_is_copy(indexer, axis=axis)
@doc(**_shared_doc_kwargs)
def resample(
    self,
    rule,
    axis=0,
    closed: str | None = None,
    label: str | None = None,
    convention: str = "start",
    kind: str | None = None,
    loffset=None,
    base: int | None = None,
    on=None,
    level=None,
    origin: str | TimestampConvertibleTypes = "start_day",
    offset: TimedeltaConvertibleTypes | None = None,
) -> Resampler:
    """
    Resample time-series data.

    Convenience method for frequency conversion and resampling of time series.
    The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`,
    or `TimedeltaIndex`), or the caller must pass the label of a datetime-like
    series/index to the ``on``/``level`` keyword parameter.

    Parameters
    ----------
    rule : DateOffset, Timedelta or str
        The offset string or object representing target conversion.
    axis : {{0 or 'index', 1 or 'columns'}}, default 0
        Which axis to use for up- or down-sampling. For `Series` this
        will default to 0, i.e. along the rows. Must be
        `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
    closed : {{'right', 'left'}}, default None
        Which side of bin interval is closed. The default is 'left'
        for all frequency offsets except for 'M', 'A', 'Q', 'BM',
        'BA', 'BQ', and 'W' which all have a default of 'right'.
    label : {{'right', 'left'}}, default None
        Which bin edge label to label bucket with. The default is 'left'
        for all frequency offsets except for 'M', 'A', 'Q', 'BM',
        'BA', 'BQ', and 'W' which all have a default of 'right'.
    convention : {{'start', 'end', 's', 'e'}}, default 'start'
        For `PeriodIndex` only, controls whether to use the start or
        end of `rule`.
    kind : {{'timestamp', 'period'}}, optional, default None
        Pass 'timestamp' to convert the resulting index to a
        `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
        By default the input representation is retained.
    loffset : timedelta, default None
        Adjust the resampled time labels.

        .. deprecated:: 1.1.0
            You should add the loffset to the `df.index` after the resample.
            See below.

    base : int, default 0
        For frequencies that evenly subdivide 1 day, the "origin" of the
        aggregated intervals. For example, for '5min' frequency, base could
        range from 0 through 4. Defaults to 0.

        .. deprecated:: 1.1.0
            The new arguments that you should use are 'offset' or 'origin'.

    on : str, optional
        For a DataFrame, column to use instead of index for resampling.
        Column must be datetime-like.
    level : str or int, optional
        For a MultiIndex, level (name or number) to use for
        resampling. `level` must be datetime-like.
    origin : {{'epoch', 'start', 'start_day', 'end', 'end_day'}}, Timestamp
        or str, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin
        must match the timezone of the index.
        If a timestamp is not used, these values are also supported:

        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries

        .. versionadded:: 1.1.0

        - 'end': `origin` is the last value of the timeseries
        - 'end_day': `origin` is the ceiling midnight of the last day

        .. versionadded:: 1.3.0

    offset : Timedelta or str, default is None
        An offset timedelta added to the origin.

        .. versionadded:: 1.1.0

    Returns
    -------
    pandas.core.Resampler
        :class:`~pandas.core.Resampler` object.

    See Also
    --------
    Series.resample : Resample a Series.
    DataFrame.resample : Resample a DataFrame.
    groupby : Group {klass} by mapping, function, label, or list of labels.
    asfreq : Reindex a {klass} with the given frequency without grouping.

    Notes
    -----
    See the `user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__
    for more.

    To learn more about the offset strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.

    Examples
    --------
    Start by creating a series with 9 one minute timestamps.

    >>> index = pd.date_range('1/1/2000', periods=9, freq='T')
    >>> series = pd.Series(range(9), index=index)
    >>> series
    2000-01-01 00:00:00    0
    2000-01-01 00:01:00    1
    2000-01-01 00:02:00    2
    2000-01-01 00:03:00    3
    2000-01-01 00:04:00    4
    2000-01-01 00:05:00    5
    2000-01-01 00:06:00    6
    2000-01-01 00:07:00    7
    2000-01-01 00:08:00    8
    Freq: T, dtype: int64

    Downsample the series into 3 minute bins and sum the values
    of the timestamps falling into a bin.

    >>> series.resample('3T').sum()
    2000-01-01 00:00:00     3
    2000-01-01 00:03:00    12
    2000-01-01 00:06:00    21
    Freq: 3T, dtype: int64

    Downsample the series into 3 minute bins as above, but label each
    bin using the right edge instead of the left. Please note that the
    value in the bucket used as the label is not included in the bucket,
    which it labels. For example, in the original series the
    bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
    value in the resampled bucket with the label ``2000-01-01 00:03:00``
    does not include 3 (if it did, the summed value would be 6, not 3).
    To include this value close the right side of the bin interval as
    illustrated in the example below this one.

    >>> series.resample('3T', label='right').sum()
    2000-01-01 00:03:00     3
    2000-01-01 00:06:00    12
    2000-01-01 00:09:00    21
    Freq: 3T, dtype: int64

    Downsample the series into 3 minute bins as above, but close the right
    side of the bin interval.

    >>> series.resample('3T', label='right', closed='right').sum()
    2000-01-01 00:00:00     0
    2000-01-01 00:03:00     6
    2000-01-01 00:06:00    15
    2000-01-01 00:09:00    15
    Freq: 3T, dtype: int64

    Upsample the series into 30 second bins.

    >>> series.resample('30S').asfreq()[0:5]   # Select first 5 rows
    2000-01-01 00:00:00    0.0
    2000-01-01 00:00:30    NaN
    2000-01-01 00:01:00    1.0
    2000-01-01 00:01:30    NaN
    2000-01-01 00:02:00    2.0
    Freq: 30S, dtype: float64

    Upsample the series into 30 second bins and fill the ``NaN``
    values using the ``pad`` method.

    >>> series.resample('30S').pad()[0:5]
    2000-01-01 00:00:00    0
    2000-01-01 00:00:30    0
    2000-01-01 00:01:00    1
    2000-01-01 00:01:30    1
    2000-01-01 00:02:00    2
    Freq: 30S, dtype: int64

    Upsample the series into 30 second bins and fill the
    ``NaN`` values using the ``bfill`` method.

    >>> series.resample('30S').bfill()[0:5]
    2000-01-01 00:00:00    0
    2000-01-01 00:00:30    1
    2000-01-01 00:01:00    1
    2000-01-01 00:01:30    2
    2000-01-01 00:02:00    2
    Freq: 30S, dtype: int64

    Pass a custom function via ``apply``

    >>> def custom_resampler(arraylike):
    ...     return np.sum(arraylike) + 5
    ...
    >>> series.resample('3T').apply(custom_resampler)
    2000-01-01 00:00:00     8
    2000-01-01 00:03:00    17
    2000-01-01 00:06:00    26
    Freq: 3T, dtype: int64

    For a Series with a PeriodIndex, the keyword `convention` can be
    used to control whether to use the start or end of `rule`.

    Resample a year by quarter using 'start' `convention`. Values are
    assigned to the first quarter of the period.

    >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
    ...                                             freq='A',
    ...                                             periods=2))
    >>> s
    2012    1
    2013    2
    Freq: A-DEC, dtype: int64
    >>> s.resample('Q', convention='start').asfreq()
    2012Q1    1.0
    2012Q2    NaN
    2012Q3    NaN
    2012Q4    NaN
    2013Q1    2.0
    2013Q2    NaN
    2013Q3    NaN
    2013Q4    NaN
    Freq: Q-DEC, dtype: float64

    Resample quarters by month using 'end' `convention`. Values are
    assigned to the last month of the period.

    >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
    ...                                                   freq='Q',
    ...                                                   periods=4))
    >>> q
    2018Q1    1
    2018Q2    2
    2018Q3    3
    2018Q4    4
    Freq: Q-DEC, dtype: int64
    >>> q.resample('M', convention='end').asfreq()
    2018-03    1.0
    2018-04    NaN
    2018-05    NaN
    2018-06    2.0
    2018-07    NaN
    2018-08    NaN
    2018-09    3.0
    2018-10    NaN
    2018-11    NaN
    2018-12    4.0
    Freq: M, dtype: float64

    For DataFrame objects, the keyword `on` can be used to specify the
    column instead of the index for resampling.

    >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
    ...      'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
    >>> df = pd.DataFrame(d)
    >>> df['week_starting'] = pd.date_range('01/01/2018',
    ...                                     periods=8,
    ...                                     freq='W')
    >>> df
       price  volume week_starting
    0     10      50    2018-01-07
    1     11      60    2018-01-14
    2      9      40    2018-01-21
    3     13     100    2018-01-28
    4     14      50    2018-02-04
    5     18     100    2018-02-11
    6     17      40    2018-02-18
    7     19      50    2018-02-25
    >>> df.resample('M', on='week_starting').mean()
                   price  volume
    week_starting
    2018-01-31     10.75    62.5
    2018-02-28     17.00    60.0

    For a DataFrame with MultiIndex, the keyword `level` can be used to
    specify on which level the resampling needs to take place.

    >>> days = pd.date_range('1/1/2000', periods=4, freq='D')
    >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
    ...       'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
    >>> df2 = pd.DataFrame(
    ...     d2,
    ...     index=pd.MultiIndex.from_product(
    ...         [days, ['morning', 'afternoon']]
    ...     )
    ... )
    >>> df2
                          price  volume
    2000-01-01 morning       10      50
               afternoon     11      60
    2000-01-02 morning        9      40
               afternoon     13     100
    2000-01-03 morning       14      50
               afternoon     18     100
    2000-01-04 morning       17      40
               afternoon     19      50
    >>> df2.resample('D', level=0).sum()
                price  volume
    2000-01-01     21     110
    2000-01-02     22     140
    2000-01-03     32     150
    2000-01-04     36      90

    If you want to adjust the start of the bins based on a fixed timestamp:

    >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
    >>> rng = pd.date_range(start, end, freq='7min')
    >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
    >>> ts
    2000-10-01 23:30:00     0
    2000-10-01 23:37:00     3
    2000-10-01 23:44:00     6
    2000-10-01 23:51:00     9
    2000-10-01 23:58:00    12
    2000-10-02 00:05:00    15
    2000-10-02 00:12:00    18
    2000-10-02 00:19:00    21
    2000-10-02 00:26:00    24
    Freq: 7T, dtype: int64

    >>> ts.resample('17min').sum()
    2000-10-01 23:14:00     0
    2000-10-01 23:31:00     9
    2000-10-01 23:48:00    21
    2000-10-02 00:05:00    54
    2000-10-02 00:22:00    24
    Freq: 17T, dtype: int64

    >>> ts.resample('17min', origin='epoch').sum()
    2000-10-01 23:18:00     0
    2000-10-01 23:35:00    18
    2000-10-01 23:52:00    27
    2000-10-02 00:09:00    39
    2000-10-02 00:26:00    24
    Freq: 17T, dtype: int64

    >>> ts.resample('17min', origin='2000-01-01').sum()
    2000-10-01 23:24:00     3
    2000-10-01 23:41:00    15
    2000-10-01 23:58:00    45
    2000-10-02 00:15:00    45
    Freq: 17T, dtype: int64

    If you want to adjust the start of the bins with an `offset` Timedelta, the two
    following lines are equivalent:

    >>> ts.resample('17min', origin='start').sum()
    2000-10-01 23:30:00     9
    2000-10-01 23:47:00    21
    2000-10-02 00:04:00    54
    2000-10-02 00:21:00    24
    Freq: 17T, dtype: int64

    >>> ts.resample('17min', offset='23h30min').sum()
    2000-10-01 23:30:00     9
    2000-10-01 23:47:00    21
    2000-10-02 00:04:00    54
    2000-10-02 00:21:00    24
    Freq: 17T, dtype: int64

    If you want to take the largest Timestamp as the end of the bins:

    >>> ts.resample('17min', origin='end').sum()
    2000-10-01 23:35:00     0
    2000-10-01 23:52:00    18
    2000-10-02 00:09:00    27
    2000-10-02 00:26:00    63
    Freq: 17T, dtype: int64

    In contrast with the `start_day`, you can use `end_day` to take the ceiling
    midnight of the largest Timestamp as the end of the bins and drop the bins
    not containing data:

    >>> ts.resample('17min', origin='end_day').sum()
    2000-10-01 23:38:00     3
    2000-10-01 23:55:00    15
    2000-10-02 00:12:00    45
    2000-10-02 00:29:00    45
    Freq: 17T, dtype: int64

    To replace the use of the deprecated `base` argument, you can now use `offset`,
    in this example it is equivalent to have `base=2`:

    >>> ts.resample('17min', offset='2min').sum()
    2000-10-01 23:16:00     0
    2000-10-01 23:33:00     9
    2000-10-01 23:50:00    36
    2000-10-02 00:07:00    39
    2000-10-02 00:24:00    24
    Freq: 17T, dtype: int64

    To replace the use of the deprecated `loffset` argument:

    >>> from pandas.tseries.frequencies import to_offset
    >>> loffset = '19min'
    >>> ts_out = ts.resample('17min').sum()
    >>> ts_out.index = ts_out.index + to_offset(loffset)
    >>> ts_out
    2000-10-01 23:33:00     0
    2000-10-01 23:50:00     9
    2000-10-02 00:07:00    21
    2000-10-02 00:24:00    54
    2000-10-02 00:41:00    24
    Freq: 17T, dtype: int64
    """
    # Imported locally (not at module level) to avoid a circular import:
    # pandas.core.resample itself imports from this module.
    from pandas.core.resample import get_resampler

    axis = self._get_axis_number(axis)
    # All argument validation (including the deprecated loffset/base
    # handling and the per-frequency closed/label defaults) happens inside
    # get_resampler, which returns a Resampler wrapping ``self``.
    return get_resampler(
        self,
        freq=rule,
        label=label,
        closed=closed,
        axis=axis,
        kind=kind,
        loffset=loffset,
        convention=convention,
        base=base,
        key=on,
        level=level,
        origin=origin,
        offset=offset,
    )
@final
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
    """
    Select initial periods of time series data based on a date offset.

    For an object indexed by a :class:`DatetimeIndex`, return the rows
    whose index falls within ``offset`` of the first index value.  Note
    the window is measured in calendar terms, not in observed rows.

    Parameters
    ----------
    offset : str, DateOffset or dateutil.relativedelta
        The offset length of the data that will be selected. For instance,
        '1M' will display all the rows having their index within the first
        month.

    Returns
    -------
    Series or DataFrame
        A subset of the caller.

    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`.

    See Also
    --------
    last : Select final periods of time series based on a date offset.
    at_time : Select values at a particular time of the day.
    between_time : Select values between particular times of the day.

    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts.first('3D')
                A
    2018-04-09  1
    2018-04-11  2
    """
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError("'first' only supports a DatetimeIndex index")

    # Nothing to select from an empty frame/series.
    if len(self.index) == 0:
        return self

    offset = to_offset(offset)
    anchor = self.index[0]
    if isinstance(offset, Tick) or not offset.is_on_offset(anchor):
        cutoff = anchor + offset
    else:
        # GH#29623 if first value is end of period, remove offset with n = 1
        # before adding the real offset
        cutoff = anchor - offset.base + offset

    # Tick-like, e.g. 3 weeks: exclude the endpoint when it is an actual
    # index label, by slicing positionally up to (not including) it.
    if isinstance(offset, Tick) and cutoff in self.index:
        stop = self.index.searchsorted(cutoff, side="left")
        return self.iloc[:stop]

    return self.loc[:cutoff]
@final
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
    """
    Select final periods of time series data based on a date offset.

    For an object with a sorted :class:`DatetimeIndex`, return the rows
    whose index falls within ``offset`` of the last index value.  Note
    the window is measured in calendar terms, not in observed rows.

    Parameters
    ----------
    offset : str, DateOffset, dateutil.relativedelta
        The offset length of the data that will be selected. For instance,
        '3D' will display all the rows having their index within the last
        3 days.

    Returns
    -------
    Series or DataFrame
        A subset of the caller.

    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`.

    See Also
    --------
    first : Select initial periods of time series based on a date offset.
    at_time : Select values at a particular time of the day.
    between_time : Select values between particular times of the day.

    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts.last('3D')
                A
    2018-04-13  3
    2018-04-15  4
    """
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError("'last' only supports a DatetimeIndex index")

    # Nothing to select from an empty frame/series.
    if len(self.index) == 0:
        return self

    # First position strictly after (last label - offset) marks the window.
    cutoff = self.index[-1] - to_offset(offset)
    first_pos = self.index.searchsorted(cutoff, side="right")
    # error: Slice index must be an integer or None
    return self.iloc[first_pos:]  # type: ignore[misc]
@final
def rank(
    self: FrameOrSeries,
    axis=0,
    method: str = "average",
    numeric_only: bool_t | None = None,
    na_option: str = "keep",
    ascending: bool_t = True,
    pct: bool_t = False,
) -> FrameOrSeries:
    """
    Compute numerical data ranks (1 through n) along axis.

    Equal values receive, by default, the average of the ranks they span;
    alternative tie-breaking strategies are available via ``method``.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Index to direct ranking.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        How to rank the group of records that have the same value
        (i.e. ties):

        * average: average rank of the group
        * min: lowest rank in the group
        * max: highest rank in the group
        * first: ranks assigned in order they appear in the array
        * dense: like 'min', but rank always increases by 1 between groups.

    numeric_only : bool, optional
        For DataFrame objects, rank only numeric columns if set to True.
    na_option : {'keep', 'top', 'bottom'}, default 'keep'
        How to rank NaN values:

        * keep: assign NaN rank to NaN values
        * top: assign lowest rank to NaN values
        * bottom: assign highest rank to NaN values

    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in percentile form.

    Returns
    -------
    same type as caller
        Return a Series or DataFrame with data ranks as values.

    See Also
    --------
    core.groupby.GroupBy.rank : Rank of values within each group.

    Examples
    --------
    >>> s = pd.Series([1, 4, 2, 4])
    >>> s.rank()
    0    1.0
    1    3.5
    2    2.0
    3    3.5
    dtype: float64
    """
    axis = self._get_axis_number(axis)

    if na_option not in {"keep", "top", "bottom"}:
        raise ValueError("na_option must be one of 'keep', 'top', or 'bottom'")

    def compute_ranks(frame):
        # Delegate the actual ranking to the shared algorithm, then wrap
        # the raw ndarray back into the caller's type with its axes.
        raw = algos.rank(
            frame.values,
            axis=axis,
            method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
        # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected
        # "Union[ArrayManager, BlockManager]"
        wrapped = self._constructor(
            raw, **frame._construct_axes_dict()  # type: ignore[arg-type]
        )
        return wrapped.__finalize__(self, method="rank")

    if numeric_only is None:
        # if numeric_only is None, and we can't get anything, we try with
        # numeric_only=True
        try:
            return compute_ranks(self)
        except TypeError:
            numeric_only = True

    target = self._get_numeric_data() if numeric_only else self
    return compute_ranks(target)
@doc(_shared_docs["compare"], klass=_shared_doc_kwargs["klass"])
def compare(
    self,
    other,
    align_axis: Axis = 1,
    keep_shape: bool_t = False,
    keep_equal: bool_t = False,
):
    # NOTE: the user-facing docstring is injected by the @doc decorator
    # from _shared_docs["compare"]; do not add an inline docstring here.
    from pandas.core.reshape.concat import concat

    # Only like-with-like comparisons are supported (Series vs Series,
    # DataFrame vs DataFrame).
    if type(self) is not type(other):
        cls_self, cls_other = type(self).__name__, type(other).__name__
        raise TypeError(
            f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'"
        )
    # True where the objects differ; a pair of NaNs counts as equal here.
    mask = ~((self == other) | (self.isna() & other.isna()))
    keys = ["self", "other"]
    if not keep_equal:
        # Blank out (NaN) positions whose values are equal.
        self = self.where(mask)
        other = other.where(mask)
    if not keep_shape:
        # Drop rows/columns (or elements, for Series) with no differences.
        if isinstance(self, ABCDataFrame):
            cmask = mask.any()
            rmask = mask.any(axis=1)
            self = self.loc[rmask, cmask]
            other = other.loc[rmask, cmask]
        else:
            self = self[mask]
            other = other[mask]
    if align_axis in (1, "columns"):  # This is needed for Series
        axis = 1
    else:
        axis = self._get_axis_number(align_axis)
    # Stack self/other along the requested axis, labelled by ``keys``.
    diff = concat([self, other], axis=axis, keys=keys)
    if axis >= self.ndim:
        # No need to reorganize data if stacking on new axis
        # This currently applies for stacking two Series on columns
        return diff
    ax = diff._get_axis(axis)
    ax_names = np.array(ax.names)
    # set index names to positions to avoid confusion
    ax.names = np.arange(len(ax_names))
    # bring self-other to inner level
    order = list(range(1, ax.nlevels)) + [0]
    if isinstance(diff, ABCDataFrame):
        diff = diff.reorder_levels(order, axis=axis)
    else:
        diff = diff.reorder_levels(order)
    # restore the index names in order
    diff._get_axis(axis=axis).names = ax_names[order]
    # reorder axis to keep things organized: interleave the self/other
    # halves so each original label's pair sits together.
    indices = (
        np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten()
    )
    diff = diff.take(indices, axis=axis)
    return diff
@doc(**_shared_doc_kwargs)
def align(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy=True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
    broadcast_axis=None,
):
    """
    Align two objects on their axes with the specified join method.

    Join method is specified for each axis Index.

    Parameters
    ----------
    other : DataFrame or Series
    join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
    axis : allowed axis of the other object, default None
        Align on index (0), columns (1), or both (None).
    level : int or level name, default None
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    copy : bool, default True
        Always returns new objects. If copy=False and no reindexing is
        required then original objects are returned.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
        Method to use for filling holes in reindexed Series:

        - pad / ffill: propagate last valid observation forward to next valid.
        - backfill / bfill: use NEXT valid observation to fill gap.

    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled. Must be greater than 0 if not None.
    fill_axis : {axes_single_arg}, default 0
        Filling axis, method and limit.
    broadcast_axis : {axes_single_arg}, default None
        Broadcast values along this axis, if aligning two objects of
        different dimensions.

    Returns
    -------
    (left, right) : ({klass}, type of other)
        Aligned objects.
    """
    method = missing.clean_fill_method(method)

    if broadcast_axis == 1 and self.ndim != other.ndim:
        # Mixed-dimension broadcast: replicate the Series across the
        # DataFrame's columns, then align frame-vs-frame.
        if isinstance(self, ABCSeries):
            expanded = self._constructor_expanddim(
                {col: self for col in other.columns},
                **other._construct_axes_dict(),
            )
            return expanded._align_frame(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        if isinstance(other, ABCSeries):
            expanded = other._constructor_expanddim(
                {col: other for col in self.columns},
                **self._construct_axes_dict(),
            )
            return self._align_frame(
                expanded,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )

    if axis is not None:
        axis = self._get_axis_number(axis)

    # Dispatch on the dimensionality of ``other``.
    if isinstance(other, ABCDataFrame):
        aligner = self._align_frame
    elif isinstance(other, ABCSeries):
        aligner = self._align_series
    else:  # pragma: no cover
        raise TypeError(f"unsupported type: {type(other)}")

    return aligner(
        other,
        join=join,
        axis=axis,
        level=level,
        copy=copy,
        fill_value=fill_value,
        method=method,
        limit=limit,
        fill_axis=fill_axis,
    )
@final
def _align_frame(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy: bool_t = True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
):
    """
    Shared implementation of ``align`` when ``other`` is a DataFrame.

    Joins the requested axes of ``self`` and ``other`` with ``join``,
    reindexes both sides onto the joined labels (optionally filling the
    holes created by reindexing), and returns the aligned
    ``(left, right)`` pair, each ``__finalize__``-d from its source.
    """
    # defaults
    join_index, join_columns = None, None
    ilidx, iridx = None, None  # row indexers for left / right
    clidx, cridx = None, None  # column indexers for left / right
    is_series = isinstance(self, ABCSeries)
    # Join rows unless alignment is restricted to axis 1 or the indexes
    # already match (in which case no reindexing is needed).
    if (axis is None or axis == 0) and not self.index.equals(other.index):
        join_index, ilidx, iridx = self.index.join(
            other.index, how=join, level=level, return_indexers=True
        )
    # Join columns (only when self is a DataFrame) under the same logic.
    if (
        (axis is None or axis == 1)
        and not is_series
        and not self.columns.equals(other.columns)
    ):
        join_columns, clidx, cridx = self.columns.join(
            other.columns, how=join, level=level, return_indexers=True
        )
    if is_series:
        reindexers = {0: [join_index, ilidx]}
    else:
        reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
    left = self._reindex_with_indexers(
        reindexers, copy=copy, fill_value=fill_value, allow_dups=True
    )
    # other must be always DataFrame
    right = other._reindex_with_indexers(
        {0: [join_index, iridx], 1: [join_columns, cridx]},
        copy=copy,
        fill_value=fill_value,
        allow_dups=True,
    )
    if method is not None:
        # Fill NaNs introduced by the reindex above.
        _left = left.fillna(method=method, axis=fill_axis, limit=limit)
        assert _left is not None  # needed for mypy
        left = _left
        right = right.fillna(method=method, axis=fill_axis, limit=limit)
    # if DatetimeIndex have different tz, convert to UTC
    left, right = _align_as_utc(left, right, join_index)
    return (
        left.__finalize__(self),
        right.__finalize__(other),
    )
@final
def _align_series(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy: bool_t = True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
):
    """
    Shared implementation of ``align`` when ``other`` is a Series.

    ``self`` may be a Series (series-vs-series alignment, axis 0 only) or
    a higher-dimensional object aligned against the Series along ``axis``.
    Returns the aligned ``(left, right)`` pair, each ``__finalize__``-d
    from its source object.
    """
    is_series = isinstance(self, ABCSeries)
    # series/series compat, other must always be a Series
    if is_series:
        if axis:
            # A Series only has axis 0; any other axis is a user error.
            raise ValueError("cannot align series to a series other than axis 0")
        # equal
        if self.index.equals(other.index):
            join_index, lidx, ridx = None, None, None
        else:
            join_index, lidx, ridx = self.index.join(
                other.index, how=join, level=level, return_indexers=True
            )
        left = self._reindex_indexer(join_index, lidx, copy)
        right = other._reindex_indexer(join_index, ridx, copy)
    else:
        # one has > 1 ndim
        fdata = self._mgr
        if axis in [0, 1]:
            join_index = self.axes[axis]
            lidx, ridx = None, None
            if not join_index.equals(other.index):
                join_index, lidx, ridx = join_index.join(
                    other.index, how=join, level=level, return_indexers=True
                )
            if lidx is not None:
                # Reindex the underlying manager along the block-manager
                # axis that corresponds to the user-facing ``axis``.
                bm_axis = self._get_block_manager_axis(axis)
                fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis)
        else:
            raise ValueError("Must specify axis=0 or 1")
        # Honor copy=True even when no reindexing happened.
        if copy and fdata is self._mgr:
            fdata = fdata.copy()
        left = self._constructor(fdata)
        if ridx is None:
            right = other
        else:
            right = other.reindex(join_index, level=level)
    # fill
    fill_na = notna(fill_value) or (method is not None)
    if fill_na:
        left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
        right = right.fillna(fill_value, method=method, limit=limit)
    # if DatetimeIndex have different tz, convert to UTC
    if is_series or (not is_series and axis == 0):
        left, right = _align_as_utc(left, right, join_index)
    return (
        left.__finalize__(self),
        right.__finalize__(other),
    )
@final
def _where(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
):
    """
    Equivalent to public method `where`, except that `other` is not
    applied as a function even if callable. Used in __setitem__.

    Parameters
    ----------
    cond : bool NDFrame, array-like, or callable
        Where True, keep the original value; elsewhere take the
        corresponding value from ``other``.
    other : scalar, NDFrame or array-like, default np.nan
        Replacement values (never invoked, even when callable).
    inplace : bool, default False
        If True, mutate ``self`` via the manager's ``putmask``; otherwise
        return a new object built via the manager's ``where``.
    axis : int, optional
        Alignment axis when ``other`` must be aligned/broadcast.
    level : int or level name, optional
        Alignment level when ``other`` must be aligned.
    errors : str, default "raise"
        Forwarded to the block manager's ``where``.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if axis is not None:
        axis = self._get_axis_number(axis)
    # align the cond to same shape as myself
    cond = com.apply_if_callable(cond, self)
    if isinstance(cond, NDFrame):
        cond, _ = cond.align(self, join="right", broadcast_axis=1, copy=False)
    else:
        if not hasattr(cond, "shape"):
            cond = np.asanyarray(cond)
        if cond.shape != self.shape:
            raise ValueError("Array conditional must be same shape as self")
        cond = self._constructor(cond, **self._construct_axes_dict())
    # make sure we are boolean
    # NaNs in the condition become True for inplace (keep existing value)
    # and False otherwise (take from `other`).
    fill_value = bool(inplace)
    cond = cond.fillna(fill_value)
    msg = "Boolean array expected for the condition, not {dtype}"
    if not cond.empty:
        if not isinstance(cond, ABCDataFrame):
            # This is a single-dimensional object.
            if not is_bool_dtype(cond):
                raise ValueError(msg.format(dtype=cond.dtype))
        else:
            for dt in cond.dtypes:
                if not is_bool_dtype(dt):
                    raise ValueError(msg.format(dtype=dt))
    else:
        # GH#21947 we have an empty DataFrame/Series, could be object-dtype
        cond = cond.astype(bool)
    # putmask writes where the mask is True, so the inplace path needs the
    # inverse of the keep-mask.
    cond = -cond if inplace else cond
    cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False)
    # try to align with other
    if isinstance(other, NDFrame):
        # align with me
        if other.ndim <= self.ndim:
            _, other = self.align(
                other,
                join="left",
                axis=axis,
                level=level,
                fill_value=None,
                copy=False,
            )
            # if we are NOT aligned, raise as we cannot where index
            if axis is None and not other._indexed_same(self):
                raise InvalidIndexError
            elif other.ndim < self.ndim:
                # TODO(EA2D): avoid object-dtype cast in EA case GH#38729
                other = other._values
                if axis == 0:
                    other = np.reshape(other, (-1, 1))
                elif axis == 1:
                    other = np.reshape(other, (1, -1))
                other = np.broadcast_to(other, self.shape)
        # slice me out of the other
        else:
            raise NotImplementedError(
                "cannot align with a higher dimensional NDFrame"
            )
    elif not isinstance(other, (MultiIndex, NDFrame)):
        # mainly just catching Index here
        other = extract_array(other, extract_numpy=True)
    if isinstance(other, (np.ndarray, ExtensionArray)):
        if other.shape != self.shape:
            if self.ndim != 1:
                # In the ndim == 1 case we may have
                # other length 1, which we treat as scalar (GH#2745, GH#4192)
                # or len(other) == icond.sum(), which we treat like
                # __setitem__ (GH#3235)
                raise ValueError(
                    "other must be the same shape as self when an ndarray"
                )
        # we are the same shape, so create an actual object for alignment
        else:
            # error: Argument 1 to "NDFrame" has incompatible type "ndarray";
            # expected "BlockManager"
            other = self._constructor(
                other, **self._construct_axes_dict()  # type: ignore[arg-type]
            )
    if axis is None:
        axis = 0
    # Whether the manager op should re-align `other` blockwise: only when
    # dimensions match, or when masking along the column axis.
    if self.ndim == getattr(other, "ndim", 0):
        align = True
    else:
        align = self._get_axis_number(axis) == 1
    if inplace:
        # we may have different type blocks come out of putmask, so
        # reconstruct the block manager
        self._check_inplace_setting(other)
        new_data = self._mgr.putmask(mask=cond, new=other, align=align)
        result = self._constructor(new_data)
        return self._update_inplace(result)
    else:
        new_data = self._mgr.where(
            other=other,
            cond=cond,
            align=align,
            errors=errors,
        )
        result = self._constructor(new_data)
        return result.__finalize__(self)
@doc(
    klass=_shared_doc_kwargs["klass"],
    cond="True",
    cond_rev="False",
    name="where",
    name_other="mask",
)
def where(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
    try_cast=lib.no_default,
):
    """
    Replace values where the condition is {cond_rev}.
    Parameters
    ----------
    cond : bool {klass}, array-like, or callable
        Where `cond` is {cond}, keep the original value. Where
        {cond_rev}, replace with corresponding value from `other`.
        If `cond` is callable, it is computed on the {klass} and
        should return boolean {klass} or array. The callable must
        not change input {klass} (though pandas doesn't check it).
    other : scalar, {klass}, or callable
        Entries where `cond` is {cond_rev} are replaced with
        corresponding value from `other`.
        If other is callable, it is computed on the {klass} and
        should return scalar or {klass}. The callable must not
        change input {klass} (though pandas doesn't check it).
    inplace : bool, default False
        Whether to perform the operation in place on the data.
    axis : int, default None
        Alignment axis if needed.
    level : int, default None
        Alignment level if needed.
    errors : str, {{'raise', 'ignore'}}, default 'raise'
        Note that currently this parameter won't affect
        the results and will always coerce to a suitable dtype.
        - 'raise' : allow exceptions to be raised.
        - 'ignore' : suppress exceptions. On error return original object.
    try_cast : bool, default None
        Try to cast the result back to the input type (if possible).
        .. deprecated:: 1.3.0
            Manually cast back if necessary.
    Returns
    -------
    Same type as caller or None if ``inplace=True``.
    See Also
    --------
    :func:`DataFrame.{name_other}` : Return an object of same shape as
        self.
    Notes
    -----
    The {name} method is an application of the if-then idiom. For each
    element in the calling DataFrame, if ``cond`` is ``{cond}`` the
    element is used; otherwise the corresponding element from the DataFrame
    ``other`` is used.
    The signature for :func:`DataFrame.where` differs from
    :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
    ``np.where(m, df1, df2)``.
    For further details and examples see the ``{name}`` documentation in
    :ref:`indexing <indexing.where_mask>`.
    Examples
    --------
    >>> s = pd.Series(range(5))
    >>> s.where(s > 0)
    0    NaN
    1    1.0
    2    2.0
    3    3.0
    4    4.0
    dtype: float64
    >>> s.mask(s > 0)
    0    0.0
    1    NaN
    2    NaN
    3    NaN
    4    NaN
    dtype: float64
    >>> s.where(s > 1, 10)
    0    10
    1    10
    2    2
    3    3
    4    4
    dtype: int64
    >>> s.mask(s > 1, 10)
    0    0
    1    1
    2    10
    3    10
    4    10
    dtype: int64
    >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
    >>> df
       A  B
    0  0  1
    1  2  3
    2  4  5
    3  6  7
    4  8  9
    >>> m = df % 3 == 0
    >>> df.where(m, -df)
       A  B
    0  0 -1
    1 -2  3
    2 -4 -5
    3  6 -7
    4 -8  9
    >>> df.where(m, -df) == np.where(m, df, -df)
          A     B
    0  True  True
    1  True  True
    2  True  True
    3  True  True
    4  True  True
    >>> df.where(m, -df) == df.mask(~m, -df)
          A     B
    0  True  True
    1  True  True
    2  True  True
    3  True  True
    4  True  True
    """
    # ``other`` may be a callable; resolve it against self before delegating.
    other = com.apply_if_callable(other, self)
    if try_cast is not lib.no_default:
        # Deprecated since 1.3.0; the keyword no longer has any effect.
        depr_msg = (
            "try_cast keyword is deprecated and will be removed in a "
            "future version"
        )
        warnings.warn(depr_msg, FutureWarning, stacklevel=4)
    # All the real work (alignment, masking, inplace handling) lives in _where.
    return self._where(cond, other, inplace, axis, level, errors=errors)
@final
@doc(
    where,
    klass=_shared_doc_kwargs["klass"],
    cond="False",
    cond_rev="True",
    name="mask",
    name_other="where",
)
def mask(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
    try_cast=lib.no_default,
):
    # Normalize arguments, then delegate to ``where`` with the condition
    # inverted: mask(cond) == where(~cond).
    inplace = validate_bool_kwarg(inplace, "inplace")
    cond = com.apply_if_callable(cond, self)
    if try_cast is not lib.no_default:
        # Deprecated since 1.3.0; the keyword no longer has any effect.
        depr_msg = (
            "try_cast keyword is deprecated and will be removed in a "
            "future version"
        )
        warnings.warn(depr_msg, FutureWarning, stacklevel=4)
    # see gh-21891: plain sequences (e.g. lists) do not support ``~``,
    # so coerce anything without __invert__ to an ndarray first.
    if not hasattr(cond, "__invert__"):
        cond = np.array(cond)
    inverted = ~cond
    return self.where(
        inverted,
        other=other,
        inplace=inplace,
        axis=axis,
        level=level,
        errors=errors,
    )
@doc(klass=_shared_doc_kwargs["klass"])
def shift(
    self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
    """
    Shift index by desired number of periods with an optional time `freq`.

    When `freq` is not passed, shift the index without realigning the data.
    If `freq` is passed (in this case, the index must be date or datetime,
    or it will raise a `NotImplementedError`), the index will be
    increased using the periods and the `freq`. `freq` can be inferred
    when specified as "infer" as long as either freq or inferred_freq
    attribute is set in the index.

    Parameters
    ----------
    periods : int
        Number of periods to shift. Can be positive or negative.
    freq : DateOffset, tseries.offsets, timedelta, or str, optional
        Offset to use from the tseries module or time rule (e.g. 'EOM').
        If `freq` is specified then the index values are shifted but the
        data is not realigned. That is, use `freq` if you would like to
        extend the index when shifting and preserve the original data.
        If `freq` is specified as "infer" then it will be inferred from
        the freq or inferred_freq attributes of the index. If neither of
        those attributes exist, a ValueError is thrown.
    axis : {{0 or 'index', 1 or 'columns', None}}, default 0
        Shift direction.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        the default depends on the dtype of `self`.
        For numeric data, ``np.nan`` is used.
        For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
        For extension dtypes, ``self.dtype.na_value`` is used.

        .. versionchanged:: 1.1.0

    Returns
    -------
    {klass}
        Copy of input object, shifted.

    See Also
    --------
    Index.shift : Shift values of Index.
    DatetimeIndex.shift : Shift values of DatetimeIndex.
    PeriodIndex.shift : Shift values of PeriodIndex.
    tshift : Shift the time index, using the index's frequency if
        available.

    Examples
    --------
    >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
    ...                    "Col2": [13, 23, 18, 33, 48],
    ...                    "Col3": [17, 27, 22, 37, 52]}},
    ...                   index=pd.date_range("2020-01-01", "2020-01-05"))
    >>> df
                Col1  Col2  Col3
    2020-01-01    10    13    17
    2020-01-02    20    23    27
    2020-01-03    15    18    22
    2020-01-04    30    33    37
    2020-01-05    45    48    52
    >>> df.shift(periods=3)
                Col1  Col2  Col3
    2020-01-01   NaN   NaN   NaN
    2020-01-02   NaN   NaN   NaN
    2020-01-03   NaN   NaN   NaN
    2020-01-04  10.0  13.0  17.0
    2020-01-05  20.0  23.0  27.0
    >>> df.shift(periods=1, axis="columns")
                Col1  Col2  Col3
    2020-01-01   NaN    10    13
    2020-01-02   NaN    20    23
    2020-01-03   NaN    15    18
    2020-01-04   NaN    30    33
    2020-01-05   NaN    45    48
    >>> df.shift(periods=3, fill_value=0)
                Col1  Col2  Col3
    2020-01-01     0     0     0
    2020-01-02     0     0     0
    2020-01-03     0     0     0
    2020-01-04    10    13    17
    2020-01-05    20    23    27
    >>> df.shift(periods=3, freq="D")
                Col1  Col2  Col3
    2020-01-04    10    13    17
    2020-01-05    20    23    27
    2020-01-06    15    18    22
    2020-01-07    30    33    37
    2020-01-08    45    48    52
    >>> df.shift(periods=3, freq="infer")
                Col1  Col2  Col3
    2020-01-04    10    13    17
    2020-01-05    20    23    27
    2020-01-06    15    18    22
    2020-01-07    30    33    37
    2020-01-08    45    48    52
    """
    # A zero shift is a no-op, but still return a copy so callers can
    # safely mutate the result.
    if periods == 0:
        return self.copy()
    if freq is None:
        # when freq is None, data is shifted, index is not
        axis = self._get_axis_number(axis)
        new_data = self._mgr.shift(
            periods=periods, axis=axis, fill_value=fill_value
        )
        return self._constructor(new_data).__finalize__(self, method="shift")
    # when freq is given, index is shifted, data is not
    index = self._get_axis(axis)
    if freq == "infer":
        # Try the explicit freq first, then the inferred one; fail loudly
        # if neither is available.
        freq = getattr(index, "freq", None)
        if freq is None:
            freq = getattr(index, "inferred_freq", None)
        if freq is None:
            msg = "Freq was not set in the index hence cannot be inferred"
            raise ValueError(msg)
    elif isinstance(freq, str):
        freq = to_offset(freq)
    if isinstance(index, PeriodIndex):
        # A PeriodIndex can only be shifted by its own frequency; any other
        # freq is rejected rather than silently reinterpreted.
        orig_freq = to_offset(index.freq)
        if freq != orig_freq:
            assert orig_freq is not None  # for mypy
            raise ValueError(
                f"Given freq {freq.rule_code} does not match "
                f"PeriodIndex freq {orig_freq.rule_code}"
            )
        new_ax = index.shift(periods)
    else:
        new_ax = index.shift(periods, freq)
    result = self.set_axis(new_ax, axis=axis)
    return result.__finalize__(self, method="shift")
@final
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
    """
    Equivalent to `shift` without copying data.

    The shifted data will not include the dropped periods and the
    shifted axis will be smaller than the original.

    .. deprecated:: 1.2.0
        slice_shift is deprecated,
        use DataFrame/Series.shift instead.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative.

    Returns
    -------
    shifted : same type as caller

    Notes
    -----
    While the `slice_shift` is faster than `shift`, you may pay for it
    later during alignment.
    """
    warnings.warn(
        "The 'slice_shift' method is deprecated "
        "and will be removed in a future version. "
        "You can use DataFrame/Series.shift instead",
        FutureWarning,
        stacklevel=2,
    )
    if periods == 0:
        return self
    # Drop ``periods`` entries from one end of the values and the opposite
    # end of the axis labels, so the data appears shifted without copying.
    if periods > 0:
        values_slc, labels_slc = slice(None, -periods), slice(periods, None)
    else:
        values_slc, labels_slc = slice(-periods, None), slice(None, periods)
    shifted = self._slice(values_slc, axis=axis)
    shifted_labels = self._get_axis(axis)[labels_slc]
    shifted.set_axis(shifted_labels, axis=axis, inplace=True)
    return shifted.__finalize__(self, method="slice_shift")
@final
def tshift(
    self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0
) -> FrameOrSeries:
    """
    Shift the time index, using the index's frequency if available.

    .. deprecated:: 1.1.0
        Use `shift` instead.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative.
    freq : DateOffset, timedelta, or str, default None
        Increment to use from the tseries module
        or time rule expressed as a string (e.g. 'EOM').
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        Corresponds to the axis that contains the Index.

    Returns
    -------
    shifted : Series/DataFrame

    Notes
    -----
    If freq is not specified then tries to use the freq or inferred_freq
    attributes of the index. If neither of those attributes exist, a
    ValueError is thrown
    """
    warnings.warn(
        (
            "tshift is deprecated and will be removed in a future version. "
            "Please use shift instead."
        ),
        FutureWarning,
        stacklevel=2,
    )
    # ``shift`` with freq="infer" reproduces the historical tshift behavior
    # of falling back to the index's own (or inferred) frequency.
    effective_freq = "infer" if freq is None else freq
    return self.shift(periods, effective_freq, axis)
def truncate(
    self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
    """
    Truncate a Series or DataFrame before and after some index value.

    This is a useful shorthand for boolean indexing based on index
    values above or below certain thresholds.

    Parameters
    ----------
    before : date, str, int
        Truncate all rows before this index value.
    after : date, str, int
        Truncate all rows after this index value.
    axis : {0 or 'index', 1 or 'columns'}, optional
        Axis to truncate. Truncates the index (rows) by default.
    copy : bool, default is True,
        Return a copy of the truncated section.

    Returns
    -------
    type of caller
        The truncated Series or DataFrame.

    Raises
    ------
    ValueError
        If the index is not sorted, or if ``before`` is later than
        ``after``.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by label.
    DataFrame.iloc : Select a subset of a DataFrame by position.

    Notes
    -----
    If the index being truncated contains only datetime values,
    `before` and `after` may be specified as strings instead of
    Timestamps.

    Examples
    --------
    >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
    ...                    'B': ['f', 'g', 'h', 'i', 'j'],
    ...                    'C': ['k', 'l', 'm', 'n', 'o']},
    ...                   index=[1, 2, 3, 4, 5])
    >>> df
       A  B  C
    1  a  f  k
    2  b  g  l
    3  c  h  m
    4  d  i  n
    5  e  j  o
    >>> df.truncate(before=2, after=4)
       A  B  C
    2  b  g  l
    3  c  h  m
    4  d  i  n

    The columns of a DataFrame can be truncated.

    >>> df.truncate(before="A", after="B", axis="columns")
       A  B
    1  a  f
    2  b  g
    3  c  h
    4  d  i
    5  e  j

    For Series, only rows can be truncated.

    >>> df['A'].truncate(before=2, after=4)
    2    b
    3    c
    4    d
    Name: A, dtype: object

    The index values in ``truncate`` can be datetimes or string
    dates.

    >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
    >>> df = pd.DataFrame(index=dates, data={'A': 1})
    >>> df.tail()
                         A
    2016-01-31 23:59:56  1
    2016-01-31 23:59:57  1
    2016-01-31 23:59:58  1
    2016-01-31 23:59:59  1
    2016-02-01 00:00:00  1
    >>> df.truncate(before=pd.Timestamp('2016-01-05'),
    ...             after=pd.Timestamp('2016-01-10')).tail()
                         A
    2016-01-09 23:59:56  1
    2016-01-09 23:59:57  1
    2016-01-09 23:59:58  1
    2016-01-09 23:59:59  1
    2016-01-10 00:00:00  1

    Because the index is a DatetimeIndex containing only dates, we can
    specify `before` and `after` as strings. They will be coerced to
    Timestamps before truncation.

    >>> df.truncate('2016-01-05', '2016-01-10').tail()
                         A
    2016-01-09 23:59:56  1
    2016-01-09 23:59:57  1
    2016-01-09 23:59:58  1
    2016-01-09 23:59:59  1
    2016-01-10 00:00:00  1

    Note that ``truncate`` assumes a 0 value for any unspecified time
    component (midnight). This differs from partial string slicing, which
    returns any partially matching dates.

    >>> df.loc['2016-01-05':'2016-01-10', :].tail()
                         A
    2016-01-10 23:59:55  1
    2016-01-10 23:59:56  1
    2016-01-10 23:59:57  1
    2016-01-10 23:59:58  1
    2016-01-10 23:59:59  1
    """
    # Default to the stat axis (rows for DataFrame/Series).
    if axis is None:
        axis = self._stat_axis_number
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)
    # GH 17935
    # Check that index is sorted
    if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
        raise ValueError("truncate requires a sorted index")
    # if we have a date index, convert to dates, otherwise
    # treat like a slice
    if ax._is_all_dates:
        from pandas.core.tools.datetimes import to_datetime
        before = to_datetime(before)
        after = to_datetime(after)
    if before is not None and after is not None and before > after:
        raise ValueError(f"Truncate: {after} must be after {before}")
    # For a monotonically decreasing axis the label slice runs high-to-low,
    # so swap the bounds before slicing.
    if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1:
        before, after = after, before
    # Slice only the chosen axis; every other axis gets a full slice.
    slicer = [slice(None, None)] * self._AXIS_LEN
    slicer[axis] = slice(before, after)
    result = self.loc[tuple(slicer)]
    if isinstance(ax, MultiIndex):
        # NOTE(review): replaces the axis with MultiIndex.truncate's result —
        # presumably so the MultiIndex levels reflect the bounds; confirm.
        setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
    if copy:
        result = result.copy()
    return result
@final
def tz_convert(
    self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
    """
    Convert tz-aware axis to target time zone.

    Parameters
    ----------
    tz : str or tzinfo object
    axis : the axis to convert
    level : int, str, default None
        If axis is a MultiIndex, convert a specific level. Otherwise
        must be None.
    copy : bool, default True
        Also make a copy of the underlying data.

    Returns
    -------
    {klass}
        Object with time zone converted axis.

    Raises
    ------
    TypeError
        If the axis is tz-naive.
    """
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)
    def _tz_convert(ax, tz):
        # Convert a single Index. Non-datetime axes (no ``tz_convert``
        # method) raise, except that an empty axis is silently replaced by
        # an empty DatetimeIndex in the target tz.
        if not hasattr(ax, "tz_convert"):
            if len(ax) > 0:
                ax_name = self._get_axis_name(axis)
                raise TypeError(
                    f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
                )
            else:
                ax = DatetimeIndex([], tz=tz)
        else:
            ax = ax.tz_convert(tz)
        return ax
    # if a level is given it must be a MultiIndex level or
    # equivalent to the axis name
    if isinstance(ax, MultiIndex):
        level = ax._get_level_number(level)
        new_level = _tz_convert(ax.levels[level], tz)
        ax = ax.set_levels(new_level, level=level)
    else:
        if level not in (None, 0, ax.name):
            raise ValueError(f"The level {level} is not valid")
        ax = _tz_convert(ax, tz)
    # Build the result on a (possibly shallow) copy and swap in the
    # converted axis rather than mutating self.
    result = self.copy(deep=copy)
    result = result.set_axis(ax, axis=axis, inplace=False)
    return result.__finalize__(self, method="tz_convert")
@final
def tz_localize(
    self: FrameOrSeries,
    tz,
    axis=0,
    level=None,
    copy: bool_t = True,
    ambiguous="raise",
    nonexistent: str = "raise",
) -> FrameOrSeries:
    """
    Localize tz-naive index of a Series or DataFrame to target time zone.

    This operation localizes the Index. To localize the values in a
    timezone-naive Series, use :meth:`Series.dt.tz_localize`.

    Parameters
    ----------
    tz : str or tzinfo
    axis : the axis to localize
    level : int, str, default None
        If axis ia a MultiIndex, localize a specific level. Otherwise
        must be None.
    copy : bool, default True
        Also make a copy of the underlying data.
    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        When clocks moved backward due to DST, ambiguous times may arise.
        For example in Central European Time (UTC+01), when going from
        03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
        00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
        `ambiguous` parameter dictates how ambiguous times should be
        handled.

        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False designates
          a non-DST time (note that this flag is only applicable for
          ambiguous times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous
          times.
    nonexistent : str, default 'raise'
        A nonexistent time does not exist in a particular timezone
        where clocks moved forward due to DST. Valid values are:

        - 'shift_forward' will shift the nonexistent time forward to the
          closest existing time
        - 'shift_backward' will shift the nonexistent time backward to the
          closest existing time
        - 'NaT' will return NaT where there are nonexistent times
        - timedelta objects will shift nonexistent times by the timedelta
        - 'raise' will raise an NonExistentTimeError if there are
          nonexistent times.

    Returns
    -------
    Series or DataFrame
        Same type as the input.

    Raises
    ------
    TypeError
        If the TimeSeries is tz-aware and tz is not None.

    Examples
    --------
    Localize local times:

    >>> s = pd.Series([1],
    ...               index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
    >>> s.tz_localize('CET')
    2018-09-15 01:30:00+02:00    1
    dtype: int64

    Be careful with DST changes. When there is sequential data, pandas
    can infer the DST time:

    >>> s = pd.Series(range(7),
    ...               index=pd.DatetimeIndex(['2018-10-28 01:30:00',
    ...                                       '2018-10-28 02:00:00',
    ...                                       '2018-10-28 02:30:00',
    ...                                       '2018-10-28 02:00:00',
    ...                                       '2018-10-28 02:30:00',
    ...                                       '2018-10-28 03:00:00',
    ...                                       '2018-10-28 03:30:00']))
    >>> s.tz_localize('CET', ambiguous='infer')
    2018-10-28 01:30:00+02:00    0
    2018-10-28 02:00:00+02:00    1
    2018-10-28 02:30:00+02:00    2
    2018-10-28 02:00:00+01:00    3
    2018-10-28 02:30:00+01:00    4
    2018-10-28 03:00:00+01:00    5
    2018-10-28 03:30:00+01:00    6
    dtype: int64

    In some cases, inferring the DST is impossible. In such cases, you can
    pass an ndarray to the ambiguous parameter to set the DST explicitly

    >>> s = pd.Series(range(3),
    ...               index=pd.DatetimeIndex(['2018-10-28 01:20:00',
    ...                                       '2018-10-28 02:36:00',
    ...                                       '2018-10-28 03:46:00']))
    >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
    2018-10-28 01:20:00+02:00    0
    2018-10-28 02:36:00+02:00    1
    2018-10-28 03:46:00+01:00    2
    dtype: int64

    If the DST transition causes nonexistent times, you can shift these
    dates forward or backward with a timedelta object or `'shift_forward'`
    or `'shift_backward'`.

    >>> s = pd.Series(range(2),
    ...               index=pd.DatetimeIndex(['2015-03-29 02:30:00',
    ...                                       '2015-03-29 03:30:00']))
    >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
    2015-03-29 03:00:00+02:00    0
    2015-03-29 03:30:00+02:00    1
    dtype: int64
    >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
    2015-03-29 01:59:59.999999999+01:00    0
    2015-03-29 03:30:00+02:00              1
    dtype: int64
    >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
    2015-03-29 03:30:00+02:00    0
    2015-03-29 03:30:00+02:00    1
    dtype: int64
    """
    # Validate ``nonexistent`` eagerly so a bad value fails before any work.
    nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
    if nonexistent not in nonexistent_options and not isinstance(
        nonexistent, timedelta
    ):
        raise ValueError(
            "The nonexistent argument must be one of 'raise', "
            "'NaT', 'shift_forward', 'shift_backward' or "
            "a timedelta object"
        )
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)
    def _tz_localize(ax, tz, ambiguous, nonexistent):
        # Localize a single Index. Non-datetime axes (no ``tz_localize``
        # method) raise, except that an empty axis is silently replaced by
        # an empty DatetimeIndex in the target tz.
        if not hasattr(ax, "tz_localize"):
            if len(ax) > 0:
                ax_name = self._get_axis_name(axis)
                raise TypeError(
                    f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
                )
            else:
                ax = DatetimeIndex([], tz=tz)
        else:
            ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
        return ax
    # if a level is given it must be a MultiIndex level or
    # equivalent to the axis name
    if isinstance(ax, MultiIndex):
        level = ax._get_level_number(level)
        new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
        ax = ax.set_levels(new_level, level=level)
    else:
        if level not in (None, 0, ax.name):
            raise ValueError(f"The level {level} is not valid")
        ax = _tz_localize(ax, tz, ambiguous, nonexistent)
    # Build the result on a (possibly shallow) copy and swap in the
    # localized axis rather than mutating self.
    result = self.copy(deep=copy)
    result = result.set_axis(ax, axis=axis, inplace=False)
    return result.__finalize__(self, method="tz_localize")
# ----------------------------------------------------------------------
# Numeric Methods
@final
def abs(self: FrameOrSeries) -> FrameOrSeries:
    """
    Return a Series/DataFrame with absolute numeric value of each element.

    This function only applies to elements that are all numeric.

    Returns
    -------
    abs
        Series/DataFrame containing the absolute value of each element.

    See Also
    --------
    numpy.absolute : Calculate the absolute value element-wise.

    Notes
    -----
    For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
    :math:`\\sqrt{ a^2 + b^2 }`.

    Examples
    --------
    Absolute numeric values in a Series.

    >>> s = pd.Series([-1.10, 2, -3.33, 4])
    >>> s.abs()
    0    1.10
    1    2.00
    2    3.33
    3    4.00
    dtype: float64

    Absolute numeric values in a Series with complex numbers.

    >>> s = pd.Series([1.2 + 1j])
    >>> s.abs()
    0    1.56205
    dtype: float64

    Absolute numeric values in a Series with a Timedelta element.

    >>> s = pd.Series([pd.Timedelta('1 days')])
    >>> s.abs()
    0   1 days
    dtype: timedelta64[ns]

    Select rows with data closest to certain value using argsort (from
    `StackOverflow <https://stackoverflow.com/a/17758115>`__).

    >>> df = pd.DataFrame({
    ...     'a': [4, 5, 6, 7],
    ...     'b': [10, 20, 30, 40],
    ...     'c': [100, 50, -30, -50]
    ... })
    >>> df
       a   b    c
    0  4  10  100
    1  5  20   50
    2  6  30  -30
    3  7  40  -50
    >>> df.loc[(df.c - 43).abs().argsort()]
       a   b    c
    1  5  20   50
    0  4  10  100
    2  6  30  -30
    3  7  40  -50
    """
    # Delegate to numpy's ufunc dispatch, which hands control back to the
    # pandas object and preserves its type and index.
    result = np.abs(self)
    # error: Incompatible return value type (got "ndarray[Any, dtype[Any]]",
    # expected "FrameOrSeries")
    return result  # type: ignore[return-value]
@final
def describe(
    self: FrameOrSeries,
    percentiles=None,
    include=None,
    exclude=None,
    datetime_is_numeric=False,
) -> FrameOrSeries:
    """
    Generate descriptive statistics.

    Descriptive statistics include those that summarize the central
    tendency, dispersion and shape of a
    dataset's distribution, excluding ``NaN`` values.
    Analyzes both numeric and object series, as well
    as ``DataFrame`` column sets of mixed data types. The output
    will vary depending on what is provided. Refer to the notes
    below for more detail.

    Parameters
    ----------
    percentiles : list-like of numbers, optional
        The percentiles to include in the output. All should
        fall between 0 and 1. The default is
        ``[.25, .5, .75]``, which returns the 25th, 50th, and
        75th percentiles.
    include : 'all', list-like of dtypes or None (default), optional
        A white list of data types to include in the result. Ignored
        for ``Series``. Here are the options:

        - 'all' : All columns of the input will be included in the output.
        - A list-like of dtypes : Limits the results to the
          provided data types.
          To limit the result to numeric types submit
          ``numpy.number``. To limit it instead to object columns submit
          the ``numpy.object`` data type. Strings
          can also be used in the style of
          ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
          select pandas categorical columns, use ``'category'``
        - None (default) : The result will include all numeric columns.
    exclude : list-like of dtypes or None (default), optional,
        A black list of data types to omit from the result. Ignored
        for ``Series``. Here are the options:

        - A list-like of dtypes : Excludes the provided data types
          from the result. To exclude numeric types submit
          ``numpy.number``. To exclude object columns submit the data
          type ``numpy.object``. Strings can also be used in the style of
          ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
          exclude pandas categorical columns, use ``'category'``
        - None (default) : The result will exclude nothing.
    datetime_is_numeric : bool, default False
        Whether to treat datetime dtypes as numeric. This affects statistics
        calculated for the column. For DataFrame input, this also
        controls whether datetime columns are included by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    Series or DataFrame
        Summary statistics of the Series or Dataframe provided.

    See Also
    --------
    DataFrame.count: Count number of non-NA/null observations.
    DataFrame.max: Maximum of the values in the object.
    DataFrame.min: Minimum of the values in the object.
    DataFrame.mean: Mean of the values.
    DataFrame.std: Standard deviation of the observations.
    DataFrame.select_dtypes: Subset of a DataFrame including/excluding
        columns based on their dtype.

    Notes
    -----
    For numeric data, the result's index will include ``count``,
    ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
    upper percentiles. By default the lower percentile is ``25`` and the
    upper percentile is ``75``. The ``50`` percentile is the
    same as the median.

    For object data (e.g. strings or timestamps), the result's index
    will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
    is the most common value. The ``freq`` is the most common value's
    frequency. Timestamps also include the ``first`` and ``last`` items.

    If multiple object values have the highest count, then the
    ``count`` and ``top`` results will be arbitrarily chosen from
    among those with the highest count.

    For mixed data types provided via a ``DataFrame``, the default is to
    return only an analysis of numeric columns. If the dataframe consists
    only of object and categorical data without any numeric columns, the
    default is to return an analysis of both the object and categorical
    columns. If ``include='all'`` is provided as an option, the result
    will include a union of attributes of each type.

    The `include` and `exclude` parameters can be used to limit
    which columns in a ``DataFrame`` are analyzed for the output.
    The parameters are ignored when analyzing a ``Series``.

    Examples
    --------
    Describing a numeric ``Series``.

    >>> s = pd.Series([1, 2, 3])
    >>> s.describe()
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    25%      1.5
    50%      2.0
    75%      2.5
    max      3.0
    dtype: float64

    Describing a categorical ``Series``.

    >>> s = pd.Series(['a', 'a', 'b', 'c'])
    >>> s.describe()
    count     4
    unique    3
    top       a
    freq      2
    dtype: object

    Describing a timestamp ``Series``.

    >>> s = pd.Series([
    ...     np.datetime64("2000-01-01"),
    ...     np.datetime64("2010-01-01"),
    ...     np.datetime64("2010-01-01")
    ... ])
    >>> s.describe(datetime_is_numeric=True)
    count                      3
    mean     2006-09-01 08:00:00
    min      2000-01-01 00:00:00
    25%      2004-12-31 12:00:00
    50%      2010-01-01 00:00:00
    75%      2010-01-01 00:00:00
    max      2010-01-01 00:00:00
    dtype: object

    Describing a ``DataFrame``. By default only numeric fields
    are returned.

    >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
    ...                    'numeric': [1, 2, 3],
    ...                    'object': ['a', 'b', 'c']
    ...                   })
    >>> df.describe()
           numeric
    count      3.0
    mean       2.0
    std        1.0
    min        1.0
    25%        1.5
    50%        2.0
    75%        2.5
    max        3.0

    Describing all columns of a ``DataFrame`` regardless of data type.

    >>> df.describe(include='all')  # doctest: +SKIP
           categorical  numeric object
    count            3      3.0      3
    unique           3      NaN      3
    top              f      NaN      a
    freq             1      NaN      1
    mean           NaN      2.0    NaN
    std            NaN      1.0    NaN
    min            NaN      1.0    NaN
    25%            NaN      1.5    NaN
    50%            NaN      2.0    NaN
    75%            NaN      2.5    NaN
    max            NaN      3.0    NaN

    Describing a column from a ``DataFrame`` by accessing it as
    an attribute.

    >>> df.numeric.describe()
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    25%      1.5
    50%      2.0
    75%      2.5
    max      3.0
    Name: numeric, dtype: float64

    Including only numeric columns in a ``DataFrame`` description.

    >>> df.describe(include=[np.number])
           numeric
    count      3.0
    mean       2.0
    std        1.0
    min        1.0
    25%        1.5
    50%        2.0
    75%        2.5
    max        3.0

    Including only string columns in a ``DataFrame`` description.

    >>> df.describe(include=[object])  # doctest: +SKIP
           object
    count       3
    unique      3
    top         a
    freq        1

    Including only categorical columns from a ``DataFrame`` description.

    >>> df.describe(include=['category'])
           categorical
    count            3
    unique           3
    top              d
    freq             1

    Excluding numeric columns from a ``DataFrame`` description.

    >>> df.describe(exclude=[np.number])  # doctest: +SKIP
           categorical object
    count            3      3
    unique           3      3
    top              f      a
    freq             1      1

    Excluding object columns from a ``DataFrame`` description.

    >>> df.describe(exclude=[object])  # doctest: +SKIP
           categorical  numeric
    count            3      3.0
    unique           3      NaN
    top              f      NaN
    freq             1      NaN
    mean           NaN      2.0
    std            NaN      1.0
    min            NaN      1.0
    25%            NaN      1.5
    50%            NaN      2.0
    75%            NaN      2.5
    max            NaN      3.0
    """
    # This method is a thin wrapper: all computation is delegated to
    # describe_ndframe (pandas.core.describe); this docstring is the
    # user-facing documentation for it.
    return describe_ndframe(
        obj=self,
        include=include,
        exclude=exclude,
        datetime_is_numeric=datetime_is_numeric,
        percentiles=percentiles,
    )
@final
def pct_change(
    self: FrameOrSeries,
    periods=1,
    fill_method="pad",
    limit=None,
    freq=None,
    **kwargs,
) -> FrameOrSeries:
    """
    Percentage change between the current and a prior element.

    Computes the percentage change from the immediately previous row by
    default. This is useful in comparing the percentage of change in a time
    series of elements.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming percent change.
    fill_method : str, default 'pad'
        How to handle NAs before computing percent changes.
    limit : int, default None
        The number of consecutive NAs to fill before stopping.
    freq : DateOffset, timedelta, or str, optional
        Increment to use from time series API (e.g. 'M' or BDay()).
    **kwargs
        Additional keyword arguments are passed into
        `DataFrame.shift` or `Series.shift`.

    Returns
    -------
    chg : Series or DataFrame
        The same type as the calling object.

    See Also
    --------
    Series.diff : Compute the difference of two elements in a Series.
    DataFrame.diff : Compute the difference of two elements in a DataFrame.
    Series.shift : Shift the index by some number of periods.
    DataFrame.shift : Shift the index by some number of periods.

    Examples
    --------
    **Series**

    >>> s = pd.Series([90, 91, 85])
    >>> s
    0    90
    1    91
    2    85
    dtype: int64

    >>> s.pct_change()
    0         NaN
    1    0.011111
    2   -0.065934
    dtype: float64

    >>> s.pct_change(periods=2)
    0         NaN
    1         NaN
    2   -0.055556
    dtype: float64

    See the percentage change in a Series where filling NAs with last
    valid observation forward to next valid.

    >>> s = pd.Series([90, 91, None, 85])
    >>> s
    0    90.0
    1    91.0
    2     NaN
    3    85.0
    dtype: float64

    >>> s.pct_change(fill_method='ffill')
    0         NaN
    1    0.011111
    2    0.000000
    3   -0.065934
    dtype: float64

    **DataFrame**

    Percentage change in French franc, Deutsche Mark, and Italian lira from
    1980-01-01 to 1980-03-01.

    >>> df = pd.DataFrame({
    ...     'FR': [4.0405, 4.0963, 4.3149],
    ...     'GR': [1.7246, 1.7482, 1.8519],
    ...     'IT': [804.74, 810.01, 860.13]},
    ...     index=['1980-01-01', '1980-02-01', '1980-03-01'])
    >>> df
                    FR      GR      IT
    1980-01-01  4.0405  1.7246  804.74
    1980-02-01  4.0963  1.7482  810.01
    1980-03-01  4.3149  1.8519  860.13

    >>> df.pct_change()
                      FR        GR        IT
    1980-01-01       NaN       NaN       NaN
    1980-02-01  0.013810  0.013684  0.006549
    1980-03-01  0.053365  0.059318  0.061876

    Percentage of change in GOOG and APPL stock volume. Shows computing
    the percentage change between columns.

    >>> df = pd.DataFrame({
    ...     '2016': [1769950, 30586265],
    ...     '2015': [1500923, 40912316],
    ...     '2014': [1371819, 41403351]},
    ...     index=['GOOG', 'APPL'])
    >>> df
              2016      2015      2014
    GOOG   1769950   1500923   1371819
    APPL  30586265  40912316  41403351

    >>> df.pct_change(axis='columns', periods=-1)
              2016      2015  2014
    GOOG  0.179241  0.094112   NaN
    APPL -0.252395 -0.011860   NaN
    """
    # ``axis`` arrives through kwargs so it can also be forwarded to shift.
    axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
    # Optionally fill NAs before computing changes.
    if fill_method is None:
        data = self
    else:
        filled = self.fillna(method=fill_method, axis=axis, limit=limit)
        assert filled is not None  # needed for mypy
        data = filled
    shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
    # Unsupported left operand type for / ("FrameOrSeries")
    change = data / shifted - 1  # type: ignore[operator]
    if freq is not None:
        # Shift method is implemented differently when freq is not None:
        # drop the duplicated labels it introduces and restore the
        # original index.
        change = change.loc[~change.index.duplicated()]
        change = change.reindex_like(data)
    return change
@final
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
    """Apply the reduction ``name`` per level of the axis via groupby."""
    if axis is None:
        raise ValueError("Must specify 'axis' when aggregating by level.")
    grouped = self.groupby(level=level, axis=axis, sort=False)
    # Fast path: the groupby object implements the reduction itself and the
    # default skipna semantics apply.
    if skipna and hasattr(grouped, name):
        return getattr(grouped, name)(**kwargs)
    # Slow path: aggregate each group with the unbound method so ``axis``
    # and ``skipna`` can be forwarded explicitly.
    axis_num = self._get_axis_number(axis)
    unbound = getattr(type(self), name)

    def _apply(group):
        return unbound(group, axis=axis_num, skipna=skipna, **kwargs)

    return grouped.aggregate(_apply)
@final
def _logical_func(
    self, name: str, func, axis=0, bool_only=None, skipna=True, level=None, **kwargs
):
    """
    Shared implementation backing the boolean reductions ``any``/``all``.

    Parameters
    ----------
    name : str
        Public name of the reduction ("any" or "all"); used for kwarg
        validation messages and level-based dispatch.
    func : callable
        The nanops reduction to apply (e.g. ``nanops.nanany``).
    """
    nv.validate_logical_func((), kwargs, fname=name)
    if level is not None:
        # Deprecated level-based path: emit the warning, then aggregate
        # per level through groupby.
        warnings.warn(
            "Using the level keyword in DataFrame and Series aggregations is "
            "deprecated and will be removed in a future version. Use groupby "
            "instead. df.any(level=1) should use df.groupby(level=1).any()",
            FutureWarning,
            stacklevel=4,
        )
        if bool_only is not None:
            raise NotImplementedError(
                "Option bool_only is not implemented with option level."
            )
        return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
    if self.ndim > 1 and axis is None:
        # Reduce along one dimension then the other, to simplify DataFrame._reduce
        res = self._logical_func(
            name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
        )
        # Second pass reduces the intermediate Series to a scalar.
        return res._logical_func(name, func, skipna=skipna, **kwargs)
    return self._reduce(
        func,
        name=name,
        axis=axis,
        skipna=skipna,
        numeric_only=bool_only,
        filter_type="bool",
    )
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return self._logical_func(
"any", nanops.nanany, axis, bool_only, skipna, level, **kwargs
)
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return self._logical_func(
"all", nanops.nanall, axis, bool_only, skipna, level, **kwargs
)
    @final
    def _accum_func(self, name: str, func, axis=None, skipna=True, *args, **kwargs):
        """
        Shared implementation behind cummin/cummax/cumsum/cumprod.
        """
        skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
        if axis is None:
            axis = self._stat_axis_number
        else:
            axis = self._get_axis_number(axis)

        if axis == 1:
            # Accumulate along rows by transposing, accumulating along
            # axis 0, and transposing the result back.
            return self.T._accum_func(
                name, func, axis=0, skipna=skipna, *args, **kwargs
            ).T

        def block_accum_func(blk_values):
            # NOTE(review): 2-D block values appear to be stored transposed
            # relative to the frame, hence the .T before and after the
            # accumulation; objects without .T (1-D arrays) pass through
            # unchanged — confirm against the BlockManager layout.
            values = blk_values.T if hasattr(blk_values, "T") else blk_values

            result = nanops.na_accum_func(values, func, skipna=skipna)

            result = result.T if hasattr(result, "T") else result
            return result

        result = self._mgr.apply(block_accum_func)

        # Re-wrap the manager and propagate metadata from self.
        return self._constructor(result).__finalize__(self, method=name)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
return self._accum_func(
"cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
return self._accum_func(
"cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs)
    @final
    def _stat_function_ddof(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        ddof=1,
        numeric_only=None,
        **kwargs,
    ):
        """
        Shared implementation behind sem/var/std (reductions that take ddof).
        """
        nv.validate_stat_ddof_func((), kwargs, fname=name)
        # None means "not passed" for skipna/axis; resolve the defaults here.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Deprecated path: aggregate over a MultiIndex level via groupby.
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.var(level=1) should use df.groupby(level=1).var().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, ddof=ddof
            )
        return self._reduce(
            func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
        )
def sem(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs
)
def var(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs
)
def std(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs
)
    @final
    def _stat_function(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        **kwargs,
    ):
        """
        Shared implementation behind min/max/mean/median/skew/kurt.
        """
        if name == "median":
            # median has its own numpy-compat signature validator.
            nv.validate_median((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)
        # None means "not passed" for skipna/axis; resolve the defaults here.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Deprecated path: aggregate over a MultiIndex level via groupby.
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.median(level=1) should use df.groupby(level=1).median().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only
            )
        return self._reduce(
            func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
        )
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs
)
def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs
)
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs
)
kurtosis = kurt
    @final
    def _min_count_stat_function(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        min_count=0,
        **kwargs,
    ):
        """
        Shared implementation behind sum/prod (reductions that take min_count).
        """
        # sum and prod each have their own numpy-compat signature validator.
        if name == "sum":
            nv.validate_sum((), kwargs)
        elif name == "prod":
            nv.validate_prod((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)
        # None means "not passed" for skipna/axis; resolve the defaults here.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Deprecated path: aggregate over a MultiIndex level via groupby.
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.sum(level=1) should use df.groupby(level=1).sum().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name,
                axis=axis,
                level=level,
                skipna=skipna,
                min_count=min_count,
                numeric_only=numeric_only,
            )
        return self._reduce(
            func,
            name=name,
            axis=axis,
            skipna=skipna,
            numeric_only=numeric_only,
            min_count=min_count,
        )
def sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return self._min_count_stat_function(
"sum", nanops.nansum, axis, skipna, level, numeric_only, min_count, **kwargs
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return self._min_count_stat_function(
"prod",
nanops.nanprod,
axis,
skipna,
level,
numeric_only,
min_count,
**kwargs,
)
product = prod
    def mad(self, axis=None, skipna=None, level=None):
        """
        {desc}
        Parameters
        ----------
        axis : {axis_descr}
            Axis for the function to be applied on.
        skipna : bool, default None
            Exclude NA/null values when computing the result.
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a {name1}.
        Returns
        -------
        {name1} or {name2} (if level specified)\
        {see_also}\
        {examples}
        """
        # NOTE: the docstring above is a template; @doc fills the {placeholders}
        # when this method is re-exposed in _add_numeric_operations.
        # None means "not passed" for skipna/axis; resolve the defaults here.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Deprecated path: aggregate over a MultiIndex level via groupby.
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.mad(level=1) should use df.groupby(level=1).mad()",
                FutureWarning,
                stacklevel=3,
            )
            return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)

        data = self._get_numeric_data()
        if axis == 0:
            # Deviations from the column means.
            demeaned = data - data.mean(axis=0)
        else:
            # Deviations from the row means, broadcast down the rows.
            demeaned = data.sub(data.mean(axis=1), axis=0)
        return np.abs(demeaned).mean(axis=axis, skipna=skipna)
    @classmethod
    def _add_numeric_operations(cls):
        """
        Add the operations to the cls; evaluate the doc strings again
        """
        # Build the per-class doc-template parameters once, then re-define each
        # reduction with a fully formatted docstring and bind it onto cls.
        # Every wrapper simply delegates to the NDFrame implementation.
        axis_descr, name1, name2 = _doc_params(cls)

        @doc(
            _bool_doc,
            desc=_any_desc,
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            see_also=_any_see_also,
            examples=_any_examples,
            empty_value=False,
        )
        def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
            return NDFrame.any(self, axis, bool_only, skipna, level, **kwargs)

        setattr(cls, "any", any)

        @doc(
            _bool_doc,
            desc=_all_desc,
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            see_also=_all_see_also,
            examples=_all_examples,
            empty_value=True,
        )
        def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
            return NDFrame.all(self, axis, bool_only, skipna, level, **kwargs)

        setattr(cls, "all", all)

        # error: Argument 1 to "doc" has incompatible type "Optional[str]"; expected
        # "Union[str, Callable[..., Any]]"
        @doc(
            NDFrame.mad.__doc__,  # type: ignore[arg-type]
            desc="Return the mean absolute deviation of the values "
            "over the requested axis.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            see_also="",
            examples="",
        )
        def mad(self, axis=None, skipna=None, level=None):
            return NDFrame.mad(self, axis, skipna, level)

        setattr(cls, "mad", mad)

        @doc(
            _num_ddof_doc,
            desc="Return unbiased standard error of the mean over requested "
            "axis.\n\nNormalized by N-1 by default. This can be changed "
            "using the ddof argument",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
        )
        def sem(
            self,
            axis=None,
            skipna=None,
            level=None,
            ddof=1,
            numeric_only=None,
            **kwargs,
        ):
            return NDFrame.sem(self, axis, skipna, level, ddof, numeric_only, **kwargs)

        setattr(cls, "sem", sem)

        @doc(
            _num_ddof_doc,
            desc="Return unbiased variance over requested axis.\n\nNormalized by "
            "N-1 by default. This can be changed using the ddof argument",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
        )
        def var(
            self,
            axis=None,
            skipna=None,
            level=None,
            ddof=1,
            numeric_only=None,
            **kwargs,
        ):
            return NDFrame.var(self, axis, skipna, level, ddof, numeric_only, **kwargs)

        setattr(cls, "var", var)

        @doc(
            _num_ddof_doc,
            desc="Return sample standard deviation over requested axis."
            "\n\nNormalized by N-1 by default. This can be changed using the "
            "ddof argument",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
        )
        def std(
            self,
            axis=None,
            skipna=None,
            level=None,
            ddof=1,
            numeric_only=None,
            **kwargs,
        ):
            return NDFrame.std(self, axis, skipna, level, ddof, numeric_only, **kwargs)

        setattr(cls, "std", std)

        @doc(
            _cnum_doc,
            desc="minimum",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            accum_func_name="min",
            examples=_cummin_examples,
        )
        def cummin(self, axis=None, skipna=True, *args, **kwargs):
            return NDFrame.cummin(self, axis, skipna, *args, **kwargs)

        setattr(cls, "cummin", cummin)

        @doc(
            _cnum_doc,
            desc="maximum",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            accum_func_name="max",
            examples=_cummax_examples,
        )
        def cummax(self, axis=None, skipna=True, *args, **kwargs):
            return NDFrame.cummax(self, axis, skipna, *args, **kwargs)

        setattr(cls, "cummax", cummax)

        @doc(
            _cnum_doc,
            desc="sum",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            accum_func_name="sum",
            examples=_cumsum_examples,
        )
        def cumsum(self, axis=None, skipna=True, *args, **kwargs):
            return NDFrame.cumsum(self, axis, skipna, *args, **kwargs)

        setattr(cls, "cumsum", cumsum)

        @doc(
            _cnum_doc,
            desc="product",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            accum_func_name="prod",
            examples=_cumprod_examples,
        )
        def cumprod(self, axis=None, skipna=True, *args, **kwargs):
            return NDFrame.cumprod(self, axis, skipna, *args, **kwargs)

        setattr(cls, "cumprod", cumprod)

        @doc(
            _num_doc,
            desc="Return the sum of the values over the requested axis.\n\n"
            "This is equivalent to the method ``numpy.sum``.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count=_min_count_stub,
            see_also=_stat_func_see_also,
            examples=_sum_examples,
        )
        def sum(
            self,
            axis=None,
            skipna=None,
            level=None,
            numeric_only=None,
            min_count=0,
            **kwargs,
        ):
            return NDFrame.sum(
                self, axis, skipna, level, numeric_only, min_count, **kwargs
            )

        setattr(cls, "sum", sum)

        @doc(
            _num_doc,
            desc="Return the product of the values over the requested axis.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count=_min_count_stub,
            see_also=_stat_func_see_also,
            examples=_prod_examples,
        )
        def prod(
            self,
            axis=None,
            skipna=None,
            level=None,
            numeric_only=None,
            min_count=0,
            **kwargs,
        ):
            return NDFrame.prod(
                self, axis, skipna, level, numeric_only, min_count, **kwargs
            )

        setattr(cls, "prod", prod)
        cls.product = prod

        @doc(
            _num_doc,
            desc="Return the mean of the values over the requested axis.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also="",
            examples="",
        )
        def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
            return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "mean", mean)

        @doc(
            _num_doc,
            desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also="",
            examples="",
        )
        def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
            return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "skew", skew)

        @doc(
            _num_doc,
            desc="Return unbiased kurtosis over requested axis.\n\n"
            "Kurtosis obtained using Fisher's definition of\n"
            "kurtosis (kurtosis of normal == 0.0). Normalized "
            "by N-1.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also="",
            examples="",
        )
        def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
            return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "kurt", kurt)
        cls.kurtosis = kurt

        @doc(
            _num_doc,
            desc="Return the median of the values over the requested axis.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also="",
            examples="",
        )
        def median(
            self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
        ):
            return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "median", median)

        @doc(
            _num_doc,
            desc="Return the maximum of the values over the requested axis.\n\n"
            "If you want the *index* of the maximum, use ``idxmax``. This is "
            "the equivalent of the ``numpy.ndarray`` method ``argmax``.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also=_stat_func_see_also,
            examples=_max_examples,
        )
        def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
            return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "max", max)

        @doc(
            _num_doc,
            desc="Return the minimum of the values over the requested axis.\n\n"
            "If you want the *index* of the minimum, use ``idxmin``. This is "
            "the equivalent of the ``numpy.ndarray`` method ``argmin``.",
            name1=name1,
            name2=name2,
            axis_descr=axis_descr,
            min_count="",
            see_also=_stat_func_see_also,
            examples=_min_examples,
        )
        def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
            return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs)

        setattr(cls, "min", min)
@final
@doc(Rolling)
def rolling(
self,
window: int | timedelta | BaseOffset | BaseIndexer,
min_periods: int | None = None,
center: bool_t = False,
win_type: str | None = None,
on: str | None = None,
axis: Axis = 0,
closed: str | None = None,
method: str = "single",
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
method=method,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
method=method,
)
@final
@doc(Expanding)
def expanding(
self,
min_periods: int = 1,
center: bool_t | None = None,
axis: Axis = 0,
method: str = "single",
) -> Expanding:
axis = self._get_axis_number(axis)
if center is not None:
warnings.warn(
"The `center` argument on `expanding` will be removed in the future",
FutureWarning,
stacklevel=2,
)
else:
center = False
return Expanding(
self, min_periods=min_periods, center=center, axis=axis, method=method
)
    @final
    @doc(ExponentialMovingWindow)
    def ewm(
        self,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool_t = True,
        ignore_na: bool_t = False,
        axis: Axis = 0,
        times: str | np.ndarray | FrameOrSeries | None = None,
        method: str = "single",
    ) -> ExponentialMovingWindow:
        # Validate the axis, then forward every argument unchanged.
        axis = self._get_axis_number(axis)
        # error: Value of type variable "FrameOrSeries" of "ExponentialMovingWindow"
        # cannot be "object"
        return ExponentialMovingWindow(  # type: ignore[type-var]
            self,
            com=com,
            span=span,
            halflife=halflife,
            alpha=alpha,
            min_periods=min_periods,
            adjust=adjust,
            ignore_na=ignore_na,
            axis=axis,
            times=times,
            method=method,
        )
# ----------------------------------------------------------------------
# Arithmetic Methods
    @final
    def _inplace_method(self, other, op):
        """
        Wrap arithmetic method to operate inplace.

        Parameters
        ----------
        other : object
            Right-hand operand.
        op : callable
            The out-of-place operator, e.g. ``type(self).__add__``.

        Returns
        -------
        self, updated with the result of ``op(self, other)``.
        """
        result = op(self, other)

        # Fast path only for 1-D with matching index and dtype, where the
        # result can be written directly into the existing values.
        if (
            self.ndim == 1
            and result._indexed_same(self)
            and is_dtype_equal(result.dtype, self.dtype)
        ):
            # GH#36498 this inplace op can _actually_ be inplace.
            self._values[:] = result._values
            return self

        # Delete cacher
        self._reset_cacher()

        # this makes sure that we are aligned like the input
        # we are updating inplace so we want to ignore is_copy
        self._update_inplace(
            result.reindex_like(self, copy=False), verify_is_copy=False
        )
        return self
    # In-place operators: each delegates to _inplace_method with the
    # corresponding out-of-place dunder looked up on type(self).
    def __iadd__(self, other):
        # error: Unsupported left operand type for + ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__add__)  # type: ignore[operator]

    def __isub__(self, other):
        # error: Unsupported left operand type for - ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__sub__)  # type: ignore[operator]

    def __imul__(self, other):
        # error: Unsupported left operand type for * ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__mul__)  # type: ignore[operator]

    def __itruediv__(self, other):
        # error: Unsupported left operand type for / ("Type[NDFrame]")
        return self._inplace_method(
            other, type(self).__truediv__  # type: ignore[operator]
        )

    def __ifloordiv__(self, other):
        # error: Unsupported left operand type for // ("Type[NDFrame]")
        return self._inplace_method(
            other, type(self).__floordiv__  # type: ignore[operator]
        )

    def __imod__(self, other):
        # error: Unsupported left operand type for % ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__mod__)  # type: ignore[operator]

    def __ipow__(self, other):
        # error: Unsupported left operand type for ** ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__pow__)  # type: ignore[operator]

    def __iand__(self, other):
        # error: Unsupported left operand type for & ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__and__)  # type: ignore[operator]

    def __ior__(self, other):
        # error: Unsupported left operand type for | ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__or__)  # type: ignore[operator]

    def __ixor__(self, other):
        # error: Unsupported left operand type for ^ ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__xor__)  # type: ignore[operator]
# ----------------------------------------------------------------------
# Misc methods
@final
def _find_valid_index(self, *, how: str) -> Hashable | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how=how)
if idxpos is None:
return None
return self.index[idxpos]
    @final
    @doc(position="first", klass=_shared_doc_kwargs["klass"])
    def first_valid_index(self) -> Hashable | None:
        """
        Return index for {position} non-NA value or None, if no NA value is found.
        Returns
        -------
        scalar : type of index
        Notes
        -----
        If all elements are non-NA/null, returns None.
        Also returns None for empty {klass}.
        """
        # Delegates to the shared scanner, which returns a label or None.
        return self._find_valid_index(how="first")
    @final
    @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"])
    def last_valid_index(self) -> Hashable | None:
        # Same contract as first_valid_index, but with how="last".
        return self._find_valid_index(how="last")
def _doc_params(cls):
"""Return a tuple of the doc params."""
axis_descr = (
f"{{{', '.join([f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS)])}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
{name1} or {name2} (if level specified)\
{see_also}\
{examples}
"""
_num_ddof_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
{name1} or {name2} (if level specified)
Notes
-----
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)\n"""
_bool_doc = """
{desc}
Parameters
----------
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be {empty_value}, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
If level is specified, then, {name2} is returned; otherwise, {name1}
is returned.
{see_also}
{examples}"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([], dtype="float64").all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative {desc} over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
{desc}.
Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
Return cumulative {desc} of {name1} or {name2}.
See Also
--------
core.window.Expanding.{accum_func_name} : Similar functionality
but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
{name2} axis.
{name2}.cummax : Return cumulative maximum over {name2} axis.
{name2}.cummin : Return cumulative minimum over {name2} axis.
{name2}.cumsum : Return cumulative sum over {name2} axis.
{name2}.cumprod : Return cumulative product over {name2} axis.
{examples}"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([], dtype="float64").any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([], dtype="float64").sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([], dtype="float64").sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([], dtype="float64").prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([], dtype="float64").prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
def _align_as_utc(
left: FrameOrSeries, right: FrameOrSeries, join_index: Index | None
) -> tuple[FrameOrSeries, FrameOrSeries]:
"""
If we are aligning timezone-aware DatetimeIndexes and the timezones
do not match, convert both to UTC.
"""
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
# GH#33671 ensure we don't change the index on
# our original Series (NB: by default deep=False)
left = left.copy()
right = right.copy()
left.index = join_index
right.index = join_index
return left, right
|
{
"content_hash": "1b8a0b763bcbf92e715097b7492cd492",
"timestamp": "",
"source": "github",
"line_count": 11642,
"max_line_length": 104,
"avg_line_length": 33.289125579797286,
"alnum_prop": 0.5206398109156964,
"repo_name": "gfyoung/pandas",
"id": "5f57c353391c2601c2476e6a7b8d7693d0eb9b28",
"size": "387628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/generic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
from api import *
import os
@cmd_func('ls', ['dir'])
def list_dir(interpreter, line):
    """Print the current directory contents, or the entries matching *line*.

    :param interpreter: cmd-style interpreter providing ``poutput``.
    :param line: optional path prefix; entries whose basename starts with the
        final component of *line* are listed.
    """
    # Directory whose entries are being listed; '' means the CWD
    # (os.path.join('', p) == p, so the isdir() check below stays correct).
    dir = ''
    if not line:
        res = os.listdir('.')
    else:
        dir = os.path.dirname(line)
        if not dir:
            res = os.listdir('.')
            inp = line
        else:
            res = os.listdir(dir)
            inp = line[len(dir) + 1:]
        res = filter(lambda d: d.startswith(inp), res)
    # BUG FIX: os.listdir(dir) returns bare names, so the isdir() test must
    # be made against os.path.join(dir, p), not p alone -- previously entries
    # inside a subdirectory were never marked with a trailing separator.
    # This mirrors the join already done in complete_list_dir.
    res = [p + os.path.sep if os.path.isdir(os.path.join(dir, p)) else p
           for p in res]
    interpreter.poutput('\n'.join(res))
@cmd_helper('ls', ['dir'])
def help_list_dir(interpreter):
    """Print usage help for the ``ls`` command."""
    usage = [
        'ls\t\t print current directory contents',
        'ls [path]\t print directories and files matching [path]',
    ]
    interpreter.poutput('\n'.join(usage))
@cmd_completer('ls', ['dir'])
def complete_list_dir(interpreter, text, line, begidx, endidx):
    """Tab-completion hook for ``ls``: suggest directory names matching *text*."""
    # Strip the leading 'ls ' from the command line to get the typed path.
    path = line[3:]
    if not path:
        candidates = next(os.walk('.'))[1]
        return [c + os.path.sep if os.path.isdir(c) else c for c in candidates]
    dir = os.path.dirname(path)
    if dir:
        candidates = next(os.walk(dir))[1]
    else:
        candidates = next(os.walk('.'))[1]
    matches = [c for c in candidates if c.startswith(text)]
    return [c + os.path.sep if os.path.isdir(os.path.join(dir, c)) else c
            for c in matches]
|
{
"content_hash": "e5c51c9808bf596369ed1b15a2a06e66",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 90,
"avg_line_length": 29.363636363636363,
"alnum_prop": 0.544891640866873,
"repo_name": "kinpa200296/cmdpy",
"id": "6dce95e442071244c677b107fb147f01e448319f",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/navigation/list_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11706"
}
],
"symlink_target": ""
}
|
import copy
import os
import shutil
import tempfile
import mock
from oslo_utils import fileutils
from oslo_utils import timeutils
from cinder import test
from cinder.volume import configuration as conf
class TargetDriverFixture(test.TestCase):
    """Shared setUp fixture for the iSCSI target-driver unit tests.

    Builds fake volume dictionaries, a mocked configuration object, the
    expected iSCSI connection properties and a temporary volumes directory
    (removed again in :meth:`_cleanup`).
    """

    def setUp(self):
        super(TargetDriverFixture, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        # Route safe_get() through our stub so drivers see canned values.
        self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.configuration.iscsi_port = 3260

        # Fresh scratch directory per test; removed via addCleanup below.
        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)

        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        # NOTE(review): identical to fake_project_id above -- presumably a
        # distinct id was intended; confirm against the tests that use it.
        self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
        self.addCleanup(self._cleanup)

        self.testvol =\
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': '10.10.7.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        # Same volume but without a provider_location set.
        self.testvol_no_prov_loc = copy.copy(self.testvol)
        self.testvol_no_prov_loc['provider_location'] = None

        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target_string = ('127.0.0.1:3260,1 ' +
                              self.iscsi_target_prefix +
                              'volume-%s' % self.testvol['id'])

        # NOTE(review): the format string below has no separator between
        # %(port)d and %(iqn)s -- verify the resulting provider_location is
        # what the consuming tests expect.
        self.testvol_2 =\
            {'project_id': self.fake_project_id_2,
             'name': 'testvol2',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' %
                                   {'ip': self.configuration.iscsi_ip_address,
                                    'port': self.configuration.iscsi_port,
                                    'iqn': self.iscsi_target_prefix,
                                    'vol': self.fake_volume_id}),
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        # Connection properties a driver should derive from self.testvol.
        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '2FE0CQ8J196R',
             'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_volume_id,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_volume_id}

        self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45'
        self.VOLUME_NAME = 'volume-' + self.VOLUME_ID
        self.test_vol = (self.iscsi_target_prefix +
                         self.VOLUME_NAME)

    def _cleanup(self):
        # Remove the per-test scratch directory created in setUp().
        if os.path.exists(self.fake_volumes_dir):
            shutil.rmtree(self.fake_volumes_dir)

    def fake_safe_get(self, value):
        """Stand-in for Configuration.safe_get().

        Returns canned values for the keys the target drivers read; any
        other key falls through and implicitly returns None.
        """
        if value == 'volumes_dir':
            return self.fake_volumes_dir
        elif value == 'iscsi_protocol':
            return self.configuration.iscsi_protocol
        elif value == 'iscsi_target_prefix':
            return self.iscsi_target_prefix
|
{
"content_hash": "c0023d8eb63bd23c0f48be01ec7d9032",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 40.277227722772274,
"alnum_prop": 0.5464601769911505,
"repo_name": "eharney/cinder",
"id": "1d1bf20669920d7331181581d42d2a633adf76bb",
"size": "4641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/targets/targets_fixture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
}
|
"""A simple JavaScript 1.5 lexer which is used for the JavaScript
extractor.
"""
import re
from operator import itemgetter
# All JavaScript operators, later joined into one regex alternation.
operators = [
    '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
    '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
    '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
    '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
]
# Sort longest-first so the alternation matches multi-character operators
# (e.g. '>>>=') before their prefixes (e.g. '>').  Using key=len with
# reverse=True replaces the Python-2-only `sort(cmp_func)` / `cmp` form and
# is equivalent: a stable descending sort by length.
operators.sort(key=len, reverse=True)

# Simple backslash escapes and the characters they denote.
escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
rules = [
(None, re.compile(r'\s+(?u)')),
(None, re.compile(r'<!--.*')),
('linecomment', re.compile(r'//.*')),
('multilinecomment', re.compile(r'/\*.*?\*/(?us)')),
('name', re.compile(r'(\$+\w*|[^\W\d]\w*)(?u)')),
('number', re.compile(r'''(?x)(
(?:0|[1-9]\d*)
(\.\d+)?
([eE][-+]?\d+)? |
(0x[a-fA-F0-9]+)
)''')),
('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
('string', re.compile(r'''(?xs)(
'(?:[^'\\]*(?:\\.[^'\\]*)*)' |
"(?:[^"\\]*(?:\\.[^"\\]*)*)"
)'''))
]
division_re = re.compile(r'/=?')
regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*(?s)')
line_re = re.compile(r'(\r\n|\n|\r)')
line_join_re = re.compile(r'\\' + line_re.pattern)
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
class Token(tuple):
    """An immutable lexer token: a ``(type, value, lineno)`` triple."""
    __slots__ = ()

    def __new__(cls, type, value, lineno):
        # Tuples are immutable, so all fields are fixed at construction.
        return tuple.__new__(cls, (type, value, lineno))

    @property
    def type(self):
        return self[0]

    @property
    def value(self):
        return self[1]

    @property
    def lineno(self):
        return self[2]
def indicates_division(token):
    """Tell the tokenizer whether *token* may be followed by a division
    operator (as opposed to the start of a regex literal).
    """
    closing_or_postfix = frozenset((')', ']', '}', '++', '--'))
    value_types = ('name', 'number', 'string', 'regexp')
    if token.type != 'operator':
        return token.type in value_types
    return token.value in closing_or_postfix
def unquote_string(string):
    """Unquote a string with JavaScript rules.  The string has to start with
    string delimiters (``'`` or ``"``.)

    :return: a string
    """
    assert string and string[0] == string[-1] and string[0] in '"\'', \
        'string provided is not properly delimited'
    # Drop the surrounding quotes, then rewrite backslash-newline pairs to
    # just the newline (the \1 backreference keeps the line separator and
    # removes only the backslash).
    string = line_join_re.sub('\\1', string[1:-1])
    result = []
    add = result.append
    pos = 0
    while 1:
        # scan for the next escape
        escape_pos = string.find('\\', pos)
        if escape_pos < 0:
            break
        # Copy the unescaped run before the backslash verbatim.
        add(string[pos:escape_pos])
        # check which character is escaped
        next_char = string[escape_pos + 1]
        if next_char in escapes:
            # Simple single-character escape (\n, \t, ...).
            add(escapes[next_char])
        # unicode escapes. trie to consume up to four characters of
        # hexadecimal characters and try to interpret them as unicode
        # character point. If there is no such character point, put
        # all the consumed characters into the string.
        elif next_char in 'uU':
            escaped = uni_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                if len(escaped_value) == 4:
                    try:
                        # NOTE(review): unichr is Python 2 only; on
                        # Python 3 this raises NameError at runtime.
                        add(unichr(int(escaped_value, 16)))
                    except ValueError:
                        pass
                    else:
                        # Skip past the backslash, 'u' and 4 hex digits.
                        pos = escape_pos + 6
                        continue
                # Fewer than 4 hex digits (or an invalid code point):
                # keep the consumed characters literally, minus the backslash.
                add(next_char + escaped_value)
                pos = escaped.end()
                continue
            else:
                add(next_char)
        # bogus escape. Just remove the backslash.
        else:
            add(next_char)
        pos = escape_pos + 2
    if pos < len(string):
        add(string[pos:])
    return u''.join(result)
def tokenize(source):
    """Tokenize a JavaScript source.

    Yields `Token` instances in source order; unmatched characters are
    silently skipped one at a time.

    :return: generator of `Token`\s
    """
    # Whether a '/' at the current position means division (True) or the
    # start of a regex literal (False); updated per emitted token.
    may_divide = False
    pos = 0
    lineno = 1
    end = len(source)
    while pos < end:
        # handle regular rules first
        for token_type, rule in rules:
            match = rule.match(source, pos)
            if match is not None:
                break
        # if we don't have a match we don't give up yet, but check for
        # division operators or regular expression literals, based on
        # the status of `may_divide` which is determined by the last
        # processed non-whitespace token using `indicates_division`.
        else:
            if may_divide:
                match = division_re.match(source, pos)
                token_type = 'operator'
            else:
                match = regex_re.match(source, pos)
                token_type = 'regexp'
            if match is None:
                # woops. invalid syntax. jump one char ahead and try again.
                pos += 1
                continue
        token_value = match.group()
        # token_type is None for skipped text (whitespace, '<!--' lines).
        if token_type is not None:
            token = Token(token_type, token_value, lineno)
            may_divide = indicates_division(token)
            yield token
        # Keep the line counter in sync across multi-line tokens
        # (e.g. block comments and strings with line joins).
        lineno += len(line_re.findall(token_value))
        pos = match.end()
|
{
"content_hash": "6df17008c893d3ae9ca7bd67ff655652",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 76,
"avg_line_length": 31.753086419753085,
"alnum_prop": 0.49144634525660963,
"repo_name": "toomoresuch/pysonengine",
"id": "d063ef09c2f64f2841aca43f9144842121d41956",
"size": "5634",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "eggs/Babel-0.9.5-py2.6.egg/babel/messages/jslexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "401941"
},
{
"name": "JavaScript",
"bytes": "628757"
},
{
"name": "Python",
"bytes": "12919662"
},
{
"name": "Shell",
"bytes": "416"
},
{
"name": "VimL",
"bytes": "4587"
}
],
"symlink_target": ""
}
|
"""
Nordigen Account Information Services API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0 (v2)
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SpectacularJWTRefresh(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Model attribute -> declared Swagger type.
    swagger_types = {
        'access': 'str',
        'access_expires': 'int'
    }

    # Model attribute -> key used in the JSON wire format.
    attribute_map = {
        'access': 'access',
        'access_expires': 'access_expires'
    }

    def __init__(self, access=None, access_expires=86400):  # noqa: E501
        """SpectacularJWTRefresh - a model defined in Swagger"""  # noqa: E501
        self._access = None
        self._access_expires = None
        self.discriminator = None
        # Assigning through the properties runs the not-None validation.
        # NOTE(review): the default access=None is rejected by the setter,
        # so SpectacularJWTRefresh() with no arguments raises ValueError --
        # confirm this matches the generator's intent.
        self.access = access
        self.access_expires = access_expires

    @property
    def access(self):
        """Gets the access of this SpectacularJWTRefresh.  # noqa: E501

        Your access token  # noqa: E501

        :return: The access of this SpectacularJWTRefresh.  # noqa: E501
        :rtype: str
        """
        return self._access

    @access.setter
    def access(self, access):
        """Sets the access of this SpectacularJWTRefresh.

        Your access token  # noqa: E501

        :param access: The access of this SpectacularJWTRefresh.  # noqa: E501
        :type: str
        """
        if access is None:
            raise ValueError("Invalid value for `access`, must not be `None`")  # noqa: E501
        self._access = access

    @property
    def access_expires(self):
        """Gets the access_expires of this SpectacularJWTRefresh.  # noqa: E501

        Access token expires in seconds  # noqa: E501

        :return: The access_expires of this SpectacularJWTRefresh.  # noqa: E501
        :rtype: int
        """
        return self._access_expires

    @access_expires.setter
    def access_expires(self, access_expires):
        """Sets the access_expires of this SpectacularJWTRefresh.

        Access token expires in seconds  # noqa: E501

        :param access_expires: The access_expires of this SpectacularJWTRefresh.  # noqa: E501
        :type: int
        """
        if access_expires is None:
            raise ValueError("Invalid value for `access_expires`, must not be `None`")  # noqa: E501
        self._access_expires = access_expires

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(SpectacularJWTRefresh, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SpectacularJWTRefresh):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "4b3c36a7ee8b874085d2c05cb98305b3",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 119,
"avg_line_length": 30.885714285714286,
"alnum_prop": 0.5723866790009251,
"repo_name": "ltowarek/budget-supervisor",
"id": "ec69fbdd76407b1dce7c8cc41e6f9362f7ecd22b",
"size": "4341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/nordigen/nordigen/models/spectacular_jwt_refresh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7960"
},
{
"name": "JavaScript",
"bytes": "79489"
}
],
"symlink_target": ""
}
|
"""Remove assistance/vc columns from rb
Revision ID: 7024f7f66e20
Revises: a83e77e11e36
Create Date: 2019-04-25 15:07:20.614620
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '7024f7f66e20'        # this migration's unique id
down_revision = 'a83e77e11e36'   # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Drop the assistance/videoconference columns from room booking."""
    for column in ('needs_vc_assistance', 'uses_vc', 'needs_assistance'):
        op.drop_column('reservations', column, schema='roombooking')
    op.drop_column('rooms', 'notification_for_assistance', schema='roombooking')
def downgrade():
    """Re-create the assistance/videoconference columns.

    Existing rows get a 'false' server default so the NOT NULL constraint
    can be satisfied; the default is removed again afterwards.
    """
    restored = [
        ('rooms', 'notification_for_assistance'),
        ('reservations', 'needs_assistance'),
        ('reservations', 'uses_vc'),
        ('reservations', 'needs_vc_assistance'),
    ]
    for table, column in restored:
        op.add_column(
            table,
            sa.Column(column, sa.Boolean(), nullable=False, server_default='false'),
            schema='roombooking',
        )
    for table, column in restored:
        op.alter_column(table, column, server_default=None, schema='roombooking')
|
{
"content_hash": "6a780745d6bf67394bff94052b4dd417",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 113,
"avg_line_length": 41.95238095238095,
"alnum_prop": 0.674233825198638,
"repo_name": "DirkHoffmann/indico",
"id": "ff89c5244fc3b8daf28a400cbde58802437c7c77",
"size": "1762",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "indico/migrations/versions/20190425_1507_7024f7f66e20_remove_assistance_vc_columns_from_rb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""
Cell messaging module.
This module defines the different message types that are passed between
cells and the methods that they can call when the target cell has been
reached.
The interface into this module is the MessageRunner class.
"""
import sys
from eventlet import queue
from oslo.config import cfg
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import utils
# Configuration options controlling cell-to-cell message routing; registered
# under the [cells] group below.
cell_messaging_opts = [
    cfg.IntOpt('max_hop_count',
            default=10,
            help='Maximum number of hops for cells routing.'),
    cfg.StrOpt('scheduler',
            default='nova.cells.scheduler.CellsScheduler',
            help='Cells scheduler to use')]

CONF = cfg.CONF
# Re-use option definitions declared in nova.cells.opts.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_messaging_opts, group='cells')

LOG = logging.getLogger(__name__)

# Separator used between cell names for the 'full cell name' and routing
# path.
_PATH_CELL_SEP = cells_utils._PATH_CELL_SEP
def _reverse_path(path):
    """Return *path* with its cell components in reverse order.

    Used for sending responses back upstream along the routing path.
    """
    return _PATH_CELL_SEP.join(reversed(path.split(_PATH_CELL_SEP)))
def _response_cell_name_from_path(routing_path, neighbor_only=False):
    """Compute the destination cell for a response by reversing
    *routing_path*.  With ``neighbor_only=True`` the response is routed
    only as far as our immediate neighbor cell.
    """
    reversed_path = _reverse_path(routing_path)
    if not neighbor_only or len(reversed_path) == 1:
        return reversed_path
    first_two_parts = reversed_path.split(_PATH_CELL_SEP)[:2]
    return _PATH_CELL_SEP.join(first_two_parts)
#
# Message classes.
#
class _BaseMessage(object):
    """Base message class.  It defines data that is passed with every
    single message through every cell.

    Messages are JSON-ified before sending and turned back into a
    class instance when being received.

    Every message has a unique ID.  This is used to route responses
    back to callers.  In the future, this might be used to detect
    receiving the same message more than once.

    routing_path is updated on every hop through a cell.  The current
    cell name is appended to it (cells are separated by
    _PATH_CELL_SEP ('!')).  This is used to tell if we've reached the
    target cell and also to determine the source of a message for
    responses by reversing it.

    hop_count is incremented and compared against max_hop_count.  The
    only current usefulness of this is to break out of a routing loop
    if someone has a broken config.

    fanout means to send to all nova-cells services running in a cell.
    This is useful for capacity and capability broadcasting as well
    as making sure responses get back to the nova-cells service that
    is waiting.
    """
    # Override message_type in a subclass
    message_type = None

    # Attributes serialized by to_json() for transport to another cell.
    base_attrs_to_json = ['message_type',
                          'ctxt',
                          'method_name',
                          'method_kwargs',
                          'direction',
                          'need_response',
                          'fanout',
                          'uuid',
                          'routing_path',
                          'hop_count',
                          'max_hop_count']

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, need_response=False, fanout=False, uuid=None,
                 routing_path=None, hop_count=0, max_hop_count=None,
                 **kwargs):
        self.ctxt = ctxt
        self.resp_queue = None
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        # Copy these.
        self.base_attrs_to_json = self.base_attrs_to_json[:]
        # Normally this would just be CONF.cells.name, but going through
        # the msg_runner allows us to stub it more easily.
        self.our_path_part = self.msg_runner.our_name
        self.uuid = uuid
        if self.uuid is None:
            self.uuid = uuidutils.generate_uuid()
        self.method_name = method_name
        self.method_kwargs = method_kwargs
        self.direction = direction
        self.need_response = need_response
        self.fanout = fanout
        self.routing_path = routing_path
        self.hop_count = hop_count
        if max_hop_count is None:
            max_hop_count = CONF.cells.max_hop_count
        self.max_hop_count = max_hop_count
        self.is_broadcast = False
        # Record this cell as a hop immediately on creation.
        self._append_hop()
        # Each sub-class should set this when the message is inited
        self.next_hops = []
        # NOTE(review): resp_queue was already set to None above; this
        # re-assignment looks redundant.
        self.resp_queue = None

    def __repr__(self):
        _dict = self._to_dict()
        # method_kwargs may be large; omit it from the repr.
        _dict.pop('method_kwargs')
        return "<%s: %s>" % (self.__class__.__name__, _dict)

    def _append_hop(self):
        """Add our hop to the routing_path."""
        routing_path = (self.routing_path and
                        self.routing_path + _PATH_CELL_SEP or '')
        self.routing_path = routing_path + self.our_path_part
        self.hop_count += 1

    def _at_max_hop_count(self, do_raise=True):
        """Check if we're at the max hop count.  If we are and do_raise is
        True, raise CellMaxHopCountReached.  If we are at the max and
        do_raise is False... return True, else False.
        """
        if self.hop_count >= self.max_hop_count:
            if do_raise:
                raise exception.CellMaxHopCountReached(
                        hop_count=self.hop_count)
            return True
        return False

    def _process_locally(self):
        """Its been determined that we should process this message in this
        cell.  Go through the MessageRunner to call the appropriate
        method for this message.  Catch the response and/or exception and
        encode it within a Response instance.  Return it so the caller
        can potentially return it to another cell... or return it to
        a caller waiting in this cell.
        """
        try:
            resp_value = self.msg_runner._process_message_locally(self)
            failure = False
        except Exception as exc:
            # Encode the failure; the caller decides how to surface it.
            resp_value = sys.exc_info()
            failure = True
            LOG.exception(_("Error processing message locally: %(exc)s"),
                          {'exc': exc})
        return Response(self.routing_path, resp_value, failure)

    def _setup_response_queue(self):
        """Shortcut to creating a response queue in the MessageRunner."""
        self.resp_queue = self.msg_runner._setup_response_queue(self)

    def _cleanup_response_queue(self):
        """Shortcut to deleting a response queue in the MessageRunner."""
        if self.resp_queue:
            self.msg_runner._cleanup_response_queue(self)
            self.resp_queue = None

    def _wait_for_json_responses(self, num_responses=1):
        """Wait for response(s) to be put into the eventlet queue.  Since
        each queue entry actually contains a list of JSON-ified responses,
        combine them all into a single list to return.

        Destroy the eventlet queue when done.
        """
        if not self.resp_queue:
            # Source is not actually expecting a response
            return
        responses = []
        wait_time = CONF.cells.call_timeout
        try:
            for x in xrange(num_responses):
                json_responses = self.resp_queue.get(timeout=wait_time)
                responses.extend(json_responses)
        except queue.Empty:
            raise exception.CellTimeout()
        finally:
            # Always tear the queue down, even on timeout.
            self._cleanup_response_queue()
        return responses

    def _send_json_responses(self, json_responses, neighbor_only=False,
                             fanout=False):
        """Send list of responses to this message.  Responses passed here
        are JSON-ified.  Targeted messages have a single response while
        Broadcast messages may have multiple responses.

        If this cell was the source of the message, these responses will
        be returned from self.process().

        Otherwise, we will route the response to the source of the
        request.  If 'neighbor_only' is True, the response will be sent
        to the neighbor cell, not the original requester.  Broadcast
        messages get aggregated at each hop, so neighbor_only will be
        True for those messages.
        """
        if not self.need_response:
            return
        if self.source_is_us():
            responses = []
            for json_response in json_responses:
                responses.append(Response.from_json(json_response))
            return responses
        # Responses travel in the opposite direction to the request.
        direction = self.direction == 'up' and 'down' or 'up'
        response_kwargs = {'orig_message': self.to_json(),
                           'responses': json_responses}
        target_cell = _response_cell_name_from_path(self.routing_path,
                neighbor_only=neighbor_only)
        response = self.msg_runner._create_response_message(self.ctxt,
                direction, target_cell, self.uuid, response_kwargs,
                fanout=fanout)
        response.process()

    def _send_response(self, response, neighbor_only=False):
        """Send a response to this message.  If the source of the
        request was ourselves, just return the response.  It'll be
        passed back to the caller of self.process().  See DocString for
        _send_json_responses() as it handles most of the real work for
        this method.

        'response' is an instance of Response class.
        """
        if not self.need_response:
            return
        if self.source_is_us():
            return response
        self._send_json_responses([response.to_json()],
                                  neighbor_only=neighbor_only)

    def _send_response_from_exception(self, exc_info):
        """Take an exception as returned from sys.exc_info(), encode
        it in a Response, and send it.
        """
        response = Response(self.routing_path, exc_info, True)
        return self._send_response(response)

    def _to_dict(self):
        """Convert a message to a dictionary.  Only used internally."""
        _dict = {}
        for key in self.base_attrs_to_json:
            _dict[key] = getattr(self, key)
        return _dict

    def to_json(self):
        """Convert a message into JSON for sending to a sibling cell."""
        _dict = self._to_dict()
        # Convert context to dict.
        _dict['ctxt'] = _dict['ctxt'].to_dict()
        return jsonutils.dumps(_dict)

    def source_is_us(self):
        """Did this cell create this message?"""
        return self.routing_path == self.our_path_part

    def process(self):
        """Process a message.  Deal with it locally and/or forward it to a
        sibling cell.

        Override in a subclass.
        """
        raise NotImplementedError()
class _TargetedMessage(_BaseMessage):
    """A targeted message is a message that is destined for a specific
    single cell.

    'target_cell' can be a full cell name like 'api!child-cell' or it can
    be an instance of the CellState class if the target is a neighbor cell.
    """
    message_type = 'targeted'

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, target_cell, **kwargs):
        super(_TargetedMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, **kwargs)
        if isinstance(target_cell, cells_state.CellState):
            # Neighbor cell or ourselves.  Convert it to a 'full path'.
            if target_cell.is_me:
                target_cell = self.our_path_part
            else:
                target_cell = '%s%s%s' % (self.our_path_part,
                                          _PATH_CELL_SEP,
                                          target_cell.name)
        self.target_cell = target_cell
        # target_cell must travel with the message, so serialize it too.
        self.base_attrs_to_json.append('target_cell')

    def _get_next_hop(self):
        """Return the cell name for the next hop.  If the next hop is
        the current cell, return None.

        NOTE(review): the code actually returns the local CellState
        (my_cell_state) when we are the target, never None -- the docstring
        above appears stale; confirm against callers.
        """
        if self.target_cell == self.routing_path:
            # We are the destination.
            return self.state_manager.my_cell_state
        target_cell = self.target_cell
        routing_path = self.routing_path
        # Hop counts are derived from the number of separators in each path.
        current_hops = routing_path.count(_PATH_CELL_SEP)
        next_hop_num = current_hops + 1
        dest_hops = target_cell.count(_PATH_CELL_SEP)
        if dest_hops < current_hops:
            # Destination path is shorter than where we've already been.
            reason_args = {'target_cell': target_cell,
                           'routing_path': routing_path}
            reason = _("destination is %(target_cell)s but routing_path "
                       "is %(routing_path)s") % reason_args
            raise exception.CellRoutingInconsistency(reason=reason)
        dest_name_parts = target_cell.split(_PATH_CELL_SEP)
        if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
                routing_path):
            # Our current path is not a prefix of the destination path.
            reason_args = {'target_cell': target_cell,
                           'routing_path': routing_path}
            reason = _("destination is %(target_cell)s but routing_path "
                       "is %(routing_path)s") % reason_args
            raise exception.CellRoutingInconsistency(reason=reason)
        next_hop_name = dest_name_parts[next_hop_num]
        if self.direction == 'up':
            next_hop = self.state_manager.get_parent_cell(next_hop_name)
        else:
            next_hop = self.state_manager.get_child_cell(next_hop_name)
        if not next_hop:
            cell_type = 'parent' if self.direction == 'up' else 'child'
            reason_args = {'cell_type': cell_type,
                           'target_cell': target_cell}
            reason = _("Unknown %(cell_type)s when routing to "
                       "%(target_cell)s") % reason_args
            raise exception.CellRoutingInconsistency(reason=reason)
        return next_hop

    def process(self):
        """Process a targeted message.  This is called for all cells
        that touch this message.  If the local cell is the one that
        created this message, we reply directly with a Response instance.
        If the local cell is not the target, an eventlet queue is created
        and we wait for the response to show up via another thread
        receiving the Response back.

        Responses to targeted messages are routed directly back to the
        source.  No eventlet queues are created in intermediate hops.

        All exceptions for processing the message across the whole
        routing path are caught and encoded within the Response and
        returned to the caller.
        """
        try:
            next_hop = self._get_next_hop()
        except Exception as exc:
            exc_info = sys.exc_info()
            LOG.exception(_("Error locating next hop for message: %(exc)s"),
                          {'exc': exc})
            return self._send_response_from_exception(exc_info)
        if next_hop.is_me:
            # Final destination.
            response = self._process_locally()
            return self._send_response(response)
        # Need to forward via neighbor cell.
        if self.need_response and self.source_is_us():
            # A response is needed and the source of the message is
            # this cell.  Create the eventlet queue.
            self._setup_response_queue()
            wait_for_response = True
        else:
            wait_for_response = False
        try:
            # This is inside the try block, so we can encode the
            # exception and return it to the caller.
            if self.hop_count >= self.max_hop_count:
                raise exception.CellMaxHopCountReached(
                        hop_count=self.hop_count)
            next_hop.send_message(self)
        except Exception as exc:
            exc_info = sys.exc_info()
            err_str = _("Failed to send message to cell: %(next_hop)s: "
                        "%(exc)s")
            LOG.exception(err_str, {'exc': exc, 'next_hop': next_hop})
            # Don't leak the queue if the send failed.
            self._cleanup_response_queue()
            return self._send_response_from_exception(exc_info)
        if wait_for_response:
            # Targeted messages only have 1 response.
            remote_response = self._wait_for_json_responses()[0]
            return Response.from_json(remote_response)
class _BroadcastMessage(_BaseMessage):
    """A broadcast message. This means to call a method in every single
    cell going in a certain direction.
    """
    message_type = 'broadcast'
    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, run_locally=True, **kwargs):
        super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, **kwargs)
        # The local cell creating this message has the option
        # to be able to process the message locally or not.
        self.run_locally = run_locally
        self.is_broadcast = True
    def _get_next_hops(self):
        """Return the list of neighbor cells to forward this message to
        next, based on our direction.  Returns an empty list once the
        max hop count has been reached so the broadcast stops fanning
        out.
        """
        if self.hop_count >= self.max_hop_count:
            return []
        if self.direction == 'down':
            return self.state_manager.get_child_cells()
        else:
            return self.state_manager.get_parent_cells()
    def _send_to_cells(self, target_cells):
        """Send a message to multiple cells."""
        for cell in target_cells:
            cell.send_message(self)
    def _send_json_responses(self, json_responses):
        """Responses to broadcast messages always need to go to the
        neighbor cell from which we received this message. That
        cell aggregates the responses and makes sure to forward them
        to the correct source.
        """
        return super(_BroadcastMessage, self)._send_json_responses(
                json_responses, neighbor_only=True, fanout=True)
    def process(self):
        """Process a broadcast message. This is called for all cells
        that touch this message.
        The message is sent to all cells in the certain direction and
        the creator of this message has the option of whether or not
        to process it locally as well.
        If responses from all cells are required, each hop creates an
        eventlet queue and waits for responses from its immediate
        neighbor cells. All responses are then aggregated into a
        single list and are returned to the neighbor cell until the
        source is reached.
        When the source is reached, a list of Response instances are
        returned to the caller.
        All exceptions for processing the message across the whole
        routing path are caught and encoded within the Response and
        returned to the caller. It is possible to get a mix of
        successful responses and failure responses. The caller is
        responsible for dealing with this.
        """
        try:
            next_hops = self._get_next_hops()
        except Exception as exc:
            exc_info = sys.exc_info()
            LOG.exception(_("Error locating next hops for message: %(exc)s"),
                          {'exc': exc})
            return self._send_response_from_exception(exc_info)
        # Short circuit if we don't need to respond
        if not self.need_response:
            if self.run_locally:
                self._process_locally()
            self._send_to_cells(next_hops)
            return
        # We'll need to aggregate all of the responses (from ourself
        # and our sibling cells) into 1 response
        try:
            # Queue must exist before any neighbor can respond, so set
            # it up before sending.
            self._setup_response_queue()
            self._send_to_cells(next_hops)
        except Exception as exc:
            # Error just trying to send to cells. Send a single response
            # with the failure.
            exc_info = sys.exc_info()
            LOG.exception(_("Error sending message to next hops: %(exc)s"),
                          {'exc': exc})
            self._cleanup_response_queue()
            return self._send_response_from_exception(exc_info)
        if self.run_locally:
            # Run locally and store the Response.
            local_response = self._process_locally()
        else:
            local_response = None
        try:
            remote_responses = self._wait_for_json_responses(
                    num_responses=len(next_hops))
        except Exception as exc:
            # Error waiting for responses, most likely a timeout.
            # Send a single response back with the failure.
            exc_info = sys.exc_info()
            err_str = _("Error waiting for responses from neighbor cells: "
                        "%(exc)s")
            LOG.exception(err_str, {'exc': exc})
            return self._send_response_from_exception(exc_info)
        if local_response:
            remote_responses.append(local_response.to_json())
        return self._send_json_responses(remote_responses)
class _ResponseMessage(_TargetedMessage):
    """A response message is really just a special targeted message,
    saying to call 'parse_responses' when we reach the source of a 'call'.
    The 'fanout' attribute on this message may be true if we're responding
    to a broadcast or if we're about to respond to the source of an
    original target message. Because multiple nova-cells services may
    be running within a cell, we need to make sure the response gets
    back to the correct one, so we have to fanout.
    """
    message_type = 'response'
    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, target_cell, response_uuid, **kwargs):
        super(_ResponseMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, target_cell, **kwargs)
        # Identifies the response queue in the source cell that is
        # waiting on this reply.
        self.response_uuid = response_uuid
        # Ensure 'response_uuid' is included when the message is
        # serialized to JSON for the next hop.
        # NOTE(review): if base_attrs_to_json is a class-level list in
        # _BaseMessage, this append mutates shared state on every
        # construction -- verify it is copied per-instance.
        self.base_attrs_to_json.append('response_uuid')
    def process(self):
        """Process a response. If the target is the local cell, process
        the response here. Otherwise, forward it to where it needs to
        go.
        """
        next_hop = self._get_next_hop()
        if next_hop.is_me:
            self._process_locally()
            return
        if self.fanout is False:
            # Really there's 1 more hop on each of these below, but
            # it doesn't matter for this logic.
            target_hops = self.target_cell.count(_PATH_CELL_SEP)
            current_hops = self.routing_path.count(_PATH_CELL_SEP)
            if current_hops + 1 == target_hops:
                # Next hop is the target.. so we must fanout. See
                # DocString above.
                self.fanout = True
        next_hop.send_message(self)
#
# Methods that may be called when processing messages after reaching
# a target cell.
#
class _BaseMessageMethods(base.Base):
    """Base class for defining methods by message types."""
    def __init__(self, msg_runner):
        """Cache the message runner along with the API/RPC helpers that
        the concrete message-method classes use to do their work.
        """
        super(_BaseMessageMethods, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.host_api = compute.HostAPI()
    def task_log_get_all(self, message, task_name, period_beginning,
                         period_ending, host, state):
        """Get task logs from the DB. The message could have
        directly targeted this cell, or it could have been a broadcast
        message.
        If 'host' is not None, filter by host.
        If 'state' is not None, filter by state.
        """
        task_logs = self.db.task_log_get_all(message.ctxt, task_name,
                                             period_beginning,
                                             period_ending,
                                             host=host,
                                             state=state)
        # Convert to primitives so the result can be JSON encoded into
        # a Response for the calling cell.
        return jsonutils.to_primitive(task_logs)
class _ResponseMessageMethods(_BaseMessageMethods):
    """Handler methods for _ResponseMessage.

    The single method here, parse_responses(), runs when a response
    message finally reaches the cell that originated a 'call'.  Its only
    job is to drop the aggregated responses into the eventlet queue that
    the blocked caller is waiting on.
    """
    def parse_responses(self, message, orig_message, responses):
        """Deliver *responses* to the caller waiting on this
        message's response_uuid.
        """
        runner = self.msg_runner
        runner._put_response(message.response_uuid, responses)
class _TargetedMessageMethods(_BaseMessageMethods):
    """These are the methods that can be called when routing a message
    to a specific cell.
    """

    def __init__(self, *args, **kwargs):
        super(_TargetedMessageMethods, self).__init__(*args, **kwargs)

    def _get_instance_by_uuid(self, message, instance_uuid):
        """Fetch an instance from our DB by uuid, returning the model.

        If the instance is not found, it must have been deleted in a
        race, so tell the top-level cells to destroy their copy as well
        and re-raise InstanceNotFound so the error propagates back to
        the calling cell.
        """
        try:
            return self.db.instance_get_by_uuid(message.ctxt,
                                                instance_uuid)
        except exception.InstanceNotFound:
            with excutils.save_and_reraise_exception():
                self.msg_runner.instance_destroy_at_top(
                        message.ctxt, {'uuid': instance_uuid})

    def schedule_run_instance(self, message, host_sched_kwargs):
        """Parent cell told us to schedule new instance creation."""
        self.msg_runner.scheduler.run_instance(message, host_sched_kwargs)

    def build_instances(self, message, build_inst_kwargs):
        """Parent cell told us to build new instance(s)."""
        self.msg_runner.scheduler.build_instances(message, build_inst_kwargs)

    def run_compute_api_method(self, message, method_info):
        """Run a method in the compute api class.

        :param message: the routed message; supplies the request context
        :param method_info: dict with keys 'method' (compute API method
            name), 'method_args' (positional args, the first being an
            instance uuid) and 'method_kwargs'
        :raises: CellServiceAPIMethodNotFound if the compute API has no
            such method
        """
        method = method_info['method']
        fn = getattr(self.compute_api, method, None)
        if not fn:
            detail = _("Unknown method '%(method)s' in compute API")
            raise exception.CellServiceAPIMethodNotFound(
                    detail=detail % {'method': method})
        args = list(method_info['method_args'])
        # 1st arg is an instance uuid that we need to turn into the
        # instance object before calling the compute API method.
        args[0] = self._get_instance_by_uuid(message, args[0])
        return fn(message.ctxt, *args, **method_info['method_kwargs'])

    def update_capabilities(self, message, cell_name, capabilities):
        """A child cell told us about their capabilities."""
        LOG.debug(_("Received capabilities from child cell "
                    "%(cell_name)s: %(capabilities)s"),
                  {'cell_name': cell_name, 'capabilities': capabilities})
        self.state_manager.update_cell_capabilities(cell_name,
                                                    capabilities)
        # Go ahead and update our parents now that a child updated us
        self.msg_runner.tell_parents_our_capabilities(message.ctxt)

    def update_capacities(self, message, cell_name, capacities):
        """A child cell told us about their capacity."""
        LOG.debug(_("Received capacities from child cell "
                    "%(cell_name)s: %(capacities)s"),
                  {'cell_name': cell_name, 'capacities': capacities})
        self.state_manager.update_cell_capacities(cell_name,
                                                  capacities)
        # Go ahead and update our parents now that a child updated us
        self.msg_runner.tell_parents_our_capacities(message.ctxt)

    def announce_capabilities(self, message):
        """A parent cell has told us to send our capabilities, so let's
        do so.
        """
        self.msg_runner.tell_parents_our_capabilities(message.ctxt)

    def announce_capacities(self, message):
        """A parent cell has told us to send our capacity, so let's
        do so.
        """
        self.msg_runner.tell_parents_our_capacities(message.ctxt)

    def service_get_by_compute_host(self, message, host_name):
        """Return the service entry for a compute host."""
        service = self.db.service_get_by_compute_host(message.ctxt,
                                                      host_name)
        return jsonutils.to_primitive(service)

    def service_update(self, message, host_name, binary, params_to_update):
        """
        Used to enable/disable a service. For compute services, setting to
        disabled stops new builds arriving on that host.
        :param host_name: the name of the host machine that the service is
                          running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        """
        return jsonutils.to_primitive(
            self.host_api.service_update(message.ctxt, host_name, binary,
                                         params_to_update))

    def proxy_rpc_to_manager(self, message, host_name, rpc_message,
                             topic, timeout):
        """Proxy RPC to the given compute topic."""
        # Check that the host exists; raises if it does not.
        self.db.service_get_by_compute_host(message.ctxt, host_name)
        if message.need_response:
            return rpc.call(message.ctxt, topic, rpc_message,
                            timeout=timeout)
        rpc.cast(message.ctxt, topic, rpc_message)

    def compute_node_get(self, message, compute_id):
        """Get compute node by ID."""
        compute_node = self.db.compute_node_get(message.ctxt,
                                                compute_id)
        return jsonutils.to_primitive(compute_node)

    def actions_get(self, message, instance_uuid):
        """Return all actions for an instance."""
        actions = self.db.actions_get(message.ctxt, instance_uuid)
        return jsonutils.to_primitive(actions)

    def action_get_by_request_id(self, message, instance_uuid, request_id):
        """Return the instance action matching the given request_id."""
        action = self.db.action_get_by_request_id(message.ctxt, instance_uuid,
                                                  request_id)
        return jsonutils.to_primitive(action)

    def action_events_get(self, message, action_id):
        """Return all events for an instance action."""
        action_events = self.db.action_events_get(message.ctxt, action_id)
        return jsonutils.to_primitive(action_events)

    def validate_console_port(self, message, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node."""
        instance = self._get_instance_by_uuid(message, instance_uuid)
        return self.compute_rpcapi.validate_console_port(message.ctxt,
                instance, console_port, console_type)
class _BroadcastMessageMethods(_BaseMessageMethods):
    """These are the methods that can be called as a part of a broadcast
    message.
    """
    def _at_the_top(self):
        """Are we the API level?"""
        # Top-level (API) cells are the ones with no parents.
        return not self.state_manager.get_parent_cells()
    def instance_update_at_top(self, message, instance, **kwargs):
        """Update an instance in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']
        # Remove things that we can't update in the top level cells.
        # 'metadata' is only updated in the API cell, so don't overwrite
        # it based on what child cells say. Make sure to update
        # 'cell_name' based on the routing path.
        items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
                           'name', 'metadata']
        for key in items_to_remove:
            instance.pop(key, None)
        # The reversed routing path identifies the cell the instance
        # actually lives in.
        instance['cell_name'] = _reverse_path(message.routing_path)
        # Fixup info_cache. We'll have to update this separately if
        # it exists.
        info_cache = instance.pop('info_cache', None)
        if info_cache is not None:
            info_cache.pop('id', None)
            info_cache.pop('instance', None)
        if 'system_metadata' in instance:
            # Make sure we have the dict form that we need for
            # instance_update.
            instance['system_metadata'] = utils.instance_sys_meta(instance)
        LOG.debug(_("Got update for instance: %(instance)s"),
                  {'instance': instance}, instance_uuid=instance_uuid)
        # To attempt to address out-of-order messages, do some sanity
        # checking on the VM state.
        expected_vm_state_map = {
                # For updates containing 'vm_state' of 'building',
                # only allow them to occur if the DB already says
                # 'building' or if the vm_state is None. None
                # really shouldn't be possible as instances always
                # start out in 'building' anyway.. but just in case.
                vm_states.BUILDING: [vm_states.BUILDING, None]}
        expected_vm_states = expected_vm_state_map.get(
                instance.get('vm_state'))
        if expected_vm_states:
            instance['expected_vm_state'] = expected_vm_states
        # It's possible due to some weird condition that the instance
        # was already set as deleted... so we'll attempt to update
        # it with permissions that allows us to read deleted.
        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
            try:
                self.db.instance_update(message.ctxt, instance_uuid,
                        instance, update_cells=False)
            except exception.NotFound:
                # FIXME(comstud): Strange. Need to handle quotas here,
                # if we actually want this code to remain..
                self.db.instance_create(message.ctxt, instance)
        if info_cache:
            try:
                self.db.instance_info_cache_update(
                        message.ctxt, instance_uuid, info_cache)
            except exception.InstanceInfoCacheNotFound:
                # Can happen if we try to update a deleted instance's
                # network information.
                pass
    def instance_destroy_at_top(self, message, instance, **kwargs):
        """Destroy an instance from the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']
        LOG.debug(_("Got update to delete instance"),
                  instance_uuid=instance_uuid)
        try:
            self.db.instance_destroy(message.ctxt, instance_uuid,
                    update_cells=False)
        except exception.InstanceNotFound:
            # Already gone; nothing to do.
            pass
    def instance_delete_everywhere(self, message, instance, delete_type,
                                   **kwargs):
        """Call compute API delete() or soft_delete() in every cell.
        This is used when the API cell doesn't know what cell an instance
        belongs to but the instance was requested to be deleted or
        soft-deleted. So, we'll run it everywhere.
        """
        LOG.debug(_("Got broadcast to %(delete_type)s delete instance"),
                  {'delete_type': delete_type}, instance=instance)
        if delete_type == 'soft':
            self.compute_api.soft_delete(message.ctxt, instance)
        else:
            self.compute_api.delete(message.ctxt, instance)
    def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
        """Create an instance fault in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        # The child cell's DB 'id' is meaningless up here.
        items_to_remove = ['id']
        for key in items_to_remove:
            instance_fault.pop(key, None)
        log_str = _("Got message to create instance fault: "
                    "%(instance_fault)s")
        LOG.debug(log_str, {'instance_fault': instance_fault})
        self.db.instance_fault_create(message.ctxt, instance_fault)
    def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
        """Update Bandwidth usage in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        self.db.bw_usage_update(message.ctxt, **bw_update_info)
    def _sync_instance(self, ctxt, instance):
        """Forward one instance to the top as either a destroy or an
        update, depending on its 'deleted' flag.
        """
        if instance['deleted']:
            self.msg_runner.instance_destroy_at_top(ctxt, instance)
        else:
            self.msg_runner.instance_update_at_top(ctxt, instance)
    def sync_instances(self, message, project_id, updated_since, deleted,
                       **kwargs):
        """Force a sync of our instances up to the top-level cells,
        optionally filtered by project and update time.
        """
        projid_str = project_id is None and "<all>" or project_id
        since_str = updated_since is None and "<all>" or updated_since
        LOG.info(_("Forcing a sync of instances, project_id="
                   "%(projid_str)s, updated_since=%(since_str)s"),
                 {'projid_str': projid_str, 'since_str': since_str})
        if updated_since is not None:
            updated_since = timeutils.parse_isotime(updated_since)
        instances = cells_utils.get_instances_to_sync(message.ctxt,
                updated_since=updated_since, project_id=project_id,
                deleted=deleted)
        for instance in instances:
            self._sync_instance(message.ctxt, instance)
    def service_get_all(self, message, filters):
        """Return services matching 'filters'; 'disabled' is handled by
        the DB query, any other filter keys are matched in Python.
        """
        if filters is None:
            filters = {}
        disabled = filters.pop('disabled', None)
        services = self.db.service_get_all(message.ctxt, disabled=disabled)
        ret_services = []
        for service in services:
            service = jsonutils.to_primitive(service)
            for key, val in filters.iteritems():
                if service[key] != val:
                    break
            else:
                # Only kept when every remaining filter matched.
                ret_services.append(service)
        return ret_services
    def compute_node_get_all(self, message, hypervisor_match):
        """Return compute nodes in this cell."""
        if hypervisor_match is not None:
            nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
                    hypervisor_match)
        else:
            nodes = self.db.compute_node_get_all(message.ctxt)
        return jsonutils.to_primitive(nodes)
    def compute_node_stats(self, message):
        """Return compute node stats from this cell."""
        return self.db.compute_node_statistics(message.ctxt)
    def consoleauth_delete_tokens(self, message, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        if not self._at_the_top():
            return
        self.consoleauth_rpcapi.delete_tokens_for_instance(message.ctxt,
                                                           instance_uuid)
# Maps the 'message_type' string carried in a JSONified message to the
# Message class used to reconstruct it on the receiving cell.
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
                                     'broadcast': _BroadcastMessage,
                                     'response': _ResponseMessage}
# Maps 'message_type' to the class providing the callable handler
# methods for that type (instantiated once per MessageRunner).
_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
                                     'broadcast': _BroadcastMessageMethods,
                                     'response': _ResponseMessageMethods}
#
# Below are the public interfaces into this module.
#
class MessageRunner(object):
    """This class is the main interface into creating messages and
    processing them.
    Public methods in this class are typically called by the CellsManager
    to create a new message and process it with the exception of
    'message_from_json' which should be used by CellsDrivers to convert
    a JSONified message it has received back into the appropriate Message
    class.
    Private methods are used internally when we need to keep some
    'global' state. For instance, eventlet queues used for responses are
    held in this class. Also, when a Message is process()ed above and
    it's determined we should take action locally,
    _process_message_locally() will be called.
    When needing to add a new method to call in a Cell2Cell message,
    define the new method below and also add it to the appropriate
    MessageMethods class where the real work will be done.
    """
    def __init__(self, state_manager):
        self.state_manager = state_manager
        cells_scheduler_cls = importutils.import_class(
                CONF.cells.scheduler)
        self.scheduler = cells_scheduler_cls(self)
        # Maps response_uuid -> queue that a blocked 'call' is waiting on.
        self.response_queues = {}
        # Maps message_type -> the *MessageMethods instance that holds
        # the handlers for that type.
        self.methods_by_type = {}
        self.our_name = CONF.cells.name
        for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
            self.methods_by_type[msg_type] = cls(self)
    def _process_message_locally(self, message):
        """Message processing will call this when its determined that
        the message should be processed within this cell. Find the
        method to call based on the message type, and call it. The
        caller is responsible for catching exceptions and returning
        results to cells, if needed.
        """
        methods = self.methods_by_type[message.message_type]
        fn = getattr(methods, message.method_name)
        return fn(message, **message.method_kwargs)
    def _put_response(self, response_uuid, response):
        """Put a response into a response queue. This is called when
        a _ResponseMessage is processed in the cell that initiated a
        'call' to another cell.
        """
        resp_queue = self.response_queues.get(response_uuid)
        if not resp_queue:
            # Response queue is gone. We must have restarted or we
            # received a response after our timeout period.
            return
        resp_queue.put(response)
    def _setup_response_queue(self, message):
        """Set up an eventlet queue to use to wait for replies.
        Replies come back from the target cell as a _ResponseMessage
        being sent back to the source.
        """
        # NOTE: 'queue' here is presumably eventlet's queue module
        # (imported at the top of the file) -- not stdlib Queue.
        resp_queue = queue.Queue()
        self.response_queues[message.uuid] = resp_queue
        return resp_queue
    def _cleanup_response_queue(self, message):
        """Stop tracking the response queue either because we're
        done receiving responses, or we've timed out.
        """
        try:
            del self.response_queues[message.uuid]
        except KeyError:
            # Ignore if queue is gone already somehow.
            pass
    def _create_response_message(self, ctxt, direction, target_cell,
            response_uuid, response_kwargs, **kwargs):
        """Create a ResponseMessage. This is used internally within
        the messaging module.
        """
        return _ResponseMessage(self, ctxt, 'parse_responses',
                                response_kwargs, direction, target_cell,
                                response_uuid, **kwargs)
    def message_from_json(self, json_message):
        """Turns a message in JSON format into an appropriate Message
        instance. This is called when cells receive a message from
        another cell.
        """
        message_dict = jsonutils.loads(json_message)
        message_type = message_dict.pop('message_type')
        # Need to convert context back.
        ctxt = message_dict['ctxt']
        message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
        message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
        return message_cls(self, **message_dict)
    def ask_children_for_capabilities(self, ctxt):
        """Tell child cells to send us capabilities. This is typically
        called on startup of the nova-cells service.
        """
        child_cells = self.state_manager.get_child_cells()
        for child_cell in child_cells:
            message = _TargetedMessage(self, ctxt,
                                       'announce_capabilities',
                                       dict(), 'down', child_cell)
            message.process()
    def ask_children_for_capacities(self, ctxt):
        """Tell child cells to send us capacities. This is typically
        called on startup of the nova-cells service.
        """
        child_cells = self.state_manager.get_child_cells()
        for child_cell in child_cells:
            message = _TargetedMessage(self, ctxt, 'announce_capacities',
                                       dict(), 'down', child_cell)
            message.process()
    def tell_parents_our_capabilities(self, ctxt):
        """Send our capabilities to parent cells."""
        parent_cells = self.state_manager.get_parent_cells()
        if not parent_cells:
            return
        my_cell_info = self.state_manager.get_my_state()
        capabs = self.state_manager.get_our_capabilities()
        LOG.debug(_("Updating parents with our capabilities: %(capabs)s"),
                  {'capabs': capabs})
        # We have to turn the sets into lists so they can potentially
        # be json encoded when the raw message is sent.
        for key, values in capabs.items():
            capabs[key] = list(values)
        method_kwargs = {'cell_name': my_cell_info.name,
                         'capabilities': capabs}
        for cell in parent_cells:
            message = _TargetedMessage(self, ctxt, 'update_capabilities',
                    method_kwargs, 'up', cell, fanout=True)
            message.process()
    def tell_parents_our_capacities(self, ctxt):
        """Send our capacities to parent cells."""
        parent_cells = self.state_manager.get_parent_cells()
        if not parent_cells:
            return
        my_cell_info = self.state_manager.get_my_state()
        capacities = self.state_manager.get_our_capacities()
        LOG.debug(_("Updating parents with our capacities: %(capacities)s"),
                  {'capacities': capacities})
        method_kwargs = {'cell_name': my_cell_info.name,
                         'capacities': capacities}
        for cell in parent_cells:
            message = _TargetedMessage(self, ctxt, 'update_capacities',
                    method_kwargs, 'up', cell, fanout=True)
            message.process()
    def schedule_run_instance(self, ctxt, target_cell, host_sched_kwargs):
        """Called by the scheduler to tell a child cell to schedule
        a new instance for build.
        """
        method_kwargs = dict(host_sched_kwargs=host_sched_kwargs)
        message = _TargetedMessage(self, ctxt, 'schedule_run_instance',
                                   method_kwargs, 'down', target_cell)
        message.process()
    def build_instances(self, ctxt, target_cell, build_inst_kwargs):
        """Called by the cell scheduler to tell a child cell to build
        instance(s).
        """
        method_kwargs = dict(build_inst_kwargs=build_inst_kwargs)
        message = _TargetedMessage(self, ctxt, 'build_instances',
                                   method_kwargs, 'down', target_cell)
        message.process()
    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
        """Call a compute API method in a specific cell."""
        message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
                                   dict(method_info=method_info), 'down',
                                   cell_name, need_response=call)
        return message.process()
    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
                                    dict(instance=instance), 'up',
                                    run_locally=False)
        message.process()
    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
                                    dict(instance=instance), 'up',
                                    run_locally=False)
        message.process()
    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted. So, we'll broadcast this everywhere.
        """
        method_kwargs = dict(instance=instance, delete_type=delete_type)
        message = _BroadcastMessage(self, ctxt,
                                    'instance_delete_everywhere',
                                    method_kwargs, 'down',
                                    run_locally=False)
        message.process()
    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        message = _BroadcastMessage(self, ctxt,
                                    'instance_fault_create_at_top',
                                    dict(instance_fault=instance_fault),
                                    'up', run_locally=False)
        message.process()
    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
                                    dict(bw_update_info=bw_update_info),
                                    'up', run_locally=False)
        message.process()
    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        method_kwargs = dict(project_id=project_id,
                             updated_since=updated_since,
                             deleted=deleted)
        message = _BroadcastMessage(self, ctxt, 'sync_instances',
                                    method_kwargs, 'down',
                                    run_locally=False)
        message.process()
    def service_get_all(self, ctxt, filters=None):
        """Get service records from all cells, optionally filtered.
        Returns a list of Response objects.
        """
        method_kwargs = dict(filters=filters)
        message = _BroadcastMessage(self, ctxt, 'service_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()
    def service_get_by_compute_host(self, ctxt, cell_name, host_name):
        """Return the service entry for a compute host in a given cell."""
        method_kwargs = dict(host_name=host_name)
        message = _TargetedMessage(self, ctxt,
                                  'service_get_by_compute_host',
                                  method_kwargs, 'down', cell_name,
                                  need_response=True)
        return message.process()
    def service_update(self, ctxt, cell_name, host_name, binary,
                       params_to_update):
        """
        Used to enable/disable a service. For compute services, setting to
        disabled stops new builds arriving on that host.
        :param host_name: the name of the host machine that the service is
                          running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        :returns: the update service object
        """
        method_kwargs = dict(host_name=host_name, binary=binary,
                             params_to_update=params_to_update)
        message = _TargetedMessage(self, ctxt,
                                  'service_update',
                                  method_kwargs, 'down', cell_name,
                                  need_response=True)
        return message.process()
    def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
                             rpc_message, call, timeout):
        """Proxy an RPC message to a manager (by topic) in a target cell,
        optionally waiting for the response when 'call' is True.
        """
        method_kwargs = {'host_name': host_name,
                         'topic': topic,
                         'rpc_message': rpc_message,
                         'timeout': timeout}
        message = _TargetedMessage(self, ctxt,
                                   'proxy_rpc_to_manager',
                                   method_kwargs, 'down', cell_name,
                                   need_response=call)
        return message.process()
    def task_log_get_all(self, ctxt, cell_name, task_name,
                         period_beginning, period_ending,
                         host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.
        If 'cell_name' is None or '', get responses from all cells.
        If 'host' is not None, filter by host.
        If 'state' is not None, filter by state.
        Return a list of Response objects.
        """
        method_kwargs = dict(task_name=task_name,
                             period_beginning=period_beginning,
                             period_ending=period_ending,
                             host=host, state=state)
        if cell_name:
            message = _TargetedMessage(self, ctxt, 'task_log_get_all',
                                    method_kwargs, 'down',
                                    cell_name, need_response=True)
            # Caller should get a list of Responses.
            return [message.process()]
        message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()
    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all child cells."""
        method_kwargs = dict(hypervisor_match=hypervisor_match)
        message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()
    def compute_node_stats(self, ctxt):
        """Return compute node stats from all child cells."""
        method_kwargs = dict()
        message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()
    def compute_node_get(self, ctxt, cell_name, compute_id):
        """Return compute node entry from a specific cell by ID."""
        method_kwargs = dict(compute_id=compute_id)
        message = _TargetedMessage(self, ctxt, 'compute_node_get',
                                    method_kwargs, 'down',
                                    cell_name, need_response=True)
        return message.process()
    def actions_get(self, ctxt, cell_name, instance_uuid):
        """Get all actions for an instance from a specific cell."""
        method_kwargs = dict(instance_uuid=instance_uuid)
        message = _TargetedMessage(self, ctxt, 'actions_get',
                                method_kwargs, 'down',
                                cell_name, need_response=True)
        return message.process()
    def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                                 request_id):
        """Get an instance action by request_id from a specific cell."""
        method_kwargs = dict(instance_uuid=instance_uuid,
                             request_id=request_id)
        message = _TargetedMessage(self, ctxt, 'action_get_by_request_id',
                                method_kwargs, 'down',
                                cell_name, need_response=True)
        return message.process()
    def action_events_get(self, ctxt, cell_name, action_id):
        """Get events for an instance action from a specific cell."""
        method_kwargs = dict(action_id=action_id)
        message = _TargetedMessage(self, ctxt, 'action_events_get',
                                method_kwargs, 'down',
                                cell_name, need_response=True)
        return message.process()
    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        message = _BroadcastMessage(self, ctxt, 'consoleauth_delete_tokens',
                                    dict(instance_uuid=instance_uuid),
                                    'up', run_locally=False)
        message.process()
    def validate_console_port(self, ctxt, cell_name, instance_uuid,
                              console_port, console_type):
        """Validate console port with child cell compute node."""
        method_kwargs = {'instance_uuid': instance_uuid,
                         'console_port': console_port,
                         'console_type': console_type}
        message = _TargetedMessage(self, ctxt, 'validate_console_port',
                                   method_kwargs, 'down',
                                   cell_name, need_response=True)
        return message.process()
    @staticmethod
    def get_message_types():
        # The known 'message_type' values ('targeted', 'broadcast',
        # 'response').
        return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
class Response(object):
    """Holds a response from a cell. If there was a failure, 'failure'
    will be True and 'response' will contain an encoded Exception.
    """
    def __init__(self, cell_name, value, failure):
        # True when 'value' holds a (serialized or live) exception
        # instead of a normal return value.
        self.failure = failure
        # Routing-path name of the cell that produced this response.
        self.cell_name = cell_name
        self.value = value
    def to_json(self):
        """Serialize to JSON, encoding an exception value so it can
        safely cross cell boundaries.
        """
        resp_value = self.value
        if self.failure:
            resp_value = rpc_common.serialize_remote_exception(resp_value,
                                                               log_failure=False)
        _dict = {'cell_name': self.cell_name,
                 'value': resp_value,
                 'failure': self.failure}
        return jsonutils.dumps(_dict)
    @classmethod
    def from_json(cls, json_message):
        """Alternate constructor: rebuild a Response from its JSON form,
        deserializing an encoded exception on failure.
        """
        _dict = jsonutils.loads(json_message)
        if _dict['failure']:
            resp_value = rpc_common.deserialize_remote_exception(
                    CONF, _dict['value'])
            _dict['value'] = resp_value
        return cls(**_dict)
    def value_or_raise(self):
        """Return the value, or re-raise the exception if this response
        represents a failure.
        """
        if self.failure:
            if isinstance(self.value, (tuple, list)):
                # Python 2 three-expression raise: re-raises with the
                # original traceback (exc_info tuple) preserved.
                raise self.value[0], self.value[1], self.value[2]
            else:
                raise self.value
        return self.value
|
{
"content_hash": "4fe50eff572cc213865026e2248933bf",
"timestamp": "",
"source": "github",
"line_count": 1369,
"max_line_length": 78,
"avg_line_length": 43.096420745069395,
"alnum_prop": 0.5880099662706147,
"repo_name": "DirectXMan12/nova-hacking",
"id": "6f4183f5d140619926975488cec486ce403be65d",
"size": "59636",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/cells/messaging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
}
|
import binascii
import dns.exception
import dns.rdata
import dns.tokenizer
class NSAP(dns.rdata.Rdata):
    """An NSAP (Network Service Access Point) record.

    @ivar address: the raw NSAP address bytes
    @type address: string
    @see: RFC 1706
    """
    __slots__ = ['address']
    def __init__(self, rdclass, rdtype, address):
        super(NSAP, self).__init__(rdclass, rdtype)
        self.address = address
    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation form is the raw bytes hex-encoded with an 0x prefix.
        hex_digits = binascii.hexlify(self.address).decode()
        return '0x' + hex_digits
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        address = tok.get_string()
        tok.get_eol()
        if not address.startswith('0x'):
            raise dns.exception.SyntaxError('string does not start with 0x')
        # Dots are optional visual separators; strip them before decoding.
        hex_digits = address[2:].replace('.', '')
        if len(hex_digits) % 2 != 0:
            raise dns.exception.SyntaxError('hexstring has odd length')
        return cls(rdclass, rdtype, binascii.unhexlify(hex_digits.encode()))
    def to_wire(self, file, compress=None, origin=None):
        # Wire form is simply the raw address bytes.
        file.write(self.address)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen].unwrap())
|
{
"content_hash": "00f532b4f6cd870e957e214e15e0063b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 30.046511627906977,
"alnum_prop": 0.6246130030959752,
"repo_name": "cloudera/hue",
"id": "05d0745ef83e6c568276389ae497f9554654e492",
"size": "2079",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/IN/NSAP.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
r"""Tests for tfrbert_task.py."""
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.configs import encoders
from tensorflow_ranking.extension.premade import tfrbert_task
from tensorflow_ranking.python import data as tfr_data
from tensorflow_ranking.python.keras import model as tfr_model
from tensorflow_serving.apis import input_pb2
FLAGS = flags.FLAGS
def _create_fake_preprocessed_dataset(output_path, seq_length, label_type):
  """Writes a fake ELWC TFRecord dataset for tests.

  Creates 32 ExampleListWithContext protos, each with 12 documents holding
  random input ids, all-ones masks/segment ids, a 'relevance' label of the
  requested dtype, and query/document id features.

  Args:
    output_path: Path of the TFRecord file to write.
    seq_length: Length of the per-document token id features.
    label_type: tf.int64 or tf.float32; dtype of the 'relevance' feature.

  Raises:
    ValueError: If label_type is neither tf.int64 nor tf.float32.
  """
  def _int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
  def _float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
  num_elwcs = 32
  docs_per_list = 12
  writer = tf.io.TFRecordWriter(output_path)
  for query_id in range(num_elwcs):
    elwc = input_pb2.ExampleListWithContext()
    for doc_id in range(docs_per_list):
      input_ids = np.random.randint(100, size=(seq_length))
      feature_map = {
          'input_ids': _int_feature(input_ids),
          'input_mask': _int_feature(np.ones_like(input_ids)),
          'segment_ids': _int_feature(np.ones_like(input_ids)),
      }
      if label_type == tf.int64:
        feature_map['relevance'] = _int_feature([1])
      elif label_type == tf.float32:
        feature_map['relevance'] = _float_feature([0.5])
      else:
        raise ValueError('Unsupported label_type: %s' % label_type)
      feature_map['query_id'] = _int_feature([query_id])
      feature_map['document_id'] = _int_feature([doc_id])
      example = tf.train.Example(
          features=tf.train.Features(feature=feature_map))
      elwc.examples.append(example)
    writer.write(elwc.SerializeToString())
  writer.close()
class TFRBERTDataTest(tf.test.TestCase, parameterized.TestCase):
  """Tests TFRBertDataLoader parsing of the fake preprocessed dataset."""
  @parameterized.parameters((tf.int64,), (tf.float32,))
  def test_load_dataset(self, label_type):
    """Checks feature keys and tensor shapes for both label dtypes."""
    input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
    batch_size = 8
    list_size = 12
    seq_length = 128
    _create_fake_preprocessed_dataset(input_path, seq_length, label_type)
    # Label is read from the 'relevance' feature; default_value=-1 fills
    # entries missing from the serialized examples.
    label_spec = ('relevance', tf.io.FixedLenFeature(
        shape=[1,], dtype=label_type, default_value=-1))
    data_config = tfrbert_task.TFRBertDataConfig(
        input_path=input_path,
        data_format=tfr_data.ELWC,
        list_size=list_size,
        shuffle_examples=False,
        seq_length=seq_length,
        global_batch_size=batch_size,
        mask_feature_name='example_list_mask',
        read_query_id=True,
        read_document_id=True)
    dataset = tfrbert_task.TFRBertDataLoader(data_config, label_spec).load()
    features, labels = next(iter(dataset))
    # Exactly these feature keys should be produced (order-insensitive).
    self.assertCountEqual(['input_word_ids', 'input_mask', 'input_type_ids',
                           'example_list_mask', 'query_id', 'document_id'],
                          features.keys())
    # Token-level features: (batch, list, seq).
    self.assertEqual(features['input_word_ids'].shape,
                     (batch_size, list_size, seq_length))
    self.assertEqual(features['input_mask'].shape,
                     (batch_size, list_size, seq_length))
    self.assertEqual(features['input_type_ids'].shape,
                     (batch_size, list_size, seq_length))
    # List-level mask: (batch, list).
    self.assertEqual(features['example_list_mask'].shape,
                     (batch_size, list_size))
    # Id features keep their trailing length-1 dimension.
    self.assertEqual(features['query_id'].shape,
                     (batch_size, list_size, 1))
    self.assertEqual(features['document_id'].shape,
                     (batch_size, list_size, 1))
    self.assertEqual(labels.shape, (batch_size, list_size))
class ModelBuilderTest(tf.test.TestCase):
  """Tests TFRBertModelBuilder wiring of inputs, encoder and scorer."""
  def _create_input_data(self):
    """Returns dummy batched inputs: batch=12, list_size=10, seq_len=128."""
    dummy_word_ids = tf.random.uniform(
        minval=0, maxval=100, shape=(12, 10, 128), dtype=tf.int64)
    dummy_mask = tf.ones((12, 10, 128))
    dummy_type_ids = tf.zeros((12, 10, 128))
    dummy_example_list_mask = tf.ones((12, 10), dtype=tf.bool)
    x = dict(
        input_word_ids=dummy_word_ids,
        input_mask=dummy_mask,
        input_type_ids=dummy_type_ids,
        example_list_mask=dummy_example_list_mask)
    return x
  def test_tfr_bert_model_builder(self):
    """Builds a small TFR-BERT model and checks the output score shape."""
    # 1-layer encoder keeps the test fast.
    encoder_config = encoders.EncoderConfig(
        bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
    encoder_network = encoders.build_encoder(encoder_config)
    preprocess_dict = {}
    scorer = tfrbert_task.TFRBertScorer(
        encoder=encoder_network,
        bert_output_dropout=0.1)
    example_feature_spec = {
        'input_word_ids': tf.io.FixedLenFeature(shape=(None,), dtype=tf.int64),
        'input_mask': tf.io.FixedLenFeature(shape=(None,), dtype=tf.int64),
        'input_type_ids': tf.io.FixedLenFeature(shape=(None,), dtype=tf.int64)
    }
    context_feature_spec = {}
    model_builder = tfrbert_task.TFRBertModelBuilder(
        input_creator=tfr_model.FeatureSpecInputCreator(
            context_feature_spec, example_feature_spec),
        preprocessor=tfr_model.PreprocessorWithSpec(preprocess_dict),
        scorer=scorer,
        mask_feature_name='example_list_mask',
        name='tfrbert_model')
    model = model_builder.build()
    output = model(self._create_input_data())
    # One score per (query, document): (batch, list_size).
    self.assertAllEqual(output.shape.as_list(), [12, 10])
class TFRBertTaskTest(tf.test.TestCase):
  """End-to-end test of TFRBertTask: build, train, validate, aggregate."""
  def setUp(self):
    super(TFRBertTaskTest, self).setUp()
    self._logging_dir = self.get_temp_dir()
  def _create_bert_ckpt(self):
    """Saves a 1-layer BERT encoder checkpoint and returns its path."""
    config = encoders.EncoderConfig(
        type='bert', bert=encoders.BertEncoderConfig(num_layers=1))
    encoder = encoders.build_encoder(config)
    ckpt = tf.train.Checkpoint(encoder=encoder)
    ckpt_path = ckpt.save(os.path.join(self._logging_dir, 'ckpt'))
    return ckpt_path
  def test_task(self):
    """Runs one train step and one validation step, then aggregates logs."""
    # Prepare check point and test data
    ckpt_path = self._create_bert_ckpt()
    input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
    seq_length = 128
    _create_fake_preprocessed_dataset(input_path, seq_length, tf.float32)
    # Set up data config
    train_data_config = tfrbert_task.TFRBertDataConfig(
        input_path=input_path,
        is_training=True,
        global_batch_size=5,
        list_size=3,
        dataset_fn='tfrecord',
        seq_length=128)
    validation_data_config = tfrbert_task.TFRBertDataConfig(
        input_path=input_path,
        is_training=False,
        global_batch_size=5,
        list_size=3,
        dataset_fn='tfrecord',
        seq_length=128,
        read_query_id=True,
        read_document_id=True)
    # Set up task config
    task_config = tfrbert_task.TFRBertConfig(
        output_preds=True,
        init_checkpoint=ckpt_path,
        aggregated_metrics=True,
        train_data=train_data_config,
        validation_data=validation_data_config,
        model=tfrbert_task.TFRBertModelConfig(
            encoder=encoders.EncoderConfig(
                bert=encoders.BertEncoderConfig(num_layers=1))))
    # Set up TFRBertTask
    label_spec = ('label',
                  tf.io.FixedLenFeature(
                      shape=(1,), dtype=tf.int64, default_value=-1))
    task = tfrbert_task.TFRBertTask(
        task_config,
        label_spec=label_spec,
        dataset_fn=tf.data.TFRecordDataset,
        logging_dir=self._logging_dir)
    # Test
    model = task.build_model()
    metrics = task.build_metrics()
    train_dataset = task.build_inputs(task_config.train_data)
    vali_dataset = task.build_inputs(task_config.validation_data)
    task.initialize(model)
    train_iterator = iter(train_dataset)
    vali_iterator = iter(vali_dataset)
    # Use the supported `learning_rate` keyword; `lr` is a deprecated alias
    # in tf.keras optimizers.
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
    task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
    logs = task.validation_step(next(vali_iterator), model, metrics=metrics)
    # Wrap each output in a length-1 tuple to mimic one aggregation step.
    logs = {x: (logs[x],) for x in logs}
    logs = task.aggregate_logs(step_outputs=logs)
    # Aggregated outputs have shape (num_steps, batch, list_size).
    self.assertEqual(tf.constant(logs['query_id']).shape, (1, 5, 3))
    self.assertEqual(tf.constant(logs['document_id']).shape, (1, 5, 3))
    self.assertEqual(
        tf.constant(logs[tfrbert_task._PREDICTION]).shape, (1, 5, 3))
    self.assertEqual(tf.constant(logs[tfrbert_task._LABEL]).shape, (1, 5, 3))
    metrics = task.reduce_aggregated_logs(logs)
# Allow running this test module directly with `python`.
if __name__ == '__main__':
  tf.test.main()
|
{
"content_hash": "740b748abbddc9096d1832fa99dc718d",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 36.56331877729258,
"alnum_prop": 0.654484653051475,
"repo_name": "tensorflow/ranking",
"id": "d810b07d57ff4bb7c271608cdf44754fa44eae43",
"size": "8969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_ranking/extension/premade/tfrbert_task_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42681"
},
{
"name": "Python",
"bytes": "1396830"
},
{
"name": "Shell",
"bytes": "2133"
},
{
"name": "Starlark",
"bytes": "30704"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from functools import wraps
__all__ = ['memoize', 'singleton', 'memoize_attr_check']
def _make_key(args, kwargs):
return args, frozenset(kwargs.items())
def memoize(func):
"""Save results of function calls to avoid repeated calculation"""
memo = {}
@wraps(func)
def wrapper(*args, **kwargs):
# Note that here we have two separate try...except statements, because
# we want to make sure that we catch only TypeError on the first
# statement, and both TypeError and KeyError on the second.
try:
key = _make_key(args, kwargs)
except TypeError: # unhashable input
return func(*args, **kwargs)
try:
return memo[key]
except KeyError:
result = func(*args, **kwargs)
memo[key] = result
return result
except TypeError: # unhashable input
return func(*args, **kwargs)
wrapper.__memoize_cache = memo
return wrapper
def clear_cache(func):
    """
    Clear the cache of a function that has potentially been
    decorated by memoize. Safely ignores non-decorated functions
    """
    memo = getattr(func, '__memoize_cache', None)
    if memo is not None:
        memo.clear()
def memoize_attr_check(attr):
    """Memoize a method call, keyed on both the call arguments and the
    value of attribute *attr* on the first argument (presumably ``self``).

    Has the effect of re-calculating results when that attribute changes.
    """
    def decorator(func):
        @memoize
        def cached(*key_and_args, **kwargs):
            # key_and_args[0] is the attribute value; it participates in
            # the cache key only and is dropped before calling func.
            return func(*key_and_args[1:], **kwargs)
        @wraps(func)
        def wrapper(*args, **kwargs):
            return cached(getattr(args[0], attr), *args, **kwargs)
        return wrapper
    return decorator
def singleton(cls):
    """Turn a class into a singleton: every call to the decorated name
    returns the same shared instance, created lazily on first use."""
    instances = {}
    @wraps(cls)
    def getinstance():
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls()
            return instances[cls]
    return getinstance
|
{
"content_hash": "4c3858449373e6b3f5ec225433f9676a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 25.64367816091954,
"alnum_prop": 0.6131779471089198,
"repo_name": "stscieisenhamer/glue",
"id": "29d3727e38083b7ced9b53064ac5bfd0c68f4624",
"size": "2231",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "glue/core/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1591083"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ValidationError
from django.utils.dateparse import parse_date
class DateDisplayMixin:
    """Adds a human-readable rendering of a start/end date range.

    Expects the using class to provide ``start_date`` (required) and
    ``end_date`` (may be falsy) date attributes.
    """
    @property
    def active_period_text(self):
        start = self.start_date.strftime('%d %b %Y')
        if not self.end_date:
            return f"{start} onwards"
        end = self.end_date.strftime('%d %b %Y')
        return f"{start} to {end}"
class DateConstraintMixin:
    """Validation helpers keeping a record's dates inside the parent
    organisation's active period.

    Expects the using model to provide ``start_date``, ``end_date`` and
    ``organisation`` attributes.
    """
    def check_start_date(self):
        """Raise ValidationError if start_date precedes the organisation's.

        String values are first parsed to dates in place.
        """
        # isinstance (not `type(x) == str`) is the idiomatic check and
        # also accepts str subclasses.
        if isinstance(self.start_date, str):
            self.start_date = parse_date(self.start_date)
        if (
            self.start_date
            and self.organisation.start_date
            and self.start_date < self.organisation.start_date
        ):
            raise ValidationError(
                "start_date (%s) must be on or after parent organisation start_date (%s)"
                % (
                    self.start_date.isoformat(),
                    self.organisation.start_date.isoformat(),
                )
            )
    def check_end_date(self):
        """Raise ValidationError if end_date is after the organisation's.

        String values are first parsed to dates in place.
        """
        if isinstance(self.end_date, str):
            self.end_date = parse_date(self.end_date)
        if (
            self.end_date
            and self.organisation.end_date
            and self.end_date > self.organisation.end_date
        ):
            raise ValidationError(
                "end_date (%s) must be on or before parent organisation end_date (%s)"
                % (self.end_date.isoformat(), self.organisation.end_date.isoformat())
            )
|
{
"content_hash": "798ba3c24fc0ac21c514d4e07353262f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 34.41860465116279,
"alnum_prop": 0.5527027027027027,
"repo_name": "DemocracyClub/EveryElection",
"id": "1e48f7ccc63973fb867e9a5c2c474eb1ad01c254",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "every_election/apps/organisations/models/mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "37294"
},
{
"name": "JavaScript",
"bytes": "3930"
},
{
"name": "Python",
"bytes": "548734"
},
{
"name": "SCSS",
"bytes": "3314"
}
],
"symlink_target": ""
}
|
"""Support for Canary camera."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.util import Throttle
from . import DATA_CANARY, DEFAULT_TIMEOUT
_LOGGER = logging.getLogger(__name__)
# Platform config option: extra arguments passed to ffmpeg.
CONF_FFMPEG_ARGUMENTS = "ffmpeg_arguments"
DEFAULT_ARGUMENTS = "-pred 1"
# Throttle interval for renewing the camera's live stream session.
MIN_TIME_BETWEEN_SESSION_RENEW = timedelta(seconds=90)
# Extend the base camera platform schema with the ffmpeg-arguments option.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_FFMPEG_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Canary sensors."""
    data = hass.data[DATA_CANARY]
    ffmpeg_args = config.get(CONF_FFMPEG_ARGUMENTS)
    # One camera entity per online device across all locations.
    cameras = [
        CanaryCamera(hass, data, location, device, DEFAULT_TIMEOUT,
                     ffmpeg_args)
        for location in data.locations
        for device in location.devices
        if device.is_online
    ]
    add_entities(cameras, True)
class CanaryCamera(Camera):
    """An implementation of a Canary security camera."""
    def __init__(self, hass, data, location, device, timeout, ffmpeg_args):
        """Initialize a Canary security camera."""
        super().__init__()
        # Shared ffmpeg manager from the ffmpeg component.
        self._ffmpeg = hass.data[DATA_FFMPEG]
        self._ffmpeg_arguments = ffmpeg_args
        self._data = data
        self._location = location
        self._device = device
        self._timeout = timeout
        # Created lazily by renew_live_stream_session().
        self._live_stream_session = None
    @property
    def name(self):
        """Return the name of this device."""
        return self._device.name
    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return self._location.is_recording
    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        # Reported as enabled exactly when the location is not recording.
        return not self._location.is_recording
    async def async_camera_image(self):
        """Return a still image response from the camera."""
        # Throttled elsewhere; may be a no-op within the renew interval.
        self.renew_live_stream_session()
        # Local import: haffmpeg is only needed when an image is requested.
        from haffmpeg.tools import ImageFrame, IMAGE_JPEG
        ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)
        # Shield the grab so caller cancellation doesn't abort it mid-way.
        image = await asyncio.shield(
            ffmpeg.get_image(
                self._live_stream_session.live_stream_url,
                output_format=IMAGE_JPEG,
                extra_cmd=self._ffmpeg_arguments,
            )
        )
        return image
    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera."""
        # No live stream session yet -> nothing to stream.
        if self._live_stream_session is None:
            return
        from haffmpeg.camera import CameraMjpeg
        stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
        await stream.open_camera(
            self._live_stream_session.live_stream_url, extra_cmd=self._ffmpeg_arguments
        )
        try:
            stream_reader = await stream.get_reader()
            return await async_aiohttp_proxy_stream(
                self.hass,
                request,
                stream_reader,
                self._ffmpeg.ffmpeg_stream_content_type,
            )
        finally:
            # Always release the ffmpeg process, even if proxying fails.
            await stream.close()
    @Throttle(MIN_TIME_BETWEEN_SESSION_RENEW)
    def renew_live_stream_session(self):
        """Renew live stream session."""
        self._live_stream_session = self._data.get_live_stream_session(self._device)
|
{
"content_hash": "126c718116fa87e70b623411ab70dbe3",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 87,
"avg_line_length": 31.25409836065574,
"alnum_prop": 0.6126409651193286,
"repo_name": "Cinntax/home-assistant",
"id": "8a6d27b891663d9270048496ba9dea481f0a4378",
"size": "3813",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/canary/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
from include import IncludeManager
from django.apps import apps
from django.db import models
from django.utils import timezone
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField
from website.util import api_v2_url
class NodeLog(ObjectIDMixin, BaseModel):
    """Audit-log entry recording a single action performed on a node."""
    FIELD_ALIASES = {
        # TODO: Find a better way
        'node': 'node__guids___id',
        'user': 'user__guids___id',
        'original_node': 'original_node__guids___id'
    }
    objects = IncludeManager()
    DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
    # Log action constants -- NOTE: templates stored in log_templates.mako
    CREATED_FROM = 'created_from'
    PROJECT_CREATED = 'project_created'
    PROJECT_REGISTERED = 'project_registered'
    PROJECT_DELETED = 'project_deleted'
    NODE_CREATED = 'node_created'
    NODE_FORKED = 'node_forked'
    NODE_REMOVED = 'node_removed'
    NODE_ACCESS_REQUESTS_ENABLED = 'node_access_requests_enabled'
    NODE_ACCESS_REQUESTS_DISABLED = 'node_access_requests_disabled'
    POINTER_CREATED = NODE_LINK_CREATED = 'pointer_created'
    POINTER_FORKED = NODE_LINK_FORKED = 'pointer_forked'
    POINTER_REMOVED = NODE_LINK_REMOVED = 'pointer_removed'
    WIKI_UPDATED = 'wiki_updated'
    WIKI_DELETED = 'wiki_deleted'
    WIKI_RENAMED = 'wiki_renamed'
    MADE_WIKI_PUBLIC = 'made_wiki_public'
    MADE_WIKI_PRIVATE = 'made_wiki_private'
    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'
    CHECKED_IN = 'checked_in'
    CHECKED_OUT = 'checked_out'
    PERMISSIONS_UPDATED = 'permissions_updated'
    MADE_PRIVATE = 'made_private'
    MADE_PUBLIC = 'made_public'
    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'
    FILE_TAG_ADDED = 'file_tag_added'
    FILE_TAG_REMOVED = 'file_tag_removed'
    FILE_METADATA_UPDATED = 'file_metadata_updated'
    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'
    CHANGED_LICENSE = 'license_changed'
    UPDATED_FIELDS = 'updated_fields'
    FILE_MOVED = 'addon_file_moved'
    FILE_COPIED = 'addon_file_copied'
    FILE_RENAMED = 'addon_file_renamed'
    FOLDER_CREATED = 'folder_created'
    FILE_ADDED = 'file_added'
    FILE_UPDATED = 'file_updated'
    FILE_REMOVED = 'file_removed'
    FILE_RESTORED = 'file_restored'
    ADDON_ADDED = 'addon_added'
    ADDON_REMOVED = 'addon_removed'
    COMMENT_ADDED = 'comment_added'
    COMMENT_REMOVED = 'comment_removed'
    COMMENT_UPDATED = 'comment_updated'
    COMMENT_RESTORED = 'comment_restored'
    CUSTOM_CITATION_ADDED = 'custom_citation_added'
    CUSTOM_CITATION_EDITED = 'custom_citation_edited'
    CUSTOM_CITATION_REMOVED = 'custom_citation_removed'
    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
    EXTERNAL_IDS_ADDED = 'external_ids_added'
    EMBARGO_APPROVED = 'embargo_approved'
    EMBARGO_CANCELLED = 'embargo_cancelled'
    EMBARGO_COMPLETED = 'embargo_completed'
    EMBARGO_INITIATED = 'embargo_initiated'
    EMBARGO_TERMINATED = 'embargo_terminated'
    RETRACTION_APPROVED = 'retraction_approved'
    RETRACTION_CANCELLED = 'retraction_cancelled'
    RETRACTION_INITIATED = 'retraction_initiated'
    REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
    REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
    REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
    PREREG_REGISTRATION_INITIATED = 'prereg_registration_initiated'
    AFFILIATED_INSTITUTION_ADDED = 'affiliated_institution_added'
    AFFILIATED_INSTITUTION_REMOVED = 'affiliated_institution_removed'
    PREPRINT_INITIATED = 'preprint_initiated'
    PREPRINT_FILE_UPDATED = 'preprint_file_updated'
    PREPRINT_LICENSE_UPDATED = 'preprint_license_updated'
    SUBJECTS_UPDATED = 'subjects_updated'
    VIEW_ONLY_LINK_ADDED = 'view_only_link_added'
    VIEW_ONLY_LINK_REMOVED = 'view_only_link_removed'
    # All recognized actions: the explicit list above plus any actions
    # contributed by installed addon app configs.
    actions = ([CHECKED_IN, CHECKED_OUT, FILE_TAG_REMOVED, FILE_TAG_ADDED, CREATED_FROM, PROJECT_CREATED,
                PROJECT_REGISTERED, PROJECT_DELETED, NODE_CREATED, NODE_FORKED, NODE_REMOVED,
                NODE_ACCESS_REQUESTS_ENABLED, NODE_ACCESS_REQUESTS_DISABLED,
                NODE_LINK_CREATED, NODE_LINK_FORKED, NODE_LINK_REMOVED, WIKI_UPDATED,
                WIKI_DELETED, WIKI_RENAMED, MADE_WIKI_PUBLIC,
                MADE_WIKI_PRIVATE, CONTRIB_ADDED, CONTRIB_REMOVED, CONTRIB_REORDERED,
                PERMISSIONS_UPDATED, MADE_PRIVATE, MADE_PUBLIC, TAG_ADDED, TAG_REMOVED, EDITED_TITLE,
                EDITED_DESCRIPTION, UPDATED_FIELDS, FILE_MOVED, FILE_COPIED, FILE_METADATA_UPDATED,
                FOLDER_CREATED, FILE_ADDED, FILE_UPDATED, FILE_REMOVED, FILE_RESTORED, ADDON_ADDED,
                ADDON_REMOVED, COMMENT_ADDED, COMMENT_REMOVED, COMMENT_UPDATED, COMMENT_RESTORED,
                MADE_CONTRIBUTOR_VISIBLE,
                MADE_CONTRIBUTOR_INVISIBLE, EXTERNAL_IDS_ADDED, EMBARGO_APPROVED, EMBARGO_TERMINATED,
                EMBARGO_CANCELLED, EMBARGO_COMPLETED, EMBARGO_INITIATED, RETRACTION_APPROVED,
                RETRACTION_CANCELLED, RETRACTION_INITIATED, REGISTRATION_APPROVAL_CANCELLED,
                REGISTRATION_APPROVAL_INITIATED, REGISTRATION_APPROVAL_APPROVED,
                PREREG_REGISTRATION_INITIATED,
                AFFILIATED_INSTITUTION_ADDED, AFFILIATED_INSTITUTION_REMOVED, PREPRINT_INITIATED,
                PREPRINT_FILE_UPDATED, PREPRINT_LICENSE_UPDATED, VIEW_ONLY_LINK_ADDED, VIEW_ONLY_LINK_REMOVED] + list(sum([
                    config.actions for config in apps.get_app_configs() if config.name.startswith('addons.')
                ], tuple())))
    action_choices = [(action, action.upper()) for action in actions]
    date = NonNaiveDateTimeField(db_index=True, null=True, blank=True, default=timezone.now)
    # TODO build action choices on the fly with the addon stuff
    action = models.CharField(max_length=255, db_index=True)  # , choices=action_choices)
    params = DateTimeAwareJSONField(default=dict)
    should_hide = models.BooleanField(default=False)
    user = models.ForeignKey('OSFUser', related_name='logs', db_index=True,
                             null=True, blank=True, on_delete=models.CASCADE)
    foreign_user = models.CharField(max_length=255, null=True, blank=True)
    node = models.ForeignKey('AbstractNode', related_name='logs',
                             db_index=True, null=True, blank=True, on_delete=models.CASCADE)
    original_node = models.ForeignKey('AbstractNode', db_index=True,
                                      null=True, blank=True, on_delete=models.CASCADE)
    def __unicode__(self):
        # Fixed: the format string previously contained a doubled comma
        # after the user field ("user=...,,").
        return ('({self.action!r}, user={self.user!r}, node={self.node!r}, params={self.params!r}) '
                'with id {self.id!r}').format(self=self)
    class Meta:
        ordering = ['-date']
        get_latest_by = 'date'
    @property
    def absolute_api_v2_url(self):
        """Absolute URL of this log in the v2 API."""
        path = '/logs/{}/'.format(self._id)
        return api_v2_url(path)
    def get_absolute_url(self):
        return self.absolute_api_v2_url
    @property
    def absolute_url(self):
        return self.absolute_api_v2_url
    def _natural_key(self):
        return self._id
|
{
"content_hash": "74e1246e7f0cc0453ebb702143f2c972",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 123,
"avg_line_length": 40.185792349726775,
"alnum_prop": 0.6782703290726135,
"repo_name": "pattisdr/osf.io",
"id": "4f1b3d0e6432684e71a6da3f9cf8d88bb62b9b2e",
"size": "7354",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osf/models/nodelog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "306671"
},
{
"name": "JavaScript",
"bytes": "1790426"
},
{
"name": "Mako",
"bytes": "647535"
},
{
"name": "Python",
"bytes": "9601810"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
import Gaffer
import GafferTest
class SpeedTest(unittest.TestCase):
    """Performance regression tests for GraphComponent naming and lookup.

    The timing notes below were recorded against historical revisions and
    are kept for reference.
    """
    # Timings for testMakeNamesUnique:
    #   r335 (original):                              ~15.1s
    #   interned string:                              ~17.2s
    #   no setName in addChildInternal:               ~0.10s
    #   r336 (InternedString comparisons):            ~5.14s
    def testMakeNamesUnique(self):
        script = Gaffer.ScriptNode()
        for _ in range(0, 1000):
            script.addChild(GafferTest.AddNode())
    # testGetChild checks that retrieving children from their parents by
    # name is not ludicrously slow. Lookup is a linear search, but
    # InternedString comparison keeps it cheap; if it ever becomes a
    # bottleneck, GraphComponent could keep a name -> child map.
    #   r338 (std::string comparisons):      ~0.17-0.21s
    #   r339 (InternedString comparisons):   ~0.14s
    def testGetChild(self):
        script = Gaffer.ScriptNode()
        for i in range(0, 1000):
            # Explicit unique names avoid the renaming overhead measured
            # in testMakeNamesUnique.
            script.addChild(GafferTest.AddNode("AddNode" + str(i)))
        for i in range(0, 1000):
            name = "AddNode" + str(i)
            child = script[name]
            self.assertEqual(child.getName(), name)
# Allow running this file directly as a test.
if __name__ == "__main__":
	unittest.main()
|
{
"content_hash": "aabea18cf28f9155601ede13e6e0ef74",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 71,
"avg_line_length": 20.302325581395348,
"alnum_prop": 0.6609392898052692,
"repo_name": "paulondc/gaffer",
"id": "9135f65c0b502c81859a904519fe0cf9a2100df7",
"size": "3551",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/GafferTest/SpeedTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15447"
},
{
"name": "C++",
"bytes": "2630344"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Objective-C",
"bytes": "107529"
},
{
"name": "Python",
"bytes": "2745422"
},
{
"name": "Shell",
"bytes": "6943"
},
{
"name": "Slash",
"bytes": "32856"
}
],
"symlink_target": ""
}
|
import os
from os.path import join
import sys
import json
import csv
import subprocess
import shutil
import itertools
from django.views.generic import ListView, DetailView
from django.views.generic.base import ContextMixin
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.contrib.messages.views import SuccessMessageMixin
from django.apps import apps
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from base.models import Project
from apps.crawl_space.models import Crawl, CrawlModel
from apps.crawl_space.forms import AddCrawlForm, AddCrawlModelForm, CrawlSettingsForm
from apps.crawl_space.utils import touch
from apps.crawl_space.viz.plot import AcheDashboard
from apps.crawl_space.settings import CRAWL_PATH, IMAGES_PATH
class ProjectObjectMixin(ContextMixin):
    """Resolve the Project named by the ``project_slug`` URL kwarg and
    expose it to templates as ``project``."""

    def get_project(self):
        """Look up the current Project from the URL slug."""
        slug = self.kwargs['project_slug']
        return Project.objects.get(slug=slug)

    def get_context_data(self, **kwargs):
        """Extend the base context with the resolved project."""
        ctx = super(ProjectObjectMixin, self).get_context_data(**kwargs)
        ctx['project'] = self.get_project()
        return ctx
class AddCrawlView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
    """Create a new Crawl inside the current project."""

    form_class = AddCrawlForm
    template_name = "crawl_space/add_crawl.html"
    success_message = "Crawl %(name)s was saved successfully."

    def form_valid(self, form):
        """Attach the current project to the crawl before saving."""
        form.instance.project = self.get_project()
        return super(AddCrawlView, self).form_valid(form)

    def get_success_url(self):
        """Redirect to the newly created crawl's detail page."""
        return self.object.get_absolute_url()
class ListCrawlsView(ProjectObjectMixin, ListView):
    """List every crawl, rendered with the crawls overview template."""
    model = Crawl
    template_name = "crawl_space/crawls.html"
class CrawlView(ProjectObjectMixin, DetailView):
    """Detail page and AJAX control endpoint for a single crawl.

    POST dispatches lifecycle actions keyed by ``request.POST['action']``
    (start / stop / dump / status); GET renders the detail page or, when a
    ``resource`` query parameter is present, serves one of several
    plain-text attachments (seed lists, crawl log).
    """
    model = Crawl
    template_name = "crawl_space/crawl.html"

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # Control POSTs arrive via AJAX without a CSRF token, so exempt them.
        return super(CrawlView, self).dispatch(*args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Dispatch the crawl control action named in ``request.POST``."""
        crawl_model = self.get_object()
        # Start: mark the crawl as starting and hand off to a supervisor
        # process that runs the crawl asynchronously.
        if request.POST['action'] == "start":
            crawl_model.status = "starting"
            crawl_model.save()
            project_slug = self.kwargs['project_slug']
            crawl_slug = self.kwargs['crawl_slug']
            call = ["python",
                    "apps/crawl_space/crawl_supervisor.py",
                    "--project", project_slug,
                    "--crawl", crawl_slug]
            subprocess.Popen(call)
            return HttpResponse(json.dumps(dict(
                status="starting")),
                content_type="application/json")
        # Stop: drop a `stop` marker file that the supervisor polls for.
        elif request.POST['action'] == "stop":
            crawl_model.status = 'stopping'
            crawl_model.save()
            crawl_path = crawl_model.get_crawl_path()
            # TODO use crawl_model.status as a stop flag
            touch(join(crawl_path, 'stop'))
            return HttpResponse(json.dumps(dict(
                status="stopping")),
                content_type="application/json")
        # Dump Images
        elif request.POST['action'] == "dump":
            self.dump_images()
            return HttpResponse("Success")
        # Update status, statistics
        elif request.POST['action'] == "status":
            return HttpResponse(json.dumps(dict(
                status=crawl_model.status,
                harvest_rate=crawl_model.harvest_rate,
                pages_crawled=crawl_model.pages_crawled,
                )),
                content_type="application/json")
        # TESTING reflect POST request
        return HttpResponse(json.dumps(dict(
            args=args,
            kwargs=kwargs,
            post=request.POST)),
            content_type="application/json")

    def dump_images(self):
        """Dump crawled images into ``IMAGES_PATH/<slug>`` via ``nutch dump``."""
        self.img_dir = os.path.join(IMAGES_PATH, self.get_object().slug)
        # Always start from a fresh, *existing* directory. The previous
        # logic only created the directory when it was missing, so a
        # re-dump deleted the old directory and then ran nutch against a
        # path that no longer existed.
        if os.path.exists(self.img_dir):
            shutil.rmtree(self.img_dir)
        os.makedirs(self.img_dir)
        subprocess.Popen(["nutch", "dump", "-outputDir", self.img_dir, "-segment",
            os.path.join(self.get_object().get_crawl_path(), 'segments'),"-mimetype",
            "image/jpeg", "image/png"]).wait()
        return "Dumping images"

    def get(self, request, *args, **kwargs):
        """Render the detail page, or serve a plain-text resource download."""
        # Get Relevant Seeds File
        if not request.GET:
            # no url parameters, return regular response
            return super(CrawlView, self).get(request, *args, **kwargs)
        elif 'resource' in request.GET and request.GET['resource'] == "seeds":
            seeds = self.get_ache_dashboard().get_relevant_seeds()
            response = HttpResponse(content_type='text/plain')
            response['Content-Disposition'] = 'attachment; filename=relevant_seeds.txt'
            response.write('\n'.join(seeds))
            return response
        elif 'resource' in request.GET and request.GET['resource'] == "initial_seeds":
            seeds = self.get_seeds_list()
            response = HttpResponse(content_type='text/plain')
            response['Content-Disposition'] = 'attachment; filename=seeds.txt'
            response.write(''.join(seeds))
            return response
        elif 'resource' in request.GET and request.GET['resource'] == "crawl_log":
            crawl_log = self.get_crawl_log()
            response = HttpResponse(content_type='text/plain')
            response['Content-Disposition'] = 'attachment; filename=crawl_log.txt'
            response.write(crawl_log)
            return response
        # Unrecognized query parameters previously fell off the end and
        # returned None (a server error in Django); render the page instead.
        return super(CrawlView, self).get(request, *args, **kwargs)

    def get_crawl_log(self):
        """Return the crawl supervisor log as a single string."""
        log_path = os.path.join(self.get_object().get_crawl_path(), "crawl_proc.log")
        with open(log_path) as f:
            crawl_log = f.readlines()
        return ''.join(crawl_log)

    def get_seeds_path(self):
        """Return the crawler-specific seeds file path ('' when unknown)."""
        if self.get_object().crawler == "nutch":
            seeds_path = os.path.join(self.get_object().seeds_list.path, "seeds")
        elif self.get_object().crawler == "ache":
            seeds_path = self.get_object().seeds_list.path
        else:
            seeds_path = ""
        return seeds_path

    def get_seeds_list(self, lines=None):
        """Return seed URLs, optionally limited to the first ``lines`` entries."""
        with open(self.get_seeds_path()) as f:
            if lines:
                seeds_list = list(itertools.islice(f, lines))
            else:
                seeds_list = f.readlines()
        return seeds_list

    def get_object(self):
        """Resolve the crawl from the project and crawl slugs in the URL."""
        return Crawl.objects.get(
            project=self.get_project(),
            slug=self.kwargs['crawl_slug'])

    def get_ache_dashboard(self):
        """Return an AcheDashboard wrapping this crawl."""
        return AcheDashboard(self.get_object())

    def get_context_data(self, **kwargs):
        """Add project, a seed preview, and (for ACHE) dashboard plots."""
        context = super(CrawlView, self).get_context_data(**kwargs)
        context['project'] = self.get_project()
        context['seeds'] = self.get_seeds_list(10)
        if self.get_object().crawler == "ache":
            plots = AcheDashboard(self.get_object()).get_plots()
            context['scripts'] = plots['scripts']
            context['divs'] = plots['divs']
        return context
class CrawlSettingsView(SuccessMessageMixin, ProjectObjectMixin, UpdateView):
    """Edit an existing crawl's settings."""

    model = Crawl
    form_class = CrawlSettingsForm
    success_message = "Crawl %(name)s was edited successfully."
    template_name_suffix = '_update_form'

    def get_object(self):
        """Resolve the crawl from the project and crawl slugs in the URL."""
        crawl_slug = self.kwargs['crawl_slug']
        return Crawl.objects.get(
            project=self.get_project(), slug=crawl_slug)

    def get_success_url(self):
        """Send the user back to the crawl's detail page."""
        return self.object.get_absolute_url()
class AddCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
    """Register a new crawl model for the current project."""

    form_class = AddCrawlModelForm
    template_name = "crawl_space/add_crawl_model.html"
    success_message = "Crawl model %(name)s was added successfully."

    def get_success_url(self):
        """Redirect to the new crawl model's detail page."""
        return self.object.get_absolute_url()

    def form_valid(self, form):
        """Bind the model to the current project before saving."""
        form.instance.project = self.get_project()
        return super(AddCrawlModelView, self).form_valid(form)
class DeleteCrawlView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
    """Delete a crawl; removal of its on-disk folder is currently disabled."""

    model = Crawl
    success_message = "Crawl %(name)s was deleted successfully."

    def delete(self, request, *args, **kwargs):
        """ Remove crawl folder """
        # shutil.rmtree(os.path.join(CRAWL_PATH, str(self.get_object().pk)))
        return super(DeleteCrawlView, self).delete(request, *args, **kwargs)

    def get_object(self):
        """Resolve the crawl from the project and crawl slugs in the URL."""
        crawl_slug = self.kwargs['crawl_slug']
        return Crawl.objects.get(project=self.get_project(),
                                 slug=crawl_slug)

    def get_success_url(self):
        """Return to the project page after deletion."""
        return self.get_project().get_absolute_url()
class DeleteCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
    """Delete a crawl model belonging to the current project."""

    model = CrawlModel
    success_message = "Crawl model %(name)s was deleted successfully."

    def get_object(self):
        """Resolve the crawl model from the project and model slugs."""
        model_slug = self.kwargs['model_slug']
        return CrawlModel.objects.get(
            project=self.get_project(), slug=model_slug)

    def get_success_url(self):
        """Return to the project page after deletion."""
        return self.get_project().get_absolute_url()
|
{
"content_hash": "92e9c98eadd9d30b8a3b0687fcef62e2",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 114,
"avg_line_length": 34.87218045112782,
"alnum_prop": 0.6191246226821906,
"repo_name": "0x0mar/memex-explorer",
"id": "2bd725565c6ccb9aaf5b87e283d7193db5ac9d1b",
"size": "9276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/apps/crawl_space/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "40546"
},
{
"name": "HTML",
"bytes": "29000"
},
{
"name": "JavaScript",
"bytes": "75604"
},
{
"name": "Nginx",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "255801"
},
{
"name": "Python",
"bytes": "194008"
},
{
"name": "Ruby",
"bytes": "973"
},
{
"name": "Shell",
"bytes": "1552"
}
],
"symlink_target": ""
}
|
"""
Support for Insteon dimmers via local hub control.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.insteon_local/
"""
import json
import logging
import os
from datetime import timedelta
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.loader import get_component
import homeassistant.util as util
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon_local']
DOMAIN = 'light'
INSTEON_LOCAL_LIGHTS_CONF = 'insteon_local_lights.conf'
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
SUPPORT_INSTEON_LOCAL = SUPPORT_BRIGHTNESS
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Insteon local light platform.

    Devices already saved in the local conf file are created directly; when
    the conf file is empty, every linked dimmer discovered on the hub
    triggers a configurator request so the user can name it.
    """
    insteonhub = hass.data['insteon_local']
    conf_lights = config_from_file(hass.config.path(INSTEON_LOCAL_LIGHTS_CONF))
    # Idiomatic truthiness check (was `if len(conf_lights):`).
    if conf_lights:
        for device_id in conf_lights:
            setup_light(device_id, conf_lights[device_id], insteonhub, hass,
                        add_devices)
    else:
        linked = insteonhub.get_linked()
        for device_id in linked:
            if (linked[device_id]['cat_type'] == 'dimmer' and
                    device_id not in conf_lights):
                request_configuration(device_id,
                                      insteonhub,
                                      linked[device_id]['model_name'] + ' ' +
                                      linked[device_id]['sku'],
                                      hass, add_devices)
def request_configuration(device_id, insteonhub, model, hass,
                          add_devices_callback):
    """Request configuration steps from the user."""
    configurator = get_component('configurator')
    if device_id in _CONFIGURING:
        # A request is already pending for this device: surface an error
        # on the existing configurator card instead of opening another.
        configurator.notify_errors(
            _CONFIGURING[device_id], 'Failed to register, please try again.')
        return

    def _config_submitted(data):
        """Create the light from the name the user entered."""
        setup_light(device_id, data.get('name'), insteonhub, hass,
                    add_devices_callback)

    title = 'Insteon ' + model + ' addr: ' + device_id
    _CONFIGURING[device_id] = configurator.request_config(
        hass, title,
        _config_submitted,
        description=('Enter a name for ' + model + ' addr: ' + device_id),
        entity_picture='/static/images/config_insteon.png',
        submit_caption='Confirm',
        fields=[{'id': 'name', 'name': 'Name', 'type': ''}]
    )
def setup_light(device_id, name, insteonhub, hass, add_devices_callback):
    """Set up the light."""
    if device_id in _CONFIGURING:
        # Close out the pending configurator card for this device.
        configurator = get_component('configurator')
        configurator.request_done(_CONFIGURING.pop(device_id))
        _LOGGER.info("Device configuration done!")
    conf_lights = config_from_file(hass.config.path(INSTEON_LOCAL_LIGHTS_CONF))
    if device_id not in conf_lights:
        # Persist the newly named device so later startups skip discovery.
        conf_lights[device_id] = name
        saved = config_from_file(
            hass.config.path(INSTEON_LOCAL_LIGHTS_CONF), conf_lights)
        if not saved:
            _LOGGER.error("Failed to save configuration file")
    dimmer = insteonhub.dimmer(device_id)
    add_devices_callback([InsteonLocalDimmerDevice(dimmer, name)])
def config_from_file(filename, config=None):
    """Small configuration file management function.

    With ``config`` supplied, write it to ``filename`` as JSON and return
    True on success, False on failure. Without ``config``, read and return
    the parsed JSON dict; a missing file yields ``{}``; an unreadable or
    corrupt file yields False.
    """
    if config:
        # We're writing configuration
        try:
            with open(filename, 'w') as fdesc:
                fdesc.write(json.dumps(config))
        except IOError as error:
            _LOGGER.error('Saving config file failed: %s', error)
            return False
        return True
    else:
        # We're reading config
        if not os.path.isfile(filename):
            return {}
        try:
            with open(filename, 'r') as fdesc:
                return json.loads(fdesc.read())
        except (IOError, ValueError) as error:
            # ValueError covers json.JSONDecodeError: a corrupt file used
            # to escape as an uncaught exception and crash platform setup.
            _LOGGER.error("Reading configuration file failed: %s", error)
            return False
class InsteonLocalDimmerDevice(Light):
    """An abstract Class for an Insteon node."""
    def __init__(self, node, name):
        """Initialize the device.

        :param node: dimmer handle returned by ``insteonhub.dimmer()``.
        :param name: user-chosen display name, stored on the node itself.
        """
        self.node = node
        self.node.deviceName = name
        # Last known brightness, 0..255; 0 until the first update().
        self._value = 0
    @property
    def name(self):
        """Return the name of the node."""
        return self.node.deviceName
    @property
    def unique_id(self):
        """Return the ID of this Insteon node."""
        return 'insteon_local_{}'.format(self.node.device_id)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._value
    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update(self):
        """Update state of the light."""
        resp = self.node.status(0)
        if 'cmd2' in resp:
            # cmd2 carries the level as a hex string — parse base 16.
            self._value = int(resp['cmd2'], 16)
    @property
    def is_on(self):
        """Return the boolean response if the node is on."""
        return self._value != 0
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_INSTEON_LOCAL
    def turn_on(self, **kwargs):
        """Turn device on."""
        # The hub API takes a percentage; HA passes brightness as 0..255.
        brightness = 100
        if ATTR_BRIGHTNESS in kwargs:
            brightness = int(kwargs[ATTR_BRIGHTNESS]) / 255 * 100
        self.node.on(brightness)
    def turn_off(self, **kwargs):
        """Turn device off."""
        self.node.off()
|
{
"content_hash": "f64da0b11b22e5132c5172511a0e209d",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 79,
"avg_line_length": 32.69101123595506,
"alnum_prop": 0.606461591338718,
"repo_name": "kyvinh/home-assistant",
"id": "c51c7d9d8390409a8deb3a00923e5cf87bb438c9",
"size": "5819",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/light/insteon_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1548645"
},
{
"name": "Python",
"bytes": "5298607"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django import http
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
import crowdsource
class CommentPostBadRequest(http.HttpResponseBadRequest):
    """
    Response returned when a comment post is invalid. If ``DEBUG`` is on a
    nice-ish error message will be displayed (for debugging purposes), but in
    production mode a simple opaque 400 page will be displayed.
    """
    # NOTE: the former __init__ only delegated to super() with no extra
    # behavior, so it was removed as redundant.
def post_csentry(request, next = None):
    """Handle a crowdsource-entry POST against an arbitrary target object.

    Expects ``content_type`` ("app.Model") and ``object_id`` in the POST
    data; resolves the target, validates the crowdsource form, saves it
    when valid, and redirects to ``next`` (or the target's absolute URL).

    :raises Http404: when content_type or object_id is missing.
    """
    data = request.POST.copy()
    next = data.get("next", next)
    ctype = data.get("content_type")
    object_id = data.get("object_id")
    if ctype is None or object_id is None:
        # Bug fix: previously `return Http404` handed the exception *class*
        # back as a response object; it must be raised.
        raise Http404
    try:
        model = models.get_model(*ctype.split(".", 1))
        target = model._default_manager.get(pk = object_id)
    except (TypeError, AttributeError, ObjectDoesNotExist):
        # Bad app/model string, unresolvable model, or missing instance.
        return CommentPostBadRequest()
    csentry_form = crowdsource.get_form()(target, data = data)
    if csentry_form.is_valid():
        csentry_form.save()
    # TODO: invalid forms are currently redirected just like valid ones;
    # form errors should eventually be surfaced to the user.
    if next:
        return HttpResponseRedirect(next)
    return HttpResponseRedirect(target.get_absolute_url())
post_csentry = require_POST(post_csentry)
|
{
"content_hash": "4d54f4b26a2853bfc0c1c4f464e8ca95",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 128,
"avg_line_length": 35.02,
"alnum_prop": 0.7064534551684751,
"repo_name": "rmanocha/django-crowdsource",
"id": "49aee44071f9fab154e63049e4580f04c01c233e",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowdsource/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17612"
}
],
"symlink_target": ""
}
|
"""Unit tests for ReviewRequestPageEntryRegistry."""
from __future__ import unicode_literals
from djblets.registries.errors import AlreadyRegisteredError
from reviewboard.reviews.detail import (BaseReviewRequestPageEntry,
ReviewRequestPageEntryRegistry)
from reviewboard.testing import TestCase
class DummyEntry(BaseReviewRequestPageEntry):
    """Minimal page entry used as a registration fixture by the tests below."""
    entry_type_id = 'dummy'
class ReviewRequestPageEntryRegistryTests(TestCase):
    """Unit tests for ReviewRequestPageEntryRegistry."""
    def setUp(self):
        # Each test gets a fresh, empty registry.
        super(ReviewRequestPageEntryRegistryTests, self).setUp()
        self.registry = ReviewRequestPageEntryRegistry()
    def test_register(self):
        """Testing ReviewRequestPageEntryRegistry.register"""
        self.registry.register(DummyEntry)
        self.assertIn(DummyEntry, self.registry)
    def test_register_with_entry_already_registered(self):
        """Testing ReviewRequestPageEntryRegistry.register with already
        registered entry
        """
        self.registry.register(DummyEntry)
        # Registering the exact same class twice must fail.
        message = 'This review request page entry is already registered.'
        with self.assertRaisesMessage(AlreadyRegisteredError, message):
            self.registry.register(DummyEntry)
    def test_register_with_id_already_registered(self):
        """Testing ReviewRequestPageEntryRegistry.register with already
        registered entry_type_id
        """
        # A distinct class that inherits (and thus shares) entry_type_id.
        class DummyEntry2(DummyEntry):
            pass
        self.registry.register(DummyEntry)
        message = (
            'A review request page entry with the entry_type_id "dummy" is '
            'already registered by another entry (<class \'reviewboard.'
            'reviews.tests.test_review_request_page_entry_registry.'
            'DummyEntry\'>).'
        )
        with self.assertRaisesMessage(AlreadyRegisteredError, message):
            self.registry.register(DummyEntry2)
    def test_unregister(self):
        """Testing ReviewRequestPageEntryRegistry.unregister"""
        self.registry.register(DummyEntry)
        self.registry.unregister(DummyEntry)
        self.assertNotIn(DummyEntry, self.registry)
    def test_get_entry(self):
        """Testing ReviewRequestPageEntryRegistry.get_entry"""
        self.registry.register(DummyEntry)
        self.assertEqual(self.registry.get_entry('dummy'), DummyEntry)
    def test_get_entry_with_invalid_id(self):
        """Testing ReviewRequestPageEntryRegistry.get_entry with invalid entry
        ID
        """
        # Nothing registered, so the lookup must return None, not raise.
        self.assertIsNone(self.registry.get_entry('dummy'))
|
{
"content_hash": "e4db1358110b73e7577f224a40645873",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 34.54666666666667,
"alnum_prop": 0.6896950984175994,
"repo_name": "chipx86/reviewboard",
"id": "0af22861e022c1fb37d38b9f0307ffb3c8b67772",
"size": "2591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reviewboard/reviews/tests/test_review_request_page_entry_registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "434719"
},
{
"name": "HTML",
"bytes": "224310"
},
{
"name": "JavaScript",
"bytes": "3830753"
},
{
"name": "Python",
"bytes": "7333453"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
}
|
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._component_containers_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ComponentContainersOperations:
    """ComponentContainersOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Auto-generated operation group: holds the shared pipeline client
        # plus the (de)serializers wired up by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        workspace_name: str,
        skip: Optional[str] = None,
        list_view_type: Optional[Union[str, "_models.ListViewType"]] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.ComponentContainerResourceArmPaginatedResult"]:
        """List component containers.
        List component containers.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param skip: Continuation token for pagination.
        :type skip: str
        :param list_view_type: View type for including/excluding (for example) archived entities.
        :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ComponentContainerResourceArmPaginatedResult or
         the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainerResourceArmPaginatedResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ComponentContainerResourceArmPaginatedResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a request that follows the
        # server-supplied next link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    skip=skip,
                    list_view_type=list_view_type,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    skip=skip,
                    list_view_type=list_view_type,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Next links are always followed with a plain GET.
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page, mapping non-200 statuses onto typed errors.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components'}  # type: ignore
    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete container.
        Delete container.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 and 204 are both valid delete outcomes (deleted / already gone).
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.ComponentContainerData":
        """Get container.
        Get container.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ComponentContainerData, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainerData
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ComponentContainerData"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ComponentContainerData', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'}  # type: ignore
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        body: "_models.ComponentContainerData",
        **kwargs: Any
    ) -> "_models.ComponentContainerData":
        """Create or update container.
        Create or update container.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :param body: Container entity to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainerData
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ComponentContainerData, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainerData
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ComponentContainerData"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(body, 'ComponentContainerData')
        request = build_create_or_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('ComponentContainerData', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ComponentContainerData', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}'}  # type: ignore
|
{
"content_hash": "c6837965d53df807090e73795b9af77e",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 214,
"avg_line_length": 44.170967741935485,
"alnum_prop": 0.6603373986708537,
"repo_name": "Azure/azure-sdk-for-python",
"id": "99514f21c10d847650c0dfcb1c8153867455b682",
"size": "14160",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/aio/operations/_component_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
class cPDFiDNameObfuscation(cPluginParent):
    """PDFiD plugin that flags documents using hex-escaped (#xx) names."""
    # onlyValidPDF = True
    name = 'Name Obfuscation plugin'

    def __init__(self, oPDFiD):
        self.oPDFiD = oPDFiD

    def Score(self):
        """Score 1.0 when any keyword was seen in hex-escaped form."""
        hex_total = sum(oCount.hexcode
                        for oCount in self.oPDFiD.keywords.values())
        return 1.0 if hex_total > 0 else 0.0


AddPlugin(cPDFiDNameObfuscation)
|
{
"content_hash": "6b35ed8f96376c1b64dda3514aa0b972",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.6256830601092896,
"repo_name": "Rafiot/KittenGroomer",
"id": "e116da2e571607206734476c00dfe93b5cc3c84b",
"size": "414",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "circlean_fs/root_partition/usr/local/bin/plugin_nameobfuscation.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2299"
},
{
"name": "Makefile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "38990"
},
{
"name": "Shell",
"bytes": "23455"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class DjangoSmokeTestsConfig(AppConfig):
    """Django application configuration for the ``django_smoke_tests`` app."""
    name = 'django_smoke_tests'
|
{
"content_hash": "945a17a464982881e9af4dc6d4820862",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 21.8,
"alnum_prop": 0.7798165137614679,
"repo_name": "kamilkijak/django-smoke-tests",
"id": "e22c59a73e5e3a3da86c1674e8c17435869d366d",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_smoke_tests/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1602"
},
{
"name": "Python",
"bytes": "62111"
}
],
"symlink_target": ""
}
|
import socket
import threading
import json
class EnablerConnection():
    """Accepts TCP connections from the OpenXC Enabler and broadcasts data.

    A daemon thread listens on ``enabler_listening_port`` (40001); every
    accepted connection is wrapped in a ``SocketHandler`` and tracked in
    ``self.connections``.
    """

    def __init__(self):
        self.connections = []
        self.stopped = False
        self.enabler_listening_port = 40001
        self.local_ip = socket.gethostbyname(socket.gethostname())
        t = threading.Thread(target=self.listen_loop, name='0.0.0.0',
                args=('0.0.0.0',))
        # `daemon = True` replaces the deprecated Thread.setDaemon() call.
        t.daemon = True
        t.start()

    def send(self, outString):
        """Send *outString* to every connection, dropping any that fail.

        Iterates over a snapshot of the connection list: the original code
        removed handlers from the list it was iterating, which silently
        skips the element after each removed one.
        """
        for socket_handler in list(self.connections):
            try:
                socket_handler.send(outString)
            except Exception as e:
                # TODO: Isolate dropped connection, recover from other things.
                # For now, no recovery. If ANYTHING goes wrong, drop the
                # connection.
                print("Exception while sending data: %s" % e)
                self.connections.remove(socket_handler)
                print("Connection dropped.")

    def listen_loop(self, this_ip):
        """Accept connections forever on (this_ip, enabler_listening_port)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((this_ip, self.enabler_listening_port))
        s.listen(1)
        print("Listening for OpenXC Enabler on " + this_ip + ":" +
                str(self.enabler_listening_port))
        while True:
            conn, addr = s.accept()
            print("New connection to " + this_ip + " from " + str(addr))
            handler = SocketHandler(conn, addr)
            handler.start()
            self.connections.append(handler)

    def send_measurement(self, name, value, event=None):
        """Serialize one measurement as JSON and broadcast it.

        The trailing NUL byte is the message delimiter used on the wire.
        """
        data = {'name': name, 'value': value}
        if event is not None and event != '':
            data['event'] = event
        self.send(json.dumps(data) + '\x00')

    def received_messages(self):
        """Return all NUL-delimited command strings received from clients."""
        all_received_data = ''.join(handler.received_command_data
                                    for handler in self.connections)
        return all_received_data.split('\0')
class SocketHandler(threading.Thread):
    """Daemon thread owning a single client socket.

    Outgoing data is pushed through ``send``; everything received from
    the peer accumulates in ``received_command_data``.
    """

    def __init__(self, connection, address):
        super(SocketHandler, self).__init__()
        self.connection = connection
        self.address = address
        self.received_command_data = ""
        self.daemon = True

    def send(self, data):
        self.connection.sendall(data)

    def run(self):
        """Read from the socket until the peer closes the connection."""
        while True:
            chunk = self.connection.recv(1024)
            if not chunk:
                break
            self.received_command_data += chunk
|
{
"content_hash": "fc3007f34a1bb952c017abf81aebb8cb",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 34.72222222222222,
"alnum_prop": 0.5704,
"repo_name": "worldline-spain/openxc-vehicle-simulator",
"id": "97b08cef24f2a40f01a225af4b3a8f416867b6ac",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enabler_connection3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "7556"
},
{
"name": "JavaScript",
"bytes": "7494"
},
{
"name": "Python",
"bytes": "74343"
}
],
"symlink_target": ""
}
|
"""Defines IntegrityChecker."""
import re
import concurrent.futures
from typing import ( # noqa: F401
Any,
cast,
List,
Optional,
Set,
)
from .function_description import ( # noqa: F401
FunctionDescription,
)
from .docstring.base import BaseDocstring
from .docstring.docstring import Docstring
from .docstring.sections import Sections
from .docstring.style import DocstringStyle
from .errors import ( # noqa: F401
DarglintError,
ExcessParameterError,
ExcessRaiseError,
ExcessReturnError,
ExcessVariableError,
ExcessYieldError,
MissingParameterError,
MissingRaiseError,
MissingReturnError,
MissingYieldError,
ParameterTypeMismatchError,
ParameterTypeMissingError,
ReturnTypeMismatchError,
)
from .error_report import (
ErrorReport,
)
from .config import get_config
from .strictness import Strictness
# Regexes for recognizing inline "noqa" suppression directives.
SYNTAX_NOQA = re.compile(r'#\s*noqa:\sS001')
# "# noqa: *" suppresses every darglint error for the docstring.
EXPLICIT_GLOBAL_NOQA = re.compile(r'#\s*noqa:\s*\*')
# A bare "# noqa" with no error-code list.
BARE_NOQA = re.compile(r'#\s*noqa([^:]|$)')
class IntegrityChecker(object):
    """Checks the integrity of the docstring compared to the definition."""
    def __init__(self, raise_errors=False):
        # type: (bool) -> None
        """Create a new checker for the given function and docstring.
        Args:
            raise_errors: If true, we will allow ParserExceptions to
                propagate, crashing darglint.  This is mostly useful
                for development.
        """
        self.errors = list()  # type: List[DarglintError]
        # Tracks whether `self.errors` is sorted by line number.
        self._sorted = True
        self.config = get_config()
        self.raise_errors = raise_errors
        # TODO: Move max workers into a configuration option.
        # A thread pool for handling checks.  Tasks are added to the
        # pool when `schedule` is executed, if it has a docstring.
        # The pool is collected when `get_error_report_string` is called.
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
    def schedule(self, function):
        # type: (FunctionDescription) -> None
        """Submit `run_checks` for this function to the thread pool."""
        if self._skip_checks(function):
            return
        self.executor.submit(self.run_checks, function)
    def run_checks(self, function):
        # type: (FunctionDescription) -> None
        """Run checks on the given function.
        Args:
            function: A function whose docstring we are verifying.
        Raises:
            Exception: If the docstring format isn't supported.
        """
        if self._skip_checks(function):
            return
        function_docstring = cast(str, function.docstring)
        if self.config.style == DocstringStyle.GOOGLE:
            docstring = Docstring.from_google(
                function_docstring,
            )
        elif self.config.style == DocstringStyle.SPHINX:
            docstring = Docstring.from_sphinx(
                function_docstring,
            )
            # Variable checks only run for sphinx-style docstrings.
            self._check_variables(docstring, function)
        elif self.config.style == DocstringStyle.NUMPY:
            docstring = Docstring.from_numpy(
                function_docstring,
            )
        else:
            raise Exception('Unsupported docstring format.')
        if self.config.strictness != Strictness.FULL_DESCRIPTION:
            if docstring.satisfies_strictness(self.config.strictness):
                return
        if docstring.ignore_all:
            return
        self._check_parameters(docstring, function)
        self._check_parameter_types(docstring, function)
        self._check_parameter_types_missing(docstring, function)
        self._check_return(docstring, function)
        self._check_return_type(docstring, function)
        self._check_yield(docstring, function)
        self._check_raises(docstring, function)
        self._check_style(docstring, function)
        self._sorted = False
    def _skip_checks(self, function):
        # type: (FunctionDescription) -> bool
        """Return True if this function should not be checked at all."""
        no_docsting = function.docstring is None
        skip_by_regex = (
            self.config.ignore_regex and
            re.match(self.config.ignore_regex, function.name)
        )
        skip_property = (
            self.config.ignore_properties and function.is_property
        )
        return bool(no_docsting or skip_by_regex or skip_property)
    def _check_parameter_types(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report documented argument types that disagree with annotations."""
        error_code = ParameterTypeMismatchError.error_code
        if self._ignore_error(docstring, ParameterTypeMismatchError):
            return
        argument_types = dict(
            zip(docstring.get_items(Sections.ARGUMENTS_SECTION) or [],
                docstring.get_types(Sections.ARGUMENTS_SECTION) or [])
        )
        # Align documented types with the function's argument order;
        # None marks arguments with no documented type.
        doc_arg_types = list()  # type: List[Optional[str]]
        for name in function.argument_names:
            if name not in argument_types:
                doc_arg_types.append(None)
            else:
                doc_arg_types.append(argument_types[name])
        noqa_lookup = docstring.get_noqas()
        for name, expected, actual in zip(
            function.argument_names,
            function.argument_types,
            doc_arg_types,
        ):
            if expected is None or actual is None:
                continue
            noqa_exists = error_code in noqa_lookup
            name_has_noqa = noqa_exists and name in noqa_lookup[error_code]
            if not (expected == actual or name_has_noqa):
                default_line_numbers = docstring.get_line_numbers(
                    'arguments-section'
                )
                line_numbers = docstring.get_line_numbers_for_value(
                    'ident',
                    name,
                ) or default_line_numbers
                self.errors.append(
                    ParameterTypeMismatchError(
                        function.function,
                        name=name,
                        expected=expected,
                        actual=actual,
                        line_numbers=line_numbers,
                    )
                )
    def _check_parameter_types_missing(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report documented arguments that lack a type description."""
        error_code = ParameterTypeMissingError.error_code
        if self._ignore_error(docstring, ParameterTypeMissingError):
            return
        argument_types = dict(
            zip(docstring.get_items(Sections.ARGUMENTS_SECTION) or [],
                docstring.get_types(Sections.ARGUMENTS_SECTION) or [])
        )
        noqa_lookup = docstring.get_noqas()
        noqa_exists = error_code in noqa_lookup
        for name, argument_type in argument_types.items():
            name_has_no_qa = noqa_exists and name in noqa_lookup[error_code]
            if argument_type is None and not name_has_no_qa:
                default_line_numbers = docstring.get_line_numbers(
                    'arguments-section'
                )
                line_numbers = docstring.get_line_numbers_for_value(
                    'ident',
                    name,
                ) or default_line_numbers
                self.errors.append(
                    ParameterTypeMissingError(
                        function.function,
                        name=name,
                        line_numbers=line_numbers,
                    )
                )
    def _check_return_type(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report a documented return type that disagrees with the annotation."""
        if function.is_abstract:
            return
        if self._ignore_error(docstring, ReturnTypeMismatchError):
            return
        fun_type = function.return_type
        doc_type = docstring.get_types(Sections.RETURNS_SECTION)
        if not doc_type or isinstance(doc_type, list):
            doc_type = None
        if fun_type is not None and doc_type is not None:
            if fun_type != doc_type:
                line_numbers = docstring.get_line_numbers(
                    'returns-section',
                )
                self.errors.append(
                    ReturnTypeMismatchError(
                        function.function,
                        expected=fun_type,
                        actual=doc_type,
                        line_numbers=line_numbers,
                    ),
                )
    def _check_yield(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report a yields section missing from or excess in the docstring."""
        if function.is_abstract:
            return
        doc_yield = docstring.get_section(Sections.YIELDS_SECTION)
        fun_yield = function.has_yield
        ignore_missing = self._ignore_error(docstring, MissingYieldError)
        ignore_excess = self._ignore_error(docstring, ExcessYieldError)
        if fun_yield and not doc_yield and not ignore_missing:
            self.errors.append(
                MissingYieldError(function.function)
            )
        elif doc_yield and not fun_yield and not ignore_excess:
            line_numbers = docstring.get_line_numbers(
                'yields-section',
            )
            self.errors.append(
                ExcessYieldError(
                    function.function,
                    line_numbers=line_numbers,
                )
            )
    def _check_return(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report a returns section missing from or excess in the docstring."""
        if function.is_abstract:
            return
        # If there is an empty return, we don't want to make any
        # judgement about whether it should be reported, as it is
        # probably an early escape.
        if function.has_empty_return:
            return
        doc_return = docstring.get_section(Sections.RETURNS_SECTION)
        fun_return = function.has_return
        ignore_missing = self._ignore_error(docstring, MissingReturnError)
        ignore_excess = self._ignore_error(docstring, ExcessReturnError)
        if fun_return and not doc_return and not ignore_missing:
            self.errors.append(
                MissingReturnError(function.function)
            )
        elif doc_return and not fun_return and not ignore_excess:
            line_numbers = docstring.get_line_numbers(
                'returns-section',
            )
            self.errors.append(
                ExcessReturnError(
                    function.function,
                    line_numbers=line_numbers,
                )
            )
    def _check_parameters(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report arguments missing from or excess in the docstring."""
        docstring_arguments = set(docstring.get_items(
            Sections.ARGUMENTS_SECTION
        ) or [])
        actual_arguments = set(function.argument_names)
        missing_in_doc = actual_arguments - docstring_arguments
        missing_in_doc = self._remove_ignored(
            docstring,
            missing_in_doc,
            MissingParameterError,
        )
        # Get a default line number.
        default_line_numbers = docstring.get_line_numbers(
            'arguments-section'
        )
        for missing in missing_in_doc:
            # See if the documented argument begins with one
            # or two asterisks.
            if (
                (missing.startswith('**')
                    and missing[2:] in docstring_arguments)
                or (missing.startswith('*')
                    and missing[1:] in docstring_arguments)
            ):
                continue
            # Don't require private arguments.
            if missing.startswith('_'):
                continue
            # We use the default line numbers because a missing
            # parameter, by definition, will not have line numbers.
            self.errors.append(
                MissingParameterError(
                    function.function,
                    missing,
                    line_numbers=default_line_numbers
                )
            )
        missing_in_function = docstring_arguments - actual_arguments
        missing_in_function = self._remove_ignored(
            docstring,
            missing_in_function,
            ExcessParameterError,
        )
        for missing in missing_in_function:
            # If the actual argument begins with asterisk(s),
            # then check to make sure the unasterisked version
            # is not missing.
            if (
                '*' + missing in actual_arguments or
                '**' + missing in actual_arguments
            ):
                continue
            line_numbers = docstring.get_line_numbers_for_value(
                'arguments-section',
                missing,
            ) or default_line_numbers
            self.errors.append(
                ExcessParameterError(
                    function.function,
                    missing,
                    line_numbers=line_numbers,
                )
            )
    def _check_variables(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report documented variables that do not occur in the function."""
        described_variables = set(
            docstring.get_items(Sections.VARIABLES_SECTION) or []
        )  # type: Set[str]
        actual_variables = set(function.variables)
        excess_in_doc = described_variables - actual_variables
        # Get a default line number.
        default_line_numbers = docstring.get_line_numbers(
            'variables-section',
        )
        for excess in excess_in_doc:
            line_numbers = docstring.get_line_numbers_for_value(
                'variables-section',
                excess,
            ) or default_line_numbers
            self.errors.append(
                ExcessVariableError(
                    function.function,
                    excess,
                    line_numbers=line_numbers,
                )
            )
    def _ignore_error(self, docstring, error):
        # type: (BaseDocstring, Any) -> bool
        """Return true if we should ignore this error.
        Args:
            docstring: The docstring we are reporting on.
            error: The error we might be ignoring.
        Returns:
            True if we should ignore all instances of this error,
            otherwise false.
        """
        error_code = error.error_code
        if error_code in self.config.errors_to_ignore:
            return True
        noqa_lookup = docstring.get_noqas()
        inline_error = error_code in noqa_lookup
        # A noqa with no target list suppresses the whole error code.
        if inline_error and not noqa_lookup[error_code]:
            return True
        return False
    def _remove_ignored(self, docstring, missing, error):
        # type: (BaseDocstring, Set[str], Any) -> Set[str]
        """Remove ignored from missing.
        Args:
            docstring: The docstring we are reporting on.
            missing: A set of missing items.
            error: The error being checked.
        Returns:
            A set of missing items without those to be ignored.
        """
        error_code = error.error_code
        # Ignore globally
        if self._ignore_error(docstring, error):
            return set()
        # There are no noqa statements
        noqa_lookup = docstring.get_noqas()
        inline_ignore = error_code in noqa_lookup
        if not inline_ignore:
            return missing
        # We are to ignore specific instances.
        return missing - set(noqa_lookup[error_code])
    def _check_style(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Append any style errors the docstring itself reports."""
        for StyleError, line_numbers in docstring.get_style_errors():
            if self._ignore_error(docstring, StyleError):
                continue
            self.errors.append(StyleError(
                function.function,
                line_numbers,
            ))
    def _check_raises(self, docstring, function):
        # type: (BaseDocstring, FunctionDescription) -> None
        """Report exceptions missing from or excess in the raises section."""
        if function.is_abstract:
            return
        exception_types = docstring.get_items(Sections.RAISES_SECTION)
        docstring_raises = set(exception_types or [])
        actual_raises = function.raises
        ignore_raise = set(self.config.ignore_raise)
        missing_in_doc = actual_raises - docstring_raises - ignore_raise
        missing_in_doc = self._remove_ignored(
            docstring,
            missing_in_doc,
            MissingRaiseError,
        )
        for missing in missing_in_doc:
            self.errors.append(
                MissingRaiseError(function.function, missing)
            )
        # TODO: Disable by default.
        #
        # Should we even include this?  It seems like the user
        # would know if this function would be likely to raise
        # a certain exception from underlying calls.
        #
        missing_in_function = docstring_raises - actual_raises
        missing_in_function = self._remove_ignored(
            docstring,
            missing_in_function,
            ExcessRaiseError,
        )
        # Remove AssertionError if there is an assert.
        if 'AssertionError' in missing_in_function:
            if function.raises_assert:
                missing_in_function.remove('AssertionError')
        default_line_numbers = docstring.get_line_numbers(
            'raises-section',
        )
        for missing in missing_in_function:
            line_numbers = docstring.get_line_numbers_for_value(
                'raises-section',
                missing,
            ) or default_line_numbers
            self.errors.append(
                ExcessRaiseError(
                    function.function,
                    missing,
                    line_numbers=line_numbers,
                )
            )
    def _sort(self):
        # type: () -> None
        """Sort collected errors by source line number, once."""
        if not self._sorted:
            self.errors.sort(key=lambda x: x.function.lineno)
            self._sorted = True
    def get_error_report(self, verbosity, filename, message_template=None):
        # type: (int, str, Optional[str]) -> ErrorReport
        """Wait for all scheduled checks, then build the error report."""
        self.executor.shutdown()
        return ErrorReport(
            errors=self.errors,
            filename=filename,
            verbosity=verbosity,
            message_template=message_template or self.config.message_template,
        )
    def get_error_report_string(self,
                                verbosity,
                                filename,
                                message_template=None):
        # type: (int, str, Optional[str]) -> str
        """Return a string representation of the errors.
        Args:
            verbosity: The level of verbosity.  Should be an integer
                in the range [1,3].
            filename: The filename of where the error occurred.
            message_template: A python format string for describing
                how the error reports should look to the user.
        Returns:
            A string representation of the errors.
        """
        return str(self.get_error_report(
            verbosity, filename, message_template
        ))
|
{
"content_hash": "1894abffe4135a73a1feb05e39f9e0b5",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 78,
"avg_line_length": 35.10516605166052,
"alnum_prop": 0.5648814842066537,
"repo_name": "terrencepreilly/darglint",
"id": "76349984a19e8b9af4ab7d8d8e7c57bfc267b216",
"size": "19027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "darglint/integrity_checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1960"
},
{
"name": "Elm",
"bytes": "25621"
},
{
"name": "HTML",
"bytes": "391"
},
{
"name": "Makefile",
"bytes": "1145"
},
{
"name": "Python",
"bytes": "678190"
}
],
"symlink_target": ""
}
|
"""
make report for QA using reportlab module
"""
from reportlab.pdfgen import canvas
import numpy as N
import time
import os
def mk_dti_report(infile, dtidir, datavars):
    """Render the DTI QA summary PDF ("QA_report.pdf") into *dtidir*.

    Draws a text header of summary statistics, then the pre-rendered
    timeseries PNGs, then (on a second page) the FA and worst-gradient
    images.  *datavars* must provide 'imgsnr' and 'badvols' entries.
    """
    timestamp = time.strftime('%B %d, %Y: %H:%M:%S')
    bad_gradient_labels = ['%d' % i for i in datavars['badvols']]
    header_lines = [
        'QA Report: %s' % timestamp,
        'directory: %s' % os.path.dirname(infile),
        'filename: %s' % os.path.basename(infile),
        'Mean SNR: %f' % N.mean(datavars['imgsnr']),
        '# potentially bad gradients: %d (%s)' % (
            len(datavars['badvols']), ' '.join(bad_gradient_labels)),
    ]
    pdf = canvas.Canvas(os.path.join(dtidir, "QA_report.pdf"))
    y = 820
    line_step = 16
    for line in header_lines:
        pdf.drawString(10, y, line)
        y = y - line_step
    # Timeseries plots, stacked downward under the header.
    ts_width, ts_height = 467, 140
    ts_paths = [os.path.join(dtidir, name) for name in
                ['snr.png', 'fd.png', 'interleavecorr.png', 'slicecorr.png']]
    for image_path in ts_paths:
        y = y - ts_height
        pdf.drawImage(image_path, 45, y, width=ts_width, height=ts_height)
    pdf.showPage()
    # Second page: FA map and worst gradient, side by side.
    y = 330
    fa_path, worst_path = [os.path.join(dtidir, name) for name in
                           ['FA.png', 'worst_gradient.png']]
    pdf.drawImage(fa_path, 0, y, width=300, height=300)
    pdf.drawImage(worst_path, 300, y, width=300, height=300)
    pdf.save()
|
{
"content_hash": "bf680c257a800433a8b823573895992c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 109,
"avg_line_length": 32.20754716981132,
"alnum_prop": 0.6449912126537786,
"repo_name": "poldrack/dtiqa",
"id": "d07b0c8c0884dd676b8f1e4a0fc95068c4a97fd6",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mk_dti_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10273"
}
],
"symlink_target": ""
}
|
"""
"""
from __future__ import absolute_import
from ..unitquantity import UnitLength, UnitQuantity
# --- SI length units ------------------------------------------------------
m = meter = metre = UnitLength(
    'meter',
    symbol='m',
    aliases=['meters', 'metre', 'metres']
)
km = kilometer = kilometre = UnitLength(
    'kilometer',
    1000*m,
    symbol='km',
    aliases=['kilometers', 'kilometre', 'kilometres']
)
cm = centimeter = centimetre = UnitLength(
    'centimeter',
    m/100,
    'cm',
    aliases=['centimeters', 'centimetre', 'centimetres']
)
mm = millimeter = millimetre = UnitLength(
    'millimeter',
    m/1000,
    symbol='mm',
    aliases=['millimeters', 'millimetre', 'millimetres']
)
um = micrometer = micrometre = micron = UnitLength(
    'micrometer',
    mm/1000,
    symbol='um',
    u_symbol='µm',
    aliases=[
        'micron', 'microns', 'micrometers', 'micrometre', 'micrometres'
    ]
)
nm = nanometer = nanometre = UnitLength(
    'nanometer',
    um/1000,
    symbol='nm',
    aliases=['nanometers', 'nanometre', 'nanometres']
)
pm = picometer = picometre = UnitLength(
    'picometer',
    nm/1000,
    symbol='pm',
    aliases=['picometers', 'picometre', 'picometres']
)
angstrom = UnitLength(
    'angstrom',
    nm/10,
    u_symbol='Å',
    aliases=['angstroms']
)
fm = femtometer = femtometre = fermi = UnitLength(
    'femtometer',
    pm/1000,
    symbol='fm',
    aliases=['femtometers', 'femtometre', 'femtometres', 'fermi', 'fermis']
)
# --- International (imperial) units ---------------------------------------
inch = international_inch = UnitLength(
    'inch',
    2.54*cm,
    symbol='in',
    aliases=['inches', 'international_inch', 'international_inches']
)
ft = foot = international_foot = UnitLength(
    'foot',
    12*inch,
    symbol='ft',
    # Fixed a missing comma: the two alias strings were implicitly
    # concatenated into 'international_footinternational_feet'.
    aliases=['feet', 'international_foot', 'international_feet']
)
mi = mile = international_mile = UnitLength(
    'mile',
    5280*ft,
    symbol='mi',
    aliases=['miles', 'international_mile', 'international_miles']
)
yd = yard = international_yard = UnitLength(
    'yard',
    3*ft,
    symbol='yd',
    aliases=['yards', 'international_yard', 'international_yards']
)
mil = thou = UnitLength(
    'mil',
    inch/1000,
    aliases=['mils', 'thou', 'thous']
)
# --- Astronomical units ---------------------------------------------------
pc = parsec = UnitLength(
    'parsec',
    3.08568025e16*m,
    symbol='pc',
    aliases=['parsecs'],
    doc='approximate'
)
ly = light_year = UnitLength(
    'light_year',
    9460730472580.8*km,
    symbol='ly',
    aliases=['light_years']
)
au = astronomical_unit = UnitLength(
    'astronomical_unit',
    149597870691*m,
    symbol='au',
    aliases=['astronomical_units'],
    doc='''
    An astronomical unit (abbreviated as AU, au, a.u., or sometimes ua) is a
    unit of length roughly equal to the mean distance between the Earth and
    the Sun. It is approximately 150 million kilometres (93 million miles).
    uncertainty ± 30 m
    http://en.wikipedia.org/wiki/Astronomical_unit
    '''
)
nmi = nautical_mile = UnitLength(
    'nautical_mile',
    1.852e3*m,
    symbol='nmi',
    aliases=['nmile', 'nmiles', 'nautical_miles']
)
# --- Typographic units ----------------------------------------------------
pt = printers_point = point = UnitLength(
    'printers_point',
    127*mm/360,
    symbol='point',
    aliases=['printers_points', 'points'],
    doc='pt is reserved for pint'
)
pica = UnitLength(
    'pica',
    12*printers_point,
    aliases=['picas', 'printers_pica', 'printers_picas']
)
# --- US survey units ------------------------------------------------------
US_survey_foot = UnitLength(
    'US_survey_foot',
    1200*m/3937,
    aliases=['US_survey_feet']
)
US_survey_yard = UnitLength(
    'US_survey_yard',
    3*US_survey_foot,
    aliases=['US_survey_yards']
)
US_survey_mile = US_statute_mile = UnitLength(
    'US_survey_mile',
    5280*US_survey_foot,
    aliases=['US_survey_miles', 'US_statute_mile', 'US_statute_miles']
)
rod = pole = perch = UnitLength(
    'rod',
    16.5*US_survey_foot,
    aliases=['rods', 'pole', 'poles', 'perch', 'perches']
)
furlong = UnitLength(
    'furlong',
    660*US_survey_foot,
    aliases=['furlongs']
)
fathom = UnitLength(
    'fathom',
    6*US_survey_foot,
    aliases=['fathoms']
)
chain = UnitLength(
    'chain',
    66*US_survey_foot,
    aliases=['chains']
)
# --- Miscellaneous units --------------------------------------------------
barleycorn = UnitLength(
    'barleycorn',
    inch/3,
    aliases=['barleycorns']
)
arpentlin = UnitLength(
    'arpentlin',
    191.835*ft
)
# kayser is an inverse length (1/cm), hence UnitQuantity, not UnitLength.
kayser = wavenumber = UnitQuantity(
    'kayser',
    1/cm,
    aliases=['kaysers', 'wavenumber', 'wavenumbers']
)
del UnitQuantity
|
{
"content_hash": "603976524595814060be055a690d90fd",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 76,
"avg_line_length": 22.369791666666668,
"alnum_prop": 0.6190919674039581,
"repo_name": "AdaptiveApplications/carnegie",
"id": "0a7e822e21a2d7cdda8cd62d250ad79b53ff88a6",
"size": "4322",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tarc_bus_locator_client/quantities-0.10.1/quantities/units/length.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4023"
},
{
"name": "C",
"bytes": "20612685"
},
{
"name": "C++",
"bytes": "7216064"
},
{
"name": "CMake",
"bytes": "18586"
},
{
"name": "CSS",
"bytes": "216171"
},
{
"name": "Emacs Lisp",
"bytes": "7798"
},
{
"name": "Fortran",
"bytes": "7795"
},
{
"name": "HTML",
"bytes": "12999"
},
{
"name": "Java",
"bytes": "1341804"
},
{
"name": "JavaScript",
"bytes": "20474"
},
{
"name": "M4",
"bytes": "45508"
},
{
"name": "Makefile",
"bytes": "932812"
},
{
"name": "PureBasic",
"bytes": "165856"
},
{
"name": "Python",
"bytes": "11658962"
},
{
"name": "Roff",
"bytes": "5923"
},
{
"name": "Shell",
"bytes": "1442066"
},
{
"name": "Vim script",
"bytes": "3731"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["simulate_perf_cluster_alert_behaviour"]
import logging
import random
import time
from datetime import datetime
from resource_management.core.exceptions import Fail
# Alert result codes and the labels reported alongside them.
RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'
RESULT_CODE_UNKNOWN = 'UNKNOWN'
OK_MESSAGE = 'Ok'
FAIL_MESSAGE = 'Expected Fail'
UNKNOWN_MESSAGE = 'Expected Unknown'
logger = logging.getLogger('ambari_alerts')
# Maps a configured "timeout return value" / flip result (as a lowercase
# string) to the (result code, message) pair to report.
return_values_map = {"true":[RESULT_CODE_OK, OK_MESSAGE], "false":[RESULT_CODE_CRITICAL, FAIL_MESSAGE],
                     "none":[RESULT_CODE_UNKNOWN, UNKNOWN_MESSAGE]}
def simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations):
  """
  Simulate an alert outcome for performance testing.
  The behaviour type is read from `configurations` using the key named in
  `alert_behaviour_properties["alert_behaviour_type"]`:
    - "percentage": succeed with the configured probability;
    - "timeout": sleep the configured number of seconds, then return the
      configured result;
    - "flip": alternate OK/CRITICAL based on the current UTC minute and
      the configured flip interval.
  Any other (or missing) type returns OK.
  Args:
      alert_behaviour_properties (dictionary): maps behaviour option names
          to the configuration keys under which their values are stored
      configurations (dictionary): a mapping of configuration key to value
  Returns:
      A tuple of (result code, [result label]).
  """
  alert_behaviour_type=None
  alert_behaviour_type_key=alert_behaviour_properties["alert_behaviour_type"]
  if alert_behaviour_type_key in configurations:
    alert_behaviour_type = configurations[alert_behaviour_type_key].lower()
  if alert_behaviour_type == "percentage":
    alert_success_percentage=None
    alert_success_percentage_key=alert_behaviour_properties["alert_success_percentage"]
    if alert_success_percentage_key in configurations:
      alert_success_percentage = configurations[alert_success_percentage_key]
    if alert_success_percentage:
      # Succeed when a uniform draw in [0, 100] falls at or below the
      # configured success percentage.
      random_number = random.uniform(0, 100)
      if random_number <= int(alert_success_percentage):
        return (RESULT_CODE_OK, [OK_MESSAGE])
      else:
        return (RESULT_CODE_CRITICAL, [FAIL_MESSAGE])
    else:
      raise Fail("Percentage behaviour was set but alert.success.percentage was not set!")
  elif alert_behaviour_type == "timeout":
    alert_timeout_return_value=None
    alert_timeout_secs=None
    alert_timeout_return_value_key=alert_behaviour_properties["alert_timeout_return_value"]
    alert_timeout_secs_key=alert_behaviour_properties["alert_timeout_secs"]
    if alert_timeout_return_value_key in configurations:
      alert_timeout_return_value = configurations[alert_timeout_return_value_key].lower()
    if alert_timeout_secs_key in configurations:
      alert_timeout_secs = configurations[alert_timeout_secs_key]
    if alert_timeout_return_value and alert_timeout_secs:
      logger.info("Sleeping for {0} seconds".format(alert_timeout_secs))
      print "Sleeping for {0} seconds".format(alert_timeout_secs)
      time.sleep(int(alert_timeout_secs))
      return (return_values_map[alert_timeout_return_value][0], [return_values_map[alert_timeout_return_value][1]])
    else:
      raise Fail("Timeout behaviour was set but alert.timeout.return.value/alert.timeout.secs were not set!")
  elif alert_behaviour_type == "flip":
    alert_flip_interval_mins=None
    alert_flip_interval_mins_key=alert_behaviour_properties["alert_flip_interval_mins"]
    if alert_flip_interval_mins_key in configurations:
      alert_flip_interval_mins = configurations[alert_flip_interval_mins_key]
    if alert_flip_interval_mins:
      # Alternate between OK and CRITICAL every alert_flip_interval_mins
      # minutes of the current UTC hour.
      curr_time = datetime.utcnow()
      return_value = ((curr_time.minute / int(alert_flip_interval_mins)) % 2) == 0
      return (return_values_map[str(return_value).lower()][0], [return_values_map[str(return_value).lower()][1]])
    else:
      raise Fail("Flip behaviour was set but alert.flip.interval.mins was not set!")
  # Default: no recognized behaviour type configured -- report OK.
  result_code = RESULT_CODE_OK
  label = OK_MESSAGE
  return (result_code, [label])
|
{
"content_hash": "6a0aaea08bf8a1cc6bce039018695977",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 115,
"avg_line_length": 41.367924528301884,
"alnum_prop": 0.7393386545039908,
"repo_name": "arenadata/ambari",
"id": "736e5e36ebdfa51dcfb98a54f2c74e3b96d71f79",
"size": "4408",
"binary": false,
"copies": "5",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-common/src/main/python/resource_management/libraries/functions/simulate_perf_cluster_alert_behaviour.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
"""
Support for native homogeneous lists.
"""
import math
import operator
from llvmlite import ir
from numba.core import types, typing, errors, cgutils
from numba.core.imputils import (lower_builtin, lower_cast,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked,
RefType)
from numba.core.extending import overload_method, overload
from numba.core.utils import cached_property
from numba.misc import quicksort
from numba.cpython import slicing
from numba import literal_unroll
def get_list_payload(context, builder, list_type, value):
    """
    Given a list value and type, get its payload structure (as a
    reference, so that mutations are seen by all).
    """
    payload_type = types.ListPayload(list_type)
    # Cast the raw meminfo data pointer to a pointer to the payload type.
    raw_ptr = context.nrt.meminfo_data(builder, value.meminfo)
    typed_ptr = builder.bitcast(
        raw_ptr, context.get_data_type(payload_type).as_pointer())
    return context.make_data_helper(builder, payload_type, ref=typed_ptr)
def get_itemsize(context, list_type):
    """
    Return the item size for the given list type.
    """
    # ABI size of the LLVM data type backing the list's element type.
    return context.get_abi_sizeof(context.get_data_type(list_type.dtype))
class _ListPayloadMixin(object):
    # Accessor helpers over a list payload structure.  Concrete classes are
    # expected to provide `_payload`, `_builder`, `_context`, `_datamodel`
    # and `dtype` -- TODO confirm against the subclasses below.
    @property
    def size(self):
        # Current number of items, as stored in the payload.
        return self._payload.size
    @size.setter
    def size(self, value):
        self._payload.size = value
    @property
    def dirty(self):
        # The payload's dirty flag.
        return self._payload.dirty
    @property
    def data(self):
        # Pointer to the payload's item storage.
        return self._payload._get_ptr_by_name('data')
    def _gep(self, idx):
        # Compute the address of item `idx` (no bounds checking).
        return cgutils.gep(self._builder, self.data, idx)
    def getitem(self, idx):
        # Load the item at `idx` and convert it from its data
        # representation (no bounds checking).
        ptr = self._gep(idx)
        data_item = self._builder.load(ptr)
        return self._datamodel.from_data(self._builder, data_item)
    def fix_index(self, idx):
        """
        Fix negative indices by adding the size to them.  Positive
        indices are left untouched.
        """
        is_negative = self._builder.icmp_signed('<', idx,
                                                ir.Constant(idx.type, 0))
        wrapped_index = self._builder.add(idx, self.size)
        return self._builder.select(is_negative, wrapped_index, idx)
    def is_out_of_bounds(self, idx):
        """
        Return whether the index is out of bounds.
        """
        underflow = self._builder.icmp_signed('<', idx,
                                              ir.Constant(idx.type, 0))
        overflow = self._builder.icmp_signed('>=', idx, self.size)
        return self._builder.or_(underflow, overflow)
    def clamp_index(self, idx):
        """
        Clamp the index in [0, size].
        """
        builder = self._builder
        idxptr = cgutils.alloca_once_value(builder, idx)
        zero = ir.Constant(idx.type, 0)
        size = self.size
        underflow = self._builder.icmp_signed('<', idx, zero)
        with builder.if_then(underflow, likely=False):
            builder.store(zero, idxptr)
        overflow = self._builder.icmp_signed('>=', idx, size)
        with builder.if_then(overflow, likely=False):
            builder.store(size, idxptr)
        return builder.load(idxptr)
    def guard_index(self, idx, msg):
        """
        Raise an error if the index is out of bounds.
        """
        with self._builder.if_then(self.is_out_of_bounds(idx), likely=False):
            self._context.call_conv.return_user_exc(self._builder,
                                                    IndexError, (msg,))
    def fix_slice(self, slice):
        """
        Fix slice start and stop to be valid (inclusive and exclusive, resp)
        indexing bounds.
        """
        return slicing.fix_slice(self._builder, slice, self.size)
    def incref_value(self, val):
        "Incref an element value"
        self._context.nrt.incref(self._builder, self.dtype, val)
    def decref_value(self, val):
        "Decref an element value"
        self._context.nrt.decref(self._builder, self.dtype, val)
class ListPayloadAccessor(_ListPayloadMixin):
    """
    A helper object to access the list attributes given the pointer to the
    payload type.
    """
    def __init__(self, context, builder, list_type, payload_ptr):
        self._context = context
        self._builder = builder
        self._ty = list_type
        self._datamodel = context.data_model_manager[list_type.dtype]
        payload_type = types.ListPayload(list_type)
        # Cast the raw pointer to a typed payload pointer and wrap it so
        # the mixin's accessors can read/write the payload fields.
        ptrty = context.get_data_type(payload_type).as_pointer()
        payload_ptr = builder.bitcast(payload_ptr, ptrty)
        payload = context.make_data_helper(builder, payload_type,
                                           ref=payload_ptr)
        self._payload = payload
class ListInstance(_ListPayloadMixin):
    """Compile-time helper wrapping a native list value.

    Provides allocation, resizing, element access and NRT refcount
    management for the lowered representation of ``types.List``.
    """
    def __init__(self, context, builder, list_type, list_val):
        # *list_val* may be None to build an uninitialized instance
        # (see allocate_ex / from_meminfo).
        self._context = context
        self._builder = builder
        self._ty = list_type
        self._list = context.make_helper(builder, list_type, list_val)
        self._itemsize = get_itemsize(context, list_type)
        self._datamodel = context.data_model_manager[list_type.dtype]
    @property
    def dtype(self):
        # Element type of the list.
        return self._ty.dtype
    @property
    def _payload(self):
        # This cannot be cached as it can be reallocated
        return get_list_payload(self._context, self._builder, self._ty, self._list)
    @property
    def parent(self):
        # The reflected CPython list object (pyobject), if any.
        return self._list.parent
    @parent.setter
    def parent(self, value):
        self._list.parent = value
    @property
    def value(self):
        # The aggregate LLVM value for this list.
        return self._list._getvalue()
    @property
    def meminfo(self):
        # The NRT meminfo owning the payload storage.
        return self._list.meminfo
    def set_dirty(self, val):
        # Only reflected lists track mutations for write-back.
        if self._ty.reflected:
            self._payload.dirty = cgutils.true_bit if val else cgutils.false_bit
    def clear_value(self, idx):
        """Remove the value at the location
        """
        self.decref_value(self.getitem(idx))
        # it's necessary for the dtor which just decref every slot on it.
        self.zfill(idx, self._builder.add(idx, idx.type(1)))
    def setitem(self, idx, val, incref, decref_old_value=True):
        """Store *val* at *idx*, managing refcounts of old/new values."""
        # Decref old data
        if decref_old_value:
            self.decref_value(self.getitem(idx))
        ptr = self._gep(idx)
        data_item = self._datamodel.as_data(self._builder, val)
        self._builder.store(data_item, ptr)
        self.set_dirty(True)
        if incref:
            # Incref the underlying data
            self.incref_value(val)
    def inititem(self, idx, val, incref=True):
        """Store *val* into a fresh (uninitialized) slot: no old-value
        decref and no dirty-flag update."""
        ptr = self._gep(idx)
        data_item = self._datamodel.as_data(self._builder, val)
        self._builder.store(data_item, ptr)
        if incref:
            self.incref_value(val)
    def zfill(self, start, stop):
        """Zero-fill the memory at index *start* to *stop*
        *stop* MUST not be smaller than *start*.
        """
        builder = self._builder
        base = self._gep(start)
        end = self._gep(stop)
        intaddr_t = self._context.get_value_type(types.intp)
        # Byte count computed from the pointer difference.
        size = builder.sub(builder.ptrtoint(end, intaddr_t),
                           builder.ptrtoint(base, intaddr_t))
        cgutils.memset(builder, base, size, ir.IntType(8)(0))
    @classmethod
    def allocate_ex(cls, context, builder, list_type, nitems):
        """
        Allocate a ListInstance with its storage.
        Return a (ok, instance) tuple where *ok* is a LLVM boolean and
        *instance* is a ListInstance object (the object's contents are
        only valid when *ok* is true).
        """
        intp_t = context.get_value_type(types.intp)
        if isinstance(nitems, int):
            nitems = ir.Constant(intp_t, nitems)
        payload_type = context.get_data_type(types.ListPayload(list_type))
        payload_size = context.get_abi_sizeof(payload_type)
        itemsize = get_itemsize(context, list_type)
        # Account for the fact that the payload struct contains one entry
        payload_size -= itemsize
        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)
        self = cls(context, builder, list_type, None)
        # Total allocation size = <payload header size> + nitems * itemsize
        allocsize, ovf = cgutils.muladd_with_overflow(builder, nitems,
                                                      ir.Constant(intp_t, itemsize),
                                                      ir.Constant(intp_t, payload_size))
        with builder.if_then(ovf, likely=False):
            # Size computation overflowed intp: report failure.
            builder.store(cgutils.false_bit, ok)
        with builder.if_then(builder.load(ok), likely=True):
            meminfo = context.nrt.meminfo_new_varsize_dtor(
                builder, size=allocsize, dtor=self.get_dtor())
            with builder.if_else(cgutils.is_null(builder, meminfo),
                                 likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    self._list.meminfo = meminfo
                    self._list.parent = context.get_constant_null(types.pyobject)
                    self._payload.allocated = nitems
                    self._payload.size = ir.Constant(intp_t, 0)  # for safety
                    self._payload.dirty = cgutils.false_bit
                    # Zero the allocated region
                    self.zfill(self.size.type(0), nitems)
        return builder.load(ok), self
    def define_dtor(self):
        "Define the destructor if not already defined"
        context = self._context
        builder = self._builder
        mod = builder.module
        # Declare dtor
        fnty = ir.FunctionType(ir.VoidType(), [cgutils.voidptr_t])
        fn = cgutils.get_or_insert_function(mod, fnty,
                                            '.dtor.list.{}'.format(self.dtype))
        if not fn.is_declaration:
            # End early if the dtor is already defined
            return fn
        fn.linkage = 'linkonce_odr'
        # Populate the dtor
        builder = ir.IRBuilder(fn.append_basic_block())
        base_ptr = fn.args[0]  # void*
        # get payload
        payload = ListPayloadAccessor(context, builder, self._ty, base_ptr)
        # Loop over all data to decref
        intp = payload.size.type
        with cgutils.for_range_slice(
                builder, start=intp(0), stop=payload.size, step=intp(1),
                intp=intp) as (idx, _):
            val = payload.getitem(idx)
            context.nrt.decref(builder, self.dtype, val)
        builder.ret_void()
        return fn
    def get_dtor(self):
        """"Get the element dtor function pointer as void pointer.
        It's safe to be called multiple times.
        """
        # Define and set the Dtor
        dtor = self.define_dtor()
        dtor_fnptr = self._builder.bitcast(dtor, cgutils.voidptr_t)
        return dtor_fnptr
    @classmethod
    def allocate(cls, context, builder, list_type, nitems):
        """
        Allocate a ListInstance with its storage.  Same as allocate_ex(),
        but return an initialized *instance*.  If allocation failed,
        control is transferred to the caller using the target's current
        call convention.
        """
        ok, self = cls.allocate_ex(context, builder, list_type, nitems)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot allocate list",))
        return self
    @classmethod
    def from_meminfo(cls, context, builder, list_type, meminfo):
        """
        Allocate a new list instance pointing to an existing payload
        (a meminfo pointer).
        Note the parent field has to be filled by the caller.
        """
        self = cls(context, builder, list_type, None)
        self._list.meminfo = meminfo
        self._list.parent = context.get_constant_null(types.pyobject)
        context.nrt.incref(builder, list_type, self.value)
        # Payload is part of the meminfo, no need to touch it
        return self
    def resize(self, new_size):
        """
        Ensure the list is properly sized for the new size.
        """
        def _payload_realloc(new_allocated):
            # Recompute the byte size and realloc the meminfo in place.
            payload_type = context.get_data_type(types.ListPayload(self._ty))
            payload_size = context.get_abi_sizeof(payload_type)
            # Account for the fact that the payload struct contains one entry
            payload_size -= itemsize
            allocsize, ovf = cgutils.muladd_with_overflow(
                builder, new_allocated,
                ir.Constant(intp_t, itemsize),
                ir.Constant(intp_t, payload_size))
            with builder.if_then(ovf, likely=False):
                context.call_conv.return_user_exc(builder, MemoryError,
                                                  ("cannot resize list",))
            ptr = context.nrt.meminfo_varsize_realloc(builder, self._list.meminfo,
                                                      size=allocsize)
            cgutils.guard_memory_error(context, builder, ptr,
                                       "cannot resize list")
            self._payload.allocated = new_allocated
        context = self._context
        builder = self._builder
        intp_t = new_size.type
        itemsize = get_itemsize(context, self._ty)
        allocated = self._payload.allocated
        two = ir.Constant(intp_t, 2)
        eight = ir.Constant(intp_t, 8)
        # allocated < new_size
        is_too_small = builder.icmp_signed('<', allocated, new_size)
        # (allocated >> 2) > new_size
        is_too_large = builder.icmp_signed('>', builder.ashr(allocated, two), new_size)
        with builder.if_then(is_too_large, likely=False):
            # Exact downsize to requested size
            # NOTE: is_too_large must be aggressive enough to avoid repeated
            # upsizes and downsizes when growing a list.
            _payload_realloc(new_size)
        with builder.if_then(is_too_small, likely=False):
            # Upsize with moderate over-allocation (size + size >> 2 + 8)
            new_allocated = builder.add(eight,
                                        builder.add(new_size,
                                                    builder.ashr(new_size, two)))
            _payload_realloc(new_allocated)
            # Keep the fresh tail zeroed so the dtor can safely decref it.
            self.zfill(self.size, new_allocated)
        self._payload.size = new_size
        self.set_dirty(True)
    def move(self, dest_idx, src_idx, count):
        """
        Move `count` elements from `src_idx` to `dest_idx`.
        """
        # memmove semantics: overlapping ranges are handled correctly,
        # but refcounts are NOT touched.
        dest_ptr = self._gep(dest_idx)
        src_ptr = self._gep(src_idx)
        cgutils.raw_memmove(self._builder, dest_ptr, src_ptr,
                            count, itemsize=self._itemsize)
        self.set_dirty(True)
class ListIterInstance(_ListPayloadMixin):
    """Compile-time helper wrapping a native list-iterator value.

    Shares the underlying list payload (via the meminfo) and keeps the
    current position in a stack slot so it can be advanced in place.
    """
    def __init__(self, context, builder, iter_type, iter_val):
        self._context = context
        self._builder = builder
        self._ty = iter_type
        self._iter = context.make_helper(builder, iter_type, iter_val)
        self._datamodel = context.data_model_manager[iter_type.yield_type]
    @classmethod
    def from_list(cls, context, builder, iter_type, list_val):
        # Build an iterator positioned at index 0 over *list_val*.
        list_inst = ListInstance(context, builder, iter_type.container, list_val)
        self = cls(context, builder, iter_type, None)
        index = context.get_constant(types.intp, 0)
        self._iter.index = cgutils.alloca_once_value(builder, index)
        self._iter.meminfo = list_inst.meminfo
        return self
    @property
    def _payload(self):
        # This cannot be cached as it can be reallocated
        return get_list_payload(self._context, self._builder,
                                self._ty.container, self._iter)
    @property
    def value(self):
        # The aggregate LLVM value for this iterator.
        return self._iter._getvalue()
    @property
    def index(self):
        # Current position (loaded from the stack slot).
        return self._builder.load(self._iter.index)
    @index.setter
    def index(self, value):
        self._builder.store(value, self._iter.index)
#-------------------------------------------------------------------------------
# Constructors
def build_list(context, builder, list_type, items):
    """
    Build a list of the given type, containing the given items.

    *items* is a Python sequence of LLVM element values; the new list
    takes its own references to them (``incref=True``).
    """
    nitems = len(items)
    inst = ListInstance.allocate(context, builder, list_type, nitems)
    # Populate list
    inst.size = context.get_constant(types.intp, nitems)
    for i, val in enumerate(items):
        inst.setitem(context.get_constant(types.intp, i), val, incref=True)
    return impl_ret_new_ref(context, builder, list_type, inst.value)
@lower_builtin(list, types.IterableType)
def list_constructor(context, builder, sig, args):
    """Lower ``list(iterable)``: start empty, then extend."""
    def list_impl(iterable):
        out = []
        out.extend(iterable)
        return out
    return context.compile_internal(builder, list_impl, sig, args)
@lower_builtin(list)
def list_constructor(context, builder, sig, args):
    """Lower ``list()`` with no arguments: allocate an empty list."""
    list_type = sig.return_type
    inst = ListInstance.allocate(context, builder, list_type, 0)
    return impl_ret_new_ref(context, builder, list_type, inst.value)
#-------------------------------------------------------------------------------
# Various operations
@lower_builtin(len, types.List)
def list_len(context, builder, sig, args):
    """Lower ``len(lst)``: read the payload's size field."""
    instance = ListInstance(context, builder, sig.args[0], args[0])
    return instance.size
@lower_builtin('getiter', types.List)
def getiter_list(context, builder, sig, args):
    """Lower ``iter(lst)``: build an iterator sharing the list payload."""
    iterator = ListIterInstance.from_list(context, builder,
                                          sig.return_type, args[0])
    return impl_ret_borrowed(context, builder, sig.return_type,
                             iterator.value)
@lower_builtin('iternext', types.ListIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
    # Advance the iterator: while the index is in bounds, yield the
    # item (borrowed reference) and step the stored index by one.
    inst = ListIterInstance(context, builder, sig.args[0], args[0])
    index = inst.index
    nitems = inst.size
    is_valid = builder.icmp_signed('<', index, nitems)
    result.set_valid(is_valid)
    with builder.if_then(is_valid):
        result.yield_(inst.getitem(index))
        inst.index = builder.add(index, context.get_constant(types.intp, 1))
@lower_builtin(operator.getitem, types.List, types.Integer)
def getitem_list(context, builder, sig, args):
    # Lower ``lst[i]``: wrap negative indices, bounds-check, then load.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    index = args[1]
    index = inst.fix_index(index)
    inst.guard_index(index, msg="getitem out of range")
    result = inst.getitem(index)
    return impl_ret_borrowed(context, builder, sig.return_type, result)
@lower_builtin(operator.setitem, types.List, types.Integer, types.Any)
def setitem_list(context, builder, sig, args):
    # Lower ``lst[i] = v``: wrap negative indices, bounds-check, then
    # store (decrefs the old value, increfs the new one).
    inst = ListInstance(context, builder, sig.args[0], args[0])
    index = args[1]
    value = args[2]
    index = inst.fix_index(index)
    inst.guard_index(index, msg="setitem out of range")
    inst.setitem(index, value, incref=True)
    return context.get_dummy_value()
@lower_builtin(operator.getitem, types.List, types.SliceType)
def getslice_list(context, builder, sig, args):
    # Lower ``lst[a:b:c]``: validate/normalize the slice, allocate a
    # new list of the slice length and copy the selected items into it.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    slice = context.make_helper(builder, sig.args[1], args[1])
    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    inst.fix_slice(slice)
    # Allocate result and populate it
    result_size = slicing.get_slice_length(builder, slice)
    result = ListInstance.allocate(context, builder, sig.return_type,
                                   result_size)
    result.size = result_size
    # Positive- and negative-step slices iterate differently; both
    # branches fill the result sequentially at *count*.
    with cgutils.for_range_slice_generic(builder, slice.start, slice.stop,
                                         slice.step) as (pos_range, neg_range):
        with pos_range as (idx, count):
            value = inst.getitem(idx)
            result.inititem(count, value, incref=True)
        with neg_range as (idx, count):
            value = inst.getitem(idx)
            result.inititem(count, value, incref=True)
    return impl_ret_new_ref(context, builder, sig.return_type, result.value)
@lower_builtin(operator.setitem, types.List, types.SliceType, types.Any)
def setitem_list(context, builder, sig, args):
    # Lower ``dest[a:b:c] = src`` where *src* is a list.
    dest = ListInstance(context, builder, sig.args[0], args[0])
    src = ListInstance(context, builder, sig.args[2], args[2])
    slice = context.make_helper(builder, sig.args[1], args[1])
    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    dest.fix_slice(slice)
    src_size = src.size
    avail_size = slicing.get_slice_length(builder, slice)
    # Positive delta => dest must grow; negative => it must shrink.
    size_delta = builder.sub(src.size, avail_size)
    zero = ir.Constant(size_delta.type, 0)
    one = ir.Constant(size_delta.type, 1)
    with builder.if_else(builder.icmp_signed('==', slice.step, one)) as (then, otherwise):
        with then:
            # Slice step == 1 => we can resize
            # Compute the real stop, e.g. for dest[2:0] = [...]
            real_stop = builder.add(slice.start, avail_size)
            # Size of the list tail, after the end of slice
            tail_size = builder.sub(dest.size, real_stop)
            with builder.if_then(builder.icmp_signed('>', size_delta, zero)):
                # Grow list then move list tail
                dest.resize(builder.add(dest.size, size_delta))
                dest.move(builder.add(real_stop, size_delta), real_stop,
                          tail_size)
            with builder.if_then(builder.icmp_signed('<', size_delta, zero)):
                # Move list tail then shrink list
                dest.move(builder.add(real_stop, size_delta), real_stop,
                          tail_size)
                dest.resize(builder.add(dest.size, size_delta))
            dest_offset = slice.start
            # Copy the source items into the (now correctly sized) gap.
            with cgutils.for_range(builder, src_size) as loop:
                value = src.getitem(loop.index)
                dest.setitem(builder.add(loop.index, dest_offset), value, incref=True)
        with otherwise:
            # Extended slice (step != 1): sizes must match exactly.
            with builder.if_then(builder.icmp_signed('!=', size_delta, zero)):
                msg = "cannot resize extended list slice with step != 1"
                context.call_conv.return_user_exc(builder, ValueError, (msg,))
            # Overwrite the selected slots one by one (count indexes src).
            with cgutils.for_range_slice_generic(
                    builder, slice.start, slice.stop, slice.step) as (pos_range, neg_range):
                with pos_range as (index, count):
                    value = src.getitem(count)
                    dest.setitem(index, value, incref=True)
                with neg_range as (index, count):
                    value = src.getitem(count)
                    dest.setitem(index, value, incref=True)
    return context.get_dummy_value()
@lower_builtin(operator.delitem, types.List, types.Integer)
def delitem_list_index(context, builder, sig, args):
    """Lower ``del lst[i]`` by delegating to ``list.pop(i)``."""
    def delitem_impl(lst, idx):
        lst.pop(idx)
    return context.compile_internal(builder, delitem_impl, sig, args)
@lower_builtin(operator.delitem, types.List, types.SliceType)
def delitem_list(context, builder, sig, args):
    # Lower ``del lst[a:b:c]``; only step == 1 is supported.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    slice = context.make_helper(builder, sig.args[1], args[1])
    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    inst.fix_slice(slice)
    slice_len = slicing.get_slice_length(builder, slice)
    one = ir.Constant(slice_len.type, 1)
    with builder.if_then(builder.icmp_signed('!=', slice.step, one), likely=False):
        msg = "unsupported del list[start:stop:step] with step != 1"
        context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))
    # Compute the real stop, e.g. for dest[2:0]
    start = slice.start
    real_stop = builder.add(start, slice_len)
    # Decref the removed range
    with cgutils.for_range_slice(
            builder, start, real_stop, start.type(1)
    ) as (idx, _):
        inst.decref_value(inst.getitem(idx))
    # Size of the list tail, after the end of slice
    tail_size = builder.sub(inst.size, real_stop)
    # Shift the tail left over the removed range, then shrink.
    inst.move(start, real_stop, tail_size)
    inst.resize(builder.sub(inst.size, slice_len))
    return context.get_dummy_value()
# XXX should there be a specific module for Sequence or collection base classes?
@lower_builtin(operator.contains, types.Sequence, types.Any)
def in_seq(context, builder, sig, args):
    """Lower ``value in seq`` as a compiled linear scan."""
    def seq_contains_impl(seq, target):
        found = False
        for item in seq:
            if item == target:
                found = True
                break
        return found
    return context.compile_internal(builder, seq_contains_impl, sig, args)
@lower_builtin(bool, types.Sequence)
def sequence_bool(context, builder, sig, args):
    """Lower ``bool(seq)``: true iff the sequence is non-empty."""
    def sequence_bool_impl(seq):
        return not (len(seq) == 0)
    return context.compile_internal(builder, sequence_bool_impl, sig, args)
@overload(operator.truth)
def sequence_truth(seq):
    """``operator.truth(seq)`` for sequences: non-empty test."""
    if not isinstance(seq, types.Sequence):
        # Not a sequence: signal "no implementation".
        return None
    def impl(seq):
        return len(seq) != 0
    return impl
@lower_builtin(operator.add, types.List, types.List)
def list_add(context, builder, sig, args):
    # Lower ``a + b``: allocate a list of len(a)+len(b) and copy both
    # operands into it, casting elements to the result's dtype.
    a = ListInstance(context, builder, sig.args[0], args[0])
    b = ListInstance(context, builder, sig.args[1], args[1])
    a_size = a.size
    b_size = b.size
    nitems = builder.add(a_size, b_size)
    dest = ListInstance.allocate(context, builder, sig.return_type, nitems)
    dest.size = nitems
    with cgutils.for_range(builder, a_size) as loop:
        value = a.getitem(loop.index)
        value = context.cast(builder, value, a.dtype, dest.dtype)
        dest.setitem(loop.index, value, incref=True)
    with cgutils.for_range(builder, b_size) as loop:
        value = b.getitem(loop.index)
        value = context.cast(builder, value, b.dtype, dest.dtype)
        dest.setitem(builder.add(loop.index, a_size), value, incref=True)
    return impl_ret_new_ref(context, builder, sig.return_type, dest.value)
@lower_builtin(operator.iadd, types.List, types.List)
def list_add_inplace(context, builder, sig, args):
    """Lower ``a += b`` by extending *a* in place with *b*."""
    # In-place concatenation requires matching element types.
    assert sig.args[0].dtype == sig.return_type.dtype
    extended = _list_extend_list(context, builder, sig, args)
    return impl_ret_borrowed(context, builder, sig.return_type,
                             extended.value)
@lower_builtin(operator.mul, types.List, types.Integer)
def list_mul(context, builder, sig, args):
    # Lower ``lst * n``: allocate n * len(lst) slots and copy the source
    # repeatedly.  A negative multiplier is clamped to 0 (empty result),
    # matching CPython semantics.
    src = ListInstance(context, builder, sig.args[0], args[0])
    src_size = src.size
    mult = args[1]
    zero = ir.Constant(mult.type, 0)
    mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
    nitems = builder.mul(mult, src_size)
    dest = ListInstance.allocate(context, builder, sig.return_type, nitems)
    dest.size = nitems
    with cgutils.for_range_slice(builder, zero, nitems, src_size, inc=True) as (dest_offset, _):
        with cgutils.for_range(builder, src_size) as loop:
            value = src.getitem(loop.index)
            dest.setitem(builder.add(loop.index, dest_offset), value, incref=True)
    return impl_ret_new_ref(context, builder, sig.return_type, dest.value)
@lower_builtin(operator.imul, types.List, types.Integer)
def list_mul_inplace(context, builder, sig, args):
    # Lower ``lst *= n`` in place: resize to n * len(lst) and append
    # copies of the original items.  A negative multiplier clamps to 0.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    src_size = inst.size
    mult = args[1]
    zero = ir.Constant(mult.type, 0)
    mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
    nitems = builder.mul(mult, src_size)
    inst.resize(nitems)
    # Start copying after the original items (offset begins at src_size).
    with cgutils.for_range_slice(builder, src_size, nitems, src_size, inc=True) as (dest_offset, _):
        with cgutils.for_range(builder, src_size) as loop:
            value = inst.getitem(loop.index)
            inst.setitem(builder.add(loop.index, dest_offset), value, incref=True)
    return impl_ret_borrowed(context, builder, sig.return_type, inst.value)
#-------------------------------------------------------------------------------
# Comparisons
@lower_builtin(operator.is_, types.List, types.List)
def list_is(context, builder, sig, args):
    """Lower ``a is b``: compare the backing meminfo pointers."""
    lhs = ListInstance(context, builder, sig.args[0], args[0])
    rhs = ListInstance(context, builder, sig.args[1], args[1])
    lhs_addr = builder.ptrtoint(lhs.meminfo, cgutils.intp_t)
    rhs_addr = builder.ptrtoint(rhs.meminfo, cgutils.intp_t)
    return builder.icmp_signed('==', lhs_addr, rhs_addr)
@lower_builtin(operator.eq, types.List, types.List)
def list_eq(context, builder, sig, args):
    # Lower ``a == b``: sizes must match, then compare element-wise with
    # an early exit on the first mismatch.
    aty, bty = sig.args
    a = ListInstance(context, builder, aty, args[0])
    b = ListInstance(context, builder, bty, args[1])
    a_size = a.size
    same_size = builder.icmp_signed('==', a_size, b.size)
    res = cgutils.alloca_once_value(builder, same_size)
    with builder.if_then(same_size):
        with cgutils.for_range(builder, a_size) as loop:
            v = a.getitem(loop.index)
            w = b.getitem(loop.index)
            itemres = context.generic_compare(builder, operator.eq,
                                              (aty.dtype, bty.dtype), (v, w))
            with builder.if_then(builder.not_(itemres)):
                # Exit early
                builder.store(cgutils.false_bit, res)
                loop.do_break()
    return builder.load(res)
@lower_builtin(operator.ne, types.List, types.List)
def list_ne(context, builder, sig, args):
    """Lower ``a != b`` as the negation of ``a == b``."""
    def list_ne_impl(a, b):
        equal = (a == b)
        return not equal
    return context.compile_internal(builder, list_ne_impl, sig, args)
@lower_builtin(operator.le, types.List, types.List)
def list_le(context, builder, sig, args):
    """Lower ``a <= b``: lexicographic comparison as in CPython."""
    def list_le_impl(a, b):
        len_a = len(a)
        len_b = len(b)
        shorter = min(len_a, len_b)
        for pos in range(shorter):
            if a[pos] < b[pos]:
                return True
            if a[pos] > b[pos]:
                return False
        # Common prefix equal: the shorter (or equal) list compares <=.
        return len_a <= len_b
    return context.compile_internal(builder, list_le_impl, sig, args)
@lower_builtin(operator.lt, types.List, types.List)
def list_lt(context, builder, sig, args):
    """Lower ``a < b``: lexicographic comparison as in CPython."""
    def list_lt_impl(a, b):
        len_a = len(a)
        len_b = len(b)
        shorter = min(len_a, len_b)
        for pos in range(shorter):
            if a[pos] < b[pos]:
                return True
            if a[pos] > b[pos]:
                return False
        # Common prefix equal: strictly shorter list compares <.
        return len_a < len_b
    return context.compile_internal(builder, list_lt_impl, sig, args)
@lower_builtin(operator.ge, types.List, types.List)
def list_ge(context, builder, sig, args):
    """Lower ``a >= b`` as ``b <= a``."""
    def list_ge_impl(lhs, rhs):
        return rhs <= lhs
    return context.compile_internal(builder, list_ge_impl, sig, args)
@lower_builtin(operator.gt, types.List, types.List)
def list_gt(context, builder, sig, args):
    """Lower ``a > b`` as ``b < a``."""
    def list_gt_impl(lhs, rhs):
        return rhs < lhs
    return context.compile_internal(builder, list_gt_impl, sig, args)
#-------------------------------------------------------------------------------
# Methods
@lower_builtin("list.append", types.List, types.Any)
def list_append(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
item = args[1]
n = inst.size
new_size = builder.add(n, ir.Constant(n.type, 1))
inst.resize(new_size)
inst.setitem(n, item, incref=True)
return context.get_dummy_value()
@lower_builtin("list.clear", types.List)
def list_clear(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
inst.resize(context.get_constant(types.intp, 0))
return context.get_dummy_value()
@lower_builtin("list.copy", types.List)
def list_copy(context, builder, sig, args):
def list_copy_impl(lst):
return list(lst)
return context.compile_internal(builder, list_copy_impl, sig, args)
@lower_builtin("list.count", types.List, types.Any)
def list_count(context, builder, sig, args):
def list_count_impl(lst, value):
res = 0
for elem in lst:
if elem == value:
res += 1
return res
return context.compile_internal(builder, list_count_impl, sig, args)
def _list_extend_list(context, builder, sig, args):
    """Extend list args[0] in place with list args[1]; return the
    destination ListInstance.  Elements are cast to the destination
    dtype and increfed."""
    src = ListInstance(context, builder, sig.args[1], args[1])
    dest = ListInstance(context, builder, sig.args[0], args[0])
    src_size = src.size
    dest_size = dest.size
    nitems = builder.add(src_size, dest_size)
    dest.resize(nitems)
    dest.size = nitems
    with cgutils.for_range(builder, src_size) as loop:
        value = src.getitem(loop.index)
        value = context.cast(builder, value, src.dtype, dest.dtype)
        dest.setitem(builder.add(loop.index, dest_size), value, incref=True)
    return dest
@lower_builtin("list.extend", types.List, types.IterableType)
def list_extend(context, builder, sig, args):
if isinstance(sig.args[1], types.List):
# Specialize for list operands, for speed.
_list_extend_list(context, builder, sig, args)
return context.get_dummy_value()
def list_extend(lst, iterable):
# Speed hack to avoid NRT refcount operations inside the loop
meth = lst.append
for v in iterable:
meth(v)
return context.compile_internal(builder, list_extend, sig, args)
@lower_builtin("list.index", types.List, types.Any)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value):
for i in range(len(lst)):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.index", types.List, types.Any,
types.Integer)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value, start):
n = len(lst)
if start < 0:
start += n
if start < 0:
start = 0
for i in range(start, len(lst)):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.index", types.List, types.Any,
types.Integer, types.Integer)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value, start, stop):
n = len(lst)
if start < 0:
start += n
if start < 0:
start = 0
if stop < 0:
stop += n
if stop > n:
stop = n
for i in range(start, stop):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.insert", types.List, types.Integer,
types.Any)
def list_insert(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
index = inst.fix_index(args[1])
index = inst.clamp_index(index)
value = args[2]
n = inst.size
one = ir.Constant(n.type, 1)
new_size = builder.add(n, one)
inst.resize(new_size)
inst.move(builder.add(index, one), index, builder.sub(n, index))
inst.setitem(index, value, incref=True, decref_old_value=False)
return context.get_dummy_value()
@lower_builtin("list.pop", types.List)
def list_pop(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
n = inst.size
cgutils.guard_zero(context, builder, n,
(IndexError, "pop from empty list"))
n = builder.sub(n, ir.Constant(n.type, 1))
res = inst.getitem(n)
inst.incref_value(res) # incref the pop'ed element
inst.clear_value(n) # clear the storage space
inst.resize(n)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin("list.pop", types.List, types.Integer)
def list_pop(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
idx = inst.fix_index(args[1])
n = inst.size
cgutils.guard_zero(context, builder, n,
(IndexError, "pop from empty list"))
inst.guard_index(idx, "pop index out of range")
res = inst.getitem(idx)
one = ir.Constant(n.type, 1)
n = builder.sub(n, ir.Constant(n.type, 1))
inst.move(idx, builder.add(idx, one), builder.sub(n, idx))
inst.resize(n)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin("list.remove", types.List, types.Any)
def list_remove(context, builder, sig, args):
def list_remove_impl(lst, value):
for i in range(len(lst)):
if lst[i] == value:
lst.pop(i)
return
# XXX references are leaked when raising
raise ValueError("list.remove(x): x not in list")
return context.compile_internal(builder, list_remove_impl, sig, args)
@lower_builtin("list.reverse", types.List)
def list_reverse(context, builder, sig, args):
def list_reverse_impl(lst):
for a in range(0, len(lst) // 2):
b = -a - 1
lst[a], lst[b] = lst[b], lst[a]
return context.compile_internal(builder, list_reverse_impl, sig, args)
# -----------------------------------------------------------------------------
# Sorting
def gt(a, b):
    # Strict greater-than predicate; passed as ``lt`` to the quicksort
    # factory below to obtain a descending sort.
    return a > b
# Pre-built quicksort entry points: plain ascending/descending sorts,
# plus argsort variants (used for key= support) that return the
# permutation indices instead of sorting in place.
sort_forwards = quicksort.make_jit_quicksort().run_quicksort
sort_backwards = quicksort.make_jit_quicksort(lt=gt).run_quicksort
arg_sort_forwards = quicksort.make_jit_quicksort(is_argsort=True,
                                                 is_list=True).run_quicksort
arg_sort_backwards = quicksort.make_jit_quicksort(is_argsort=True, lt=gt,
                                                  is_list=True).run_quicksort
def _sort_check_reverse(reverse):
    """Validate the 'reverse' argument type for sort()/sorted() and
    return the underlying concrete type.

    Raises TypingError if it is not boolean/integer-like.
    """
    # Unwrap Omitted (default value) and Optional wrappers first.
    if isinstance(reverse, types.Omitted):
        rty = reverse.value
    elif isinstance(reverse, types.Optional):
        rty = reverse.type
    else:
        rty = reverse
    acceptable = (types.Boolean, types.Integer, int, bool)
    if not isinstance(rty, acceptable):
        raise errors.TypingError(
            "an integer is required for 'reverse' (got type %s)" % reverse)
    return rty
def _sort_check_key(key):
    """Validate the 'key' argument type for sort()/sorted().

    Raises TypingError unless it is None-like or a jitted function.
    """
    if isinstance(key, types.Optional):
        raise errors.TypingError(
            "Key must concretely be None or a Numba JIT compiled function, "
            "an Optional (union of None and a value) was found")
    key_is_none = cgutils.is_nonelike(key)
    key_is_jitted = isinstance(key, types.Dispatcher)
    if not (key_is_none or key_is_jitted):
        raise errors.TypingError(
            "Key must be None or a Numba JIT compiled function")
@overload_method(types.List, "sort")
def ol_list_sort(lst, key=None, reverse=False):
_sort_check_key(key)
_sort_check_reverse(reverse)
if cgutils.is_nonelike(key):
KEY = False
sort_f = sort_forwards
sort_b = sort_backwards
elif isinstance(key, types.Dispatcher):
KEY = True
sort_f = arg_sort_forwards
sort_b = arg_sort_backwards
def impl(lst, key=None, reverse=False):
if KEY is True:
_lst = [key(x) for x in lst]
else:
_lst = lst
if reverse is False or reverse == 0:
tmp = sort_f(_lst)
else:
tmp = sort_b(_lst)
if KEY is True:
lst[:] = [lst[i] for i in tmp]
return impl
@overload(sorted)
def ol_sorted(iterable, key=None, reverse=False):
    # Implement ``sorted(iterable, key=..., reverse=...)``.
    if not isinstance(iterable, types.IterableType):
        # Not iterable: signal "no implementation" for this overload.
        return False
    _sort_check_key(key)
    _sort_check_reverse(reverse)
    def impl(iterable, key=None, reverse=False):
        # Materialize into a list, then delegate to list.sort().
        lst = list(iterable)
        lst.sort(key=key, reverse=reverse)
        return lst
    return impl
# -----------------------------------------------------------------------------
# Implicit casting
@lower_cast(types.List, types.List)
def list_to_list(context, builder, fromty, toty, val):
    """Cast between list types (e.g. non-reflected -> reflected).

    Only the reflection flag may differ; the element type must match,
    so the underlying value is reusable unchanged.
    """
    assert fromty.dtype == toty.dtype
    return val
# -----------------------------------------------------------------------------
# Implementations for types.LiteralList
# -----------------------------------------------------------------------------
# Shared error raised by every mutation overload on LiteralList below.
_banned_error = errors.TypingError("Cannot mutate a literal list")
# Things that mutate literal lists are banned
@overload_method(types.LiteralList, 'append')
def literal_list_banned_append(lst, obj):
    # LiteralList is immutable: reject at typing time.
    raise _banned_error
@overload_method(types.LiteralList, 'extend')
def literal_list_banned_extend(lst, iterable):
raise _banned_error
@overload_method(types.LiteralList, 'insert')
def literal_list_banned_insert(lst, index, obj):
raise _banned_error
@overload_method(types.LiteralList, 'remove')
def literal_list_banned_remove(lst, value):
raise _banned_error
@overload_method(types.LiteralList, 'pop')
def literal_list_banned_pop(lst, index=-1):
raise _banned_error
@overload_method(types.LiteralList, 'clear')
def literal_list_banned_clear(lst):
raise _banned_error
@overload_method(types.LiteralList, 'sort')
def literal_list_banned_sort(lst, key=None, reverse=False):
raise _banned_error
@overload_method(types.LiteralList, 'reverse')
def literal_list_banned_reverse(lst):
raise _banned_error
# Sentinel default for the `end` argument: the largest native integer.
_index_end = types.intp.maxval
@overload_method(types.LiteralList, 'index')
def literal_list_index(lst, x, start=0, end=_index_end):
    """list.index() is not supported on literal lists; raises TypingError."""
    # TODO: To make this work, need consts as slice for start/end so as to
    # be able to statically analyse the bounds, then its a just loop body
    # versioning based iteration along with enumerate to find the item
    if isinstance(lst, types.LiteralList):
        msg = "list.index is unsupported for literal lists"
        raise errors.TypingError(msg)
@overload_method(types.LiteralList, 'count')
def literal_list_count(lst, x):
    """list.count() for literal lists, using literal_unroll so the loop body
    is versioned over the (possibly heterogeneous) element types."""
    if not isinstance(lst, types.LiteralList):
        return
    def impl(lst, x):
        n = 0
        for item in literal_unroll(lst):
            if item == x:
                n += 1
        return n
    return impl
@overload_method(types.LiteralList, 'copy')
def literal_list_copy(lst):
    """list.copy() for literal lists.

    Fix: this function was previously misnamed ``literal_list_count``,
    silently shadowing the count() overload's module-level name. The
    registration happens through @overload_method, so the rename does not
    change behavior.
    """
    if isinstance(lst, types.LiteralList):
        def impl(lst):
            return lst  # tuples are immutable, as is this, so just return it
        return impl
@overload(operator.delitem)
def literal_list_delitem(lst, index):
    """`del lst[i]` is banned: literal lists are immutable."""
    if isinstance(lst, types.LiteralList):
        raise _banned_error
@overload(operator.setitem)
def literal_list_setitem(lst, index, value):
    """`lst[i] = v` is banned: literal lists are immutable."""
    if isinstance(lst, types.LiteralList):
        # Consistency fix: raise the shared sentinel used by every other
        # banned mutator (append/extend/delitem/...). The error message is
        # byte-identical to the TypingError previously constructed inline.
        raise _banned_error
@overload(operator.getitem)
def literal_list_getitem(lst, *args):
    """__getitem__ is rejected for LiteralList: with heterogeneous element
    types the return type cannot be statically determined."""
    if not isinstance(lst, types.LiteralList):
        return
    msg = ("Cannot __getitem__ on a literal list, return type cannot be "
           "statically determined.")
    raise errors.TypingError(msg)
@overload(len)
def literal_list_len(lst):
    """len() for literal lists: the length is a compile-time constant."""
    if not isinstance(lst, types.LiteralList):
        return
    nitems = lst.count
    def impl(lst):
        return nitems
    return impl
@overload(operator.contains)
def literal_list_contains(lst, item):
    """`item in lst` for literal lists, comparing against each element via a
    literal_unroll'd (type-versioned) loop."""
    if isinstance(lst, types.LiteralList):
        def impl(lst, item):
            for val in literal_unroll(lst):
                if val == item:
                    return True
            return False
        return impl
@lower_cast(types.LiteralList, types.LiteralList)
def literallist_to_literallist(context, builder, fromty, toty, val):
    """Lower a LiteralList -> LiteralList cast by casting elementwise.

    The value is unpacked like a tuple, each item cast to the destination
    element type, then repacked.
    """
    if len(fromty) != len(toty):
        # Disallowed by typing layer
        raise NotImplementedError
    olditems = cgutils.unpack_tuple(builder, val, len(fromty))
    items = [context.cast(builder, v, f, t)
             for v, f, t in zip(olditems, fromty, toty)]
    return context.make_tuple(builder, toty, items)
|
{
"content_hash": "c7a0e6bb76a52b33aa5cf0adf2fd43d8",
"timestamp": "",
"source": "github",
"line_count": 1254,
"max_line_length": 100,
"avg_line_length": 34.83014354066986,
"alnum_prop": 0.6092909311536965,
"repo_name": "seibert/numba",
"id": "69d5fbf4990f656e92adfad4fbf3fedfe4885bc4",
"size": "43677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cpython/listobj.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6877"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8688132"
},
{
"name": "Shell",
"bytes": "13404"
}
],
"symlink_target": ""
}
|
import soundcloud
import requests
import sys
import argparse
import demjson
from datetime import datetime
from os.path import exists
from os import mkdir
from subprocess import Popen, PIPE
from mutagen.mp3 import MP3, EasyMP3
from mutagen.id3 import APIC
from mutagen.id3 import ID3 as OldID3
from clint.textui import colored, puts, progress
# Please be nice with this!
CLIENT_ID = '22e566527758690e6feb2b5cb300cc43'
CLIENT_SECRET = '3a7815c3f9a82c3448ee4e7d3aa484a4'
MAGIC_CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'
def main():
    """CLI entry point (Python 2).

    Parses arguments, normalizes the artist URL, resolves it through the
    SoundCloud API (or routes to the Bandcamp scraper), downloads the
    tracks and optionally opens the resulting files.
    """
    parser = argparse.ArgumentParser(description='SoundScrape. Scrape an artist from SoundCloud.\n')
    parser.add_argument('artist_url', metavar='U', type=str,
                        help='An artist\'s SoundCloud username or URL')
    parser.add_argument('-n', '--num-tracks', type=int, default=sys.maxint,
                        help='The number of tracks to download')
    parser.add_argument('-g', '--group', action='store_true',
                        help='Use if downloading tracks from a SoundCloud group')
    parser.add_argument('-b', '--bandcamp', action='store_true',
                        help='Use if downloading from Bandcamp rather than SoundCloud')
    parser.add_argument('-l', '--likes', action='store_true',
                        help='Download all of a user\'s Likes.')
    # NOTE(review): "traks" typo below is user-facing help text; changing it
    # alters runtime output, so it is only flagged here.
    parser.add_argument('-d', '--downloadable', action='store_true',
                        help='Only fetch traks with a Downloadable link.')
    parser.add_argument('-t', '--track', type=str, default='',
                        help='The name of a specific track by an artist')
    parser.add_argument('-f', '--folders', action='store_true',
                        help='Organize saved songs in folders by artists')
    parser.add_argument('-o', '--open', action='store_true',
                        help='Open downloaded files after downloading.')
    args = parser.parse_args()
    vargs = vars(args)
    if not any(vargs.values()):
        parser.error('Please supply an artist\'s username or URL!')
    artist_url = vargs['artist_url']
    track_permalink = vargs['track']
    one_track = False
    # Bandcamp URLs (explicit, or forced with -b) short-circuit to the
    # Bandcamp scraper and return early.
    if 'bandcamp.com' in artist_url or vargs['bandcamp']:
        if 'bandcamp.com' in artist_url:
            bc_url = artist_url
        else:
            bc_url = 'https://' + artist_url + '.bandcamp.com'
        filenames = scrape_bandcamp_url(bc_url, num_tracks=vargs['num_tracks'], folders=vargs['folders'])
        if vargs['open']:
            open_files(filenames)
        return
    # Bare usernames are expanded into full soundcloud.com URLs.
    if 'soundcloud' not in artist_url.lower():
        if vargs['group']:
            artist_url = 'https://soundcloud.com/groups/' + artist_url.lower()
        elif len(track_permalink) > 0:
            one_track = True
            track_url = 'https://soundcloud.com/' + artist_url.lower() + '/' + track_permalink.lower()
        else:
            artist_url = 'https://soundcloud.com/' + artist_url.lower()
            if vargs['likes']:
                artist_url = artist_url + '/likes'
    client = soundcloud.Client(client_id=CLIENT_ID)
    if one_track:
        resolved = client.get('/resolve', url=track_url)
    else:
        resolved = client.get('/resolve', url=artist_url)
    # A resolved object without a 'kind' is likely a 'likes' page: it is
    # already a list of tracks.
    if not hasattr(resolved, 'kind'):
        tracks = resolved
    else:
        if resolved.kind == 'artist':
            artist = resolved
            artist_id = artist.id
            tracks = client.get('/users/' + str(artist_id) + '/tracks')
        elif resolved.kind == 'playlist':
            tracks = resolved.tracks
        elif resolved.kind == 'track':
            tracks = [resolved]
        elif resolved.kind == 'group':
            group = resolved
            group_id = group.id
            tracks = client.get('/groups/' + str(group_id) + '/tracks')
        else:
            # Unknown kind: fall back to treating it as an artist page.
            artist = resolved
            artist_id = artist.id
            tracks = client.get('/users/' + str(artist_id) + '/tracks')
    if one_track:
        num_tracks = 1
    else:
        num_tracks = vargs['num_tracks']
    filenames = download_tracks(client, tracks, num_tracks, vargs['downloadable'], vargs['folders'])
    if vargs['open']:
        open_files(filenames)
def download_tracks(client, tracks, num_tracks=sys.maxint, downloadable=False, folders=False):
    """Download up to `num_tracks` tracks and return the saved file paths.

    `tracks` may contain soundcloud Resource objects or plain dicts; the
    former are normalized to dicts first. With `downloadable` set, tracks
    without an official download link are skipped. With `folders` set,
    tracks are saved under a per-artist directory and existing files are
    not re-downloaded.
    """
    filenames = []
    for i, track in enumerate(tracks):
        # "Track" and "Resource" objects are actually different,
        # even though they're the same.
        if isinstance(track, soundcloud.resource.Resource):
            # Normalize the API resource into a plain dict so both input
            # shapes share one download path below.
            try:
                t_track = {}
                t_track['downloadable'] = track.downloadable
                t_track['streamable'] = track.streamable
                t_track['title'] = track.title
                t_track['user'] = {'username': track.user['username']}
                t_track['release_year'] = track.release
                t_track['genre'] = track.genre
                t_track['artwork_url'] = track.artwork_url
                if track.downloadable:
                    t_track['stream_url'] = track.download_url
                else:
                    if downloadable:
                        puts(colored.red(u"Skipping") + ": " + track.title.encode('utf-8'))
                        continue
                    if hasattr(track, 'stream_url'):
                        t_track['stream_url'] = track.stream_url
                    else:
                        # No stream URL exposed: build a direct API stream
                        # URL using the known client id.
                        t_track['direct'] = True
                        t_track['stream_url'] = 'https://api.soundcloud.com/tracks/' + str(track.id) + '/stream?client_id=' + MAGIC_CLIENT_ID
                track = t_track
            except Exception, e:
                puts(track.title.encode('utf-8') + colored.red(u' is not downloadable') + '.')
                continue
        if i > num_tracks - 1:
            continue
        try:
            if not track.get('stream_url', False):
                puts(track['title'].encode('utf-8') + colored.red(u' is not downloadable') + '.')
                continue
            else:
                # '/' would be interpreted as a path separator in filenames.
                track_artist = track['user']['username'].replace('/', '-')
                track_title = track['title'].replace('/', '-')
                track_filename = track_artist + ' - ' + track_title + '.mp3'
                if folders:
                    if not exists(track_artist):
                        mkdir(track_artist)
                    track_filename = track_artist + '/' + track_filename
                    if exists(track_filename) and folders:
                        puts(colored.yellow(u"Track already downloaded: ") + track_title.encode('utf-8'))
                        continue
                puts(colored.green(u"Downloading") + ": " + track['title'].encode('utf-8'))
                if track.get('direct', False):
                    location = track['stream_url']
                else:
                    # Resolve the stream URL without following the redirect,
                    # so the redirect target itself can be downloaded.
                    stream = client.get(track['stream_url'], allow_redirects=False)
                    if hasattr(stream, 'location'):
                        location = stream.location
                    else:
                        location = stream.url
                download_file(location, track_filename)
                tag_file(track_filename,
                         artist=track['user']['username'],
                         title=track['title'],
                         year=track['release_year'],
                         genre=track['genre'],
                         artwork_url=track['artwork_url'])
                filenames.append(track_filename)
        except Exception, e:
            puts(colored.red(u"Problem downloading ") + track['title'].encode('utf-8'))
        print  # blank line between tracks
    return filenames
def download_file(url, path):
    """Stream `url` to `path` in 1 KiB chunks with a progress bar.

    Returns the destination path.

    Fix: the Content-Length header is optional; previously a missing or
    non-numeric header made int(None) raise TypeError before the first
    byte was written. Now the progress bar simply runs without a total
    (clint's progress.bar accepts expected_size=None).
    """
    r = requests.get(url, stream=True)
    with open(path, 'wb') as f:
        try:
            total_length = int(r.headers.get('content-length'))
            # Integer division: works identically under Python 2 and 3.
            expected = (total_length // 1024) + 1
        except (TypeError, ValueError):
            expected = None
        for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=expected):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                f.flush()
    return path
def tag_file(filename, artist, title, year, genre, artwork_url, album=None, track_number=None):
    """Write ID3 tags (and optional cover art) to the MP3 at `filename`.

    Text tags are written with EasyMP3; embedded artwork requires the
    raw ID3 interface, so the file is reopened with MP3/ID3 for the APIC
    frame. Errors are printed and swallowed (tagging is best-effort).
    """
    try:
        audio = EasyMP3(filename)
        audio["artist"] = artist
        audio["title"] = title
        if year:
            audio["date"] = str(year)
        if album:
            audio["album"] = album
        if track_number:
            audio["tracknumber"] = str(track_number)
        audio["genre"] = genre
        audio.save()
        if artwork_url:
            artwork_url = artwork_url.replace('https', 'http')
            mime = 'image/jpeg'
            if '.jpg' in artwork_url:
                mime = 'image/jpeg'
            if '.png' in artwork_url:
                mime = 'image/png'
            if '-large' in artwork_url:
                # Try the 500x500 variant first, falling back to the
                # original size if it does not exist.
                new_artwork_url = artwork_url.replace('-large', '-t500x500')
                try:
                    image_data = requests.get(new_artwork_url).content
                except Exception, e:
                    # No very large image available.
                    image_data = requests.get(artwork_url).content
            else:
                image_data = requests.get(artwork_url).content
            audio = MP3(filename, ID3=OldID3)
            audio.tags.add(
                APIC(
                    encoding=3,  # 3 is for utf-8
                    mime=mime,
                    type=3,  # 3 is for the cover image
                    desc=u'Cover',
                    data=image_data
                )
            )
            audio.save()
    except Exception, e:
        print e
def open_files(filenames):
    """Open the downloaded files with the OS 'open' command and wait for it."""
    opener = Popen(['open'] + filenames, stdout=PIPE, stderr=PIPE)
    opener.communicate()
# Largely borrowed from Ronier's bandcampscrape
def scrape_bandcamp_url(url, num_tracks=sys.maxint, folders=False):
    """Download up to `num_tracks` tracks from a Bandcamp album/artist URL.

    Returns the list of saved file paths. With `folders` set, files go
    into an "<artist> - <album>" directory and existing files are kept.
    """
    album_data = get_bandcamp_metadata(url)
    artist = album_data["artist"]
    album_name = album_data["current"]["title"]
    filenames = []
    if folders:
        directory = artist + " - " + album_name
        directory = directory.replace("/", " - ")
        if not exists(directory):
            mkdir(directory)
    for i, track in enumerate(album_data["trackinfo"]):
        if i > num_tracks - 1:
            continue
        try:
            track_name = track["title"]
            track_number = str(track["track_num"]).zfill(2)
            track_filename = '%s - %s.mp3' % (track_number, track_name)
            if folders:
                path = directory + "/" + track_filename
                if exists(path):
                    puts(colored.yellow(u"Track already downloaded: ") + track_name.encode('utf-8'))
                    continue
            else:
                path = artist + ' - ' + track_filename
            # NOTE(review): "unavailble" typo below is user-facing output;
            # fixing it changes runtime behavior, so it is only flagged here.
            if not track['file']:
                puts(colored.yellow(u"Track unavailble for scraping: ") + track_name.encode('utf-8'))
                continue
            puts(colored.green(u"Downloading") + ': ' + track['title'].encode('utf-8'))
            download_file(track['file']['mp3-128'], path)
            year = datetime.strptime(album_data['album_release_date'], "%d %b %Y %H:%M:%S GMT").year
            tag_file(path,
                     artist,
                     track['title'],
                     album=album_data['current']['title'],
                     year=year,
                     genre='',
                     artwork_url=album_data['artFullsizeUrl'],
                     track_number=track['track_num'])
            filenames.append(path)
        except Exception, e:
            puts(colored.red(u"Problem downloading ") + track['title'].encode('utf-8'))
            print e
    return filenames
def get_bandcamp_metadata(url):
    """Extract the TralbumData JavaScript object from a Bandcamp page.

    The page embeds `var TralbumData = {...};` — everything after the
    assignment up to the first `};` is cleaned up and parsed with demjson
    (which tolerates non-strict JSON such as unquoted keys).
    """
    request = requests.get(url)
    sloppy_json = request.text.split("var TralbumData = ")
    # Undo JavaScript string concatenation inside the object literal.
    sloppy_json = sloppy_json[1].replace('" + "', "")
    # NOTE(review): "\'" == "'", so this replace is a no-op. It was
    # presumably meant to escape quotes; confirm intent before changing.
    sloppy_json = sloppy_json.replace("'", "\'")
    # Keep only the object itself, dropping the trailing semicolon.
    sloppy_json = sloppy_json.split("};")[0] + "};"
    sloppy_json = sloppy_json.replace("};", "}")
    return demjson.decode(sloppy_json)
# Script entry point: print (rather than traceback) any uncaught error.
if __name__ == '__main__':
    try:
        sys.exit(main())
    except Exception, e:
        print e
|
{
"content_hash": "01428f01605ca4b947fda97480b40ce1",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 141,
"avg_line_length": 37.09969788519638,
"alnum_prop": 0.5344462540716612,
"repo_name": "dylanlesko/SoundScrape",
"id": "81960f42aff5c56916180a7658371eb2b2499210",
"size": "12304",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "soundscrape/soundscrape.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13577"
}
],
"symlink_target": ""
}
|
"""
Title: Gradient Centralization for Better Training Performance
Author: [Rishit Dagli](https://github.com/Rishit-dagli)
Date created: 06/18/21
Last modified: 06/18/21
Description: Implement Gradient Centralization to improve training performance of DNNs.
"""
"""
## Introduction
This example implements [Gradient Centralization](https://arxiv.org/abs/2004.01461), a
new optimization technique for Deep Neural Networks by Yong et al., and demonstrates it
on Laurence Moroney's [Horses or Humans
Dataset](https://www.tensorflow.org/datasets/catalog/horses_or_humans). Gradient
Centralization can both speedup training process and improve the final generalization
performance of DNNs. It operates directly on gradients by centralizing the gradient
vectors to have zero mean. Gradient Centralization moreover improves the Lipschitzness of
the loss function and its gradient so that the training process becomes more efficient
and stable.
This example requires TensorFlow 2.2 or higher as well as `tensorflow_datasets` which can
be installed with this command:
```
pip install tensorflow-datasets
```
We will be implementing Gradient Centralization in this example but you could also use
this very easily with a package I built,
[gradient-centralization-tf](https://github.com/Rishit-dagli/Gradient-Centralization-TensorFlow).
"""
"""
## Setup
"""
from time import time
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop
"""
## Prepare the data
For this example, we will be using the [Horses or Humans
dataset](https://www.tensorflow.org/datasets/catalog/horses_or_humans).
"""
num_classes = 2
input_shape = (300, 300, 3)
dataset_name = "horses_or_humans"
batch_size = 128
AUTOTUNE = tf.data.AUTOTUNE
(train_ds, test_ds), metadata = tfds.load(
name=dataset_name,
split=[tfds.Split.TRAIN, tfds.Split.TEST],
with_info=True,
as_supervised=True,
)
print(f"Image shape: {metadata.features['image'].shape}")
print(f"Training images: {metadata.splits['train'].num_examples}")
print(f"Test images: {metadata.splits['test'].num_examples}")
"""
## Use Data Augmentation
We will rescale the data to `[0, 1]` and perform simple augmentations to our data.
"""
rescale = layers.Rescaling(1.0 / 255)
data_augmentation = tf.keras.Sequential(
[
layers.RandomFlip("horizontal_and_vertical"),
layers.RandomRotation(0.3),
layers.RandomZoom(0.2),
]
)
def prepare(ds, shuffle=False, augment=False):
    """Rescale, optionally shuffle and augment, then batch and prefetch `ds`.

    Augmentation (`augment=True`) is intended for the training split only.
    """
    ds = ds.map(lambda image, label: (rescale(image), label),
                num_parallel_calls=AUTOTUNE)
    if shuffle:
        ds = ds.shuffle(1024)
    ds = ds.batch(batch_size)
    if augment:
        ds = ds.map(
            lambda image, label: (data_augmentation(image, training=True), label),
            num_parallel_calls=AUTOTUNE,
        )
    # Overlap preprocessing with training via buffered prefetching.
    return ds.prefetch(buffer_size=AUTOTUNE)
"""
Rescale and augment the data
"""
train_ds = prepare(train_ds, shuffle=True, augment=True)
test_ds = prepare(test_ds)
"""
## Define a model
In this section we will define a Convolutional neural network.
"""
model = tf.keras.Sequential(
[
layers.Conv2D(16, (3, 3), activation="relu", input_shape=(300, 300, 3)),
layers.MaxPooling2D(2, 2),
layers.Conv2D(32, (3, 3), activation="relu"),
layers.Dropout(0.5),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.Dropout(0.5),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(512, activation="relu"),
layers.Dense(1, activation="sigmoid"),
]
)
"""
## Implement Gradient Centralization
We will now
subclass the `RMSProp` optimizer class modifying the
`tf.keras.optimizers.Optimizer.get_gradients()` method where we now implement Gradient
Centralization. On a high level the idea is that let us say we obtain our gradients
through back propagation for a Dense or Convolution layer, we then compute the mean of the
column vectors of the weight matrix, and then remove the mean from each column vector.
The experiments in [this paper](https://arxiv.org/abs/2004.01461) on various
applications, including general image classification, fine-grained image classification,
detection and segmentation and Person ReID demonstrate that GC can consistently improve
the performance of DNN learning.
Also, for simplicity, at the moment we are not implementing gradient clipping functionality;
however, this is quite easy to implement.
At the moment we are just creating a subclass for the `RMSProp` optimizer
however you could easily reproduce this for any other optimizer or on a custom
optimizer in the same way. We will be using this class in the later section when
we train a model with Gradient Centralization.
"""
class GCRMSprop(RMSprop):
    """RMSprop with Gradient Centralization (Yong et al., 2020).

    Overrides get_gradients() to subtract, from every gradient with more
    than one axis (Dense/Conv kernels, but not biases), the mean over all
    axes except the last — centralizing each column vector to zero mean.
    """

    def get_gradients(self, loss, params):
        # We here just provide a modified get_gradients() function since we
        # are trying to just compute the centralized gradients.
        grads = []
        # Fix: the parent call previously dropped (loss, params), which
        # raises a TypeError at the first training step.
        gradients = super().get_gradients(loss, params)
        for grad in gradients:
            grad_len = len(grad.shape)
            if grad_len > 1:
                axis = list(range(grad_len - 1))
                # Fix: TF2 spells this kwarg `keepdims`; `keep_dims` is the
                # removed TF1 name and raises a TypeError.
                grad -= tf.reduce_mean(grad, axis=axis, keepdims=True)
            grads.append(grad)
        return grads
optimizer = GCRMSprop(learning_rate=1e-4)
"""
## Training utilities
We will also create a callback which allows us to easily measure the total training time
and the time taken for each epoch since we are interested in comparing the effect of
Gradient Centralization on the model we built above.
"""
class TimeHistory(tf.keras.callbacks.Callback):
    """Keras callback recording per-epoch wall-clock time.

    After training, ``self.times`` holds one duration in seconds per epoch.
    """

    def on_train_begin(self, logs=None):
        # Fix: default was the mutable `logs={}` (shared across calls);
        # use None, matching the Keras callback convention.
        self.times = []

    def on_epoch_begin(self, batch, logs=None):
        self.epoch_time_start = time()

    def on_epoch_end(self, batch, logs=None):
        self.times.append(time() - self.epoch_time_start)
"""
## Train the model without GC
We now train the model we built earlier without Gradient Centralization which we can
compare to the training performance of the model trained with Gradient Centralization.
"""
time_callback_no_gc = TimeHistory()
model.compile(
loss="binary_crossentropy",
optimizer=RMSprop(learning_rate=1e-4),
metrics=["accuracy"],
)
model.summary()
"""
We also save the history since we later want to compare our model trained with and not
trained with Gradient Centralization
"""
history_no_gc = model.fit(
train_ds, epochs=10, verbose=1, callbacks=[time_callback_no_gc]
)
"""
## Train the model with GC
We will now train the same model, this time using Gradient Centralization,
notice our optimizer is the one using Gradient Centralization this time.
"""
time_callback_gc = TimeHistory()
model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
model.summary()
history_gc = model.fit(train_ds, epochs=10, verbose=1, callbacks=[time_callback_gc])
"""
## Comparing performance
"""
print("Not using Gradient Centralization")
print(f"Loss: {history_no_gc.history['loss'][-1]}")
print(f"Accuracy: {history_no_gc.history['accuracy'][-1]}")
print(f"Training Time: {sum(time_callback_no_gc.times)}")
print("Using Gradient Centralization")
print(f"Loss: {history_gc.history['loss'][-1]}")
print(f"Accuracy: {history_gc.history['accuracy'][-1]}")
print(f"Training Time: {sum(time_callback_gc.times)}")
"""
Readers are encouraged to try out Gradient Centralization on different datasets from
different domains and experiment with its effect. You are strongly advised to check out
the [original paper](https://arxiv.org/abs/2004.01461) as well - the authors present
several studies on Gradient Centralization showing how it can improve general
performance, generalization, and training efficiency.
Many thanks to [Ali Mustufa Shaikh](https://github.com/ialimustufa) for reviewing this
implementation.
"""
|
{
"content_hash": "f98cb6781b03e8fb950a319c0e4a2f0a",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 97,
"avg_line_length": 31.033834586466167,
"alnum_prop": 0.7182313749242883,
"repo_name": "keras-team/keras-io",
"id": "8e842ece0a4c8aad30000a13b9f195e739666641",
"size": "8255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vision/gradient_centralization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15929"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "21968"
},
{
"name": "Jupyter Notebook",
"bytes": "718942"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "680865"
}
],
"symlink_target": ""
}
|
"""
Read/Write Avro File Object Containers.
"""
import zlib
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import schema
from avro import io
try:
import snappy
has_snappy = True
except ImportError:
has_snappy = False
#
# Constants
#
VERSION = 1
MAGIC = 'Obj' + chr(VERSION)
MAGIC_SIZE = len(MAGIC)
SYNC_SIZE = 16
SYNC_INTERVAL = 4000 * SYNC_SIZE # TODO(hammer): make configurable
META_SCHEMA = schema.parse("""\
{"type": "record", "name": "org.apache.avro.file.Header",
"fields" : [
{"name": "magic", "type": {"type": "fixed", "name": "magic", "size": %d}},
{"name": "meta", "type": {"type": "map", "values": "bytes"}},
{"name": "sync", "type": {"type": "fixed", "name": "sync", "size": %d}}]}
""" % (MAGIC_SIZE, SYNC_SIZE))
VALID_CODECS = ['null', 'deflate']
if has_snappy:
VALID_CODECS.append('snappy')
VALID_ENCODINGS = ['binary'] # not used yet
CODEC_KEY = "avro.codec"
SCHEMA_KEY = "avro.schema"
#
# Exceptions
#
class DataFileException(schema.AvroException):
  """
  Raised when there's a problem reading or writing file object containers.
  """
  def __init__(self, fail_msg):
    # Delegate message handling to the AvroException base class.
    schema.AvroException.__init__(self, fail_msg)
#
# Write Path
#
class DataFileWriter(object):
  """Write Avro file object containers: header, then compressed data blocks
  separated by a 16-byte sync marker. Datums are buffered and flushed as a
  block once SYNC_INTERVAL bytes accumulate."""
  @staticmethod
  def generate_sync_marker():
    return generate_sixteen_random_bytes()
  # TODO(hammer): make 'encoder' a metadata property
  def __init__(self, writer, datum_writer, writers_schema=None, codec='null'):
    """
    If the schema is not present, presume we're appending.
    @param writer: File-like object to write into.
    """
    self._writer = writer
    self._encoder = io.BinaryEncoder(writer)
    self._datum_writer = datum_writer
    # Datums are serialized into this buffer first, then emitted per block.
    self._buffer_writer = StringIO()
    self._buffer_encoder = io.BinaryEncoder(self._buffer_writer)
    self._block_count = 0
    self._meta = {}
    self._header_written = False
    if writers_schema is not None:
      # Fresh file: pick a sync marker and record codec/schema metadata.
      if codec not in VALID_CODECS:
        raise DataFileException("Unknown codec: %r" % codec)
      self._sync_marker = DataFileWriter.generate_sync_marker()
      self.set_meta('avro.codec', codec)
      self.set_meta('avro.schema', str(writers_schema))
      self.datum_writer.writers_schema = writers_schema
    else:
      # open writer for reading to collect metadata
      dfr = DataFileReader(writer, io.DatumReader())
      # TODO(hammer): collect arbitrary metadata
      # collect metadata
      self._sync_marker = dfr.sync_marker
      self.set_meta('avro.codec', dfr.get_meta('avro.codec'))
      # get schema used to write existing file
      schema_from_file = dfr.get_meta('avro.schema')
      self.set_meta('avro.schema', schema_from_file)
      self.datum_writer.writers_schema = schema.parse(schema_from_file)
      # seek to the end of the file and prepare for writing
      writer.seek(0, 2)
      self._header_written = True
  # read-only properties
  writer = property(lambda self: self._writer)
  encoder = property(lambda self: self._encoder)
  datum_writer = property(lambda self: self._datum_writer)
  buffer_writer = property(lambda self: self._buffer_writer)
  buffer_encoder = property(lambda self: self._buffer_encoder)
  sync_marker = property(lambda self: self._sync_marker)
  meta = property(lambda self: self._meta)
  def __enter__(self):
    return self
  def __exit__(self, type, value, traceback):
    # Perform a close if there's no exception
    if type is None:
      self.close()
  # read/write properties
  def set_block_count(self, new_val):
    self._block_count = new_val
  block_count = property(lambda self: self._block_count, set_block_count)
  # utility functions to read/write metadata entries
  def get_meta(self, key):
    return self._meta.get(key)
  def set_meta(self, key, val):
    self._meta[key] = val
  def _write_header(self):
    # Header = magic bytes + metadata map + sync marker (see META_SCHEMA).
    header = {'magic': MAGIC,
              'meta': self.meta,
              'sync': self.sync_marker}
    self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
    self._header_written = True
  # TODO(hammer): make a schema for blocks and use datum_writer
  def _write_block(self):
    # Emit the buffered datums as one block:
    # <count> <byte length> <compressed payload> [crc32] <sync marker>.
    if not self._header_written:
      self._write_header()
    if self.block_count > 0:
      # write number of items in block
      self.encoder.write_long(self.block_count)
      # write block contents
      uncompressed_data = self.buffer_writer.getvalue()
      if self.get_meta(CODEC_KEY) == 'null':
        compressed_data = uncompressed_data
        compressed_data_length = len(compressed_data)
      elif self.get_meta(CODEC_KEY) == 'deflate':
        # The first two characters and last character are zlib
        # wrappers around deflate data.
        compressed_data = zlib.compress(uncompressed_data)[2:-1]
        compressed_data_length = len(compressed_data)
      elif self.get_meta(CODEC_KEY) == 'snappy':
        compressed_data = snappy.compress(uncompressed_data)
        compressed_data_length = len(compressed_data) + 4 # crc32
      else:
        fail_msg = '"%s" codec is not supported.' % self.get_meta(CODEC_KEY)
        raise DataFileException(fail_msg)
      # Write length of block
      self.encoder.write_long(compressed_data_length)
      # Write block
      self.writer.write(compressed_data)
      # Write CRC32 checksum for Snappy
      if self.get_meta(CODEC_KEY) == 'snappy':
        self.encoder.write_crc32(uncompressed_data)
      # write sync marker
      self.writer.write(self.sync_marker)
      # reset buffer
      # NOTE(review): truncate(0) without seek(0) relies on Python 2
      # StringIO behavior; confirm if this is ever ported to Python 3.
      self.buffer_writer.truncate(0)
      self.block_count = 0
  def append(self, datum):
    """Append a datum to the file."""
    self.datum_writer.write(datum, self.buffer_encoder)
    self.block_count += 1
    # if the data to write is larger than the sync interval, write the block
    if self.buffer_writer.tell() >= SYNC_INTERVAL:
      self._write_block()
  def sync(self):
    """
    Return the current position as a value that may be passed to
    DataFileReader.seek(long). Forces the end of the current block,
    emitting a synchronization marker.
    """
    self._write_block()
    return self.writer.tell()
  def flush(self):
    """Flush the current state of the file, including metadata."""
    self._write_block()
    self.writer.flush()
  def close(self):
    """Close the file."""
    self.flush()
    self.writer.close()
class DataFileReader(object):
"""Read files written by DataFileWriter."""
# TODO(hammer): allow user to specify expected schema?
# TODO(hammer): allow user to specify the encoder
def __init__(self, reader, datum_reader):
self._reader = reader
self._raw_decoder = io.BinaryDecoder(reader)
self._datum_decoder = None # Maybe reset at every block.
self._datum_reader = datum_reader
# read the header: magic, meta, sync
self._read_header()
# ensure codec is valid
self.codec = self.get_meta('avro.codec')
if self.codec is None:
self.codec = "null"
if self.codec not in VALID_CODECS:
raise DataFileException('Unknown codec: %s.' % self.codec)
# get file length
self._file_length = self.determine_file_length()
# get ready to read
self._block_count = 0
self.datum_reader.writers_schema = schema.parse(self.get_meta(SCHEMA_KEY))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Perform a close if there's no exception
if type is None:
self.close()
def __iter__(self):
return self
# read-only properties
reader = property(lambda self: self._reader)
raw_decoder = property(lambda self: self._raw_decoder)
datum_decoder = property(lambda self: self._datum_decoder)
datum_reader = property(lambda self: self._datum_reader)
sync_marker = property(lambda self: self._sync_marker)
meta = property(lambda self: self._meta)
file_length = property(lambda self: self._file_length)
# read/write properties
def set_block_count(self, new_val):
self._block_count = new_val
block_count = property(lambda self: self._block_count, set_block_count)
# utility functions to read/write metadata entries
def get_meta(self, key):
return self._meta.get(key)
def set_meta(self, key, val):
self._meta[key] = val
def determine_file_length(self):
"""
Get file length and leave file cursor where we found it.
"""
remember_pos = self.reader.tell()
# print "remember_pos", str(remember_pos)
self.reader.seek(0, 2)
file_length = self.reader.tell()
self.reader.seek(remember_pos)
return file_length
def is_EOF(self):
# print "self.file_length:", self.file_length
# print "self.tell():", self.reader.tell()
return self.reader.tell() == self.file_length
def _read_header(self):
# seek to the beginning of the file to get magic block
self.reader.seek(0, 0)
# read header into a dict
header = self.datum_reader.read_data(
META_SCHEMA, META_SCHEMA, self.raw_decoder)
# check magic number
if header.get('magic') != MAGIC:
fail_msg = "Not an Avro data file: %s doesn't match %s."\
% (header.get('magic'), MAGIC)
raise schema.AvroException(fail_msg)
# set metadata
self._meta = header['meta']
# set sync marker
self._sync_marker = header['sync']
def _read_block_header(self):
self.block_count = self.raw_decoder.read_long()
if self.codec == "null":
# Skip a long; we don't need to use the length.
self.raw_decoder.skip_long()
self._datum_decoder = self._raw_decoder
elif self.codec == 'deflate':
# Compressed data is stored as (length, data), which
# corresponds to how the "bytes" type is encoded.
data = self.raw_decoder.read_bytes()
# -15 is the log of the window size; negative indicates
# "raw" (no zlib headers) decompression. See zlib.h.
uncompressed = zlib.decompress(data, -15)
self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
elif self.codec == 'snappy':
# Compressed data includes a 4-byte CRC32 checksum
length = self.raw_decoder.read_long()
data = self.raw_decoder.read(length - 4)
uncompressed = snappy.decompress(data)
self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
self.raw_decoder.check_crc32(uncompressed);
else:
raise DataFileException("Unknown codec: %r" % self.codec)
def _skip_sync(self):
"""
Read the length of the sync marker; if it matches the sync marker,
return True. Otherwise, seek back to where we started and return False.
"""
proposed_sync_marker = self.reader.read(SYNC_SIZE)
# print "proposed_sync_marker:", proposed_sync_marker
# print "self.sync_marker:", self.sync_marker
if proposed_sync_marker != self.sync_marker:
self.reader.seek(-SYNC_SIZE, 1)
return False
else:
return True
# TODO(hammer): handle block of length zero
# TODO(hammer): clean this up with recursion
def next(self):
    """Return the next datum in the file."""
    # When the current block is exhausted, locate the next one (or EOF).
    if self.block_count == 0:
        if self.is_EOF():
            raise StopIteration
        elif self._skip_sync():
            # A sync marker may be immediately followed by end-of-file.
            if self.is_EOF(): raise StopIteration
            self._read_block_header()
        else:
            self._read_block_header()
    datum = self.datum_reader.read(self.datum_decoder)
    self.block_count -= 1
    return datum
def close(self):
    """Close this reader by closing the underlying file object."""
    self.reader.close()
def generate_sixteen_random_bytes():
    """Return 16 random bytes, e.g. for use as a sync marker.

    Prefers the OS entropy source.  When ``os.urandom`` is unavailable
    (no such module, or the platform provides no randomness source), it
    falls back to the non-cryptographic ``random`` module and — as in the
    original implementation — returns a list of 16 one-character strings.
    """
    try:
        import os
        return os.urandom(16)
    except (ImportError, NotImplementedError):
        # Previously a bare `except:` — that would also swallow
        # KeyboardInterrupt/SystemExit.  os.urandom raises
        # NotImplementedError when no OS randomness source exists.
        import random
        return [chr(random.randrange(256)) for i in range(16)]
|
{
"content_hash": "f663eb27695c687e15ed2dfaa4dd8427",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 78,
"avg_line_length": 32.206989247311824,
"alnum_prop": 0.6534512978883231,
"repo_name": "wuranbo/goavro",
"id": "2602acb1dd25c05c25a0093780da07a996827f05",
"size": "12761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/avro/datafile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'''
instruction: this script is used to write word document.
dependent: py-docx
'''
from docx import Document
from docx.shared import Inches
'''
reference method, not in use.
'''
def operDocx():
    """Reference walkthrough of the python-docx API; not used in production.

    Builds a demo document (title, styled runs, headings, quote, bullet
    and numbered list items, a 1x3 table with headers, a page break) and
    saves it as demo.docx in the current working directory.
    """
    document = Document()
    document.add_heading('Document Title', 0)
    # A paragraph mixing plain, bold and italic runs.
    p = document.add_paragraph('A plain paragraph having some ')
    p.add_run('bold').bold = True
    p.add_run(' and some ')
    p.add_run('italic.').italic = True
    document.add_heading('Heading, level 1', level=1)
    document.add_paragraph('Intense quote', style='IntenseQuote')
    document.add_paragraph(
        'first item in unordered list', style='ListBullet'
    )
    document.add_paragraph(
        'first item in ordered list', style='ListNumber'
    )
    # document.add_picture('monty-truth.png', width=Inches(1.25))
    # 1x3 table; the single created row serves as the header row.
    table = document.add_table(rows=1, cols=3)
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Qty'
    hdr_cells[1].text = 'Id'
    hdr_cells[2].text = 'Desc'
    # Example of appending data rows from a recordset:
    # for item in recordset:
    #     row_cells = table.add_row().cells
    #     row_cells[0].text = str(item.qty)
    #     row_cells[1].text = str(item.id)
    #     row_cells[2].text = item.desc
    document.add_page_break()
    document.save('demo.docx')
def writeDocx(contentList, docxName):
    """Write the first sub-list of contentList into docxName as a Word file.

    Entries are laid out in consecutive 20-row x 3-column tables, each
    cell formatted as "<n>)\t<entry>" with numbering restarting at 1 on
    every table; a page break follows each table.

    contentList -- sequence whose first element is the list of entries
    docxName    -- output .docx path

    Changes from the original: dead commented-out code and the unused
    accumulator removed; the fill loop now exits cleanly once all entries
    are written (the old inner `break` only left the column loop, letting
    the row loop spin through the remaining rows doing nothing); prints
    use the function form (identical output, Python 2/3 compatible).
    """
    document = Document()
    total = contentList[0].__len__()
    print('amount of content: ' + str(total))
    p = 0  # index of the next entry to emit
    t = 1  # table counter, for progress output only
    while p < total:
        table = document.add_table(rows=20, cols=3)
        print('table added ' + str(t))
        num = 1  # per-table cell number, restarts at 1 for each table
        for i in range(20):
            for j in range(3):
                if p >= total:
                    break
                table.rows[i].cells[j].text = str(num) + ')\t' + contentList[0][p]
                num = num + 1
                p = p + 1
            if p >= total:
                break
        print('now in the ' + str(p) + 'step')
        t = t + 1
        document.add_page_break()
    document.save(docxName)
# def id(id=0):
# return id+1
def saveExpression(Expressions, docxName):
    """Write Expressions into docxName as consecutive 25x4 tables.

    Each table holds up to 100 expressions, filled row by row, and is
    followed by a page break.

    Bug fix: the original filled a fixed 25*4 cells per table with no
    bounds check, so ``Expressions[index]`` raised IndexError whenever
    len(Expressions) was not an exact multiple of 100.  Cells past the
    end of the list are now simply left empty.
    """
    document = Document()
    total = len(Expressions)
    print('Total Expressions are: ' + str(total))
    index = 0
    while index < total:
        table = document.add_table(rows=25, cols=4)
        for i in range(25):
            for j in range(4):
                if index >= total:
                    break
                table.rows[i].cells[j].text = Expressions[index]
                index = index + 1
            if index >= total:
                break
        document.add_page_break()
    document.save(docxName)
if __name__ == "__main__":
    # Demo entry point: build the python-docx reference document
    # (demo.docx).  Print converted to function form for Python 2/3
    # compatibility; dead commented-out experiments removed.
    print('this is the main method')
    operDocx()
|
{
"content_hash": "32f1d47cb4c8022d05406a3ae0507ffc",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 154,
"avg_line_length": 32.039370078740156,
"alnum_prop": 0.4863602850823298,
"repo_name": "clobob/generate_quizs",
"id": "40cd446c6553a2820cddbb4ecd95fcb413a9a0eb",
"size": "4102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "operateDocx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9428"
}
],
"symlink_target": ""
}
|
import sys
import os
import zlib
import functools
import threading
from typing import Any, Callable, IO, Iterable, Optional, Tuple, Union, TYPE_CHECKING
from urllib.parse import urlencode
import requests
from requests.utils import super_len
if TYPE_CHECKING:
from requests_toolbelt import MultipartEncoder
from .context import Environment
from .cli.dicts import MultipartRequestDataDict, RequestDataDict
from .compat import is_windows
class ChunkedStream:
    """Abstract base for request bodies sent with chunked transfer encoding."""

    def __iter__(self) -> Iterable[Union[str, bytes]]:
        """Yield the body chunks; concrete subclasses must override this."""
        raise NotImplementedError
class ChunkedUploadStream(ChunkedStream):
    """Chunked stream that reports every emitted chunk via a callback."""

    def __init__(
        self,
        stream: Iterable,
        callback: Callable,
        event: Optional[threading.Event] = None
    ) -> None:
        self.callback = callback
        self.stream = stream
        self.event = event

    def __iter__(self) -> Iterable[Union[str, bytes]]:
        for part in self.stream:
            # Flag that at least one chunk has been produced.
            if self.event:
                self.event.set()
            self.callback(part)
            yield part
class ChunkedMultipartUploadStream(ChunkedStream):
    """Chunked stream that drains a requests_toolbelt MultipartEncoder."""

    # Bytes pulled from the encoder per yielded chunk.
    chunk_size = 100 * 1024

    def __init__(
        self,
        encoder: 'MultipartEncoder',
        event: Optional[threading.Event] = None
    ) -> None:
        self.encoder = encoder
        self.event = event

    def __iter__(self) -> Iterable[Union[str, bytes]]:
        while True:
            piece = self.encoder.read(self.chunk_size)
            # Flag activity even for the final, empty read.
            if self.event:
                self.event.set()
            if not piece:
                # Encoder exhausted.
                return
            yield piece
def as_bytes(data: Union[str, bytes]) -> bytes:
    """Encode *data* to bytes (default UTF-8) if it is text; pass bytes through."""
    return data.encode() if isinstance(data, str) else data
# Signature of body-read progress callbacks: receives the chunk that was
# just read (see _wrap_function_with_callback below).
CallbackT = Callable[[bytes], bytes]
def _wrap_function_with_callback(
func: Callable[..., Any],
callback: CallbackT
) -> Callable[..., Any]:
@functools.wraps(func)
def wrapped(*args, **kwargs):
chunk = func(*args, **kwargs)
callback(chunk)
return chunk
return wrapped
def is_stdin(file: IO) -> bool:
    """Best-effort check for whether *file* is the process's stdin."""
    try:
        descriptor = file.fileno()
    except Exception:
        # No real OS-level file descriptor -> cannot be stdin.
        return False
    return descriptor == sys.stdin.fileno()
# Seconds to wait for stdin data before warning the user; 0 disables the
# warning (see observe_stdin_for_data_thread).  Overridable via env var.
READ_THRESHOLD = float(os.getenv('HTTPIE_STDIN_READ_WARN_THRESHOLD', 10.0))
def observe_stdin_for_data_thread(env: Environment, file: IO, read_event: threading.Event) -> None:
    """Warn on stderr if stdin produces no data within READ_THRESHOLD seconds.

    Spawns a daemon watchdog thread that waits on *read_event*.  Does
    nothing on Windows, or when the threshold is configured to 0.
    """
    # Windows unfortunately does not support select() on regular files,
    # like stdin in our use case.
    # https://docs.python.org/3/library/select.html#select.select
    if is_windows:
        return None
    # A threshold of 0 disables this warning entirely.
    if READ_THRESHOLD == 0:
        return None

    def watchdog(event: threading.Event) -> None:
        if event.wait(timeout=READ_THRESHOLD):
            return
        env.stderr.write(
            f'> warning: no stdin data read in {READ_THRESHOLD}s '
            f'(perhaps you want to --ignore-stdin)\n'
            f'> See: https://httpie.io/docs/cli/best-practices\n'
        )

    # Daemon thread: never blocks interpreter shutdown, whether the user
    # exits regularly or with Ctrl-C.
    threading.Thread(target=watchdog, args=(read_event,), daemon=True).start()
def _read_file_with_selectors(file: IO, read_event: threading.Event) -> bytes:
    """Read *file* to bytes, setting *read_event* once stdin delivers data.

    On Windows, or for anything that is not stdin, this degenerates to a
    plain read.
    """
    if is_windows or not is_stdin(file):
        return as_bytes(file.read())

    import select

    # Give stdin up to READ_THRESHOLD seconds to become readable; if
    # nothing arrives the watchdog warning about misuse can fire.
    ready, _, _ = select.select([file], [], [], READ_THRESHOLD)
    if ready:
        read_event.set()
    return as_bytes(file.read())
def _prepare_file_for_upload(
    env: Environment,
    file: Union[IO, 'MultipartEncoder'],
    callback: CallbackT,
    chunked: bool = False,
    content_length_header_value: Optional[int] = None,
) -> Union[bytes, IO, ChunkedStream]:
    """Prepare a file-like body (or MultipartEncoder) for upload.

    Returns either the original object (read-wrapped to report progress
    via *callback*), the fully-read bytes (zero-length/stdin case), or a
    ChunkedStream wrapper when *chunked* is requested.
    """
    read_event = threading.Event()
    # super_len() == 0 is treated as "length unknown", i.e. stdin.
    if not super_len(file):
        if is_stdin(file):
            # Warn asynchronously if stdin stays silent for too long.
            observe_stdin_for_data_thread(env, file, read_event)
        # Zero-length -> assume stdin.
        if content_length_header_value is None and not chunked:
            # Read the whole stdin to determine `Content-Length`.
            #
            # TODO: Instead of opt-in --chunked, consider making
            # `Transfer-Encoding: chunked` for STDIN opt-out via
            # something like --no-chunked.
            # This would be backwards-incompatible so wait until v3.0.0.
            #
            file = _read_file_with_selectors(file, read_event)
    else:
        # Known length: report upload progress through the callback.
        file.read = _wrap_function_with_callback(
            file.read,
            callback
        )
    if chunked:
        from requests_toolbelt import MultipartEncoder
        if isinstance(file, MultipartEncoder):
            return ChunkedMultipartUploadStream(
                encoder=file,
                event=read_event,
            )
        else:
            return ChunkedUploadStream(
                stream=file,
                callback=callback,
                event=read_event
            )
    else:
        return file
def prepare_request_body(
    env: Environment,
    raw_body: Union[str, bytes, IO, 'MultipartEncoder', RequestDataDict],
    body_read_callback: CallbackT,
    offline: bool = False,
    chunked: bool = False,
    content_length_header_value: Optional[int] = None,
) -> Union[bytes, IO, 'MultipartEncoder', ChunkedStream]:
    """Normalize *raw_body* into something the HTTP layer can send.

    str/bytes are encoded to bytes, RequestDataDict form data is
    URL-encoded, and file-like bodies are routed through
    _prepare_file_for_upload.  In offline mode file-likes are read
    eagerly and no streaming wrappers are created.
    """
    is_file_like = hasattr(raw_body, 'read')
    if isinstance(raw_body, (bytes, str)):
        body = as_bytes(raw_body)
    elif isinstance(raw_body, RequestDataDict):
        # Form data: serialize as application/x-www-form-urlencoded.
        body = as_bytes(urlencode(raw_body, doseq=True))
    else:
        body = raw_body
    if offline:
        # Offline (--offline) never streams: just materialize the body.
        if is_file_like:
            return as_bytes(raw_body.read())
        else:
            return body
    if is_file_like:
        return _prepare_file_for_upload(
            env,
            body,
            chunked=chunked,
            callback=body_read_callback,
            content_length_header_value=content_length_header_value
        )
    elif chunked:
        # Non-file body requested as chunked: wrap it in a 1-chunk stream.
        return ChunkedUploadStream(
            stream=iter([body]),
            callback=body_read_callback
        )
    else:
        return body
def get_multipart_data_and_content_type(
    data: MultipartRequestDataDict,
    boundary: str = None,
    content_type: str = None,
) -> Tuple['MultipartEncoder', str]:
    """Build a MultipartEncoder for *data* plus the matching Content-Type.

    A caller-supplied *content_type* is preserved (with the encoder's
    boundary appended when missing); otherwise the encoder's own content
    type is used.
    """
    from requests_toolbelt import MultipartEncoder

    encoder = MultipartEncoder(
        fields=data.items(),
        boundary=boundary,
    )
    if not content_type:
        return encoder, encoder.content_type

    content_type = content_type.strip()
    if 'boundary=' not in content_type:
        content_type = f'{content_type}; boundary={encoder.boundary_value}'
    return encoder, content_type
def compress_request(
    request: requests.PreparedRequest,
    always: bool,
):
    """Deflate-compress *request*'s body in place when worthwhile.

    The body is replaced (and the Content-Encoding / Content-Length
    headers set) only when compression actually shrinks it, unless
    *always* is true.  A request without a body is left untouched —
    previously this path crashed with a TypeError on ``len(None)``.
    """
    if request.body is None:
        # Nothing to compress.
        return
    deflater = zlib.compressobj()
    if isinstance(request.body, str):
        body_bytes = request.body.encode()
    elif hasattr(request.body, 'read'):
        # File-like body: consume it fully so it can be compressed.
        body_bytes = request.body.read()
    else:
        body_bytes = request.body
    deflated_data = deflater.compress(body_bytes)
    deflated_data += deflater.flush()
    is_economical = len(deflated_data) < len(body_bytes)
    if is_economical or always:
        request.body = deflated_data
        request.headers['Content-Encoding'] = 'deflate'
        request.headers['Content-Length'] = str(len(deflated_data))
|
{
"content_hash": "7dccde0c456aab7a6c1d9d2b934decd1",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 99,
"avg_line_length": 28.78810408921933,
"alnum_prop": 0.6122159090909091,
"repo_name": "PKRoma/httpie",
"id": "4a993b3a25001ad6e1dcd5b420d8d4c772c5a5c5",
"size": "7744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpie/uploads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1148"
},
{
"name": "Makefile",
"bytes": "6658"
},
{
"name": "Python",
"bytes": "571783"
},
{
"name": "Roff",
"bytes": "31811"
},
{
"name": "Shell",
"bytes": "7629"
}
],
"symlink_target": ""
}
|
from SCons.Script import *
import os
import os.path
import glob
import platform
###############################################################################
# Modular library helpers
###############################################################################
import autoconfig
import toolsets
"""Make Root Environment
Create a root environment and set up unit tests etc.
This is some patchwork to make SCons work with platform and host-specific
configuration files, and also to allow setting up the toolsets via options.
@param additional_tools : additional tools to add
"""
def make_root_env(additional_tools=[]):
    """Create and configure the root SCons construction environment.

    Reads build options from an options file (host-specific first, then
    platform-specific, then opts.py), selects the compiler/linker toolset,
    applies project paths and autoconfig settings, and returns the
    configured root Environment.

    @param additional_tools : additional tools to add

    NOTE(review): the default [] is a shared mutable default argument; it
    is only concatenated from, never mutated, so this is harmless — but
    confirm before changing.
    """
    ###############################################################################
    # read options and configure directories
    ###############################################################################
    # default options file name
    optfilename = 'opts.py'
    # Options file for current platform
    optfilename_plat = 'opts_'+platform.uname()[0]+'_'+platform.uname()[4]+'.py'
    # Try to find options file based on hostname
    #
    optfilename_local = 'opts_'+platform.uname()[1]+'_'+platform.uname()[0]+'_'+platform.uname()[4]+'.py'
    # First choice: host-specific options file
    if len(glob.glob(optfilename_local)) > 0:
        optfilename = optfilename_local
    # Second choice: platform file
    elif len(glob.glob(optfilename_plat)) > 0:
        optfilename = optfilename_plat
    else:
        # third choice: stick with opts.py
        print 'To use specific options for this platform, use options file "'+optfilename_plat+'"'
        print 'To use specific options for this system, use options file "'+optfilename_local+'"'
    print 'Using options from ' + optfilename
    opts = Variables(optfilename)
    # these are the options that can be specified through the command line
    opts.AddVariables(
        EnumVariable('mode', 'Build mode: set to debug or release', 'debug',
                     allowed_values = ('debug', 'release'),
                     ignorecase = 1),
        BoolVariable('profile', 'Enable profiling. Also enables debug information.', 0),
        BoolVariable('runtests', 'Run tests.', 0),
        BoolVariable('debuginfo', 'Include debug information also in release version.', 1),
        ('toolset', 'Specify compiler and linker tools: msvc|gnu|clang|intel', 'gnu'),
        ('_CXX', 'Replacement CXX', ''),
        ('_CC', 'Replacement CC', ''),
        ('_LINK', 'Replacement LINK', ''),
        ('additional_lflags', 'Additional linker flags', ''),
        ('additional_cflags', 'Additional compiler flags', ''),
        ('mpiexec', 'MPI exec command for testing', ''),
        ('mpiexec_params', 'MPI exec parameters for testing', '-n 3'),
        ('MATLAB_PATH', 'Path to Matlab for mex Builder', None),
        ('MATLAB_MEX_EXTRA', 'Extra switches for mex compiler.', '')
    )
    # Keep the implicit default environment tool-free.
    SCons.Defaults.DefaultEnvironment(tools = [])
    # read options before creating root environment
    readopts = Environment(tools = [], options = opts)
    ###############################################################################
    # Set up the root environment
    ###############################################################################
    # Map the chosen toolset name to the SCons tool list.
    toolset = readopts['toolset']
    if toolset == 'msvc':
        ttools = ['msvc', 'mslib', 'mslink', 'unit_tests']
    elif toolset == 'gnu' or toolset == 'clang':
        ttools = ['gnulink', 'gcc', 'g++', 'ar', 'unit_tests']
    elif toolset == 'intel':
        ttools = ['icc', 'ilink', 'intelc' ,'ar', 'unit_tests']
    elif toolset == 'intel_windows':
        ttools = ['ilink', 'icl', 'mslib', 'unit_tests']
    else:
        print "[W] Unknown toolset " + toolset + ", using default tools"
        ttools = ['default', 'unit_tests']
    ttools = ttools + additional_tools
    ## add included options
    autoconfig.make_options(opts)
    root = Environment(
        tools = ttools,
        options = opts,
    )
    # load toolset specific implementation of PrepareEnv
    getattr(__import__('toolsets.%s' % toolset, fromlist = ['']), 'generate')(root)
    Help(opts.GenerateHelpText(root))
    ###############################################################################
    # Setup compiling parameters
    ###############################################################################
    # Project directory layout ("#" = SConstruct root).
    root.Append(
        ENV = os.environ,
        BINDIR = "#bin",
        LIBDIR = "#lib",
        SRCDIR = "#src",
    )
    # dependency optimization
    root.SetOption('max_drift', 4)
    root.SetOption('implicit_cache', 1)
    root.SetOption('diskcheck', None)
    root.Ignore('', '')
    root.Append(
        CCFLAGS = ' $CFLAGS',
        CPPPATH = ['#include', '#src'],
        LIBPATH = ['#lib'],
    )
    ###############################################################################
    # Setup debug / release mode flags
    ###############################################################################
    # NOTE(review): despite the banner above, this section only applies
    # user-supplied compiler/linker replacements from the options file.
    if root['_CXX'] != '':
        root['CXX'] = root['_CXX']
    if root['_CC'] != '':
        root['CC'] = root['_CC']
    if root['_LINK'] != '':
        root['LINK'] = root['_LINK']
    ###############################################################################
    # Setup Boost, TBB and MPI library linking
    ###############################################################################
    autoconfig.generate_env(root)
    ## additional flags not covered by any of the above
    root.Append (
        LINKFLAGS = root['additional_lflags'],
        CCFLAGS = root['additional_cflags']
    )
    ###############################################################################
    # Automatic configuration code
    ###############################################################################
    autoconfig.autoconfig( root )
    ###############################################################################
    # Set up unit testing
    ###############################################################################
    return root
|
{
"content_hash": "56a660e529789c3cb7e1d078a3266b22",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 102,
"avg_line_length": 33.101190476190474,
"alnum_prop": 0.5119582808847329,
"repo_name": "pkrusche/bsponmpi_template",
"id": "2e0a51b94264f6f8b1767322554a3c5a596dc19e",
"size": "5816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site_scons/xscons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "407"
},
{
"name": "Python",
"bytes": "34924"
}
],
"symlink_target": ""
}
|
"""Test configs for gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_gather_tests(options):
  """Make a set of tests to do gather.

  Covers tf.gather over numeric and string params, several index shapes,
  positive/negative axes, and (with the experimental converter) non-zero
  batch_dims.
  """
  test_parameters = [{
      "params_dtype": [tf.float32, tf.int32, tf.int64],
      "params_shape": [[1, 2, 20]],
      "indices_dtype": [tf.int32, tf.int64],
      "indices_shape": [[3], [5]],
      "axis": [-1, 0, 1],
      "batch_dims": [0],
      "constant_params": [False, True],
  }, {
      # String params are only exercised along axis 0.
      "params_dtype": [tf.string],
      "params_shape": [[8]],
      "indices_dtype": [tf.int32],
      "indices_shape": [[3], [3, 2]],
      "axis": [0],
      "batch_dims": [0],
      "constant_params": [False, True],
  }]
  if options.use_experimental_converter:
    test_parameters = test_parameters + [
        # Test with batch_dims.
        {
            "params_dtype": [tf.float32, tf.int32],
            "params_shape": [[2, 2, 3, 5]],
            "indices_dtype": [tf.int32],
            "indices_shape": [[2, 2, 2]],
            "axis": [0, 2],
            "batch_dims": [1, 2],
            "constant_params": [False, True],
        }
    ]

  def build_graph(parameters):
    """Build the gather op testing graph."""
    inputs = []
    if parameters["constant_params"]:
      # Constant params are baked into the graph; only indices are fed.
      params = create_tensor_data(parameters["params_dtype"],
                                  parameters["params_shape"])
    else:
      params = tf.compat.v1.placeholder(
          dtype=parameters["params_dtype"],
          name="params",
          shape=parameters["params_shape"])
      inputs.append(params)
    indices = tf.compat.v1.placeholder(
        dtype=parameters["indices_dtype"],
        name="indices",
        shape=parameters["indices_shape"])
    inputs.append(indices)
    # Cap the configured axis at the params rank; negative and in-range
    # axes pass through min() unchanged.
    axis = min(len(parameters["params_shape"]), parameters["axis"])
    out = tf.gather(
        params, indices, axis=axis, batch_dims=parameters["batch_dims"])
    return inputs, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Generate random feed data matching the placeholders from build_graph."""
    input_values = []
    if not parameters["constant_params"]:
      params = create_tensor_data(parameters["params_dtype"],
                                  parameters["params_shape"])
      input_values.append(params)
    # Indices are drawn from [0, params_shape[0] - 1] so axis-0 gathers
    # stay in range.
    indices = create_tensor_data(parameters["indices_dtype"],
                                 parameters["indices_shape"], 0,
                                 parameters["params_shape"][0] - 1)
    input_values.append(indices)
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)
|
{
"content_hash": "e202e2ba9f0f507c0672bdfbd670b4a4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 33.24444444444445,
"alnum_prop": 0.5812165775401069,
"repo_name": "frreiss/tensorflow-fred",
"id": "bfb1d619f724c0b104db036fd66d25b594b13f4b",
"size": "3681",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/lite/testing/op_tests/gather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from subcollections.models import Collection, Syndication
class CollectionAdmin(admin.ModelAdmin):
    """Admin configuration for Collection objects."""

    # Columns shown in the changelist.
    list_display = ('id', 'title', 'is_public', 'is_syndicated', 'updated')
    search_fields = ['title', 'description', 'author']
    # Items are edited via the raw-id widget (potentially large m2m).
    raw_id_fields = ('items',)
    # Authorship and publication/syndication bookkeeping are
    # system-managed, never hand-edited.
    readonly_fields = ('author', 'updated', 'created', 'edit_url',
                       'last_published', 'date_published', 'last_syndicated',
                       'syndication_result')
    # NOTE(review): autocomplete_lookup_fields appears to target the
    # django-grappelli admin skin — confirm before removing.
    autocomplete_lookup_fields = {
        'm2m': ['items'],
        'fk': ['agent'],
    }

    def save_model(self, request, obj, form, change):
        # Record the saving user as the collection's author on every save.
        obj.author = request.user
        obj.save()


admin.site.register(Collection, CollectionAdmin)
class SyndicationAdmin(admin.ModelAdmin):
    """Default (uncustomized) admin for Syndication objects."""
    pass


admin.site.register(Syndication, SyndicationAdmin)
|
{
"content_hash": "d024aac7dfd953106c5256c404fc0a0c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 28.310344827586206,
"alnum_prop": 0.6516443361753959,
"repo_name": "uq-eresearch/uqam",
"id": "7ca5f27b3a5dad674e9d1b659e0814220883aa57",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subcollections/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "117676"
},
{
"name": "HTML",
"bytes": "108660"
},
{
"name": "JavaScript",
"bytes": "977528"
},
{
"name": "Python",
"bytes": "1297328"
},
{
"name": "Shell",
"bytes": "24566"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
import unittest
from .google_settings import get_file_picker_settings
class TestGoogleSettings(unittest.TestCase):
    """Tests for the Google Drive file-picker settings."""

    def setUp(self):
        # Seed the Google Settings singleton with known credentials.
        settings = frappe.get_single("Google Settings")
        settings.client_id = "test_client_id"
        settings.app_id = "test_app_id"
        settings.api_key = "test_api_key"
        settings.save()

    def _toggle_flags(self, enable, picker_enabled):
        # Flip the two flags that gate the Drive file picker.
        frappe.db.set_value("Google Settings", None, "enable", enable)
        frappe.db.set_value(
            "Google Settings", None, "google_drive_picker_enabled", picker_enabled)

    def test_picker_disabled(self):
        """Google Drive Picker should be disabled if it is not enabled in Google Settings."""
        self._toggle_flags(1, 0)
        self.assertEqual(get_file_picker_settings(), {})

    def test_google_disabled(self):
        """Google Drive Picker should be disabled if Google integration is not enabled."""
        self._toggle_flags(0, 1)
        self.assertEqual(get_file_picker_settings(), {})

    def test_picker_enabled(self):
        """If picker is enabled, get_file_picker_settings should return the credentials."""
        self._toggle_flags(1, 1)
        result = get_file_picker_settings()
        self.assertEqual(True, result.get("enabled", False))
        self.assertEqual("test_client_id", result.get("clientId", ""))
        self.assertEqual("test_app_id", result.get("appId", ""))
        self.assertEqual("test_api_key", result.get("developerKey", ""))
|
{
"content_hash": "c132baabfddec19bcb6d0067078d545f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 37.976190476190474,
"alnum_prop": 0.722257053291536,
"repo_name": "mhbu50/frappe",
"id": "cddf9f3697fd1eabcaf3d7437c14361660f49a9e",
"size": "1706",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/integrations/doctype/google_settings/test_google_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|
"""
Examples to use client api.
"""
from json import dumps
from os import environ
import requests
# Base URL of the TestCube server and the REST API root derived from it.
server = 'http://127.0.0.1:8000/'
root = server + 'api/'
# Calling the client API requires a valid user; register one first.
auth = ('admin', 'admin')
def test_register_client():
    """use this api to register as a testcube client, it will return a name and token."""
    endpoint = server + 'client-register/'
    payload = {
        'client_type': 'testcube_pytest_client',  # must in this format 'testcube_xxx_client'
        'client_name': 'test_client',  # special chars and space are not allowed
        'client_user': 'test',
        'platform': 'windows'
    }
    # Registration does not require auth.
    reply = requests.post(url=endpoint, data=payload).json()
    print(reply)
    assert 'client' in reply
    assert 'token' in reply
def test_start_run():
    """use this api to start a run, will return run info if success."""
    endpoint = root + 'runs/start/'
    payload = {
        'name': 'my test run',
        'product': {
            'name': 'TestCube',
            'team': {
                'name': 'ATeam'
            }
        },
        # optional, if provided will be a link in run page
        'source': {
            'name': 'Jenkins',
            'link': 'http://jenkins/run'
        },
        # optional, run variables can be saved to reset purpose
        'variables': dumps(dict(environ)),
    }
    resp = requests.post(url=endpoint, auth=auth, json=payload)
    outcome = resp.json()
    assert outcome['success'], resp.text
    print(outcome['run'])
def test_stop_run():
    """call this api to update run status & state once run finished."""
    endpoint = root + 'runs/stop/'
    payload = {
        'run_id': 13,
        'state': 3,   # default 3=>completed, (2=aborted)
        'status': 1,  # default 1=>failed, (0=passed, 3=abandoned)
    }
    resp = requests.post(url=endpoint, auth=auth, json=payload)
    outcome = resp.json()
    assert outcome['success'], resp.text
    print(outcome['run'])
def test_add_test_result():
    """use this api to create a test result for a run."""
    endpoint = root + 'results/new/'
    payload = {
        'run_id': 13,
        'outcome': 1,  # 0=passed, 1=failed, 2=skipped, 3=error, 5=pending'
        'stdout': 'my test output',
        'duration': 23.5,  # float, in seconds
        'testcase': {
            'name': 'VerifyLoginFailed',
            'full_name': 'tests.login_tests.VerifyLoginFailed',
            'description': 'ECS-1234, optional',
        },
        'test_client': {
            'name': 'test-agent1',
            'platform': 'windows 10.1',
        },
        'error': {
            'exception_type': 'AssertError',
            'message': 'the message of exception',
            'stacktrace': 'the stack trace info',
        }
    }
    resp = requests.post(url=endpoint, auth=auth, json=payload)
    outcome = resp.json()
    assert outcome['success'], resp.text
    print(outcome['result'])
def test_add_result_file():
    """
    use this api to add a result file to a run.

    to link result file to result, the file name should contains testcase name.
    e.g. test1.png will link to result of test1 in the run.
    """
    api = 'result_files/new/'
    data = {'run_id': 13, 'case_full_name': 'tests.login.test1'}
    # Use a context manager so the file handle is closed even when the
    # request fails (the original left the handle open forever).
    with open('./data/test1.png', 'rb') as upload:
        response = requests.post(url=root + api,
                                 auth=auth,
                                 data=data,
                                 files={'file': upload})
    result = response.json()
    assert result['success'], response.text
    result_file = result['file']
    print(result_file)
def test_reset_test_result():
    """use this api to update a reset result."""
    endpoint = root + 'reset_results/new/'
    payload = {
        'reset_id': 1,
        'outcome': 1,  # 0=passed, 1=failed, 2=skipped, 3=error, 5=pending'
        'stdout': 'my test output',
        'duration': 30.5,  # float, in seconds
        'testcase': {
            'full_name': 'tests.etown.verify_login._.VerifyEtownLoginInvalidStudent'
        },
        'test_client': {
            'name': 'test-agent1',
            'platform': 'windows 10.1',
        },
        'error': {
            'exception_type': 'AssertError',
            'message': 'the message of exception',
            'stacktrace': 'the stack trace info',
        }
    }
    resp = requests.post(url=endpoint, auth=auth, json=payload)
    outcome = resp.json()
    assert outcome['success'], resp.text
    print(outcome['reset'])
|
{
"content_hash": "dad39a4fb2f2371cf9d3e1b75d6147cc",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 93,
"avg_line_length": 28.232558139534884,
"alnum_prop": 0.528830313014827,
"repo_name": "tobyqin/testcube",
"id": "0ce22ced749307410fe257e1318437d8681ffd02",
"size": "4856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testcube/tests/client_api_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "262"
},
{
"name": "CSS",
"bytes": "180930"
},
{
"name": "Dockerfile",
"bytes": "302"
},
{
"name": "HTML",
"bytes": "30516"
},
{
"name": "JavaScript",
"bytes": "328876"
},
{
"name": "Python",
"bytes": "126583"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
}
|
from chainer.dataset.tabular import _asmode # NOQA
from chainer.dataset.tabular import _concat # NOQA
from chainer.dataset.tabular import _join # NOQA
from chainer.dataset.tabular import _slice # NOQA
from chainer.dataset.tabular import _transform # NOQA
from chainer.dataset.tabular import _with_converter # NOQA
from chainer.dataset.tabular.delegate_dataset import DelegateDataset # NOQA
from chainer.dataset.tabular.from_data import from_data # NOQA
|
{
"content_hash": "f2d705bfdd497b0c3bbc82ea3fa87f58",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 51.333333333333336,
"alnum_prop": 0.7987012987012987,
"repo_name": "chainer/chainer",
"id": "eb78935b0e2c24957c27dca4683cef4c496ccf6b",
"size": "462",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chainer/dataset/tabular/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3805"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6102"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6431941"
},
{
"name": "Shell",
"bytes": "50151"
}
],
"symlink_target": ""
}
|
import base64
import hashlib
import json
import os
import demistomock as demisto
import requests
from CommonServerPython import *
from CommonServerUserPython import *
handle_proxy()
# Integration parameters, read once at module load from the instance settings.
VERIFY_CERTIFICATE = not demisto.params().get('insecure', False)
URL = demisto.params()['server']
XML_NS = demisto.params()['xml_ns']  # XML namespace substituted into the SOAP templates
USERNAME = demisto.params()['username']
PASSWORD = demisto.params()['password']
# Static headers for every SOAP POST; SOAPAction is intentionally empty.
HEADERS = {
    'Content-Type': 'text/xml',
    'SOAPAction': ''
}
GET_TICKET_BODY = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:v1="{xml_ns}">
<soapenv:Header>
<wsse:Security soapenv:mustUnderstand="1"
xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<wsse:UsernameToken wsu:Id=\"\">
<wsse:Username>sams</wsse:Username>
<wsse:Password
Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordDigest">
{password_digest}</wsse:Password>
<wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#
Base64Binary">{base64_binary}</wsse:Nonce>
<wsu:Created>{req_time}</wsu:Created>
</wsse:UsernameToken>
</wsse:Security>
</soapenv:Header>
<soapenv:Body>
<v1:get>
<!--Optional:-->
<v1:GetServiceRequestRequest>
<Header>
<Requester>?</Requester>
<Submitter>
<Type>Remedy</Type>
<Value>sams</Value>
</Submitter>
<TimeStamp>
<Date>{date}</Date>
<Time>{time}</Time>
<TimeZone>UTC</TimeZone>
</TimeStamp>
<TransactionId>1</TransactionId>
</Header>
<Body>
<!--Optional:-->
<ResponseOptions>
<ShowAssignment>?</ShowAssignment>
<ShowAttributeList>?</ShowAttributeList>
<ShowCategorization>?</ShowCategorization>
<ShowWorklogList>?</ShowWorklogList>
</ResponseOptions>
<!--Optional:-->
<ServiceRequestId>{service_request_id}</ServiceRequestId>
</Body>
</v1:GetServiceRequestRequest>
</v1:get>
</soapenv:Body>
</soapenv:Envelope>"""
CREATE_TICKET_BODY = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:v1="{xml_ns}">
<soapenv:Header> <wsse:Security soapenv:mustUnderstand="1"
xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<wsse:UsernameToken wsu:Id=\"\">
<wsse:Username>sams</wsse:Username>
<wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0
#PasswordDigest">{password_digest}</wsse:Password>
<wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0
#Base64Binary">{base64_binary}</wsse:Nonce>
<wsu:Created>{req_time}</wsu:Created>
</wsse:UsernameToken>
</wsse:Security>
</soapenv:Header>
<soapenv:Body>
<v1:create>
<!--Optional:-->
<v1:CreateServiceRequestRequest>
<Header>
<Requester>?</Requester>
<Submitter>
<Type>Remedy</Type>
<Value>!svcautomationdev</Value>
</Submitter>
<TimeStamp>
<Date>{date}</Date>
<Time>{time}</Time>
<TimeZone>UTC</TimeZone>
</TimeStamp>
<TransactionId>1</TransactionId>
</Header>
<Body>
<!--Zero or more repetitions:-->
<!--Zero or more repetitions:-->
<AttributeList>
<Label/>
<Value/>
<Type/>
</AttributeList>
<!--Optional:-->
<CauseCode/>
<!--Optional:-->
<Details>{details}</Details>
<MasterTicket>True</MasterTicket>
<!--Optional:-->
<NextAction>test</NextAction>
<!--Optional:-->
<PendingReason>New</PendingReason>
<!--Optional:-->
<ProblemCode>47103</ProblemCode>
<!--Optional:-->
<Requester>
<!--Optional:-->
<RequesterNTId>{requester_ntid}</RequesterNTId>
<!--Optional:-->
<RequesterPERNR>{requester_pernr}</RequesterPERNR>
</Requester>
<!--Optional:-->
<RequesterContactInformation>
<!--Optional:-->
<ContactInformation>
<!--Optional:-->
<ContactEmail>{contact_email}</ContactEmail>
<!--Optional:-->
<ContactName>{contact_name}</ContactName>
<!--Optional:-->
<ContactPhone>{contact_phone}</ContactPhone>
</ContactInformation>
<!--Optional:-->
<RequesterEmail>{requester_email}</RequesterEmail>
<!--Optional:-->
<RequesterLocation/>
<!--Optional:-->
<RequesterName>{requester_name}</RequesterName>
<!--Optional:-->
<RequesterPhone>{requester_phone}</RequesterPhone>
<!--Optional:-->
<RequesterWorkCity>{requester_work_city}</RequesterWorkCity>
<!--Optional:-->
<RequesterWorkLocation>{requester_work_location}</RequesterWorkLocation>
<!--Optional:-->
<RequesterWorkStreet>{requester_work_street}</RequesterWorkStreet>
</RequesterContactInformation>
<!--Optional:-->
<SolutionCode/>
<Source>Web</Source>
<SourceReference>Demisto</SourceReference>
<!--Optional:-->
<Status>Pending</Status>
<!--Zero or more repetitions:-->
<WorklogList>
<!--Optional:-->
<Details>test</Details>
<!--Optional:-->
<Subject>test</Subject>
</WorklogList>
</Body>
</v1:CreateServiceRequestRequest>
</v1:create>
</soapenv:Body>
</soapenv:Envelope>"""
def http_request(body=''):  # pragma: no cover
    '''POST the SOAP `body` to the configured server and return the response
    parsed into a dict (XML converted to JSON).

    Non-2xx responses and SOAP faults are reported via return_error, which
    aborts the command with a user-facing message.
    '''
    response = requests.post(URL, data=body, headers=HEADERS, verify=VERIFY_CERTIFICATE)
    if response.status_code < 200 or response.status_code >= 300:
        if response.status_code == 404:
            return_error('Request Failed. with status: 404. Cannot find the requested resource. Check your Server URL')
        elif response.status_code == 500:
            # 500 responses carry a SOAP fault body; surface its faultstring.
            json_result = json.loads(xml2json(response.content))
            return_error('Request Failed. with status: ' + str(response.status_code) + '. Reason is: ' + str(
                json_result['Envelope']['Body']['Fault']['faultstring']))
        else:
            return_error(
                'Request Failed. with status: ' + str(response.status_code) + '. Reason is: ' + str(response.reason))
    json_result = json.loads(xml2json(response.content))
    # A 200 response may still contain a SOAP fault; check before returning.
    if 'Envelope' in json_result:
        if 'Body' in json_result['Envelope']:
            if 'Fault' in json_result['Envelope']['Body']:
                return_error('Request Failed. Reason is: ' + json_result['Envelope']['Body']['Fault']['faultstring'])
    return json_result
def prettify_get_ticket(json_result):
    """Flatten a get-ticket SOAP response into a summary dict.

    Calls return_error (which aborts) when the response body is empty,
    reporting the response header instead.
    """
    soap_return = json_result['Envelope']['Body']['getResponse']['return']
    ticket = soap_return['Body']
    if not ticket:
        return_error(soap_return['Header'])
    # Fields that are always present in a successful response.
    pretty_ticket = {
        'ServiceRequestId': ticket['ServiceRequestId'],
        'ServiceRequestStatus': ticket['ServiceRequestStatus'],
        'Priority': ticket['Priority'],
    }
    if 'Created' in ticket and 'When' in ticket['Created']:
        when = ticket['Created']['When']
        pretty_ticket['Date'] = when['Date']
        pretty_ticket['Time'] = when['Time']
    # Optional top-level fields, copied verbatim when present.
    for field in ('Details', 'SourceReference'):
        if field in ticket:
            pretty_ticket[field] = ticket[field]
    if 'RequesterContactInformation' in ticket:
        contact = ticket['RequesterContactInformation']
        for field in ('RequesterEmail', 'RequesterName', 'RequesterPhone',
                      'RequesterWorkCity', 'RequesterWorkLocation',
                      'RequesterWorkStreet'):
            if field in contact:
                pretty_ticket[field] = contact[field]
        if 'ContactInformation' in contact:
            inner = contact['ContactInformation']
            for field in ('ContactEmail', 'ContactPhone', 'ContactName'):
                if field in inner:
                    pretty_ticket[field] = inner[field]
    return pretty_ticket
@logger
def remedy_get_ticket(service_request_id):
    """Fetch a service request by id via the SOAP API.

    Builds a WS-Security UsernameToken header where
    PasswordDigest = Base64(SHA-1(nonce + created + password)).
    """
    now = datetime.utcnow()
    created = now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
    nonce = os.urandom(16)
    digest = hashlib.sha1(nonce + created.encode("utf-8") + PASSWORD.encode("utf-8")).digest()  # nosec
    body = GET_TICKET_BODY.format(
        xml_ns=XML_NS,
        password_digest=base64.b64encode(digest).decode("ascii"),
        base64_binary=base64.b64encode(nonce).decode("ascii"),
        req_time=created,
        date=now.strftime('%Y-%m-%d'),
        time=now.strftime('%H:%M:%S'),
        service_request_id=service_request_id)
    return http_request(body)
def remedy_get_ticket_command():
    """demisto command: fetch a ticket by service_request_id and output a
    summary table plus the Remedy.Ticket entry context."""
    service_request_id = demisto.args()['service_request_id']
    response = remedy_get_ticket(service_request_id)
    pretty_ticket = prettify_get_ticket(response)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Ticket:', pretty_ticket,
                                         ['ServiceRequestId', 'Priority', 'ServiceRequestStatus', 'RequesterEmail',
                                          'RequesterName', 'RequesterPhone']),
        'EntryContext': {
            "Remedy.Ticket(val.ServiceRequestId == obj.ServiceRequestId)": pretty_ticket
        }
    })
def prettify_create_ticket(json_result, requester_phone, requester_name, requester_email):
    """Build a flat summary dict for a newly created ticket.

    Calls return_error (which aborts) when the response body is empty,
    reporting the response header instead.
    """
    soap_return = json_result['Envelope']['Body']['createResponse']['return']
    ticket = soap_return['Body']
    if not ticket:
        return_error(soap_return['Header'])
    return {
        'ServiceRequestId': ticket['ServiceRequestId'],
        'RequesterPhone': requester_phone,
        'RequesterName': requester_name,
        'RequesterEmail': requester_email,
    }
@logger
def remedy_create_ticket(details, requester_ntid, requester_email, requester_name,
                         requester_phone, requester_work_city, requester_work_location,
                         requester_work_street, requester_pernr='?',
                         contact_email='?', contact_name='?', contact_phone='?'):
    """Create a service request via the SOAP API and return the parsed response.

    Optional fields default to '?', the template placeholder the endpoint
    expects for "not provided".
    """
    now = datetime.utcnow()
    # WS-Security Created timestamp: ISO-8601, millisecond precision, UTC ('Z').
    req_time = now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
    date = now.strftime('%Y-%m-%d')
    time = now.strftime('%H:%M:%S')
    nonce = os.urandom(16)
    base64_binary = base64.b64encode(nonce).decode("ascii")
    # Password_Digest = Base64 (SHA-1 (nonce + createtime + password))
    hash_object = hashlib.sha1(nonce + req_time.encode("utf-8") + PASSWORD.encode("utf-8"))  # nosec
    digest_string = hash_object.digest()
    password_digest = base64.b64encode(digest_string).decode("ascii")
    body = CREATE_TICKET_BODY.format(xml_ns=XML_NS, password_digest=password_digest, base64_binary=base64_binary,
                                     req_time=str(req_time), date=date, time=time, details=details,
                                     requester_ntid=requester_ntid, requester_email=requester_email,
                                     requester_name=requester_name, requester_phone=requester_phone,
                                     requester_work_city=requester_work_city,
                                     requester_work_location=requester_work_location,
                                     requester_work_street=requester_work_street, requester_pernr=requester_pernr,
                                     contact_email=contact_email, contact_name=contact_name,
                                     contact_phone=contact_phone)
    response = http_request(body)
    return response
def remedy_create_ticket_command():
    """demisto command: create a ticket from the command arguments and
    return the response with a summary table and entry context.

    Required args: details, requester_ntid, requester_email, requester_name,
    requester_phone, requester_work_city, requester_work_location,
    requester_work_street. The remaining args are optional.
    """
    args = demisto.args()
    details = args['details']
    requester_ntid = args['requester_ntid']
    # Optional arguments: dict.get() yields None when the key is absent,
    # matching the previous explicit "key in args" checks in one lookup.
    requester_pernr = args.get('requester_pernr')
    contact_email = args.get('contact_email')
    contact_name = args.get('contact_name')
    contact_phone = args.get('contact_phone')
    requester_email = args['requester_email']
    requester_name = args['requester_name']
    requester_phone = args['requester_phone']
    requester_work_city = args['requester_work_city']
    requester_work_location = args['requester_work_location']
    requester_work_street = args['requester_work_street']
    response = remedy_create_ticket(details, requester_ntid, requester_email, requester_name,
                                    requester_phone, requester_work_city, requester_work_location,
                                    requester_work_street, requester_pernr,
                                    contact_email, contact_name, contact_phone)
    pretty_ticket = prettify_create_ticket(response, requester_phone, requester_name, requester_email)
    # Full entry context echoes every input plus the id assigned by the server.
    ec_create = {
        'ServiceRequestId': response['Envelope']['Body']['createResponse']['return']['Body']['ServiceRequestId'],
        'Details': details,
        'RequesterNTID': requester_ntid,
        'RequesterPERNR': requester_pernr,
        'RequesterEmail': requester_email,
        'RequesterName': requester_name,
        'RequesterPhone': requester_phone,
        'RequesterWorkCity': requester_work_city,
        'RequesterWorkLocation': requester_work_location,
        'RequesterWorkStreet': requester_work_street,
        'ContactEmail': contact_email,
        'ContactName': contact_name,
        'ContactPhone': contact_phone
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Ticket:', pretty_ticket,
                                         ['ServiceRequestId', 'RequesterEmail', 'RequesterName', 'RequesterPhone']),
        'EntryContext': {
            "Remedy.Ticket(val.ServiceRequestId == obj.ServiceRequestId)": ec_create
        }
    })
def remedy_update_ticket_command():
    """Deprecated: updating tickets is no longer supported by this integration."""
    raise DemistoException("This is a deprecated command")
''' EXECUTION CODE '''
LOG('command is %s' % (demisto.command(),))
try:
    if demisto.command() == 'test-module':
        # Integration health check: fetch a known ticket id; success -> 'ok'.
        remedy_get_ticket('SR000552078')
        demisto.results('ok')
    elif demisto.command() == 'remedy-get-ticket':
        remedy_get_ticket_command()
    elif demisto.command() == 'remedy-create-ticket':
        remedy_create_ticket_command()
    elif demisto.command() == 'remedy-update-ticket':
        remedy_update_ticket_command()
except Exception as e:
    # Surface any failure as a user-facing error entry.
    return_error(str(e))
|
{
"content_hash": "17f8473997c87fcbe51595582eb37ab5",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 120,
"avg_line_length": 45.19804400977995,
"alnum_prop": 0.5695661581737531,
"repo_name": "VirusTotal/content",
"id": "3d79d52422c977d744515e79481eee95b2681a8d",
"size": "18486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/remedy_SR/Integrations/remedy_SR/remedy_SR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from anndata import read_csv, read_h5ad, read_loom, read_text
from ._anndata import (
get_from_registry,
register_tensor_from_anndata,
setup_anndata,
transfer_anndata_setup,
view_anndata_setup,
)
from ._datasets import (
annotation_simulation,
brainlarge_dataset,
breast_cancer_dataset,
cortex,
dataset_10x,
frontalcortex_dropseq,
mouse_ob_dataset,
pbmc_dataset,
pbmcs_10x_cite_seq,
prefrontalcortex_starmap,
purified_pbmc_dataset,
retina,
seqfish,
seqfishplus,
smfish,
spleen_lymph_cite_seq,
synthetic_iid,
)
from ._preprocessing import organize_cite_seq_10x, poisson_gene_selection
__all__ = [
"setup_anndata",
"get_from_registry",
"view_anndata_setup",
"poisson_gene_selection",
"organize_cite_seq_10x",
"pbmcs_10x_cite_seq",
"spleen_lymph_cite_seq",
"dataset_10x",
"purified_pbmc_dataset",
"brainlarge_dataset",
"synthetic_iid",
"pbmc_dataset",
"cortex",
"seqfish",
"seqfishplus",
"smfish",
"breast_cancer_dataset",
"mouse_ob_dataset",
"retina",
"prefrontalcortex_starmap",
"frontalcortex_dropseq",
"annotation_simulation",
"transfer_anndata_setup",
"register_tensor_from_anndata",
"read_h5ad",
"read_csv",
"read_loom",
"read_text",
]
|
{
"content_hash": "c870cc2711c63edaec82cd569a8ebdda",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 22.35,
"alnum_prop": 0.6413124533929903,
"repo_name": "YosefLab/scVI",
"id": "d30ffe2cdaf1b843568445c91c7c2e6dc1de11e5",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scvi/data/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "582001"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import ckeditor.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: sets the Temas plural verbose name,
    # adds Biblioteca.tipo_documento (nullable choice field) and converts
    # Biblioteca.descripcion to a CKEditor rich-text field.
    dependencies = [
        ('biblioteca', '0002_auto_20160328_2122'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='temas',
            options={'verbose_name_plural': 'Temas'},
        ),
        migrations.AddField(
            model_name='biblioteca',
            name='tipo_documento',
            field=models.IntegerField(blank=True, null=True, choices=[(1, b'Documento para biblioteca'), (2, b'Informe privado')]),
        ),
        migrations.AlterField(
            model_name='biblioteca',
            name='descripcion',
            field=ckeditor.fields.RichTextField(null=True, verbose_name=b'Sinopsis', blank=True),
        ),
    ]
|
{
"content_hash": "be2256c48974c4d5c9dc7788cc1ed8a8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 131,
"avg_line_length": 29.75,
"alnum_prop": 0.6002400960384153,
"repo_name": "ErickMurillo/plataforma_fadcanic",
"id": "6b8e1c2c56a7428131257b8ce260eab7592324a1",
"size": "857",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "biblioteca/migrations/0003_auto_20160422_2231.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266187"
},
{
"name": "HTML",
"bytes": "465808"
},
{
"name": "JavaScript",
"bytes": "1047064"
},
{
"name": "Python",
"bytes": "370621"
}
],
"symlink_target": ""
}
|
import os
import sys
import getpass
import subprocess
try:
from fabricate import *
except ImportError, e:
print "Couldn't find the fabricate module."
sys.exit(1)
# Build configuration: sources for the two programs and shared compiler flags.
sender_sources = ['sender.c']
receiver_sources = ['receiver.c']
includes = []
cflags = ['-ggdb', '-O2', '-fPIC',
          '-std=gnu99', '-Wall', '-Werror'] + includes
def build():
    """Compile and link both the sender and the receiver binaries."""
    for sources, target in ((sender_sources, "sender"),
                            (receiver_sources, "receiver")):
        link(compile(sources), target)
def compile(sources):
    """Compile each .c source to a .o object file; return the object names."""
    # NOTE(review): this function shadows the `compile` builtin; renaming it
    # would also require updating build().
    for source in sources:
        run('gcc', cflags, '-c', source, '-o', source.replace('.c', '.o'))
    objects = [s.replace('.c', '.o') for s in sources]
    return objects
def link(objects, target):
    """Link the given object files into the `target` executable."""
    run('gcc', objects, '-o', target)
def clean():
    """Remove all build outputs recorded by fabricate."""
    autoclean()
if __name__ == '__main__':
    main()  # fabricate's main() dispatches build()/clean() from argv
|
{
"content_hash": "5de86dd04c8dabff7c26fe0312410f99",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 68,
"avg_line_length": 19.55,
"alnum_prop": 0.6470588235294118,
"repo_name": "kklt92/NetworkingCodeExamples",
"id": "69d3fd311eaddb575bafae42201b5b4db722e443",
"size": "801",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PacketProcessing/EthernetParsing/runtime/test/build.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "58027"
},
{
"name": "Java",
"bytes": "137076"
},
{
"name": "Makefile",
"bytes": "1768"
},
{
"name": "Python",
"bytes": "23402"
},
{
"name": "Shell",
"bytes": "3617"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
# Root URLconf: namespaced app includes; the catch-all home include stays last.
urlpatterns = [
    url(r'^event/', include('sedastrela.events.urls', namespace='events')),
    url(r'^news/', include('sedastrela.news.urls', namespace='news')),
    url(r'^page/', include('sedastrela.pages.urls', namespace='pages')),
    url(r'^', include('sedastrela.home.urls', namespace='home')),
]
|
{
"content_hash": "fb547b3d944e8b1dcc90d6b3c8180a88",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 34.9,
"alnum_prop": 0.664756446991404,
"repo_name": "ondrejsika/sedastrela",
"id": "93a3d723458c506bc39d561288ff8d8c6fe6b28d",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sedastrela/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79180"
},
{
"name": "HTML",
"bytes": "22957"
},
{
"name": "JavaScript",
"bytes": "14988"
},
{
"name": "Python",
"bytes": "16657"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
import flopy
# Load an existing MODFLOW-2005 test model, rewrite its input files into
# ./data, then run it with the 'mf2005dbl' executable.
pth = os.path.join('..', 'data', 'mf2005_test')
opth = os.path.join('data')
mname = 'tr2k_s3'
ml = flopy.modflow.Modflow.load(mname, version='mf2005', model_ws=pth, verbose=True)
ml.change_model_ws(opth)  # redirect the model workspace before writing
ml.write_input()
ml.set_exename('mf2005dbl')
ml.run_model()
print('finished...')
|
{
"content_hash": "ebe0bd25ad2d395dd5f91d3799da6606",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 17.526315789473685,
"alnum_prop": 0.6876876876876877,
"repo_name": "mrustl/flopy",
"id": "6fdfe659cb059570dfd75a2a716b1bcf671bdfa0",
"size": "333",
"binary": false,
"copies": "5",
"ref": "refs/heads/kwb",
"path": "examples/Testing/flopy3_loadSUB.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "1772821"
},
{
"name": "Visual Basic",
"bytes": "3938"
}
],
"symlink_target": ""
}
|
"""This mod flips the the data along x or y direction."""
from colorview2d import imod
class Flip(imod.IMod):
    """Mod that mirrors the data left-right or up-down."""
    def __init__(self):
        imod.IMod.__init__(self)
        # Single boolean argument; True selects the left-right flip.
        self.default_args = (True,)

    def do_apply(self, data, modargs):
        if not modargs:
            data.flip_ud()
        else:
            data.flip_lr()
|
{
"content_hash": "126ab20ff3335ab2e80aee8788b4e3ea",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 19.45,
"alnum_prop": 0.5398457583547558,
"repo_name": "Loisel/colorview2d",
"id": "e7afb320978b8f11a963442b7c5e179ace2ba4c9",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colorview2d/mods/Flip.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "7642"
},
{
"name": "Python",
"bytes": "97807"
},
{
"name": "Shell",
"bytes": "2174"
}
],
"symlink_target": ""
}
|
import decimal, logging
from django.db import models
from shops.models import Shop
from preferences.models import ShippingItem, ShippingPrice, ShippingWeight, TaxState
class ShippingCalculator():
    """Computes the shipping charge (and shipping tax) for a cart."""
    @classmethod
    def get_charge(cls, cart):
        """Return the shipping charge for `cart`.

        The charge is the maximum of the price-, weight- and item-based
        shipping methods configured for the shop; state tax is added when
        the shop applies tax to shipping for the destination state.
        """
        total_price = cart.total()
        total_weight = cart.total_weight()
        total_items = cart.total_items()
        charge_for_price = ShippingPrice.calculate_charge(cart.shop, total_price)
        #logging.debug("Charge for price: %s" % charge_for_price)
        charge_for_weight = ShippingWeight.calculate_charge(cart.shop, total_weight)
        #logging.debug("Charge for weight: %s" % charge_for_weight)
        charge_for_items = ShippingItem.calculate_charge(cart.shop, total_items)
        #logging.debug("Charge for items: %s" % charge_for_items)
        #return the largest charge of available shipping methods
        maxim = max([charge_for_items, charge_for_price, charge_for_weight])
        logging.debug("Charge for shipping that will be aplied: %s" % maxim)
        try:
            # NOTE(review): assumes cart.shippingdata exists; if it does not,
            # this attribute access raises outside the except clause below --
            # verify callers guarantee shipping data is set.
            state = cart.shippingdata.state
            tax_rate = TaxState.objects.filter(shop=cart.shop).filter(state=state).get()
            if tax_rate.apply_tax_to_shipping:
                logging.debug("Shop %s applies a tax of %s%% to shippings to %s" % (cart.shop, tax_rate.tax, state))
                tax_for_shipping = maxim * (tax_rate.tax / 100)
                return maxim + tax_for_shipping
        except TaxState.DoesNotExist:
            # No tax rate configured for this state: fall through untaxed.
            pass
        logging.debug("State %s don't apply taxes to shipping for shop %s" % (state, cart.shop))
        return maxim
|
{
"content_hash": "c8ed6549933d0a7849c0b95329e632c0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 116,
"avg_line_length": 41.53658536585366,
"alnum_prop": 0.6259541984732825,
"repo_name": "codepython/CollectorCity-Market-Place",
"id": "44beeb3a7596aea6a455df7069501f3e5d8c676a",
"size": "1703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stores/apps/payments/shipping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "863646"
},
{
"name": "HTML",
"bytes": "475154"
},
{
"name": "JavaScript",
"bytes": "693720"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
import os
import webapp2
# dev_appserver reports a SERVER_SOFTWARE value starting with "Dev".
IS_DEV = os.environ["SERVER_SOFTWARE"][:3] == "Dev"
# App Ids allowed to POST log entries (checked against the inbound-appid header).
allowed_users = set()
if IS_DEV:
    allowed_users.add("dev-instance")
else:
    # Add your Java App Engine proxy App Id here
    allowed_users.add("your-java-appengine-proxy-app-id")
class LoggingHandler(webapp2.RequestHandler):
    """Accepts Firebase snapshot posts from whitelisted App Engine apps."""
    def post(self):
        # Presumably set by App Engine for app-to-app requests -- verify it
        # cannot be spoofed by external clients before trusting it further.
        user = self.request.headers.get('X-Appengine-Inbound-Appid', None)
        if user and user in allowed_users:
            firebaseSnapshot = self.request.params['fbSnapshot']
            print firebaseSnapshot
        else:
            print "Got unauthenticated user: %s" % user
# WSGI application exposing the single /log route.
app = webapp2.WSGIApplication([
    webapp2.Route('/log', LoggingHandler),
])
|
{
"content_hash": "bc13120b561673d34b6ae5885dd8a004",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 28.541666666666668,
"alnum_prop": 0.6773722627737226,
"repo_name": "tswast/java-docs-samples",
"id": "49145bbd749beadc388ff913dff4ed2956e0ff6e",
"size": "1282",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "appengine/firebase-event-proxy/gae-firebase-listener-python/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "348"
},
{
"name": "HTML",
"bytes": "14365"
},
{
"name": "Java",
"bytes": "867172"
},
{
"name": "Python",
"bytes": "1282"
},
{
"name": "Shell",
"bytes": "8449"
},
{
"name": "XSLT",
"bytes": "1055"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from typing import Callable
from confirmation.models import Confirmation
from zerver.lib.actions import do_change_notification_settings, clear_scheduled_emails
from zerver.models import UserProfile, ScheduledEmail
from zerver.context_processors import common_context
def process_unsubscribe(request, token, subscription_type, unsubscribe_function):
    # type: (HttpRequest, str, str, Callable[[UserProfile], None]) -> HttpResponse
    """Resolve the confirmation `token`, apply `unsubscribe_function` to the
    matching user, and render a success page naming `subscription_type`.
    Renders an error page when the token is unknown."""
    try:
        confirmation = Confirmation.objects.get(confirmation_key=token)
    except Confirmation.DoesNotExist:
        return render(request, 'zerver/unsubscribe_link_error.html')
    user_profile = confirmation.content_object
    unsubscribe_function(user_profile)
    context = common_context(user_profile)
    context.update({"subscription_type": subscription_type})
    return render(request, 'zerver/unsubscribe_success.html', context=context)
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Disable offline (missed-message) email notifications."""
    do_change_notification_settings(user_profile, 'enable_offline_email_notifications', False)
def do_welcome_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Cancel any scheduled welcome-series emails for this user."""
    clear_scheduled_emails(user_profile.id, ScheduledEmail.WELCOME)
def do_digest_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Disable periodic digest emails."""
    do_change_notification_settings(user_profile, 'enable_digest_emails', False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
    "missed_messages": ("missed messages", do_missedmessage_unsubscribe),
    "welcome": ("welcome", do_welcome_unsubscribe),
    "digest": ("digest", do_digest_unsubscribe)
}
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, email_type, confirmation_key):
    # type: (HttpRequest, str, str) -> HttpResponse
    """One-click unsubscribe endpoint (no login required); renders an error
    page for unknown email types."""
    entry = email_unsubscribers.get(email_type)
    if entry is None:
        return render(request, 'zerver/unsubscribe_link_error.html')
    display_name, unsubscribe_function = entry
    return process_unsubscribe(request, confirmation_key, display_name, unsubscribe_function)
|
{
"content_hash": "6c3a7f88bcc7e8bb2510090e0d57a1c8",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 97,
"avg_line_length": 43.51724137931034,
"alnum_prop": 0.7603011093502378,
"repo_name": "verma-varsha/zulip",
"id": "08647cb681e88ccdf6b348d3ab1c36277ab55c05",
"size": "2524",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/views/unsubscribe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "426706"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "489996"
},
{
"name": "JavaScript",
"bytes": "2151770"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "85239"
},
{
"name": "Python",
"bytes": "3780334"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "45134"
}
],
"symlink_target": ""
}
|
import argparse
import itertools
import json
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import time
import traceback
import logging.handlers
from scapy import config as scapy_config
scapy_config.logLevel = 40
scapy_config.use_pcap = True
import scapy.all as scapy
from scapy.utils import rdpcap
class ActorFabric(object):
    """Factory that instantiates the Actor matching config['action']."""
    @classmethod
    def getInstance(cls, config):
        action = config.get('action')
        if action == 'listen':
            return Listener(config)
        if action == 'generate':
            return Sender(config)
        raise Exception(
            'Wrong config, you need define '
            'valid action instead of {0}'.format(action))
class ActorException(Exception):
    """Exception that logs its message on the supplied logger when raised.

    Falls back to logger.error when `level` names no method on the logger.
    """
    def __init__(self, logger, message='', level='error'):
        log_method = getattr(logger, level, logger.error)
        log_method(message)
        super(ActorException, self).__init__(message)
class Actor(object):
    def __init__(self, config=None):
        """Merge user config over the defaults and prepare VLAN support.

        Loads the 8021q kernel module so vlan interfaces can be created.
        """
        # Default probe parameters; overridden key-by-key from `config`.
        self.config = {
            'src_mac': None,
            'src': '198.18.1.1',
            'dst': '198.18.1.2',
            'sport': 31337,
            'dport': 31337,
            'cookie': "Nailgun:",
            'pcap_dir': "/var/run/pcap_dir/",
            'duration': 5,
            'repeat': 1
        }
        if config:
            self.config.update(config)
        # NOTE(review): assumes self.logger was set by the subclass before
        # this runs -- TODO confirm in Listener/Sender.
        self.logger.debug("Running with config: %s", json.dumps(self.config))
        self._execute(["modprobe", "8021q"])
        # Interfaces/vifaces this actor brought up itself, so their previous
        # state can be restored afterwards.
        self.iface_down_after = {}
        self.viface_remove_after = {}
    def _define_logger(self, filename=None,
                       appname='netprobe', level=logging.DEBUG):
        """Build and return a logger that always writes to syslog and,
        when `filename` is given, also to that file."""
        logger = logging.getLogger(appname)
        logger.setLevel(level)
        syslog_formatter = logging.Formatter(
            '{appname}: %(message)s'.format(appname=appname)
        )
        syslog_handler = logging.handlers.SysLogHandler('/dev/log')
        syslog_handler.setFormatter(syslog_formatter)
        logger.addHandler(syslog_handler)
        # A syslog handler should be always. But a file handler is the option.
        # If you don't want it you can keep 'filename' variable as None to skip
        # this handler.
        if filename:
            file_formatter = logging.Formatter(
                '%(asctime)s %(levelname)s %(name)s %(message)s'
            )
            file_handler = logging.FileHandler(filename)
            file_handler.setFormatter(file_formatter)
            logger.addHandler(file_handler)
        return logger
    def _execute(self, command, expected_exit_codes=(0,)):
        """Run `command` and return its stdout split into lines.

        Raises ActorException (which also logs) when the exit code is not
        in `expected_exit_codes`.
        """
        self.logger.debug("Running command: %s" % " ".join(command))
        env = os.environ
        # NOTE: this mutates the current process environment, not a copy.
        env["PATH"] = "/bin:/usr/bin:/sbin:/usr/sbin"
        p = subprocess.Popen(command, shell=False,
                             env=env, stdout=subprocess.PIPE)
        output, _ = p.communicate()
        if p.returncode not in expected_exit_codes:
            raise ActorException(
                self.logger,
                "Command exited with error: %s: %s" % (" ".join(command),
                                                       p.returncode)
            )
        return output.split('\n')
    def _viface_by_iface_vid(self, iface, vid):
        """Return the vlan interface name for (iface, vid): the existing one
        if found, otherwise the conventional "iface.vid" name."""
        return (self._try_viface_create(iface, vid) or "%s.%d" % (iface, vid))
    def _iface_name(self, iface, vid=None):
        """Return the effective interface name: the vlan interface when a
        vid is given, the plain interface otherwise."""
        if vid:
            return self._viface_by_iface_vid(iface, vid)
        return iface
    def _look_for_link(self, iface, vid=None):
        """Parse `ip link` output for the interface (and optional vlan).

        Returns (iface, viface, state) where state is 'UP', 'DOWN' or
        'UNKNOWN'; raises ActorException when no matching line is found.
        """
        viface = None
        if vid:
            viface = self._viface_by_iface_vid(iface, vid)
        command = ['ip', 'link']
        # Matches lines like "3: eth0.100@eth0: <...> state UP ..."
        r = re.compile(ur"(\d+?):\s+((?P<viface>[^:@]+)@)?(?P<iface>[^:]+?):"
                       ".+?(?P<state>UP|DOWN|UNKNOWN).*$")
        for line in self._execute(command):
            m = r.search(line)
            if m:
                md = m.groupdict()
                if (iface == md.get('iface') and
                        viface == md.get('viface') and md.get('state')):
                    return (iface, viface, md.get('state'))
        # If we are here we aren't able to say if iface with vid is up
        raise ActorException(
            self.logger,
            "Cannot find interface %s with vid=%s" % (iface, vid)
        )
def _try_iface_up(self, iface, vid=None):
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
self.logger.debug("Checking if interface %s with vid %s is up",
iface, str(vid))
_, _, state = self._look_for_link(iface, vid)
return (state == 'UP')
def _iface_up(self, iface, vid=None):
"""Brings interface with vid up
"""
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
set_iface = self._iface_name(iface, vid)
self.logger.debug("Brining interface %s with vid %s up",
set_iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"up"])
    def _ensure_iface_up(self, iface, vid=None):
        """Ensures interface is with vid up.

        If the interface was down and this call brought it up, the name is
        recorded in self.iface_down_after so _ensure_iface_down can restore
        the original state after probing.

        :raises ActorException: if the interface cannot be brought up
        """
        if not self._try_iface_up(iface, vid):
            # if iface is not up we try to bring it up
            self._iface_up(iface, vid)
            if self._try_iface_up(iface, vid):
                # if iface was down and we have brought it up
                # we should mark it to be brought down after probing
                self.iface_down_after[self._iface_name(iface, vid)] = True
            else:
                # if viface is still down we raise exception
                raise ActorException(
                    self.logger,
                    "Can not bring interface %s with vid %s up" % (iface,
                                                                   str(vid))
                )
def _ensure_iface_down(self, iface, vid=None):
set_iface = self._iface_name(iface, vid)
if self.iface_down_after.get(set_iface, False):
# if iface with vid have been marked to be brought down
# after probing we try to bring it down
self.logger.debug("Brining down interface %s with vid %s",
iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"down"])
self.iface_down_after.pop(set_iface)
    def _try_viface_create(self, iface, vid):
        """Tries to find vlan interface on iface with VLAN_ID=vid and return it

        Scans /proc/net/vlan/config whose data rows have the layout
        "<viface> | <vid> | <iface>".

        :returns: name of vlan interface if it exists or None
        """
        self.logger.debug("Checking if vlan %s on interface %s exists",
                          str(vid), iface)
        with open("/proc/net/vlan/config", "r") as f:
            for line in f:
                m = re.search(ur'(.+?)\s+\|\s+(.+?)\s+\|\s+(.+?)\s*$', line)
                if m and m.group(2) == str(vid) and m.group(3) == iface:
                    return m.group(1)
def _viface_create(self, iface, vid):
"""Creates VLAN interface with VLAN_ID=vid on interface iface
:returns: None
"""
self.logger.debug("Creating vlan %s on interface %s", str(vid), iface)
self._execute([
"ip",
"link", "add",
"link", iface,
"name", self._viface_by_iface_vid(iface, vid),
"type", "vlan",
"id", str(vid)])
    def _ensure_viface_create(self, iface, vid):
        """Ensures that vlan interface exists. If it does not already
        exist, then we need it to be created. It also marks newly created
        vlan interface to remove it after probing procedure.

        :raises ActorException: if the vlan interface cannot be created
        """
        if not self._try_viface_create(iface, vid):
            # if viface does not exist we try to create it
            self._viface_create(iface, vid)
            if self._try_viface_create(iface, vid):
                # if viface had not existed and have been created
                # we mark it to be removed after probing procedure
                self.viface_remove_after[
                    self._viface_by_iface_vid(iface, vid)
                ] = True
            else:
                # if viface had not existed and still does not
                # we raise exception
                raise ActorException(
                    self.logger,
                    "Can not create vlan %d on interface %s" % (vid, iface)
                )
def _ensure_viface_remove(self, iface, vid):
viface = self._viface_by_iface_vid(iface, vid)
if self.viface_remove_after.get(viface, False):
# if viface have been marked to be removed after probing
# we try to remove it
self.logger.debug("Removing vlan %s on interface %s",
str(vid), iface)
self._execute([
"ip",
"link", "del",
"dev", viface])
self.viface_remove_after.pop(viface)
def _parse_vlan_list(self, vlan_string):
self.logger.debug("Parsing vlan list: %s", vlan_string)
validate = lambda x: (x >= 0) and (x < 4095)
chunks = vlan_string.split(",")
vlan_list = []
for chunk in chunks:
delim = chunk.find("-")
try:
if delim > 0:
left = int(chunk[:delim])
right = int(chunk[delim + 1:])
if validate(left) and validate(right):
vlan_list.extend(xrange(left, right + 1))
else:
raise ValueError
else:
vlan = int(chunk)
if validate(vlan):
vlan_list.append(vlan)
else:
raise ValueError
except ValueError:
raise ActorException(self.logger, "Incorrect vlan: %s" % chunk)
self.logger.debug("Parsed vlans: %s", str(vlan_list))
return vlan_list
    def _ensure_viface_create_and_up(self, iface, vid):
        """Make sure the vlan subinterface exists and is up."""
        self._ensure_viface_create(iface, vid)
        self._ensure_iface_up(iface, vid)
    def _ensure_viface_down_and_remove(self, iface, vid):
        """Bring the vlan subinterface down and delete it (each step only
        happens if this probe raised/created it in the first place)."""
        self._ensure_iface_down(iface, vid)
        self._ensure_viface_remove(iface, vid)
    def _iface_vlan_iterator(self):
        """Yield (iface, vlan) pairs for every configured interface."""
        for iface, vlan_list in self.config['interfaces'].iteritems():
            # Variables iface and vlan_list come from decoded JSON, and
            # json converts all string data to Python unicode strings.
            # We use these variables in logging messages later.
            # CentOS 6.4 uses Python 2.6 and logging module 0.5.0.5 which has
            # a bug with converting unicode strings to message in
            # SysLogHandler. So we need to convert all unicode to plain
            # strings to avoid syslog message corruption.
            for vlan in self._parse_vlan_list(str(vlan_list)):
                yield (str(iface), vlan)
def _iface_iterator(self):
for iface in self.config['interfaces']:
yield iface
    def _log_ifaces(self, prefix="Current interfaces"):
        """Dump the current `ip address` output to the debug log."""
        self.logger.debug("%s: ", prefix)
        for line in self._execute(['ip', 'address']):
            self.logger.debug(line.rstrip())
class Sender(Actor):
    """Actor that builds broadcast UDP probe frames with scapy and sends
    them out of every configured interface/vlan pair."""
    def __init__(self, config=None):
        self.logger = self._define_logger('/var/log/netprobe_sender.log',
                                          'netprobe_sender')
        super(Sender, self).__init__(config)
        self.logger.info("=== Starting Sender ===")
        self._log_ifaces("Interfaces just before sending probing packages")
    def run(self):
        """Top-level entry point; logs any internal error with traceback."""
        try:
            self._run()
        except Exception as e:
            self.logger.error("An internal error occured: %s\n%s", str(e),
                              traceback.format_exc())
    def _get_iface_mac(self, iface):
        """Read the MAC address of *iface* from sysfs."""
        path = '/sys/class/net/{iface}/address'.format(iface=iface)
        with open(path, 'r') as address:
            return address.read().strip('\n')
    def _run(self):
        # bring every configured interface up, probe, then restore state
        for iface, vlan in self._iface_vlan_iterator():
            self._ensure_iface_up(iface)
        self._send_packets()
        self._log_ifaces("Interfaces just after sending probing packages")
        for iface in self._iface_iterator():
            self._ensure_iface_down(iface)
        self._log_ifaces("Interfaces just after ensuring them down in sender")
        self.logger.info("=== Sender Finished ===")
    def _send_packets(self):
        """Cycle over all iface/vlan pairs for config['duration'] seconds,
        sending config['repeat'] frames per visit."""
        start_time = time.time()
        for iface, vlan in itertools.cycle(self._iface_vlan_iterator()):
            self.logger.debug("Sending packets: iface=%s vlan=%s",
                              iface, str(vlan))
            for _ in xrange(self.config['repeat']):
                self._sendp(iface, vlan)
            if time.time() - start_time >= self.config['duration']:
                break
    def _sendp(self, iface, vlan):
        """Build and emit one broadcast probe frame; an 802.1Q tag is
        inserted only when vlan > 0."""
        try:
            # payload layout: "<cookie><iface> <uid>"
            data = str(''.join((self.config['cookie'], iface, ' ',
                                self.config['uid'])))
            p = scapy.Ether(src=self._get_iface_mac(iface),
                            dst="ff:ff:ff:ff:ff:ff")
            if vlan > 0:
                p = p / scapy.Dot1Q(vlan=vlan)
            p = p / scapy.IP(src=self.config['src'], dst=self.config['dst'])
            p = p / scapy.UDP(sport=self.config['sport'],
                              dport=self.config['dport']) / data
            scapy.sendp(p, iface=iface)
        except socket.error as e:
            self.logger.error("Socket error: %s, %s", e, iface)
class Listener(Actor):
    """Actor that captures probe frames with tcpdump on every configured
    interface and dumps the discovered neighbour map to a JSON file."""
    def __init__(self, config=None):
        self.logger = self._define_logger('/var/log/netprobe_listener.log',
                                          'netprobe_listener')
        super(Listener, self).__init__(config)
        self.logger.info("=== Starting Listener ===")
        self._log_ifaces("Interfaces just before starting listerning "
                         "for probing packages")
        self.pidfile = self.addpid('/var/run/net_probe')
        # neighbours[iface][vlan][uid] -> list of remote interface names
        self.neighbours = {}
        self._define_pcap_dir()
    def addpid(self, piddir):
        """Create an empty file named after our pid in *piddir* and
        return its full path."""
        pid = os.getpid()
        if not os.path.exists(piddir):
            os.mkdir(piddir)
        pidfile = os.path.join(piddir, str(pid))
        with open(pidfile, 'w') as fo:
            fo.write('')
        return pidfile
    def _define_pcap_dir(self):
        """(Re)create an empty directory for the tcpdump capture files."""
        if os.path.exists(self.config['pcap_dir']):
            shutil.rmtree(self.config['pcap_dir'])
        os.mkdir(self.config['pcap_dir'])
    def run(self):
        """Top-level entry point; logs any internal error with traceback."""
        try:
            self._run()
        except Exception as e:
            self.logger.error("An internal error occured: %s\n%s", str(e),
                              traceback.format_exc())
    def _run(self):
        sniffers = set()
        listeners = []
        for iface in self._iface_iterator():
            self._ensure_iface_up(iface)
            if iface not in sniffers:
                # one tcpdump for untagged and one for vlan-tagged frames
                listeners.append(self.get_probe_frames(iface))
                listeners.append(self.get_probe_frames(iface, vlan=True))
                sniffers.add(iface)
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # BUG FIX: the fallback host was misspelled 'locahost'; use
            # 'localhost' to match the CLI default for --address.
            s.connect((self.config.get('ready_address', 'localhost'),
                       self.config.get('ready_port', 31338)))
        except socket.error as e:
            self.logger.error("Socket error: %s", e)
        else:
            self.logger.debug("Listener threads have been launched. "
                              "Reporting READY.")
            msg = "READY"
            total_sent = 0
            while total_sent < len(msg):
                sent = s.send(msg[total_sent:])
                if sent == 0:
                    raise ActorException(
                        self.logger,
                        "Socket broken. Cannot send %s status." % msg
                    )
                total_sent += sent
            s.shutdown(socket.SHUT_RDWR)
            s.close()
        # wait until the sender side interrupts us (SIGTERM or Ctrl-C)
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.logger.debug("Interruption signal catched")
        except SystemExit:
            self.logger.debug("TERM signal catched")
        for listener in listeners:
            # terminate and flush pipes
            listener.terminate()
            listener.communicate()
        self.logger.debug('Start reading dumped information.')
        self.read_packets()
        self._log_ifaces("Interfaces just before ensuring interfaces down")
        for iface in self._iface_iterator():
            self._ensure_iface_down(iface)
        self._log_ifaces(
            "Interfaces just after ensuring them down in listener")
        with open(self.config['dump_file'], 'w') as fo:
            fo.write(json.dumps(self.neighbours))
        os.unlink(self.pidfile)
        self.logger.info("=== Listener Finished ===")
    def read_packets(self):
        """Parse both capture files (untagged and vlan) for every iface."""
        for iface in self._iface_iterator():
            filenames = ['{0}.pcap'.format(iface),
                         'vlan_{0}.pcap'.format(iface)]
            for filename in filenames:
                self.read_pcap_file(iface, filename)
    def read_pcap_file(self, iface, filename):
        """Feed every packet of one capture file into fprn()."""
        # FIX: join outside the try block so pcap_file is always bound
        # when the except handler logs it.
        pcap_file = os.path.join(self.config['pcap_dir'], filename)
        try:
            for pkt in rdpcap(pcap_file):
                self.fprn(pkt, iface)
        except Exception:
            self.logger.exception('Cant read pcap file %s', pcap_file)
    def fprn(self, p, iface):
        """Record one received probe frame into self.neighbours."""
        if scapy.Dot1Q in p:
            vlan = p[scapy.Dot1Q].vlan
        else:
            vlan = 0
        self.logger.debug("Catched packet: vlan=%s len=%s payload=%s",
                          str(vlan), p[scapy.UDP].len, p[scapy.UDP].payload)
        received_msg, _ = p[scapy.UDP].extract_padding(p[scapy.UDP].load)
        decoded_msg = received_msg.decode()
        # payload layout: "<cookie><remote_iface> <uid>"
        riface, uid = decoded_msg[len(self.config["cookie"]):].split(' ', 1)
        self.neighbours[iface].setdefault(vlan, {})
        if riface not in self.neighbours[iface][vlan].setdefault(uid, []):
            self.neighbours[iface][vlan][uid].append(riface)
    def get_probe_frames(self, iface, vlan=False):
        """Spawn a tcpdump capturing probe frames on *iface* into a
        per-iface pcap file and return the Popen handle."""
        if iface not in self.neighbours:
            self.neighbours[iface] = {}
        filter_string = 'udp and dst port {0}'.format(self.config['dport'])
        filename = '{0}.pcap'.format(iface)
        if vlan:
            filter_string = '{0} {1}'.format('vlan and', filter_string)
            filename = '{0}_{1}'.format('vlan', filename)
        pcap_file = os.path.join(self.config['pcap_dir'], filename)
        return subprocess.Popen(
            ['tcpdump', '-i', iface, '-w', pcap_file, filter_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
# -------------- main ---------------
def define_parser():
    """Build the top-level argument parser (only the --config option);
    sub-commands are attached later by define_subparsers()."""
    config_examples = """
Config file examples:
Capture frames config file example is:
{"action": "listen", "interfaces": {"eth0": "1-4094"},
"dump_file": "/var/tmp/net-probe-dump-eth0"}
Simple frame generation config file example is:
{"action": "generate", "uid": "aaa-bb-cccccc",
"interfaces": { "eth0": "1-4094"}}
Full frame generation config file example is:
{ "action": "generate",
"uid": "aaa-bb-cccccc", "cookie": "Some cookie",
"src_mac": "11:22:33:44:55:66",
"src": "10.0.0.1", "dst": "10.255.255.255",
"sport": 4056, "dport": 4057,
"interfaces": {
    "eth0": "10, 15, 20, 201-210, 301-310, 1000-2000",
    "eth1": "1-4094"
}
}
"""
    root_parser = argparse.ArgumentParser(epilog=config_examples)
    root_parser.add_argument(
        '-c', '--config',
        dest='config', action='store', type=str, default=None,
        help='config file'
    )
    return root_parser
def define_subparsers(parser):
    """Attach the 'listen' and 'generate' sub-commands to *parser*."""
    subparsers = parser.add_subparsers(
        dest="action", help='actions'
    )
    listen_parser = subparsers.add_parser(
        'listen', help='listen for probe packets'
    )
    generate_parser = subparsers.add_parser(
        'generate', help='generate and send probe packets'
    )
    # options shared by both sub-commands (only the -i help text differs)
    for sub, iface_help in (
            (listen_parser, 'interface to listen on'),
            (generate_parser, 'interface to send packets from')):
        sub.add_argument(
            '-i', '--interface', dest='interface', action='store', type=str,
            help=iface_help, required=True
        )
        sub.add_argument(
            '-v', '--vlans', dest='vlan_list', action='store', type=str,
            help='vlan list to send tagged packets ("100,200-300")',
            required=True
        )
        sub.add_argument(
            '-k', '--cookie', dest='cookie', action='store', type=str,
            help='cookie string to insert into probe packets payload',
            default='Nailgun:'
        )
    # listen-only options
    listen_parser.add_argument(
        '-o', '--file', dest='dump_file', action='store', type=str,
        help='file to dump captured packets', default=None
    )
    listen_parser.add_argument(
        '-a', '--address', dest='ready_address', action='store', type=str,
        help='address to report listener ready state', default='localhost'
    )
    listen_parser.add_argument(
        '-p', '--port', dest='ready_port', action='store', type=int,
        help='port to report listener ready state', default=31338
    )
    # generate-only options
    generate_parser.add_argument(
        '-u', '--uid', dest='uid', action='store', type=str,
        help='uid to insert into probe packets payload', default='1'
    )
    generate_parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=5,
        help='Amount of time to generate network packets. In seconds',
    )
    generate_parser.add_argument(
        '-r', '--repeat', dest='repeat', type=int, default=1,
        help='Amount of packets sended in one iteration.',
    )
def term_handler(signum, sigframe):
    """SIGTERM handler: translate the signal into a normal SystemExit so
    `except SystemExit` blocks can run cleanup."""
    sys.exit()
def main():
    """CLI entry point: build the probe config either from a JSON file
    (-c/--config) or from sub-command arguments, then run the actor."""
    signal.signal(signal.SIGTERM, term_handler)
    parser = define_parser()
    params, other_params = parser.parse_known_args()
    config = {}
    if params.config:
        # if config file is set then we discard all other
        # command line parameters
        try:
            if params.config == '-':
                # '-' means read the JSON config from stdin
                fo = sys.stdin
            else:
                fo = open(params.config, 'r')
            config = json.load(fo)
            fo.close()
        except IOError:
            print("Can not read config file %s" % params.config)
            exit(1)
        except ValueError as e:
            print("Can not parse config file: %s" % str(e))
            exit(1)
    else:
        define_subparsers(parser)
        params, other_params = parser.parse_known_args()
        if params.action == 'listen':
            config['action'] = 'listen'
            config['interfaces'] = {}
            config['interfaces'][params.interface] = params.vlan_list
            config['cookie'] = params.cookie
            config['ready_address'] = params.ready_address
            config['ready_port'] = params.ready_port
            if params.dump_file:
                config['dump_file'] = params.dump_file
            else:
                # BUG FIX: config has no 'interface' key (only
                # 'interfaces'), so the old config['interface'] lookup
                # raised KeyError; use the CLI interface name instead.
                config['dump_file'] = "/var/tmp/net-probe-dump-%s" %\
                    params.interface
        elif params.action == 'generate':
            config['action'] = 'generate'
            config['interfaces'] = {}
            config['interfaces'][params.interface] = params.vlan_list
            config['uid'] = params.uid
            config['cookie'] = params.cookie
            config['duration'] = params.duration
            config['repeat'] = params.repeat
    actor = ActorFabric.getInstance(config)
    actor.run()
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
{
"content_hash": "1da2012417588dd0271604e283ca0ecd",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 79,
"avg_line_length": 36.7085798816568,
"alnum_prop": 0.5405198468668144,
"repo_name": "Axam/nsx-web",
"id": "8400e9580b22d0a9f31782bdc4dfc1468c2a8b94",
"size": "25610",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "network_checker/network_checker/net_check/api.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99402"
},
{
"name": "JavaScript",
"bytes": "553275"
},
{
"name": "Python",
"bytes": "2623980"
},
{
"name": "Ruby",
"bytes": "33345"
},
{
"name": "Shell",
"bytes": "29681"
}
],
"symlink_target": ""
}
|
import numpy as np
import scipy.ndimage as ndimage
#import matplotlib
import matplotlib.pyplot as plt
import os
import glob
import datetime
from PIL import Image
from PIL import ImageEnhance
import webbrowser
import sys
ALPHA=.5  # default trace transparency used by the figure methods
DELTA=r'$\Delta$'  # LaTeX delta symbol used in axis labels/titles
COLORS=["b","g","orange","r","m"]  # fallback per-trace color cycle
def COL(frac):
    """Map a 0..1 fraction onto a band of the nipy_spectral colormap."""
    cmap = plt.cm.get_cmap('nipy_spectral')
    return cmap(frac * .4 + .10)
def COL2(frac):
    """Map a 0..1 fraction onto the jet colormap, reversed."""
    cmap = plt.cm.get_cmap('jet')
    return cmap(1 - frac)
class LineScan:
    # Full analysis pipeline for one PrairieView linescan folder:
    # load config -> load TIFs -> auto-detect ROI -> flatten to 1D traces.
    def __init__(self,folder,verbose=False,baseline=None,marks=None,sigma=5,lock_scale=False):
        """
        The LineScan class provides an easy object to load and analyze data from PrairieView linescan folders.
        By convention Ch1 is red (calcium insensitive fluorophore) and Ch2 is green (calcium indicator).
        The main objects to access are green (G), red (R), and green over red (GoR).
        Baseline inputs (in seconds) are used to convert G/R to Delta(G/R)
        """
        # work out the path and name of the linescan
        self.folder=os.path.abspath(folder)
        self.folderOut=os.path.abspath(os.path.join(self.folder,"analysis/"))
        print("\n\nLOADING [%s]\n%s"%(os.path.basename(self.folder),self.folder))
        if not os.path.exists(self.folderOut):
            os.mkdir(self.folderOut)
        self.verbose=verbose
        self.baselineSec=baseline
        self.marks=marks
        self.sigma=sigma
        self.lock_scale=lock_scale
        assert(os.path.exists(self.folder)), self.folder+" doesn't exist"
        self.name=os.path.basename(self.folder)
        if verbose: print("loading linescan",self.name)
        # figure out which files are linescans, XML data, etc
        self.files=sorted(os.listdir(self.folder))
        assert len([x for x in self.files if x.endswith(".env")]), "no .env file found"
        self.fileEnv=[x for x in self.files if x.endswith(".env")]
        assert len([x for x in self.files if x.endswith(".xml")]), "no .xml file found"
        self.fileXml=[x for x in self.files if x.endswith(".xml")][0]
        self.filesR=[x for x in self.files if x.endswith(".tif") and "_Ch1_" in x]
        self.filesG=[x for x in self.files if x.endswith(".tif") and "_Ch2_" in x]
        assert len(self.filesR)==len(self.filesG), "number of Ch1 and Ch2 tifs must match"
        if verbose: print("linescans found: %d red and %d green"%(len(self.filesR),len(self.filesG)))
        self.frames = len(self.filesR)
        self.dpi=100 # change this externally as desired
        ### do these things automatically when the class is loaded
        self.confLoad() # load the configuration
        self.dataLoad() # load image data as 2D arrays
        self.markAuto() # figure out where the brightest structure is and outline it
        self.dataFlatten() # converts data to 1D arrays (traces) and handle baseline subtraction
    def markAuto(self,spread=5):
        """
        Collapse the red channel in the time domain leaving only a space domain 1D array.
        Find the point of peak intensity (brightest structure).
        Set markers a certain distance on each side of the peak structure.
        """
        if not self.marks:
            # default to the full spatial extent, then shrink to the peak
            self.m1,self.m2=0,self.dataG[0].shape[1]
            vertAvg=np.average(np.average(self.dataR,axis=0),axis=0) # collapse the red channel to 1D (space domain)
            maxValue=max(vertAvg)
            minValue=min(vertAvg)
            cutoff=(maxValue-minValue)*.25+minValue # bottom 25%
            peakPos=np.where(vertAvg==maxValue)[0][0]
            print("pixel row with peak intensity:",peakPos)
            self.m1,self.m2=peakPos,peakPos
            # walk outward from the peak until intensity falls below cutoff
            for y in range(peakPos,len(vertAvg)):
                if vertAvg[y]>cutoff:self.m2=y+2
                else:break
            for y in range(peakPos,0,-1):
                if vertAvg[y]>cutoff:self.m1=y
                else:break
            print("marks automatically set to %d - %d"%(self.m1,self.m2))
    def _xml_getValue(self,s):
        """return the value from an XML line ('<PVStateValue key="dwellTime" value="7.2" />' becomes 7.2)"""
        s=s.split("value=")[1].split('"')[1]
        try:return(int(s))
        except:pass
        try:return(float(s))
        except:return(s)
    def confLoad(self):
        """Load the content of the .env and .xml files to determine the parameters used to acquire the data."""
        keys=["dwellTime","scanLinePeriod","linesPerFrame","pixelsPerLine"]
        self.conf={}
        with open(os.path.join(self.folder,self.fileXml)) as f:
            for line in f.readlines():
                for key in keys:
                    if key in line:
                        self.conf[key]=self._xml_getValue(line)
        #TODO: add code to support multiple linescan time points
        if self.verbose:
            print("CONFIGURATION:")
            for key in self.conf.keys():
                print(" "+key,"=",self.conf[key])
        # Xs: time axis (seconds) for one frame of the linescan
        self.Xs=np.arange(self.conf['linesPerFrame'])*self.conf['scanLinePeriod']
        if self.baselineSec:
            # convert the user-supplied baseline (seconds) to line indexes
            self.baselineIs=[int(self.baselineSec[0]/self.conf['scanLinePeriod']),
                             int(self.baselineSec[1]/self.conf['scanLinePeriod'])]
        else:
            self.baselineIs=[int(len(self.Xs)*.05),int(len(self.Xs)*.15)] # default to first 5-15% of the window
        self.baselineSec=[self.Xs[self.baselineIs[0]],self.Xs[self.baselineIs[1]]]
    def dataLoad(self):
        """load TIF data as a 2d array and store it in the lists self.dataG and self.dataR"""
        self.dataR,self.dataG,self.dataGoR=[None]*self.frames,[None]*self.frames,[None]*self.frames
        for frame in range(self.frames):
            print("    loading frame %d of %d ..."%(frame+1,self.frames))
            self.dataR[frame]=plt.imread(os.path.join(self.folder,self.filesR[frame]))
            self.dataG[frame]=plt.imread(os.path.join(self.folder,self.filesG[frame]))
            if self.sigma>1:
                # gaussian smoothing of image in the time domain
                self.dataR[frame]=ndimage.gaussian_filter(self.dataR[frame],sigma=(self.sigma,0))
                self.dataG[frame]=ndimage.gaussian_filter(self.dataG[frame],sigma=(self.sigma,0))
            self.dataGoR[frame]=self.dataG[frame]/self.dataR[frame]
    def dataFlatten(self):
        """Flatten 2d data into 1d data. Creates traceG, traceR, and traceGoR."""
        self.traceG=np.array([None]*self.frames)
        self.traceR=np.array([None]*self.frames)
        self.traceGoR=np.array([None]*self.frames)
        self.dGoR=np.array([None]*self.frames)
        self.bGoR=np.array([None]*self.frames)
        self.bG=np.array([None]*self.frames)
        self.bR=np.array([None]*self.frames)
        for frame in range(self.frames):
            # average across the marked spatial band (columns m1..m2)
            self.traceG[frame]=np.average(self.dataG[frame][:,self.m1:self.m2],axis=1)
            self.traceR[frame]=np.average(self.dataR[frame][:,self.m1:self.m2],axis=1)
            self.traceGoR[frame]=np.average(self.dataGoR[frame][:,self.m1:self.m2],axis=1)
            # per-frame baseline values used to compute delta traces
            self.bGoR[frame]=np.average(self.traceGoR[frame][self.baselineIs[0]:self.baselineIs[1]])
            self.bG[frame]=np.average(self.traceG[frame][self.baselineIs[0]:self.baselineIs[1]])
            self.bR[frame]=np.average(self.traceR[frame][self.baselineIs[0]:self.baselineIs[1]])
            self.dGoR[frame]=self.traceGoR[frame]-self.bGoR[frame]
        self.AVGdGoR=np.average(self.dGoR,axis=0)
    ### FILE STUFF
    def clean(self):
        """delete everything in the analysis folder."""
        for fname in glob.glob(self.folderOut+"/*.*"):
            print("deleting",os.path.basename(fname),'...')
            os.remove(fname)
    def saveData(self,offset=2.46872):
        """generate CSV files of all data and save them in the analysis folder."""
        # each CSV has the (offset) time axis as its first column
        datadGoR=np.flipud(np.rot90(np.vstack((self.Xs+offset,np.array(self.dGoR.tolist())))))
        dataR=np.flipud(np.rot90(np.vstack((self.Xs+offset,np.array(self.traceR.tolist())))))
        dataG=np.flipud(np.rot90(np.vstack((self.Xs+offset,np.array(self.traceG.tolist())))))
        dataGoR=np.flipud(np.rot90(np.vstack((self.Xs+offset,np.array(self.traceGoR.tolist())))))
        np.savetxt(self.folderOut+"/data_dGoR.csv",datadGoR,delimiter=',',fmt='%.05f')
        np.savetxt(self.folderOut+"/data_dataR.csv",dataR,delimiter=',',fmt='%.05f')
        np.savetxt(self.folderOut+"/data_dataG.csv",dataG,delimiter=',',fmt='%.05f')
        np.savetxt(self.folderOut+"/data_GoR.csv",dataGoR,delimiter=',',fmt='%.05f')
        print("saved multiple raw data CSV files")
    ### PLOTTING ACTIONS
    def shadeBaseline(self):
        """Shade the baseline time window on the current axes."""
        plt.axvspan(self.baselineSec[0],self.baselineSec[1],alpha=.1,color='k')
    def markBounds(self,color='y'):
        """Draw the spatial marks (m1/m2) and baseline bounds on an image plot."""
        for xpos in [self.m1,self.m2]:
            plt.axhline(xpos,color=color,ls='--',lw=2)
        for i in self.baselineIs:
            plt.axvline(self.Xs[i],color=color,ls='--',lw=2)
    def saveFig(self,saveAs=None):
        """call this to save a figure. Make saveAs None to display figure. Make it a filename to save it."""
        if saveAs:
            saveAs=os.path.abspath(os.path.join(self.folderOut,saveAs))
            plt.savefig(saveAs,dpi=self.dpi)
            print("saved figure",os.path.basename(saveAs))
        else:
            plt.show()
        plt.close()
    ### FIGURES ####################
    def refFig(self):
        """convert a TIF reference figure showing the linescan path to a PNG in the analysis folder."""
        fnames=sorted(glob.glob(self.folder+"/References/*Window*.tif"))
        for i,fname in enumerate(fnames):
            fname=os.path.abspath(fname)
            saveAs=os.path.abspath(self.folder+"/analysis/fig_00%d_ref.png"%i)
            print("converting",fname,'...')
            im = Image.open(fname)
            print("enhancing contrast...")
            contrast = ImageEnhance.Contrast(im)
            im=contrast.enhance(1.5)
            im.save(saveAs)
            print('saved',saveAs)
    def figureDriftDGOR(self,saveAs=False):
        """create a figure to assess drift of dGoR over time."""
        plt.figure(figsize=(6,6))
        plt.grid(alpha=.5)
        plt.axhline(0,color='k',ls='--')
        plt.title(DELTA+"[G/R] traces by frame")
        for frame in range(self.frames):
            plt.plot(self.Xs,self.dGoR[frame]*100,alpha=ALPHA,label=frame+1,
                     color=COL(frame/self.frames))
        self.shadeBaseline()
        plt.legend(fontsize=6,loc=1)
        plt.ylabel(DELTA+" [G/R] (%)")
        plt.xlabel("linescan duration (seconds)")
        plt.margins(0,.1)
        if self.lock_scale:
            plt.axis([None,None,-self.lock_scale*.1,self.lock_scale])
        plt.tight_layout()
        self.saveFig(saveAs)
    def figureDriftGOR(self,saveAs=False):
        """create a figure to assess drift of dGoR over time."""
        plt.figure(figsize=(6,6))
        plt.grid(alpha=.5)
        plt.title("raw [G/R] traces by frame")
        for frame in range(self.frames):
            plt.plot(self.Xs,self.traceGoR[frame]*100,alpha=ALPHA,label=frame+1,
                     color=COL(frame/self.frames))
        plt.legend(fontsize=6,loc=1)
        plt.ylabel("raw G/R (%)")
        plt.xlabel("linescan duration (seconds)")
        plt.margins(0,.1)
        if self.lock_scale:
            plt.axis([None,None,0,self.lock_scale*2])
        plt.tight_layout()
        self.saveFig(saveAs)
    def figureDriftGOR2(self,saveAs=False):
        """create a figure to assess drift of dGoR over time."""
        plt.figure(figsize=(6,6))
        plt.grid(alpha=.5)
        plt.title("raw [G/R] traces by frame")
        for frame in range(self.frames):
            # frames are laid end-to-end along the x axis
            offset=self.Xs[-1]*frame
            plt.plot(self.Xs+offset,self.traceGoR[frame]*100,alpha=ALPHA,label=frame+1,
                     color=COL(frame/self.frames))
        plt.legend(fontsize=6,loc=1)
        plt.ylabel("raw G/R (%)")
        plt.xlabel("linescan data only (seconds)")
        plt.margins(0,.1)
        if self.lock_scale:
            plt.axis([None,None,0,self.lock_scale*2])
        plt.tight_layout()
        self.saveFig(saveAs)
    def figureDriftRAW(self,saveAs=False):
        """create a figure to assess drift of R and G over time."""
        plt.figure(figsize=(6,6))
        plt.subplot(211)
        plt.title("average baseline R and G by frame")
        plt.grid(alpha=.5)
        plt.plot(self.bG,'.-',color='g',ms=20)
        plt.plot(self.bR,'.-',color='r',ms=20)
        plt.axis([None,None,0,None])
        plt.ylabel("pixel intensity (AFU)")
        plt.subplot(212)
        plt.title("average baseline G/R ratio by frame")
        plt.grid(alpha=.5)
        plt.plot(self.bGoR,'.-',color='b',ms=20)
        plt.ylabel("raw [G/R]")
        plt.xlabel("frame number")
        if self.lock_scale:
            plt.axis([None,None,-self.lock_scale*.1,self.lock_scale])
        plt.tight_layout()
        self.saveFig(saveAs)
    def figureAvg(self,saveAs=False):
        """create a figure showing raw pixel values and dGoR as an average"""
        plt.figure(figsize=(6,6))
        ax1=plt.subplot(211)
        plt.subplot(212,sharex=ax1)
        plt.subplot(211)
        plt.title(self.name)
        plt.ylabel("raw pixel intensity (AFU)")
        plt.grid(alpha=.5)
        self.shadeBaseline()
        for frame in range(self.frames):
            plt.subplot(211)
            plt.plot(self.Xs,self.traceG[frame],'-',color='G',alpha=.5)
            plt.plot(self.Xs,self.traceR[frame],'-',color='R',alpha=.5)
            plt.subplot(212)
            plt.plot(self.Xs,self.dGoR[frame]*100.0,'-',color='b',alpha=.2)
        plt.subplot(211)
        plt.setp(plt.gca().get_xticklabels(), visible=False)
        plt.subplot(212)
        title=DELTA+" [G/R] (%)"
        if self.frames>1:
            title+=" (avg n=%d)"%self.frames
        plt.ylabel(title)
        plt.grid(alpha=.5)
        self.shadeBaseline()
        plt.axhline(0,color='k',ls='--')
        plt.plot(self.Xs,self.AVGdGoR*100,'-',color='b',alpha=.5)
        plt.margins(0,.1)
        plt.xlabel("linescan duration (seconds)")
        plt.tight_layout()
        self.saveFig(saveAs)
    def figureImg(self,saveAs=False):
        """create a figure showing the actual linescan image with outlined ROI"""
        plt.figure(figsize=(6,6))
        plt.subplot(311)
        plt.title("Line Scan Structure Auto-Detection (avg n=%d)"%self.frames)
        plt.axis([0,self.Xs[-1],0,np.shape(self.dataG)[2]])
        plt.imshow(np.rot90(np.average(self.dataG,axis=0)),cmap='gray',aspect='auto',extent=plt.axis())
        self.markBounds()
        plt.setp(plt.gca().get_yticklabels(), visible=False)
        plt.setp(plt.gca().get_xticklabels(), visible=False)
        plt.ylabel("green channel")
        plt.colorbar()
        plt.subplot(312)
        plt.axis([0,self.Xs[-1],0,np.shape(self.dataR)[2]])
        plt.imshow(np.rot90(np.average(self.dataR,axis=0)),cmap='gray',aspect='auto',extent=plt.axis())
        self.markBounds()
        plt.setp(plt.gca().get_yticklabels(), visible=False)
        plt.setp(plt.gca().get_xticklabels(), visible=False)
        plt.ylabel("red channel")
        plt.colorbar()
        plt.subplot(313)
        plt.axis([0,self.Xs[-1],0,np.shape(self.dataR)[2]])
        # baseline-subtracted G/R as a percentage, shown as a heat map
        data=np.rot90(np.average(self.dataG,axis=0))/np.rot90(np.average(self.dataR,axis=0))*100
        data=data-np.average(data[self.baselineIs[0]:self.baselineIs[1],:])
        plt.imshow(data,cmap='jet',aspect='auto',extent=plt.axis())
        self.markBounds('k')
        plt.setp(plt.gca().get_yticklabels(), visible=False)
        plt.ylabel(DELTA+" [G/R] (%)")
        plt.colorbar()
        plt.xlabel("linescan duration (seconds)")
        plt.tight_layout()
        self.saveFig(saveAs)
    def figure_dGoR_peak(self,saveAs=False,freq=True):
        """create a scatter plot showing the peak dGoR vs frame number."""
        # assumes frames map onto these stimulation frequencies when the
        # counts line up -- TODO confirm against the acquisition protocol
        freqs = [1,5,10,15,20,25]
        if freq==True and self.frames==len(freqs):
            Xs=freqs
            xlabel="AP Frequency (Hz)"
        else:
            Xs=np.arange(self.frames)+1
            xlabel="line scan frame number"
        plt.figure(figsize=(6,6))
        plt.grid(alpha=.5)
        Ys=np.ones(self.frames)*np.nan
        plt.title("Calcium Response Curve (peak)")
        for frame in range(self.frames):
            Ys[frame]=np.max(self.dGoR[frame])*100
        print("creating data_dGoR_byframe_peak ...")
        np.savetxt(self.folderOut+"/data_dGoR_byframe_peak.csv",Ys,delimiter=',',fmt='%.05f')
        plt.ylabel("peak d[G/R] (%)")
        plt.xlabel(xlabel)
        plt.plot(Xs,Ys,'.-',ms=20)
        plt.margins(.1,.1)
        if self.lock_scale:
            plt.axis([None,None,-self.lock_scale*.1,self.lock_scale])
        plt.tight_layout()
        self.saveFig(saveAs)
    def figure_dGoR_area(self,saveAs=False,freq=True):
        """create a scatter plot showing the dGoR area vs frame number."""
        # assumes frames map onto these stimulation frequencies when the
        # counts line up -- TODO confirm against the acquisition protocol
        freqs = [1,5,10,15,20,25]
        if freq==True and self.frames==len(freqs):
            Xs=freqs
            xlabel="AP Frequency (Hz)"
        else:
            Xs=np.arange(self.frames)+1
            xlabel="line scan frame number"
        plt.figure(figsize=(6,6))
        plt.grid(alpha=.5)
        Ys=np.ones(self.frames)*np.nan
        plt.title("Calcium Response Curve (area)")
        for frame in range(self.frames):
            Ys[frame]=np.sum(self.dGoR[frame])*100/self.Xs[-1]/1000
        print("creating data_dGoR_byframe_area ...")
        np.savetxt(self.folderOut+"/data_dGoR_byframe_area.csv",Ys,delimiter=',',fmt='%.05f')
        plt.ylabel("d[G/R] area (% * ms)")
        plt.xlabel(xlabel)
        plt.plot(Xs,Ys,'.-',ms=20,color='r')
        plt.margins(.1,.1)
        if self.lock_scale:
            plt.axis([None,None,-self.lock_scale*.1,self.lock_scale])
        plt.tight_layout()
        self.saveFig(saveAs)
    ### END OF INDIVIDUAL FIGURES ####################
    def allFigures(self):
        """automatically generate every figure for a given linescan."""
        self.clean()
        #self.refFig()
        self.saveData()
        self.figureImg("fig_01_img.png")
        self.figureAvg("fig_02_avg.png")
        if self.frames<3: return
        # drift/response-curve figures only make sense with several frames
        self.figureDriftRAW("fig_03_drift1.png")
        self.figureDriftDGOR("fig_04_drift2.png")
        self.figureDriftGOR("fig_05_drift3.png")
        self.figureDriftGOR2("fig_05_drift32.png")
        self.figure_dGoR_peak("fig_06_peak.png")
        self.figure_dGoR_area("fig_07_area.png")
### END OF FIGURES ####################
def index(folderParent):
    """Write index.html (a gallery of every LineScan analysis) into the
    parent directory and open it in the default web browser."""
    timestamp=datetime.datetime.now().strftime("%I:%M %p on %B %d, %Y")
    parts=["<html><style>"]
    parts.append("""
img{
margin: 10px;
border: 1px solid black;
box-shadow: 5px 5px 10px rgba(0, 0, 0, .2);
}
""")
    parts.append("</style><body>")
    parts.append("<b style='font-size: 300%%'>pyLineScan</b><br><i>automatic linescan index generated at %s</i><hr><br>"%timestamp)
    for folder in sorted(os.listdir(folderParent)):
        if not folder.startswith("LineScan-"):
            continue  # only LineScan-* folders belong in the report
        rel=folderParent+"/"+folder
        path=os.path.abspath(rel)
        parts.append("<div style='background-color: #336699; color: white; padding: 10px; page-break-before: always;'>")
        parts.append("<span style='font-size: 200%%; font-weight: bold;'>%s</span><br>"%folder)
        parts.append("<code>%s</code></div>"%path)
        # thumbnail links to every generated PNG for this scan
        for imagePath in sorted(glob.glob(folderParent+"/"+folder+"/analysis/*.png")):
            imageName=os.path.basename(imagePath)
            parts.append('<a href="%s/analysis/%s"><img src="%s/analysis/%s" height=300></a>'%(rel,imageName,rel,imageName))
        parts.append("<br><br><code><b>These data are stored in the following CSV files:</b></code><br>")
        for csvPath in sorted(glob.glob(folderParent+"/"+folder+"/analysis/*.csv")):
            parts.append('<code>%s</code><br>'%os.path.abspath(csvPath))
        parts.append("<br>"*6)
    parts.append("</code></body></html>")
    out="".join(parts)
    fileOut=os.path.abspath(folderParent+"/index.html")
    with open(fileOut,'w') as f:
        f.write(out)
    print("\nSAVED HTML REPORT:\n"+fileOut+'\n')
    webbrowser.open(fileOut)
def analyzeFolderOfLinescans(folderParent,reanalyze=False,matching=False):
    """Analyze every LineScan-* folder inside folderParent.

    reanalyze -- when False, folders that already contain analysis output
                 (analysis/data_GoR.csv) are skipped.
    matching  -- optional substring filter on the folder name.
    """
    print("Analyzing folder of linescans:",folderParent)
    print(" reanalyze =",reanalyze)
    for folder in sorted(glob.glob(folderParent+'/LineScan-*')):
        alreadyAnalyzed = os.path.exists(folder+"/analysis/data_GoR.csv")
        if alreadyAnalyzed and not reanalyze:
            print("not re-analyzing",folder)
            continue
        if matching and matching not in os.path.basename(folder):
            continue
        LineScan(folder).allFigures()
    return
if __name__=="__main__":
    if len(sys.argv)==1:
        # no CLI arguments: developer convenience path with a hard-coded folder
        print("### RUNNING WITHOUT ARGUMENTS - ASSUMING YOU ARE A DEVELOPER ###\n"*20)
        LS = LineScan(R"X:\Data\SCOTT\2017-06-16 OXT-Tom\2p\LineScan-07062017-1730-720")
        LS.allFigures()
    else:
        # first argument: path of a single LineScan folder to analyze
        reanalyze = "reanalyze" in sys.argv
        if reanalyze:
            print("FORCING RE-ANALYSIS OF ALL FILES.")
        folder = os.path.abspath(sys.argv[1])
        print("FOLDER TO ANALYZE:\n%s"%folder)
        assert os.path.exists(folder)
        LineScan(folder).allFigures()
        print("FINISHED ANALYSIS SUCCESSFULLY")
|
{
"content_hash": "7c9ea149f158467d252357fd399d489b",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 122,
"avg_line_length": 42.96484375,
"alnum_prop": 0.6023274843167561,
"repo_name": "swharden/ROI-Analysis-Pipeline",
"id": "accba07ca4c773b06f031ba3109b5ed965df77af",
"size": "22064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyLS/pyLineScan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2541"
},
{
"name": "C#",
"bytes": "8497"
},
{
"name": "ImageJ Macro",
"bytes": "464"
},
{
"name": "PHP",
"bytes": "3901"
},
{
"name": "Python",
"bytes": "157076"
},
{
"name": "R",
"bytes": "54228"
},
{
"name": "Shell",
"bytes": "558054"
}
],
"symlink_target": ""
}
|
import pandas
import numpy as np
from google.cloud import datastore
from math import floor
import pdb
# Datastore kind that stores chunked blocks of the user-rating matrix.
RATING_KIND = 'Rating'
# Datastore kind for movies (not referenced elsewhere in this module).
MOVIE_KIND = 'Movie'
PROJECT_ID = 'cf-mr-service'
# NOTE: module-level side effect -- a Datastore client is created at import time.
client = datastore.Client(PROJECT_ID)
def load_from_store():
    """Reassemble the user-rating matrix from chunked Datastore entities.

    Each RATING_KIND entity holds a contiguous block of rows serialized with
    numpy tostring(); blocks are concatenated along axis 0.

    Returns the reconstructed 2-D numpy array, or None when no entities exist.
    """
    query = client.query(kind=RATING_KIND)
    result = query.fetch()
    rating = list(result)
    # NOTE(review): fetch order is not explicitly sorted by chunk key --
    # confirm row-block ordering matches how save_to_store() wrote them.
    read_rating = None
    for entity in rating:
        arr = np.fromstring(entity['data_str'], dtype=entity['dtype']).reshape(entity['rows'], entity['cols'])
        if read_rating is not None:
            read_rating = np.append(read_rating, arr, axis=0)
        else:
            read_rating = arr
    # BUG FIX: the original built read_rating but never returned it,
    # so callers always received None.
    return read_rating
def save_to_store():
    # Build the dense user x item rating matrix from 'u.data' (tab-separated
    # user_id/item_id/rating/timestamp -- presumably MovieLens format; confirm),
    # chunk it into Datastore entities, and commit everything in one
    # transaction, also deleting any previously indexed 'User' entities.
    # NOTE: this file is Python 2 (print statements, xrange).
    print 'save to store'
    header = ['user_id', 'item_id', 'rating', 'timestamp']
    rating_data = pandas.read_csv('u.data', sep='\t', names=header)
    n_users = rating_data.user_id.unique().shape[0]
    n_items = rating_data.item_id.unique().shape[0]
    print 'Number of users = ' + str(n_users) + ' | Number of movies = ' + str(n_items)
    # uint8 is enough for small integer ratings
    user_rating = np.zeros((n_users, n_items), dtype='uint8')
    for line in rating_data.itertuples():
        # itertuples: index, user_id, item_id, rating, ... (ids are 1-based)
        user_rating[line[1] - 1, line[2] - 1] = line[3]
    # Rows per entity; 1048487 appears to be the per-entity payload budget
    # (Datastore caps entities at ~1MB -- confirm).
    # split_size = int(floor(1048487.0 * 3 / (4 * n_items)))
    split_size = int(floor(1048487.0 / n_items))
    entity_list = []
    print 'config split size = ' + str(split_size)
    # Persist the split size so readers know how the matrix was chunked.
    config_key = client.key('Config', 'v1.0')
    entity = client.get(key=config_key)
    if entity is None:
        entity = datastore.Entity(key=config_key, exclude_from_indexes=['user_rating_split_size'])
    entity.update({
        'user_rating_split_size': split_size
    })
    entity_list.append(entity)
    # One RATING_KIND entity per block of split_size rows, keyed by block index.
    for i in xrange(0, n_users + 1, split_size):
        print 'split rating data from ' + str(i) + ' to ' + str(i + split_size)
        entity = datastore.Entity(key=client.key(RATING_KIND, str(i / split_size)),
                                  exclude_from_indexes=['rows', 'cols', 'dtype', 'data_str'])
        sub_arr = user_rating[i : i + split_size]
        entity.update({
            'rows': sub_arr.shape[0],
            'cols': sub_arr.shape[1],
            'dtype': str(sub_arr.dtype),
            'data_str': sub_arr.tostring()
        })
        entity_list.append(entity)
    # Collect keys of previously indexed users so the rebuild starts clean.
    print 'prepare deleting indexed users'
    query = client.query(kind='User')
    query.keys_only()
    user_keys = []
    for user in query.fetch():
        print 'users to be delete ' + user.key.name
        user_keys.append(user.key)
    # Atomically write the new chunks and drop the stale user index.
    with client.transaction():
        print 'run transaction'
        client.put_multi(entity_list)
        client.delete_multi(user_keys)
    print 'initialization finished'
if __name__ == '__main__':
    # One-time initialization: upload the chunked rating matrix.
    save_to_store()
    # load_from_store()
|
{
"content_hash": "1b605e8b0503b3b129b3a2a97b1b6292",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 110,
"avg_line_length": 33.25609756097561,
"alnum_prop": 0.6017601760176018,
"repo_name": "komod/cf-movie-recommend",
"id": "2e764231cf330a2c514be79457e8bde518fb1578",
"size": "2727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "initialize_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2401"
},
{
"name": "JavaScript",
"bytes": "7754"
},
{
"name": "Python",
"bytes": "16617"
}
],
"symlink_target": ""
}
|
"""
This module is deprecated.
Please use :mod:`airflow.providers.apache.hive.transfers.vertica_to_hive`.
"""
import warnings
from airflow.providers.apache.hive.transfers.vertica_to_hive import VerticaToHiveOperator
# Emit a deprecation warning as soon as this shim module is imported.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.apache.hive.transfers.vertica_to_hive`.",
    DeprecationWarning,
    stacklevel=2,
)
class VerticaToHiveTransfer(VerticaToHiveOperator):
    """Deprecated alias kept for backwards compatibility.

    This class is deprecated. Please use:
    `airflow.providers.apache.hive.transfers.vertica_to_hive.VerticaToHiveOperator`.
    """

    def __init__(self, *args, **kwargs):
        # Warn at instantiation time as well as at module import.
        warnings.warn(
            """This class is deprecated.
            Please use
            `airflow.providers.apache.hive.transfers.vertica_to_hive.VerticaToHiveOperator`.""",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
|
{
"content_hash": "50fc507f15cfbfc0f79bafa5a49fc7fc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 103,
"avg_line_length": 28.59375,
"alnum_prop": 0.6786885245901639,
"repo_name": "bolkedebruin/airflow",
"id": "49ddea172f374f6c6480ff8137ee841f63a8f782",
"size": "1702",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/vertica_to_hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
"""
Set covering problem in Google CP Solver.
This example is from the OPL example covering.mod
'''
Consider selecting workers to build a house. The construction of a
house can be divided into a number of tasks, each requiring a number of
skills (e.g., plumbing or masonry). A worker may or may not perform a
task, depending on skills. In addition, each worker can be hired for a
cost that also depends on his qualifications. The problem consists of
selecting a set of workers to perform all the tasks, while minimizing the
cost. This is known as a set-covering problem. The key idea in modeling
a set-covering problem as an integer program is to associate a 0/1
variable with each worker to represent whether the worker is hired.
To make sure that all the tasks are performed, it is sufficient to
choose at least one worker by task. This constraint can be expressed by a
simple linear inequality.
'''
Solution from the OPL model (1-based)
'''
Optimal solution found with objective: 14
crew= {23 25 26}
'''
Solution from this model (0-based):
'''
Total cost 14
We should hire these workers: 22 24 25
'''
Compare with the following models:
* Comet: http://hakank.org/comet/covering_opl.co
* MiniZinc: http://hakank.org/minizinc/covering_opl.mzn
* ECLiPSe: http://hakank.org/eclipse/covering_opl.ecl
* Gecode: http://hakank.org/gecode/covering_opl.cpp
* SICStus: http://hakank.org/sicstus/covering_opl.pl
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main():
  """Solve the OPL worker set-covering model and print each improving solution."""
  # Create the solver.
  solver = pywrapcp.Solver("Set covering")

  #
  # data
  #
  nb_workers = 32
  Workers = list(range(nb_workers))
  num_tasks = 15
  Tasks = list(range(num_tasks))
  # Which worker is qualified for each task.
  # Note: This is 1-based and will be made 0-base below.
  Qualified = [[1, 9, 19, 22, 25, 28, 31],
               [2, 12, 15, 19, 21, 23, 27, 29, 30, 31, 32],
               [3, 10, 19, 24, 26, 30, 32], [4, 21, 25, 28, 32],
               [5, 11, 16, 22, 23, 27, 31], [6, 20, 24, 26, 30, 32],
               [7, 12, 17, 25, 30, 31], [8, 17, 20, 22, 23],
               [9, 13, 14, 26, 29, 30, 31], [10, 21, 25, 31, 32],
               [14, 15, 18, 23, 24, 27, 30, 32], [18, 19, 22, 24, 26, 29, 31],
               [11, 20, 25, 28, 30, 32], [16, 19, 23, 31],
               [9, 18, 26, 28, 31, 32]]
  # Cost[w]: hiring cost of (0-based) worker w.
  Cost = [
      1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5,
      5, 6, 6, 6, 7, 8, 9
  ]

  #
  # variables
  #
  # Hire[w] is a 0/1 decision variable: 1 means worker w is hired.
  Hire = [solver.IntVar(0, 1, "Hire[%i]" % w) for w in Workers]
  total_cost = solver.IntVar(0, nb_workers * sum(Cost), "total_cost")

  #
  # constraints
  #
  solver.Add(total_cost == solver.ScalProd(Hire, Cost))
  for j in Tasks:
    # Sum the cost for hiring the qualified workers
    # (also, make 0-base)
    b = solver.Sum([Hire[c - 1] for c in Qualified[j]])
    solver.Add(b >= 1)

  # objective: Minimize total cost
  objective = solver.Minimize(total_cost, 1)

  #
  # search and result
  #
  db = solver.Phase(Hire, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
  solver.NewSearch(db, [objective])
  num_solutions = 0
  # each NextSolution() yields a strictly better incumbent
  while solver.NextSolution():
    num_solutions += 1
    print("Total cost", total_cost.Value())
    print("We should hire these workers: ", end=" ")
    for w in Workers:
      if Hire[w].Value() == 1:
        print(w, end=" ")
    print()
    print()
  solver.EndSearch()
  print()
  print("num_solutions:", num_solutions)
  print("failures:", solver.Failures())
  print("branches:", solver.Branches())
  print("WallTime:", solver.WallTime())


if __name__ == "__main__":
  main()
|
{
"content_hash": "8b9be02cca6b0f4035ce0bdd36cac2c8",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 80,
"avg_line_length": 29.78125,
"alnum_prop": 0.6240818467995802,
"repo_name": "or-tools/or-tools",
"id": "bcdf574c0fad6d4ad130f6f661b2af7e0658194f",
"size": "4412",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/contrib/covering_opl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
}
|
import gc
import os
import time
from datetime import datetime, date, timedelta
from optparse import make_option
from django.core.files.storage import get_storage_class
from django.core.management.base import BaseCommand
from easy_thumbnails.conf import settings
from easy_thumbnails.models import Source
class ThumbnailCollectionCleaner(object):
    """
    Remove thumbnails and DB references to non-existing source images.
    """
    # Run statistics, populated by clean_up() and reported by print_stats().
    sources = 0
    thumbnails = 0
    thumbnails_deleted = 0
    source_refs_deleted = 0
    execution_time = 0

    def _get_absolute_path(self, path):
        # Resolve a storage-relative name against MEDIA_ROOT.
        return os.path.join(settings.MEDIA_ROOT, path)

    def _get_relative_path(self, path):
        # Inverse of _get_absolute_path (not used by clean_up itself).
        return os.path.relpath(path, settings.MEDIA_ROOT)

    def _check_if_exists(self, storage, path):
        # NOTE(review): on a storage error this prints and falls through,
        # implicitly returning None (falsy) -- clean_up then treats the file
        # as missing and may delete records for files that actually exist.
        # Also note the "existance" typo in the user-facing message.
        try:
            return storage.exists(path)
        except Exception as e:
            print("Something went wrong when checking existance of %s:" % path)
            print(str(e))

    def _delete_sources_by_id(self, ids):
        # Bulk-delete Source rows by primary key.
        Source.objects.all().filter(id__in=ids).delete()

    def clean_up(self, dry_run=False, verbosity=1, last_n_days=0,
                 cleanup_path=None, storage=None):
        """
        Iterate through sources. Delete database references to sources
        not existing, including its corresponding thumbnails (files and
        database references).
        """
        if dry_run:
            print ("Dry run...")
        if not storage:
            storage = get_storage_class(settings.THUMBNAIL_DEFAULT_STORAGE)()
        sources_to_delete = []
        time_start = time.time()
        query = Source.objects.all()
        if last_n_days > 0:
            # only sources modified within the last N days
            today = date.today()
            query = query.filter(
                modified__range=(today - timedelta(days=last_n_days), today))
        if cleanup_path:
            query = query.filter(name__startswith=cleanup_path)
        for source in queryset_iterator(query):
            self.sources += 1
            abs_source_path = self._get_absolute_path(source.name)
            if not self._check_if_exists(storage, abs_source_path):
                if verbosity > 0:
                    print ("Source not present:", abs_source_path)
                self.source_refs_deleted += 1
                sources_to_delete.append(source.id)
                # delete every thumbnail file generated from this source
                for thumb in source.thumbnails.all():
                    self.thumbnails_deleted += 1
                    abs_thumbnail_path = self._get_absolute_path(thumb.name)
                    if self._check_if_exists(storage, abs_thumbnail_path):
                        if not dry_run:
                            storage.delete(abs_thumbnail_path)
                        if verbosity > 0:
                            print ("Deleting thumbnail:", abs_thumbnail_path)
            # flush DB deletions in batches to bound query size
            if len(sources_to_delete) >= 1000 and not dry_run:
                self._delete_sources_by_id(sources_to_delete)
                sources_to_delete = []
        if not dry_run:
            self._delete_sources_by_id(sources_to_delete)
        self.execution_time = round(time.time() - time_start)

    def print_stats(self):
        """
        Print statistics about the cleanup performed.
        """
        print(
            "{0:-<48}".format(str(datetime.now().strftime('%Y-%m-%d %H:%M '))))
        print("{0:<40} {1:>7}".format("Sources checked:", self.sources))
        print("{0:<40} {1:>7}".format(
            "Source references deleted from DB:", self.source_refs_deleted))
        print("{0:<40} {1:>7}".format("Thumbnails deleted from disk:",
                                      self.thumbnails_deleted))
        print("(Completed in %s seconds)\n" % self.execution_time)
def queryset_iterator(queryset, chunksize=1000):
    """
    The queryset iterator helps to keep the memory consumption down.
    And also making it easier to process for weaker computers.

    Yields rows in ascending pk order, fetching at most ``chunksize``
    rows per query and running gc.collect() between chunks.
    """
    # BUG FIX: the original did queryset.order_by('-pk')[0], which raises
    # IndexError on an empty queryset; use first() and bail out early.
    last_row = queryset.order_by('-pk').first()
    if last_row is None:
        return
    last_pk = last_row.pk
    primary_key = 0
    queryset = queryset.order_by('pk')
    while primary_key < last_pk:
        for row in queryset.filter(pk__gt=primary_key)[:chunksize]:
            primary_key = row.pk
            yield row
        gc.collect()
class Command(BaseCommand):
    help = """ Deletes thumbnails that no longer have an original file. """

    # NOTE(review): optparse-style option_list -- presumably targets older
    # Django (it was removed from BaseCommand in later releases); confirm
    # the supported Django range before porting to add_arguments().
    option_list = BaseCommand.option_list + (
        make_option(
            '--dry-run',
            action='store_true',
            dest='dry_run',
            default=False,
            help='Dry run the execution.'),
        make_option(
            '--last-n-days',
            action='store',
            dest='last_n_days',
            default=0,
            type='int',
            help='The number of days back in time to clean thumbnails for.'),
        make_option(
            '--path',
            action='store',
            dest='cleanup_path',
            type='string',
            help='Specify a path to clean up.'),
    )

    def handle(self, *args, **options):
        # Entry point: run the cleaner with the parsed CLI options.
        tcc = ThumbnailCollectionCleaner()
        tcc.clean_up(
            dry_run=options.get('dry_run', False),
            verbosity=int(options.get('verbosity', 1)),
            last_n_days=int(options.get('last_n_days', 0)),
            cleanup_path=options.get('cleanup_path'))
        tcc.print_stats()
|
{
"content_hash": "1f0ea74084d9a7f11c8e653f16a1adec",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 34.627450980392155,
"alnum_prop": 0.569271423178558,
"repo_name": "siovene/easy-thumbnails",
"id": "be28b55e36bc7fffc263c77f764983e7824c4455",
"size": "5298",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "easy_thumbnails/management/commands/thumbnail_cleanup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "246440"
}
],
"symlink_target": ""
}
|
from collections import (
OrderedDict,
abc,
)
from datetime import (
date,
datetime,
timedelta,
)
import functools
import itertools
import re
import warnings
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pytest
import pytz
from pandas.compat import np_version_under1p19
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
IntervalDtype,
PandasDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
MultiIndex,
Period,
RangeIndex,
Series,
Timedelta,
Timestamp,
cut,
date_range,
isna,
)
import pandas._testing as tm
from pandas.arrays import (
DatetimeArray,
IntervalArray,
PeriodArray,
SparseArray,
)
from pandas.core.api import Int64Index
# Dtype families used by tests that build frames mixing several numeric dtypes.
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
    "uint8",
    "uint16",
    "uint32",
    "uint64",
    "int8",
    "int16",
    "int32",
    "int64",
]
class TestDataFrameConstructors:
def test_construct_ndarray_with_nas_and_int_dtype(self):
    # GH#26919 match Series by not casting np.nan to meaningless int
    arr = np.array([[1, np.nan], [2, 3]])
    df = DataFrame(arr, dtype="i8")
    assert df.values.dtype == arr.dtype
    assert isna(df.iloc[0, 1])
    # check this matches Series behavior
    ser = Series(arr[0], dtype="i8", name=0)
    expected = df.iloc[0]
    tm.assert_series_equal(ser, expected)
def test_construct_from_list_of_datetimes(self):
    # a list of datetimes is inferred to datetime64[ns]
    df = DataFrame([datetime.now(), datetime.now()])
    assert df[0].dtype == np.dtype("M8[ns]")
def test_constructor_from_tzaware_datetimeindex(self):
    # don't cast a DatetimeIndex WITH a tz, leave as object
    # GH#6032
    naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
    idx = naive.tz_localize("US/Pacific")
    expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
    assert expected.dtype == idx.dtype
    # convert index to series
    result = Series(idx)
    tm.assert_series_equal(result, expected)
def test_array_of_dt64_nat_with_td64dtype_raises(self, frame_or_series):
    # GH#39462
    nat = np.datetime64("NaT", "ns")
    arr = np.array([nat], dtype=object)
    if frame_or_series is DataFrame:
        arr = arr.reshape(1, 1)
    # either error message is acceptable depending on the construction path
    msg = "|".join(
        [
            "Could not convert object to NumPy timedelta",
            "Invalid type for timedelta scalar: <class 'numpy.datetime64'>",
        ]
    )
    with pytest.raises(ValueError, match=msg):
        frame_or_series(arr, dtype="m8[ns]")
@pytest.mark.parametrize("kind", ["m", "M"])
def test_datetimelike_values_with_object_dtype(self, kind, frame_or_series):
    # with dtype=object, we should cast dt64 values to Timestamps, not pydatetimes
    if kind == "M":
        dtype = "M8[ns]"
        scalar_type = Timestamp
    else:
        dtype = "m8[ns]"
        scalar_type = Timedelta
    arr = np.arange(6, dtype="i8").view(dtype).reshape(3, 2)
    if frame_or_series is Series:
        arr = arr[:, 0]
    obj = frame_or_series(arr, dtype=object)
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)
    # go through a different path in internals.construction
    obj = frame_or_series(frame_or_series(arr), dtype=object)
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)
    obj = frame_or_series(frame_or_series(arr), dtype=PandasDtype(object))
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)
    if frame_or_series is DataFrame:
        # other paths through internals.construction
        sers = [Series(x) for x in arr]
        obj = frame_or_series(sers, dtype=object)
        assert obj._mgr.arrays[0].dtype == object
        assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)
def test_series_with_name_not_matching_column(self):
    # GH#9232
    # a Series whose name does not match the requested column yields an
    # empty column rather than reusing the data
    x = Series(range(5), name=1)
    y = Series(range(5), name=0)
    result = DataFrame(x, columns=[0])
    expected = DataFrame([], columns=[0])
    tm.assert_frame_equal(result, expected)
    result = DataFrame(y, columns=[1])
    expected = DataFrame([], columns=[1])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "constructor",
    [
        lambda: DataFrame(),
        lambda: DataFrame(None),
        lambda: DataFrame({}),
        lambda: DataFrame(()),
        lambda: DataFrame([]),
        lambda: DataFrame(_ for _ in []),
        lambda: DataFrame(range(0)),
        lambda: DataFrame(data=None),
        lambda: DataFrame(data={}),
        lambda: DataFrame(data=()),
        lambda: DataFrame(data=[]),
        lambda: DataFrame(data=(_ for _ in [])),
        lambda: DataFrame(data=range(0)),
    ],
)
def test_empty_constructor(self, constructor):
    # every "empty" input produces an empty frame equal to DataFrame()
    expected = DataFrame()
    result = constructor()
    assert len(result.index) == 0
    assert len(result.columns) == 0
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "emptylike,expected_index,expected_columns",
    [
        ([[]], RangeIndex(1), RangeIndex(0)),
        ([[], []], RangeIndex(2), RangeIndex(0)),
        ([(_ for _ in [])], RangeIndex(1), RangeIndex(0)),
    ],
)
def test_emptylike_constructor(self, emptylike, expected_index, expected_columns):
    # list-of-empty-rows inputs keep a row index but no columns
    expected = DataFrame(index=expected_index, columns=expected_columns)
    result = DataFrame(emptylike)
    tm.assert_frame_equal(result, expected)
def test_constructor_mixed(self, float_string_frame):
    index, data = tm.getMixedTypeDict()
    # TODO(wesm), incomplete test?
    indexed_frame = DataFrame(data, index=index)  # noqa
    unindexed_frame = DataFrame(data)  # noqa
    assert float_string_frame["foo"].dtype == np.object_
def test_constructor_cast_failure(self):
    # casting string data to float warns (and keeps object dtype)
    msg = "either all columns will be cast to that dtype, or a TypeError will"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64)
    assert foo["a"].dtype == object
    # GH 3010, constructing with odd arrays
    df = DataFrame(np.ones((4, 2)))
    # this is ok
    df["foo"] = np.ones((4, 2)).tolist()
    # this is not ok
    msg = "Expected a 1D array, got an array with shape \\(4, 2\\)"
    with pytest.raises(ValueError, match=msg):
        df["test"] = np.ones((4, 2))
    # this is ok
    df["foo2"] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
    # copy=True must decouple the new frame from the original's data
    orig_df = DataFrame({"col1": [1.0], "col2": [2.0], "col3": [3.0]})
    new_df = DataFrame(orig_df, dtype=float, copy=True)
    new_df["col1"] = 200.0
    assert orig_df["col1"][0] == 1.0
def test_constructor_dtype_nocast_view_dataframe(self):
    # same-dtype construction from a frame keeps a view (mutations propagate)
    df = DataFrame([[1, 2]])
    should_be_view = DataFrame(df, dtype=df[0].dtype)
    should_be_view[0][0] = 99
    assert df.values[0, 0] == 99
@td.skip_array_manager_invalid_test  # TODO(ArrayManager) keep view on 2D array?
def test_constructor_dtype_nocast_view_2d_array(self):
    df = DataFrame([[1, 2]])
    should_be_view = DataFrame(df.values, dtype=df[0].dtype)
    should_be_view[0][0] = 97
    assert df.values[0, 0] == 97
@td.skip_array_manager_invalid_test
def test_1d_object_array_does_not_copy(self):
    # https://github.com/pandas-dev/pandas/issues/39272
    arr = np.array(["a", "b"], dtype="object")
    df = DataFrame(arr)
    assert np.shares_memory(df.values, arr)
@td.skip_array_manager_invalid_test
def test_2d_object_array_does_not_copy(self):
    # https://github.com/pandas-dev/pandas/issues/39272
    arr = np.array([["a", "b"], ["c", "d"]], dtype="object")
    df = DataFrame(arr)
    assert np.shares_memory(df.values, arr)
def test_constructor_dtype_list_data(self):
    # object dtype preserves None and string values as-is
    df = DataFrame([[1, "2"], [None, "a"]], dtype=object)
    assert df.loc[1, 0] is None
    assert df.loc[0, 1] == "2"
@pytest.mark.skipif(np_version_under1p19, reason="NumPy change.")
def test_constructor_list_of_2d_raises(self):
    # https://github.com/pandas-dev/pandas/issues/32289
    a = DataFrame()
    b = np.empty((0, 0))
    with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
        DataFrame([a])
    with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
        DataFrame([b])
    a = DataFrame({"A": [1, 2]})
    with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"):
        DataFrame([a, a])
def test_constructor_mixed_dtypes(self):
    # helper: build a frame with one column per dtype in the family
    def _make_mixed_dtypes_df(typ, ad=None):
        if typ == "int":
            dtypes = MIXED_INT_DTYPES
            arrays = [np.array(np.random.rand(10), dtype=d) for d in dtypes]
        elif typ == "float":
            dtypes = MIXED_FLOAT_DTYPES
            arrays = [
                np.array(np.random.randint(10, size=10), dtype=d) for d in dtypes
            ]
        for d, a in zip(dtypes, arrays):
            assert a.dtype == d
        if ad is None:
            ad = {}
        ad.update({d: a for d, a in zip(dtypes, arrays)})
        return DataFrame(ad)
    # helper: every dtype-named column must keep its own dtype
    def _check_mixed_dtypes(df, dtypes=None):
        if dtypes is None:
            dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
        for d in dtypes:
            if d in df:
                assert df.dtypes[d] == d
    # mixed floating and integer coexist in the same frame
    df = _make_mixed_dtypes_df("float")
    _check_mixed_dtypes(df)
    # add lots of types
    df = _make_mixed_dtypes_df("float", {"A": 1, "B": "foo", "C": "bar"})
    _check_mixed_dtypes(df)
    # GH 622
    df = _make_mixed_dtypes_df("int")
    _check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
    # GH10952
    # complex64/complex128 columns keep their dtypes
    a = np.random.rand(10).astype(np.complex64)
    b = np.random.rand(10).astype(np.complex128)
    df = DataFrame({"a": a, "b": b})
    assert a.dtype == df.a.dtype
    assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
    # https://github.com/pandas-dev/pandas/issues/21083
    df = DataFrame({"A": ["x", None]}, dtype=string_dtype)
    result = df.isna()
    expected = DataFrame({"A": [False, True]})
    tm.assert_frame_equal(result, expected)
    assert df.iloc[1, 0] is None
    # np.nan stays a float NaN (not converted to None)
    df = DataFrame({"A": ["x", np.nan]}, dtype=string_dtype)
    assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self, float_frame):
    # construction from a numpy record array preserves field names as columns
    rec = float_frame.to_records(index=False)
    rec.dtype.names = list(rec.dtype.names)[::-1]
    index = float_frame.index
    df = DataFrame(rec)
    tm.assert_index_equal(df.columns, Index(rec.dtype.names))
    df2 = DataFrame(rec, index=index)
    tm.assert_index_equal(df2.columns, Index(rec.dtype.names))
    tm.assert_index_equal(df2.index, index)
    # case with columns != the ones we would infer from the data
    rng = np.arange(len(rec))[::-1]
    df3 = DataFrame(rec, index=rng, columns=["C", "B"])
    expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"])
    tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
    # all-bool input keeps bool dtype in .values
    df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)})
    assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
    # see gh-14881
    values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64)
    result = DataFrame({"a": values})
    assert result["a"].dtype == np.uint64
    # see gh-2355
    data_scores = [
        (6311132704823138710, 273),
        (2685045978526272070, 23),
        (8921811264899370420, 45),
        (17019687244989530680, 270),
        (9930107427299601010, 273),
    ]
    dtype = [("uid", "u8"), ("score", "u8")]
    data = np.zeros((len(data_scores),), dtype=dtype)
    data[:] = data_scores
    df_crawls = DataFrame(data)
    assert df_crawls["uid"].dtype == np.uint64
@pytest.mark.parametrize(
    "values",
    [
        np.array([2 ** 64], dtype=object),
        np.array([2 ** 65]),
        [2 ** 64 + 1],
        np.array([-(2 ** 63) - 4], dtype=object),
        np.array([-(2 ** 64) - 1]),
        [-(2 ** 65) - 2],
    ],
)
def test_constructor_int_overflow(self, values):
    # see gh-18584
    # ints outside the int64/uint64 range fall back to object dtype
    value = values[0]
    result = DataFrame(values)
    assert result[0].dtype == object
    assert result[0][0] == value
def test_constructor_ordereddict(self):
    import random
    # column order must follow the OrderedDict's insertion order
    nitems = 100
    nums = list(range(nitems))
    random.shuffle(nums)
    expected = [f"A{i:d}" for i in nums]
    df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
    assert expected == list(df.columns)
def test_constructor_dict(self):
    datetime_series = tm.makeTimeSeries(nper=30)
    # test expects index shifted by 5
    datetime_series_short = tm.makeTimeSeries(nper=30)[5:]
    frame = DataFrame({"col1": datetime_series, "col2": datetime_series_short})
    # col2 is padded with NaN
    assert len(datetime_series) == 30
    assert len(datetime_series_short) == 25
    tm.assert_series_equal(frame["col1"], datetime_series.rename("col1"))
    exp = Series(
        np.concatenate([[np.nan] * 5, datetime_series_short.values]),
        index=datetime_series.index,
        name="col2",
    )
    tm.assert_series_equal(exp, frame["col2"])
    frame = DataFrame(
        {"col1": datetime_series, "col2": datetime_series_short},
        columns=["col2", "col3", "col4"],
    )
    assert len(frame) == len(datetime_series_short)
    assert "col1" not in frame
    assert isna(frame["col3"]).all()
    # Corner cases
    assert len(DataFrame()) == 0
    # mix dict and array, wrong size - no spec for which error should raise
    # first
    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    with pytest.raises(ValueError, match=msg):
        DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})
def test_constructor_dict_length1(self):
    # Length-one dict micro-optimization
    frame = DataFrame({"A": {"1": 1, "2": 2}})
    tm.assert_index_equal(frame.index, Index(["1", "2"]))
def test_constructor_dict_with_index(self):
    # empty dict plus index
    # the passed Index object is reused, not copied
    idx = Index([0, 1, 2])
    frame = DataFrame({}, index=idx)
    assert frame.index is idx
def test_constructor_dict_with_index_and_columns(self):
    # empty dict with index and columns
    idx = Index([0, 1, 2])
    frame = DataFrame({}, index=idx, columns=idx)
    assert frame.index is idx
    assert frame.columns is idx
    assert len(frame._series) == 3
def test_constructor_dict_of_empty_lists(self):
    # with dict of empty list and Series
    frame = DataFrame({"A": [], "B": []}, columns=["A", "B"])
    tm.assert_index_equal(frame.index, RangeIndex(0), exact=True)
def test_constructor_dict_with_none(self):
    # GH 14381
    # Dict with None value
    frame_none = DataFrame({"a": None}, index=[0])
    frame_none_list = DataFrame({"a": [None]}, index=[0])
    assert frame_none._get_value(0, "a") is None
    assert frame_none_list._get_value(0, "a") is None
    tm.assert_frame_equal(frame_none, frame_none_list)
def test_constructor_dict_errors(self):
    # GH10856
    # dict with scalar values should raise error, even if columns passed
    msg = "If using all scalar values, you must pass an index"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": 0.7})
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": 0.7}, columns=["a"])
@pytest.mark.parametrize("scalar", [2, np.nan, None, "D"])
def test_constructor_invalid_items_unused(self, scalar):
    # No error if invalid (scalar) value is in fact not used:
    result = DataFrame({"a": scalar}, columns=["b"])
    expected = DataFrame(columns=["b"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
    # GH 18455
    # NaN/None dict keys must still align correctly as column labels
    cols = [1, value, 3]
    idx = ["a", value]
    values = [[0, 3], [1, 4], [2, 5]]
    data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
    result = DataFrame(data).sort_values(1).sort_values("a", axis=1)
    expected = DataFrame(
        np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
    )
    tm.assert_frame_equal(result, expected)
    result = DataFrame(data, index=idx).sort_values("a", axis=1)
    tm.assert_frame_equal(result, expected)
    result = DataFrame(data, index=idx, columns=cols)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float("nan")])
def test_constructor_dict_nan_tuple_key(self, value):
    """Tuple keys containing NaN work as MultiIndex-like labels (GH 18455)."""
    # GH 18455
    cols = Index([(11, 21), (value, 22), (13, value)])
    idx = Index([("a", value), (value, 2)])
    values = [[0, 3], [1, 4], [2, 5]]
    data = {cols[c]: Series(values[c], index=idx) for c in range(3)}

    result = DataFrame(data).sort_values((11, 21)).sort_values(("a", value), axis=1)
    expected = DataFrame(
        np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
    )
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx).sort_values(("a", value), axis=1)
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx, columns=cols)
    tm.assert_frame_equal(result, expected)
def test_constructor_dict_order_insertion(self):
    """Column order follows dict insertion order (GH19018)."""
    datetime_series = tm.makeTimeSeries(nper=30)
    datetime_series_short = tm.makeTimeSeries(nper=25)

    # GH19018
    # initialization ordering: by insertion order if python>= 3.6
    d = {"b": datetime_series_short, "a": datetime_series}
    frame = DataFrame(data=d)
    expected = DataFrame(data=d, columns=list("ba"))
    tm.assert_frame_equal(frame, expected)
def test_constructor_dict_nan_key_and_columns(self):
# GH 16894
result = DataFrame({np.nan: [1, 2], 2: [2, 3]}, columns=[np.nan, 2])
expected = DataFrame([[1, 2], [2, 3]], columns=[np.nan, 2])
tm.assert_frame_equal(result, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert isna(df).values.ravel().all()
def test_constructor_2d_index(self):
    """Nested-list columns/index inputs become MultiIndexes (GH 25416)."""
    # GH 25416
    # handling of 2d index in construction
    df = DataFrame([[1]], columns=[[1]], index=[1, 2])
    expected = DataFrame(
        [1, 1],
        index=Int64Index([1, 2], dtype="int64"),
        columns=MultiIndex(levels=[[1]], codes=[[0]]),
    )
    tm.assert_frame_equal(df, expected)

    df = DataFrame([[1]], columns=[[1]], index=[[1, 2]])
    expected = DataFrame(
        [1, 1],
        index=MultiIndex(levels=[[1, 2]], codes=[[0, 1]]),
        columns=MultiIndex(levels=[[1]], codes=[[0]]),
    )
    tm.assert_frame_equal(df, expected)
def test_constructor_error_msgs(self):
    """Exercise the exact ValueError messages raised for malformed inputs."""
    msg = "Empty data passed with indices specified."
    # passing an empty array with columns specified.
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.empty(0), columns=list("abc"))

    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    # mix dict and array, wrong size
    with pytest.raises(ValueError, match=msg):
        DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})

    # wrong size ndarray, GH 3105
    msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(
            np.arange(12).reshape((4, 3)),
            columns=["foo", "bar", "baz"],
            index=date_range("2000-01-01", periods=3),
        )

    arr = np.array([[4, 5, 6]])
    msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(index=[0], columns=range(0, 4), data=arr)

    arr = np.array([4, 5, 6])
    msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(index=[0], columns=range(0, 4), data=arr)

    # higher dim raise exception
    with pytest.raises(ValueError, match="Must pass 2-d input"):
        DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1])

    # wrong size axis labels
    msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.random.rand(2, 3), columns=["A", "B", "C"], index=[1])

    msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.random.rand(2, 3), columns=["A", "B"], index=[1, 2])

    # gh-26429
    msg = "2 columns passed, passed data had 10 columns"
    with pytest.raises(ValueError, match=msg):
        DataFrame((range(10), range(10, 20)), columns=("ones", "twos"))

    msg = "If using all scalar values, you must pass an index"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": False, "b": True})
def test_constructor_subclass_dict(self, dict_subclass):
    """dict subclasses are accepted both as column values and as the top-level mapping."""
    # Test for passing dict subclass to constructor
    data = {
        "col1": dict_subclass((x, 10.0 * x) for x in range(10)),
        "col2": dict_subclass((x, 20.0 * x) for x in range(10)),
    }
    df = DataFrame(data)
    refdf = DataFrame({col: dict(val.items()) for col, val in data.items()})
    tm.assert_frame_equal(refdf, df)

    data = dict_subclass(data.items())
    df = DataFrame(data)
    tm.assert_frame_equal(refdf, df)
def test_constructor_defaultdict(self, float_frame):
    """defaultdict values behave like plain dicts; NaNs survive the round trip."""
    # try with defaultdict
    from collections import defaultdict

    data = {}
    # inject NaNs so the round trip exercises missing data
    float_frame["B"][:10] = np.nan
    for k, v in float_frame.items():
        dct = defaultdict(dict)
        dct.update(v.to_dict())
        data[k] = dct
    frame = DataFrame(data)
    expected = frame.reindex(index=float_frame.index)
    tm.assert_frame_equal(float_frame, expected)
def test_constructor_dict_block(self):
expected = np.array([[4.0, 3.0, 2.0, 1.0]])
df = DataFrame(
{"d": [4.0], "c": [3.0], "b": [2.0], "a": [1.0]},
columns=["d", "c", "b", "a"],
)
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame["B"].dtype == np.float64
assert frame["A"].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame["B"].dtype == np.object_
assert frame["A"].dtype == np.float64
def test_constructor_dict_cast2(self):
    """Requesting an impossible cast warns and falls back per-column."""
    # can't cast
    test_data = {
        "A": dict(zip(range(20), tm.makeStringIndex(20))),
        "B": dict(zip(range(15), np.random.randn(15))),
    }
    msg = "either all columns will be cast to that dtype, or a TypeError will"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        frame = DataFrame(test_data, dtype=float)

    assert len(frame) == 20
    # the string column could not be cast and stays object
    assert frame["A"].dtype == np.object_
    assert frame["B"].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {"Col1": {"Row1": "A String", "Row2": np.nan}}
df = DataFrame(d)
assert isinstance(df["Col1"]["Row2"], float)
def test_constructor_dict_dont_upcast2(self):
dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {"a": (1, 2, 3), "b": (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in data.items()})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_of_ranges(self):
# GH 26356
data = {"a": range(3), "b": range(3, 6)}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_iterators(self):
# GH 26349
data = {"a": iter(range(3)), "b": reversed(range(3))}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_generators(self):
# GH 26349
data = {"a": (i for i in (range(3))), "b": (i for i in reversed(range(3)))}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_multiindex(self):
    """Nested dicts with tuple keys produce MultiIndex rows/columns."""

    def check(result, expected):
        # strict comparison: dtypes, index/column types and names must match
        return tm.assert_frame_equal(
            result,
            expected,
            check_dtype=True,
            check_index_type=True,
            check_column_type=True,
            check_names=True,
        )

    d = {
        ("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2},
        ("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4},
        ("b", "c"): {("i", "i"): 7, ("i", "j"): 8, ("j", "i"): 9},
    }
    _d = sorted(d.items())
    df = DataFrame(d)
    expected = DataFrame(
        [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])
    ).T
    expected.index = MultiIndex.from_tuples(expected.index)
    check(df, expected)

    # adding a non-tuple key forces plain (non-tupleized) object indexes
    d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111}
    _d.insert(0, ("z", d["z"]))
    expected = DataFrame(
        [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)
    ).T
    expected.index = Index(expected.index, tupleize_cols=False)
    df = DataFrame(d)
    df = df.reindex(columns=expected.columns, index=expected.index)
    check(df, expected)
def test_constructor_dict_datetime64_index(self):
    """Dicts keyed by datetime-like scalars build a datetime64 index (GH 10160)."""
    # GH 10160
    dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]

    def create_data(constructor):
        # column i holds a single entry at the i-th date
        return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)}

    data_datetime64 = create_data(np.datetime64)
    data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
    data_Timestamp = create_data(Timestamp)

    expected = DataFrame(
        [
            {0: 0, 1: None, 2: None, 3: None},
            {0: None, 1: 2, 2: None, 3: None},
            {0: None, 1: None, 2: 4, 3: None},
            {0: None, 1: None, 2: None, 3: 6},
        ],
        index=[Timestamp(dt) for dt in dates_as_str],
    )

    result_datetime64 = DataFrame(data_datetime64)
    result_datetime = DataFrame(data_datetime)
    result_Timestamp = DataFrame(data_Timestamp)
    tm.assert_frame_equal(result_datetime64, expected)
    tm.assert_frame_equal(result_datetime, expected)
    tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
    """Dicts keyed by timedelta-like scalars build a timedelta64 index (GH 10160)."""
    # GH 10160
    td_as_int = [1, 2, 3, 4]

    def create_data(constructor):
        # column i holds a single entry at the i-th timedelta
        return {i: {constructor(s): 2 * i} for i, s in enumerate(td_as_int)}

    data_timedelta64 = create_data(lambda x: np.timedelta64(x, "D"))
    data_timedelta = create_data(lambda x: timedelta(days=x))
    data_Timedelta = create_data(lambda x: Timedelta(x, "D"))

    expected = DataFrame(
        [
            {0: 0, 1: None, 2: None, 3: None},
            {0: None, 1: 2, 2: None, 3: None},
            {0: None, 1: None, 2: 4, 3: None},
            {0: None, 1: None, 2: None, 3: 6},
        ],
        index=[Timedelta(td, "D") for td in td_as_int],
    )

    result_timedelta64 = DataFrame(data_timedelta64)
    result_timedelta = DataFrame(data_timedelta)
    result_Timedelta = DataFrame(data_Timedelta)
    tm.assert_frame_equal(result_timedelta64, expected)
    tm.assert_frame_equal(result_timedelta, expected)
    tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period_dict(self):
    """PeriodIndex values and lists of Periods keep their period dtype."""
    # PeriodIndex
    a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M")
    b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D")
    df = DataFrame({"a": a, "b": b})
    assert df["a"].dtype == a.dtype
    assert df["b"].dtype == b.dtype

    # list of periods
    df = DataFrame({"a": a.astype(object).tolist(), "b": b.astype(object).tolist()})
    assert df["a"].dtype == a.dtype
    assert df["b"].dtype == b.dtype
def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype):
    """An extension-type scalar broadcasts and keeps its extension dtype."""
    ea_scalar, ea_dtype = ea_scalar_and_dtype
    df = DataFrame({"a": ea_scalar}, index=[0])
    assert df["a"].dtype == ea_dtype

    expected = DataFrame(index=[0], columns=["a"], data=ea_scalar)
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
    "data,dtype",
    [
        (Period("2020-01"), PeriodDtype("M")),
        (Interval(left=0, right=5), IntervalDtype("int64", "right")),
        (
            Timestamp("2011-01-01", tz="US/Eastern"),
            DatetimeTZDtype(tz="US/Eastern"),
        ),
    ],
)
def test_constructor_extension_scalar_data(self, data, dtype):
    """Extension scalars broadcast over both axes keeping dtype (GH 34832)."""
    # GH 34832
    df = DataFrame(index=[0, 1], columns=["a", "b"], data=data)

    assert df["a"].dtype == dtype
    assert df["b"].dtype == dtype

    arr = pd.array([data] * 2, dtype=dtype)
    expected = DataFrame({"a": arr, "b": arr})

    tm.assert_frame_equal(df, expected)
def test_nested_dict_frame_constructor(self):
rng = pd.period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df._get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df._get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
    """Shared checks for ndarray-like input; `empty` is an array factory
    such as np.ones or ma.masked_all producing arrays of a given shape."""
    # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
    # objects
    mat = empty((2, 3), dtype=float)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3

    # 1-D input
    frame = DataFrame(empty((3,)), columns=["A"], index=[1, 2, 3])
    assert len(frame.index) == 3
    assert len(frame.columns) == 1

    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64)
    if empty is np.ones:
        # passing dtype casts
        assert frame.values.dtype == np.int64
    else:
        # i.e. ma.masked_all
        # Since we have NaNs, refuse to cast to int dtype, which would take NaN
        # to meaningless integers. This matches Series behavior. GH#26919
        assert frame.isna().all().all()
        assert frame.values.dtype == np.float64
        assert isna(frame.values).all()

    # wrong size axis labels
    msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(mat, columns=["A", "B", "C"], index=[1])
    msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(mat, columns=["A", "B"], index=[1, 2])

    # higher dim raise exception
    with pytest.raises(ValueError, match="Must pass 2-d input"):
        DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1])

    # automatic labeling
    frame = DataFrame(mat)
    tm.assert_index_equal(frame.index, Index(range(2)), exact=True)
    tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)

    frame = DataFrame(mat, index=[1, 2])
    tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)

    frame = DataFrame(mat, columns=["A", "B", "C"])
    tm.assert_index_equal(frame.index, Index(range(2)), exact=True)

    # 0-length axis
    frame = DataFrame(empty((0, 3)))
    assert len(frame.index) == 0

    frame = DataFrame(empty((3, 0)))
    assert len(frame.columns) == 0
def test_constructor_ndarray(self):
    """Run the shared ndarray checks, plus a 1-D object-array case."""
    self._check_basic_constructor(np.ones)

    frame = DataFrame(["foo", "bar"], index=[0, 1], columns=["A"])
    assert len(frame) == 2
def test_constructor_maskedarray(self):
    """Run the shared checks with masked arrays; unmasked cells come through."""
    self._check_basic_constructor(ma.masked_all)

    # Check non-masked values
    mat = ma.masked_all((2, 3), dtype=float)
    mat[0, 0] = 1.0
    mat[1, 2] = 2.0
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
    assert 1.0 == frame["A"][1]
    assert 2.0 == frame["C"][2]

    # what is this even checking??
    # (an all-masked frame is all NaN; NaN != NaN, so every elementwise
    # self-comparison is False)
    mat = ma.masked_all((2, 3), dtype=float)
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
    assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
    """Masked int/datetime/bool arrays: masked cells become NaN/NaT/None."""
    # masked int promoted to float
    mat = ma.masked_all((2, 3), dtype=int)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    # all-NaN: NaN != NaN, so every elementwise self-comparison is False
    assert np.all(~np.asarray(frame == frame))

    # cast type
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.float64)
    assert frame.values.dtype == np.float64

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = 1
    mat2[1, 2] = 2
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    assert 1 == frame["A"][1]
    assert 2 == frame["C"][2]

    # masked np.datetime64 stays (use NaT as null)
    mat = ma.masked_all((2, 3), dtype="M8[ns]")
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    assert isna(frame).values.all()

    # cast type
    msg = r"datetime64\[ns\] values and dtype=int64"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                message="elementwise comparison failed",
            )
            frame = DataFrame(
                mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64
            )
    assert frame.values.dtype == np.int64

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = 1
    mat2[1, 2] = 2
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    # compare via the underlying i8 representation
    assert 1 == frame["A"].view("i8")[1]
    assert 2 == frame["C"].view("i8")[2]

    # masked bool promoted to object
    mat = ma.masked_all((2, 3), dtype=bool)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    assert np.all(~np.asarray(frame == frame))

    # cast type
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=object)
    assert frame.values.dtype == object

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = True
    mat2[1, 2] = False
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    assert frame["A"][1] is True
    assert frame["C"][2] is False
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()
result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = DataFrame(
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
# Check case where mask is hard but no data are masked
mat_hard = ma.ones((2, 2), dtype=float).harden_mask()
result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = DataFrame(
{"A": [1.0, 1.0], "B": [1.0, 1.0]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
def test_constructor_maskedrecarray_dtype(self):
    """mrecarray input honors dtype; the path is deprecated (GH#40363)."""
    # Ensure constructor honors dtype
    data = np.ma.array(
        np.ma.zeros(5, dtype=[("date", "<f8"), ("price", "<f8")]), mask=[False] * 5
    )
    data = data.view(mrecords.mrecarray)

    with tm.assert_produces_warning(FutureWarning):
        # Support for MaskedRecords deprecated
        result = DataFrame(data, dtype=int)

    expected = DataFrame(np.zeros((5, 2), dtype=int), columns=["date", "price"])
    tm.assert_frame_equal(result, expected)

    # GH#40363 check that the alternative suggested in the deprecation
    # warning behaves as expected
    alt = DataFrame({name: data[name] for name in data.dtype.names}, dtype=int)
    tm.assert_frame_equal(result, alt)
@pytest.mark.slow
def test_constructor_mrecarray(self):
    """mrecarray input matches dict-of-masked-arrays construction (GH3479)."""
    # Ensure mrecarray produces frame identical to dict of masked arrays
    # from GH3479

    assert_fr_equal = functools.partial(
        tm.assert_frame_equal, check_index_type=True, check_column_type=True
    )
    arrays = [
        ("float", np.array([1.5, 2.0])),
        ("int", np.array([1, 2])),
        ("str", np.array(["abc", "def"])),
    ]
    # iterate a snapshot while appending masked variants of each array
    for name, arr in arrays[:]:
        arrays.append(
            ("masked1_" + name, np.ma.masked_array(arr, mask=[False, True]))
        )
    arrays.append(("masked_all", np.ma.masked_all((2,))))
    arrays.append(("masked_none", np.ma.masked_array([1.0, 2.5], mask=False)))

    # call assert_frame_equal for all selections of 3 arrays
    for comb in itertools.combinations(arrays, 3):
        names, data = zip(*comb)
        mrecs = mrecords.fromarrays(data, names=names)

        # fill the comb
        comb = {k: (v.filled() if hasattr(v, "filled") else v) for k, v in comb}

        with tm.assert_produces_warning(FutureWarning):
            # Support for MaskedRecords deprecated
            result = DataFrame(mrecs)
        expected = DataFrame(comb, columns=names)
        assert_fr_equal(result, expected)

        # specify columns
        with tm.assert_produces_warning(FutureWarning):
            # Support for MaskedRecords deprecated
            result = DataFrame(mrecs, columns=names[::-1])
        expected = DataFrame(comb, columns=names[::-1])
        assert_fr_equal(result, expected)

        # specify index
        with tm.assert_produces_warning(FutureWarning):
            # Support for MaskedRecords deprecated
            result = DataFrame(mrecs, index=[1, 2])
        expected = DataFrame(comb, columns=names, index=[1, 2])
        assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize(
    "data, index, columns, dtype, expected",
    [
        (None, list(range(10)), ["a", "b"], object, np.object_),
        (None, None, ["a", "b"], "int64", np.dtype("int64")),
        (None, list(range(10)), ["a", "b"], int, np.dtype("float64")),
        ({}, None, ["foo", "bar"], None, np.object_),
        ({"b": 1}, list(range(10)), list("abc"), int, np.dtype("float64")),
    ],
)
def test_constructor_dtype(self, data, index, columns, dtype, expected):
    """The requested dtype (or its NaN-compatible upcast) is honored."""
    df = DataFrame(data, index, columns, dtype)
    assert df.values.dtype == expected
@pytest.mark.parametrize(
    "data,input_dtype,expected_dtype",
    (
        ([True, False, None], "boolean", pd.BooleanDtype),
        ([1.0, 2.0, None], "Float64", pd.Float64Dtype),
        ([1, 2, None], "Int64", pd.Int64Dtype),
        (["a", "b", "c"], "string", pd.StringDtype),
    ),
)
def test_constructor_dtype_nullable_extension_arrays(
    self, data, input_dtype, expected_dtype
):
    """String dtype aliases resolve to the nullable extension dtypes."""
    df = DataFrame({"a": data}, dtype=input_dtype)
    assert df["a"].dtype == expected_dtype()
def test_constructor_scalar_inference(self):
data = {"int": 1, "bool": True, "float": 3.0, "complex": 4j, "object": "foo"}
df = DataFrame(data, index=np.arange(10))
assert df["int"].dtype == np.int64
assert df["bool"].dtype == np.bool_
assert df["float"].dtype == np.float64
assert df["complex"].dtype == np.complex128
assert df["object"].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
    """Scalars broadcast to the array length; all-scalar input needs an index."""
    df = DataFrame({"a": np.random.randn(10), "b": True})
    exp = DataFrame({"a": df["a"].values, "b": [True] * 10})

    tm.assert_frame_equal(df, exp)
    with pytest.raises(ValueError, match="must pass an index"):
        DataFrame({"a": False, "b": True})
def test_constructor_DataFrame(self, float_frame):
    """Constructing from a DataFrame reproduces it; dtype may recast values."""
    df = DataFrame(float_frame)
    tm.assert_frame_equal(df, float_frame)

    df_casted = DataFrame(float_frame, dtype=np.int64)
    assert df_casted.values.dtype == np.int64
def test_constructor_empty_dataframe(self):
# GH 20624
actual = DataFrame(DataFrame(), dtype="object")
expected = DataFrame([], dtype="object")
tm.assert_frame_equal(actual, expected)
def test_constructor_more(self, float_frame):
    """Assorted constructor checks (originally from test_matrix.py)."""
    # 1-D ndarray with explicit labels yields a 2-D values array
    arr = np.random.randn(10)
    dm = DataFrame(arr, columns=["A"], index=np.arange(10))
    assert dm.values.ndim == 2

    # zero-length 1-D input still yields a 2-D values array
    # (the assertion was accidentally duplicated before; one copy removed)
    arr = np.random.randn(0)
    dm = DataFrame(arr)
    assert dm.values.ndim == 2

    # no data specified
    dm = DataFrame(columns=["A", "B"], index=np.arange(10))
    assert dm.values.shape == (10, 2)

    dm = DataFrame(columns=["A", "B"])
    assert dm.values.shape == (0, 2)

    dm = DataFrame(index=np.arange(10))
    assert dm.values.shape == (10, 0)

    # can't cast
    mat = np.array(["foo", "bar"], dtype=object).reshape(2, 1)
    msg = "could not convert string to float: 'foo'"
    with pytest.raises(ValueError, match=msg):
        DataFrame(mat, index=[0, 1], columns=[0], dtype=float)

    dm = DataFrame(DataFrame(float_frame._series))
    tm.assert_frame_equal(dm, float_frame)

    # int cast: mixing int and float columns upcasts .values to float
    dm = DataFrame(
        {"A": np.ones(10, dtype=int), "B": np.ones(10, dtype=np.float64)},
        index=np.arange(10),
    )
    assert len(dm.columns) == 2
    assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
    """Empty list input matches empty index/columns constructions (GH 9939)."""
    df = DataFrame([], index=[])
    expected = DataFrame(index=[])
    tm.assert_frame_equal(df, expected)

    # GH 9939
    df = DataFrame([], columns=["A", "B"])
    expected = DataFrame({}, columns=["A", "B"])
    tm.assert_frame_equal(df, expected)

    # Empty generator: list(empty_gen()) == []
    def empty_gen():
        return
        # unreachable yield: it only makes this function a generator
        yield

    df = DataFrame(empty_gen(), columns=["A", "B"])
    tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
    """Per-column dtype inference for list rows; 0-dim arrays act as scalars."""
    # GH #484
    df = DataFrame(data=[[1, "a"], [2, "b"]], columns=["num", "str"])
    assert is_integer_dtype(df["num"])
    assert df["str"].dtype == np.object_

    # GH 4851
    # list of 0-dim ndarrays
    expected = DataFrame({0: np.arange(10)})
    data = [np.array(x) for x in range(10)]
    result = DataFrame(data)
    tm.assert_frame_equal(result, expected)
def test_constructor_list_like_data_nested_list_column(self):
# GH 32173
arrays = [list("abcd"), list("cdef")]
result = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
mi = MultiIndex.from_arrays(arrays)
expected = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=mi)
tm.assert_frame_equal(result, expected)
def test_constructor_wrong_length_nested_list_column(self):
    """Nested column labels must match the data width (GH 32173)."""
    # GH 32173
    arrays = [list("abc"), list("cde")]

    msg = "3 columns passed, passed data had 4"
    with pytest.raises(ValueError, match=msg):
        DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
def test_constructor_unequal_length_nested_list_column(self):
    """Unequal nested label arrays fail inside the MultiIndex constructor."""
    # GH 32173
    arrays = [list("abcd"), list("cde")]

    # exception raised inside MultiIndex constructor
    msg = "all arrays must be same length"
    with pytest.raises(ValueError, match=msg):
        DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
@pytest.mark.parametrize(
    "data",
    [
        [[Timestamp("2021-01-01")]],
        [{"x": Timestamp("2021-01-01")}],
        {"x": [Timestamp("2021-01-01")]},
        {"x": Timestamp("2021-01-01")},
    ],
)
def test_constructor_one_element_data_list(self, data):
    """A single element broadcasts across the supplied index (GH#42810)."""
    # GH#42810
    result = DataFrame(data, index=[0, 1, 2], columns=["x"])
    expected = DataFrame({"x": [Timestamp("2021-01-01")] * 3})
    tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(abc.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, "a"], [2, "b"]], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_stdlib_array(self):
# GH 4297
# support Array
import array
result = DataFrame({"A": array.array("i", range(10))})
expected = DataFrame({"A": list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array("i", range(10)), array.array("i", range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_range(self):
# GH26342
result = DataFrame(range(10))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_ranges(self):
result = DataFrame([range(10), range(10)])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
result = DataFrame(iter(range(10)))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_iterators(self):
result = DataFrame([iter(range(10)), iter(range(10))])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, "a"] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: "a"})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dict_type", [dict, OrderedDict])
def test_constructor_ordered_dict_preserve_order(self, dict_type):
    """Insertion order of the mapping determines column order (gh-13304)."""
    # see gh-13304
    expected = DataFrame([[2, 1]], columns=["b", "a"])

    data = dict_type()
    data["b"] = [2]
    data["a"] = [1]

    result = DataFrame(data)
    tm.assert_frame_equal(result, expected)

    data = dict_type()
    data["b"] = 2
    data["a"] = 1

    result = DataFrame([data])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dict_type", [dict, OrderedDict])
def test_constructor_ordered_dict_conflicting_orders(self, dict_type):
    """With a list of dicts, only the first dict's key order matters."""
    # the first dict element sets the ordering for the DataFrame,
    # even if there are conflicting orders from subsequent ones
    row_one = dict_type()
    row_one["b"] = 2
    row_one["a"] = 1

    row_two = dict_type()
    row_two["a"] = 1
    row_two["b"] = 2

    row_three = {"b": 2, "a": 1}

    expected = DataFrame([[2, 1], [2, 1]], columns=["b", "a"])
    result = DataFrame([row_one, row_two])
    tm.assert_frame_equal(result, expected)

    expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=["b", "a"])
    result = DataFrame([row_one, row_two, row_three])
    tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)]
result = DataFrame(series)
expected = DataFrame(
{"b": [0, 1, 2], "a": [0, 1, 2], "c": [0, 1, 2]},
columns=["b", "a", "c"],
index=["0", "1", "2"],
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {"a": 1.5, "b": 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
    """Columns of unequal lengths are rejected."""
    data = {"A": np.random.randn(10), "B": np.random.randn(8)}
    with pytest.raises(ValueError, match="All arrays must be of the same length"):
        DataFrame(data)
def test_constructor_scalar(self):
idx = Index(range(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self, float_frame):
    """Smoke test: copying a frame built from a Series must not raise."""
    df = DataFrame(float_frame["A"], index=float_frame.index, columns=["A"])
    df.copy()
def test_constructor_mixed_dict_and_Series(self):
    """dict+Series values align on keys; dict+bare-list ordering is ambiguous."""
    data = {}
    data["A"] = {"foo": 1, "bar": 2, "baz": 3}
    data["B"] = Series([4, 3, 2, 1], index=["bar", "qux", "baz", "foo"])

    result = DataFrame(data)
    assert result.index.is_monotonic

    # ordering ambiguous, raise exception
    with pytest.raises(ValueError, match="ambiguous ordering"):
        DataFrame({"A": ["a", "b"], "B": {"a": "a", "b": "b"}})

    # this is OK though
    result = DataFrame({"A": ["a", "b"], "B": Series(["a", "b"], index=["a", "b"])})
    expected = DataFrame({"A": ["a", "b"], "B": ["a", "b"]}, index=["a", "b"])
    tm.assert_frame_equal(result, expected)
def test_constructor_mixed_type_rows(self):
# Issue 25075
data = [[1, 2], (3, 4)]
result = DataFrame(data)
expected = DataFrame([[1, 2], [3, 4]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "tuples,lists",
    [
        ((), []),
        ((()), []),
        (((), ()), [(), ()]),
        (((), ()), [[], []]),
        (([], []), [[], []]),
        (([1], [2]), [[1], [2]]),  # GH 32776
        (([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]),
    ],
)
def test_constructor_tuple(self, tuples, lists):
    """Tuples of rows behave like the equivalent lists (GH 25691)."""
    # GH 25691
    result = DataFrame(tuples)
    expected = DataFrame(lists)
    tm.assert_frame_equal(result, expected)
def test_constructor_list_of_tuples(self):
result = DataFrame({"A": [(1, 2), (3, 4)]})
expected = DataFrame({"A": Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list("ab"))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({"y": [1, 2], "z": [3, 4]})
result = DataFrame(tuples, columns=["y", "z"])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses(self):
# GH21910
from dataclasses import make_dataclass
Point = make_dataclass("Point", [("x", int), ("y", int)])
data = [Point(0, 3), Point(1, 3)]
expected = DataFrame({"x": [0, 1], "y": [3, 3]})
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses_with_varying_types(self):
# GH21910
from dataclasses import make_dataclass
# varying types
Point = make_dataclass("Point", [("x", int), ("y", int)])
HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)])
data = [Point(0, 3), HLine(1, 3, 3)]
expected = DataFrame(
{"x": [0, np.nan], "y": [3, 3], "x0": [np.nan, 1], "x1": [np.nan, 3]}
)
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses_error_thrown(self):
    """Mixing dataclass instances with plain dicts raises TypeError (GH21910)."""
    # GH21910
    from dataclasses import make_dataclass

    Point = make_dataclass("Point", [("x", int), ("y", int)])

    # expect TypeError
    msg = "asdict() should be called on dataclass instances"
    with pytest.raises(TypeError, match=re.escape(msg)):
        DataFrame([Point(0, 0), {"x": 1, "y": 0}])
def test_constructor_list_of_dict_order(self):
# GH10056
data = [
{"First": 1, "Second": 4, "Third": 7, "Fourth": 10},
{"Second": 5, "First": 2, "Fourth": 11, "Third": 8},
{"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13},
]
expected = DataFrame(
{
"First": [1, 2, 3],
"Second": [4, 5, 6],
"Third": [7, 8, 9],
"Fourth": [10, 11, 12],
"YYY": [None, None, 14],
"XXX": [None, None, 13],
}
)
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_Series_named(self):
    """A named Series contributes its name as the single column label."""
    a = Series([1, 2, 3], index=["a", "b", "c"], name="x")
    df = DataFrame(a)
    assert df.columns[0] == "x"
    tm.assert_index_equal(df.index, a.index)

    # ndarray like
    arr = np.random.randn(10)
    s = Series(arr, name="x")
    df = DataFrame(s)
    expected = DataFrame({"x": s})
    tm.assert_frame_equal(df, expected)

    # unnamed Series falls back to column label 0
    s = Series(arr, index=range(3, 13))
    df = DataFrame(s)
    expected = DataFrame({0: s})
    tm.assert_frame_equal(df, expected)

    msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(s, columns=[1, 2])

    # #2234
    a = Series([], name="x", dtype=object)
    df = DataFrame(a)
    assert df.columns[0] == "x"

    # series with name and w/o
    s1 = Series(arr, name="x")
    df = DataFrame([s1, arr]).T
    expected = DataFrame({"x": s1, "Unnamed 0": arr}, columns=["x", "Unnamed 0"])
    tm.assert_frame_equal(df, expected)

    # this is a bit non-intuitive here; the series collapse down to arrays
    df = DataFrame([arr, s1]).T
    expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
    tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
    """Series name vs explicit columns=: a match keeps data, a mismatch empties."""
    # GH 9232 validation
    s0 = Series(range(5), name=0)
    s1 = Series(range(5), name=1)

    # matching name and column gives standard frame
    tm.assert_frame_equal(DataFrame(s0, columns=[0]), s0.to_frame())
    tm.assert_frame_equal(DataFrame(s1, columns=[1]), s1.to_frame())

    # non-matching produces empty frame
    assert DataFrame(s0, columns=[1]).empty
    assert DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
    """index= reindexes the input Series; column label is its name (or 0)."""
    # name
    s1 = Series([1, 2, 3], index=["a", "b", "c"], name="x")

    # no name
    s2 = Series([1, 2, 3], index=["a", "b", "c"])

    other_index = Index(["a", "b"])

    df1 = DataFrame(s1, index=other_index)
    exp1 = DataFrame(s1.reindex(other_index))
    assert df1.columns[0] == "x"
    tm.assert_frame_equal(df1, exp1)

    df2 = DataFrame(s2, index=other_index)
    exp2 = DataFrame(s2.reindex(other_index))
    assert df2.columns[0] == 0
    tm.assert_index_equal(df2.index, other_index)
    tm.assert_frame_equal(df2, exp2)
@pytest.mark.parametrize(
    "name_in1,name_in2,name_in3,name_out",
    [
        ("idx", "idx", "idx", "idx"),
        ("idx", "idx", None, None),
        ("idx", None, None, None),
        ("idx1", "idx2", None, None),
        ("idx1", "idx1", "idx2", None),
        ("idx1", "idx2", "idx3", None),
        (None, None, None, None),
    ],
)
def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out):
    """Union of Series indexes keeps the name only when all input names agree."""
    # GH13475
    indices = [
        Index(["a", "b", "c"], name=name_in1),
        Index(["b", "c", "d"], name=name_in2),
        Index(["c", "d", "e"], name=name_in3),
    ]
    series = {
        c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"])
    }
    result = DataFrame(series)

    exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
    expected = DataFrame(
        {
            "x": [0, 1, 2, np.nan, np.nan],
            "y": [np.nan, 0, 1, 2, np.nan],
            "z": [np.nan, np.nan, 0, 1, 2],
        },
        index=exp_ind,
    )

    tm.assert_frame_equal(result, expected)
def test_constructor_manager_resize(self, float_frame):
    """Constructing from an existing internal manager honors index/columns subsets."""
    index = list(float_frame.index[:5])
    columns = list(float_frame.columns[:3])

    result = DataFrame(float_frame._mgr, index=index, columns=columns)
    tm.assert_index_equal(result.index, Index(index))
    tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_mix_series_nonseries(self, float_frame):
    """Mixing Series and list columns works; a length mismatch raises."""
    df = DataFrame(
        {"A": float_frame["A"], "B": list(float_frame["B"])}, columns=["A", "B"]
    )
    tm.assert_frame_equal(df, float_frame.loc[:, ["A", "B"]])

    msg = "does not match index length"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"A": float_frame["A"], "B": list(float_frame["B"])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
    """NaN in the data upcasts a requested int64 dtype to the float default."""
    df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
    expected = DataFrame([[np.nan, 1], [1, 0]])
    tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=["a", "a"])
edf = DataFrame([[8, 5]])
edf.columns = ["a", "a"]
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)], columns=["a", "a"])
tm.assert_frame_equal(idf, edf)
def test_constructor_empty_with_string_dtype(self):
    """An empty frame with any numpy string dtype materializes as object dtype."""
    # GH 9428
    expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)

    df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
    tm.assert_frame_equal(df, expected)
    df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
    tm.assert_frame_equal(df, expected)
    df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
    tm.assert_frame_equal(df, expected)
    df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5")
    tm.assert_frame_equal(df, expected)
def test_constructor_empty_with_string_extension(self, nullable_string_dtype):
    """An empty column with a nullable string dtype keeps that dtype."""
    # GH 34915
    expected = DataFrame(index=[], columns=["c1"], dtype=nullable_string_dtype)
    df = DataFrame(columns=["c1"], dtype=nullable_string_dtype)
    tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
    """A scalar broadcasts over index/columns; incomplete call forms raise."""
    # expecting single value upcasting here
    df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"])
    tm.assert_frame_equal(
        df, DataFrame(np.zeros(df.shape).astype("float64"), df.index, df.columns)
    )

    df = DataFrame(0, index=[1, 2, 3], columns=["a", "b", "c"])
    tm.assert_frame_equal(
        df, DataFrame(np.zeros(df.shape).astype("int64"), df.index, df.columns)
    )

    df = DataFrame("a", index=[1, 2], columns=["a", "c"])
    tm.assert_frame_equal(
        df,
        DataFrame(
            np.array([["a", "a"], ["a", "a"]], dtype=object),
            index=[1, 2],
            columns=["a", "c"],
        ),
    )

    # scalar data requires BOTH index and columns
    msg = "DataFrame constructor not properly called!"
    with pytest.raises(ValueError, match=msg):
        DataFrame("a", [1, 2])
    with pytest.raises(ValueError, match=msg):
        DataFrame("a", columns=["a", "c"])

    msg = "incompatible data and dtype"
    with pytest.raises(TypeError, match=msg):
        DataFrame("a", [1, 2], ["a", "c"], float)
def test_constructor_with_datetimes(self):
    """Scalar, 0-dim ndarray, and 1-dim ndarray inputs infer expected dtypes."""
    intname = np.dtype(np.int_).name
    floatname = np.dtype(np.float_).name
    datetime64name = np.dtype("M8[ns]").name
    objectname = np.dtype(np.object_).name

    # single item
    df = DataFrame(
        {
            "A": 1,
            "B": "foo",
            "C": "bar",
            "D": Timestamp("20010101"),
            "E": datetime(2001, 1, 2, 0, 0),
        },
        index=np.arange(10),
    )
    result = df.dtypes
    expected = Series(
        [np.dtype("int64")]
        + [np.dtype(objectname)] * 2
        + [np.dtype(datetime64name)] * 2,
        index=list("ABCDE"),
    )
    tm.assert_series_equal(result, expected)

    # check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
    # ndarray with a dtype specified)
    df = DataFrame(
        {
            "a": 1.0,
            "b": 2,
            "c": "foo",
            floatname: np.array(1.0, dtype=floatname),
            intname: np.array(1, dtype=intname),
        },
        index=np.arange(10),
    )
    result = df.dtypes
    expected = Series(
        [np.dtype("float64")]
        + [np.dtype("int64")]
        + [np.dtype("object")]
        + [np.dtype("float64")]
        + [np.dtype(intname)],
        index=["a", "b", "c", floatname, intname],
    )
    tm.assert_series_equal(result, expected)

    # check with ndarray construction ndim>0
    df = DataFrame(
        {
            "a": 1.0,
            "b": 2,
            "c": "foo",
            floatname: np.array([1.0] * 10, dtype=floatname),
            intname: np.array([1] * 10, dtype=intname),
        },
        index=np.arange(10),
    )
    result = df.dtypes
    expected = Series(
        [np.dtype("float64")]
        + [np.dtype("int64")]
        + [np.dtype("object")]
        + [np.dtype("float64")]
        + [np.dtype(intname)],
        index=["a", "b", "c", floatname, intname],
    )
    tm.assert_series_equal(result, expected)
def test_constructor_with_datetimes1(self):
    """A list of python datetimes becomes a datetime64[ns] Series."""
    # GH 2809
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    datetime_s = Series(datetimes)
    assert datetime_s.dtype == "M8[ns]"
def test_constructor_with_datetimes2(self):
    """datetime values infer datetime64[ns]; date objects stay object dtype."""
    # GH 2810
    ind = date_range(start="2000-01-01", freq="D", periods=10)
    datetimes = [ts.to_pydatetime() for ts in ind]
    dates = [ts.date() for ts in ind]
    df = DataFrame(datetimes, columns=["datetimes"])
    df["dates"] = dates
    result = df.dtypes
    expected = Series(
        [np.dtype("datetime64[ns]"), np.dtype("object")],
        index=["datetimes", "dates"],
    )
    tm.assert_series_equal(result, expected)
def test_constructor_with_datetimes3(self):
    """A tz-aware scalar keeps its tz as a datetime64[ns, tz] dtype column."""
    # GH 7594
    # don't coerce tz-aware
    tz = pytz.timezone("US/Eastern")
    dt = tz.localize(datetime(2012, 1, 1))

    df = DataFrame({"End Date": dt}, index=[0])
    assert df.iat[0, 0] == dt
    tm.assert_series_equal(
        df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
    )

    df = DataFrame([{"End Date": dt}])
    assert df.iat[0, 0] == dt
    tm.assert_series_equal(
        df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
    )
def test_constructor_with_datetimes4(self):
    """DatetimeIndex values keep their tz: naive, UTC, and US/Eastern."""
    # tz-aware (UTC and other tz's)
    # GH 8411
    dr = date_range("20130101", periods=3)
    df = DataFrame({"value": dr})
    assert df.iat[0, 0].tz is None

    dr = date_range("20130101", periods=3, tz="UTC")
    df = DataFrame({"value": dr})
    assert str(df.iat[0, 0].tz) == "UTC"

    dr = date_range("20130101", periods=3, tz="US/Eastern")
    df = DataFrame({"value": dr})
    assert str(df.iat[0, 0].tz) == "US/Eastern"
def test_constructor_with_datetimes5(self):
    """A tz-aware index assigned or dict-constructed into a column keeps its tz."""
    # GH 7822
    # preserver an index with a tz on dict construction
    i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")

    expected = DataFrame({"a": i.to_series().reset_index(drop=True)})

    df = DataFrame()
    df["a"] = i
    tm.assert_frame_equal(df, expected)

    df = DataFrame({"a": i})
    tm.assert_frame_equal(df, expected)
def test_constructor_with_datetimes6(self):
    """tz-aware and naive datetime columns can coexist in one frame."""
    # multiples
    i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
    i_no_tz = date_range("1/1/2011", periods=5, freq="10s")
    df = DataFrame({"a": i, "b": i_no_tz})
    expected = DataFrame({"a": i.to_series().reset_index(drop=True), "b": i_no_tz})
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
    "arr",
    [
        np.array([None, None, None, None, datetime.now(), None]),
        np.array([None, None, datetime.now(), None]),
        [[np.datetime64("NaT")], [None]],
        [[np.datetime64("NaT")], [pd.NaT]],
        [[None], [np.datetime64("NaT")]],
        [[None], [pd.NaT]],
        [[pd.NaT], [np.datetime64("NaT")]],
        [[pd.NaT], [None]],
    ],
)
def test_constructor_datetimes_with_nulls(self, arr):
    """Mixtures of None/NaT/datetime still infer a datetime64[ns] column."""
    # gh-15869, GH#11220
    result = DataFrame(arr).dtypes
    expected = Series([np.dtype("datetime64[ns]")])
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("order", ["K", "A", "C", "F"])
@pytest.mark.parametrize(
    "dtype",
    [
        "datetime64[M]",
        "datetime64[D]",
        "datetime64[h]",
        "datetime64[m]",
        "datetime64[s]",
        "datetime64[ms]",
        "datetime64[us]",
        "datetime64[ns]",
    ],
)
def test_constructor_datetimes_non_ns(self, order, dtype):
    """Non-nanosecond datetime64 ndarrays construct regardless of memory order."""
    na = np.array(
        [
            ["2015-01-01", "2015-01-02", "2015-01-03"],
            ["2017-01-01", "2017-01-02", "2017-02-03"],
        ],
        dtype=dtype,
        order=order,
    )
    df = DataFrame(na)
    expected = DataFrame(
        [
            ["2015-01-01", "2015-01-02", "2015-01-03"],
            ["2017-01-01", "2017-01-02", "2017-02-03"],
        ]
    )
    expected = expected.astype(dtype=dtype)
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("order", ["K", "A", "C", "F"])
@pytest.mark.parametrize(
    "dtype",
    [
        "timedelta64[D]",
        "timedelta64[h]",
        "timedelta64[m]",
        "timedelta64[s]",
        "timedelta64[ms]",
        "timedelta64[us]",
        "timedelta64[ns]",
    ],
)
def test_constructor_timedelta_non_ns(self, order, dtype):
    """Non-nanosecond timedelta64 ndarrays construct; compared after astype(ns)."""
    na = np.array(
        [
            [np.timedelta64(1, "D"), np.timedelta64(2, "D")],
            [np.timedelta64(4, "D"), np.timedelta64(5, "D")],
        ],
        dtype=dtype,
        order=order,
    )
    df = DataFrame(na).astype("timedelta64[ns]")
    expected = DataFrame(
        [
            [Timedelta(1, "D"), Timedelta(2, "D")],
            [Timedelta(4, "D"), Timedelta(5, "D")],
        ],
    )
    tm.assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
    """Lists/ndarrays/scalars infer platform-independent int64/float64 dtypes."""
    # test list of lists/ndarrays
    df = DataFrame([np.arange(5) for x in range(5)])
    result = df.dtypes
    expected = Series([np.dtype("int")] * 5)
    tm.assert_series_equal(result, expected)

    df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)])
    result = df.dtypes
    expected = Series([np.dtype("int32")] * 5)
    tm.assert_series_equal(result, expected)

    # overflow issue? (we always expected int64 upcasting here)
    df = DataFrame({"a": [2 ** 31, 2 ** 31 + 1]})
    assert df.dtypes.iloc[0] == np.dtype("int64")

    # GH #2751 (construction with no index specified), make sure we cast to
    # platform values
    df = DataFrame([1, 2])
    assert df.dtypes.iloc[0] == np.dtype("int64")

    df = DataFrame([1.0, 2.0])
    assert df.dtypes.iloc[0] == np.dtype("float64")

    df = DataFrame({"a": [1, 2]})
    assert df.dtypes.iloc[0] == np.dtype("int64")

    df = DataFrame({"a": [1.0, 2.0]})
    assert df.dtypes.iloc[0] == np.dtype("float64")

    df = DataFrame({"a": 1}, index=range(3))
    assert df.dtypes.iloc[0] == np.dtype("int64")

    df = DataFrame({"a": 1.0}, index=range(3))
    assert df.dtypes.iloc[0] == np.dtype("float64")

    # with object list
    df = DataFrame(
        {
            "a": [1, 2, 4, 7],
            "b": [1.2, 2.3, 5.1, 6.3],
            "c": list("abcd"),
            "d": [datetime(2000, 1, 1) for i in range(4)],
            "e": [1.0, 2, 4.0, 7],
        }
    )
    result = df.dtypes
    expected = Series(
        [
            np.dtype("int64"),
            np.dtype("float64"),
            np.dtype("object"),
            np.dtype("datetime64[ns]"),
            np.dtype("float64"),
        ],
        index=list("abcde"),
    )
    tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self, float_frame):
    """copy=True detaches the new frame's data from the source frame."""
    cop = DataFrame(float_frame, copy=True)
    cop["A"] = 5
    assert (cop["A"] == 5).all()
    assert not (float_frame["A"] == 5).all()
# TODO(ArrayManager) keep view on 2D array?
@td.skip_array_manager_not_yet_implemented
def test_constructor_ndarray_copy(self, float_frame):
    """Default keeps a view on the source ndarray; copy=True snapshots it."""
    df = DataFrame(float_frame.values)

    # mutation of the source is visible through the view
    float_frame.values[5] = 5
    assert (df.values[5] == 5).all()

    df = DataFrame(float_frame.values, copy=True)
    float_frame.values[6] = 6
    assert not (df.values[6] == 6).all()
# TODO(ArrayManager) keep view on Series?
@td.skip_array_manager_not_yet_implemented
def test_constructor_series_copy(self, float_frame):
    """copy=True detaches dict-of-Series data from the source Series."""
    series = float_frame._series

    df = DataFrame({"A": series["A"]}, copy=True)
    df["A"][:] = 5

    assert not (series["A"] == 5).all()
def test_constructor_with_nas(self):
    """NaN labels in index/columns still allow positional and .loc access."""
    # GH 5016
    # na's in indices

    def check(df):
        # every column is positionally reachable
        for i in range(len(df.columns)):
            df.iloc[:, i]

        indexer = np.arange(len(df.columns))[isna(df.columns)]

        # No NaN found -> error
        if len(indexer) == 0:
            with pytest.raises(KeyError, match="^nan$"):
                df.loc[:, np.nan]
        # single nan should result in Series
        elif len(indexer) == 1:
            tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan])
        # multiple nans should result in DataFrame
        else:
            tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan])

    df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
    check(df)

    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
    check(df)

    df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan])
    check(df)

    df = DataFrame(
        [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]
    )
    check(df)

    # GH 21428 (non-unique columns)
    df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2])
    check(df)
def test_constructor_lists_to_object_dtype(self):
    """NaN mixed with bools yields an object column with values preserved."""
    # from #1074
    d = DataFrame({"a": [np.nan, False]})
    assert d["a"].dtype == np.object_
    assert not d["a"][1]
def test_constructor_ndarray_categorical_dtype(self):
    """A broadcast 2D object array plus a categorical dtype gives categorical columns."""
    cat = Categorical(["A", "B", "C"])
    arr = np.array(cat).reshape(-1, 1)
    arr = np.broadcast_to(arr, (3, 4))

    result = DataFrame(arr, dtype=cat.dtype)

    expected = DataFrame({0: cat, 1: cat, 2: cat, 3: cat})
    tm.assert_frame_equal(result, expected)
def test_constructor_categorical(self):
    """dtype="category" applies per column for dict, to_frame, and list data."""
    # GH8626

    # dict creation
    df = DataFrame({"A": list("abc")}, dtype="category")
    expected = Series(list("abc"), dtype="category", name="A")
    tm.assert_series_equal(df["A"], expected)

    # to_frame
    s = Series(list("abc"), dtype="category")
    result = s.to_frame()
    expected = Series(list("abc"), dtype="category", name=0)
    tm.assert_series_equal(result[0], expected)
    result = s.to_frame(name="foo")
    expected = Series(list("abc"), dtype="category", name="foo")
    tm.assert_series_equal(result["foo"], expected)

    # list-like creation
    df = DataFrame(list("abc"), dtype="category")
    expected = Series(list("abc"), dtype="category", name=0)
    tm.assert_series_equal(df[0], expected)
def test_construct_from_1item_list_of_categorical(self):
    """A 1-element list of Categorical warns and yields one categorical column."""
    # ndim != 1
    msg = "will be changed to match the behavior"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df = DataFrame([Categorical(list("abc"))])
    expected = DataFrame({0: Series(list("abc"), dtype="category")})
    tm.assert_frame_equal(df, expected)
def test_construct_from_list_of_categoricals(self):
    """Two Categorical entries warn and become two categorical columns."""
    msg = "will be changed to match the behavior"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df = DataFrame([Categorical(list("abc")), Categorical(list("abd"))])
    expected = DataFrame(
        {
            0: Series(list("abc"), dtype="category"),
            1: Series(list("abd"), dtype="category"),
        },
        columns=[0, 1],
    )
    tm.assert_frame_equal(df, expected)
def test_from_nested_listlike_mixed_types(self):
    """Categorical mixed with a plain list warns; dtypes stay per-column."""
    # mixed
    msg = "will be changed to match the behavior"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df = DataFrame([Categorical(list("abc")), list("def")])
    expected = DataFrame(
        {0: Series(list("abc"), dtype="category"), 1: list("def")}, columns=[0, 1]
    )
    tm.assert_frame_equal(df, expected)
def test_construct_from_listlikes_mismatched_lengths(self):
    """Row list-likes of different lengths raise ValueError."""
    # invalid (shape)
    msg = "|".join(
        [
            r"Length of values \(6\) does not match length of index \(3\)",
        ]
    )
    msg2 = "will be changed to match the behavior"
    with pytest.raises(ValueError, match=msg):
        with tm.assert_produces_warning(FutureWarning, match=msg2):
            DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))])
def test_constructor_categorical_series(self):
    """dtype="category" matches astype; misaligned insert yields all-NaN categories."""
    items = [1, 2, 3, 1]
    exp = Series(items).astype("category")
    res = Series(items, dtype="category")
    tm.assert_series_equal(res, exp)

    items = ["a", "b", "c", "a"]
    exp = Series(items).astype("category")
    res = Series(items, dtype="category")
    tm.assert_series_equal(res, exp)

    # insert into frame with different index
    # GH 8076
    index = date_range("20000101", periods=3)
    expected = Series(
        Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"])
    )
    expected.index = index

    expected = DataFrame({"x": expected})
    df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index)
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
    "dtype",
    tm.ALL_INT_NUMPY_DTYPES
    + tm.ALL_INT_EA_DTYPES
    + tm.FLOAT_NUMPY_DTYPES
    + tm.COMPLEX_DTYPES
    + tm.DATETIME64_DTYPES
    + tm.TIMEDELTA64_DTYPES
    + tm.BOOL_DTYPES,
)
def test_check_dtype_empty_numeric_column(self, dtype):
    """Non-overlapping dict keys vs columns= give an empty column of `dtype`."""
    # GH24386: Ensure dtypes are set correctly for an empty DataFrame.
    # Empty DataFrame is generated via dictionary data with non-overlapping columns.
    data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype)
    assert data.b.dtype == dtype
# TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype
@td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize(
    "dtype", tm.STRING_DTYPES + tm.BYTES_DTYPES + tm.OBJECT_DTYPES
)
def test_check_dtype_empty_string_column(self, dtype):
    """Empty string/bytes/object columns all land on object dtype."""
    # GH24386: Ensure dtypes are set correctly for an empty DataFrame.
    # Empty DataFrame is generated via dictionary data with non-overlapping columns.
    data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype)
    assert data.b.dtype.name == "object"
def test_to_frame_with_falsey_names(self):
    """A falsey Series name (0) still becomes a usable column label."""
    # GH 16114
    result = Series(name=0, dtype=object).to_frame().dtypes
    expected = Series({0: object})
    tm.assert_series_equal(result, expected)

    result = DataFrame(Series(name=0, dtype=object)).dtypes
    tm.assert_series_equal(result, expected)
@pytest.mark.arm_slow
@pytest.mark.parametrize("dtype", [None, "uint8", "category"])
def test_constructor_range_dtype(self, dtype):
    """range data honors dtype= both as raw data and inside a dict."""
    expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64")

    # GH 26342
    result = DataFrame(range(5), columns=["A"], dtype=dtype)
    tm.assert_frame_equal(result, expected)

    # GH 16804
    result = DataFrame({"A": range(5)}, dtype=dtype)
    tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "extension_arr",
    [
        Categorical(list("aabbc")),
        SparseArray([1, np.nan, np.nan, np.nan]),
        IntervalArray([Interval(0, 1), Interval(1, 5)]),
        PeriodArray(pd.period_range(start="1/1/2017", end="1/1/2018", freq="M")),
    ],
)
def test_constructor_with_extension_array(self, extension_arr):
    """A bare extension array constructs like the equivalent Series."""
    # GH11363
    expected = DataFrame(Series(extension_arr))
    result = DataFrame(extension_arr)
    tm.assert_frame_equal(result, expected)
def test_datetime_date_tuple_columns_from_dict(self):
    """A tuple dict key is kept as a single (non-MultiIndex) column label."""
    # GH 10863
    v = date.today()
    tup = v, v
    result = DataFrame({tup: Series(range(3), index=range(3))}, columns=[tup])
    expected = DataFrame([0, 1, 2], columns=Index(Series([tup])))
    tm.assert_frame_equal(result, expected)
def test_construct_with_two_categoricalindex_series(self):
    """Rows from two CategoricalIndex Series union their categories as columns."""
    # GH 14600
    s1 = Series([39, 6, 4], index=CategoricalIndex(["female", "male", "unknown"]))
    s2 = Series(
        [2, 152, 2, 242, 150],
        index=CategoricalIndex(["f", "female", "m", "male", "unknown"]),
    )
    result = DataFrame([s1, s2])
    expected = DataFrame(
        np.array([[39, 6, 4, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]]),
        columns=["female", "male", "unknown", "f", "m"],
    )
    tm.assert_frame_equal(result, expected)
def test_constructor_series_nonexact_categoricalindex(self):
    """head/tail slices of the same cut's value_counts align on the full bins."""
    # GH 42424
    ser = Series(range(0, 100))
    ser1 = cut(ser, 10).value_counts().head(5)
    ser2 = cut(ser, 10).value_counts().tail(5)
    result = DataFrame({"1": ser1, "2": ser2})
    index = CategoricalIndex(
        [
            Interval(-0.099, 9.9, closed="right"),
            Interval(9.9, 19.8, closed="right"),
            Interval(19.8, 29.7, closed="right"),
            Interval(29.7, 39.6, closed="right"),
            Interval(39.6, 49.5, closed="right"),
            Interval(49.5, 59.4, closed="right"),
            Interval(59.4, 69.3, closed="right"),
            Interval(69.3, 79.2, closed="right"),
            Interval(79.2, 89.1, closed="right"),
            Interval(89.1, 99, closed="right"),
        ],
        ordered=True,
    )
    expected = DataFrame(
        {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index
    )
    tm.assert_frame_equal(expected, result)
def test_from_M8_structured(self):
    """Structured M8[us] arrays yield Timestamp values per named field."""
    dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))]
    arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")])
    df = DataFrame(arr)

    assert df["Date"][0] == dates[0][0]
    assert df["Forecasting"][0] == dates[0][1]

    s = Series(arr["Date"])
    assert isinstance(s[0], Timestamp)
    assert s[0] == dates[0][0]
def test_from_datetime_subclass(self):
    """datetime subclasses still infer datetime64[ns]."""
    # GH21142 Verify whether Datetime subclasses are also of dtype datetime
    class DatetimeSubclass(datetime):
        pass

    data = DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
    assert data.datetime.dtype == "datetime64[ns]"
def test_with_mismatched_index_length_raises(self):
    """An index= whose length mismatches DatetimeIndex data raises ValueError."""
    # GH#33437
    dti = date_range("2016-01-01", periods=3, tz="US/Pacific")
    msg = "Shape of passed values|Passed arrays should have the same length"
    with pytest.raises(ValueError, match=msg):
        DataFrame(dti, index=range(4))
def test_frame_ctor_datetime64_column(self):
    """An ndarray of datetimes builds a datetime64[ns] column."""
    rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
    dates = np.asarray(rng)

    df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
    assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_dataframe_constructor_infer_multiindex(self):
    """A list of arrays passed as index/columns is inferred as a MultiIndex."""
    index_lists = [["a", "a", "b", "b"], ["x", "y", "x", "y"]]

    multi = DataFrame(
        np.random.randn(4, 4),
        index=[np.array(x) for x in index_lists],
    )
    assert isinstance(multi.index, MultiIndex)
    assert not isinstance(multi.columns, MultiIndex)

    multi = DataFrame(np.random.randn(4, 4), columns=index_lists)
    assert isinstance(multi.columns, MultiIndex)
@pytest.mark.parametrize(
    "input_vals",
    [
        ([1, 2]),
        (["1", "2"]),
        (list(date_range("1/1/2011", periods=2, freq="H"))),
        (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
        ([Interval(left=0, right=5)]),
    ],
)
def test_constructor_list_str(self, input_vals, string_dtype):
    """dtype=str in the ctor matches an astype to the same dtype."""
    # GH#16605
    # Ensure that data elements are converted to strings when
    # dtype is str, 'str', or 'U'
    result = DataFrame({"A": input_vals}, dtype=string_dtype)
    expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
    tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
    """A string dtype converts floats but leaves None as-is (object result)."""
    result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
    expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("copy", [False, True])
@td.skip_array_manager_not_yet_implemented
def test_dict_nocopy(self, copy, any_numeric_ea_dtype, any_numpy_dtype):
    """copy=False keeps views on dict inputs; copy=True detaches them."""
    a = np.array([1, 2], dtype=any_numpy_dtype)
    b = np.array([3, 4], dtype=any_numpy_dtype)
    if b.dtype.kind in ["S", "U"]:
        # These get cast, making the checks below more cumbersome
        return

    c = pd.array([1, 2], dtype=any_numeric_ea_dtype)
    df = DataFrame({"a": a, "b": b, "c": c}, copy=copy)

    def get_base(obj):
        # the ndarray a block/array is ultimately backed by
        if isinstance(obj, np.ndarray):
            return obj.base
        elif isinstance(obj.dtype, np.dtype):
            # i.e. DatetimeArray, TimedeltaArray
            return obj._ndarray.base
        else:
            raise TypeError

    def check_views():
        # written to work for either BlockManager or ArrayManager
        assert sum(x is c for x in df._mgr.arrays) == 1
        assert (
            sum(
                get_base(x) is a
                for x in df._mgr.arrays
                if isinstance(x.dtype, np.dtype)
            )
            == 1
        )
        assert (
            sum(
                get_base(x) is b
                for x in df._mgr.arrays
                if isinstance(x.dtype, np.dtype)
            )
            == 1
        )

    if not copy:
        # constructor preserves views
        check_views()

    df.iloc[0, 0] = 0
    df.iloc[0, 1] = 0
    if not copy:
        # Check that the underlying data behind df["c"] is still `c`
        # after setting with iloc. Since we don't know which entry in
        # df._mgr.arrays corresponds to df["c"], we just check that exactly
        # one of these arrays is `c`. GH#38939
        assert sum(x is c for x in df._mgr.arrays) == 1
        # TODO: we can call check_views if we stop consolidating
        # in setitem_with_indexer

    # FIXME(GH#35417): until GH#35417, iloc.setitem into EA values does not preserve
    # view, so we have to check in the other direction
    # df.iloc[0, 2] = 0
    # if not copy:
    # check_views()
    c[0] = 0

    if copy:
        if a.dtype.kind == "M":
            assert a[0] == a.dtype.type(1, "ns")
            assert b[0] == b.dtype.type(3, "ns")
        else:
            assert a[0] == a.dtype.type(1)
            assert b[0] == b.dtype.type(3)
        # FIXME(GH#35417): enable after GH#35417
        # assert c[0] == 1
        assert df.iloc[0, 2] == 1
    else:
        # TODO: we can call check_views if we stop consolidating
        # in setitem_with_indexer
        # FIXME(GH#35417): enable after GH#35417
        # assert b[0] == 0
        assert df.iloc[0, 2] == 0
def test_from_series_with_name_with_columns(self):
    """A Series whose name is absent from columns= produces an empty frame."""
    # GH 7893
    result = DataFrame(Series(1, name="foo"), columns=["bar"])
    expected = DataFrame(columns=["bar"])
    tm.assert_frame_equal(result, expected)
def test_nested_list_columns(self):
# GH 14467
result = DataFrame(
[[1, 2, 3], [4, 5, 6]], columns=[["A", "A", "A"], ["a", "b", "c"]]
)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6]],
columns=MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]),
)
tm.assert_frame_equal(result, expected)
def test_from_2d_object_array_of_periods_or_intervals(self):
    """2D object arrays of Period/Interval infer their extension dtypes."""
    # Period analogue to GH#26825
    pi = pd.period_range("2016-04-05", periods=3)
    data = pi._data.astype(object).reshape(1, -1)
    df = DataFrame(data)
    assert df.shape == (1, 3)
    assert (df.dtypes == pi.dtype).all()
    assert (df == pi).all().all()

    ii = pd.IntervalIndex.from_breaks([3, 4, 5, 6])
    data2 = ii._data.astype(object).reshape(1, -1)
    df2 = DataFrame(data2)
    assert df2.shape == (1, 3)
    assert (df2.dtypes == ii.dtype).all()
    assert (df2 == ii).all().all()

    # mixed
    data3 = np.r_[data, data2, data, data2].T
    df3 = DataFrame(data3)
    expected = DataFrame({0: pi, 1: ii, 2: pi, 3: ii})
    tm.assert_frame_equal(df3, expected)
@pytest.mark.parametrize(
    "col_a, col_b",
    [
        ([[1], [2]], np.array([[1], [2]])),
        (np.array([[1], [2]]), [[1], [2]]),
        (np.array([[1], [2]]), np.array([[1], [2]])),
    ],
)
def test_error_from_2darray(self, col_a, col_b):
    """2D per-column values inside a dict raise a clear ValueError."""
    msg = "Per-column arrays must each be 1-dimensional"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": col_a, "b": col_b})
class TestDataFrameConstructorIndexInference:
    """Index inference when constructing from dicts of Series."""

    def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
        """Overlapping PeriodIndexes union into the full covering period range."""
        rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M")
        s1 = Series(np.random.randn(len(rng1)), rng1)

        rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M")
        s2 = Series(np.random.randn(len(rng2)), rng2)
        df = DataFrame({"s1": s1, "s2": s2})

        exp = pd.period_range("1/1/1980", "1/1/2012", freq="M")
        tm.assert_index_equal(df.index, exp)
class TestDataFrameConstructorWithDtypeCoercion:
    """Coercion warnings when floating data meets an integer dtype."""

    def test_floating_values_integer_dtype(self):
        """Lossy float->int casts warn; lossless or NaN-containing data does not."""
        # GH#40110 make DataFrame behavior with arraylike floating data and
        # inty dtype match Series behavior
        arr = np.random.randn(10, 5)

        msg = "if they cannot be cast losslessly"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            DataFrame(arr, dtype="i8")

        with tm.assert_produces_warning(None):
            # if they can be cast losslessly, no warning
            DataFrame(arr.round(), dtype="i8")

        # with NaNs, we already have the correct behavior, so no warning
        arr[0, 0] = np.nan
        with tm.assert_produces_warning(None):
            DataFrame(arr, dtype="i8")
class TestDataFrameConstructorWithDatetimeTZ:
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_construction_preserves_tzaware_dtypes(self, tz):
    """tz-aware dtypes survive dict construction and column assignment."""
    # after GH#7822
    # these retain the timezones on dict construction
    dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
    dr_tz = dr.tz_localize(tz)
    df = DataFrame({"A": "foo", "B": dr_tz}, index=dr)
    tz_expected = DatetimeTZDtype("ns", dr_tz.tzinfo)
    assert df["B"].dtype == tz_expected

    # GH#2810 (with timezones)
    datetimes_naive = [ts.to_pydatetime() for ts in dr]
    datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
    df = DataFrame({"dr": dr})
    df["dr_tz"] = dr_tz
    df["datetimes_naive"] = datetimes_naive
    df["datetimes_with_tz"] = datetimes_with_tz
    result = df.dtypes
    expected = Series(
        [
            np.dtype("datetime64[ns]"),
            DatetimeTZDtype(tz=tz),
            np.dtype("datetime64[ns]"),
            DatetimeTZDtype(tz=tz),
        ],
        index=["dr", "dr_tz", "datetimes_naive", "datetimes_with_tz"],
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pydt", [True, False])
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
    """tz-aware data with a naive datetime64[ns] dtype warns and drops the tz."""
    # GH#25843, GH#41555, GH#33401
    tz = tz_aware_fixture
    ts = Timestamp("2019", tz=tz)
    if pydt:
        ts = ts.to_pydatetime()
    ts_naive = Timestamp("2019")

    with tm.assert_produces_warning(FutureWarning):
        result = DataFrame({0: [ts]}, dtype="datetime64[ns]")

    expected = DataFrame({0: [ts_naive]})
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = DataFrame({0: ts}, index=[0], dtype="datetime64[ns]")
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = DataFrame([ts], dtype="datetime64[ns]")
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = DataFrame(np.array([ts], dtype=object), dtype="datetime64[ns]")
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning):
        result = DataFrame(ts, index=[0], columns=[0], dtype="datetime64[ns]")
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        df = DataFrame([Series([ts])], dtype="datetime64[ns]")
    # BUG FIX: originally compared the stale `result` from the previous case,
    # leaving this `df` completely unchecked; assert the frame just built.
    tm.assert_frame_equal(df, expected)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        df = DataFrame([[ts]], columns=[0], dtype="datetime64[ns]")
    tm.assert_equal(df, expected)
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
# construction
df = DataFrame({"A": idx, "B": dr})
assert df["A"].dtype, "M8[ns, US/Eastern"
assert df["A"].name == "A"
tm.assert_series_equal(df["A"], Series(idx, name="A"))
tm.assert_series_equal(df["B"], Series(dr, name="B"))
def test_from_index(self):
# from index
idx2 = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
idx2 = date_range("20130101", periods=3, tz="US/Eastern")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range("1/1/2012", periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({"a": "foo", "b": s}, index=dr)
DataFrame({"a": "foo", "b": s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
ts = Series(dr)
# it works!
d = DataFrame({"A": "foo", "B": ts}, index=dr)
assert d["B"].isna().all()
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start="20130101T10:00:00", periods=3, freq="T", tz="US/Eastern")
result = DataFrame(dr, columns=["timestamps"])
expected = DataFrame(
{
"timestamps": [
Timestamp("20130101T10:00:00", tz="US/Eastern"),
Timestamp("20130101T10:01:00", tz="US/Eastern"),
Timestamp("20130101T10:02:00", tz="US/Eastern"),
]
}
)
tm.assert_frame_equal(result, expected)
def test_nested_dict_construction(self):
# GH22227
columns = ["Nevada", "Ohio"]
pop = {
"Nevada": {2001: 2.4, 2002: 2.9},
"Ohio": {2000: 1.5, 2001: 1.7, 2002: 3.6},
}
result = DataFrame(pop, index=[2001, 2002, 2003], columns=columns)
expected = DataFrame(
[(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)],
columns=columns,
index=Index([2001, 2002, 2003]),
)
tm.assert_frame_equal(result, expected)
def test_from_tzaware_object_array(self):
# GH#26825 2D object array of tzaware timestamps should not raise
dti = date_range("2016-04-05 04:30", periods=3, tz="UTC")
data = dti._data.astype(object).reshape(1, -1)
df = DataFrame(data)
assert df.shape == (1, 3)
assert (df.dtypes == dti.dtype).all()
assert (df == dti).all().all()
def test_from_tzaware_mixed_object_array(self):
    # GH#26825
    # A 2D object array whose columns mix naive and differently-zoned
    # aware Timestamps (plus NaT) should infer a distinct datetime dtype
    # per column rather than raising.
    arr = np.array(
        [
            [
                Timestamp("2013-01-01 00:00:00"),
                Timestamp("2013-01-02 00:00:00"),
                Timestamp("2013-01-03 00:00:00"),
            ],
            [
                Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
                pd.NaT,
                Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
            ],
            [
                Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
                pd.NaT,
                Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
            ],
        ],
        dtype=object,
    ).T  # transpose so each inner list above becomes one column
    res = DataFrame(arr, columns=["A", "B", "C"])

    expected_dtypes = [
        "datetime64[ns]",
        "datetime64[ns, US/Eastern]",
        "datetime64[ns, CET]",
    ]
    assert (res.dtypes == expected_dtypes).all()
def test_from_2d_ndarray_with_dtype(self):
    # GH#12513
    # Passing an extension dtype together with a 2D ndarray should match
    # constructing without the dtype and casting afterwards.
    array_dim2 = np.arange(10).reshape((5, 2))
    df = DataFrame(array_dim2, dtype="datetime64[ns, UTC]")

    expected = DataFrame(array_dim2).astype("datetime64[ns, UTC]")
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("typ", [set, frozenset])
def test_construction_from_set_raises(self, typ):
    # https://github.com/pandas-dev/pandas/issues/32582
    # Sets are unordered, so constructing a column from one must raise.
    values = typ({1, 2, 3})
    msg = f"'{typ.__name__}' type is unordered"

    with pytest.raises(TypeError, match=msg):
        DataFrame({"a": values})
    with pytest.raises(TypeError, match=msg):
        Series(values)
def test_construction_from_ndarray_datetimelike(self):
# ensure the underlying arrays are properly wrapped as EA when
# constructed from 2D ndarray
arr = np.arange(0, 12, dtype="datetime64[ns]").reshape(4, 3)
df = DataFrame(arr)
assert all(isinstance(arr, DatetimeArray) for arr in df._mgr.arrays)
def test_construction_from_ndarray_with_eadtype_mismatched_columns(self):
    # A column count that disagrees with the data width must raise, both
    # for a 2D ndarray with an EA dtype and for a 1D extension array.
    data = np.random.randn(10, 2)
    ea_dtype = pd.array([2.0]).dtype
    msg = r"len\(arrays\) must match len\(columns\)"

    with pytest.raises(ValueError, match=msg):
        DataFrame(data, columns=["foo"], dtype=ea_dtype)

    ea_values = pd.array([2.0, 3.0, 4.0])
    with pytest.raises(ValueError, match=msg):
        DataFrame(ea_values, columns=["foo", "bar"])
def get1(obj):
    """Return the first element: ``iloc[0]`` of a Series, ``iloc[0, 0]`` of a
    DataFrame."""
    if isinstance(obj, Series):
        return obj.iloc[0]
    return obj.iloc[0, 0]
class TestFromScalar:
    # Tests for constructing a DataFrame/Series from a single scalar value,
    # run against several container shapes via the ``constructor`` fixture.

    @pytest.fixture(params=[list, dict, None])
    def constructor(self, request, frame_or_series):
        # Return a callable that boxes the scalar (in a list, a dict, or
        # not at all) before handing it to DataFrame/Series, so every test
        # below exercises each construction path.
        box = request.param

        extra = {"index": range(2)}
        if frame_or_series is DataFrame:
            extra["columns"] = ["A"]

        if box is None:
            return functools.partial(frame_or_series, **extra)
        elif box is dict:
            if frame_or_series is Series:
                return lambda x, **kwargs: frame_or_series(
                    {0: x, 1: x}, **extra, **kwargs
                )
            else:
                return lambda x, **kwargs: frame_or_series({"A": x}, **extra, **kwargs)
        else:
            if frame_or_series is Series:
                return lambda x, **kwargs: frame_or_series([x, x], **extra, **kwargs)
            else:
                return lambda x, **kwargs: frame_or_series(
                    {"A": [x, x]}, **extra, **kwargs
                )

    @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
    def test_from_nat_scalar(self, dtype, constructor):
        # NaT with an explicit datetime/timedelta dtype gives all-NaT
        obj = constructor(pd.NaT, dtype=dtype)
        assert np.all(obj.dtypes == dtype)
        assert np.all(obj.isna())

    def test_from_timedelta_scalar_preserves_nanos(self, constructor):
        # 1-nanosecond Timedelta must survive the round trip exactly
        td = Timedelta(1)

        obj = constructor(td, dtype="m8[ns]")
        assert get1(obj) == td

    def test_from_timestamp_scalar_preserves_nanos(self, constructor):
        # Timestamp with a nanosecond component must not be truncated
        ts = Timestamp.now() + Timedelta(1)

        obj = constructor(ts, dtype="M8[ns]")
        assert get1(obj) == ts

    def test_from_timedelta64_scalar_object(self, constructor):
        # object dtype stores the raw np.timedelta64, not a Timedelta
        td = Timedelta(1)
        td64 = td.to_timedelta64()

        obj = constructor(td64, dtype=object)
        assert isinstance(get1(obj), np.timedelta64)

    @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64])
    def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request):
        node = request.node
        params = node.callspec.params
        if params["frame_or_series"] is DataFrame and params["constructor"] is dict:
            mark = pytest.mark.xfail(
                reason="DataFrame incorrectly allows mismatched datetimelike"
            )
            node.add_marker(mark)

        scalar = cls("NaT", "ns")
        # dtype deliberately opposite of the scalar's kind
        dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls]

        msg = "Cannot cast"
        if cls is np.datetime64:
            msg = "|".join(
                [
                    r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]",
                    "Cannot cast",
                ]
            )

        with pytest.raises(TypeError, match=msg):
            constructor(scalar, dtype=dtype)

        scalar = cls(4, "ns")
        with pytest.raises(TypeError, match=msg):
            constructor(scalar, dtype=dtype)

    @pytest.mark.parametrize("cls", [datetime, np.datetime64])
    def test_from_out_of_bounds_datetime(self, constructor, cls):
        # out-of-ns-bounds datetimes fall back to object dtype, keeping
        # the original scalar type
        scalar = datetime(9999, 1, 1)
        if cls is np.datetime64:
            scalar = np.datetime64(scalar, "D")
        result = constructor(scalar)

        assert type(get1(result)) is cls

    @pytest.mark.parametrize("cls", [timedelta, np.timedelta64])
    def test_from_out_of_bounds_timedelta(self, constructor, cls):
        scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1)
        if cls is np.timedelta64:
            scalar = np.timedelta64(scalar, "D")
        result = constructor(scalar)

        assert type(get1(result)) is cls

    def test_tzaware_data_tznaive_dtype(self, constructor):
        # tz-aware scalar + naive dtype warns and drops the timezone
        tz = "US/Eastern"
        ts = Timestamp("2019", tz=tz)
        ts_naive = Timestamp("2019")

        with tm.assert_produces_warning(
            FutureWarning, match="Data is timezone-aware", check_stacklevel=False
        ):
            result = constructor(ts, dtype="M8[ns]")

        assert np.all(result.dtypes == "M8[ns]")
        assert np.all(result == ts_naive)
|
{
"content_hash": "500862f45cde956cfc1b6668ae9c05fd",
"timestamp": "",
"source": "github",
"line_count": 2962,
"max_line_length": 88,
"avg_line_length": 36.33828494260635,
"alnum_prop": 0.5428489139119609,
"repo_name": "jorisvandenbossche/pandas",
"id": "f92bbe1c718ab08132b2dd4279c65b426bfc55bd",
"size": "107634",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/frame/test_constructors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DasAgentUnavailableEvent(vim, *args, **kwargs):
    '''This event records that VirtualCenter cannot contact any primary host in this
    HA cluster. HA designates some hosts as primary hosts in the HA cluster. When
    adding a new host to an existing cluster, HA needs to contact one of the
    primary hosts to finish the configuration. VirtualCenter has lost contact with
    all primary nodes in the connected state. Attempts to configure HA on a host in
    this cluster will fail until a DasAgentFoundEvent is logged or unless this is
    the first node to be configured. For example, if all the other hosts are
    disconnected first.'''

    obj = vim.client.factory.create('ns0:DasAgentUnavailableEvent')

    # do some validation checking...
    # BUG FIX: the message previously claimed "at least 5" and reported only
    # len(args); the check and the required list both say 4, and kwargs count
    # toward the total.
    total = len(args) + len(kwargs)
    if total < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % total)

    required = ['chainId', 'createdTime', 'key', 'userName']
    optional = ['changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
                'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty',
                'dynamicType']

    # positional args fill required then optional fields, in order
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # keyword args may set any known field; anything else is rejected
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
{
"content_hash": "afd47ce1aeaffc99babc87c928d0178b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 124,
"avg_line_length": 42.275,
"alnum_prop": 0.6540508574807806,
"repo_name": "xuru/pyvisdk",
"id": "ed574b2f9f02461a68c90a0e438864d9495ad2ae",
"size": "1692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/das_agent_unavailable_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
from model import *
from pins import *
from boards import *
from pinners import *
from attributions import *
|
{
"content_hash": "326fc15014b33c5cdc0eea34902b1f18",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 21.6,
"alnum_prop": 0.7777777777777778,
"repo_name": "svenfraeys/py-pinterest",
"id": "7c8e383d6200323669c43941d2db5a77ec272909",
"size": "108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinterest/types/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9099"
}
],
"symlink_target": ""
}
|
"""
A custom directive that allows alternative contents to be generated
on odd and even pages.
"""
from docutils.parsers import rst
from docutils.nodes import Admonition, Element
from docutils.parsers.rst import directives
class OddEvenNode(Admonition, Element):
    """Docutils node holding the odd-page and even-page alternative content."""
    pass
class OddEven(rst.Directive):
    """
    A custom directive that allows alternative contents to be generated
    on odd and even pages. It can contain only two children, so use containers
    to group them. The first one is odd, the second is even.
    """
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True

    def run(self):
        # Directive content is mandatory; parse it into an OddEvenNode
        # container and hand that single node back to docutils.
        self.assert_has_content()
        container = OddEvenNode()
        self.state.nested_parse(self.content, self.content_offset, container)
        return [container]
# Make the directive available to reST documents as ``.. oddeven::``.
directives.register_directive("oddeven", OddEven)
|
{
"content_hash": "5f88bd016386768d86085b4b8001460d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 25.055555555555557,
"alnum_prop": 0.70509977827051,
"repo_name": "aquavitae/rst2pdf-py3-dev",
"id": "efabd033e9785ac428e0443f0c15f70090ba123e",
"size": "927",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rst2pdf/oddeven_directive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35249"
},
{
"name": "Makefile",
"bytes": "94343"
},
{
"name": "Python",
"bytes": "989625"
},
{
"name": "Shell",
"bytes": "1706"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import banyan_c
class RankUpdator(object):
    """
    Updates nodes by the size of their subtree. Allows trees employing this to efficiently answer
    queries such as "what is the k-th key?" and "what is the order of key 'a' in the
    collection?".

    Example:

    >>> t = SortedSet(['hao', 'jiu', 'mei', 'jian'], updator = RankUpdator)
    >>> t
    SortedSet(['hao', 'jian', 'jiu', 'mei'])
    >>>
    >>> # 'hao' is item no. 0
    >>> t.kth(0)
    'hao'
    >>> t.order('hao')
    0
    >>>
    >>> # 'mei' is item no. 3
    >>> t.kth(3)
    'mei'
    >>> t.order('mei')
    3
    """

    # Metadata appended to each node.
    class Metadata(banyan_c.RankMetadata):
        # Overwritten by C implementation
        pass

    def kth(self, k):
        """
        :returns: k-th key
        :raises: :py:exc:`IndexError` if k is too small (negative) or too large (exceeds the
            order of the largest item).
        """
        # Pure delegation; _rank_updator_kth is supplied by the C extension
        # when this updator is mixed into a tree class.
        return self._rank_updator_kth(k)

    def order(self, key):
        """
        :returns: The order of key in the keys of the container.
        """
        # Delegates to the C-implemented rank query.
        return self._rank_updator_order(key)
|
{
"content_hash": "a4d346609005fa04047beec5de8d2743",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 98,
"avg_line_length": 24.862745098039216,
"alnum_prop": 0.5110410094637224,
"repo_name": "pyannote/pyannote-banyan",
"id": "2d8f193395f89f447593f206469f78b2744258d0",
"size": "1268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "banyan/_rank_updator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "324494"
},
{
"name": "Python",
"bytes": "185400"
}
],
"symlink_target": ""
}
|
from functools import wraps
from pycloudia.utils.defer import inline_callbacks, return_value, maybe_deferred
from pycloudia.cluster.consts import HEADER, STATUS
from pycloudia.cluster.interfaces import IRequestPackage
__all__ = [
'ResolverMeta',
'resolve_errors',
'resolve_method',
]
class ResolverMeta(type):
    """Metaclass that collects exception-handler methods into a per-class
    ``exception_map`` ({exception type: handler}), inheriting entries from
    base classes and adding any namespace member tagged with
    ``__exception_type__`` (see ``ResolverMethodDecorator``).
    """

    def __new__(mcs, name, bases, namespace):
        new_cls = super(ResolverMeta, mcs).__new__(mcs, name, bases, namespace)
        merged = {}
        for parent in bases:
            merged.update(getattr(parent, 'exception_map', {}))
        merged.update({
            member.__exception_type__: member
            for member in namespace.values()
            if hasattr(member, '__exception_type__')
        })
        new_cls.exception_map = merged
        return new_cls

    def resolve(cls, exception, logger=None):
        """Dispatch *exception* to its registered handler.

        Returns ``(verbose_name, handler_result)`` for a known exception
        type; for unknown types, logs via *logger* when given, otherwise
        re-raises the exception.
        """
        handler = cls.exception_map.get(type(exception))
        if handler is None:
            if logger:
                logger.exception(exception)
                return None
            raise exception
        return handler.__exception_verbose__, handler(exception)
class ResolverMethodDecorator(object):
    """Decorator that tags a method as the handler for one exception type,
    so ``ResolverMeta`` picks it up into ``exception_map``.

    :param exception_type: exception class the decorated method handles
    :param verbose: human-readable name; defaults to the exception's name
    """

    def __init__(self, exception_type, verbose=None):
        self.exception_type = exception_type
        self.verbose = verbose if verbose else exception_type.__name__

    def __call__(self, method):
        # Annotate in place and hand the original method back unchanged.
        setattr(method, '__exception_type__', self.exception_type)
        setattr(method, '__exception_verbose__', self.verbose)
        return method
class ResolverDecorator(object):
    """Decorator factory wrapping a request-handling method so that any
    exception it raises is converted, via *resolver*, into a failure
    response package instead of propagating.

    :param resolver: class whose ``resolve(exception[, logger])`` maps an
        exception to ``(verbose, content)`` (see ``ResolverMeta``)
    :param logging: when True, pass the subject's logger to the resolver so
        unknown exceptions are logged rather than re-raised
    """

    def __init__(self, resolver, logging=True):
        self.resolver = resolver
        self.logging = logging

    def __call__(self, func):
        @wraps(func)
        @inline_callbacks
        def decorator(subject, package, *args, **kwargs):
            assert isinstance(package, IRequestPackage)
            try:
                # func may return a value or a deferred; normalize to deferred
                response = yield maybe_deferred(func(subject, package, *args, **kwargs))
            except Exception as e:
                if self.logging:
                    verbose, content = self.resolver.resolve(e, subject.logger)
                else:
                    verbose, content = self.resolver.resolve(e)
                # Build a failure response carrying the resolver's verdict.
                response = package.create_response(content, {
                    HEADER.STATUS: STATUS.FAILURE,
                    HEADER.REASON: verbose,
                })
            return_value(response)
        return decorator
# Public aliases exported via __all__.
resolve_errors = ResolverDecorator
resolve_method = ResolverMethodDecorator
|
{
"content_hash": "87399d1a6c2b9c790239b4727675a349",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 88,
"avg_line_length": 31.5625,
"alnum_prop": 0.5952475247524752,
"repo_name": "cordis/pycloudia",
"id": "17672e293ca680ab131aefaa1b530b83bd639cf6",
"size": "2525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycloudia/cluster/resolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139347"
}
],
"symlink_target": ""
}
|
class Executor:
    """Placeholder base class for pipeline executors; no behavior yet."""
    pass
|
{
"content_hash": "13a2fd90f69bdc371d7ed5c49a393215",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 15,
"avg_line_length": 12.5,
"alnum_prop": 0.68,
"repo_name": "edublancas/python-ds-tools",
"id": "4453f038090e20a73c337aa98f459c986c1ce608",
"size": "26",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/dstools/pipeline/executors/Executor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11675"
}
],
"symlink_target": ""
}
|
"""dyn.core is a utilities module for use internally within the dyn library
itself. Although it's possible to use this functionality outside of the dyn
library, it is not recommened and could possible result in some strange
behavior.
"""
import base64
import copy
import locale
import logging
import re
import threading
import time
from datetime import datetime
from . import __version__
from .compat import (HTTPConnection, HTTPSConnection, HTTPException, json,
prepare_to_send, force_unicode)
def cleared_class_dict(dict_obj):
    """Return a cleared dict of class attributes. The items cleared are any
    fields which evaluate to None, and any methods
    """
    cleaned = {}
    for key in dict_obj:
        value = dict_obj[key]
        if value is None or hasattr(value, '__call__'):
            continue
        cleaned[key] = value
    return cleaned
def clean_args(dict_obj):
    """Clean a dictionary of API arguments to prevent the display of plain text
    passwords to users

    :param dict_obj: The dictionary of arguments to be cleaned
    """
    # Deep-copy so masking never mutates the caller's dict.
    masked = copy.deepcopy(dict_obj)
    if 'password' in masked:
        masked['password'] = '*****'
    return masked
class _Singleton(type):
    """Metaclass implementing a per-(__metakey__, thread) singleton: each
    subclass declares a ``__metakey__`` and each thread gets its own shared
    instance under that key.
    """
    # Registry: {__metakey__: {thread: instance}}
    _instances = {}

    def __call__(cls, *args, **kwargs):
        cur_thread = threading.current_thread()
        key = getattr(cls, '__metakey__')
        if key not in cls._instances:
            cls._instances[key] = {
                # super(Singleton, cls) evaluates to type; *args/**kwargs get
                # passed to class __init__ method via type.__call__
                cur_thread: super(_Singleton, cls).__call__(*args, **kwargs)
            }
        elif key in cls._instances and cur_thread not in cls._instances[key]:
            # Key exists but this thread has no instance yet: create one.
            cls._instances[key][cur_thread] = \
                super(_Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[key][cur_thread]
# This class is a workaround for supporting metaclasses in both Python2 and 3
class Singleton(_Singleton('SingletonMeta', (object,), {})):
    """A :class:`~dyn.core.Singleton` type for implementing a true Singleton
    design pattern, cleanly, using metaclasses
    """
    # Subclassing a _Singleton-built intermediate class sets the metaclass
    # in a way that works on both Python 2 and 3.
    pass
class _History(list):
"""A *list* subclass specifically targeted at being able to store the
history of calls made via a SessionEngine
"""
def append(self, p_object):
"""Override builtin list append operators to allow for the automatic
appendation of a timestamp for cleaner record keeping
"""
now_ts = datetime.now().isoformat()
super(_History, self).append(tuple([now_ts] + list(p_object)))
class SessionEngine(Singleton):
"""Base object representing a DynectSession Session"""
_valid_methods = tuple()
uri_root = '/'
def __init__(self, host=None, port=443, ssl=True, history=False,
             proxy_host=None, proxy_port=None, proxy_user=None,
             proxy_pass=None):
    """Initialize a Dynect Rest Session object and store the provided
    credentials

    :param host: DynECT API server address
    :param port: Port to connect to DynECT API server
    :param ssl: Enable SSL
    :param history: A boolean flag determining whether or not you would
        like to store a record of all API calls made to review later
    :param proxy_host: A proxy host to utilize
    :param proxy_port: The port that the proxy is served on
    :param proxy_user: A username to connect to the proxy with if required
    :param proxy_pass: A password to connect to the proxy with if required
    :return: SessionEngine object
    """
    super(SessionEngine, self).__init__()
    # Timestamped per-call audit log; None disables history tracking.
    self.__call_cache = _History() if history else None
    self.extra_headers = dict()
    self.logger = logging.getLogger(self.name)
    self.host = host
    self.port = port
    self.ssl = ssl
    self.proxy_host = proxy_host
    self.proxy_port = proxy_port
    self.proxy_user = proxy_user
    self.proxy_pass = proxy_pass
    # When True, 307 responses are polled until the job completes.
    self.poll_incomplete = True
    self.content_type = 'application/json'
    self._encoding = locale.getdefaultlocale()[-1] or 'UTF-8'
    # Auth token, live connection, and most recent raw response object.
    self._token = self._conn = self._last_response = None
    self._permissions = None
    # Maps blocked task ids to their current retry backoff (see _retry).
    self._tasks = {}
@classmethod
def new_session(cls, *args, **kwargs):
    """Return a new session instance, regardless of whether or not there is
    already an existing session.

    :param args: Arguments to be passed to the Singleton __call__ method
    :param kwargs: keyword arguments to be passed to the Singleton __call__
        method
    """
    cur_thread = threading.current_thread()
    key = getattr(cls, '__metakey__')
    # Close any session the current thread already holds before the
    # Singleton machinery creates a fresh one.
    instance = cls._instances.get(key, {}).get(cur_thread, None)
    if instance:
        instance.close_session()
    return cls.__call__(*args, **kwargs)
@classmethod
def get_session(cls):
    """Return the current session for this Session type or None if there is
    not an active session
    """
    # Sessions are keyed by (__metakey__, current thread) in the registry.
    cur_thread = threading.current_thread()
    key = getattr(cls, '__metakey__')
    return cls._instances.get(key, {}).get(cur_thread, None)
@classmethod
def close_session(cls):
    """Remove the current session from the dict of instances and return it.
    If there was not currently a session being stored, return None. If,
    after removing this session, there is nothing under the current key,
    delete that key's entry in the _instances dict.
    """
    cur_thread = threading.current_thread()
    key = getattr(cls, '__metakey__')
    closed = cls._instances.get(key, {}).pop(cur_thread, None)
    # Drop the whole key once no thread has a session under it.
    if len(cls._instances.get(key, {})) == 0:
        cls._instances.pop(key, None)
    return closed
@property
def name(self):
    """A human readable version of the name of this object"""
    # str(cls) looks like "<class 'pkg.ClassName'>"; take the last
    # dot-separated piece and strip the trailing "'>".
    return str(self.__class__).split('.')[-1][:-2]
def connect(self):
    """Establishes a connection to the REST API server as defined by the
    host, port and ssl instance variables. If a proxy is specified, it
    is used.
    """
    if self._token:
        # A previous session is still live on the server; log out first
        # so we do not leak sessions, without polling the logout call.
        self.logger.debug('Forcing logout from old session')
        orig_value = self.poll_incomplete
        self.poll_incomplete = False
        self.execute('/REST/Session', 'DELETE')
        self.poll_incomplete = orig_value
        self._token = None
    self._conn = None

    use_proxy = False
    headers = {}
    if self.proxy_host and not self.proxy_port:
        msg = 'Proxy missing port, please specify a port'
        raise ValueError(msg)
    if self.proxy_host and self.proxy_port:
        use_proxy = True
        if self.proxy_user and self.proxy_pass:
            auth = '{}:{}'.format(self.proxy_user, self.proxy_pass)
            # BUG FIX: base64.b64encode requires bytes on Python 3;
            # encode the credentials and decode the result back to text
            # so the header value is a str on both Python 2 and 3.
            encoded = base64.b64encode(auth.encode('utf-8')).decode('ascii')
            headers['Proxy-Authorization'] = 'Basic ' + encoded

    # The four original branches differed only in connection class and
    # log wording; fold them together (log output is unchanged).
    kind = 'SSL' if self.ssl else 'unencrypted'
    conn_class = HTTPSConnection if self.ssl else HTTPConnection
    if use_proxy:
        msg = 'Establishing {} connection to {}:{} with proxy {}:{}'.format(
            kind, self.host, self.port, self.proxy_host, self.proxy_port)
        self.logger.info(msg)
        self._conn = conn_class(self.proxy_host, self.proxy_port,
                                timeout=300)
        # Tunnel through the proxy to the actual API host.
        self._conn.set_tunnel(self.host, self.port, headers)
    else:
        msg = 'Establishing {} connection to {}:{}'.format(
            kind, self.host, self.port)
        self.logger.info(msg)
        self._conn = conn_class(self.host, self.port, timeout=300)
def _process_response(self, response, method, final=False):
    """API Method. Process an API response for failure, incomplete, or
    success and throw any appropriate errors

    :param response: the JSON response from the request being processed
    :param method: the HTTP method
    :param final: boolean flag representing whether or not to continue
        polling
    """
    # Base implementation is a pass-through; subclasses are expected to
    # override with real status handling.
    return response
def _handle_error(self, uri, method, raw_args):
    """Handle the processing of a connection error with the api. Note, to be
    implemented as needed in subclasses.
    """
    # Returning None tells execute() to re-raise the original exception.
    return None
def _retry(self, msgs, final=False):
    """Retry logic around throttled or blocked tasks.

    :param msgs: list of error dicts from the API ('ERR_CD'/'INFO' keys)
    :param final: True when this was already the last allowed attempt
    :return: dict with keys ``retry`` (bool), ``wait`` (seconds) and
        ``final`` (bool, whether the next attempt is the last)
    """
    throttle_err = 'RATE_LIMIT_EXCEEDED'
    # BUG FIX: use .get() -- not every error dict carries 'ERR_CD', and
    # the 'INFO' check below already guarded with .get() while the
    # ``blocked`` check did not.
    throttled = any(throttle_err == err.get('ERR_CD') for err in msgs)
    if throttled:
        # We're rate limited, so wait 5 seconds and try again
        return dict(retry=True, wait=5, final=final)

    blocked_err = 'Operation blocked by current task'
    blocked = any(blocked_err in err.get('INFO', '') for err in msgs)
    pat = re.compile(r'^task_id:\s+(\d+)$')
    if blocked:
        try:
            # Get the task id
            task = next(pat.match(i['INFO']).group(1) for i in msgs
                        if pat.match(i.get('INFO', '')))
        except StopIteration:
            # BUG FIX: was a bare ``except:``; the generator's filter
            # guarantees group(1) is safe, so only an empty result
            # (StopIteration from next()) can occur here.
            wait = 1
        else:
            # Exponential backoff for individual blocked tasks
            wait = self._tasks.get(task, 1)
            self._tasks[task] = wait * 2 + 1
        # Give up if final or wait > 30 seconds
        return dict(retry=True, wait=wait, final=wait > 30 or final)

    # Neither blocked nor throttled?
    return dict(retry=False, wait=0, final=True)
def _handle_response(self, response, uri, method, raw_args, final):
    """Handle the processing of the API's response: poll 307s, decode the
    JSON body, record history, update session state, and retry failures
    when _retry says to.
    """
    body = response.read()
    self.logger.debug('RESPONSE: {0}'.format(body))
    self._last_response = response

    if self.poll_incomplete:
        # Follow 307 redirects until the job finishes.
        response, body = self.poll_response(response, body)
        self._last_response = response
    if not body:
        err_msg_fmt = "Received Empty Response: {!r} status: {!r} {!r}"
        error_message = err_msg_fmt.format(body, response.status, uri)
        self.logger.error(error_message)
        raise ValueError(error_message)
    json_err_fmt = "Decode Error on Response Body: {!r} status: {!r} {!r}"
    try:
        ret_val = json.loads(body.decode('UTF-8'))
    except ValueError:
        self.logger.error(json_err_fmt.format(body, response.status, uri))
        raise

    if self.__call_cache is not None:
        # History enabled: record (uri, method, masked args, status).
        self.__call_cache.append((uri, method, clean_args(raw_args),
                                  ret_val['status']))
    # Track token changes for Session login/logout calls.
    self._meta_update(uri, method, ret_val)
    retry = {}
    # Try to retry?
    if ret_val['status'] == 'failure' and not final:
        retry = self._retry(ret_val['msgs'], final)
    if retry.get('retry', False):
        time.sleep(retry['wait'])
        # Re-issue the same call; 'final' from _retry caps recursion.
        return self.execute(uri, method, raw_args, final=retry['final'])
    else:
        return self._process_response(ret_val, method)
def _validate_uri(self, uri):
    """Validate and return a cleaned up uri. Make sure the command is
    prefixed by '/REST/'
    """
    cleaned = uri if uri.startswith('/') else '/' + uri
    if not cleaned.startswith(self.uri_root):
        cleaned = self.uri_root + cleaned
    return cleaned
def _validate_method(self, method):
    """Raise ValueError unless *method* (case-insensitive) is one of this
    session's valid HTTP methods.
    """
    if method.upper() in self._valid_methods:
        return
    msg = '{} is not a valid HTTP method. Please use one of {}'
    raise ValueError(msg.format(method, ', '.join(self._valid_methods)))
def _prepare_arguments(self, args, method, uri):
    """Prepare the arguments to be sent off to the API

    :return: tuple of (raw args dict, JSON-encoded args, uri)
    """
    if args is None:
        args = {}
    if not isinstance(args, dict):
        # If args is an object type, parse it's dict for valid args
        # If an item in args.__dict__ has a _json attribute, use that in
        # place of the actual object
        # NOTE(review): the trailing x.startswith('_') filter means only
        # underscore-prefixed attributes survive, with the underscore
        # stripped from the key -- the non-underscore arm of the key
        # expression looks unreachable; confirm against the API object
        # naming convention before changing.
        d = args.__dict__
        args = {(x if not x.startswith('_') else x[1:]):
                (d[x] if not hasattr(d[x], '_json') else getattr(d[x],
                                                                 '_json'))
                for x in d if d[x] is not None and
                not hasattr(d[x], '__call__') and x.startswith('_')}
    return args, json.dumps(args), uri
def execute(self, uri, method, args=None, final=False):
    """Execute a commands against the rest server

    :param uri: The uri of the resource to access. /REST/ will be prepended
        if it is not at the beginning of the uri
    :param method: One of 'DELETE', 'GET', 'POST', or 'PUT'
    :param args: Any arguments to be sent as a part of the request
    :param final: boolean flag representing whether or not we have already
        failed executing once or not
    """
    if self._conn is None:
        # Lazily (re)establish the HTTP(S) connection.
        self.connect()

    uri = self._validate_uri(uri)

    # Make sure the method is valid
    self._validate_method(method)

    # Prepare arguments to send to API
    raw_args, args, uri = self._prepare_arguments(args, method, uri)
    msg = 'uri: {}, method: {}, args: {}'
    # Log with passwords masked via clean_args.
    self.logger.debug(
        msg.format(uri, method, clean_args(json.loads(args))))
    # Send the command and deal with results
    self.send_command(uri, method, args)

    # Deal with the results
    try:
        response = self._conn.getresponse()
    except (IOError, HTTPException) as e:
        if final:
            raise e
        else:
            # Handle processing a connection error
            resp = self._handle_error(uri, method, raw_args)
            # If we got a valid response back from our _handle_error call
            # Then return it, otherwise raise the original exception
            if resp is not None:
                return resp
            raise e
    return self._handle_response(response, uri, method, raw_args, final)
def _meta_update(self, uri, method, results):
    """Update the HTTP session token if the uri is a login or logout

    :param uri: the uri from the call being updated
    :param method: the api method
    :param results: the JSON results
    """
    # Only POST (login) and DELETE (logout) on /REST/Session touch the
    # token; anything else is a no-op.
    if uri.startswith('/REST/Session') and method in ('POST', 'DELETE'):
        if results['status'] == 'success':
            if method == 'POST':
                self._token = results['data']['token']
            else:
                self._token = None
def poll_response(self, response, body):
    """Looks at a response from a REST command, and while indicates that
    the job is incomplete, poll for response

    :param response: the JSON response containing return codes
    :param body: the body of the HTTP response
    """
    # HTTP 307 means the job is still running; its Location header points
    # at the job status resource to poll.
    while response.status == 307:
        time.sleep(1)
        uri = response.getheader('Location')
        self.logger.info('Polling {}'.format(uri))

        self.send_command(uri, 'GET', '')
        response = self._conn.getresponse()
        body = response.read()

    return response, body
def send_command(self, uri, method, args):
    """Responsible for packaging up the API request and sending it to the
    server over the established connection

    :param uri: The uri of the resource to interact with
    :param method: The HTTP method to use
    :param args: Encoded arguments to send to the server
    """
    self._conn.putrequest(method, uri)

    # Build headers
    user_agent = 'dyn-py v{}'.format(__version__)
    headers = {'Content-Type': self.content_type, 'User-Agent': user_agent}
    # Caller-supplied extras may override the defaults above.
    for key, val in self.extra_headers.items():
        headers[key] = val

    if self._token is not None:
        headers['Auth-Token'] = self._token

    for key, val in headers.items():
        self._conn.putheader(key, val)

    # Now the arguments
    self._conn.putheader('Content-length', '%d' % len(args))
    self._conn.endheaders()
    # prepare_to_send handles the str/bytes conversion for the wire.
    self._conn.send(prepare_to_send(args))
def wait_for_job_to_complete(self, job_id, timeout=120):
    """When a response comes back with a status of "incomplete" we need to
    wait and poll for the status of that job until it comes back with
    success or failure

    :param job_id: the id of the job to poll for a response from
    :param timeout: how long (in seconds) we should wait for a valid
        response before giving up on this request
    """
    self.logger.debug('Polling for job_id: {}'.format(job_id))
    start = datetime.now()
    uri = '/Job/{}/'.format(job_id)
    api_args = {}
    # Seed the loop; the first real poll happens inside it.
    response = {'status': 'incomplete'}
    self.logger.warning('Waiting for job {}'.format(job_id))
    # BUG FIX: the status check used ``is 'incomplete'`` (identity, not
    # equality, on a str literal) and the timeout flag was computed once
    # before the loop, so it never expired. Compare with == and re-check
    # the elapsed time on every iteration.
    while response['status'] == 'incomplete':
        if (datetime.now() - start).seconds >= timeout:
            break
        time.sleep(10)
        response = self.execute(uri, 'GET', api_args)
    return response
def __getstate__(self):
    """Because HTTP/HTTPS connections are not serializeable, we need to
    strip the connection instance out before we ship the pickled data
    """
    state = dict(self.__dict__)
    state.pop('_conn')
    return state
def __setstate__(self, state):
    """Because the HTTP/HTTPS connection was stripped out in __getstate__ we
    must manually re-enter it as None and let the sessions execute method
    handle rebuilding it later
    """
    state['_conn'] = None
    self.__dict__ = state
def __str__(self):
"""str override"""
return force_unicode('<{}>').format(self.name)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
    @property
    def history(self):
        """A history of all API calls that have been made during the duration
        of this Session's existence. These API call details are returned as a
        *list* of 5-tuples of the form: (timestamp, uri, method, args, status)
        where status will be one of 'success' or 'failure'.

        The internal list is returned directly (not a copy), so mutating the
        returned value mutates the session's cache.
        """
        # __call_cache is name-mangled; it is created and appended to
        # elsewhere in this class (not visible in this chunk).
        return self.__call_cache
|
{
"content_hash": "210ad18424eb6de5ad31cf5873cbcd8f",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 80,
"avg_line_length": 38.488416988416986,
"alnum_prop": 0.5694437478055876,
"repo_name": "dyninc/dyn-python",
"id": "0e12036a881741c4387d171f17c2b11463563982",
"size": "19961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dyn/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "474"
},
{
"name": "Python",
"bytes": "709156"
}
],
"symlink_target": ""
}
|
'''Crontab generator for Citation Hunt.
This outputs the schedule of database update jobs for all languages.
The runs for different languages are spread out evenly within a given day, and
the runs for a given language are spread out evenly across the days of the
month.
'''
import os
import sys
# Make the repository root importable (for `import config` below) when the
# script is run directly from its own subdirectory.
_upper_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
if _upper_dir not in sys.path:
    sys.path.append(_upper_dir)
import config
freq = 4 # how many days between runs, for each language
duration = 4 # how many hours between runs within a single day
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/
cronjob_template = '''
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: citationhunt-update-{name}
labels:
name: citationhunt.update-{name}
# The toolforge=tool label will cause $HOME and other paths to be mounted from Toolforge
toolforge: tool
spec:
schedule: "0 {h} {dom}-31/{freq} * *"
jobTemplate:
spec:
activeDeadlineSeconds: 86400
template:
metadata:
labels:
toolforge: tool
spec:
containers:
- name: ch
workingDir: /data/project/citationhunt
image: docker-registry.tools.wmflabs.org/toolforge-python37-sssd-base:latest
args: [
/data/project/citationhunt/www/python/venv/bin/python3,
/data/project/citationhunt/citationhunt/scripts/update_db_tools_labs.py,
{lc}
]
resources:
limits:
memory: "4Gi"
requests:
memory: "1Gi"
restartPolicy: Never
concurrencyPolicy: Replace
'''
# Each language gets a slot `duration` hours after the previous one; once
# the slots spill past midnight, later languages start on the next
# day-of-month offset so runs stay spread across the month.
for slot, lang_code in enumerate(sorted(config.LANG_CODES_TO_LANG_NAMES)):
    offset_hours = slot * duration
    print(cronjob_template.format(
        lc=lang_code,
        name=lang_code.replace('_', '-'),
        h=offset_hours % 24,
        dom=1 + (offset_hours // 24),
        freq=freq))
|
{
"content_hash": "e68c2ea3bcec49eddaae1b6162d613d4",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 92,
"avg_line_length": 29.060606060606062,
"alnum_prop": 0.6282586027111574,
"repo_name": "eggpi/citationhunt",
"id": "552cf76743e47a5e0c33bebc3d4da9ecee03432b",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k8s/crontab.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13227"
},
{
"name": "HTML",
"bytes": "16959"
},
{
"name": "JavaScript",
"bytes": "112497"
},
{
"name": "Python",
"bytes": "193620"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``tickformat`` property of
    ``choroplethmapbox.colorbar``.
    """

    def __init__(
        self,
        plotly_name="tickformat",
        parent_name="choroplethmapbox.colorbar",
        **kwargs
    ):
        # Fill in defaults only when the caller did not supply them, then
        # forward everything to the base string validator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("role", "style")
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "7669b72b701cdd1b376f3b1cb407fbb2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 30.11764705882353,
"alnum_prop": 0.5859375,
"repo_name": "plotly/python-api",
"id": "0e0b1a49f559f2f32a2026341d8c6cc29b355b5c",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/_tickformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from modeltranslation.admin import TranslationAdmin
from paperclip.models import Attachment
from .models import FileType, Organism, Theme, RecordSource
class OrganismAdmin(admin.ModelAdmin):
    """Admin options for Organism records, filterable by structure."""
    search_fields = ('organism', 'structure')
    list_display = ('organism', 'structure')
    list_filter = ('structure',)
class FileTypeAdmin(admin.ModelAdmin):
    """Admin options for FileType records, filterable by structure."""
    search_fields = ('type', 'structure')
    list_display = ('type', 'structure')
    list_filter = ('structure',)
class MapEntityContentTypeFilter(admin.SimpleListFilter):
    """List filter on attachment content type, offering one choice per
    model registered with mapentity."""
    title = _('content type')
    parameter_name = 'content_type'

    def lookups(self, request, model_admin):
        # Imported lazily so the mapentity registry is not touched at
        # module import time.
        from mapentity import registry
        return tuple(
            (model.get_content_type_id(), entity.label)
            for model, entity in registry.registry.items()
        )

    def queryset(self, request, queryset):
        # Returning None (no selection) tells Django not to filter.
        if not self.value():
            return None
        return queryset.filter(content_type=self.value())
class AttachmentAdmin(admin.ModelAdmin):
    """Browse-oriented admin for attachments: searchable and filterable,
    with the linkage fields locked down and creation disabled."""
    date_hierarchy = 'date_update'
    search_fields = ('title', 'legend', 'author')
    list_display = ('title', 'legend', 'author', 'content_type')
    list_filter = ('filetype', MapEntityContentTypeFilter)
    readonly_fields = ('content_type', 'object_id', 'creator', 'title')

    def has_add_permission(self, request):
        """Do not add from Adminsite."""
        return False
class ThemeAdmin(TranslationAdmin):
    """Admin for themes; translated fields handled by modeltranslation."""
    search_fields = ('label',)
    list_display = ('label', 'cirkwi', 'pictogram_img')
class RecordSourceAdmin(admin.ModelAdmin):
    """Admin options for record sources, filterable by structure."""
    search_fields = ('name', 'structure')
    list_display = ('name', 'structure', 'pictogram_img')
    list_filter = ('structure',)
# Register each model with its dedicated admin options class.
for _model, _model_admin in (
        (Organism, OrganismAdmin),
        (Attachment, AttachmentAdmin),
        (FileType, FileTypeAdmin),
        (Theme, ThemeAdmin),
        (RecordSource, RecordSourceAdmin)):
    admin.site.register(_model, _model_admin)
|
{
"content_hash": "eee19d43e86ea80566d43c9210ef5563",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 71,
"avg_line_length": 31.78787878787879,
"alnum_prop": 0.6882745471877979,
"repo_name": "mabhub/Geotrek",
"id": "73d6cb1e3a36f59f91e9508b538619b279a21543",
"size": "2098",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "geotrek/common/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5981"
},
{
"name": "HTML",
"bytes": "88887"
},
{
"name": "JavaScript",
"bytes": "183605"
},
{
"name": "Makefile",
"bytes": "4038"
},
{
"name": "PLpgSQL",
"bytes": "73761"
},
{
"name": "Python",
"bytes": "2252325"
},
{
"name": "Shell",
"bytes": "16247"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.