hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1f0f391b40b6e4cd93087c650971f8c404f520a2 | 2,726 | py | Python | src/estimation/extendedCV.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | null | null | null | src/estimation/extendedCV.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-06T10:01:59.000Z | 2021-05-17T12:14:44.000Z | src/estimation/extendedCV.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-19T06:52:09.000Z | 2021-12-10T08:57:54.000Z | from pathlib import Path
import numpy as np
import pandas as pd
import pickle
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression, LinearRegression
from standard import getdf, get_dependent_var
from extended import data_general, prepare_classifier, prepare_regressor
###############################################################################
# dir = Path(__file__).resolve().parents[2]
dir = Path("/home/christian/dynasim/")
input_path = dir / "input"
model_path = dir / "src/estimation/modelsCV/"
###############################################################################
def _estimate(dataf, dep_var, type):
    """Fit and tune a LightGBM model for one dependent variable, then persist it.

    Parameters
    ----------
    dataf : pandas.DataFrame
        Panel data containing the features and the dependent variable.
    dep_var : str
        Name of the column to predict (e.g. "birth", "hours").
    type : str
        One of "regression", "binary"; anything else is treated as multiclass.
        NOTE(review): this parameter shadows the builtin `type`.

    Side effects: writes the best booster and the fitted X-scaler under
    model_path/<dep_var>/.
    """
    dataf = dataf.copy()
    dataf = data_general(dataf, dep_var)
    dataf.dropna(inplace=True)

    # Pick the data preparation and estimator matching the target type.
    if type == 'regression':
        dici = prepare_regressor(dataf, dep_var)
        # pickle.dump(dici['y_scaler'],
        #             open(model_path / str(dep_var + "_y_scaler_multi"), 'wb'))
        estimator = lgb.LGBMRegressor(num_leaves = 31)
    elif type == 'binary':
        dici = prepare_classifier(dataf)
        estimator = lgb.LGBMClassifier(num_leaves = 31)
    else:
        # Multiclass targets use the same classifier preparation as binary ones.
        dici = prepare_classifier(dataf)
        estimator = lgb.LGBMClassifier(num_leaves = 31)

    # Initial fit with early stopping against the held-out test split.
    modl = estimator.fit(dici['X_train'], dici['y_train'],
                         eval_set=[(dici['X_test'], dici['y_test'])],
                         feature_name = dici['features'],
                         early_stopping_rounds = 5)

    param_grid = {
        'learning_rate': np.linspace(0.1, 1, 4),
        'n_estimators': [100, 200, 300],
        'boosting_type': ['gbdt', 'rf', 'dart'],
        'feature_fraction': [0.9],
        'bagging_fraction': [0.8]
    }

    # GridSearchCV clones and refits the estimator for every grid point.
    cv_modl = GridSearchCV(modl, param_grid, cv=3, verbose=2,n_jobs=6)
    cv_modl.fit(dici['X_train'], dici['y_train'])

    # Make directory if it doesn't exist yet
    Path(model_path / dep_var).mkdir(parents=True, exist_ok=True)

    result = cv_modl.best_estimator_
    # NOTE(review): this writes a file literally named "_extended.txt" inside the
    # <dep_var> directory; confirm the leading underscore is intended as a file
    # name and not as a suffix of dep_var.
    result.booster_.save_model(str(model_path / dep_var / "_extended.txt"))
    pickle.dump(dici['X_scaler'],
                open(model_path / dep_var / "_X_scaler_multi", 'wb'))
###############################################################################
if __name__ == "__main__":
    # Load the merged input panel and train all four extended models.
    df = pd.read_pickle(input_path / 'merged').dropna()
    df1 = getdf(df)

    _estimate(df1, "birth", "binary")
    _estimate(df1, "employment_status", "multiclass")
    # Hours and earnings models are fitted only on people who are working.
    _estimate(df1[df1["working"]==1], "hours", "regression")
    _estimate(df1[df1["working"]==1], "gross_earnings", "regression")
| 32.452381 | 80 | 0.596478 |
62202cbf94eab7ef7aa6bede7f6c779ab244e720 | 1,584 | py | Python | python/Strong_Password.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/Strong_Password.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/Strong_Password.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | '''
https://www.hackerrank.com/challenges/strong-password/submissions/code/103472161
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumNumber function below.
def minimumNumber(n, password):
    """Return the minimum number of characters to add to make *password* strong.

    A strong password has at least 6 characters and contains at least one
    digit, one lowercase letter, one uppercase letter, and one special
    character from ``!@#$%^&*()-+``.

    Parameters
    ----------
    n : int
        Length of *password*.
    password : str
        The candidate password.
    """
    special_chars = '!@#$%^&*()-+'
    # One predicate per required character class.
    class_checks = (
        lambda ch: ch.isdigit(),
        lambda ch: ch.islower(),
        lambda ch: ch.isupper(),
        lambda ch: ch in special_chars,
    )
    # Count the character classes not represented in the password at all.
    missing = sum(
        1 for check in class_checks
        if not any(check(ch) for ch in password)
    )
    # Even after covering every class, the password must reach length 6.
    return max(missing, 6 - n)
''' wrong below because matching ALL in substring, just need to match 1 inside substring!
count = [0 for i in range(5)]
temp = 0
n = len(password)
if n < 6:
count[0] = 6 - n
elif password.find("0123456789") == -1:
count[1] = 1
elif password.find("abcdefghijklmnopqrstuvwxyz") == -1:
count[2] = 1
elif password.find("ABCDEFGHIJKLMNOPQRSTUVWXYZ") == -1:
count[3] = 1
elif password.find("!@#$%^&*()-+") == -1:
count[4] = 1
for i in range(1,5):
temp += count[i]
if temp > max(count):
return temp
else:
return max(count)
'''
if __name__ == '__main__':
    # HackerRank harness: read n and the password from stdin and write the
    # answer to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    n = int(input())

    password = input()

    answer = minimumNumber(n, password)

    fptr.write(str(answer) + '\n')

    fptr.close()
| 22.309859 | 93 | 0.587121 |
5f67d64458e477f962010840ae3c50069311fafd | 1,593 | py | Python | listening-test-server/handlers/administration/template_handler.py | MirageJian/listening-test | d387aa0a4f6b8304f74dc1c4117787607494e8f2 | [
"MIT"
] | 7 | 2020-09-15T09:20:30.000Z | 2022-03-24T23:15:39.000Z | listening-test-server/handlers/administration/template_handler.py | MirageJian/listening-test | d387aa0a4f6b8304f74dc1c4117787607494e8f2 | [
"MIT"
] | 5 | 2021-01-20T22:29:22.000Z | 2022-02-28T03:24:04.000Z | listening-test-server/handlers/administration/template_handler.py | MirageJian/listening-test | d387aa0a4f6b8304f74dc1c4117787607494e8f2 | [
"MIT"
] | 3 | 2020-08-26T16:50:47.000Z | 2020-11-06T22:33:37.000Z | from handlers.base import BaseHandler
from handlers.miscellanea.task_name_mapping import switch_task_collection
class TemplateHandler(BaseHandler):
    """Endpoints for listing test templates and toggling a test's template flag."""

    # Get all templates based on test type
    async def get(self):
        test_type = self.get_argument('testType')
        collection = switch_task_collection(self, test_type)
        # Join the creator's user document, keep only lightweight display
        # fields, and return templates newest-first.
        pipeline = [
            {'$match': {'isTemplate': True}},
            {'$lookup': {'from': 'users', 'localField': 'userId', 'foreignField': '_id', 'as': 'creator'}},
            {'$set': {'creator': {'$arrayElemAt': ['$creator', 0]}}},
            {'$project': {'items': 0, 'description': 0, 'settings': 0, 'creator.permissions': 0,
                          'creator.password': 0, 'creator.policy': 0, 'creator.createdAt': 0}},
            {'$sort': {'createdAt': -1}}
        ]
        self.dumps_write(collection.aggregate(pipeline))

    # Make a test a template
    async def put(self):
        # Only users holding the 'Template' permission may toggle templates.
        self.user_id = await self.auth_current_user('Template')
        test_type = self.get_argument('testType')
        payload = self.loads_body()
        collection = switch_task_collection(self, test_type)
        # Toggle the flag: a test never marked before becomes a template.
        doc = collection.find_one({'_id': payload['_id']})
        doc['isTemplate'] = True if 'isTemplate' not in doc else not doc['isTemplate']
        collection.update_one({'_id': doc['_id']}, {'$set': doc})
        # Respond with the new flag value.
        self.dumps_write(doc['isTemplate'])
| 39.825 | 107 | 0.594476 |
131430567350f002a3415d33ac931172460459f4 | 7,853 | py | Python | sandbox/lib/jumpscale/JumpScale9Lib/clients/google_compute/GoogleCompute.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 2 | 2017-06-07T08:11:47.000Z | 2017-11-10T02:19:48.000Z | JumpScale9Lib/clients/google_compute/GoogleCompute.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 188 | 2017-06-21T06:16:13.000Z | 2020-06-17T14:20:24.000Z | sandbox/lib/jumpscale/JumpScale9Lib/clients/google_compute/GoogleCompute.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 3 | 2018-06-12T05:18:28.000Z | 2019-09-24T06:49:17.000Z | from js9 import j
from pprint import pprint
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# https://cloud.google.com/compute/docs/reference/latest/instances/list
JSBASE = j.application.jsbase_get_class()
class GoogleCompute(JSBASE):
    """Thin JumpScale client around the Google Compute Engine v1 REST API.

    Covers instances, private/public images, machine types and project
    metadata. See
    https://cloud.google.com/compute/docs/reference/latest/instances/list
    """

    def __init__(self):
        self.__jslocation__ = "j.clients.google_compute"
        JSBASE.__init__(self)
        # Defaults; override via init().
        self.zone = 'us-east1-b'
        self.projectName = 'constant-carver-655'
        self.credentials = None
        self.service = None
        self._projects = None   # cache for the project resource
        self._instances = None
        self._images = {}       # cache for image self-links, keyed by distro

    def init(self, zone=None, projectName=None):
        """Authenticate with application-default credentials and build the API service.

        @param zone         overrides the default zone when given
        @param projectName  overrides the default project when given
        """
        if zone is not None:
            self.zone = zone
        if projectName is not None:
            self.projectName = projectName
        self.credentials = GoogleCredentials.get_application_default()
        self.service = discovery.build(
            'compute', 'v1', credentials=self.credentials)

    @property
    def project(self):
        """The project resource dict, fetched once and cached."""
        # Fixed: cache in _projects; the original stored the project in
        # _instances, leaving _projects permanently unused.
        if self._projects is None:
            request = self.service.projects().get(project=self.projectName)
            self._projects = request.execute()
        return self._projects

    def instances_list(self):
        """Return all instances in the configured project/zone (paginated)."""
        request = self.service.instances().list(
            project=self.projectName, zone=self.zone)
        res = []
        while request is not None:
            response = request.execute()
            if "items" not in response:
                return []
            res.extend(response['items'])
            request = self.service.instances().list_next(
                previous_request=request, previous_response=response)
        return res

    def images_list(self):
        """Return (and pretty-print) all private images of the project."""
        request = self.service.images().list(project=self.projectName)
        res = []
        while request is not None:
            response = request.execute()
            if "items" not in response:
                return []
            for image in response['items']:
                res.append(image)
                pprint(image)
            request = self.service.images().list_next(
                previous_request=request, previous_response=response)
        return res

    @property
    def images_ubuntu(self):
        """Self-links of the public Ubuntu image families (cached).

        see https://cloud.google.com/compute/docs/images
        """
        if "ubuntu" not in self._images:
            res = []
            for family in ["ubuntu-1604-lts", "ubuntu-1704"]:
                image_response = self.service.images().getFromFamily(
                    project='ubuntu-os-cloud', family=family).execute()
                res.append(image_response['selfLink'])
            self._images["ubuntu"] = res
        return self._images["ubuntu"]

    def imageurl_get(self, name="ubuntu - 1604"):
        """Return the self-link of the ubuntu-1604 image.

        NOTE(review): `name` is only used in the error message; the lookup is
        hard-coded to "ubuntu-1604".
        """
        for item in self.images_ubuntu:
            # Fixed: was `is not -1` (identity comparison on an int literal),
            # which is implementation-dependent; use a value comparison.
            if item.lower().find("ubuntu-1604") != -1:
                return item
        raise RuntimeError("did not find image: %s" % name)

    def instance_create(self, name="builder", machineType="n1-standard-1", osType="ubuntu-1604",
                        startupScript="", storageBucket="", sshkeyname=''):
        """Create a VM instance and return the insert-operation resource.

        @param name           instance name
        @param machineType    GCE machine type (e.g. n1-standard-1)
        @param osType         currently unused; the boot image is always ubuntu-1604
        @param startupScript  script executed by the instance on boot
        @param storageBucket  bucket name passed to the instance via metadata
        @param sshkeyname     your name for your ssh key; if not specified the
                              preferred key from j.core.config["ssh"]["sshkeyname"] is used
        """
        # Fixed: the original referenced an undefined `sshkeys` variable; build
        # the metadata value from the requested local key instead.
        if sshkeyname == '':
            # NOTE(review): location of the preferred key name taken from the
            # original docstring -- confirm against the JumpScale version in use.
            sshkeyname = j.core.config["ssh"]["sshkeyname"]
        keypath = j.clients.ssh.sshkey_path_get(sshkeyname)
        pubkey = j.sal.fs.readFile(keypath + ".pub").strip()
        # GCE metadata expects "<username>:<public key>" entries; root is
        # assumed here -- TODO confirm the intended login user.
        sshkeys = "root:%s" % pubkey

        source_disk_image = self.imageurl_get()

        # Configure the machine
        machine_type = "zones/%s/machineTypes/%s" % (self.zone, machineType)

        config = {
            'name': name,
            'machineType': machine_type,

            # Specify the boot disk and the image to use as a source.
            'disks': [
                {
                    'boot': True,
                    'autoDelete': True,
                    'initializeParams': {
                        'sourceImage': source_disk_image,
                    }
                }
            ],

            # Specify a network interface with NAT to access the public
            # internet.
            'networkInterfaces': [{
                'network': 'global/networks/default',
                'accessConfigs': [
                    {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
                ]
            }],

            # Allow the instance to access cloud storage and logging.
            'serviceAccounts': [{
                'email': 'default',
                'scopes': [
                    'https://www.googleapis.com/auth/devstorage.read_write',
                    'https://www.googleapis.com/auth/logging.write'
                ]
            }],

            # Metadata is readable from the instance and allows you to
            # pass configuration from deployment scripts to instances.
            'metadata': {
                'items': [{
                    'key': 'ssh-keys',
                    'value': sshkeys
                }, {
                    # Startup script is automatically executed by the
                    # instance upon startup.
                    'key': 'startup-script',
                    'value': startupScript
                }, {
                    'key': 'bucket',
                    'value': storageBucket
                }]
            }
        }
        self.logger.debug(config)

        res = self.service.instances().insert(project=self.projectName,
                                              zone=self.zone, body=config).execute()
        return res

    def add_sshkey(self, machinename, username, keyname):
        """Append one ssh public key to a single instance's metadata.

        @param machinename  name of the instance to modify (fixed: the original
                            body referenced an undefined `instance` variable)
        @param username     the username the key authorizes
        @param keyname      name of the local key whose .pub file is read via
                            j.clients.ssh (fixed: was hard-coded to "kds")
        """
        # get pub key from local FS
        keypath = j.clients.ssh.sshkey_path_get(keyname)
        key = j.sal.fs.readFile(keypath + ".pub")

        # get old instance metadata
        request = self.service.instances().get(
            zone=self.zone, project=self.projectName, instance=machinename)
        res = request.execute()
        metadata = res.get('metadata', {})

        # add the key: append to an existing ssh-keys item, or create one
        items = metadata.get('items', [])
        for item in items:
            if item['key'] == 'ssh-keys':
                item['value'] = '{} \n{}:{}'.format(
                    item['value'], username, key)
                break
        else:
            items.append(
                {'key': 'ssh-keys', 'value': '{}:{}'.format(username, key)})

        # Set instance metadata
        metadata["items"] = items
        request = self.service.instances().setMetadata(
            zone=self.zone, project=self.projectName, instance=machinename, body=metadata)
        request.execute()
        # TODO: skip duplicate user/key pairs instead of always appending

    def machinetypes_list(self):
        """Return all machine types available in the configured project/zone."""
        request = self.service.machineTypes().list(
            project=self.projectName, zone=self.zone)
        res = []
        while request is not None:
            response = request.execute()
            if "items" not in response:
                return []
            res.extend(response['items'])
            # Fixed: paginate via machineTypes(); the original used
            # instances().list_next, which belongs to a different collection.
            request = self.service.machineTypes().list_next(
                previous_request=request, previous_response=response)
        return res

    @property
    def sshkeys(self):
        """Project-wide ssh keys stored in common instance metadata."""
        return [item["value"]
                for item in self.project["commonInstanceMetadata"]["items"]
                if item["key"] == "sshKeys"]
2869b6b1f8afe68b19b6beff7df3d879432f81bf | 71,676 | py | Python | vendor-local/src/httplib2/python2/httplib2test.py | Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | 4e374b4d52dfb9039ebe543e7f27682189022307 | [
"BSD-3-Clause"
] | 2 | 2015-04-06T15:20:29.000Z | 2016-12-30T12:25:11.000Z | vendor-local/src/httplib2/python2/httplib2test.py | Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | 4e374b4d52dfb9039ebe543e7f27682189022307 | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:38:02.000Z | 2019-03-28T03:49:16.000Z | vendor-local/src/httplib2/python2/httplib2test.py | Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | 4e374b4d52dfb9039ebe543e7f27682189022307 | [
"BSD-3-Clause"
] | 1 | 2019-03-28T03:49:18.000Z | 2019-03-28T03:49:18.000Z | #!/usr/bin/env python2.4
"""
httplib2test
A set of unit tests for httplib2.py.
Requires Python 2.4 or later
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__license__ = "MIT"
__history__ = """ """
__version__ = "0.1 ($Rev: 118 $)"
import StringIO
import base64
import httplib
import httplib2
import os
import socket
import sys
import time
import unittest
import urlparse
try:
import ssl
except ImportError:
pass
# Python 2.3 support
if not hasattr(unittest.TestCase, 'assertTrue'):
unittest.TestCase.assertTrue = unittest.TestCase.failUnless
unittest.TestCase.assertFalse = unittest.TestCase.failIf
# The test resources base uri
base = 'http://bitworking.org/projects/httplib2/test/'
#base = 'http://localhost/projects/httplib2/test/'
cacheDirName = ".cache"
class CredentialsTest(unittest.TestCase):
    """Exercises the in-memory credential store: add / iter / clear."""

    def test(self):
        c = httplib2.Credentials()
        c.add("joe", "password")
        # A credential added without a domain matches every domain.
        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])
        self.assertEqual(("joe", "password"), list(c.iter(""))[0])
        c.add("fred", "password2", "wellformedweb.org")
        # A domain-scoped credential is invisible to other domains...
        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])
        self.assertEqual(1, len(list(c.iter("bitworking.org"))))
        # ...while its own domain also still sees the domain-less credential.
        self.assertEqual(2, len(list(c.iter("wellformedweb.org"))))
        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))
        # clear() drops everything, including domain-less entries.
        c.clear()
        self.assertEqual(0, len(list(c.iter("bitworking.org"))))
        c.add("fred", "password2", "wellformedweb.org")
        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))
        # After the re-add, only wellformedweb.org has any credentials.
        self.assertEqual(0, len(list(c.iter("bitworking.org"))))
        self.assertEqual(0, len(list(c.iter(""))))
class ParserTest(unittest.TestCase):
    """Checks parse_uri() against the (scheme, authority, path, query, fragment)
    decomposition defined by STD 66 / RFC 3986."""

    def testFromStd66(self):
        # (input URI, expected 5-tuple) pairs.
        cases = [
            ("http://example.com", ('http', 'example.com', '', None, None)),
            ("https://example.com", ('https', 'example.com', '', None, None)),
            ("https://example.com:8080", ('https', 'example.com:8080', '', None, None)),
            ("http://example.com/", ('http', 'example.com', '/', None, None)),
            ("http://example.com/path", ('http', 'example.com', '/path', None, None)),
            ("http://example.com/path?a=1&b=2", ('http', 'example.com', '/path', 'a=1&b=2', None)),
            ("http://example.com/path?a=1&b=2#fred", ('http', 'example.com', '/path', 'a=1&b=2', 'fred')),
            ("http://example.com/path?a=1&b=2#fred", ('http', 'example.com', '/path', 'a=1&b=2', 'fred')),
        ]
        for uri, expected in cases:
            self.assertEqual(expected, httplib2.parse_uri(uri))
class UrlNormTest(unittest.TestCase):
    """Checks urlnorm(): case normalization, default path, and relative-URI errors."""

    def test(self):
        # A missing path is normalized to "/".
        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://example.org")[-1])
        # Host names are case-insensitive and get lowercased.
        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://EXAMple.org")[-1])
        # The query string is preserved verbatim.
        self.assertEqual( "http://example.org/?=b", httplib2.urlnorm("http://EXAMple.org?=b")[-1])
        # Only the authority is lowercased; path and query keep their case.
        self.assertEqual( "http://example.org/mypath?a=b", httplib2.urlnorm("http://EXAMple.org/mypath?a=b")[-1])
        self.assertEqual( "http://localhost:80/", httplib2.urlnorm("http://localhost:80")[-1])
        # Scheme and host differing only in case normalize identically.
        self.assertEqual( httplib2.urlnorm("http://localhost:80/"), httplib2.urlnorm("HTTP://LOCALHOST:80"))
        # Relative URIs cannot be normalized and must raise.
        try:
            httplib2.urlnorm("/")
            self.fail("Non-absolute URIs should raise an exception")
        except httplib2.RelativeURIError:
            pass
class UrlSafenameTest(unittest.TestCase):
    """Checks safename(): mapping URIs to distinct, filesystem-safe cache file names."""

    def test(self):
        # Test that different URIs end up generating different safe names
        self.assertEqual( "example.org,fred,a=b,58489f63a7a83c3b7794a6a398ee8b1f", httplib2.safename("http://example.org/fred/?a=b"))
        self.assertEqual( "example.org,fred,a=b,8c5946d56fec453071f43329ff0be46b", httplib2.safename("http://example.org/fred?/a=b"))
        self.assertEqual( "www.example.org,fred,a=b,499c44b8d844a011b67ea2c015116968", httplib2.safename("http://www.example.org/fred?/a=b"))
        # Host case must not change the cache key.
        self.assertEqual( httplib2.safename(httplib2.urlnorm("http://www")[-1]), httplib2.safename(httplib2.urlnorm("http://WWW")[-1]))
        self.assertEqual( "www.example.org,fred,a=b,692e843a333484ce0095b070497ab45d", httplib2.safename("https://www.example.org/fred?/a=b"))
        # http and https versions of the same URI must cache separately.
        self.assertNotEqual( httplib2.safename("http://www"), httplib2.safename("https://www"))
        # Test the max length limits
        uri = "http://" + ("w" * 200) + ".org"
        uri2 = "http://" + ("w" * 201) + ".org"
        self.assertNotEqual( httplib2.safename(uri2), httplib2.safename(uri))
        # Max length should be 200 + 1 (",") + 32
        self.assertEqual(233, len(httplib2.safename(uri2)))
        self.assertEqual(233, len(httplib2.safename(uri)))
        # Unicode hosts are reduced to a punycode-style ASCII form
        # (note the "xn--" prefix in the expected value).
        if sys.version_info >= (2,3):
            self.assertEqual( "xn--http,-4y1d.org,fred,a=b,579924c35db315e5a32e3d9963388193", httplib2.safename(u"http://\u2304.org/fred/?a=b"))
class _MyResponse(StringIO.StringIO):
    """Canned response for tests: a readable body plus header metadata."""

    def __init__(self, body, **kwargs):
        # Any keyword arguments (e.g. status="200") become the response headers.
        StringIO.StringIO.__init__(self, body)
        self.headers = kwargs

    def iteritems(self):
        # Expose the headers with the same iteration API httplib2 expects
        # from a real response object.
        return self.headers.iteritems()
class _MyHTTPConnection(object):
    """Mock of httplib.HTTPConnection used for testing: never touches the
    network and answers every request with the same canned 200 response."""

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None):
        # Mirror the real constructor signature; only a few fields are kept.
        self.host = host
        self.port = port
        self.timeout = timeout
        self.log = ""
        self.sock = None

    def set_debuglevel(self, level):
        # Debug output is a no-op for the mock.
        pass

    def connect(self):
        "Connect to a host on a given port."
        # No socket is ever opened.
        pass

    def close(self):
        pass

    def request(self, method, request_uri, body, headers):
        # The request itself is discarded; getresponse() ignores it.
        pass

    def getresponse(self):
        # Every request yields the same canned 200 response.
        return _MyResponse("the body", status="200")
class HttpTest(unittest.TestCase):
def setUp(self):
if os.path.exists(cacheDirName):
[os.remove(os.path.join(cacheDirName, file)) for file in os.listdir(cacheDirName)]
if sys.version_info < (2, 6):
disable_cert_validation = True
else:
disable_cert_validation = False
self.http = httplib2.Http(
cacheDirName,
disable_ssl_certificate_validation=disable_cert_validation)
self.http.clear_credentials()
def testIPv6NoSSL(self):
try:
self.http.request("http://[::1]/")
except socket.gaierror:
self.fail("should get the address family right for IPv6")
except socket.error:
# Even if IPv6 isn't installed on a machine it should just raise socket.error
pass
def testIPv6SSL(self):
try:
self.http.request("https://[::1]/")
except socket.gaierror:
self.fail("should get the address family right for IPv6")
except socket.error:
# Even if IPv6 isn't installed on a machine it should just raise socket.error
pass
def testConnectionType(self):
self.http.force_exception_to_status_code = False
response, content = self.http.request("http://bitworking.org", connection_type=_MyHTTPConnection)
self.assertEqual(response['content-location'], "http://bitworking.org")
self.assertEqual(content, "the body")
def testGetUnknownServer(self):
self.http.force_exception_to_status_code = False
try:
self.http.request("http://fred.bitworking.org/")
self.fail("An httplib2.ServerNotFoundError Exception must be thrown on an unresolvable server.")
except httplib2.ServerNotFoundError:
pass
# Now test with exceptions turned off
self.http.force_exception_to_status_code = True
(response, content) = self.http.request("http://fred.bitworking.org/")
self.assertEqual(response['content-type'], 'text/plain')
self.assertTrue(content.startswith("Unable to find"))
self.assertEqual(response.status, 400)
def testGetConnectionRefused(self):
self.http.force_exception_to_status_code = False
try:
self.http.request("http://localhost:7777/")
self.fail("An socket.error exception must be thrown on Connection Refused.")
except socket.error:
pass
# Now test with exceptions turned off
self.http.force_exception_to_status_code = True
(response, content) = self.http.request("http://localhost:7777/")
self.assertEqual(response['content-type'], 'text/plain')
self.assertTrue("Connection refused" in content)
self.assertEqual(response.status, 400)
def testGetIRI(self):
if sys.version_info >= (2,3):
uri = urlparse.urljoin(base, u"reflector/reflector.cgi?d=\N{CYRILLIC CAPITAL LETTER DJE}")
(response, content) = self.http.request(uri, "GET")
d = self.reflector(content)
self.assertTrue(d.has_key('QUERY_STRING'))
self.assertTrue(d['QUERY_STRING'].find('%D0%82') > 0)
def testGetIsDefaultMethod(self):
# Test that GET is the default method
uri = urlparse.urljoin(base, "methods/method_reflector.cgi")
(response, content) = self.http.request(uri)
self.assertEqual(response['x-method'], "GET")
def testDifferentMethods(self):
# Test that all methods can be used
uri = urlparse.urljoin(base, "methods/method_reflector.cgi")
for method in ["GET", "PUT", "DELETE", "POST"]:
(response, content) = self.http.request(uri, method, body=" ")
self.assertEqual(response['x-method'], method)
def testHeadRead(self):
# Test that we don't try to read the response of a HEAD request
# since httplib blocks response.read() for HEAD requests.
# Oddly enough this doesn't appear as a problem when doing HEAD requests
# against Apache servers.
uri = "http://www.google.com/"
(response, content) = self.http.request(uri, "HEAD")
self.assertEqual(response.status, 200)
self.assertEqual(content, "")
def testGetNoCache(self):
# Test that can do a GET w/o the cache turned on.
http = httplib2.Http()
uri = urlparse.urljoin(base, "304/test_etag.txt")
(response, content) = http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(response.previous, None)
def testGetOnlyIfCachedCacheHit(self):
# Test that can do a GET with cache and 'only-if-cached'
uri = urlparse.urljoin(base, "304/test_etag.txt")
(response, content) = self.http.request(uri, "GET")
(response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
self.assertEqual(response.fromcache, True)
self.assertEqual(response.status, 200)
def testGetOnlyIfCachedCacheMiss(self):
# Test that can do a GET with no cache with 'only-if-cached'
uri = urlparse.urljoin(base, "304/test_etag.txt")
(response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
self.assertEqual(response.fromcache, False)
self.assertEqual(response.status, 504)
def testGetOnlyIfCachedNoCacheAtAll(self):
# Test that can do a GET with no cache with 'only-if-cached'
# Of course, there might be an intermediary beyond us
# that responds to the 'only-if-cached', so this
# test can't really be guaranteed to pass.
http = httplib2.Http()
uri = urlparse.urljoin(base, "304/test_etag.txt")
(response, content) = http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
self.assertEqual(response.fromcache, False)
self.assertEqual(response.status, 504)
def testUserAgent(self):
# Test that we provide a default user-agent
uri = urlparse.urljoin(base, "user-agent/test.cgi")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertTrue(content.startswith("Python-httplib2/"))
def testUserAgentNonDefault(self):
# Test that the default user-agent can be over-ridden
uri = urlparse.urljoin(base, "user-agent/test.cgi")
(response, content) = self.http.request(uri, "GET", headers={'User-Agent': 'fred/1.0'})
self.assertEqual(response.status, 200)
self.assertTrue(content.startswith("fred/1.0"))
def testGet300WithLocation(self):
# Test the we automatically follow 300 redirects if a Location: header is provided
uri = urlparse.urljoin(base, "300/with-location-header.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 300)
self.assertEqual(response.previous.fromcache, False)
# Confirm that the intermediate 300 is not cached
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 300)
self.assertEqual(response.previous.fromcache, False)
def testGet300WithLocationNoRedirect(self):
# Test the we automatically follow 300 redirects if a Location: header is provided
self.http.follow_redirects = False
uri = urlparse.urljoin(base, "300/with-location-header.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 300)
def testGet300WithoutLocation(self):
# Not giving a Location: header in a 300 response is acceptable
# In which case we just return the 300 response
uri = urlparse.urljoin(base, "300/without-location-header.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 300)
self.assertTrue(response['content-type'].startswith("text/html"))
self.assertEqual(response.previous, None)
def testGet301(self):
# Test that we automatically follow 301 redirects
# and that we cache the 301 response
uri = urlparse.urljoin(base, "301/onestep.asis")
destination = urlparse.urljoin(base, "302/final-destination.txt")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertTrue(response.has_key('content-location'))
self.assertEqual(response['content-location'], destination)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 301)
self.assertEqual(response.previous.fromcache, False)
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(response['content-location'], destination)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 301)
self.assertEqual(response.previous.fromcache, True)
def testHead301(self):
# Test that we automatically follow 301 redirects
uri = urlparse.urljoin(base, "301/onestep.asis")
destination = urlparse.urljoin(base, "302/final-destination.txt")
(response, content) = self.http.request(uri, "HEAD")
self.assertEqual(response.status, 200)
self.assertEqual(response.previous.status, 301)
self.assertEqual(response.previous.fromcache, False)
def testGet301NoRedirect(self):
# Test that we automatically follow 301 redirects
# and that we cache the 301 response
self.http.follow_redirects = False
uri = urlparse.urljoin(base, "301/onestep.asis")
destination = urlparse.urljoin(base, "302/final-destination.txt")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 301)
def testGet302(self):
# Test that we automatically follow 302 redirects
# and that we DO NOT cache the 302 response
uri = urlparse.urljoin(base, "302/onestep.asis")
destination = urlparse.urljoin(base, "302/final-destination.txt")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(response['content-location'], destination)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 302)
self.assertEqual(response.previous.fromcache, False)
uri = urlparse.urljoin(base, "302/onestep.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(response.fromcache, True)
self.assertEqual(response['content-location'], destination)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 302)
self.assertEqual(response.previous.fromcache, False)
self.assertEqual(response.previous['content-location'], uri)
uri = urlparse.urljoin(base, "302/twostep.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 200)
self.assertEqual(response.fromcache, True)
self.assertEqual(content, "This is the final destination.\n")
self.assertEqual(response.previous.status, 302)
self.assertEqual(response.previous.fromcache, False)
def testGet302RedirectionLimit(self):
# Test that we can set a lower redirection limit
# and that we raise an exception when we exceed
# that limit.
self.http.force_exception_to_status_code = False
uri = urlparse.urljoin(base, "302/twostep.asis")
try:
(response, content) = self.http.request(uri, "GET", redirections = 1)
self.fail("This should not happen")
except httplib2.RedirectLimit:
pass
except Exception, e:
self.fail("Threw wrong kind of exception ")
# Re-run the test with out the exceptions
self.http.force_exception_to_status_code = True
(response, content) = self.http.request(uri, "GET", redirections = 1)
self.assertEqual(response.status, 500)
self.assertTrue(response.reason.startswith("Redirected more"))
self.assertEqual("302", response['status'])
self.assertTrue(content.startswith("<html>"))
self.assertTrue(response.previous != None)
def testGet302NoLocation(self):
# Test that we throw an exception when we get
# a 302 with no Location: header.
self.http.force_exception_to_status_code = False
uri = urlparse.urljoin(base, "302/no-location.asis")
try:
(response, content) = self.http.request(uri, "GET")
self.fail("Should never reach here")
except httplib2.RedirectMissingLocation:
pass
except Exception, e:
self.fail("Threw wrong kind of exception ")
# Re-run the test with out the exceptions
self.http.force_exception_to_status_code = True
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 500)
self.assertTrue(response.reason.startswith("Redirected but"))
self.assertEqual("302", response['status'])
self.assertTrue(content.startswith("This is content"))
    def testGet302ViaHttps(self):
        """Follow a 302 redirect over HTTPS and expose it via response.previous."""
        # Google always redirects to http://google.com
        (response, content) = self.http.request("https://www.google.com", "GET")
        self.assertEqual(200, response.status)
        self.assertEqual(302, response.previous.status)
    def testGetViaHttps(self):
        """A plain GET over HTTPS must succeed with a 200."""
        # Test that we can handle HTTPS
        (response, content) = self.http.request("https://www.google.com/adsense/", "GET")
        self.assertEqual(200, response.status)
    def testGetViaHttpsSpecViolationOnLocation(self):
        """Redirects with a relative Location: over HTTPS must still be followed."""
        # Test that we follow redirects through HTTPS
        # even if they violate the spec by including
        # a relative Location: header instead of an
        # absolute one.
        (response, content) = self.http.request("https://www.google.com/adsense", "GET")
        self.assertEqual(200, response.status)
        self.assertNotEqual(None, response.previous)
    def testSslCertValidation(self):
        """SSL cert validation must fail for bad or mismatched CA cert files.

        Only runs on Python >= 2.6, where the ssl module is available.
        """
        if sys.version_info >= (2, 6):
            # Test that we get an ssl.SSLError when specifying a non-existent CA
            # certs file.
            http = httplib2.Http(ca_certs='/nosuchfile')
            self.assertRaises(ssl.SSLError,
                    http.request, "https://www.google.com/", "GET")

            # Test that we get a SSLHandshakeError if we try to access
            # https;//www.google.com, using a CA cert file that doesn't contain
            # the CA Gogole uses (i.e., simulating a cert that's not signed by a
            # trusted CA).
            other_ca_certs = os.path.join(
                os.path.dirname(os.path.abspath(httplib2.__file__ )),
                "test", "other_cacerts.txt")
            http = httplib2.Http(ca_certs=other_ca_certs)
            self.assertRaises(httplib2.SSLHandshakeError,
                    http.request, "https://www.google.com/", "GET")
    def testSslCertValidationDoubleDots(self):
        """Wildcard certs of the form *.*.appspot.com must match nested hosts."""
        if sys.version_info >= (2, 6):
            # Test that we get match a double dot cert
            try:
              self.http.request("https://1.www.appspot.com/", "GET")
            except httplib2.CertificateHostnameMismatch:
              self.fail('cert with *.*.appspot.com should not raise an exception.')
    def testSslHostnameValidation(self):
        """A cert whose CN doesn't match the requested host must be rejected."""
        if sys.version_info >= (2, 6):
            # The SSL server at google.com:443 returns a certificate for
            # 'www.google.com', which results in a host name mismatch.
            # Note that this test only works because the ssl module and httplib2
            # do not support SNI; for requests specifying a server name of
            # 'google.com' via SNI, a matching cert would be returned.
            self.assertRaises(httplib2.CertificateHostnameMismatch,
                    self.http.request, "https://google.com/", "GET")
    def testSslCertValidationWithoutSslModuleFails(self):
        """Requesting cert validation without the ssl module must raise.

        Only meaningful on Python < 2.6, where the ssl module is absent.
        """
        if sys.version_info < (2, 6):
            http = httplib2.Http(disable_ssl_certificate_validation=False)
            self.assertRaises(httplib2.CertificateValidationUnsupported,
                    http.request, "https://www.google.com/", "GET")
def testGetViaHttpsKeyCert(self):
# At this point I can only test
# that the key and cert files are passed in
# correctly to httplib. It would be nice to have
# a real https endpoint to test against.
# bitworking.org presents an certificate for a non-matching host
# (*.webfaction.com), so we need to disable cert checking for this test.
http = httplib2.Http(timeout=2, disable_ssl_certificate_validation=True)
http.add_certificate("akeyfile", "acertfile", "bitworking.org")
try:
(response, content) = http.request("https://bitworking.org", "GET")
except:
pass
self.assertEqual(http.connections["https:bitworking.org"].key_file, "akeyfile")
self.assertEqual(http.connections["https:bitworking.org"].cert_file, "acertfile")
try:
(response, content) = http.request("https://notthere.bitworking.org", "GET")
except:
pass
self.assertEqual(http.connections["https:notthere.bitworking.org"].key_file, None)
self.assertEqual(http.connections["https:notthere.bitworking.org"].cert_file, None)
    def testGet303(self):
        """A POST answered with 303 must trigger a follow-up GET."""
        # Do a follow-up GET on a Location: header
        # returned from a POST that gave a 303.
        uri = urlparse.urljoin(base, "303/303.cgi")
        (response, content) = self.http.request(uri, "POST", " ")
        self.assertEqual(response.status, 200)
        self.assertEqual(content, "This is the final destination.\n")
        self.assertEqual(response.previous.status, 303)
def testGet303NoRedirect(self):
# Do a follow-up GET on a Location: header
# returned from a POST that gave a 303.
self.http.follow_redirects = False
uri = urlparse.urljoin(base, "303/303.cgi")
(response, content) = self.http.request(uri, "POST", " ")
self.assertEqual(response.status, 303)
    def test303ForDifferentMethods(self):
        """Every method redirected with a 303 must be retried as GET."""
        # Test that all methods can be used
        uri = urlparse.urljoin(base, "303/redirect-to-reflector.cgi")
        for (method, method_on_303) in [("PUT", "GET"), ("DELETE", "GET"), ("POST", "GET"), ("GET", "GET"), ("HEAD", "GET")]:
            (response, content) = self.http.request(uri, method, body=" ")
            self.assertEqual(response['x-method'], method_on_303)
    def testGet304(self):
        """ETags must be used to validate cached entries (304 handling).

        Also checks the on-disk cache file format, HEAD served from cache,
        and that Range requests bypass the cache.
        """
        # Test that we use ETags properly to validate our cache
        uri = urlparse.urljoin(base, "304/test_etag.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['etag'], "")

        (response, content) = self.http.request(uri, "GET")
        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'must-revalidate'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)

        # The cached entry on disk must start with a "status:" line.
        cache_file_name = os.path.join(cacheDirName, httplib2.safename(httplib2.urlnorm(uri)[-1]))
        f = open(cache_file_name, "r")
        status_line = f.readline()
        f.close()

        self.assertTrue(status_line.startswith("status:"))

        (response, content) = self.http.request(uri, "HEAD")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)

        # A partial (Range) request must not be served from the cache.
        (response, content) = self.http.request(uri, "GET", headers = {'range': 'bytes=0-0'})
        self.assertEqual(response.status, 206)
        self.assertEqual(response.fromcache, False)
    def testGetIgnoreEtag(self):
        """With ignore_etag set, If-None-Match must not be sent."""
        # Test that we can forcibly ignore ETags
        uri = urlparse.urljoin(base, "reflector/reflector.cgi")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['etag'], "")

        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
        d = self.reflector(content)
        self.assertTrue(d.has_key('HTTP_IF_NONE_MATCH'))

        self.http.ignore_etag = True
        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
        d = self.reflector(content)
        self.assertEqual(response.fromcache, False)
        self.assertFalse(d.has_key('HTTP_IF_NONE_MATCH'))
    def testOverrideEtag(self):
        """A caller-supplied if-none-match header must override the cached ETag."""
        # Test that we can forcibly ignore ETags
        uri = urlparse.urljoin(base, "reflector/reflector.cgi")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['etag'], "")

        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
        d = self.reflector(content)
        self.assertTrue(d.has_key('HTTP_IF_NONE_MATCH'))
        self.assertNotEqual(d['HTTP_IF_NONE_MATCH'], "fred")

        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0', 'if-none-match': 'fred'})
        d = self.reflector(content)
        self.assertTrue(d.has_key('HTTP_IF_NONE_MATCH'))
        self.assertEqual(d['HTTP_IF_NONE_MATCH'], "fred")
#MAP-commented this out because it consistently fails
# def testGet304EndToEnd(self):
# # Test that end to end headers get overwritten in the cache
# uri = urlparse.urljoin(base, "304/end2end.cgi")
# (response, content) = self.http.request(uri, "GET")
# self.assertNotEqual(response['etag'], "")
# old_date = response['date']
# time.sleep(2)
#
# (response, content) = self.http.request(uri, "GET", headers = {'Cache-Control': 'max-age=0'})
# # The response should be from the cache, but the Date: header should be updated.
# new_date = response['date']
# self.assertNotEqual(new_date, old_date)
# self.assertEqual(response.status, 200)
# self.assertEqual(response.fromcache, True)
    def testGet304LastModified(self):
        """A 304 must be handled using only the Last-Modified validator."""
        # Test that we can still handle a 304
        # by only using the last-modified cache validator.
        uri = urlparse.urljoin(base, "304/last-modified-only/last-modified-only.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['last-modified'], "")
        (response, content) = self.http.request(uri, "GET")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)
    def testGet307(self):
        """307 redirects must be followed but never cached themselves."""
        # Test that we do follow 307 redirects but
        # do not cache the 307
        uri = urlparse.urljoin(base, "307/onestep.asis")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(content, "This is the final destination.\n")
        self.assertEqual(response.previous.status, 307)
        self.assertEqual(response.previous.fromcache, False)

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        # The final destination may be cached, but the 307 itself must not be.
        self.assertEqual(response.fromcache, True)
        self.assertEqual(content, "This is the final destination.\n")
        self.assertEqual(response.previous.status, 307)
        self.assertEqual(response.previous.fromcache, False)
def testGet410(self):
# Test that we pass 410's through
uri = urlparse.urljoin(base, "410/410.asis")
(response, content) = self.http.request(uri, "GET")
self.assertEqual(response.status, 410)
    def testVaryHeaderSimple(self):
        """
        RFC 2616 13.6
        When the cache receives a subsequent request whose Request-URI
        specifies one or more cache entries including a Vary header field,
        the cache MUST NOT use such a cache entry to construct a response
        to the new request unless all of the selecting request-headers
        present in the new request match the corresponding stored
        request-headers in the original request.
        """
        # test that the vary header is sent
        uri = urlparse.urljoin(base, "vary/accept.asis")
        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertTrue(response.has_key('vary'))

        # get the resource again, from the cache since accept header in this
        # request is the same as the request
        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True, msg="Should be from cache")

        # get the resource again, not from cache since Accept headers does not match
        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False, msg="Should not be from cache")

        # get the resource again, without any Accept header, so again no match
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False, msg="Should not be from cache")
    def testNoVary(self):
        """Without a Vary header, differing request headers must still hit the cache."""
        # when there is no vary, a different Accept header (e.g.) should not
        # impact if the cache is used
        # test that the vary header is not sent
        uri = urlparse.urljoin(base, "vary/no-vary.asis")
        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertFalse(response.has_key('vary'))

        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True, msg="Should be from cache")

        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True, msg="Should be from cache")
    def testVaryHeaderDouble(self):
        """With Vary on two headers, all varied headers must match exactly to hit the cache."""
        uri = urlparse.urljoin(base, "vary/accept-double.asis")
        (response, content) = self.http.request(uri, "GET", headers={
            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})
        self.assertEqual(response.status, 200)
        self.assertTrue(response.has_key('vary'))

        # we are from cache
        (response, content) = self.http.request(uri, "GET", headers={
            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})
        self.assertEqual(response.fromcache, True, msg="Should be from cache")

        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)

        # get the resource again, not from cache, varied headers don't match exact
        (response, content) = self.http.request(uri, "GET", headers={'Accept-Language': 'da'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False, msg="Should not be from cache")
    def testVaryUnusedHeader(self):
        """A header named in Vary but absent from both requests must not block caching."""
        # A header's value is not considered to vary if it's not used at all.
        uri = urlparse.urljoin(base, "vary/unused-header.asis")
        (response, content) = self.http.request(uri, "GET", headers={
            'Accept': 'text/plain'})
        self.assertEqual(response.status, 200)
        self.assertTrue(response.has_key('vary'))

        # we are from cache
        (response, content) = self.http.request(uri, "GET", headers={
            'Accept': 'text/plain',})
        self.assertEqual(response.fromcache, True, msg="Should be from cache")
    def testHeadGZip(self):
        """HEAD responses must not be run through the gzip decompressor."""
        # Test that we don't try to decompress a HEAD response
        uri = urlparse.urljoin(base, "gzip/final-destination.txt")
        (response, content) = self.http.request(uri, "HEAD")
        self.assertEqual(response.status, 200)
        self.assertNotEqual(int(response['content-length']), 0)
        self.assertEqual(content, "")
    def testGetGZip(self):
        """gzip-encoded bodies must be transparently decompressed.

        The original content-encoding is preserved under '-content-encoding'.
        """
        # Test that we support gzip compression
        uri = urlparse.urljoin(base, "gzip/final-destination.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertFalse(response.has_key('content-encoding'))
        self.assertTrue(response.has_key('-content-encoding'))
        self.assertEqual(int(response['content-length']), len("This is the final destination.\n"))
        self.assertEqual(content, "This is the final destination.\n")
    def testPostAndGZipResponse(self):
        """gzip decompression must also apply to POST responses."""
        uri = urlparse.urljoin(base, "gzip/post.cgi")
        (response, content) = self.http.request(uri, "POST", body=" ")
        self.assertEqual(response.status, 200)
        self.assertFalse(response.has_key('content-encoding'))
        self.assertTrue(response.has_key('-content-encoding'))
    def testGetGZipFailure(self):
        """Corrupt gzip bodies must raise FailedToDecompressContent,
        or yield a synthetic 500 when exceptions are forced to status codes.
        """
        # Test that we raise a good exception when the gzip fails
        self.http.force_exception_to_status_code = False
        uri = urlparse.urljoin(base, "gzip/failed-compression.asis")
        try:
            (response, content) = self.http.request(uri, "GET")
            self.fail("Should never reach here")
        except httplib2.FailedToDecompressContent:
            pass
        except Exception:
            self.fail("Threw wrong kind of exception")

        # Re-run the test with out the exceptions
        self.http.force_exception_to_status_code = True

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 500)
        self.assertTrue(response.reason.startswith("Content purported"))
def testTimeout(self):
self.http.force_exception_to_status_code = True
uri = urlparse.urljoin(base, "timeout/timeout.cgi")
try:
import socket
socket.setdefaulttimeout(1)
except:
# Don't run the test if we can't set the timeout
return
(response, content) = self.http.request(uri)
self.assertEqual(response.status, 408)
self.assertTrue(response.reason.startswith("Request Timeout"))
self.assertTrue(content.startswith("Request Timeout"))
    def testIndividualTimeout(self):
        """A per-Http-instance timeout must produce a synthetic 408."""
        uri = urlparse.urljoin(base, "timeout/timeout.cgi")
        http = httplib2.Http(timeout=1)
        http.force_exception_to_status_code = True

        (response, content) = http.request(uri)
        self.assertEqual(response.status, 408)
        self.assertTrue(response.reason.startswith("Request Timeout"))
        self.assertTrue(content.startswith("Request Timeout"))
def testHTTPSInitTimeout(self):
c = httplib2.HTTPSConnectionWithTimeout('localhost', 80, timeout=47)
self.assertEqual(47, c.timeout)
    def testGetDeflate(self):
        """deflate-encoded bodies must be transparently decompressed."""
        # Test that we support deflate compression
        uri = urlparse.urljoin(base, "deflate/deflated.asis")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertFalse(response.has_key('content-encoding'))
        self.assertEqual(int(response['content-length']), len("This is the final destination."))
        self.assertEqual(content, "This is the final destination.")
    def testGetDeflateFailure(self):
        """Corrupt deflate bodies must raise FailedToDecompressContent,
        or yield a synthetic 500 when exceptions are forced to status codes.
        """
        # Test that we raise a good exception when the deflate fails
        self.http.force_exception_to_status_code = False

        uri = urlparse.urljoin(base, "deflate/failed-compression.asis")
        try:
            (response, content) = self.http.request(uri, "GET")
            self.fail("Should never reach here")
        except httplib2.FailedToDecompressContent:
            pass
        except Exception:
            self.fail("Threw wrong kind of exception")

        # Re-run the test with out the exceptions
        self.http.force_exception_to_status_code = True

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 500)
        self.assertTrue(response.reason.startswith("Content purported"))
    def testGetDuplicateHeaders(self):
        """Duplicate response headers must be joined into one comma-separated value."""
        # Test that duplicate headers get concatenated via ','
        uri = urlparse.urljoin(base, "duplicate-headers/multilink.asis")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(content, "This is content\n")
        self.assertEqual(response['link'].split(",")[0], '<http://bitworking.org>; rel="home"; title="BitWorking"')
    def testGetCacheControlNoCache(self):
        """A request Cache-Control: no-cache must bypass the cache."""
        # Test Cache-Control: no-cache on requests
        uri = urlparse.urljoin(base, "304/test_etag.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['etag'], "")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)

        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-cache'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
    def testGetCacheControlPragmaNoCache(self):
        """A request Pragma: no-cache must bypass the cache (HTTP/1.0 style)."""
        # Test Pragma: no-cache on requests
        uri = urlparse.urljoin(base, "304/test_etag.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertNotEqual(response['etag'], "")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)

        (response, content) = self.http.request(uri, "GET", headers={'Pragma': 'no-cache'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
    def testGetCacheControlNoStoreRequest(self):
        """A request with no-store must prevent the response from being cached."""
        # A no-store request means that the response should not be stored.
        uri = urlparse.urljoin(base, "304/test_etag.txt")

        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)

        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
    def testGetCacheControlNoStoreResponse(self):
        """A response with no-store must never be cached."""
        # A no-store response means that the response should not be stored.
        uri = urlparse.urljoin(base, "no-store/no-store.asis")

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
    def testGetCacheControlNoCacheNoStoreRequest(self):
        """no-store, no-cache must evict an already-cached entry."""
        # Test that a no-store, no-cache clears the entry from the cache
        # even if it was cached previously.
        uri = urlparse.urljoin(base, "304/test_etag.txt")

        (response, content) = self.http.request(uri, "GET")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.fromcache, True)
        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})
        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
    def testUpdateInvalidatesCache(self):
        """A PUT/DELETE on a cached URI must invalidate its cache entry,
        even when the server rejects the method (405 here).
        """
        # Test that calling PUT or DELETE on a
        # URI that is cache invalidates that cache.
        uri = urlparse.urljoin(base, "304/test_etag.txt")

        (response, content) = self.http.request(uri, "GET")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.fromcache, True)
        (response, content) = self.http.request(uri, "DELETE")
        self.assertEqual(response.status, 405)

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.fromcache, False)
    def testUpdateUsesCachedETag(self):
        """PUT must send the cached ETag as If-Match (W3C detect-lost-update).

        The first PUT succeeds; the second carries a now-stale ETag and
        must be rejected with 412 Precondition Failed.
        """
        # Test that we natively support http://www.w3.org/1999/04/Editing/
        uri = urlparse.urljoin(base, "conditional-updates/test.cgi")

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)
        (response, content) = self.http.request(uri, "PUT", body="foo")
        self.assertEqual(response.status, 200)
        (response, content) = self.http.request(uri, "PUT", body="foo")
        self.assertEqual(response.status, 412)
    def testUpdatePatchUsesCachedETag(self):
        """PATCH must send the cached ETag as If-Match, like PUT."""
        # Test that we natively support http://www.w3.org/1999/04/Editing/
        uri = urlparse.urljoin(base, "conditional-updates/test.cgi")

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)
        (response, content) = self.http.request(uri, "PATCH", body="foo")
        self.assertEqual(response.status, 200)
        (response, content) = self.http.request(uri, "PATCH", body="foo")
        self.assertEqual(response.status, 412)
    def testUpdateUsesCachedETagAndOCMethod(self):
        """Methods added to optimistic_concurrency_methods must also send If-Match."""
        # Test that we natively support http://www.w3.org/1999/04/Editing/
        uri = urlparse.urljoin(base, "conditional-updates/test.cgi")

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)
        self.http.optimistic_concurrency_methods.append("DELETE")
        (response, content) = self.http.request(uri, "DELETE")
        self.assertEqual(response.status, 200)
    def testUpdateUsesCachedETagOverridden(self):
        """A caller-supplied if-match must override the cached ETag (412 expected)."""
        # Test that we natively support http://www.w3.org/1999/04/Editing/
        uri = urlparse.urljoin(base, "conditional-updates/test.cgi")

        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, False)
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
        self.assertEqual(response.fromcache, True)
        (response, content) = self.http.request(uri, "PUT", body="foo", headers={'if-match': 'fred'})
        self.assertEqual(response.status, 412)
    def testBasicAuth(self):
        """Basic auth: 401 before credentials are added, 200 after."""
        # Test Basic Authentication
        uri = urlparse.urljoin(base, "basic/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        uri = urlparse.urljoin(base, "basic/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        self.http.add_credentials('joe', 'password')
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        uri = urlparse.urljoin(base, "basic/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
    def testBasicAuthWithDomain(self):
        """Domain-scoped credentials must only apply to their own domain."""
        # Test Basic Authentication
        uri = urlparse.urljoin(base, "basic/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        uri = urlparse.urljoin(base, "basic/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        # Credentials scoped to an unrelated domain must not be sent.
        self.http.add_credentials('joe', 'password', "example.org")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        uri = urlparse.urljoin(base, "basic/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        # Credentials scoped to the server's own domain must be sent.
        domain = urlparse.urlparse(base)[1]
        self.http.add_credentials('joe', 'password', domain)
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        uri = urlparse.urljoin(base, "basic/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
    def testBasicAuthTwoDifferentCredentials(self):
        """A second set of credentials must work for a second protected realm."""
        # Test Basic Authentication with multiple sets of credentials
        uri = urlparse.urljoin(base, "basic2/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        uri = urlparse.urljoin(base, "basic2/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        self.http.add_credentials('fred', 'barney')
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        uri = urlparse.urljoin(base, "basic2/file.txt")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
    def testBasicAuthNested(self):
        """Nested protected resources may require different credentials each."""
        # Test Basic Authentication with resources
        # that are nested
        uri = urlparse.urljoin(base, "basic-nested/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        uri = urlparse.urljoin(base, "basic-nested/subdir")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        # Now add in credentials one at a time and test.
        self.http.add_credentials('joe', 'password')

        uri = urlparse.urljoin(base, "basic-nested/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        # joe's credentials don't cover the subdirectory.
        uri = urlparse.urljoin(base, "basic-nested/subdir")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        self.http.add_credentials('fred', 'barney')

        uri = urlparse.urljoin(base, "basic-nested/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        uri = urlparse.urljoin(base, "basic-nested/subdir")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)
    def testDigestAuth(self):
        """Digest auth: 401 before credentials are added, 200 after."""
        # Test that we support Digest Authentication
        uri = urlparse.urljoin(base, "digest/")
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 401)

        self.http.add_credentials('joe', 'password')
        (response, content) = self.http.request(uri, "GET")
        self.assertEqual(response.status, 200)

        uri = urlparse.urljoin(base, "digest/file.txt")
        (response, content) = self.http.request(uri, "GET")
    def testDigestAuthNextNonceAndNC(self):
        """If the server sends nextnonce, the nonce count must reset to 1."""
        # Test that if the server sets nextnonce that we reset
        # the nonce count back to 1
        uri = urlparse.urljoin(base, "digest/file.txt")
        self.http.add_credentials('joe', 'password')
        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
        info = httplib2._parse_www_authenticate(response, 'authentication-info')
        self.assertEqual(response.status, 200)
        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
        info2 = httplib2._parse_www_authenticate(response, 'authentication-info')
        self.assertEqual(response.status, 200)

        if info.has_key('nextnonce'):
            self.assertEqual(info2['nc'], 1)
    def testDigestAuthStale(self):
        """A stale digest nonce must be transparently re-negotiated."""
        # Test that we can handle a nonce becoming stale
        uri = urlparse.urljoin(base, "digest-expire/file.txt")
        self.http.add_credentials('joe', 'password')
        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
        # NOTE(review): `info` is parsed but never used below — confirm
        # whether it was meant to be asserted on.
        info = httplib2._parse_www_authenticate(response, 'authentication-info')
        self.assertEqual(response.status, 200)

        time.sleep(3)
        # Sleep long enough that the nonce becomes stale

        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
        self.assertFalse(response.fromcache)
        self.assertTrue(response._stale_digest)
        # NOTE(review): `info3` is also unused — same question as above.
        info3 = httplib2._parse_www_authenticate(response, 'authentication-info')
        self.assertEqual(response.status, 200)
def reflector(self, content):
return dict( [tuple(x.split("=", 1)) for x in content.strip().split("\n")] )
    def testReflector(self):
        """The reflector CGI output must include our User-Agent header."""
        uri = urlparse.urljoin(base, "reflector/reflector.cgi")
        (response, content) = self.http.request(uri, "GET")
        d = self.reflector(content)
        self.assertTrue(d.has_key('HTTP_USER_AGENT'))
    def testConnectionClose(self):
        """Connection: close in the request must close the cached socket."""
        uri = "http://www.google.com/"
        (response, content) = self.http.request(uri, "GET")
        for c in self.http.connections.values():
            self.assertNotEqual(None, c.sock)
        (response, content) = self.http.request(uri, "GET", headers={"connection": "close"})
        for c in self.http.connections.values():
            self.assertEqual(None, c.sock)
# Re-run the whole HttpTest suite against a memcached-backed cache when the
# memcache client library is available; skip it silently otherwise.
try:
    import memcache
    class HttpTestMemCached(HttpTest):
        def setUp(self):
            self.cache = memcache.Client(['127.0.0.1:11211'], debug=0)
            #self.cache = memcache.Client(['10.0.0.4:11211'], debug=1)
            self.http = httplib2.Http(self.cache)
            self.cache.flush_all()
            # Not exactly sure why the sleep is needed here, but
            # if not present then some unit tests that rely on caching
            # fail. Memcached seems to lose some sets immediately
            # after a flush_all if the set is to a value that
            # was previously cached. (Maybe the flush is handled async?)
            time.sleep(1)
            self.http.clear_credentials()
except ImportError:
    # Only a missing memcache module should disable this suite; the
    # original bare `except: pass` also hid genuine errors.
    pass
# ------------------------------------------------------------------------
class HttpPrivateTest(unittest.TestCase):
def testParseCacheControl(self):
# Test that we can parse the Cache-Control header
self.assertEqual({}, httplib2._parse_cache_control({}))
self.assertEqual({'no-cache': 1}, httplib2._parse_cache_control({'cache-control': ' no-cache'}))
cc = httplib2._parse_cache_control({'cache-control': ' no-cache, max-age = 7200'})
self.assertEqual(cc['no-cache'], 1)
self.assertEqual(cc['max-age'], '7200')
cc = httplib2._parse_cache_control({'cache-control': ' , '})
self.assertEqual(cc[''], 1)
try:
cc = httplib2._parse_cache_control({'cache-control': 'Max-age=3600;post-check=1800,pre-check=3600'})
self.assertTrue("max-age" in cc)
except:
self.fail("Should not throw exception")
    def testNormalizeHeaders(self):
        """Header names must be lowercased; values must be left untouched."""
        # Test that we normalize headers to lowercase
        h = httplib2._normalize_headers({'Cache-Control': 'no-cache', 'Other': 'Stuff'})
        self.assertTrue(h.has_key('cache-control'))
        self.assertTrue(h.has_key('other'))
        self.assertEqual('Stuff', h['other'])
    def testExpirationModelTransparent(self):
        """A request no-cache must make the entry disposition TRANSPARENT."""
        # Test that no-cache makes our request TRANSPARENT
        response_headers = {
            'cache-control': 'max-age=7200'
        }
        request_headers = {
            'cache-control': 'no-cache'
        }
        self.assertEqual("TRANSPARENT", httplib2._entry_disposition(response_headers, request_headers))
    def testMaxAgeNonNumeric(self):
        """Non-numeric max-age/min-fresh values must degrade to STALE, not raise."""
        response_headers = {
            'cache-control': 'max-age=fred, min-fresh=barney'
        }
        request_headers = {
        }
        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
    def testExpirationModelNoCacheResponse(self):
        """A response no-cache must override an otherwise-fresh Expires."""
        # The date and expires point to an entry that should be
        # FRESH, but the no-cache over-rides that.
        now = time.time()
        response_headers = {
            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),
            'cache-control': 'no-cache'
        }
        request_headers = {
        }
        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
    def testExpirationModelStaleRequestMustReval(self):
        """must-revalidate on the request forces STALE."""
        # must-revalidate forces STALE
        self.assertEqual("STALE", httplib2._entry_disposition({}, {'cache-control': 'must-revalidate'}))
    def testExpirationModelStaleResponseMustReval(self):
        """must-revalidate on the response forces STALE."""
        # must-revalidate forces STALE
        self.assertEqual("STALE", httplib2._entry_disposition({'cache-control': 'must-revalidate'}, {}))
def testExpirationModelFresh(self):
    # An entry younger than its max-age is FRESH; once the 2-second
    # max-age elapses it must become STALE.
    response_headers = {
        'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
        'cache-control': 'max-age=2'
    }
    request_headers = {
    }
    self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
    time.sleep(3)  # wait for the max-age window to pass
    self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationMaxAge0(self):
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
'cache-control': 'max-age=0'
}
request_headers = {
}
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelDateAndExpires(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),
}
request_headers = {
}
self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
time.sleep(3)
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpiresZero(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
'expires': "0",
}
request_headers = {
}
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelDateOnly(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+3)),
}
request_headers = {
}
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelOnlyIfCached(self):
response_headers = {
}
request_headers = {
'cache-control': 'only-if-cached',
}
self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelMaxAgeBoth(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
'cache-control': 'max-age=2'
}
request_headers = {
'cache-control': 'max-age=0'
}
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelDateAndExpiresMinFresh1(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),
}
request_headers = {
'cache-control': 'min-fresh=2'
}
self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
def testExpirationModelDateAndExpiresMinFresh2(self):
now = time.time()
response_headers = {
'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),
}
request_headers = {
'cache-control': 'min-fresh=2'
}
self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
def testParseWWWAuthenticateEmpty(self):
res = httplib2._parse_www_authenticate({})
self.assertEqual(len(res.keys()), 0)
def testParseWWWAuthenticate(self):
# different uses of spaces around commas
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="test realm" , foo=foo ,bar="bar", baz=baz,qux=qux'})
self.assertEqual(len(res.keys()), 1)
self.assertEqual(len(res['test'].keys()), 5)
# tokens with non-alphanum
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'T*!%#st realm=to*!%#en, to*!%#en="quoted string"'})
self.assertEqual(len(res.keys()), 1)
self.assertEqual(len(res['t*!%#st'].keys()), 2)
# quoted string with quoted pairs
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="a \\"test\\" realm"'})
self.assertEqual(len(res.keys()), 1)
self.assertEqual(res['test']['realm'], 'a "test" realm')
def testParseWWWAuthenticateStrict(self):
    # Re-run the WWW-Authenticate parsing tests with strict RFC 2617
    # parsing enabled.
    httplib2.USE_WWW_AUTH_STRICT_PARSING = 1
    try:
        self.testParseWWWAuthenticate()
    finally:
        # Always restore the default so a failure inside the nested test
        # cannot leak strict mode into the rest of the suite.
        httplib2.USE_WWW_AUTH_STRICT_PARSING = 0
def testParseWWWAuthenticateBasic(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me"'})
basic = res['basic']
self.assertEqual('me', basic['realm'])
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm="MD5"'})
basic = res['basic']
self.assertEqual('me', basic['realm'])
self.assertEqual('MD5', basic['algorithm'])
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm=MD5'})
basic = res['basic']
self.assertEqual('me', basic['realm'])
self.assertEqual('MD5', basic['algorithm'])
def testParseWWWAuthenticateBasic2(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me",other="fred" '})
basic = res['basic']
self.assertEqual('me', basic['realm'])
self.assertEqual('fred', basic['other'])
def testParseWWWAuthenticateBasic3(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic REAlm="me" '})
basic = res['basic']
self.assertEqual('me', basic['realm'])
def testParseWWWAuthenticateDigest(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate':
'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"'})
digest = res['digest']
self.assertEqual('testrealm@host.com', digest['realm'])
self.assertEqual('auth,auth-int', digest['qop'])
def testParseWWWAuthenticateMultiple(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate':
'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41" Basic REAlm="me" '})
digest = res['digest']
self.assertEqual('testrealm@host.com', digest['realm'])
self.assertEqual('auth,auth-int', digest['qop'])
self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
basic = res['basic']
self.assertEqual('me', basic['realm'])
def testParseWWWAuthenticateMultiple2(self):
# Handle an added comma between challenges, which might get thrown in if the challenges were
# originally sent in separate www-authenticate headers.
res = httplib2._parse_www_authenticate({ 'www-authenticate':
'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me" '})
digest = res['digest']
self.assertEqual('testrealm@host.com', digest['realm'])
self.assertEqual('auth,auth-int', digest['qop'])
self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
basic = res['basic']
self.assertEqual('me', basic['realm'])
def testParseWWWAuthenticateMultiple3(self):
# Handle an added comma between challenges, which might get thrown in if the challenges were
# originally sent in separate www-authenticate headers.
res = httplib2._parse_www_authenticate({ 'www-authenticate':
'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})
digest = res['digest']
self.assertEqual('testrealm@host.com', digest['realm'])
self.assertEqual('auth,auth-int', digest['qop'])
self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
basic = res['basic']
self.assertEqual('me', basic['realm'])
wsse = res['wsse']
self.assertEqual('foo', wsse['realm'])
self.assertEqual('UsernameToken', wsse['profile'])
def testParseWWWAuthenticateMultiple4(self):
res = httplib2._parse_www_authenticate({ 'www-authenticate':
'Digest realm="test-real.m@host.com", qop \t=\t"\tauth,auth-int", nonce="(*)&^&$%#",opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})
digest = res['digest']
self.assertEqual('test-real.m@host.com', digest['realm'])
self.assertEqual('\tauth,auth-int', digest['qop'])
self.assertEqual('(*)&^&$%#', digest['nonce'])
def testParseWWWAuthenticateMoreQuoteCombos(self):
res = httplib2._parse_www_authenticate({'www-authenticate':'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'})
digest = res['digest']
self.assertEqual('myrealm', digest['realm'])
def testParseWWWAuthenticateMalformed(self):
    # A challenge that cannot be parsed must raise MalformedHeader.
    # assertRaises replaces the manual try/fail/except pattern.
    self.assertRaises(
        httplib2.MalformedHeader,
        httplib2._parse_www_authenticate,
        {'www-authenticate': 'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'})
def testDigestObject(self):
credentials = ('joe', 'password')
host = None
request_uri = '/projects/httplib2/test/digest/'
headers = {}
response = {
'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth"'
}
content = ""
d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")
our_request = "authorization: %s" % headers['authorization']
working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46"'
self.assertEqual(our_request, working_request)
def testDigestObjectWithOpaque(self):
credentials = ('joe', 'password')
host = None
request_uri = '/projects/httplib2/test/digest/'
headers = {}
response = {
'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", opaque="atestopaque"'
}
content = ""
d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")
our_request = "authorization: %s" % headers['authorization']
working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46", opaque="atestopaque"'
self.assertEqual(our_request, working_request)
def testDigestObjectStale(self):
credentials = ('joe', 'password')
host = None
request_uri = '/projects/httplib2/test/digest/'
headers = {}
response = httplib2.Response({ })
response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'
response.status = 401
content = ""
d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
# Returns true to force a retry
self.assertTrue( d.response(response, content) )
def testDigestObjectAuthInfo(self):
credentials = ('joe', 'password')
host = None
request_uri = '/projects/httplib2/test/digest/'
headers = {}
response = httplib2.Response({ })
response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'
response['authentication-info'] = 'nextnonce="fred"'
content = ""
d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
# Returns true to force a retry
self.assertFalse( d.response(response, content) )
self.assertEqual('fred', d.challenge['nonce'])
self.assertEqual(1, d.challenge['nc'])
def testWsseAlgorithm(self):
    # Known-answer test for the WSSE UsernameToken digest computed from
    # (nonce, created-timestamp, password).
    digest = httplib2._wsse_username_token("d36e316282959a9ed4c89851497a717f", "2003-12-15T14:43:07Z", "taadtaadpstcsm")
    expected = "quR/EWLAV4xLf9Zqyw4pDmfV9OY="
    self.assertEqual(expected, digest)
def testEnd2End(self):
    # _get_end2end_headers must drop hop-by-hop headers ('te',
    # 'connection') and any header named by the 'connection' header,
    # keeping only end-to-end headers.
    # one end to end header
    response = {'content-type': 'application/atom+xml', 'te': 'deflate'}
    end2end = httplib2._get_end2end_headers(response)
    self.assertTrue('content-type' in end2end)
    self.assertTrue('te' not in end2end)
    self.assertTrue('connection' not in end2end)

    # one end to end header that gets eliminated
    response = {'connection': 'content-type', 'content-type': 'application/atom+xml', 'te': 'deflate'}
    end2end = httplib2._get_end2end_headers(response)
    self.assertTrue('content-type' not in end2end)
    self.assertTrue('te' not in end2end)
    self.assertTrue('connection' not in end2end)

    # Degenerate case of no headers.
    # assertEqual (not the deprecated assertEquals alias) for
    # consistency with every other assertion in this file.
    response = {}
    end2end = httplib2._get_end2end_headers(response)
    self.assertEqual(0, len(end2end))

    # Degenerate case of connection referring to a header not passed in
    response = {'connection': 'content-type'}
    end2end = httplib2._get_end2end_headers(response)
    self.assertEqual(0, len(end2end))
if __name__ == '__main__':
    # Run the whole suite when this file is executed directly.
    unittest.main()
| 46.034682 | 314 | 0.64772 |
57c9409b401d3448ab9228168d7fdc3d20f7af56 | 1,356 | py | Python | course/lesson05/task03/white_space.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | course/lesson05/task03/white_space.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | course/lesson05/task03/white_space.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python3
"""
Remove all leading and trailing white-space from a file and print a number of
modified lines.
"""
import re
from course.lesson05.task01.task01 import get_output_file
def strip(input_file_name):
    """
    Remove all leading and trailing white-space from every line of
    'input_file_name' and write the result into the current directory.

    Args:
        input_file_name (str): path to input file

    Returns:
        int : number of modified lines.
    """
    # NOTE: the previous regex-based version (r"^\s+(.*)\s*$") never
    # removed trailing white-space — the greedy group consumed it — and
    # skipped lines that had only trailing white-space.  str.strip()
    # handles both sides correctly and counts blank lines as unmodified.
    modified_lines = 0
    with open(input_file_name, mode="r", encoding="utf-8") as in_fd:
        with open(get_output_file(input_file_name), mode="w", encoding="utf-8") as out_fd:
            for line in in_fd:
                body = line.rstrip("\n")     # line content without the newline
                stripped = body.strip()
                if stripped != body:
                    # Leading and/or trailing white-space was removed.
                    modified_lines += 1
                    out_fd.write(stripped)
                    out_fd.write("\n")
                else:
                    out_fd.write(line)
    return modified_lines
if __name__ == "__main__":
print("Number of modified lines: ", strip("../../alice.txt")) | 33.9 | 90 | 0.609882 |
075138fd0d9dbf29d2a70f36fba8ac8df35c02b4 | 10,220 | py | Python | celery/backends/amqp.py | aleszoulek/celery | a179038fec68808d50c0a1f42aa26d315a3817ad | [
"BSD-3-Clause"
] | 2 | 2021-04-30T02:01:12.000Z | 2022-02-18T05:30:28.000Z | celery/backends/amqp.py | aleszoulek/celery | a179038fec68808d50c0a1f42aa26d315a3817ad | [
"BSD-3-Clause"
] | null | null | null | celery/backends/amqp.py | aleszoulek/celery | a179038fec68808d50c0a1f42aa26d315a3817ad | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import socket
import time
from datetime import timedelta
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from celery import states
from celery.backends.base import BaseDictBackend
from celery.exceptions import TimeoutError
from celery.utils import timeutils
from celery.utils import cached_property
def repair_uuid(s):
    """Re-insert the dashes into a dash-less UUID string.

    Historically the dashes in UUIDS are removed from AMQ entity names,
    but there is no known reason to.  Hopefully we'll be able to fix
    this in v3.0.
    """
    groups = (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
    return "-".join(groups)
class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages.

    Each task result is published to an AMQP exchange using a queue and
    routing key derived from the task id (with the dashes removed, see
    :func:`repair_uuid`).
    """
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer

    # Broker connection pool, created lazily per process
    # (see the ``pool`` property below).
    _pool = None
    _pool_owner_pid = None

    def __init__(self, connection=None, exchange=None, exchange_type=None,
            persistent=None, serializer=None, auto_delete=True,
            expires=None, connection_max=None, **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        if persistent is None:
            persistent = conf.CELERY_RESULT_PERSISTENT
        self.persistent = persistent
        delivery_mode = persistent and "persistent" or "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=auto_delete)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete
        self.expires = expires
        if self.expires is None:
            self.expires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
        if isinstance(self.expires, timedelta):
            self.expires = timeutils.timedelta_seconds(self.expires)
        if self.expires is not None:
            self.expires = int(self.expires)
            # requires RabbitMQ 2.1.0 or higher.
            self.queue_arguments["x-expires"] = int(self.expires * 1000.0)
        self.connection_max = (connection_max or
                               conf.CELERY_AMQP_TASK_RESULT_CONNECTION_MAX)

    def _create_binding(self, task_id):
        """Create the queue/binding that will hold the result of `task_id`."""
        name = task_id.replace("-", "")
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def _create_producer(self, task_id, channel):
        """Declare the result queue and return a producer routing to it."""
        self._create_binding(task_id)(channel).declare()

        return self.Producer(channel, exchange=self.exchange,
                             routing_key=task_id.replace("-", ""),
                             serializer=self.serializer)

    def _create_consumer(self, bindings, channel):
        """Return a no-ack consumer over the given result queues."""
        return self.Consumer(channel, bindings, no_ack=True)

    def _publish_result(self, connection, task_id, meta):
        """Publish a result message for `task_id` on `connection`."""
        # cache single channel
        # NOTE(review): the cached channel is closed in the finally block
        # below, so the cache never actually hits on the next call —
        # confirm whether the close or the cache is the intended behavior.
        if hasattr(connection, "_result_producer_chan") and \
                connection._result_producer_chan is not None and \
                connection._result_producer_chan.connection is not None:
            channel = connection._result_producer_chan
        else:
            channel = connection._result_producer_chan = connection.channel()

        try:
            self._create_producer(task_id, channel).publish(meta)
        finally:
            channel.close()

    def revive(self, channel):
        """Channel-revive callback; nothing to restore for this backend."""
        pass

    def _store_result(self, task_id, result, status, traceback=None,
            max_retries=20, interval_start=0, interval_step=1,
            interval_max=1):
        """Send task return value and status."""
        conn = self.pool.acquire(block=True)
        try:
            # ``ensure`` retries the publish on recoverable connection errors.
            send = conn.ensure(self, self._publish_result,
                               max_retries=max_retries,
                               interval_start=interval_start,
                               interval_step=interval_step,
                               interval_max=interval_max)
            send(conn, task_id, {"task_id": task_id, "status": status,
                                 "result": self.encode_result(result, status),
                                 "traceback": traceback})
        finally:
            conn.release()
        return result

    def get_task_meta(self, task_id, cache=True):
        """Return the task metadata, using the local cache when allowed."""
        if cache and task_id in self._cache:
            return self._cache[task_id]
        return self.poll(task_id)

    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
            **kwargs):
        """Block until the task is ready and return (or raise) its result.

        Raises :exc:`TimeoutError` if `timeout` elapses first.
        """
        cached_meta = self._cache.get(task_id)

        if cache and cached_meta and \
                cached_meta["status"] in states.READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError("The operation timed out.")

        state = meta["status"]
        if state == states.SUCCESS:
            return meta["result"]
        elif state in states.PROPAGATE_STATES:
            if propagate:
                raise self.exception_to_python(meta["result"])
            return meta["result"]
        else:
            # Not ready yet: recurse until a ready state is consumed.
            return self.wait_for(task_id, timeout, cache)

    def poll(self, task_id):
        """Fetch the current state of `task_id` without blocking."""
        conn = self.pool.acquire(block=True)
        channel = conn.channel()
        try:
            binding = self._create_binding(task_id)(channel)
            binding.declare()
            result = binding.get()
            if result:
                payload = self._cache[task_id] = result.payload
                return payload
            elif task_id in self._cache:  # use previously received state.
                return self._cache[task_id]
            return {"status": states.PENDING, "result": None}
        finally:
            channel.close()
            conn.release()

    def drain_events(self, connection, consumer, timeout=None, now=time.time):
        """Wait for ready-state messages on `consumer`.

        Returns a mapping of task id -> metadata for every READY result
        received before `timeout` seconds have passed.
        """
        wait = connection.drain_events
        results = {}

        def callback(meta, message):
            if meta["status"] in states.READY_STATES:
                uuid = repair_uuid(message.delivery_info["routing_key"])
                results[uuid] = meta

        consumer.register_callback(callback)

        time_start = now()
        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None):
        """Consume and return the first ready result message for `task_id`."""
        conn = self.pool.acquire(block=True)
        channel = conn.channel()
        try:
            binding = self._create_binding(task_id)
            consumer = self._create_consumer(binding, channel)
            consumer.consume()
            try:
                return self.drain_events(conn, consumer, timeout).values()[0]
            finally:
                consumer.cancel()
        finally:
            channel.close()
            conn.release()

    def get_many(self, task_ids, timeout=None):
        """Iterate over (task_id, meta) pairs as each task becomes ready."""
        conn = self.pool.acquire(block=True)
        channel = conn.channel()
        try:
            ids = set(task_ids)
            cached_ids = set()
            # Yield results already known to be ready from the local cache.
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached["status"] in states.READY_STATES:
                        yield task_id, cached
                        cached_ids.add(task_id)
            ids ^= cached_ids

            bindings = [self._create_binding(task_id) for task_id in task_ids]
            consumer = self._create_consumer(bindings, channel)
            consumer.consume()
            try:
                while ids:
                    r = self.drain_events(conn, consumer, timeout)
                    ids ^= set(r.keys())
                    for ready_id, ready_meta in r.items():
                        yield ready_id, ready_meta
            except:  # ☹ Py2.4 — Cannot yield inside try: finally: block
                consumer.cancel()
                raise
            consumer.cancel()
        except:  # … ☹
            channel.close()
            conn.release()
            raise
        channel.close()
        conn.release()

    def reload_task_result(self, task_id):
        raise NotImplementedError(
                "reload_task_result is not supported by this backend.")

    def reload_taskset_result(self, task_id):
        """Reload taskset result, even if it has been previously fetched."""
        raise NotImplementedError(
                "reload_taskset_result is not supported by this backend.")

    def save_taskset(self, taskset_id, result):
        """Store the result and status of a task."""
        raise NotImplementedError(
                "save_taskset is not supported by this backend.")

    def restore_taskset(self, taskset_id, cache=True):
        """Get the result of a taskset."""
        raise NotImplementedError(
                "restore_taskset is not supported by this backend.")

    def _set_pool(self):
        """(Re)create the connection pool and remember the owning pid."""
        self._pool = self.app.broker_connection().Pool(self.connection_max)
        self._pool_owner_pid = os.getpid()

    def _reset_after_fork(self):
        """Close connections inherited from the parent process."""
        self._pool.force_close_all()

    @property
    def pool(self):
        """Connection pool, lazily created and rebuilt after fork().

        A pool inherited through fork() shares sockets with the parent
        process, so it is closed and recreated when the pid changes.
        """
        if self._pool is None:
            self._set_pool()
        elif os.getpid() != self._pool_owner_pid:
            # Stray debug ``print`` removed: library code must not write
            # to stdout when detecting a fork.
            self._reset_after_fork()
            self._set_pool()
        return self._pool
| 37.29927 | 78 | 0.580137 |
b61adceda0fcb39bc36cbc4ff75ce8ac61b58732 | 2,241 | py | Python | habari/urls.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 3 | 2020-06-08T08:39:06.000Z | 2020-07-30T10:46:22.000Z | habari/urls.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 9 | 2021-03-19T11:18:58.000Z | 2022-02-10T15:48:35.000Z | habari/urls.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 1 | 2021-09-22T07:23:03.000Z | 2021-09-22T07:23:03.000Z | """habari URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Habari API",
default_version='v1',
description="An API that gives users access to Habari\'s news articles. The API currently only \
avails news items over the past 24 hours, from various sources within East Africa.\
The contries include Kenya, Uganda and Tanzania. There are currently 6 news sources from \
which a user can retrieve news articles from, i.e, The Daily Nation, The Business Daily, The Star, The daily Monitor, The Daily Standard,\
The Citizen and The East African",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="peter.m.polle@gmail.com"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^swagger(?P<format>\.json|\.yaml)$',
schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^swagger/$', schema_view.with_ui('swagger',
cache_timeout=0), name='schema-swagger-ui'),
url(r'^documentation/$', schema_view.with_ui('redoc',
cache_timeout=0), name='schema-redoc'),
url(r'^admin/', admin.site.urls),
url(r'^api/v1/', include('habari.apps.api.urls')),
url(r'', include('habari.apps.core.urls')),
]
| 41.5 | 146 | 0.672468 |
ab9c948546ec63451d13112d968c331dacbc361a | 158 | py | Python | code/code/code/test1/hcj_undergraduate_code/solver/__init__.py | ChunjunHu/SemanticSegmentationofCropRemoteSensingBasedonDeepLearning | 740f88f6e11ef7e9b84949441d74f19ec06e56d2 | [
"MIT"
] | 2 | 2022-01-06T02:26:24.000Z | 2022-01-28T00:04:35.000Z | code/code/code/test1/hcj_undergraduate_code/solver/__init__.py | ChunjunHu/SemanticSegmentationofCropRemoteSensingBasedonDeepLearning | 740f88f6e11ef7e9b84949441d74f19ec06e56d2 | [
"MIT"
] | null | null | null | code/code/code/test1/hcj_undergraduate_code/solver/__init__.py | ChunjunHu/SemanticSegmentationofCropRemoteSensingBasedonDeepLearning | 740f88f6e11ef7e9b84949441d74f19ec06e56d2 | [
"MIT"
] | null | null | null | '''
Author : now more
Connect : lin.honghui@qq.com
LastEditors : now more
Description :
LastEditTime: 2019-07-02 14:12:10
'''
from .build import *
| 15.8 | 33 | 0.664557 |
45b8c6790c1c6bea952aea79461045bcd5c50203 | 101 | py | Python | flocker/acceptance/__init__.py | stackriot/flocker | eaa586248986d7cd681c99c948546c2b507e44de | [
"Apache-2.0"
] | 2,690 | 2015-01-02T11:12:11.000Z | 2022-03-15T15:41:51.000Z | flocker/acceptance/__init__.py | stackriot/flocker | eaa586248986d7cd681c99c948546c2b507e44de | [
"Apache-2.0"
] | 2,102 | 2015-01-02T18:49:40.000Z | 2021-01-21T18:49:47.000Z | flocker/acceptance/__init__.py | stackriot/flocker | eaa586248986d7cd681c99c948546c2b507e44de | [
"Apache-2.0"
] | 333 | 2015-01-10T01:44:01.000Z | 2022-03-08T15:03:04.000Z | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Acceptance tests for ``flocker``.
"""
| 16.833333 | 57 | 0.693069 |
17709bd89ca3fb4106c778acfd0b9eef2389e1db | 96 | py | Python | city_housing_index/local_admin/apps.py | Sinope-Nanto/city_house | 73589bb07c415b1deecf8a0618b79d376d5a6e88 | [
"MIT"
] | null | null | null | city_housing_index/local_admin/apps.py | Sinope-Nanto/city_house | 73589bb07c415b1deecf8a0618b79d376d5a6e88 | [
"MIT"
] | null | null | null | city_housing_index/local_admin/apps.py | Sinope-Nanto/city_house | 73589bb07c415b1deecf8a0618b79d376d5a6e88 | [
"MIT"
] | 1 | 2021-05-05T13:13:56.000Z | 2021-05-05T13:13:56.000Z | from django.apps import AppConfig
class LocalAdminConfig(AppConfig):
    """Django application configuration for the ``local_admin`` app."""
    # Dotted module path under which Django registers this application.
    name = 'local_admin'
| 16 | 34 | 0.770833 |
f508f476d2a6dfd8ff156c03f6a1ce82cb3c8ef0 | 1,607 | py | Python | src/ramstk/models/dbtables/commondb_site_info_table.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | src/ramstk/models/dbtables/commondb_site_info_table.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | src/ramstk/models/dbtables/commondb_site_info_table.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # -*- coding: utf-8 -*-
#
# ramstk.models.commondb.site_info.table.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Site Information Table Model."""
# Standard Library Imports
from typing import Type
# RAMSTK Local Imports
from ..dbrecords import RAMSTKSiteInfoRecord
from .basetable import RAMSTKBaseTable
class RAMSTKSiteInfoTable(RAMSTKBaseTable):
"""Contain the attributes and methods of the Option data manager."""
# Define private dict class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_db_id_colname = "fld_site_id"
_db_tablename = "ramstk_site_info"
_select_msg = "request_get_option_attributes2"
_tag = "option"
# Define public dict class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs) -> None:
"""Initialize a Options data manager instance."""
RAMSTKBaseTable.__init__(self, **kwargs)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_id_columns = [
"site_id",
]
# Initialize private scalar attributes.
self._record: Type[RAMSTKSiteInfoRecord] = RAMSTKSiteInfoRecord
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.pkey = "site_id"
# Subscribe to PyPubSub messages.
| 27.706897 | 88 | 0.696951 |
d0497ef921c607268ad3911d963b8a8a0727f78d | 12,347 | py | Python | ClusterEnsembles/ClusterEnsembles.py | tsano430/ClusterEnsembles | 63d872eb1267fbd08138eaa39892376b1155273a | [
"MIT"
] | 32 | 2021-02-15T08:57:45.000Z | 2022-03-19T01:03:39.000Z | ClusterEnsembles/ClusterEnsembles.py | ashuein/ClusterEnsembles | 63d872eb1267fbd08138eaa39892376b1155273a | [
"MIT"
] | 20 | 2021-02-03T12:10:32.000Z | 2022-03-25T18:08:12.000Z | ClusterEnsembles/ClusterEnsembles.py | ashuein/ClusterEnsembles | 63d872eb1267fbd08138eaa39892376b1155273a | [
"MIT"
] | 11 | 2021-04-23T23:17:29.000Z | 2022-03-22T03:42:59.000Z | # coding: utf-8
# ClusterEnsembles.py
# Author: Takehiro Sano
# Contact: tsano430@gmail.com
# License: MIT License
import os
import warnings
from typing import Optional
import numpy as np
import pymetis
import kahypar
from scipy import sparse
from sklearn.metrics import normalized_mutual_info_score
from sklearn.utils.extmath import safe_sparse_dot
def create_hypergraph(labels):
    """Create the incidence matrix of labels' hypergraph.

    Each base clustering contributes one group of indicator columns
    (one column per cluster).  Entries that are NaN are mapped to a
    pseudo-cluster that is dropped, so missing assignments produce no
    column at all.

    Parameter
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.

    Return
    -------
    H: Incidence matrix of labels' hypergraph (sparse, samples x clusters).
    """
    blocks = []
    for label in labels:
        # NaN -> +inf so np.unique assigns it a well-defined slot.
        clean = np.nan_to_num(label, nan=float('inf'))
        classes = np.unique(clean)
        index_of = {c: i for i, c in enumerate(classes)}
        column_ids = [index_of[value] for value in clean]
        indicator = np.identity(len(classes), dtype=int)[column_ids]
        if float('inf') in index_of:
            # Remove the pseudo-cluster that collected the NaN entries.
            indicator = np.delete(indicator, obj=index_of[float('inf')], axis=1)
        blocks.append(sparse.csc_matrix(indicator))
    return sparse.hstack(blocks)
def to_pymetis_format(adj_mat):
    """Transform an adjacency matrix into the pymetis format.

    Parameter
    ---------
    adj_mat: Adjacency matrix.

    Returns
    -------
    xadj, adjncy, eweights: Parameters for pymetis — a CSR-like index
    list, the flattened neighbor list, and the matching edge weights.
    """
    index_ptr = [0]
    neighbors = []
    weights = []
    lil = adj_mat.tolil()
    for i in range(lil.shape[0]):
        row = lil.getrow(i)
        nz_rows, nz_cols = row.nonzero()
        values = row[nz_rows, nz_cols]
        neighbors.extend(nz_cols)
        weights.extend(values.toarray()[0])
        # One running offset per row, as pymetis expects.
        index_ptr.append(len(neighbors))
    return index_ptr, neighbors, weights
def cspa(labels, nclass):
    """Cluster-based Similarity Partitioning Algorithm (CSPA).

    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.

    Return
    -------
    label_ce: Consensus clustering label obtained from CSPA.
    """
    incidence = create_hypergraph(labels)
    # Co-association matrix: entry (i, j) counts the base clusterings
    # that placed samples i and j in the same cluster.
    similarity = incidence * incidence.T
    xadj, adjncy, eweights = to_pymetis_format(similarity)
    # Partition the similarity graph into `nclass` parts with METIS.
    membership = pymetis.part_graph(
        nparts=nclass, xadj=xadj, adjncy=adjncy, eweights=eweights)[1]
    return np.array(membership)
def hgpa(labels, nclass, random_state):
    """HyperGraph Partitioning Algorithm (HGPA).
    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.
    random_state: Used for reproducible results.
    Return
    -------
    label_ce: Consensus clustering label obtained from HGPA.
    """
    # Create hypergraph for kahypar
    H = create_hypergraph(labels)
    n_nodes, n_nets = H.shape
    # All nodes and hyperedges are weighted equally.
    node_weights = [1] * n_nodes
    edge_weights = [1] * n_nets
    hyperedge_indices = [0]
    hyperedges = []
    HT = H.T
    # Flatten each hyperedge's member nodes into kahypar's CSR-like format.
    for i in range(n_nets):
        h = HT.getrow(i)
        idx_row, idx_col = h.nonzero()
        hyperedges += list(idx_col)
        hyperedge_indices.append(len(hyperedges))
    hypergraph = kahypar.Hypergraph(
        n_nodes, n_nets, hyperedge_indices, hyperedges, nclass, edge_weights, node_weights)
    # Settings for kahypar
    context = kahypar.Context()
    # Preset configuration shipped next to this module.
    config_path = os.path.dirname(
        __file__) + '/kahypar_config/km1_kKaHyPar_sea20.ini'
    context.loadINIconfiguration(config_path)
    if random_state is not None:
        context.setSeed(random_state)
    context.setK(nclass)
    context.setEpsilon(0.03)  # allowed partition imbalance
    context.suppressOutput(True)
    # Hypergraph partitioning
    kahypar.partition(hypergraph, context)
    # Each node's block id is its consensus cluster label.
    label_ce = np.empty(n_nodes, dtype=int)
    for i in range(n_nodes):
        label_ce[i] = hypergraph.blockID(i)
    return label_ce
def mcla(labels, nclass, random_state):
    """Meta-CLustering Algorithm (MCLA).
    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.
    random_state: Used for reproducible results.
    Return
    -------
    label_ce: Consensus clustering label obtained from MCLA.
    """
    # Seeded because ties in the final assignment are broken at random.
    np.random.seed(random_state)
    # Construct Meta-graph
    H = create_hypergraph(labels)
    n_cols = H.shape[1]
    W = sparse.identity(n_cols, dtype=float, format='lil')
    # Edge weight = Jaccard similarity between every pair of hyperedges.
    for i in range(n_cols):
        hi = H.getcol(i)
        norm_hi = (hi.T * hi)[0, 0]
        for j in range(n_cols):
            if i < j:
                hj = H.getcol(j)
                norm_hj = (hj.T * hj)[0, 0]
                inner_prod = (hi.T * hj)[0, 0]
                W[i, j] = inner_prod / (norm_hi + norm_hj - inner_prod)
                W[j, i] = W[i, j]
    # METIS expects integer weights, so scale before truncating to int.
    W *= 1e3
    W = W.astype(int)
    # Cluster Hyperedges
    xadj, adjncy, eweights = to_pymetis_format(W)
    membership = pymetis.part_graph(
        nparts=nclass, xadj=xadj, adjncy=adjncy, eweights=eweights)[1]
    # Collapse Meta-clusters
    meta_clusters = sparse.dok_matrix(
        (labels.shape[1], nclass), dtype=float).tolil()
    for i, v in enumerate(membership):
        meta_clusters[:, v] += H.getcol(i)
    # Compete for Objects
    label_ce = np.empty(labels.shape[1], dtype=int)
    for i, v in enumerate(meta_clusters):
        v = v.toarray()[0]
        # Ties between equally strong meta-clusters are broken at random.
        label_ce[i] = np.random.choice(np.nonzero(v == np.max(v))[0])
    return label_ce
def hbgf(labels, nclass):
    """Hybrid Bipartite Graph Formulation (HBGF).

    Partitions the bipartite graph connecting samples to hyperedges and
    keeps the partition ids of the sample vertices.

    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.

    Return
    -------
    label_ce: Consensus clustering label obtained from HBGF.
    """
    incidence = create_hypergraph(labels)
    n_samples, n_edges = incidence.shape
    # Bipartite adjacency: hyperedge vertices first, then sample vertices.
    zeros_edges = sparse.dok_matrix((n_edges, n_edges))
    zeros_samples = sparse.dok_matrix((n_samples, n_samples))
    bipartite = sparse.bmat([[zeros_edges, incidence.T],
                             [incidence, zeros_samples]])
    xadj, adjncy, _ = to_pymetis_format(bipartite)
    _, membership = pymetis.part_graph(
        nparts=nclass, xadj=xadj, adjncy=adjncy, eweights=None)
    # Discard the hyperedge vertices; only sample vertices carry labels.
    return np.array(membership[n_edges:])
def create_connectivity_matrix(labels):
    """Create the (average) connectivity matrix.

    Entry (i, j) is the fraction of base clusterings that assign samples
    i and j to the same cluster.

    Parameter
    ---------
    labels: Labels generated by multiple clustering algorithms such as K-Means.

    Return
    ------
    M: Connectivity matrix (sparse CSR, len_labels x len_labels).
    """
    n_labels, len_labels = labels.shape
    M = np.zeros((len_labels, len_labels))
    for label in labels:
        # Vectorized pairwise equality. NaN never equals NaN, so missing
        # assignments contribute no connectivity — same semantics as the
        # original per-element np.where loop, but O(n^2) in C instead of
        # O(n^2) Python-level iterations.
        M += np.equal.outer(label, label)
    M /= n_labels
    return sparse.csr_matrix(M)
def orthogonal_nmf_algorithm(W, nclass, random_state, maxiter):
    """Multiplicative updates for the bi-orthogonal three-factor NMF problem.

    Approximately factorizes W ~= Q S Q^T with nonnegative Q and S via
    multiplicative update rules.

    Parameters
    ----------
    W: Given matrix (dense or sparse).
    nclass: Number of columns of the factor Q (target number of clusters).
    random_state: Used for reproducible results.
    maxiter: Maximum number of iterations.

    Return
    -------
    Q, S: Factor matrices (n x nclass and nclass x nclass).
    """
    np.random.seed(random_state)
    n = W.shape[0]
    # np.random.rand(n, nclass) already has shape (n, nclass); the original
    # trailing .reshape(n, nclass) was a no-op and has been dropped. The
    # random draws are identical, so results are unchanged for a given seed.
    Q = np.random.rand(n, nclass)
    S = np.diag(np.random.rand(nclass))
    for _ in range(maxiter):
        # Update Q (the small epsilon guards against division by zero).
        WQS = safe_sparse_dot(W, np.dot(Q, S), dense_output=True)
        Q = Q * np.sqrt(WQS / (np.dot(Q, np.dot(Q.T, WQS)) + 1e-8))
        # Update S
        QTQ = np.dot(Q.T, Q)
        WQ = safe_sparse_dot(W, Q, dense_output=False)
        QTWQ = safe_sparse_dot(Q.T, WQ, dense_output=True)
        S = S * np.sqrt(QTWQ / (np.dot(QTQ, np.dot(S, QTQ)) + 1e-8))
    return Q, S
def nmf(labels, nclass, random_state, maxiter=200):
    """NMF-based consensus clustering.

    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.
    random_state: Used for reproducible results.
    maxiter: Maximum number of iterations.

    Return
    -------
    label_ce: Consensus clustering label obtained from NMF.
    """
    connectivity = create_connectivity_matrix(labels)
    Q, S = orthogonal_nmf_algorithm(connectivity, nclass, random_state, maxiter)
    # Assign each sample to the factor with the largest scaled loading.
    return np.argmax(np.dot(Q, np.sqrt(S)), axis=1)
def calc_objective(labels, label_ce):
    """Average normalized mutual information objective for cluster ensembles.

    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    label_ce: Consensus clustering label.

    Return
    -------
    objv: Objective function value (mean NMI over all base labels).
    """
    scores = []
    for base in labels:
        # Ignore samples whose base assignment is missing (NaN).
        mask = np.isfinite(base)
        scores.append(normalized_mutual_info_score(
            label_ce[mask], base[mask], average_method='geometric'))
    return sum(scores) / labels.shape[0]
def cluster_ensembles(
        labels: np.ndarray,
        nclass: Optional[int] = None,
        solver: str = 'hbgf',
        random_state: Optional[int] = None,
        verbose: bool = False) -> np.ndarray:
    """Generate a single consensus clustering label by using base labels
    obtained from multiple clustering algorithms.
    Parameters
    ----------
    labels: Labels generated by multiple clustering algorithms such as K-Means.
    nclass: Number of classes in a consensus clustering label.
    solver: Solver type for cluster ensembles.
    random_state: Used for 'hgpa', 'mcla', and 'nmf'.
        Please pass a nonnegative integer for reproducible results.
    verbose: Whether to be verbose.
    Return
    -------
    label_ce: Consensus clustering label.
    """
    # Default class count: the largest number of distinct (non-NaN) clusters
    # found in any base labeling.
    if nclass is None:
        nclass = -1
        for label in labels:
            len_unique_label = len(np.unique(label[~np.isnan(label)]))
            nclass = max(nclass, len_unique_label)
    if verbose:
        print('Cluster Ensembles')
        print(' - Number of classes:', nclass)
        print(' - Solver:', solver)
        print(' - Length of labels:', labels.shape[1])
        print(' - Number of labels:', labels.shape[0])
    if not (isinstance(nclass, int) and nclass > 0):
        raise ValueError(
            f'Number of class must be a positive integer; got (nclass={nclass})')
    if not ((random_state is None) or isinstance(random_state, int)):
        raise ValueError(
            f'Number of random_state must be an integer; got (random_state={random_state})')
    if isinstance(random_state, int):
        random_state = abs(random_state)
    # Dispatch to the requested consensus solver.
    if solver == 'cspa':
        # CSPA builds a sample-by-sample similarity matrix, which is
        # expensive for long label vectors.
        if labels.shape[1] > 5000:
            warnings.warn(
                'The length of labels is very large, so another solvers are recommended.')
        label_ce = cspa(labels, nclass)
    elif solver == 'hgpa':
        label_ce = hgpa(labels, nclass, random_state)
    elif solver == 'mcla':
        label_ce = mcla(labels, nclass, random_state)
    elif solver == 'hbgf':
        label_ce = hbgf(labels, nclass)
    elif solver == 'nmf':
        label_ce = nmf(labels, nclass, random_state)
    elif solver == 'all':
        # Run every applicable solver and keep the label with the best
        # ANMI objective (see calc_objective).
        if verbose:
            print(' - ANMI:')
        ce_solvers = {'hgpa': hgpa, 'mcla': mcla, 'hbgf': hbgf}
        if labels.shape[1] <= 5000:
            ce_solvers['cspa'] = cspa
        ce_solvers['nmf'] = nmf
        best_objv = None
        for name, ce_solver in ce_solvers.items():
            if ce_solver == cspa or ce_solver == hbgf:
                label = ce_solver(labels, nclass)
            else:
                label = ce_solver(labels, nclass, random_state)
            objv = calc_objective(labels, label)
            if verbose:
                print(' -', name, ':', objv)
            if best_objv is None:
                best_objv = objv
                best_solver = name
                label_ce = label
            if best_objv < objv:
                best_objv = objv
                best_solver = name
                label_ce = label
        if verbose:
            print(' - Best solver:', best_solver)
    else:
        raise ValueError(
            f"Invalid solver parameter: Got '{solver}' instead of \
            one of ('cspa', 'hgpa', 'mcla', 'hbgf', 'nmf', 'all')")
    return label_ce
| 29.609113 | 92 | 0.617802 |
b53ecaf1c7b47785375a731caa715e1de1706083 | 13,025 | py | Python | worker/workers/__init__.py | nnamon/worker | 3d2a2c659a58a031eccd31e6f05148c7e146964b | [
"BSD-2-Clause"
] | 8 | 2016-08-20T23:39:33.000Z | 2020-11-06T22:46:31.000Z | worker/workers/__init__.py | nnamon/worker | 3d2a2c659a58a031eccd31e6f05148c7e146964b | [
"BSD-2-Clause"
] | null | null | null | worker/workers/__init__.py | nnamon/worker | 3d2a2c659a58a031eccd31e6f05148c7e146964b | [
"BSD-2-Clause"
] | 6 | 2016-08-21T13:15:27.000Z | 2020-11-06T22:46:43.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import contextlib
import os
import pickle
import signal
import socket
import tempfile
import time
import paramiko
import stopit
import subprocess32 as subprocess
import tracer
from farnsworth.models import TracerCache, ChallengeBinaryNode
from rex.trace_additions import FormatInfoIntToStr, FormatInfoStrToInt, FormatInfoDontConstrain
import worker.log
LOG = worker.log.LOG.getChild('workers')
class CRSTracerCacheManager(tracer.cachemanager.CacheManager):
    """CRSTracerCacheManager
    This class manages tracer caches for a given worker. Under-the-hood tracer
    will call into this code to both load and store caches.
    """
    def __init__(self, concrete_flag=False, atoi_flag=False):
        # Name the class explicitly: the original super(self.__class__, self)
        # recurses infinitely if this class is ever subclassed, because
        # self.__class__ is then the subclass, not this class.
        super(CRSTracerCacheManager, self).__init__()
        self.log = worker.log.LOG.getChild('cachemanager')
        # Challenge set this cache is scoped to; callers must assign it
        # before lookups/stores do anything useful.
        self.cs = None
        self.concrete_flag = concrete_flag
        self.atoi_flag = atoi_flag

    def cache_lookup(self):
        """Return the unpickled cached tracer state for self.cs, or None."""
        # Might better be a property?
        if self.cs is not None:
            try:
                cached = TracerCache.get(TracerCache.cs == self.cs, TracerCache.concrete_flag == self.concrete_flag,
                                         TracerCache.atoi_flag == self.atoi_flag)
                self.log.debug("Loaded tracer state from cache for %s", self.cs.name)
                return pickle.loads(str(cached.blob))
            except TracerCache.DoesNotExist:
                self.log.debug("No cached states found for %s", self.cs.name)
        else:
            self.log.warning("cachemanager's cs was never set, no cache to retrieve")

    def cacher(self, simstate):
        """Serialize *simstate* and store it in the TracerCache for self.cs."""
        if self.cs is not None:
            cache_data = self._prepare_cache_data(simstate)
            if cache_data is not None:
                self.log.info("Caching tracer state for challenge %s", self.cs.name)
                TracerCache.create(cs=self.cs, blob=cache_data, concrete_flag=self.concrete_flag,
                                   atoi_flag=self.atoi_flag)
        else:
            self.log.warning("ChallengeSet was never initialized cannot cache")
class AtoiManager(object):
    """AtoiManager
    This class provides utilities for parsing symbols to atoi infos
    for use in workers such as Rex
    """
    def __init__(self):
        pass

    @staticmethod
    def symbol_to_format_info(addr, symbol):
        """Map one (addr, symbol-name) pair to a FormatInfo object, or None.

        The symbol-name prefixes below follow the naming scheme of the
        identified conversion routines (atoi/strtol/int2str variants).
        """
        # picks the correct format info from a symbol
        if symbol.startswith("atoi"):
            allows_negative = "_no_signs" not in symbol
            return FormatInfoStrToInt(addr, symbol, str_arg_num=0, base=10,
                                      base_arg=None, allows_negative=allows_negative)
        if symbol.startswith("based_atoi"):
            allows_negative = "signed" in symbol
            # The base is encoded as the trailing "_<n>" of the symbol name.
            return FormatInfoStrToInt(addr, symbol, str_arg_num=0, base=int(symbol.split("_")[-1]),
                                      base_arg=None, allows_negative=allows_negative)
        # int2str variants differ only in which argument positions hold the
        # integer and the destination buffer.
        if symbol == "int2str" or symbol == "uint2str":
            return FormatInfoIntToStr(addr, symbol, int_arg_num=2, str_dst_num=0, base=10, base_arg=None)
        if symbol == "int2str_v2" or symbol == "uint2str_v2":
            return FormatInfoIntToStr(addr, symbol, int_arg_num=0, str_dst_num=1, base=10, base_arg=None)
        if symbol == "int2str_v3" or symbol == "uint2str_v3":
            return FormatInfoIntToStr(addr, symbol, int_arg_num=1, str_dst_num=0, base=10, base_arg=None)
        if symbol.startswith("strtol"):
            return FormatInfoStrToInt(addr, symbol, str_arg_num=0, base=None, base_arg=2,
                                      allows_negative=True)
        # printf-family: don't constrain, just flag the symbolic argument.
        if symbol == "printf":
            return FormatInfoDontConstrain(addr, symbol, check_symbolic_arg=0)
        if symbol == "fdprintf":
            return FormatInfoDontConstrain(addr, symbol, check_symbolic_arg=1)
        return None

    @staticmethod
    def get_atoi_info(symbols):
        """Collect FormatInfo objects for every recognized symbol in *symbols*."""
        # gets the list of atoi infos for a dictionary of symbols
        infos = []
        for addr, symbol in symbols.items():
            atoi_info = AtoiManager.symbol_to_format_info(addr, symbol)
            if atoi_info is not None:
                infos.append(atoi_info)
        return infos
class Worker(object):
    """Base class for all job workers; subclasses implement _run()."""

    def __init__(self):
        LOG.debug("Creating Worker")
        # Tracer cache set up for every job in case they use tracer
        self.tracer_cache = CRSTracerCacheManager()
        tracer.tracer.GlobalCacheManager = self.tracer_cache
        self._job = None
        self._cbn = None
        self._cs = None

    def _run(self, job):
        """Subclass hook that performs the actual work for *job*."""
        raise NotImplementedError("Worker must implement _run(self, job)")

    def run(self, job):
        """Bind *job* (and its challenge set / binary) to this worker, then _run it."""
        # Set up job, cs, cbn, and tracer cache
        self._job = job
        self._cs = job.cs
        self._cbn = job.cbn
        self.tracer_cache.cs = self._cs
        if self._cs is not None:
            # Single-binary challenge sets default to their original CBN.
            if self._cbn is None and not job.cs.is_multi_cbn:
                self._cbn = self._cs.cbns_original[0]
        self._run(job)
class VMWorker(Worker):
    """Worker that runs its job inside a throwaway KVM virtual machine,
    controlled over SSH forwarded to localhost."""

    def __init__(self, disk="/data/cgc-vm.qcow2", kvm_timeout=5, restrict_net=False, sandbox=True,
                 snapshot=True, ssh_port=8022, ssh_username="root",
                 ssh_keyfile="/home/angr/.ssh/id_rsa", ssh_timeout=30, vm_name=None):
        super(VMWorker, self).__init__()
        self._disk = disk
        self._kvm_timeout = kvm_timeout
        # QEMU expects 'on'/'off' strings for these boolean options.
        self._restrict_net = 'on' if restrict_net else 'off'
        self._sandbox = 'on' if sandbox else 'off'
        self._snapshot = 'on' if snapshot else 'off'
        self._ssh_keyfile = ssh_keyfile
        self._ssh_port = ssh_port
        self._ssh_timeout = ssh_timeout
        self._ssh_username = ssh_username
        self._vm_name = vm_name if vm_name is not None else "cgc"
        # Temporary file holding the daemonized QEMU's PID; None when no VM.
        self._vm_pidfile = None

    def __del__(self):
        """Clean-up method for VMWorker.
        The VMWorker spawns up a VM that might still be running when the
        worker is garbage-collected, which is something that we should
        clean up. If the VM is running, try to kill it, at best effort.
        """
        if self._vm_pidfile is not None:
            if self.vm_pid is not None:
                os.kill(self.vm_pid, signal.SIGKILL)
            self._vm_pidfile.close()

    @property
    def vm_pid(self):  # locally bound to pidfile file handle
        # Re-read the pidfile each time; QEMU writes it after daemonizing.
        self._vm_pidfile.seek(0)
        pid_ = self._vm_pidfile.read()
        if pid_:
            return int(pid_)

    def _bootup_vm(self, cores, memory):
        """Boot up the VM as, internal helper funtion.
        Note that it opens temporarily file as self._vm_pidfile.
        """
        LOG.debug("Spawning up VM to run jobs within")
        drive = "file={0._disk},media=disk,discard=unmap,snapshot={0._snapshot},if=virtio".format(self)
        # User-mode networking; guest SSH (22) is forwarded to localhost.
        netdev = ("user,id=fakenet0,net=172.16.6.0/24,restrict={0._restrict_net},"
                  "hostfwd=tcp:127.0.0.1:{0._ssh_port}-:22,").format(self)
        self._vm_pidfile = tempfile.NamedTemporaryFile(mode='r', prefix="worker-vm", suffix="pid")
        kvm_command = ["kvm", "-name", self._vm_name,
                       "-sandbox", self._sandbox,
                       "-machine", "pc-i440fx-1.7,accel=kvm,usb=off",
                       "-cpu", "SandyBridge",
                       "-smp", "{}".format(cores),
                       "-m", "{}M".format(memory),
                       "-snapshot",
                       "-drive", drive,
                       "-netdev", netdev,
                       "-net", "nic,netdev=fakenet0,model=virtio",
                       "-daemonize",
                       "-pidfile", self._vm_pidfile.name,
                       "-vnc", "none"]
        try:
            kvm_process = subprocess.Popen(kvm_command, stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        except OSError as e:
            LOG.error("Is KVM installed? Popen raised %s", e)
            raise EnvironmentError("Unable to start VM, KVM process failed %s", e)
        stdout, stderr = None, None
        try:
            # -daemonize makes the foreground process exit once boot starts.
            stdout, stderr = kvm_process.communicate(timeout=self._kvm_timeout)
            LOG.debug("stdout: %s", stdout)
            LOG.debug("stderr: %s", stderr)
        except subprocess.TimeoutExpired:
            LOG.error("VM did not start within %s seconds, killing it", self._kvm_timeout)
            LOG.debug("stdout: %s", stdout)
            LOG.debug("stderr: %s", stderr)
            # Escalate: SIGTERM first, give it a grace period, then SIGKILL.
            kvm_process.terminate()
            if self.vm_pid is not None:
                os.kill(self.vm_pid, signal.SIGTERM)
            LOG.warning("5 seconds grace period before forcefully killing VM")
            time.sleep(5)
            kvm_process.kill()
            if self.vm_pid is not None:
                os.kill(self.vm_pid, signal.SIGKILL)
            raise EnvironmentError("KVM start did not boot up properly")

    def _wait_for_ssh(self):
        """Poll the forwarded SSH port until it accepts TCP connections."""
        LOG.debug("Waiting for SSH to become available from worker")
        not_reachable = True
        try:
            # ThreadingTimeout does not work with PyPy, using signals instead
            with stopit.SignalTimeout(self._ssh_timeout, swallow_exc=False):
                while not_reachable:
                    try:
                        connection = socket.create_connection(("127.0.0.1", self._ssh_port))
                        not_reachable = False
                        connection.close()
                    except socket.error as e:
                        LOG.debug("Unable to connect just yet, sleeping")
                        time.sleep(1)
        except stopit.TimeoutException:
            LOG.error("SSH did not become available within %s seconds.", self._ssh_timeout)
            raise EnvironmentError("SSH did not become available")

    def _initialize_ssh_connection(self):
        """Open the paramiko SSH session (self.ssh) to the booted VM."""
        LOG.debug("Connecting to the VM via SSH")
        self.ssh = paramiko.client.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
        try:
            self.ssh.connect("127.0.0.1", port=self._ssh_port, username=self._ssh_username,
                             key_filename=self._ssh_keyfile, timeout=self._ssh_timeout)
            # Set TCP Keep-Alive to 5 seconds, so that the connection does not die
            transport = self.ssh.get_transport()
            transport.set_keepalive(5)
            # also raises BadHostKeyException, should be taken care of via AutoAddPolicy()
            # also raises AuthenticationException, should never occur because keys are provisioned
        except socket.error as e:
            LOG.error("TCP error connecting to SSH on VM.")
            raise e
        except paramiko.SSHException as e:
            LOG.error("SSH error trying to connect to VM.")
            raise e

    def execute(self, command):
        """Run *command* in the VM (with POSTGRES_* env forwarded); return (stdout, stderr)."""
        assert self.ssh is not None
        # Forward only the POSTGRES_* variables into the guest environment.
        environment = " ".join("{}='{}'".format(k, v) for k, v in os.environ.items()
                               if k.startswith("POSTGRES"))
        env_command = "{} {}".format(environment, command)
        LOG.debug("Executing command: %s", env_command)
        stdout_content = None
        stderr_content = None
        try:
            _, stdout, stderr = self.ssh.exec_command(env_command)
            exit_status = stdout.channel.recv_exit_status()
            if exit_status != 0:
                raise paramiko.SSHException("'%s' failed with exit status %d", command, exit_status)
            stdout_content = stdout.read()
            stderr_content = stderr.read()
        except paramiko.SSHException as e:
            LOG.error("Unable to excute command '%s' on host: %s", command, e)
            LOG.debug("stdout: %s", stdout.read())
            LOG.debug("stderr: %s", stderr.read())
            raise e
        return stdout_content, stderr_content

    @contextlib.contextmanager
    def vm(self, cores, memory):
        """Context manager: boot the VM, yield control, then tear it down."""
        self._bootup_vm(cores, memory)
        self._wait_for_ssh()
        self._initialize_ssh_connection()
        LOG.debug("Setting up route to database etc.")
        self.execute("ip r add default via 172.16.6.2")
        LOG.debug("Passing control over to the Worker")
        yield
        LOG.debug("Worker finished, cleaning up SSH connection and VM")
        self.ssh.close()
        if self.vm_pid is not None:
            # We do not care about the state of the VM anymore, and can -9 it instead of -15
            os.kill(self.vm_pid, signal.SIGKILL)
        self._vm_pidfile.close()
        # If not set to None, deconstructor will try to kill the VM and remove the file
        self._vm_pidfile = None

    def run(self, job):
        """Run *job* inside a VM sized from the job's CPU/memory limits."""
        try:
            # Reserve 512M of the job's memory budget for the host side.
            with self.vm(job.limit_cpu, job.limit_memory - 512):
                super(VMWorker, self).run(job)
        except EnvironmentError as e:
            LOG.error("Error preparing VM for execution: %s", e)
| 41.613419 | 116 | 0.605067 |
0b5d802c7810eb09cbd5cd9daa6444f174e6054c | 2,915 | py | Python | fsc_delta.py | vtlim/GLIC | 90e00e7030748c70ad284cda8785745b6c16ecbb | [
"MIT"
] | 1 | 2019-08-22T06:43:23.000Z | 2019-08-22T06:43:23.000Z | fsc_delta.py | vtlim/GLIC | 90e00e7030748c70ad284cda8785745b6c16ecbb | [
"MIT"
] | null | null | null | fsc_delta.py | vtlim/GLIC | 90e00e7030748c70ad284cda8785745b6c16ecbb | [
"MIT"
] | null | null | null |
import natsort
import glob
import xmltodict
import matplotlib.pyplot as plt
import numpy as np
def plot_format(plt, delta=True):
    """Apply shared styling to the current figure and return (plt, ax).

    Sets legend, figure size, y-grid, tick font sizes, and the x-axis label.
    When delta is False, also annotates the spatial-frequency ticks with
    their real-space equivalents and pads the x label to make room.
    """
    plt.legend(fontsize=16, loc=1) # discrete
    # plt.legend(fontsize=16, loc=1, ncol=2) # diverging
    # set plot limits
    # plt.ylim(-0.1, 1.1)
    # set figure size
    plt.gcf().set_size_inches(18,6)
    # add grid
    ax = plt.gca()
    ax.grid(axis='y', linewidth=0.5)
    # increase tick font size
    ax.tick_params(axis='both', which='major', labelsize=16)
    # add extra details for spatial freq ticks excluding x=-0.1,0,0.7
    lp=0
    deets = ['(1/10.0)','(1/5.00)','(1/3.33)','(1/2.50)','(1/2.00)','(1/1.67)']
    if not delta:
        for i, xpos in enumerate(ax.get_xticks()[2:8]):
            ax.text(xpos, -0.30, deets[i], size = 12, ha = 'center')
        lp=20
    # add x-axis label
    plt.xlabel("spatial frequency ($\AA^{-1}$)", fontsize=18, labelpad=lp)
    return plt, ax
# get list of structures
files = glob.glob('*.xml')
files.sort(key=natsort.natural_keys)
# assume x-locations are the same across all files (!!)
yarray = []
labels = []
# iterate over each
for f in files:
    # parse xml file
    print(f)
    with open(f) as fopen:
        fdict = xmltodict.parse(fopen.read())
    #import pprint
    #pprint.pprint(dict(fdict))
    # organize coordinates
    xlist = [float(c['x']) for c in fdict['fsc']['coordinate']]
    ylist = [float(c['y']) for c in fdict['fsc']['coordinate']]
    # store data
    yarray.append(ylist)
    labels.append(f.split("_")[-1].split(".")[0]) # prefix_label.xml
# define plot colors
colors = plt.cm.tab10(np.linspace(0, 1, 10)) # discrete
#colors = plt.cm.coolwarm_r(np.linspace(0, 1, len(yarray))) # diverging
colors = np.vstack( ([0, 0, 0, 1], plt.cm.Paired(np.linspace(0, 1, 13))) ) # paired except first
# plot fsc curves (one per parsed xml file)
for i, (y, l) in enumerate(zip(yarray, labels)):
    plt.plot(xlist, y, label=l, c=colors[i])
plt, ax = plot_format(plt, delta=False)
# find resolution at y=0.143 -- TODO: needs interpolation else this just finds closest y-point
#idx = (np.abs(np.array(y)-0.143)).argmin()
ax.axhline(0.143, color='grey', ls='--', lw=0.5)
#ax.axvline(xlist[idx], color='grey', ls='--', lw=0.5)
#plt.text(x=-0.025, y=-0.065, s='{} $\AA$\n({:4.3f}, {})'.format(round(1/xlist[idx], 2), xlist[idx], 0.143))
plt.ylabel('correlation', rotation=0, ha='right', fontsize=18)
plt.savefig('fsc.png', bbox_inches='tight')
plt.show()
# plot delta fsc curves (ex. y4-y3)
# todo: probably can combine with for loop above
# each curve is the difference from the previous file's curve; the first is flat zero
for i, (y, l) in enumerate(zip(yarray, labels)):
    if i==0:
        dy = [0]*len(xlist)
    else:
        dy = np.array(y)-np.array(yarray[i-1])
    plt.plot(xlist, dy, label=l, c=colors[i])
plt, ax = plot_format(plt)
plt.ylabel('change in\ncorrelation', rotation=0, ha='right', fontsize=18)
plt.savefig('fsc_delta.png', bbox_inches='tight')
plt.show()
07fb02b354f4ccfaab484620b6f728bebff64b12 | 8,381 | py | Python | Main.py | SebasJin/Investo | 376ec98de094f9c7f507d2f216e8443e02b048a3 | [
"MIT"
] | null | null | null | Main.py | SebasJin/Investo | 376ec98de094f9c7f507d2f216e8443e02b048a3 | [
"MIT"
] | 5 | 2021-05-07T17:50:16.000Z | 2021-06-25T00:48:31.000Z | Main.py | SebasJin/Investo | 376ec98de094f9c7f507d2f216e8443e02b048a3 | [
"MIT"
] | null | null | null | # Import
import requests # Downloads Required Stock Data
import json # stores UserData and others
import os # ya just need it
from os import path
from time import sleep
from datetime import datetime
global url
url = ''
global user_info
user_info = []
global type
type = ''
class Color:
    # ANSI terminal escape sequences used to colorize console output.
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # END resets all styling back to the terminal default.
    END = '\033[0m'
UserChoice = ''

# Welcome banner printed both on login and during first-time setup.
intro_program_message = """
"""+ Color.YELLOW + """------- + Investo by SebasJin + -------""" + Color.END + """
Investo is a free collection of services to help you obtain valuable data on investing
FYI, Investo has not been ported to windows and only works on most linux distributions
This service doesn't mean to serve as investment advice, and is ONLY for Educational purposes
* In order to begin this program, you must obtain a free Alphavantage api key and setup a Username *
* This must be done by going to the website https://www.alphavantage.co/support/#api-key *
""" + Color.RED + """ULTRA IMPORTANT, This program can be read by anyone, There is NO ENCRYPTION Or Password System, and
Only one user is supported, so copy this program into others home directory if they want to use it. \n\n\n""" + Color.END
def intro():  # checks to see if userdata is placed, and if not, then run through a intro process, else, pass
    """Log the user in from userdata.json, or run first-time setup.

    If userdata.json exists, load [username, api_key] from it and greet the
    user. Otherwise prompt for a username and Alpha Vantage API key, persist
    them to userdata.json, create the tmpdata directory, and exit so the
    program can be restarted with the saved credentials.
    """
    if path.exists("userdata.json"):
        with open('userdata.json') as fp:
            user_info = json.load(fp)
        print(intro_program_message)
        print('\n\n ' + Color.CYAN + 'You are being logged in as ' + user_info[0],
              'and with an api key of ' + user_info[1] + Color.END)
        if not path.exists('tmpdata'):
            os.mkdir('tmpdata')
    else:  # gives info, sets up username and api key, load to file for persistence
        print(intro_program_message)
        user_info = []
        usernameinput = input(
            '\nBy creating a username you have acknowledged the terms: ')
        user_info.append(usernameinput)
        print('\n' * 90 + ' Hello ' + user_info[0])
        # The credentials file is userdata.json; the original message told
        # users to delete a non-existent "userdata.txt".
        print("""
    As mentioned before, please go to https://www.alphavantage.co/support/#api-key and get a free API key
    Please type it in correctly, if not please delete the file userdata.json and create a username again
    """)
        apikey = input('\nPlease Enter your api key: ')
        user_info.append(apikey)
        endintromssg = """
    This is your given user info is {0} and an api key of {1}
    In 5 seconds this program will end, please run it once more it to use it.
    """
        # Context manager ensures the file is closed even if dumping fails.
        with open('userdata.json', 'w') as fp:
            json.dump(user_info, fp)
        if not path.exists('tmpdata'):
            os.mkdir('tmpdata')
        print(endintromssg.format(user_info[0], user_info[1]))
        sleep(5)
        quit()
def informational_message(): ## Need to optimize message (the bold things), use {} and .format along with print(' ' *40 to make spaces)
    """Print the built-in help banner listing the available commands."""
    print("""
|----------------------------------------------------------------------------------------------------------------------|
|                                                      """ + Color.BOLD + Color.GREEN + Color.UNDERLINE + """-INFO-""" + Color.END +
          """                                                          |
|----------------------------------------------------------------------------------------------------------------------|
With your API key, you may now begin to gather financial date. Note: You may request data only up to 5 times per minute
Here is a list of the programs built-in functions.
Typing in 'help' will show more information on this program, check it out if you are new.
Typing in 'q' or 'quit' will end the program. You can also use the Control + c command if something wacky happens
Typing in 'stocklookup' will prompt you to enter in a stock ticker which will give a brief summary of it's fundamentals
and provide a json file which can be used for more thorough analysis. Typing in a '-m' after 'stocklookup' will now
print more of the json file of fundamentals onto your console screen.
Typing in 'stockprice' will prompt you to enter a stock ticker and give you it's current/last price. That's all
NOTE: MORE FUNCTIONALITY WILL BE ADDED LATER, and when entering a ticker symbol, uppercase is not required
|----------------------------------------------------------------------------------------------------------------------|
|----------------------------------------------------------------------------------------------------------------------|\n\n\n
    """)
def stocklookup():
    """Download company fundamentals (OVERVIEW) for a user-chosen ticker.

    Prompts for a ticker via stockstart(), then saves the Alpha Vantage
    OVERVIEW payload into the tmpdata directory via downloader().
    """
    # filetype() builds the output filename from the module-level `type`.
    # Without this declaration the assignment below created a dead local,
    # so the saved file never carried the 'Overview' tag.
    global type
    with open('userdata.json') as fp:
        user_info = json.load(fp)
    type = 'Overview'
    stockstart()
    url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker + '&apikey=' + user_info[1]
    downloader(url)
    print(Color.RED + '\nYou are obtaining stock fundamentals for ' + ticker +
          '. Please ensure that your internet is functional.' + Color.END)
def stockprice():
    """Download daily price data for a user-chosen ticker.

    Prompts for a ticker via stockstart(), then saves the Alpha Vantage
    TIME_SERIES_DAILY payload into the tmpdata directory via downloader().
    """
    # Same fix as stocklookup(): filetype() reads the module-level `type`,
    # so the assignment must target the global, not a dead local.
    global type
    type = 'dailyprice'
    with open('userdata.json') as fp:
        user_info = json.load(fp)
    stockstart()
    url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=' + ticker + '&apikey=' + user_info[1]
    # NEED TO CHANGE URL
    downloader(url)
    # Typo fix: the original printed "stocpricess".
    print(Color.RED + '\nYou are obtaining stock prices for ' + ticker +
          '. Please ensure that your internet is functional.' + Color.END)
def stockstart():
    """Prompt for a ticker symbol (uppercased into the global) and build the filename."""
    global ticker
    ticker = ''
    ticker = input(Color.BLUE + 'Enter Your Desired Ticker Symbol: ' + Color.END)
    # Alpha Vantage expects uppercase symbols; normalize user input.
    ticker = ticker.upper()
    filetype()
def filetype():
    """Build the global output filename: ', <timestamp>, <TICKER><type>'.

    Reads the module-level `ticker` and `type` set by the calling command
    and stores the result in the module-level `filename` for downloader().
    """
    global filename
    now = datetime.now()
    dt_string = now.strftime("%d-%m-%Y %H:%M:%S")
    # Same layout the original list-join produced; the leftover debug
    # prints ("I should be getting the ticker right?") have been removed.
    filename = ', ' + dt_string + ', ' + ticker + type
def downloader(url):
    """Fetch *url* and save the response body under tmpdata/.

    The output file is 'tmpdata/data<filename>.json', where the global
    `filename` was prepared by filetype().
    """
    print('\n\nYour url is ' + url)
    # One request is enough: the original called requests.get(url) a second
    # time, downloading (and counting against the API quota) twice.
    request = requests.get(url, allow_redirects=True)
    print(request.headers.get('content-type'))
    # Context manager guarantees the file handle is closed after writing;
    # the original open(...).write(...) leaked the handle.
    with open(os.getcwd() + '/tmpdata/data' + filename + '.json', 'wb') as out:
        out.write(request.content)
intro()

# Main command loop: print the help banner, read one command, dispatch.
while True:
    informational_message()
    UserChoice = input(Color.BLUE + 'Enter Your desired command, exculde the ticker symbol: ' + Color.END)
    if UserChoice == 'stocklookup':
        stocklookup()
    if UserChoice == 'stockprice':
        stockprice()
    if UserChoice == 'quit':
        quit()
    if UserChoice == 'q':
        quit()
    if UserChoice == 'help':
        print("""For Further Information, look for the file README.md and open it up.
If you don\'t have it, go to https://github.com/SebasJin/Investo, and read the README.md file. """)
# NEED TO ADD MORE FUNCTIONALITY,
"""
NEAR GOALS (kinda like a roadmap?)
: ADD MORE COMMANDS
: ADD METHOD OF GETTING STOCK PRICE EASILY
MIDDLE GOALS:
: ADD METHOD OF OPENING DOWNLOADED FILES, AND DISPLAYING LITLE PORTIONS OF IT AS A PREVIEW
EX: IF I WERE TO TYPE IN stocklookup, AND THEN ENTER IN ibm AS MY TICKER, I WOULD LIKE FOR THE PROGRAM TO
PRINT OUT INFORMATION SUCH AS THE PE RATION/EPS/ETC IN LITTLE BITE SIZED PEICES.
: GET PROPER DOCUMENTATION
Ideas: Split project into 2,
1st project is that you use this program to gather information on macreconomics. Ex. I want to be able to
gather data on the financial sector and see when it is most and least profitable, when it is more and less volatile
Ex2. I want to be able to see when the construction sector is more and least profitable, and see when the cashflow
high and low, and when it is least profitable
Evantuall, I want to be able to graph it out for each sector and find corralations between the two
2nd Project, make discord bot and use api to buy and sell make money and stocks
"""
| 38.444954 | 136 | 0.602076 |
9f082edb12649c9101c0693ef97b0b714b07bb7b | 3,794 | py | Python | src/sdi_pandas/oneHotEncoding/oneHotEncoding.py | thhapke/DI_Pandas | 7a9108007459260a30ea7ee404a76b42861c81c5 | [
"MIT"
] | 2 | 2020-01-02T19:54:46.000Z | 2020-03-09T08:49:33.000Z | src/sdi_pandas/oneHotEncoding/oneHotEncoding.py | thhapke/DI_Pandas | 7a9108007459260a30ea7ee404a76b42861c81c5 | [
"MIT"
] | null | null | null | src/sdi_pandas/oneHotEncoding/oneHotEncoding.py | thhapke/DI_Pandas | 7a9108007459260a30ea7ee404a76b42861c81c5 | [
"MIT"
] | 1 | 2020-03-28T22:53:16.000Z | 2020-03-28T22:53:16.000Z | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'col 2': [1, 2, 3, 4, 5], 'col3': [100, 200, 300, 400, 500]})
attributes = {'format': 'csv', 'name': 'DF_name'}
default_msg = api.Message(attributes={'name':'doit'},body = 'message')
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': ''}
operator_description = "One Hot Encoding"
operator_description_long = "Transforms string(object) columns to categoricals by using 'pandas.get_dummies()"
add_readme = dict()
add_readme["References"] = r"""[pandas doc: get_dummies](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)"""
training_cols = 'None'
config_params['training_cols'] = {'title': 'Training Columns', 'description': 'Training Columns', 'type': 'string'}
def process(msg) :
    """One-hot encode the string (object) columns of the incoming DataFrame.

    Parameters
    ----------
    msg : api.Message
        ``msg.body`` must be a pandas DataFrame; ``msg.attributes`` must
        contain a ``'name'`` entry (propagated to the result attributes).

    Returns
    -------
    (str, api.Message)
        The captured log text and a new message carrying the encoded
        DataFrame plus bookkeeping attributes (columns, shape, memory,
        example rows).

    Raises
    ------
    TypeError
        If the message body is not a pandas DataFrame.
    ValueError
        If the resulting DataFrame is empty.
    """
    logger, log_stream = slog.set_logging('DEBUG')
    # start custom process definition
    prev_att = msg.attributes
    df = msg.body
    if not isinstance(df, pd.DataFrame) :
        logger.error('Message body does not contain a pandas DataFrame')
        raise TypeError('Message body does not contain a pandas DataFrame')
    att_dict = dict()
    att_dict['config'] = dict()
    # get_dummies expands object/categorical columns and leaves numeric
    # columns untouched; drop_first avoids the dummy-variable trap.
    df = pd.get_dummies(df, prefix_sep='_', drop_first=True)
    ##############################################
    # final infos to attributes and info message
    ##############################################
    if df.empty :
        logger.error('DataFrame is empty')
        raise ValueError('DataFrame is empty')
    # BUGFIX: this previously said 'selectDataFrame' (copy-paste from a
    # sibling operator); this component is the one-hot encoder.
    att_dict['operator'] = 'oneHotEncoding'
    att_dict['name'] = prev_att['name']
    att_dict['memory'] = df.memory_usage(deep=True).sum() / 1024 ** 2
    att_dict['columns'] = str(list(df.columns))
    att_dict['number_columns'] = df.shape[1]
    att_dict['number_rows'] = df.shape[0]
    example_rows = EXAMPLE_ROWS if att_dict['number_rows'] > EXAMPLE_ROWS else att_dict['number_rows']
    for i in range(0, example_rows) :
        # each example row: every cell rendered as a 10-char left-justified
        # string ('cell' renamed from 'i', which shadowed the loop index)
        att_dict['row_' + str(i)] = str([str(cell)[:10].ljust(10) for cell in df.iloc[i, :].tolist()])
    # end custom process definition
    log = log_stream.getvalue()
    msg = api.Message(attributes=att_dict, body=df)
    return log, msg
# Operator port declarations consumed by the DI runtime / mock above.
inports = [{'name': 'input', 'type': 'message.DataFrame'}]
outports = [{'name': 'log', 'type': 'string'}, {'name': 'output', 'type': 'message.DataFrame'}]
# Run the operator on an incoming message and forward log + result to the
# two output ports.
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
#api.set_port_callback([inports[0]['name']], call_on_input)
# Stand-alone smoke test: the mock api feeds one canned message through.
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
if __name__ == '__main__':
main()
#gs.gensolution(os.path.realpath(__file__), config, inports, outports)
| 33.280702 | 155 | 0.583026 |
eb065fed5d1c99cc19796e9808e2b46a781ca669 | 656 | py | Python | test/mustache.py | roualdes/TestBank | b1435294ab0be3eebab15868a7a88adb4a6a6de7 | [
"BSD-3-Clause"
] | 1 | 2020-07-27T23:17:33.000Z | 2020-07-27T23:17:33.000Z | test/mustache.py | roualdes/testbank | b1435294ab0be3eebab15868a7a88adb4a6a6de7 | [
"BSD-3-Clause"
] | 3 | 2019-06-18T22:23:33.000Z | 2022-01-22T05:21:40.000Z | test/mustache.py | roualdes/TestBank | b1435294ab0be3eebab15868a7a88adb4a6a6de7 | [
"BSD-3-Clause"
] | null | null | null | import json
import numpy as np
# Mustache-style template: every '#< ... >#' marker is a placeholder that
# the TestBank renderer substitutes before this script is executed, so the
# file is not valid Python until rendered.
seed = #< SEEAD >#
id = '#< ID >#'
# NOTE(review): np.int was removed in NumPy 1.24, so np.iinfo(np.int) will
# raise there -- presumably np.iinfo(int) is intended. Confirm.
npint = np.iinfo(np.int)
if seed is None:
seed = np.random.randint(1, npint.max)
# NOTE(review): `random` is not imported in this template; either the
# rendering host provides it or np.random.seed was intended. Confirm.
random.seed(seed)
#< #exercise >#
# Rendered for the exercise variant: emit a JSON stub for this problem.
ex = "This problem has no key 'id' in the exercise schema."
output = json.dumps({'seed': seed,
'context': ex,
'questions': [],
'random': {}})
print(output)
#< /exercise >#
#< #solution >#
# Rendered for the solution variant: emit the matching solution JSON.
sol = "This problem's solution schema is OK."
output = json.dumps({'seed': seed,
'id': id,
'random': {},
'solution': sol})
print(output)
#< /solution >#
| 22.62069 | 59 | 0.501524 |
8e2557ca281f6ef3706e962149593c6e4d81fb67 | 820 | py | Python | auctions/apps.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | auctions/apps.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | auctions/apps.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | from django.apps import AppConfig
from django.db.models.signals import post_delete, post_save, pre_save
# Django app configuration for the 'auctions' app. ready() runs once the
# app registry is loaded and wires the model signal receivers.
class AuctionsConfig(AppConfig):
name = 'auctions'
def ready(self):
# importing model classes
from .models import Bid, Listing
# load it here to avoid 'apps aren't loaded yet err'
from .signals import post_save_bid_receiver, pre_save_listing_receiver, post_delete_listing_receiver
self.get_model('Bid')
self.get_model('Listing')
# registering signals with the model's string label
post_save.connect(post_save_bid_receiver, sender='auctions.Bid')
pre_save.connect(pre_save_listing_receiver, sender='auctions.Listing')
post_delete.connect(post_delete_listing_receiver,
sender='auctions.Listing')
| 35.652174 | 108 | 0.704878 |
0c262cfee9914341953e63fa3cb653bcb52d4c88 | 750 | py | Python | PornHub/PornHub/pipelines.py | dizhi333/WebHubBot | 1f643906727d64334fdd3770fb3694fcaddc59a6 | [
"MIT"
] | 5 | 2018-02-02T14:41:40.000Z | 2020-11-22T14:15:22.000Z | PornHub/PornHub/pipelines.py | 0xsb/WebHubBot | 012c16c62eeded18de7426cf68adc6c3d4fd9f8f | [
"MIT"
] | 3 | 2021-03-31T18:53:16.000Z | 2022-02-11T03:41:21.000Z | PornHub/PornHub/pipelines.py | 0xsb/WebHubBot | 012c16c62eeded18de7426cf68adc6c3d4fd9f8f | [
"MIT"
] | 3 | 2019-05-05T10:19:07.000Z | 2020-08-28T03:27:34.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from items import PornVideoItem
# Scrapy item pipeline that stores crawled PornVideoItem records in a local
# MongoDB ("PornHub" database, "PhRes" collection).
# NOTE(review): Python 2 print statements and the long-deprecated
# collection.insert() API -- this only runs under Python 2 / old pymongo.
class PornhubMongoDBPipeline(object):
def __init__(self):
# 'clinet' is a typo for 'client', kept byte-identical here.
clinet = pymongo.MongoClient("localhost", 27017)
db = clinet["PornHub"]
self.PhRes = db["PhRes"]
def process_item(self, item, spider):
print 'MongoDBItem', item
""" 判断类型 存入MongoDB """
# (the bare string above reads: "determine the type, store into MongoDB")
if isinstance(item, PornVideoItem):
print 'PornVideoItem True'
try:
self.PhRes.insert(dict(item))
except Exception:
# best-effort insert: duplicates/failures are deliberately swallowed
pass
return item
| 26.785714 | 65 | 0.62 |
1f84ca4ff155fd421b90bd769f8d7d8f32a54934 | 3,496 | py | Python | src/data/clean_data.py | javierAraluce/ARAGAN | 5954cb8f5975b16b506ed33aaf842fc1cd715044 | [
"MIT"
] | 1 | 2022-02-02T20:06:15.000Z | 2022-02-02T20:06:15.000Z | src/data/clean_data.py | javierAraluce/ARAGAN | 5954cb8f5975b16b506ed33aaf842fc1cd715044 | [
"MIT"
] | null | null | null | src/data/clean_data.py | javierAraluce/ARAGAN | 5954cb8f5975b16b506ed33aaf842fc1cd715044 | [
"MIT"
] | null | null | null | import numpy as np
import os
from tqdm import tqdm
# Directory layout of the BDDA dataset: paired camera frames and resized
# gaze maps for the training / validation / test splits.
# path of training images
imgs_train_path = 'dataset/BDDA/training/camera_images/'
# path of training maps
maps_train_path = 'dataset/BDDA/training/gazemap_images_resized/'
# path of validation images
imgs_val_path = 'dataset/BDDA/validation/camera_images/'
# path of validation maps
maps_val_path = 'dataset/BDDA/validation/gazemap_images_resized/'
# path of test images
imgs_test_path = 'dataset/BDDA/test/camera_images/'
# path of test maps
maps_test_path = 'dataset/BDDA/test/gazemap_images_resized/'
def clean_dataset():
'''
Clean dataset
'''
cont_train = 0
cont_val = 0
cont_test = 0
data_names = [str(f) for f in os.listdir(imgs_train_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, frame = image.split("_")
if not (os.path.exists(maps_train_path + 'all_images/' + seq + '_' + frame)):
cont_train += 1
# print(maps_train_path + 'all_images/' + seq + '_' + frame)
os.remove(imgs_train_path + 'all_images/' + seq + "_" + frame)
data_names = [str(f) for f in os.listdir(maps_train_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, frame = image.split("_")
if not (os.path.exists(imgs_train_path + 'all_images/' + seq + '_' + frame)):
cont_train += 1
# print(imgs_train_path + 'all_images/' + seq + '_' + frame)
os.remove(maps_train_path + 'all_images/' + seq + '_' + frame)
data_names = [str(f) for f in os.listdir(imgs_val_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, frame = image.split("_")
if not (os.path.exists(maps_val_path + 'all_images/'+ seq + "_pure_hm_" + frame)):
cont_val += 1
# print(maps_val_path + 'all_images/' + seq + "_pure_hm_" + frame)
os.remove(imgs_val_path + 'all_images/' + seq + "_" + frame)
data_names = [str(f) for f in os.listdir(maps_val_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, trash1, trash2, frame = image.split("_")
if not (os.path.exists(imgs_val_path + 'all_images/' + seq + '_' + frame)):
cont_val += 1
# print(imgs_val_path + 'all_images/' + seq + '_' + frame)
os.remove(maps_val_path + 'all_images/' + seq + '_pure_hm_' + frame)
data_names = [str(f) for f in os.listdir(imgs_test_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, frame = image.split("_")
if not (os.path.exists(maps_test_path + 'all_images/'+ seq + "_pure_hm_" + frame)):
cont_test += 1
print(maps_val_path + 'all_images/' + seq + "_pure_hm_" + frame)
os.remove(imgs_test_path + 'all_images/' + seq + "_" + frame)
data_names = [str(f) for f in os.listdir(maps_test_path + 'all_images/') if f.endswith('.jpg')]
for image in tqdm(data_names):
seq, trash1, trash2, frame = image.split("_")
if not (os.path.exists(imgs_test_path + 'all_images/' + seq + '_' + frame)):
# print(imgs_val_path + 'all_images/' + seq + '_' + frame)
os.remove(maps_test_path + 'all_images/' + seq + '_pure_hm_' + frame)
print('cont_train: ', cont_train)
print('cont_val: ', cont_val)
print('cont_test: ', cont_test)
if __name__ == "__main__":
clean_dataset() | 43.7 | 100 | 0.618421 |
83905f23090a09e0af4b9eb2b8054cc3e9765981 | 48 | py | Python | docker/distorm3/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 48 | 2018-12-12T12:18:09.000Z | 2022-03-05T02:23:42.000Z | docker/distorm3/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 7,201 | 2018-12-24T17:14:17.000Z | 2022-03-31T13:39:12.000Z | docker/distorm3/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 94 | 2018-12-17T10:59:21.000Z | 2022-03-29T12:59:30.000Z | import pefile
import distorm3
# Smoke test: reaching this line proves both native-extension packages
# (pefile imported above, distorm3 here) import cleanly inside the image.
print("All OK!")
| 9.6 | 16 | 0.75 |
09e84d4dc4cb9affa10fd9d25029e2905e9ee26c | 2,129 | py | Python | fastlmmhpc/pyplink/altset_list/Consecutive.py | epiproject/FaST-LMM-HPC | 5d6df81268aeff19015194ab0718a9163b8d33af | [
"Apache-2.0"
] | 2 | 2019-12-10T09:55:40.000Z | 2019-12-11T20:58:10.000Z | fastlmmhpc/pyplink/altset_list/Consecutive.py | epiproject/FaST-LMM-HPC | 5d6df81268aeff19015194ab0718a9163b8d33af | [
"Apache-2.0"
] | null | null | null | fastlmmhpc/pyplink/altset_list/Consecutive.py | epiproject/FaST-LMM-HPC | 5d6df81268aeff19015194ab0718a9163b8d33af | [
"Apache-2.0"
] | null | null | null | import numpy as SP
import subprocess, sys, os.path
from itertools import *
from fastlmmhpc.pyplink.snpset import *
import math
class Consecutive(object): # implements ISnpSetList
    """
    Spec for SNP-set lists built from consecutive SNPs.

    The sets should be every consecutive set of SNPs within a 2cM window of each user
    (distance in cM is in the 3rd column of the bim file). As for the name of the set,
    please make it <position-of-first-snp>@<position-of-middle-snp>@<position-of-last-snp>.
    For 'middle' please break a tie to the first SNP.
    """
    def __init__(self, bimFileName, cMWindow):
        # bimFileName: PLINK .bim file describing the SNPs
        # cMWindow: maximum window width in centimorgans
        self.BimFileName = bimFileName
        self.CMWindow = cMWindow

    def addbed(self, bed):
        """Bind this spec to a bed object, yielding an iterable of SNP sets."""
        return ConsecutivePlusBed(self, bed)

    def copyinputs(self, copier):
        """Register the .bim file as an input with the job copier."""
        copier.input(self.BimFileName)

    #would be nicer if these used generic pretty printer
    def __repr__(self):
        # BUGFIX: the second field was mislabeled 'bimFileName'; it actually
        # prints the cM window.
        return "Consecutive(bimFileName={0},cMWindow={1})".format(self.BimFileName, self.CMWindow)
# Iterates sliding, chromosome-bounded windows of SNPs no wider than
# spec.CMWindow centimorgans, yielding one SnpAndSetNamePlusBed per window
# start position.
class ConsecutivePlusBed(object): # implements ISnpSetListPlusBed
def __init__(self, spec, bed):
self.spec = spec
self.bed = bed
import pandas as pd
# .bim columns used: 0=chromosome, 1=SNP id (rs), 2=genetic distance (cM)
bimfields = pd.read_csv(self.spec.BimFileName,delimiter = '\s',usecols = (0,1,2,3),header=None,index_col=False,engine='python')
self.chrom = bimfields[0]
self.rs = bimfields[1]
self.cm = bimfields[2]
def __iter__(self):
startIndex=-1
endIndex=0 #one too far
while(True):
startIndex+=1
if startIndex >= len(self.rs):
return
# grow the window while it stays on one chromosome and within CMWindow
while endIndex < len(self.rs) and self.chrom[startIndex] == self.chrom[endIndex] and self.cm[endIndex] - self.cm[startIndex] <= self.spec.CMWindow:
endIndex+=1
lastIndex = endIndex - 1;
# NOTE(review): the set name is built from row *indices*, while the spec
# docstring asks for SNP *positions*; also math.floor returns a float on
# Python 3, so names render like '0@2.0@5'. Confirm intended behavior.
midIndex = math.floor((startIndex+lastIndex)/2.0)
name = "{0}@{1}@{2}".format(startIndex,midIndex,lastIndex)
snpList=self.rs[range(startIndex,endIndex)]
yield SnpAndSetNamePlusBed(name,snpList,self.bed)
def __len__(self):
return len(self.rs)
| 33.793651 | 160 | 0.638328 |
fec0ec4650ac291cadb42a928141bd293bed3b89 | 7,615 | py | Python | examples/pwr_run/checkpointing/final_eval/random/job61.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final_eval/random/job61.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final_eval/random/job61.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# --- CLI for a scheduler-driven CIFAR-10 training job; all progress is
# --- reported to the scheduler node over send_signal on port 10002.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.008
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_random/' + job_name + '*'
total_epochs = 11
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
# checkpoint file name encodes the last checkpointed epoch: <job>_<epoch>.h5
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Either reload the checkpointed model (signalling b_end/c_end around the
# costly load so the scheduler can time it) or build a fresh VGG classifier.
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
# SIGTERM handler: report time wasted in the current (unfinished) epoch,
# replace any existing checkpoint with <job>_<current_epoch>.h5, notify the
# scheduler, and exit.
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_random/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
# Keras callback reporting per-epoch timing and completion fraction to the
# scheduler; also marks the checkpoint as usable ('ckpt_qual') once the
# first epoch of this run has begun.
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.682403 | 118 | 0.692974 |
01fba698973de3a6f6eb147c5866ed238fff9d4a | 7,543 | py | Python | Data_gen/data_process_audspec_7params_bpass_filter.py | Yashish92/MirrorNet-for-Audio-synthesizer-controls | 99145b7cb524ebb68e1991701f7f0c264ecb4996 | [
"MIT"
] | 1 | 2022-02-19T08:27:49.000Z | 2022-02-19T08:27:49.000Z | Data_gen/data_process_audspec_7params_bpass_filter.py | Yashish92/MirrorNet-for-Audio-synthesizer-controls | 99145b7cb524ebb68e1991701f7f0c264ecb4996 | [
"MIT"
] | null | null | null | Data_gen/data_process_audspec_7params_bpass_filter.py | Yashish92/MirrorNet-for-Audio-synthesizer-controls | 99145b7cb524ebb68e1991701f7f0c264ecb4996 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import matplotlib
SERVER = True # This variable enable or disable matplotlib, set it to true when you use the server!
if SERVER:
matplotlib.use('Agg')
import numpy as np
from scipy import signal
import os
import string
import librosa
import time
import random
import h5py
# import pyworld as pw
from tqdm import tqdm
import sys
# import matplotlib.pyplot as plt
import matplotlib
import matplotlib.pyplot as plt
import scipy.io.wavfile
from random_generation import get_f0, get_ap, get_random_h
import nsltools as nsl
import music_synthesize_new as music_syn
import json
import re
np.set_printoptions(threshold=sys.maxsize)
# Build an HDF5 dataset ('<path_audio>_new.data' or '..._random_new.data')
# holding: power-normalized raw waveforms ('speaker'), synthesizer parameter
# tracks loaded from .npy files ('hidden'), and 128-channel auditory
# spectrograms computed with nsltools ('spec513_pw').
def generate_data(path_audio, path_params, audio_time=2, sampling_rate=8000, random=False): # changed audio time to 1 from 2
# path='./data/rahil_trial_data/'
# audio_time = 2
# sampling_rate = 22050
# random=True
# data_type = 'train' or 'cv'
# data path
no_of_params = 7 # 16 to 128
N = 5 # number of parameter samples across time
spec_len = 250
clean_path = path_audio # read Botinhao data from this path
directory = path_audio[:-1] + "_new.data" # save data to this path
if random:
directory = path_audio[:-1] + "_random" + "_new.data" # save data to this path
# NOTE(review): np.int was removed in NumPy 1.24; use int(...) there.
audio_len = np.int(audio_time * sampling_rate)
cur_sample = 0 # current sample generated
no_files = 0
# collect raw waveform and trim them into equal length, here is 32000 (16k * 2s)
# rows are indexed by the file number parsed from each .wav name, so the
# buffer is oversized and trimmed to no_files afterwards
spk_wav_tmp = np.zeros((100000, audio_len))
for (dirpath, dirnames, filenames) in tqdm(os.walk(clean_path)):
for files in tqdm(filenames):
if '.wav' in files:
file_no = int(re.split('_|\.',files)[2])
s_wav = clean_path + files
s_wav, s_sr = librosa.load(s_wav, sr=sampling_rate)
#print(file_no)
#chunk_num = len(s_wav) // audio_len
#extra_len = len(s_wav) % audio_len
#if extra_len > audio_len // 2:
# trim_len = (chunk_num + 1) * audio_len
#else:
# trim_len = chunk_num * audio_len
#if len(s_wav) < trim_len:
# if chunk_num == 0:
# spk_wav = np.concatenate([s_wav, np.zeros(trim_len - len(s_wav))])
# else:
# spk_wav = np.concatenate([s_wav[:chunk_num * audio_len], s_wav[-audio_len:]])
#elif len(s_wav) > trim_len:
# spk_wav = s_wav[:trim_len]
spk_wav = s_wav[:audio_len]
#print(spk_wav.shape)
spk_wav = np.array(spk_wav).reshape(-1, audio_len)
#print(spk_wav.shape)
spk_wav_tmp[file_no] = spk_wav
#cur_sample = cur_sample + spk_wav.shape[0]
no_files = no_files + 1
spk_wav_tmp = spk_wav_tmp[:no_files, :]
# spk_wav_tmp.shape= (Num of examples (N), 32000)
print(spk_wav_tmp.shape)
#print(spk_wav_tmp[3])
print('trim finished') # spk_wav_tmp.shape[0]=Number of files in data directory or Num of examples= N
param_array = np.zeros(7) # for music synthesizer preset
spk_tmp = np.zeros((spk_wav_tmp.shape[0], spk_wav_tmp.shape[1])) # raw speech with normalized power
h_tmp = np.zeros((spk_tmp.shape[0], no_of_params, N)) # ideal hiddens from world
spec_tmp_513 = np.zeros(
(spk_wav_tmp.shape[0], 128, spec_len)) # dimensions of the AudSpec - needs to be softcoded for scalability
spec_tmp_513_pw = np.zeros((spk_wav_tmp.shape[0], 128,
spec_len)) # dimensions of the Reconstructed AudSpec - needs to be softcoded for scalability
print(spec_tmp_513_pw.shape, 'spec_tmp_513_pw')
# create an array with parameters audio files
# random files encode the index at a different position in the name
i = 0
for (dirpath, dirnames, filenames) in tqdm(os.walk(path_params)):
for files in tqdm(filenames):
if '.npy' in files:
if random:
file_no = int(re.split('_|\.',files)[3])
else:
file_no = int(re.split('_|\.',files)[2])
# print(dirnames)
param_file = path_params + files
# print(param_file)
loaded_params = np.load(param_file, allow_pickle=True)
# loaded_params, loaded_chars, loaded_audio = loaded["param"], loaded["chars"], loaded["audio"]
# print(loaded_params)
# param_array = gen_json_list(loaded_params)
# print(param_array)
# param_array = np.reshape(param_array, (6, 1)) # 16 to 128
# extended_arr = np.tile(param_array, (1, 126))
# print(extended_arr.shape)
# print(extended_arr)
h_tmp[file_no, :, :] = loaded_params.copy(order='C')
# h_tmp[i, :, :] = extended_arr # 16 to 128
i += 1
#print(h_tmp[2, :, :])
'''Parameters for AudSpectrogram'''
frmlen = 8
tc = 8
paras_c = [frmlen, tc, -2, np.log2(sampling_rate / 16000.0)]
# print(sampling_rate)
pad = 40
# per-waveform: power-normalize, unit-sequence, then auditory spectrogram
for i in tqdm(range(spk_wav_tmp.shape[0])):
# print("No of data: ", spk_wav_tmp.shape[0])
if i % 100 == 0:
print(i)
wav = spk_wav_tmp[i, :].copy().astype('float64')
wav = wav.reshape(-1)
# wav=nsl.unitseq(wav) #THis here causes the problem: RuntimeWarning: overflow encountered in exp
wav = wav / np.sqrt(np.sum(wav ** 2)) # power normalization
wav = nsl.unitseq(wav) # THis here causes the problem: RuntimeWarning: overflow encountered in exp
# #wav.shape=(32000,)
# if not random:
spk_tmp[i, :] = wav # this is saved
#spec513 = np.sqrt(nsl.wav2aud(wav, paras_c)) # audSpec
spec513 = nsl.wav2aud(wav, paras_c) # audSpec
# print (spec513.shape, 'spec513--line 116')
spec_tmp_513[i, :, 0:spec_len] = spec513.T # AudSpec
# print (spec_tmp_513[i,:,:])
# write everything out; 'spec513' is created but intentionally left empty
dset = h5py.File(directory, 'w')
print(spk_tmp.shape)
spk_set = dset.create_dataset('speaker', shape=(spk_tmp.shape[0], spk_tmp.shape[1]), dtype=np.float64)
hid_set = dset.create_dataset('hidden', shape=(spk_tmp.shape[0], no_of_params, N), dtype=np.float64)
# spec513_set = dset.create_dataset('spec513', shape=(spk_tmp.shape[0], 513, 401), dtype=np.float64)
# spec_513_pw_set = dset.create_dataset('spec513_pw', shape=(spk_tmp.shape[0], 513, 401), dtype=np.float64)
spec513_set = dset.create_dataset('spec513', shape=(spk_tmp.shape[0], 128, spec_len), dtype=np.float64)
spec_513_pw_set = dset.create_dataset('spec513_pw', shape=(spk_tmp.shape[0], 128, spec_len), dtype=np.float64)
spk_set[:, :] = spk_tmp
hid_set[:, :, :] = h_tmp
# spec513_set[:,:,:] = d
spec513_set = []
spec_513_pw_set[:, :, :] = spec_tmp_513
dset.close()
print('finished')
# CLI entry point: generate_data(<audio dir>, <params dir> [, "random"])
if __name__ == "__main__":
if (len(sys.argv) == 3 or len(sys.argv) == 4) and sys.argv[1] != "-h":
if len(sys.argv) == 3:
generate_data(sys.argv[1], sys.argv[2])
elif sys.argv[3] == "random":
generate_data(sys.argv[1], sys.argv[2], random=True)
else:
print("We did not understand the second argument.")
else:
# NOTE(review): the usage text omits the required params-dir argument.
print("USAGE: python3", sys.argv[0], "<path to the wav data>")
# if __name__=="__main__":
# generate_data('./data/rahil_trial_data/')
| 39.082902 | 126 | 0.601087 |
129a7ac979700281256ea66f8839177308f3f2f5 | 4,816 | py | Python | python3/koans/about_class_attributes.py | mullikine/python_koans | 77fc56696cd9c5712ca9d5a28b03c4ca5691f7fe | [
"MIT"
] | 11 | 2016-07-15T14:14:34.000Z | 2020-09-16T06:10:21.000Z | python3/koans/about_class_attributes.py | mullikine/python_koans | 77fc56696cd9c5712ca9d5a28b03c4ca5691f7fe | [
"MIT"
] | 3 | 2016-02-19T13:07:16.000Z | 2019-09-24T22:26:28.000Z | python3/koans/about_class_attributes.py | mullikine/python_koans | 77fc56696cd9c5712ca9d5a28b03c4ca5691f7fe | [
"MIT"
] | 14 | 2016-01-16T09:58:03.000Z | 2021-06-29T16:38:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
# Python Koans exercise (answers filled in) exploring class vs instance
# attributes, classmethod/staticmethod, and properties.
# NOTE(review): the hard-coded dir() lengths (25, 22) depend on the Python
# version, and assertRegexpMatches is the deprecated pre-3.2 spelling of
# assertRegex -- these answers only hold on the interpreter they were
# solved against. Confirm before reuse.
class AboutClassAttributes(Koan):
class Dog:
pass
def test_objects_are_objects(self):
fido = self.Dog()
self.assertEqual(True, isinstance(fido, object))
def test_classes_are_types(self):
self.assertEqual(True, self.Dog.__class__ == type)
def test_classes_are_objects_too(self):
self.assertEqual(True, issubclass(self.Dog, object))
def test_objects_have_methods(self):
fido = self.Dog()
self.assertEqual(25, len(dir(fido)))
def test_classes_have_methods(self):
self.assertEqual(25, len(dir(self.Dog)))
def test_creating_objects_without_defining_a_class(self):
singularity = object()
self.assertEqual(22, len(dir(singularity)))
def test_defining_attributes_on_individual_objects(self):
fido = self.Dog()
fido.legs = 4
self.assertEqual(4, fido.legs)
def test_defining_functions_on_individual_objects(self):
fido = self.Dog()
fido.wag = lambda : 'fidos wag'
self.assertEqual('fidos wag', fido.wag())
def test_other_objects_are_not_affected_by_these_singleton_functions(self):
fido = self.Dog()
rover = self.Dog()
def wag():
return 'fidos wag'
fido.wag = wag
with self.assertRaises(AttributeError): rover.wag()
# ------------------------------------------------------------------
# Dog2: later defs of bark/growl replace the instance methods of the
# same name, so only the static/class versions survive on the class.
class Dog2:
def wag(self):
return 'instance wag'
def bark(self):
return "instance bark"
def growl(self):
return "instance growl"
@staticmethod
def bark():
return "staticmethod bark, arg: None"
@classmethod
def growl(cls):
return "classmethod growl, arg: cls=" + cls.__name__
def test_since_classes_are_objects_you_can_define_singleton_methods_on_them_too(self):
self.assertRegexpMatches(self.Dog2.growl(), 'cls=Dog2')
def test_classmethods_are_not_independent_of_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.growl(), 'cls=Dog2')
self.assertRegexpMatches(self.Dog2.growl(), 'cls=Dog2')
def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
self.assertRegexpMatches(self.Dog2.bark(), 'bark, arg: None')
def test_staticmethods_also_overshadow_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.bark(), 'arg: None')
# ------------------------------------------------------------------
# Dog3: contrasts class-level state (classmethod accessors) with
# per-instance state, and shows properties built from both.
class Dog3:
def __init__(self):
self._name = None
def get_name_from_instance(self):
return self._name
def set_name_from_instance(self, name):
self._name = name
@classmethod
def get_name(cls):
return cls._name
@classmethod
def set_name(cls, name):
cls._name = name
name = property(get_name, set_name)
name_from_instance = property(get_name_from_instance, set_name_from_instance)
def test_classmethods_can_not_be_used_as_properties(self):
fido = self.Dog3()
with self.assertRaises(TypeError): fido.name = "Fido"
def test_classes_and_instances_do_not_share_instance_attributes(self):
fido = self.Dog3()
fido.set_name_from_instance("Fido")
fido.set_name("Rover")
self.assertEqual("Fido", fido.get_name_from_instance())
self.assertEqual("Rover", self.Dog3.get_name())
def test_classes_and_instances_do_share_class_attributes(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual("Fido", fido.get_name())
self.assertEqual("Fido", self.Dog3.get_name())
# ------------------------------------------------------------------
# Dog4: pre-decorator spelling -- wrapping plain functions with
# classmethod()/staticmethod() by explicit assignment.
class Dog4:
def a_class_method(cls):
return 'dogs class method'
def a_static_method():
return 'dogs static method'
a_class_method = classmethod(a_class_method)
a_static_method = staticmethod(a_static_method)
def test_you_can_define_class_methods_without_using_a_decorator(self):
self.assertEqual('dogs class method', self.Dog4.a_class_method())
def test_you_can_define_static_methods_without_using_a_decorator(self):
self.assertEqual('dogs static method', self.Dog4.a_static_method())
# ------------------------------------------------------------------
def test_heres_an_easy_way_to_explicitly_call_class_methods_from_instance_methods(self):
fido = self.Dog4()
self.assertEqual('dogs class method', fido.__class__.a_class_method())
| 31.070968 | 92 | 0.623754 |
5e607e8cc1b41e3e37a4baad1aba26bfbe80629e | 4,990 | py | Python | bikeshed/wpt/wptElement.py | sideshowbarker/bikeshed | 1bdc5b25e90441d987cf722e1e1178f4dbb1ee92 | [
"CC0-1.0"
] | 1 | 2017-12-24T05:55:06.000Z | 2017-12-24T05:55:06.000Z | bikeshed/wpt/wptElement.py | toji/bikeshed | c9be1fd32a93cdf394ae251db3bbccc87c5e82a9 | [
"CC0-1.0"
] | null | null | null | bikeshed/wpt/wptElement.py | toji/bikeshed | c9be1fd32a93cdf394ae251db3bbccc87c5e82a9 | [
"CC0-1.0"
] | 1 | 2017-12-24T05:54:38.000Z | 2017-12-24T05:54:38.000Z | # -*- coding: utf-8 -*-
import io
from .. import config
from ..html import findAll, textContent, removeNode, E, addClass, appendChild, clearContents
from ..messages import *
def processWptElements(doc):
    """Replace <wpt>/<wpt-rest> elements in *doc* with rendered test lists.

    The WPT test manifest is loaded lazily (only when the document actually
    contains wpt elements). When a path prefix is configured but no
    <wpt-rest> element exists, warns about tests under the prefix that the
    document never references.
    """
    pathPrefix = doc.md.wptPathPrefix
    atLeastOneElement = False
    testData = None
    # <wpt> elements
    wptElements = findAll("wpt", doc)
    seenTestNames = set()
    for el in wptElements:
        atLeastOneElement = True
        if testData is None:
            testData = loadTestData(doc)
        testNames = testNamesFromEl(el, pathPrefix=pathPrefix)
        for testName in testNames:
            if testName not in testData:
                die("Couldn't find WPT test '{0}' - did you misspell something?", testName, el=el)
                continue
            seenTestNames.add(testName)
        createHTML(doc, el, testNames)

    # <wpt-rest> elements
    wptRestElements = findAll("wpt-rest", doc)
    if wptRestElements and testData is None:
        testData = loadTestData(doc)
    if len(wptRestElements) > 1:
        die("Only one <wpt-rest> element allowed per document, you have {0}.", len(wptRestElements))
        wptRestElements = wptRestElements[0:1]
    elif len(wptRestElements) == 1:
        localPrefix = wptRestElements[0].get("pathprefix")
        if localPrefix is not None:
            pathPrefix = localPrefix
        if pathPrefix is None:
            # BUGFIX: the attribute example previously rendered as
            # "pathprefix= attribute" because unescaped double quotes made
            # Python concatenate two string literals; use single quotes.
            die("Can't use <wpt-rest> without either a pathprefix='' attribute or a 'WPT Path Prefix' metadata.")
            return
        atLeastOneElement = True
        prefixedNames = [p for p in testData if prefixInPath(pathPrefix, p) and p not in seenTestNames]
        if len(prefixedNames) == 0:
            die("Couldn't find any tests with the path prefix '{0}'.", pathPrefix)
            return
        createHTML(doc, wptRestElements[0], prefixedNames)
        warn("<wpt-rest> is intended for debugging only. Move the tests to <wpt> elements next to what they're testing.")
    else:
        if pathPrefix:
            if testData is None:
                testData = loadTestData(doc)
            checkForOmittedTests(pathPrefix, testData, seenTestNames)

    if atLeastOneElement:
        # Only ship the wpt CSS when at least one block was rendered.
        doc.extraStyles['style-wpt'] = wptStyle
def createHTML(doc, blockEl, testNames):
    # Render the <wpt> block according to the document's "WPT Display"
    # metadata: either remove it entirely, or replace it with a <ul> of
    # per-test links (results, live test, source).
    if doc.md.wptDisplay == "none":
        removeNode(blockEl)
    elif doc.md.wptDisplay == "inline":
        blockEl.tag = "ul"
        addClass(blockEl, "wpt-tests-block")
        clearContents(blockEl)
        for testName in testNames:
            # HTTPS-only tests (and service worker tests, which require a
            # secure context) must link to the https:// live server.
            if ".https." in testName or ".serviceworker." in testName:
                liveTestScheme = "https"
            else:
                liveTestScheme = "http"
            _, _, lastNameFragment = testName.rpartition("/")
            singleTestEl = E.li({"class": "wpt-test"},
                                E.a({"href": "https://wpt.fyi/results/" + testName}, lastNameFragment),
                                " ",
                                E.a({"title": testName, "href": "{0}://web-platform-tests.live/{1}".format(liveTestScheme, testName)}, E.small("(live test)")),
                                " ",
                                E.a({"href": "https://github.com/web-platform-tests/wpt/blob/master/" + testName}, E.small("(source)")))
            appendChild(blockEl, singleTestEl)
    else:
        die("Programming error, uncaught WPT Display value in createHTML.")
def testNamesFromEl(el, pathPrefix=None):
    """Extract the newline-separated test names from *el*, each joined
    with the active path prefix."""
    # A pathprefix attribute on the element overrides the document-level one.
    localPrefix = el.get("pathprefix")
    if localPrefix is not None:
        pathPrefix = localPrefix
    stripped = (line.strip() for line in textContent(el).split("\n"))
    return [prefixPlusPath(pathPrefix, name) for name in stripped if name]
def prefixPlusPath(prefix, path):
    """Join *prefix* and *path*, normalizing slashes.

    An absolute path (leading "/") ignores the prefix entirely.
    """
    if path.startswith("/"):
        return path[1:]
    normalized = normalizePathSegment(prefix)
    if normalized is None:
        return path
    return normalized + path
def prefixInPath(prefix, path):
    """Return True when *path* falls under *prefix*; None matches nothing."""
    return prefix is not None and path.startswith(normalizePathSegment(prefix))
def normalizePathSegment(pathSeg):
    """Normalize to "no leading slash, exactly one trailing slash".

    None passes through unchanged.
    """
    if pathSeg is None:
        return None
    stripped = pathSeg[1:] if pathSeg.startswith("/") else pathSeg
    return stripped if stripped.endswith("/") else stripped + "/"
def checkForOmittedTests(pathPrefix, testData, seenTestNames):
    """Warn about non-tentative tests under *pathPrefix* that the spec
    never references."""
    missing = [path for path in testData.keys()
               if ".tentative." not in path
               and prefixInPath(pathPrefix, path)
               and path not in seenTestNames]
    if missing:
        warn("There are {0} WPT tests underneath your path prefix that aren't in your document and must be added:\n{1}",
             len(missing),
             "\n".join(" " + path for path in sorted(missing)))
def loadTestData(doc):
    """Parse wpt-tests.txt (lines of "<type> <path>") into {path: type}."""
    # [1:] skips the header line of the manifest.
    manifest_lines = doc.dataFile.fetch("wpt-tests.txt", str=True).split("\n")[1:]
    result = {}
    for entry in manifest_lines:
        test_type, _, test_path = entry.strip().partition(" ")
        result[test_path] = test_type
    return result
def xor(a, b):
    """Logical exclusive-or of the truthiness of *a* and *b*."""
    return (not a) != (not b)
# CSS injected (as doc.extraStyles['style-wpt']) into any spec that renders
# at least one <wpt> block; it styles the generated <ul class="wpt-tests-block">.
wptStyle = '''
.wpt-tests-block {
list-style: none;
border-left: .5em solid hsl(290, 70%, 60%);
background: hsl(290, 70%, 95%);
margin: 1em auto;
padding: .5em;
display: grid;
grid-template-columns: 1fr auto auto;
grid-column-gap: .5em;
}
.wpt-tests-block::before {
content: "Tests";
grid-column: 1/-1;
color: hsl(290, 70%, 30%);
text-transform: uppercase;
}
.wpt-test {
display: contents;
}
.wpt-test > a {
text-decoration: underline;
border: none;
}
'''
| 29.011628 | 131 | 0.703407 |
f112ba293ab4f0da14428f48eb931868c61a08ef | 209 | py | Python | src/pretix/plugins/ticketoutputpdf/urls.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/plugins/ticketoutputpdf/urls.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/plugins/ticketoutputpdf/urls.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/pdfoutput/editor/$', views.EditorView.as_view(),
name='editor'),
]
| 23.222222 | 111 | 0.631579 |
f2b41363868ca26aac34aab7d665d435d18297dc | 15,220 | py | Python | addons/blender_mmd_tools-main/mmd_tools/operators/model.py | V-Sekai/V-Sekai-Blender-tools | 3473ad4abb737756290a9007273519460742960d | [
"MIT"
] | 2 | 2021-12-21T16:38:58.000Z | 2022-01-08T00:56:35.000Z | addons/blender_mmd_tools-main/mmd_tools/operators/model.py | V-Sekai/V-Sekai-Blender-game-tools | 3473ad4abb737756290a9007273519460742960d | [
"MIT"
] | 1 | 2022-01-29T05:46:50.000Z | 2022-01-29T05:46:50.000Z | addons/blender_mmd_tools-main/mmd_tools/operators/model.py | V-Sekai/V-Sekai-Blender-game-tools | 3473ad4abb737756290a9007273519460742960d | [
"MIT"
] | 1 | 2021-11-07T19:41:34.000Z | 2021-11-07T19:41:34.000Z | # -*- coding: utf-8 -*-
import bpy
import mmd_tools.core.model as mmd_model
from bpy.types import Operator
from mmd_tools import register_wrap
from mmd_tools.bpyutils import SceneOp
from mmd_tools.core.bone import FnBone
@register_wrap
class MorphSliderSetup(Operator):
    """Create, bind or unbind the placeholder object driving MMD morph sliders."""
    bl_idname = 'mmd_tools.morph_slider_setup'
    bl_label = 'Morph Slider Setup'
    bl_description = 'Translate MMD morphs of selected object into format usable by Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    # Annotation syntax (Blender 2.8+ property definition) for consistency
    # with the other operators in this module (e.g. BuildRig).
    type: bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items=[
            ('CREATE', 'Create', 'Create placeholder object for morph sliders', 'SHAPEKEY_DATA', 0),
            ('BIND', 'Bind', 'Bind morph sliders', 'DRIVER', 1),
            ('UNBIND', 'Unbind', 'Unbind morph sliders', 'X', 2),
        ],
        default='CREATE',
    )

    def execute(self, context):
        obj = context.active_object
        root = mmd_model.Model.findRoot(context.active_object)
        rig = mmd_model.Model(root)
        if self.type == 'BIND':
            rig.morph_slider.bind()
        elif self.type == 'UNBIND':
            rig.morph_slider.unbind()
        else:
            rig.morph_slider.create()
        # Restore the user's active object (slider setup changes selection).
        SceneOp(context).active_object = obj
        return {'FINISHED'}
@register_wrap
class CleanRiggingObjects(Operator):
    """Remove temporary physics objects, reverting to plain MMD physics."""
    bl_idname = 'mmd_tools.clean_rig'
    bl_label = 'Clean Rig'
    bl_description = 'Delete temporary physics objects of selected object and revert physics to default MMD state'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def execute(self, context):
        root_object = mmd_model.Model.findRoot(context.active_object)
        mmd_model.Model(root_object).clean()
        SceneOp(context).active_object = root_object
        return {'FINISHED'}
@register_wrap
class BuildRig(Operator):
    """Convert the model's MMD physics definitions into Blender rigid bodies."""
    bl_idname = 'mmd_tools.build_rig'
    bl_label = 'Build Rig'
    bl_description = 'Translate physics of selected object into format usable by Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    non_collision_distance_scale: bpy.props.FloatProperty(
        name='Non-Collision Distance Scale',
        description='The distance scale for creating extra non-collision constraints while building physics',
        min=0, soft_max=10,
        default=1.5,
    )

    def execute(self, context):
        root_object = mmd_model.Model.findRoot(context.active_object)
        model = mmd_model.Model(root_object)
        model.build(self.non_collision_distance_scale)
        SceneOp(context).active_object = root_object
        return {'FINISHED'}
@register_wrap
class CleanAdditionalTransformConstraints(Operator):
    """Delete shadow bones, reverting bones to their plain MMD state."""
    bl_idname = 'mmd_tools.clean_additional_transform'
    bl_label = 'Clean Additional Transform'
    bl_description = 'Delete shadow bones of selected object and revert bones to default MMD state'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def execute(self, context):
        active = context.active_object
        root_object = mmd_model.Model.findRoot(active)
        mmd_model.Model(root_object).cleanAdditionalTransformConstraints()
        # Keep the user's active object after the cleanup.
        SceneOp(context).active_object = active
        return {'FINISHED'}
@register_wrap
class ApplyAdditionalTransformConstraints(Operator):
    """Translate MMD appended-bone (additional transform) setups for Blender."""
    bl_idname = 'mmd_tools.apply_additional_transform'
    bl_label = 'Apply Additional Transform'
    bl_description = 'Translate appended bones of selected object for Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def execute(self, context):
        active = context.active_object
        root_object = mmd_model.Model.findRoot(active)
        mmd_model.Model(root_object).applyAdditionalTransformConstraints()
        # Keep the user's active object after applying.
        SceneOp(context).active_object = active
        return {'FINISHED'}
@register_wrap
class SetupBoneFixedAxes(Operator):
    """Disable, load or apply the MMD fixed-axis setting of selected bones."""
    bl_idname = 'mmd_tools.bone_fixed_axis_setup'
    bl_label = 'Setup Bone Fixed Axis'
    bl_description = 'Setup fixed axis of selected bones'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    # Annotation syntax for consistency with the other operators (Blender 2.8+).
    type: bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items=[
            ('DISABLE', 'Disable', 'Disable MMD fixed axis of selected bones', 0),
            ('LOAD', 'Load', 'Load/Enable MMD fixed axis of selected bones from their Y-axis or the only rotatable axis', 1),
            ('APPLY', 'Apply', 'Align bone axes to MMD fixed axis of each bone', 2),
        ],
        default='LOAD',
    )

    def execute(self, context):
        arm = context.active_object
        # This operator only makes sense on an armature object.
        if not arm or arm.type != 'ARMATURE':
            self.report({'ERROR'}, 'Active object is not an armature object')
            return {'CANCELLED'}
        if self.type == 'APPLY':
            FnBone.apply_bone_fixed_axis(arm)
            # Additional transforms depend on bone axes, so re-apply them.
            FnBone.apply_additional_transformation(arm)
        else:
            FnBone.load_bone_fixed_axis(arm, enable=(self.type == 'LOAD'))
        return {'FINISHED'}
@register_wrap
class SetupBoneLocalAxes(Operator):
    """Disable, load or apply the MMD local-axes setting of each bone."""
    bl_idname = 'mmd_tools.bone_local_axes_setup'
    bl_label = 'Setup Bone Local Axes'
    bl_description = 'Setup local axes of each bone'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    # Annotation syntax for consistency with the other operators (Blender 2.8+).
    type: bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items=[
            ('DISABLE', 'Disable', 'Disable MMD local axes of selected bones', 0),
            ('LOAD', 'Load', 'Load/Enable MMD local axes of selected bones from their bone axes', 1),
            ('APPLY', 'Apply', 'Align bone axes to MMD local axes of each bone', 2),
        ],
        default='LOAD',
    )

    def execute(self, context):
        arm = context.active_object
        # This operator only makes sense on an armature object.
        if not arm or arm.type != 'ARMATURE':
            self.report({'ERROR'}, 'Active object is not an armature object')
            return {'CANCELLED'}
        if self.type == 'APPLY':
            FnBone.apply_bone_local_axes(arm)
            # Additional transforms depend on bone axes, so re-apply them.
            FnBone.apply_additional_transformation(arm)
        else:
            FnBone.load_bone_local_axes(arm, enable=(self.type == 'LOAD'))
        return {'FINISHED'}
@register_wrap
class CreateMMDModelRoot(Operator):
    """Create a new empty MMD model: a root object plus a basic armature."""
    bl_idname = 'mmd_tools.create_mmd_model_root_object'
    bl_label = 'Create a MMD Model Root Object'
    bl_description = 'Create a MMD model root object with a basic armature'
    bl_options = {'REGISTER', 'UNDO'}

    # Annotation syntax for consistency with the other operators (Blender 2.8+).
    name_j: bpy.props.StringProperty(
        name='Name',
        description='The name of the MMD model',
        default='New MMD Model',
    )
    name_e: bpy.props.StringProperty(
        name='Name(Eng)',
        description='The english name of the MMD model',
        default='New MMD Model',
    )
    scale: bpy.props.FloatProperty(
        name='Scale',
        description='Scale',
        default=0.08,
    )

    def execute(self, context):
        rig = mmd_model.Model.create(self.name_j, self.name_e, self.scale, add_root_bone=True)
        # Populate the default display frames for the fresh model.
        rig.initialDisplayFrames()
        return {'FINISHED'}

    def invoke(self, context, event):
        # Pop up a properties dialog so the user can set names/scale first.
        vm = context.window_manager
        return vm.invoke_props_dialog(self)
@register_wrap
class ConvertToMMDModel(Operator):
    """Wrap an existing armature and its meshes into a mmd_tools model
    hierarchy, converting materials/morphs/display frames along the way."""
    bl_idname = 'mmd_tools.convert_to_mmd_model'
    bl_label = 'Convert to a MMD Model'
    bl_description = 'Convert active armature with its meshes to a MMD model (experimental)'
    bl_options = {'REGISTER', 'UNDO'}

    ambient_color_source: bpy.props.EnumProperty(
        name='Ambient Color Source',
        description='Select ambient color source',
        items=[
            ('DIFFUSE', 'Diffuse', 'Diffuse color', 0),
            ('MIRROR', 'Mirror', 'Mirror color (if property "mirror_color" is available)', 1),
        ],
        default='DIFFUSE',
    )
    edge_threshold: bpy.props.FloatProperty(
        name='Edge Threshold',
        description='MMD toon edge will not be enabled if freestyle line color alpha less than this value',
        min=0,
        max=1.001,
        precision=3,
        step=0.1,
        default=0.1,
    )
    edge_alpha_min: bpy.props.FloatProperty(
        name='Minimum Edge Alpha',
        description='Minimum alpha of MMD toon edge color',
        min=0,
        max=1,
        precision=3,
        step=0.1,
        default=0.5,
    )
    scale: bpy.props.FloatProperty(
        name='Scale',
        description='Scaling factor for converting the model',
        default=0.08,
    )
    convert_material_nodes: bpy.props.BoolProperty(
        name='Convert Material Nodes',
        default=True,
    )
    middle_joint_bones_lock: bpy.props.BoolProperty(
        name='Middle Joint Bones Lock',
        description='Lock specific bones for backward compatibility.',
        default=False,
    )

    @classmethod
    def poll(cls, context):
        # Only available for an armature that is not in edit mode.
        obj = context.active_object
        return obj and obj.type == 'ARMATURE' and obj.mode != 'EDIT'

    def invoke(self, context, event):
        # Show the conversion options dialog before running execute().
        vm = context.window_manager
        return vm.invoke_props_dialog(self)

    def execute(self, context):
        #TODO convert some basic MMD properties
        armature = context.active_object
        scale = self.scale
        model_name = 'New MMD Model'
        root = mmd_model.Model.findRoot(armature)
        if root is None or root != armature.parent:
            # Not yet (correctly) rooted: create the model wrapper around
            # the existing armature.
            rig = mmd_model.Model.create(model_name, model_name, scale, armature=armature)
        self.__attach_meshes_to(armature, SceneOp(context).id_objects)
        self.__configure_rig(context, mmd_model.Model(armature.parent))
        return {'FINISHED'}

    def __attach_meshes_to(self, armature, objects):
        # Parent every object that deforms with this armature (via an
        # Armature modifier) but is not already below it in the hierarchy,
        # preserving world transforms.
        def __is_child_of_armature(mesh):
            if mesh.parent is None:
                return False
            return mesh.parent == armature or __is_child_of_armature(mesh.parent)

        def __is_using_armature(mesh):
            for m in mesh.modifiers:
                if m.type == 'ARMATURE' and m.object == armature:
                    return True
            return False

        def __get_root(mesh):
            if mesh.parent is None:
                return mesh
            return __get_root(mesh.parent)

        for x in objects:
            if __is_using_armature(x) and not __is_child_of_armature(x):
                x_root = __get_root(x)
                # Re-parent the topmost ancestor and restore its world
                # matrix so nothing visibly moves.
                m = x_root.matrix_world
                x_root.parent_type = 'OBJECT'
                x_root.parent = armature
                x_root.matrix_world = m

    def __configure_rig(self, context, rig):
        root = rig.rootObject()
        armature = rig.armature()
        meshes = tuple(rig.meshes())
        rig.loadMorphs()

        if self.middle_joint_bones_lock:
            # Lock translation on connected or unweighted bones, mirroring
            # legacy behavior.
            vertex_groups = {g.name for mesh in meshes for g in mesh.vertex_groups}
            for pose_bone in armature.pose.bones:
                if not pose_bone.parent:
                    continue
                if not pose_bone.bone.use_connect and pose_bone.name not in vertex_groups:
                    continue
                pose_bone.lock_location = (True, True, True)

        from mmd_tools.core.material import FnMaterial
        # Temporarily freeze node editing when the user opted out of
        # material node conversion; always restore in the finally clause.
        FnMaterial.set_nodes_are_readonly(not self.convert_material_nodes)
        try:
            for m in {x for mesh in meshes for x in mesh.data.materials if x}:
                FnMaterial.convert_to_mmd_material(m, context)
                mmd_material = m.mmd_material
                if self.ambient_color_source == 'MIRROR' and hasattr(m, 'mirror_color'):
                    mmd_material.ambient_color = m.mirror_color
                else:
                    # MMD convention: ambient defaults to half the diffuse.
                    mmd_material.ambient_color = [0.5 * c for c in mmd_material.diffuse_color]
                if hasattr(m, 'line_color'):  # freestyle line color
                    line_color = list(m.line_color)
                    mmd_material.enabled_toon_edge = line_color[3] >= self.edge_threshold
                    mmd_material.edge_color = line_color[:3] + [max(line_color[3], self.edge_alpha_min)]
        finally:
            FnMaterial.set_nodes_are_readonly(False)

        from mmd_tools.operators.display_item import DisplayItemQuickSetup
        DisplayItemQuickSetup.load_bone_groups(root.mmd_root, armature)
        rig.initialDisplayFrames(reset=False)  # ensure default frames
        DisplayItemQuickSetup.load_facial_items(root.mmd_root)
        root.mmd_root.active_display_item_frame = 0
@register_wrap
class ResetObjectVisibility(bpy.types.Operator):
    """Reset all mmd_tools visibility toggles of the active model to defaults:
    meshes and armature visible, physics helper objects hidden."""
    bl_idname = 'mmd_tools.reset_object_visibility'
    # BUGFIX: label previously misspelled "Visivility".
    bl_label = 'Reset Object Visibility'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context: bpy.types.Context):
        # Only available when the active object belongs to an MMD model.
        active_object: bpy.types.Object = context.active_object
        return mmd_model.Model.findRoot(active_object) is not None

    def execute(self, context: bpy.types.Context):
        active_object: bpy.types.Object = context.active_object
        mmd_root_object = mmd_model.Model.findRoot(active_object)
        mmd_root = mmd_root_object.mmd_root

        mmd_root_object.hide = False

        # Hide the helper group empties when they exist.
        rigid_group_object = mmd_model.FnModel.find_rigid_group(mmd_root_object)
        if rigid_group_object:
            rigid_group_object.hide = True

        joint_group_object = mmd_model.FnModel.find_joint_group(mmd_root_object)
        if joint_group_object:
            joint_group_object.hide = True

        temporary_group_object = mmd_model.FnModel.find_temporary_group(mmd_root_object)
        if temporary_group_object:
            temporary_group_object.hide = True

        # Default toggles: show meshes/armature, hide physics helpers.
        mmd_root.show_meshes = True
        mmd_root.show_armature = True
        mmd_root.show_temporary_objects = False
        mmd_root.show_rigid_bodies = False
        mmd_root.show_names_of_rigid_bodies = False
        mmd_root.show_joints = False
        mmd_root.show_names_of_joints = False

        return {'FINISHED'}
@register_wrap
class AssembleAll(Operator):
    """One-click build: additional transforms, physics, morph sliders,
    SDEF binding and property drivers."""
    bl_idname = 'mmd_tools.assemble_all'
    bl_label = 'Assemble All'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def execute(self, context):
        obj = context.active_object
        root = mmd_model.Model.findRoot(obj)
        model = mmd_model.Model(root)

        model.applyAdditionalTransformConstraints()
        model.build(1.5)
        model.morph_slider.bind()
        bpy.ops.mmd_tools.sdef_bind({'selected_objects': [obj]})
        root.mmd_root.use_property_driver = True

        SceneOp(context).active_object = obj
        return {'FINISHED'}
@register_wrap
class DisassembleAll(Operator):
    """Undo AssembleAll: disable drivers, unbind SDEF and morph sliders,
    then tear down physics and shadow bones."""
    bl_idname = 'mmd_tools.disassemble_all'
    bl_label = 'Disassemble All'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def execute(self, context):
        obj = context.active_object
        root = mmd_model.Model.findRoot(obj)
        model = mmd_model.Model(root)

        root.mmd_root.use_property_driver = False
        bpy.ops.mmd_tools.sdef_unbind({'selected_objects': [obj]})
        model.morph_slider.unbind()
        model.clean()
        model.cleanAdditionalTransformConstraints()

        SceneOp(context).active_object = obj
        return {'FINISHED'}
30cee60190195e34730a2579c0147bba5e01dc37 | 4,253 | py | Python | _posts/2020-12-18-目标检测专题/code/chapter02_image_classification_introduction/2.2_introduction_of_image_classification/classical_cnn_models/AlexNet/AlexNet.py | CarmanZheng/CarmanZheng.github.io | bb969ad037f3db4dae7b0d5edf99b365f9d589cb | [
"MIT"
] | null | null | null | _posts/2020-12-18-目标检测专题/code/chapter02_image_classification_introduction/2.2_introduction_of_image_classification/classical_cnn_models/AlexNet/AlexNet.py | CarmanZheng/CarmanZheng.github.io | bb969ad037f3db4dae7b0d5edf99b365f9d589cb | [
"MIT"
] | null | null | null | _posts/2020-12-18-目标检测专题/code/chapter02_image_classification_introduction/2.2_introduction_of_image_classification/classical_cnn_models/AlexNet/AlexNet.py | CarmanZheng/CarmanZheng.github.io | bb969ad037f3db4dae7b0d5edf99b365f9d589cb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
import torch.nn.functional as F
# Device configuration: prefers the second CUDA GPU when available.
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
print("You are using:", device)

# Hyper parameters
NUM_CLASSES = 10
num_epochs = 30
batch_size = 500
learning_rate = 0.001
DATA_PATH = '../../../../../dataset/'

# transform = transforms.Compose([transforms.Resize((70, 70)),
#                                 transforms.RandomCrop((64, 64)),
#                                 transforms.ToTensor()])
# Normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Training set (downloaded into DATA_PATH on first run).
train_dataset = torchvision.datasets.CIFAR10(root=DATA_PATH,
                                             train=True,
                                             transform=transform,
                                             download=True)
# Test set
test_dataset = torchvision.datasets.CIFAR10(root=DATA_PATH,
                                            train=False,
                                            transform=transform)

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# Human-readable CIFAR-10 class names, indexed by label id.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class AlexNet(nn.Module):
    """AlexNet-style CNN adapted to 32x32 CIFAR-10 inputs."""

    def __init__(self, num_classes=NUM_CLASSES):
        super(AlexNet, self).__init__()
        # Feature extractor: with 32x32 input, the stride-2 conv and three
        # 2x2 max-pools reduce spatial size 32 -> 16 -> 8 -> 4 -> 2, leaving
        # 256 channels of 2x2 maps.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(64, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
        )
        # Classifier head on the flattened 256 * 2 * 2 = 1024 features.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 2 * 2, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, 1024) before the fully-connected layers.
        x = x.view(x.size(0), 256 * 2 * 2)
        x = self.classifier(x)
        return x
model = AlexNet().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

# Test the model
# eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = argmax over the 10 logits.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
85e894bfb08b202d8fd4f47b132592e6005348e4 | 500 | py | Python | molecule/default/tests/test_default.py | bossjones/boss-ansible-role-update-hosts | e00ba0a9b910ebbd529513139f1d09f58d4fa8b5 | [
"Apache-2.0"
] | null | null | null | molecule/default/tests/test_default.py | bossjones/boss-ansible-role-update-hosts | e00ba0a9b910ebbd529513139f1d09f58d4fa8b5 | [
"Apache-2.0"
] | 4 | 2021-04-07T23:17:55.000Z | 2022-02-07T22:26:17.000Z | molecule/default/tests/test_default.py | bossjones/boss-ansible-role-update-hosts | e00ba0a9b910ebbd529513139f1d09f58d4fa8b5 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
import testinfra.utils.ansible_runner
# Resolve the hosts under test from the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
    """The role must render /etc/hosts.molecule with root ownership and one
    entry per inventory host."""
    f = host.file('/etc/hosts.molecule')

    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.contains('192.168.55.6 update-hosts-xenial-1 update-hosts-xenial-1')
    assert f.contains('192.168.55.7 update-hosts-xenial-2 update-hosts-xenial-2')
d9e3ece63c2a87bf465d139175635bfaf6a046ae | 76,804 | py | Python | mongomock/collection.py | aie0/mongomock | f0114d954831df81665819779e0b666c3eca3d94 | [
"BSD-3-Clause"
] | null | null | null | mongomock/collection.py | aie0/mongomock | f0114d954831df81665819779e0b666c3eca3d94 | [
"BSD-3-Clause"
] | null | null | null | mongomock/collection.py | aie0/mongomock | f0114d954831df81665819779e0b666c3eca3d94 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
import collections
from collections import OrderedDict
import copy
from datetime import datetime
import functools
import itertools
import json
import math
import threading
import time
import warnings
# bson is provided by pymongo; mongomock degrades gracefully without it.
try:
    from bson import json_util, SON, BSON
except ImportError:
    # BUGFIX: this previously assigned "json_utils" (trailing "s"), which
    # left the name "json_util" undefined whenever bson is not installed.
    json_util = SON = BSON = None
try:
import execjs
except ImportError:
execjs = None
try:
from pymongo import ReturnDocument
except ImportError:
class ReturnDocument(object):
BEFORE = False
AFTER = True
from sentinels import NOTHING
from six import iteritems
from six import iterkeys
from six import itervalues
from six import MAXSIZE
from six import string_types
from six import text_type
from mongomock.command_cursor import CommandCursor
from mongomock import DuplicateKeyError, BulkWriteError
from mongomock.filtering import filter_applies
from mongomock.filtering import iter_key_candidates
from mongomock import helpers
from mongomock import InvalidOperation
from mongomock import ObjectId
from mongomock import OperationFailure
from mongomock.results import BulkWriteResult
from mongomock.results import DeleteResult
from mongomock.results import InsertManyResult
from mongomock.results import InsertOneResult
from mongomock.results import UpdateResult
from mongomock.write_concern import WriteConcern
from mongomock import WriteError
lock = threading.RLock()
def validate_is_mapping(option, value):
    """Raise TypeError unless *value* is a Mapping (dict, SON, ...).

    *option* is the parameter name used in the error message.
    """
    # collections.Mapping was removed from the collections namespace in
    # Python 3.10; import from collections.abc with a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if not isinstance(value, Mapping):
        raise TypeError('%s must be an instance of dict, bson.son.SON, or '
                        'other type that inherits from '
                        'collections.Mapping' % (option,))
def validate_is_mutable_mapping(option, value):
    """Raise TypeError unless *value* is a MutableMapping.

    *option* is the parameter name used in the error message.
    """
    # collections.MutableMapping was removed from the collections namespace
    # in Python 3.10; import from collections.abc with a Python 2 fallback.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # Python 2
        from collections import MutableMapping
    if not isinstance(value, MutableMapping):
        raise TypeError('%s must be an instance of dict, bson.son.SON, or '
                        'other type that inherits from '
                        'collections.MutableMapping' % (option,))
def validate_ok_for_replace(replacement):
    """Reject replacement documents whose first key is an update operator."""
    validate_is_mapping('replacement', replacement)
    if not replacement:
        return
    first_key = next(iter(replacement))
    if first_key.startswith('$'):
        raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
    """Require a non-empty update document whose first key is a $ operator."""
    validate_is_mapping('update', update)
    if not update:
        raise ValueError('update only works with $ operators')
    first_key = next(iter(update))
    if not first_key.startswith('$'):
        raise ValueError('update only works with $ operators')
def validate_write_concern_params(**params):
    # Constructing a WriteConcern validates the options; the instance is
    # discarded, so this only serves to raise on invalid parameters.
    if params:
        WriteConcern(**params)
def get_value_by_dot(doc, key):
    """Return the value at dotted *key* inside nested dict *doc*.

    Raises KeyError when any path component is missing.
    """
    node = doc
    for part in key.split('.'):
        node = node[part]
    return node
def set_value_by_dot(doc, key, value):
    """Set *value* at dotted *key* in *doc*, creating intermediate dicts.

    Returns *doc* for convenience.
    """
    parts = key.split('.')
    node = doc
    for part in parts[:-1]:
        # setdefault both creates the missing level and descends into it.
        node = node.setdefault(part, {})
    node[parts[-1]] = value
    return doc
class BulkWriteOperation(object):
    """A find()-scoped pending write inside a BulkOperationBuilder.

    Methods do not execute anything immediately; they append closures to
    the builder's executor list, which run on builder.execute().
    """

    def __init__(self, builder, selector, is_upsert=False):
        self.builder = builder
        self.selector = selector
        self.is_upsert = is_upsert

    def upsert(self):
        # Returns a *new* operation flagged as upsert; self is unchanged.
        assert not self.is_upsert
        return BulkWriteOperation(self.builder, self.selector, is_upsert=True)

    def register_remove_op(self, multi):
        collection = self.builder.collection
        selector = self.selector

        def exec_remove():
            # Deferred: runs during BulkOperationBuilder.execute().
            op_result = collection.remove(selector, multi=multi)
            if op_result.get("ok"):
                return {'nRemoved': op_result.get('n')}
            err = op_result.get("err")
            if err:
                return {"writeErrors": [err]}
            return {}
        self.builder.executors.append(exec_remove)

    def remove(self):
        assert not self.is_upsert
        self.register_remove_op(multi=True)

    def remove_one(self,):
        assert not self.is_upsert
        self.register_remove_op(multi=False)

    def register_update_op(self, document, multi, **extra_args):
        # remove=True means "replace" semantics, where $-operator
        # validation does not apply.
        if not extra_args.get("remove"):
            validate_ok_for_update(document)

        collection = self.builder.collection
        selector = self.selector

        def exec_update():
            # Deferred: translate the collection's raw _update() result
            # into the pymongo-style bulk result keys.
            result = collection._update(spec=selector, document=document,
                                        multi=multi, upsert=self.is_upsert,
                                        **extra_args)
            ret_val = {}
            if result.get('upserted'):
                ret_val["upserted"] = result.get('upserted')
                ret_val["nUpserted"] = result.get('n')
            modified = result.get('nModified')
            if modified is not None:
                ret_val['nModified'] = modified
                ret_val['nMatched'] = modified
            if result.get('err'):
                ret_val['err'] = result.get('err')
            return ret_val
        self.builder.executors.append(exec_update)

    def update(self, document):
        self.register_update_op(document, multi=True)

    def update_one(self, document):
        self.register_update_op(document, multi=False)

    def replace_one(self, document):
        self.register_update_op(document, multi=False, remove=True)
class BulkOperationBuilder(object):
    """Collects deferred write callables and runs them all on execute(),
    aggregating their individual results into one pymongo-style bulk
    result dict."""

    def __init__(self, collection, ordered=False):
        self.collection = collection
        self.ordered = ordered
        self.results = {}
        # Zero-argument closures appended by insert()/BulkWriteOperation.
        self.executors = []
        self.done = False
        # Whether nModified should appear in the final result for each
        # operation kind (pymongo omits it in some server-version cases).
        self._insert_returns_nModified = True
        self._update_returns_nModified = True

    def find(self, selector):
        return BulkWriteOperation(self, selector)

    def insert(self, doc):
        def exec_insert():
            self.collection.insert(doc)
            return {'nInserted': 1}
        self.executors.append(exec_insert)

    def __aggregate_operation_result(self, total_result, key, value):
        # Merge one operation's result entry into the running totals:
        # ints are summed, lists are extended; "upserted" entries also
        # record their positional index, mirroring pymongo's format.
        agg_val = total_result.get(key)
        assert agg_val is not None, "Unknow operation result %s=%s" \
                                    " (unrecognized key)" % (key, value)
        if isinstance(agg_val, int):
            total_result[key] += value
        elif isinstance(agg_val, list):
            if key == "upserted":
                new_element = {"index": len(agg_val), "_id": value}
                agg_val.append(new_element)
            else:
                agg_val.append(value)
        else:
            assert False, "Fixme: missed aggreation rule for type: %s for" \
                          " key {%s=%s}" % (type(agg_val), key, agg_val)

    def _set_nModified_policy(self, insert, update):
        self._insert_returns_nModified = insert
        self._update_returns_nModified = update

    def execute(self, write_concern=None):
        """Run every queued operation once and return the aggregate result.

        Raises InvalidOperation when empty or already executed.
        """
        if not self.executors:
            raise InvalidOperation("Bulk operation empty!")
        if self.done:
            raise InvalidOperation("Bulk operation already executed!")
        self.done = True
        result = {'nModified': 0, 'nUpserted': 0, 'nMatched': 0,
                  'writeErrors': [], 'upserted': [], 'writeConcernErrors': [],
                  'nRemoved': 0, 'nInserted': 0}

        has_update = False
        has_insert = False
        broken_nModified_info = False
        for execute_func in self.executors:
            exec_name = execute_func.__name__
            op_result = execute_func()
            for (key, value) in op_result.items():
                self.__aggregate_operation_result(result, key, value)
            if exec_name == "exec_update":
                has_update = True
                if "nModified" not in op_result:
                    broken_nModified_info = True
            has_insert |= exec_name == "exec_insert"

        # Drop nModified when any update failed to report it, or when the
        # configured policy says neither operation kind reports it.
        if broken_nModified_info:
            result.pop('nModified')
        elif has_insert and self._insert_returns_nModified:
            pass
        elif has_update and self._update_returns_nModified:
            pass
        elif self._update_returns_nModified and self._insert_returns_nModified:
            pass
        else:
            result.pop('nModified')
        return result

    def add_insert(self, doc):
        self.insert(doc)

    def add_update(self, selector, doc, multi, upsert, collation=None):
        write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
        write_operation.register_update_op(doc, multi)

    def add_replace(self, selector, doc, upsert, collation=None):
        write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
        write_operation.replace_one(doc)

    def add_delete(self, selector, just_one, collation=None):
        write_operation = BulkWriteOperation(self, selector, is_upsert=False)
        write_operation.register_remove_op(not just_one)
class Collection(object):
def __init__(self, db, name):
self.name = name
self.full_name = "{0}.{1}".format(db.name, name)
self.database = db
self._documents = OrderedDict()
self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self.database, self.name)
def __getitem__(self, name):
return self.database[self.name + '.' + name]
def __getattr__(self, name):
return self.__getitem__(name)
def initialize_unordered_bulk_op(self):
    # Returns a builder whose queued writes carry the "unordered" flag.
    return BulkOperationBuilder(self, ordered=False)
    def initialize_ordered_bulk_op(self):
        """Return a builder for an ordered bulk-write batch."""
        return BulkOperationBuilder(self, ordered=True)
def insert(self, data, manipulate=True, check_keys=True,
continue_on_error=False, **kwargs):
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._insert(data)
def insert_one(self, document):
validate_is_mutable_mapping('document', document)
return InsertOneResult(self._insert(document), acknowledged=True)
def insert_many(self, documents, ordered=True):
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError('documents must be a non-empty list')
for document in documents:
validate_is_mutable_mapping('document', document)
try:
return InsertManyResult(self._insert(documents), acknowledged=True)
except DuplicateKeyError:
raise BulkWriteError('batch op errors occurred')
    def _insert(self, data):
        """Insert *data* (one document or a list) and return its _id(s).

        Enforces string keys, optional BSON key validation, _id generation,
        and primary-key / unique-index duplicate checks before storing an
        internalized (deep-copied) version of the document.
        """
        if isinstance(data, list):
            return [self._insert(item) for item in data]
        if not all(isinstance(k, string_types) for k in data):
            raise ValueError("Document keys must be strings")
        if BSON:
            # bson validation
            BSON.encode(data, check_keys=True)
        if '_id' not in data:
            data['_id'] = ObjectId()
        object_id = data['_id']
        if isinstance(object_id, dict):
            # dict _ids are unhashable; wrap so they can key the store
            object_id = helpers.hashdict(object_id)
        if object_id in self._documents:
            raise DuplicateKeyError("Duplicate Key Error", 11000)
        # enforce unique indexes registered via create_index(unique=True)
        for unique, is_sparse in self._uniques:
            find_kwargs = {}
            for key, direction in unique:
                find_kwargs[key] = data.get(key, None)
            answer = self.find(find_kwargs)
            if answer.count() > 0 and not (is_sparse and find_kwargs[key] is None):
                raise DuplicateKeyError("Duplicate Key Error", 11000)
        with lock:
            self._documents[object_id] = self._internalize_dict(data)
        return data['_id']
def _internalize_dict(self, d):
return {k: copy.deepcopy(v) for k, v in iteritems(d)}
def _has_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts:
if part not in sub_doc:
return False
sub_doc = sub_doc[part]
return True
def _remove_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts[:-1]:
sub_doc = sub_doc[part]
del sub_doc[key_parts[-1]]
def update_one(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert),
acknowledged=True)
def update_many(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert,
multi=True),
acknowledged=True)
def replace_one(self, filter, replacement, upsert=False):
validate_ok_for_replace(replacement)
return UpdateResult(self._update(filter, replacement, upsert=upsert),
acknowledged=True)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=False, **kwargs):
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
return self._update(spec, document, upsert, manipulate, multi,
check_keys, **kwargs)
    def _update(self, spec, document, upsert=False, manipulate=False,
                multi=False, check_keys=False, **kwargs):
        """Apply a MongoDB-style *document* update to documents matching *spec*.

        Handles operator updates ($-keys registered in _updaters, plus
        $setOnInsert, $currentDate, $addToSet, $pull, $pullAll, $push),
        whole-document replacement, and upserts when nothing matches.
        Returns a server-style response dict (n / nModified / upserted...).
        """
        validate_is_mapping('spec', spec)
        validate_is_mapping('document', document)
        updated_existing = False
        upserted_id = None
        num_updated = 0
        # A trailing None sentinel lets the same loop body perform the upsert
        # when no document matched.
        for existing_document in itertools.chain(self._iter_documents(spec), [None]):
            # we need was_insert for the setOnInsert update operation
            was_insert = False
            # the sentinel document means we should do an upsert
            if existing_document is None:
                if not upsert or num_updated:
                    continue
                _id = document.get('_id')
                to_insert = dict(spec, _id=_id) if _id else spec
                to_insert = self._expand_dots(to_insert)
                upserted_id = self._insert(self._discard_operators(to_insert))
                existing_document = self._documents[upserted_id]
                was_insert = True
            else:
                updated_existing = True
            num_updated += 1
            first = True
            subdocument = None
            for k, v in iteritems(document):
                if k in _updaters.keys():
                    updater = _updaters[k]
                    subdocument = self._update_document_fields_with_positional_awareness(
                        existing_document, v, spec, updater, subdocument)
                elif k == '$setOnInsert':
                    if not was_insert:
                        continue
                    subdocument = self._update_document_fields_with_positional_awareness(
                        existing_document, v, spec, _set_updater, subdocument)
                elif k == '$currentDate':
                    for value in itervalues(v):
                        if value == {'$type': 'timestamp'}:
                            raise NotImplementedError('timestamp is not supported so far')
                    subdocument = self._update_document_fields_with_positional_awareness(
                        existing_document, v, spec, _current_date_updater, subdocument)
                elif k == '$addToSet':
                    for field, value in iteritems(v):
                        nested_field_list = field.rsplit('.')
                        if len(nested_field_list) == 1:
                            if field not in existing_document:
                                existing_document[field] = []
                            # document should be a list append to it
                            if isinstance(value, dict):
                                if '$each' in value:
                                    # append the list to the field
                                    existing_document[field] += [
                                        obj for obj in list(value['$each'])
                                        if obj not in existing_document[field]]
                                    continue
                            if value not in existing_document[field]:
                                existing_document[field].append(value)
                            continue
                        # push to array in a nested attribute
                        else:
                            # create nested attributes if they do not exist
                            subdocument = existing_document
                            for field in nested_field_list[:-1]:
                                if field not in subdocument:
                                    subdocument[field] = {}
                                subdocument = subdocument[field]
                            # we're pushing a list
                            push_results = []
                            if nested_field_list[-1] in subdocument:
                                # if the list exists, then use that list
                                push_results = subdocument[
                                    nested_field_list[-1]]
                            if isinstance(value, dict) and '$each' in value:
                                push_results += [
                                    obj for obj in list(value['$each'])
                                    if obj not in push_results]
                            elif value not in push_results:
                                push_results.append(value)
                            subdocument[nested_field_list[-1]] = push_results
                elif k == '$pull':
                    for field, value in iteritems(v):
                        nested_field_list = field.rsplit('.')
                        # nested fields includes a positional element
                        # need to find that element
                        if '$' in nested_field_list:
                            if not subdocument:
                                subdocument = self._get_subdocument(
                                    existing_document, spec, nested_field_list)
                            # value should be a dictionary since we're pulling
                            pull_results = []
                            # and the last subdoc should be an array
                            for obj in subdocument[nested_field_list[-1]]:
                                if isinstance(obj, dict):
                                    for pull_key, pull_value in iteritems(value):
                                        if obj[pull_key] != pull_value:
                                            pull_results.append(obj)
                                    continue
                                if obj != value:
                                    pull_results.append(obj)
                            # cannot write to doc directly as it doesn't save to
                            # existing_document
                            subdocument[nested_field_list[-1]] = pull_results
                        else:
                            arr = existing_document
                            for field in nested_field_list:
                                if field not in arr:
                                    break
                                arr = arr[field]
                            if not isinstance(arr, list):
                                continue
                            if isinstance(value, dict):
                                for idx, obj in enumerate(arr):
                                    if filter_applies(value, obj):
                                        del arr[idx]
                            else:
                                for idx, obj in enumerate(arr):
                                    if value == obj:
                                        del arr[idx]
                elif k == '$pullAll':
                    for field, value in iteritems(v):
                        nested_field_list = field.rsplit('.')
                        if len(nested_field_list) == 1:
                            if field in existing_document:
                                arr = existing_document[field]
                                existing_document[field] = [
                                    obj for obj in arr if obj not in value]
                            continue
                        else:
                            subdocument = existing_document
                            for nested_field in nested_field_list[:-1]:
                                if nested_field not in subdocument:
                                    break
                                subdocument = subdocument[nested_field]
                            if nested_field_list[-1] in subdocument:
                                arr = subdocument[nested_field_list[-1]]
                                subdocument[nested_field_list[-1]] = [
                                    obj for obj in arr if obj not in value]
                elif k == '$push':
                    for field, value in iteritems(v):
                        nested_field_list = field.rsplit('.')
                        if len(nested_field_list) == 1:
                            if field not in existing_document:
                                existing_document[field] = []
                            # document should be a list
                            # append to it
                            if isinstance(value, dict):
                                if '$each' in value:
                                    # append the list to the field
                                    existing_document[field] += list(value['$each'])
                                    continue
                            existing_document[field].append(value)
                            continue
                        # nested fields includes a positional element
                        # need to find that element
                        elif '$' in nested_field_list:
                            if not subdocument:
                                subdocument = self._get_subdocument(
                                    existing_document, spec, nested_field_list)
                            # we're pushing a list
                            push_results = []
                            if nested_field_list[-1] in subdocument:
                                # if the list exists, then use that list
                                push_results = subdocument[nested_field_list[-1]]
                            if isinstance(value, dict):
                                # check to see if we have the format
                                # { '$each': [] }
                                if '$each' in value:
                                    push_results += list(value['$each'])
                                else:
                                    push_results.append(value)
                            else:
                                push_results.append(value)
                            # cannot write to doc directly as it doesn't save to
                            # existing_document
                            subdocument[nested_field_list[-1]] = push_results
                        # push to array in a nested attribute
                        else:
                            # create nested attributes if they do not exist
                            subdocument = existing_document
                            for field in nested_field_list[:-1]:
                                if field not in subdocument:
                                    subdocument[field] = {}
                                subdocument = subdocument[field]
                            # we're pushing a list
                            push_results = []
                            if nested_field_list[-1] in subdocument:
                                # if the list exists, then use that list
                                push_results = subdocument[nested_field_list[-1]]
                            if isinstance(value, dict) and '$each' in value:
                                push_results += list(value['$each'])
                            else:
                                push_results.append(value)
                            subdocument[nested_field_list[-1]] = push_results
                else:
                    if first:
                        # replace entire document
                        for key in document.keys():
                            if key.startswith('$'):
                                # can't mix modifiers with non-modifiers in
                                # update
                                raise ValueError('field names cannot start with $ [{}]'.format(k))
                        _id = spec.get('_id', existing_document.get('_id'))
                        existing_document.clear()
                        if _id:
                            existing_document['_id'] = _id
                        existing_document.update(self._internalize_dict(document))
                        if existing_document['_id'] != _id:
                            raise OperationFailure(
                                "The _id field cannot be changed from {0} to {1}"
                                .format(existing_document['_id'], _id))
                        break
                    else:
                        # can't mix modifiers with non-modifiers in update
                        raise ValueError(
                            'Invalid modifier specified: {}'.format(k))
                first = False
            # if empty document comes
            if len(document) == 0:
                _id = spec.get('_id', existing_document.get('_id'))
                existing_document.clear()
                if _id:
                    existing_document['_id'] = _id
            if not multi:
                break
        return {
            text_type("connectionId"): self.database.client._id,
            text_type("err"): None,
            text_type("n"): num_updated,
            text_type("nModified"): num_updated if updated_existing else 0,
            text_type("ok"): 1,
            text_type("upserted"): upserted_id,
            text_type("updatedExisting"): updated_existing,
        }
    def _get_subdocument(self, existing_document, spec, nested_field_list):
        """This method retrieves the subdocument of the existing_document.nested_field_list.
        It uses the spec to filter through the items. It will continue to grab nested documents
        until it can go no further. It will then return the subdocument that was last saved.
        '$' is the positional operator, so we use the $elemMatch in the spec to find the right
        subdocument in the array.
        """
        # current document in view
        doc = existing_document
        # previous document in view
        subdocument = existing_document
        # current spec in view
        subspec = spec
        # walk down the dictionary
        for subfield in nested_field_list:
            if subfield == '$':
                # positional element should have the equivalent elemMatch in the
                # query
                subspec = subspec['$elemMatch']
                for item in doc:
                    # iterate through
                    if filter_applies(subspec, item):
                        # found the matching item save the parent
                        subdocument = doc
                        # save the item
                        doc = item
                        break
                continue
            subdocument = doc
            doc = doc[subfield]
            # once the spec no longer tracks the path, stop narrowing it
            if subfield not in subspec:
                break
            subspec = subspec[subfield]
        return subdocument
def _expand_dots(self, doc):
expanded = {}
paths = {}
for k, v in iteritems(doc):
key_parts = k.split('.')
sub_doc = v
for i in reversed(range(1, len(key_parts))):
key = key_parts[i]
sub_doc = {key: sub_doc}
key = key_parts[0]
if key in expanded:
raise WriteError("cannot infer query fields to set, "
"both paths '%s' and '%s' are matched"
% (k, paths[key]))
paths[key] = k
expanded[key] = sub_doc
return expanded
def _discard_operators(self, doc):
# TODO(this looks a little too naive...)
return {k: v for k, v in iteritems(doc) if not k.startswith("$")}
def find(self, filter=None, projection=None, skip=0, limit=0,
no_cursor_timeout=False, cursor_type=None, sort=None,
allow_partial_results=False, oplog_replay=False, modifiers=None,
batch_size=0, manipulate=True):
spec = filter
if spec is None:
spec = {}
validate_is_mapping('filter', spec)
return Cursor(self, spec, sort, projection, skip, limit)
def _get_dataset(self, spec, sort, fields, as_class):
dataset = (self._copy_only_fields(document, fields, as_class)
for document in self._iter_documents(spec))
if sort:
for sortKey, sortDirection in reversed(sort):
dataset = iter(sorted(
dataset, key=lambda x: _resolve_sort_key(sortKey, x),
reverse=sortDirection < 0))
return dataset
def _copy_field(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_field(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_field(value, container)
return new
else:
return copy.copy(obj)
def _extract_projection_operators(self, fields):
"""Removes and returns fields with projection operators."""
result = {}
allowed_projection_operators = {'$elemMatch'}
for key, value in iteritems(fields):
if isinstance(value, dict):
for op in value:
if op not in allowed_projection_operators:
raise ValueError('Unsupported projection option: {}'.format(op))
result[key] = value
for key in result:
del fields[key]
return result
def _apply_projection_operators(self, ops, doc, doc_copy):
"""Applies projection operators to copied document."""
for field, op in iteritems(ops):
if field not in doc_copy:
if field in doc:
# field was not copied yet (since we are in include mode)
doc_copy[field] = doc[field]
else:
# field doesn't exist in original document, no work to do
continue
if '$elemMatch' in op:
if isinstance(doc_copy[field], list):
# find the first item that matches
matched = False
for item in doc_copy[field]:
if filter_applies(op['$elemMatch'], item):
matched = True
doc_copy[field] = [item]
break
# nothing have matched
if not matched:
del doc_copy[field]
else:
# remove the field since there is nothing to iterate
del doc_copy[field]
    def _copy_only_fields(self, doc, fields, container):
        """Copy only the specified fields.

        *fields* is an include (all values 1) or exclude (all values 0)
        projection — mixing is rejected, with _id handled separately.
        Projection operators such as $elemMatch are stripped first and
        applied after the plain copy.
        """
        if fields is None:
            return self._copy_field(doc, container)
        else:
            if not fields:
                fields = {"_id": 1}
            if not isinstance(fields, dict):
                fields = helpers._fields_list_to_dict(fields)
            # we can pass in something like {"_id":0, "field":1}, so pull the id
            # value out and hang on to it until later
            id_value = fields.pop('_id', 1)
            # filter out fields with projection operators, we will take care of them later
            projection_operators = self._extract_projection_operators(fields)
            # other than the _id field, all fields must be either includes or
            # excludes, this can evaluate to 0
            if len(set(list(fields.values()))) > 1:
                raise ValueError(
                    'You cannot currently mix including and excluding fields.')
            # if we have novalues passed in, make a doc_copy based on the
            # id_value
            if len(list(fields.values())) == 0:
                if id_value == 1:
                    doc_copy = container()
                else:
                    doc_copy = self._copy_field(doc, container)
            # if 1 was passed in as the field values, include those fields
            elif list(fields.values())[0] == 1:
                doc_copy = container()
                for key in fields:
                    key_parts = key.split('.')
                    subdocument = doc
                    subdocument_copy = doc_copy
                    last_copy = subdocument_copy
                    full_key_path_found = True
                    # walk down to the parent of the last path segment,
                    # mirroring the structure into the copy as we go
                    for key_part in key_parts[:-1]:
                        if key_part not in subdocument:
                            full_key_path_found = False
                            break
                        subdocument = subdocument[key_part]
                        last_copy = subdocument_copy
                        subdocument_copy = subdocument_copy.setdefault(key_part, {})
                    if full_key_path_found:
                        last_key = key_parts[-1]
                        if isinstance(subdocument, dict) and last_key in subdocument:
                            subdocument_copy[last_key] = subdocument[last_key]
                        elif isinstance(subdocument, (list, tuple)):
                            # project the key out of each array element that has it
                            subdocument = [{last_key: x[last_key]}
                                           for x in subdocument if last_key in x]
                            if subdocument:
                                last_copy[key_parts[-2]] = subdocument
            # otherwise, exclude the fields passed in
            else:
                doc_copy = self._copy_field(doc, container)
                for key in fields:
                    key_parts = key.split('.')
                    subdocument_copy = doc_copy
                    full_key_path_found = True
                    for key_part in key_parts[:-1]:
                        if key_part not in subdocument_copy:
                            full_key_path_found = False
                            break
                        subdocument_copy = subdocument_copy[key_part]
                    if not full_key_path_found or key_parts[-1] not in subdocument_copy:
                        continue
                    del subdocument_copy[key_parts[-1]]
            # set the _id value if we requested it, otherwise remove it
            if id_value == 0:
                doc_copy.pop('_id', None)
            else:
                if '_id' in doc:
                    doc_copy['_id'] = doc['_id']
            fields['_id'] = id_value  # put _id back in fields
            # time to apply the projection operators and put back their fields
            self._apply_projection_operators(projection_operators, doc, doc_copy)
            for field, op in iteritems(projection_operators):
                fields[field] = op
            return doc_copy
def _update_document_fields(self, doc, fields, updater):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
self._update_document_single_field(doc, k, v, updater)
    def _update_document_fields_positional(self, doc, fields, spec, updater,
                                           subdocument=None):
        """Implements the $set behavior on an existing document

        Keys containing the positional '$' segment are resolved by matching
        the corresponding $elemMatch (or key-prefixed) portion of *spec*
        against array items; plain keys fall through to
        _update_document_single_field. Returns the resolved subdocument so
        the caller can reuse it for subsequent positional updates.
        """
        for k, v in iteritems(fields):
            if '$' in k:
                field_name_parts = k.split('.')
                if not subdocument:
                    current_doc = doc
                    subspec = spec
                    for part in field_name_parts[:-1]:
                        if part == '$':
                            subspec = subspec.get('$elemMatch', subspec)
                            for item in current_doc:
                                if filter_applies(subspec, item):
                                    current_doc = item
                                    break
                            continue
                        # narrow the spec to the entries addressing this part
                        new_spec = {}
                        for el in subspec:
                            if el.startswith(part):
                                if len(el.split(".")) > 1:
                                    new_spec[".".join(
                                        el.split(".")[1:])] = subspec[el]
                                else:
                                    new_spec = subspec[el]
                        subspec = new_spec
                        current_doc = current_doc[part]
                    subdocument = current_doc
                    if (field_name_parts[-1] == '$' and
                            isinstance(subdocument, list)):
                        # trailing '$': replace the first matching array item
                        for i, doc in enumerate(subdocument):
                            if filter_applies(subspec, doc):
                                subdocument[i] = v
                                break
                        continue
                updater(subdocument, field_name_parts[-1], v)
                continue
            # otherwise, we handle it the standard way
            self._update_document_single_field(doc, k, v, updater)
        return subdocument
def _update_document_fields_with_positional_awareness(self, existing_document, v, spec,
updater, subdocument):
positional = any('$' in key for key in iterkeys(v))
if positional:
return self._update_document_fields_positional(
existing_document, v, spec, updater, subdocument)
self._update_document_fields(existing_document, v, updater)
return subdocument
    def _update_document_single_field(self, doc, field_name, field_value, updater):
        # Walk the dotted path; numeric parts (or '$' -> first element) index
        # into lists, dict levels are created on demand via setdefault.
        field_name_parts = field_name.split(".")
        for part in field_name_parts[:-1]:
            if isinstance(doc, list):
                try:
                    if part == '$':
                        doc = doc[0]
                    else:
                        doc = doc[int(part)]
                    continue
                except ValueError:
                    # non-numeric part against a list: fall through with doc
                    # unchanged and move on to the next path segment
                    pass
            elif isinstance(doc, dict):
                doc = doc.setdefault(part, {})
            else:
                # scalar encountered mid-path: nothing to update
                return
        field_name = field_name_parts[-1]
        if isinstance(doc, list):
            # NOTE(review): a non-numeric final segment would raise ValueError
            # here (only IndexError is swallowed) — confirm that is intended.
            try:
                doc[int(field_name)] = field_value
            except IndexError:
                pass
        else:
            updater(doc, field_name, field_value)
def _iter_documents(self, filter=None):
return (document for document in list(itervalues(self._documents))
if filter_applies(filter, document))
def find_one(self, filter=None, *args, **kwargs):
# Allow calling find_one with a non-dict argument that gets used as
# the id for the query.
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
try:
return next(self.find(filter, *args, **kwargs))
except StopIteration:
return None
def find_one_and_delete(self, filter, projection=None, sort=None, **kwargs):
kwargs['remove'] = True
validate_is_mapping('filter', filter)
return self._find_and_modify(filter, projection, sort=sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_replace(replacement)
return self._find_and_modify(filter, projection, replacement, upsert,
sort, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_update(update)
return self._find_and_modify(filter, projection, update, upsert,
sort, return_document, **kwargs)
def find_and_modify(self, query={}, update=None, upsert=False, sort=None,
full_response=False, manipulate=False, **kwargs):
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
return self._find_and_modify(query, update=update, upsert=upsert,
sort=sort, **kwargs)
def _find_and_modify(self, query, projection=None, update=None,
upsert=False, sort=None,
return_document=ReturnDocument.BEFORE, **kwargs):
remove = kwargs.get("remove", False)
if kwargs.get("new", False) and remove:
# message from mongodb
raise OperationFailure("remove and returnNew can't co-exist")
if not (remove or update):
raise ValueError("Must either update or remove")
if remove and update:
raise ValueError("Can't do both update and remove")
old = self.find_one(query, projection=projection, sort=sort)
if not old and not upsert:
return
if old and '_id' in old:
query = {'_id': old['_id']}
if remove:
self.delete_one(query)
else:
self._update(query, update, upsert)
if return_document is ReturnDocument.AFTER or kwargs.get('new'):
return self.find_one(query, projection)
return old
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
validate_is_mutable_mapping("to_save", to_save)
validate_write_concern_params(**kwargs)
if "_id" not in to_save:
return self.insert(to_save)
else:
self._update({"_id": to_save["_id"]}, to_save, True,
manipulate, check_keys=True, **kwargs)
return to_save.get("_id", None)
def delete_one(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter), True)
def delete_many(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter, multi=True), True)
def _delete(self, filter, multi=False):
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
to_delete = list(self.find(filter))
deleted_count = 0
for doc in to_delete:
doc_id = doc['_id']
if isinstance(doc_id, dict):
doc_id = helpers.hashdict(doc_id)
del self._documents[doc_id]
deleted_count += 1
if not multi:
break
return {
"connectionId": self.database.client._id,
"n": deleted_count,
"ok": 1.0,
"err": None,
}
def remove(self, spec_or_id=None, multi=True, **kwargs):
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._delete(spec_or_id, multi=multi)
def count(self, filter=None, **kwargs):
if filter is None:
return len(self._documents)
else:
return self.find(filter).count()
    def drop(self):
        """Drop this collection from its database."""
        self.database.drop_collection(self.name)
    def ensure_index(self, key_or_list, cache_for=300, **kwargs):
        """Deprecated alias for :meth:`create_index` (cache_for is not used there)."""
        self.create_index(key_or_list, cache_for, **kwargs)
def create_index(self, key_or_list, cache_for=300, **kwargs):
if kwargs.pop('unique', False):
self._uniques.append((helpers.index_list(key_or_list), kwargs.pop('sparse', False)))
    def drop_index(self, index_or_name):
        """No-op: dropped indexes are not tracked here."""
        pass
    def index_information(self):
        """Return an empty mapping; index metadata is not simulated."""
        return {}
def map_reduce(self, map_func, reduce_func, out, full_response=False,
query=None, limit=0):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to run Map-Reduce. "
"Use 'pip install pyexecjs pymongo' to support Map-Reduce mock."
)
if limit == 0:
limit = None
start_time = time.clock()
out_collection = None
reduced_rows = None
full_dict = {
'counts': {
'input': 0,
'reduce': 0,
'emit': 0,
'output': 0},
'timeMillis': 0,
'ok': 1.0,
'result': None}
map_ctx = execjs.compile("""
function doMap(fnc, docList) {
var mappedDict = {};
function emit(key, val) {
if (key['$oid']) {
mapped_key = '$oid' + key['$oid'];
}
else {
mapped_key = key;
}
if(!mappedDict[mapped_key]) {
mappedDict[mapped_key] = [];
}
mappedDict[mapped_key].push(val);
}
mapper = eval('('+fnc+')');
var mappedList = new Array();
for(var i=0; i<docList.length; i++) {
var thisDoc = eval('('+docList[i]+')');
var mappedVal = (mapper).call(thisDoc);
}
return mappedDict;
}
""")
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
var reducedList = new Array();
reducer = eval('('+fnc+')');
for(var key in docList) {
var reducedVal = {'_id': key,
'value': reducer(key, docList[key])};
reducedList.push(reducedVal);
}
return reducedList;
}
""")
doc_list = [json.dumps(doc, default=json_util.default)
for doc in self.find(query)]
mapped_rows = map_ctx.call('doMap', map_func, doc_list)
reduced_rows = reduce_ctx.call('doReduce', reduce_func, mapped_rows)[:limit]
for reduced_row in reduced_rows:
if reduced_row['_id'].startswith('$oid'):
reduced_row['_id'] = ObjectId(reduced_row['_id'][4:])
reduced_rows = sorted(reduced_rows, key=lambda x: x['_id'])
if full_response:
full_dict['counts']['input'] = len(doc_list)
for key in mapped_rows.keys():
emit_count = len(mapped_rows[key])
full_dict['counts']['emit'] += emit_count
if emit_count > 1:
full_dict['counts']['reduce'] += 1
full_dict['counts']['output'] = len(reduced_rows)
if isinstance(out, (str, bytes)):
out_collection = getattr(self.database, out)
out_collection.drop()
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = out
elif isinstance(out, SON) and out.get('replace') and out.get('db'):
# Must be of the format SON([('replace','results'),('db','outdb')])
out_db = getattr(self.database._client, out['db'])
out_collection = getattr(out_db, out['replace'])
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = {'db': out['db'], 'collection': out['replace']}
elif isinstance(out, dict) and out.get('inline'):
ret_val = reduced_rows
full_dict['result'] = reduced_rows
else:
raise TypeError("'out' must be an instance of string, dict or bson.SON")
full_dict['timeMillis'] = int(round((time.clock() - start_time) * 1000))
if full_response:
ret_val = full_dict
return ret_val
def inline_map_reduce(self, map_func, reduce_func, full_response=False,
query=None, limit=0):
return self.map_reduce(
map_func, reduce_func, {'inline': 1}, full_response, query, limit)
    def distinct(self, key, filter=None):
        """Return the distinct values of *key* among documents matching *filter*."""
        return self.find(filter).distinct(key)
    def group(self, key, condition, initial, reduce, finalize=None):
        """Emulate the legacy group command using a JavaScript reduce via PyExecJS.

        Documents matching *condition* are projected down to the *key* and
        *reduce* fields, seeded with *initial* values, sorted and grouped
        per key, then folded with the JavaScript *reduce* function.
        *finalize* is accepted for API compatibility but not applied here.
        """
        if execjs is None:
            raise NotImplementedError(
                "PyExecJS is required in order to use group. "
                "Use 'pip install pyexecjs pymongo' to support group mock."
            )
        reduce_ctx = execjs.compile("""
            function doReduce(fnc, docList) {
                reducer = eval('('+fnc+')');
                for(var i=0, l=docList.length; i<l; i++) {
                    try {
                        reducedVal = reducer(docList[i-1], docList[i]);
                    }
                    catch (err) {
                        continue;
                    }
                }
                return docList[docList.length - 1];
            }
        """)
        ret_array = []
        doc_list_copy = []
        ret_array_copy = []
        reduced_val = {}
        doc_list = [doc for doc in self.find(condition)]
        # project each document down to the grouping/reduce fields and
        # seed any missing keys from *initial*
        for doc in doc_list:
            doc_copy = copy.deepcopy(doc)
            for k in doc:
                if isinstance(doc[k], ObjectId):
                    doc_copy[k] = str(doc[k])
                if k not in key and k not in reduce:
                    del doc_copy[k]
            for initial_key in initial:
                if initial_key in doc.keys():
                    pass
                else:
                    doc_copy[initial_key] = initial[initial_key]
            doc_list_copy.append(doc_copy)
        doc_list = doc_list_copy
        for k in key:
            doc_list = sorted(doc_list, key=lambda x: _resolve_key(k, x))
        for k in key:
            if not isinstance(k, helpers.basestring):
                raise TypeError(
                    "Keys must be a list of key names, "
                    "each an instance of %s" % helpers.basestring.__name__)
            for k2, group in itertools.groupby(doc_list, lambda item: item[k]):
                group_list = ([x for x in group])
                reduced_val = reduce_ctx.call('doReduce', reduce, group_list)
                ret_array.append(reduced_val)
        # strip non-key/non-initial fields from the reduced results
        for doc in ret_array:
            doc_copy = copy.deepcopy(doc)
            for k in doc:
                if k not in key and k not in initial.keys():
                    del doc_copy[k]
            ret_array_copy.append(doc_copy)
        ret_array = ret_array_copy
        return ret_array
def aggregate(self, pipeline, **kwargs):
pipeline_operators = [
'$project',
'$match',
'$redact',
'$limit',
'$skip',
'$unwind',
'$group',
'$sample'
'$sort',
'$geoNear',
'$lookup'
'$out',
'$indexStats']
group_operators = [
'$addToSet',
'$first',
'$last',
'$max',
'$min',
'$avg',
'$push',
'$sum',
'$stdDevPop',
'$stdDevSamp']
project_operators = [
'$max',
'$min',
'$avg',
'$sum',
'$stdDevPop',
'$stdDevSamp',
'$arrayElemAt'
]
boolean_operators = ['$and', '$or', '$not'] # noqa
set_operators = [ # noqa
'$setEquals',
'$setIntersection',
'$setDifference',
'$setUnion',
'$setIsSubset',
'$anyElementTrue',
'$allElementsTrue']
comparison_operators = [ # noqa
'$cmp',
'$eq',
'$gt',
'$gte',
'$lt',
'$lte',
'$ne']
arithmetic_operators = [ # noqa
'$abs',
'$add',
'$ceil',
'$divide',
'$exp',
'$floor',
'$ln',
'$log',
'$log10',
'$mod',
'$multiply',
'$pow',
'$sqrt',
'$subtract',
'$trunc']
string_operators = [ # noqa
'$concat',
'$strcasecmp',
'$substr',
'$toLower',
'$toUpper']
text_search_operators = ['$meta'] # noqa
array_operators = [ # noqa
'$arrayElemAt',
'$concatArrays',
'$filter',
'$isArray',
'$size',
'$slice']
projection_operators = ['$map', '$let', '$literal'] # noqa
date_operators = [ # noqa
'$dayOfYear',
'$dayOfMonth',
'$dayOfWeek',
'$year',
'$month',
'$week',
'$hour',
'$minute',
'$second',
'$millisecond',
'$dateToString']
def _handle_arithmetic_operator(operator, values, doc_dict):
if operator == '$abs':
return abs(_parse_expression(values, doc_dict))
elif operator == '$ceil':
return math.ceil(_parse_expression(values, doc_dict))
elif operator == '$divide':
assert len(values) == 2, 'divide must have only 2 items'
return _parse_expression(values[0], doc_dict) / _parse_expression(values[1],
doc_dict)
elif operator == '$exp':
return math.exp(_parse_expression(values, doc_dict))
elif operator == '$floor':
return math.floor(_parse_expression(values, doc_dict))
elif operator == '$ln':
return math.log(_parse_expression(values, doc_dict))
elif operator == '$log':
assert len(values) == 2, 'log must have only 2 items'
return math.log(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$log10':
return math.log10(_parse_expression(values, doc_dict))
elif operator == '$mod':
assert len(values) == 2, 'mod must have only 2 items'
return math.fmod(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$pow':
assert len(values) == 2, 'pow must have only 2 items'
return math.pow(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$sqrt':
return math.sqrt(_parse_expression(values, doc_dict))
elif operator == '$subtract':
assert len(values) == 2, 'subtract must have only 2 items'
return _parse_expression(values[0], doc_dict) - _parse_expression(values[1],
doc_dict)
else:
raise NotImplementedError("Although '%s' is a valid aritmetic operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_comparison_operator(operator, values, doc_dict):
assert len(values) == 2, 'Comparison requires two expressions'
if operator == '$eq':
return _parse_expression(values[0], doc_dict) == \
_parse_expression(values[1], doc_dict)
elif operator == '$gt':
return _parse_expression(values[0], doc_dict) > \
_parse_expression(values[1], doc_dict)
elif operator == '$gte':
return _parse_expression(values[0], doc_dict) >= \
_parse_expression(values[1], doc_dict)
elif operator == '$lt':
return _parse_expression(values[0], doc_dict) < \
_parse_expression(values[1], doc_dict)
elif operator == '$lte':
return _parse_expression(values[0], doc_dict) <= \
_parse_expression(values[1], doc_dict)
elif operator == '$ne':
return _parse_expression(values[0], doc_dict) != \
_parse_expression(values[1], doc_dict)
else:
raise NotImplementedError(
"Although '%s' is a valid comparison operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_date_operator(operator, values, doc_dict):
    """Evaluate a date-part extraction operator against a datetime expression."""
    out_value = _parse_expression(values, doc_dict)
    extractors = {
        '$dayOfYear': lambda d: d.timetuple().tm_yday,
        '$dayOfMonth': lambda d: d.day,
        '$dayOfWeek': lambda d: d.isoweekday(),
        '$year': lambda d: d.year,
        '$month': lambda d: d.month,
        '$week': lambda d: d.isocalendar()[1],
        '$hour': lambda d: d.hour,
        '$minute': lambda d: d.minute,
        '$second': lambda d: d.second,
        # MongoDB reports whole milliseconds, hence the microsecond division.
        '$millisecond': lambda d: int(d.microsecond / 1000),
    }
    if operator not in extractors:
        raise NotImplementedError(
            "Although '%s' is a valid date operator for the "
            "aggregation pipeline, it is currently not implemented "
            " in Mongomock." % operator)
    return extractors[operator](out_value)
def _handle_project_operator(operator, values, doc_dict):
    """Evaluate a $project-level operator ($min, $arrayElemAt) for one document."""
    if operator == '$min':
        # NOTE(review): only guards against >2 operands; a single-element list
        # would raise IndexError on values[1] below -- confirm intended.
        if len(values) > 2:
            raise NotImplementedError("Although %d is a valid amount of elements in "
                                      "aggregation pipeline, it is currently not "
                                      " implemented in Mongomock" % len(values))
        return min(_parse_expression(values[0], doc_dict),
                   _parse_expression(values[1], doc_dict))
    elif operator == '$arrayElemAt':
        # values is [array reference, index]; Python indexing also makes
        # negative indexes count from the end, matching MongoDB.
        key, index = values
        array = _parse_basic_expression(key, doc_dict)
        v = array[index]
        return v
    else:
        raise NotImplementedError("Although '%s' is a valid project operator for the "
                                  "aggregation pipeline, it is currently not implemented "
                                  "in Mongomock." % operator)
def _parse_basic_expression(expression, doc_dict):
    """Resolve a '$field' (dotted) reference against the document; literals pass through."""
    if isinstance(expression, str) and expression.startswith('$'):
        # NOTE(review): replace('$', '') strips *every* dollar sign, not just
        # the leading one -- field names containing '$' would be mangled.
        get_value = helpers.embedded_item_getter(expression.replace('$', ''))
        return get_value(doc_dict)
    else:
        return expression
def _parse_expression(expression, doc_dict):
    """Recursively evaluate an aggregation expression against one document.

    Non-dict expressions are field references or literals.  A dict whose first
    matching key is a known operator is dispatched to the corresponding
    handler; otherwise each value is evaluated element-wise and a plain dict
    is returned.
    """
    if not isinstance(expression, dict):
        return _parse_basic_expression(expression, doc_dict)

    value_dict = {}
    for k, v in iteritems(expression):
        # The operator lists (arithmetic_operators, ...) are module-level
        # constants defined earlier in this file.
        if k in arithmetic_operators:
            return _handle_arithmetic_operator(k, v, doc_dict)
        elif k in project_operators:
            return _handle_project_operator(k, v, doc_dict)
        elif k in comparison_operators:
            return _handle_comparison_operator(k, v, doc_dict)
        elif k in date_operators:
            return _handle_date_operator(k, v, doc_dict)
        else:
            value_dict[k] = _parse_expression(v, doc_dict)
    return value_dict
def _extend_collection(out_collection, field, expression):
    """Materialise a $project computed field on every document that lacks it.

    If *any* document already carries `field`, the collection is returned
    untouched (the field is assumed to exist everywhere).
    """
    field_exists = False
    for doc in out_collection:
        if field in doc:
            field_exists = True
            break
    if not field_exists:
        for doc in out_collection:
            if isinstance(expression, str) and expression.startswith('$'):
                # Dotted field reference; a missing path leaves the doc unchanged.
                try:
                    doc[field] = get_value_by_dot(doc, expression.lstrip('$'))
                except KeyError:
                    pass
            else:
                # verify expression has operator as first
                # NOTE(review): .copy() assumes the expression is a dict here.
                doc[field] = _parse_expression(expression.copy(), doc)
    return out_collection
# NOTE(review): conditional_operators appears unused in this stage loop.
conditional_operators = ['$cond', '$ifNull']  # noqa
# Start from a materialised copy of the whole collection; each pipeline
# stage rewrites out_collection in order.
out_collection = [doc for doc in self.find()]
for stage in pipeline:
    for k, v in iteritems(stage):
        if k == '$match':
            # Keep only documents matching the filter spec.
            out_collection = [doc for doc in out_collection
                              if filter_applies(v, doc)]
        elif k == '$group':
            grouped_collection = []
            _id = stage['$group']['_id']
            if _id:
                # Sort by the group key so itertools.groupby sees one run per key.
                key_getter = functools.partial(_parse_expression, _id)
                out_collection = sorted(out_collection, key=key_getter)
                grouped = itertools.groupby(out_collection, key_getter)
            else:
                # _id: None -> a single group containing every document.
                grouped = [(None, out_collection)]
            for doc_id, group in grouped:
                group_list = ([x for x in group])
                doc_dict = {'_id': doc_id}
                for field, value in iteritems(v):
                    if field == '_id':
                        continue
                    for operator, key in iteritems(value):
                        if operator in (
                                "$sum",
                                "$avg",
                                "$min",
                                "$max",
                                "$first",
                                "$last",
                                "$addToSet",
                                '$push'
                        ):
                            key_getter = functools.partial(_parse_expression, key)
                            values = [key_getter(doc) for doc in group_list]
                            # NOTE(review): the `val or <default>` idiom below
                            # also replaces legitimate falsy values (0, '', [])
                            # with the default -- confirm intended.
                            if operator == "$sum":
                                val_it = (val or 0 for val in values)
                                doc_dict[field] = sum(val_it)
                            elif operator == "$avg":
                                values = [val or 0 for val in values]
                                doc_dict[field] = sum(values) / max(len(values), 1)
                            elif operator == "$min":
                                val_it = (val or MAXSIZE for val in values)
                                doc_dict[field] = min(val_it)
                            elif operator == "$max":
                                val_it = (val or -MAXSIZE for val in values)
                                doc_dict[field] = max(val_it)
                            elif operator == "$first":
                                doc_dict[field] = values[0]
                            elif operator == "$last":
                                doc_dict[field] = values[-1]
                            elif operator == "$addToSet":
                                val_it = (val or None for val in values)
                                doc_dict[field] = set(val_it)
                            elif operator == '$push':
                                if field not in doc_dict:
                                    doc_dict[field] = []
                                doc_dict[field].extend(values)
                        else:
                            if operator in group_operators:
                                raise NotImplementedError(
                                    "Although %s is a valid group operator for the "
                                    "aggregation pipeline, it is currently not implemented "
                                    "in Mongomock." % operator)
                            else:
                                raise NotImplementedError(
                                    "%s is not a valid group operator for the aggregation "
                                    "pipeline. See http://docs.mongodb.org/manual/meta/"
                                    "aggregation-quick-reference/ for a complete list of "
                                    "valid operators." % operator)
                grouped_collection.append(doc_dict)
            out_collection = grouped_collection
        elif k == '$sort':
            # Apply the compound sort keys in reverse so that, with Python's
            # stable sort, earlier keys take precedence.
            sort_array = []
            for x, y in v.items():
                sort_array.append({x: y})
            for sort_pair in reversed(sort_array):
                for sortKey, sortDirection in sort_pair.items():
                    out_collection = sorted(
                        out_collection,
                        key=lambda x: _resolve_sort_key(sortKey, x),
                        reverse=sortDirection < 0)
        elif k == '$skip':
            out_collection = out_collection[v:]
        elif k == '$limit':
            out_collection = out_collection[:v]
        elif k == '$unwind':
            if not isinstance(v, helpers.basestring) or v[0] != '$':
                raise ValueError(
                    "$unwind failed: exception: field path references must be prefixed "
                    "with a '$' '%s'" % v)
            unwound_collection = []
            for doc in out_collection:
                array_value = get_value_by_dot(doc, v[1:])
                # Missing/empty arrays drop the document, as in MongoDB.
                if array_value in (None, []):
                    continue
                elif not isinstance(array_value, list):
                    raise TypeError(
                        '$unwind must specify an array field, field: '
                        '"%s", value found: %s' % (v, array_value))
                # One deep-copied document per array element.
                for field_item in array_value:
                    unwound_collection.append(copy.deepcopy(doc))
                    unwound_collection[-1] = set_value_by_dot(
                        unwound_collection[-1], v[1:], field_item)
            out_collection = unwound_collection
        elif k == '$project':
            # _id is included by default unless explicitly excluded.
            filter_list = ['_id']
            for field, value in iteritems(v):
                if field == '_id' and not value:
                    filter_list.remove('_id')
                elif value:
                    filter_list.append(field)
                    out_collection = _extend_collection(out_collection, field, value)
            out_collection = [{k: v for (k, v) in x.items() if k in filter_list}
                              for x in out_collection]
        elif k == '$out':
            # TODO(MetrodataTeam): should leave the origin collection unchanged
            collection = self.database.get_collection(v)
            if collection.count() > 0:
                collection.drop()
            collection.insert_many(out_collection)
        else:
            if k in pipeline_operators:
                raise NotImplementedError(
                    "Although '%s' is a valid operator for the aggregation pipeline, it is "
                    "currently not implemented in Mongomock." % k)
            else:
                raise NotImplementedError(
                    "%s is not a valid operator for the aggregation pipeline. "
                    "See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
                    "for a complete list of valid operators." % k)
return CommandCursor(out_collection)
def with_options(
        self, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
    """Mongomock stub: all options are ignored and the same collection is returned."""
    return self
def rename(self, new_name, **kwargs):
    """Rename this collection (delegates to the owning database)."""
    self.database.rename_collection(self.name, new_name, **kwargs)
def bulk_write(self, operations):
    """Apply a sequence of pymongo write-operation objects as one bulk job.

    Each operation registers itself on a BulkOperationBuilder; the combined
    result is wrapped in a BulkWriteResult with acknowledged=True.
    """
    bulk = BulkOperationBuilder(self)
    for operation in operations:
        operation._add_to_bulk(bulk)
    return BulkWriteResult(bulk.execute(), True)
def _resolve_key(key, doc):
    """Return the first candidate value for `key` in `doc`, or NOTHING if absent."""
    for candidate in iter_key_candidates(key, doc):
        return candidate
    return NOTHING
def _resolve_sort_key(key, doc):
    """Build a sort key where documents missing the field order first.

    see http://docs.mongodb.org/manual/reference/method/cursor.sort/#ascending-descending-sort
    """
    value = _resolve_key(key, doc)
    return (0, value) if value is NOTHING else (1, value)
class Cursor(object):
    """Mongomock stand-in for pymongo's Cursor.

    Wraps a lazily re-runnable dataset iterator produced by the owning
    collection and supports the usual driver surface: iteration, chaining
    (sort/skip/limit), count, distinct and index/slice access.
    """

    def __init__(self, collection, spec=None, sort=None, projection=None, skip=0, limit=0):
        super(Cursor, self).__init__()
        self.collection = collection
        self._spec = spec
        self._sort = sort
        self._projection = projection
        self._skip = skip
        self.__id = None
        # NOTE(review): the three attributes below are never read inside this
        # class -- presumably kept for pymongo API parity; confirm.
        self.__retrieved = False
        self.__exhaust = None
        self.__exhaust_mgr = None
        # Factory re-runs the underlying query; rewind() uses it to restart.
        self._factory = functools.partial(collection._get_dataset,
                                          spec, sort, projection, dict)
        # pymongo limit defaults to 0, returning everything
        self._limit = limit if limit != 0 else None
        self.rewind()

    def __iter__(self):
        return self

    def clone(self):
        """Return a fresh cursor over the same query with iteration state reset."""
        return Cursor(self.collection,
                      self._spec, self._sort, self._projection, self._skip, self._limit)

    def __next__(self):
        # Apply the configured skip lazily, exactly once, on first retrieval.
        if self._skip and not self._skipped:
            for i in range(self._skip):
                next(self._dataset)
            self._skipped = self._skip

        # Enforce the limit against the number of documents already emitted.
        if self._limit is not None and self._limit <= self._emitted:
            raise StopIteration()
        if self._limit is not None:
            self._emitted += 1
        # Deep-copy values so callers cannot mutate stored documents in place.
        return {k: copy.deepcopy(v) for k, v in iteritems(next(self._dataset))}
    next = __next__  # Python 2 iterator-protocol alias

    def rewind(self):
        """Restart iteration from the beginning by re-running the query."""
        self._dataset = self._factory()
        self._emitted = 0
        self._skipped = 0

    def sort(self, key_or_list, direction=None):
        """Sort remaining documents; accepts one key or a list of (key, direction).

        Compound sorts are applied in reverse order so that, thanks to the
        stable sort, earlier keys take precedence.
        """
        if direction is None:
            direction = 1
        if isinstance(key_or_list, (tuple, list)):
            for sortKey, sortDirection in reversed(key_or_list):
                self._dataset = iter(
                    sorted(
                        self._dataset,
                        key=lambda x: _resolve_sort_key(
                            sortKey,
                            x),
                        reverse=sortDirection < 0))
        else:
            self._dataset = iter(
                sorted(self._dataset,
                       key=lambda x: _resolve_sort_key(key_or_list, x),
                       reverse=direction < 0))
        return self

    def count(self, with_limit_and_skip=False):
        """Count documents; materialises the dataset but preserves it for iteration."""
        arr = [x for x in self._dataset]
        count = len(arr)
        if with_limit_and_skip:
            if self._skip:
                count -= self._skip
            if self._limit and count > self._limit:
                count = self._limit
        self._dataset = iter(arr)
        return count

    def skip(self, count):
        self._skip = count
        return self

    def limit(self, count):
        # limit(0) means "no limit", matching pymongo.
        self._limit = count if count != 0 else None
        return self

    def batch_size(self, count):
        # Batching is meaningless for an in-memory cursor; kept for API parity.
        return self

    def close(self):
        # Nothing to release for an in-memory cursor.
        pass

    def distinct(self, key):
        """Return distinct values of `key` across the remaining documents.

        Hashable values are deduplicated through a set; dict values (which are
        unhashable) are compared by equality and collected separately.
        """
        if not isinstance(key, helpers.basestring):
            raise TypeError('cursor.distinct key must be a string')
        unique = set()
        unique_dict_vals = []
        for x in iter(self._dataset):
            value = _resolve_key(key, x)
            if value == NOTHING:
                continue
            if isinstance(value, dict):
                if any(dict_val == value for dict_val in unique_dict_vals):
                    continue
                unique_dict_vals.append(value)
            else:
                # Array values contribute each element, mirroring MongoDB.
                unique.update(
                    value if isinstance(
                        value, (tuple, list)) else [value])
        return list(unique) + unique_dict_vals

    def __getitem__(self, index):
        if isinstance(index, slice):
            # Limit the cursor to the given slice
            self._dataset = (x for x in list(self._dataset)[index])
            return self
        elif not isinstance(index, int):
            raise TypeError("index '%s' cannot be applied to Cursor instances" % index)
        elif index < 0:
            raise IndexError('Cursor instances do not support negativeindices')
        else:
            # Materialise so indexing works, then reset the iteration position.
            arr = list(self)
            self._dataset = iter(arr)
            return arr[index]
def _set_updater(doc, field_name, value):
if isinstance(value, (tuple, list)):
value = copy.deepcopy(value)
if isinstance(doc, dict):
doc[field_name] = value
def _unset_updater(doc, field_name, value):
if isinstance(doc, dict):
doc.pop(field_name, None)
def _inc_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = doc.get(field_name, 0) + value
def _max_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = max(doc.get(field_name, value), value)
def _min_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = min(doc.get(field_name, value), value)
def _sum_updater(doc, field_name, current, result):
if isinstance(doc, dict):
result = current + doc.get[field_name, 0]
return result
def _current_date_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = datetime.utcnow()
# Dispatch table mapping MongoDB update operators to their updater callables.
# NOTE(review): _sum_updater and _current_date_updater are defined above but
# not registered in this literal -- confirm whether $currentDate support is
# wired up elsewhere.
_updaters = {
    '$set': _set_updater,
    '$unset': _unset_updater,
    '$inc': _inc_updater,
    '$max': _max_updater,
    '$min': _min_updater,
}
| 41.515676 | 100 | 0.507018 |
b9b53926bf1ae576cd6e1e64a029c9869e3be822 | 9,801 | py | Python | venus.py | AlexCPU/VictronVenus-InfluxDB | 1d04b8a44d6629736407463dcb904ccf6c6eb74c | [
"Apache-2.0"
] | null | null | null | venus.py | AlexCPU/VictronVenus-InfluxDB | 1d04b8a44d6629736407463dcb904ccf6c6eb74c | [
"Apache-2.0"
] | null | null | null | venus.py | AlexCPU/VictronVenus-InfluxDB | 1d04b8a44d6629736407463dcb904ccf6c6eb74c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import datetime
import logging
import numpy as np
from aiohttp import ClientConnectionError
from pyModbusTCP.client import ModbusClient
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder
import asyncio
from aioinflux import InfluxDBClient, InfluxDBWriteError
# Template for the InfluxDB point written each polling cycle; rebuilt per read.
datapoint = {
    'measurement': 'Victron',
    'tags': {},
    'fields': {}
}
# Last raw Modbus holding-register block read from the Venus device.
reg_block = {}
logger = logging.getLogger('victron')
async def write_to_influx(dbhost, dbport, dbname='victron'):
    """Poll the Venus device over Modbus TCP forever and push readings to InfluxDB.

    Each second this reads two holding-register blocks -- the system overview
    at address 800 (27 registers) and the battery summary at address 840
    (7 registers) -- and writes one InfluxDB point per block.  Returns early
    only if the initial InfluxDB connection fails.
    """
    global client
    global datapoint
    global reg_block

    def trunc_float(floatval):
        # Round to two decimal places via string formatting before storing.
        return float('%.2f' % floatval)

    try:
        solar_client = InfluxDBClient(host=dbhost, port=dbport, db=dbname)
        await solar_client.create_database(db=dbname)
    except ClientConnectionError as e:
        logger.error(f'Error during connection to InfluxDb {dbhost}: {e}')
        return

    logger.info('Database opened and initialized')
    while True:
        try:
            # ----- System overview block: address 800, 27 registers -----
            reg_block = {}
            reg_block = client.read_holding_registers(800, 27)
            if reg_block:
                datapoint = {
                    'measurement': 'Victron',
                    'tags': {},
                    'fields': {}
                }
                # print(reg_block)
                # reg_block[0] = Serial
                # reg_block[1] = Serial
                # reg_block[2] = Serial
                # reg_block[3] = Serial
                # reg_block[4] = Serial
                # reg_block[5] = Serial
                # reg_block[6] = CCGX Relay 1 State
                # reg_block[7] = CCGX Relay 2 State
                # reg_block[8] = PV - AC-coupled on output L1
                # reg_block[9] = PV - AC-coupled on output L2
                # reg_block[10] = PV - AC-coupled on output L3
                # reg_block[11] = PV - AC-coupled on input L1
                # reg_block[12] = PV - AC-coupled on input L2
                # reg_block[13] = PV - AC-coupled on input L3
                # reg_block[14] = PV - AC-coupled on generator L1
                # reg_block[15] = PV - AC-coupled on generator L2
                # reg_block[16] = PV - AC-coupled on generator L3
                # reg_block[17] = AC Consumption L1
                # reg_block[18] = AC Consumption L2
                # reg_block[19] = AC Consumption L3
                # reg_block[20] = Grid L1
                # reg_block[21] = Grid L2
                # reg_block[22] = Grid L3
                # reg_block[23] = Genset L1
                # reg_block[24] = Genset L2
                # reg_block[25] = Genset L3
                # reg_block[26] = Active input source
                datapoint['tags']['system'] = 1

                # AC Consumption
                logger.debug(f'Block17: {str(reg_block[17])}')
                logger.debug(f'Block18: {str(reg_block[18])}')
                logger.debug(f'Block19: {str(reg_block[19])}')
                scalefactor = 1
                datapoint['fields']['AC Consumption L1'] = trunc_float(reg_block[17] * scalefactor)
                datapoint['fields']['AC Consumption L2'] = trunc_float(reg_block[18] * scalefactor)
                datapoint['fields']['AC Consumption L3'] = trunc_float(reg_block[19] * scalefactor)

                # Grid -- np.int16 reinterprets the register as signed
                # (power can be negative when exporting).
                # NOTE(review): the debug labels below say Block17..19 but log
                # registers 20..22 -- looks like a copy-paste slip in the messages.
                logger.debug(f'Block17: {str(reg_block[20])}')
                logger.debug(f'Block18: {str(reg_block[21])}')
                logger.debug(f'Block19: {str(reg_block[22])}')
                scalefactor = 1
                datapoint['fields']['Grid L1'] = trunc_float(np.int16(reg_block[20]) * scalefactor)
                datapoint['fields']['Grid L2'] = trunc_float(np.int16(reg_block[21]) * scalefactor)
                datapoint['fields']['Grid L3'] = trunc_float(np.int16(reg_block[22]) * scalefactor)

                # PV On Input
                logger.debug(f'Block11: {str(reg_block[11])}')
                logger.debug(f'Block12: {str(reg_block[12])}')
                logger.debug(f'Block13: {str(reg_block[13])}')
                scalefactor = 1
                datapoint['fields']['PV - AC-coupled on input L1'] = trunc_float(reg_block[11] * scalefactor)
                datapoint['fields']['PV - AC-coupled on input L2'] = trunc_float(reg_block[12] * scalefactor)
                datapoint['fields']['PV - AC-coupled on input L3'] = trunc_float(reg_block[13] * scalefactor)

                # PV On Output
                logger.debug(f'Block8: {str(reg_block[8])}')
                logger.debug(f'Block9: {str(reg_block[9])}')
                logger.debug(f'Block10: {str(reg_block[10])}')
                scalefactor = 1
                datapoint['fields']['PV - AC-coupled on output L1'] = trunc_float(reg_block[8] * scalefactor)
                datapoint['fields']['PV - AC-coupled on output L2'] = trunc_float(reg_block[9] * scalefactor)
                datapoint['fields']['PV - AC-coupled on output L3'] = trunc_float(reg_block[10] * scalefactor)

                # Timestamp in ISO-8601 UTC, as aioinflux expects.
                datapoint['time'] = str(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
                logger.debug(f'Writing to Influx: {str(datapoint)}')
                await solar_client.write(datapoint)
            else:
                # Error during data receive
                if client.last_error() == 2:
                    logger.error(f'Failed to connect to Victron Host {client.host()}!')
                elif client.last_error() == 3 or client.last_error() == 4:
                    logger.error('Send or receive error!')
                elif client.last_error() == 5:
                    logger.error('Timeout during send or receive operation!')

            # ----- Battery summary block: address 840, 7 registers -----
            reg_block = {}
            reg_block = client.read_holding_registers(840, 7)
            if reg_block:
                datapoint = {
                    'measurement': 'Victron',
                    'tags': {},
                    'fields': {}
                }
                # print(reg_block)
                # reg_block[0] = Battery Voltage (System)
                # reg_block[1] = Battery Current (System)
                # reg_block[2] = Battery Power (System)
                # reg_block[3] = Battery State of Charge (System)
                # reg_block[4] = Battery state (System)
                # reg_block[5] = Battery Consumed Amphours (System)
                # reg_block[6] = Battery Time to Go (System)
                # NOTE(review): unlike the system block, no 'system' tag is set
                # on this point -- confirm intended.

                # Battery Voltage (register is in 0.1 V units)
                logger.debug(f'Block0: {str(reg_block[0])}')
                scalefactor = 0.1
                datapoint['fields']['Battery Voltage'] = trunc_float(reg_block[0] * scalefactor)

                # Battery Current (signed, 0.1 A units)
                logger.debug(f'Block1: {str(reg_block[1])}')
                scalefactor = 0.1
                datapoint['fields']['Battery Current'] = trunc_float(np.int16(reg_block[1]) * scalefactor)

                # Battery Power (signed)
                logger.debug(f'Block2: {str(reg_block[2])}')
                scalefactor = 1
                datapoint['fields']['Battery Power'] = trunc_float(np.int16(reg_block[2]) * scalefactor)

                # Battery State of Charge
                logger.debug(f'Block3: {str(reg_block[3])}')
                scalefactor = 1
                datapoint['fields']['Battery State of Charge'] = trunc_float(reg_block[3] * scalefactor)

                datapoint['time'] = str(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
                logger.debug(f'Writing to Influx: {str(datapoint)}')
                await solar_client.write(datapoint)
            else:
                # Error during data receive
                if client.last_error() == 2:
                    logger.error(f'Failed to connect to Victron Host {client.host()}!')
                elif client.last_error() == 3 or client.last_error() == 4:
                    logger.error('Send or receive error!')
                elif client.last_error() == 5:
                    logger.error('Timeout during send or receive operation!')
        except InfluxDBWriteError as e:
            logger.error(f'Failed to write to InfluxDb: {e}')
        except IOError as e:
            logger.error(f'I/O exception during operation: {e}')
        except Exception as e:
            logger.error(f'Unhandled exception: {e}')
        # Polling interval.
        await asyncio.sleep(1)
if __name__ == '__main__':
    # Command-line interface: the Venus IP is positional; Influx/Modbus
    # endpoints are optional with sensible defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('--influxdb', default='localhost')
    parser.add_argument('--influxport', type=int, default=8086)
    parser.add_argument('--port', type=int, default=502, help='ModBus TCP port number to use')
    parser.add_argument('--unitid', type=int, default=100, help='ModBus unit id to use in communication')
    parser.add_argument('venus', metavar='Venus IP', help='IP address of the Venus Device to monitor')
    parser.add_argument('--debug', '-d', action='count')
    args = parser.parse_args()
    logging.basicConfig()
    # -d enables victron debug logging; -dd additionally enables aioinflux debug.
    if args.debug and args.debug >= 1:
        logging.getLogger('victron').setLevel(logging.DEBUG)
    if args.debug and args.debug == 2:
        logging.getLogger('aioinflux').setLevel(logging.DEBUG)
    print('Starting up Victron Venus monitoring')
    print(f'Connecting to Victron Venus {args.venus} on port {args.port} using unitid {args.unitid}')
    print(f'Writing data to influxDb {args.influxdb} on port {args.influxport}')
    # Module-level `client` is read by write_to_influx via its `global` declaration.
    client = ModbusClient(args.venus, port=args.port, unit_id=args.unitid, auto_open=True)
    logger.debug('Running eventloop')
    asyncio.get_event_loop().run_until_complete(write_to_influx(args.influxdb, args.influxport))
| 44.753425 | 117 | 0.55678 |
879f520d53d38f557f3143d2bd9889b78edc9ed9 | 2,491 | py | Python | src/symbol_table.py | Wynjones1/pycompiler | dd6f0f3a5e4a4b6c7b110080d66e05724ade6627 | [
"MIT"
] | null | null | null | src/symbol_table.py | Wynjones1/pycompiler | dd6f0f3a5e4a4b6c7b110080d66e05724ade6627 | [
"MIT"
] | null | null | null | src/symbol_table.py | Wynjones1/pycompiler | dd6f0f3a5e4a4b6c7b110080d66e05724ade6627 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
import syntax_tree as ast
from collections import OrderedDict
# Lazily-initialised singleton root scope; created by init_global_symbol_table().
_global_symbol_table = None
class SymbolTable(object):
    """Scoped mapping from ast.Identifier to a declaration (Function/Type/Struct).

    Lookups fall back through parent scopes; the root of every chain is the
    lazily-created module-level global table.
    """
    def __init__(self, parent = None):
        #TODO: remove this ugly hack
        # Make sure the global root exists before we link against it
        # (init_global_symbol_table guards its own recursion -- see below).
        global _global_symbol_table
        init_global_symbol_table()
        self.data = OrderedDict()
        if parent:
            parent.children.append(self)
        # No explicit parent -> chain to the global root scope.
        self.parent = parent if parent else _global_symbol_table
        self.children = []

    def __getitem__(self, key):
        """Look `key` up in this scope, then recursively in enclosing scopes."""
        try:
            return self.data[key]
        except KeyError as e:
            if self.parent:
                return self.parent[key]
            raise

    def __setitem__(self, key, value):
        # Redefinition within the same scope is an error.
        # NOTE(review): the bare KeyError() carries no message -- consider
        # including the identifier for easier debugging.
        assert(isinstance(value, (ast.Function, ast.Type, ast.Struct)))
        assert(isinstance(key, ast.Identifier))
        if key in self.data.keys():
            raise KeyError()
        self.data[key] = value

    def set_parent(self, parent):
        """Re-link this scope under a different parent scope."""
        assert isinstance(parent, SymbolTable)
        self.parent = parent

    def __str__(self):
        # Render ancestor scopes first, one dict per line.
        out = ""
        if self.parent:
            out = str(self.parent)
        out += str(self.data) + "\n"
        return out
class ParamTable(SymbolTable):
    """Symbol-table scope holding a function's parameters."""
    pass
class SubTable(SymbolTable):
    """Nested (block-level) symbol-table scope."""
    pass
def dummy_function(name, return_type_name, *args):
    """Build an ast.Function stub with the given name, return-type name and
    one parameter declaration per positional type argument."""
    return_type = ast.Type(ast.Identifier(return_type_name))
    params = [
        ast.Decl(ast.Type(type_name),
                 ast.Identifier("param_{}".format(position)),
                 None)
        for position, type_name in enumerate(args)
    ]
    return ast.Function(ast.Identifier(name),
                        ast.ParamList(*params),
                        return_type,
                        ast.StatementList())
def add_dummy_function(name, return_type, *args):
    """Register a builtin-function stub in the global symbol table."""
    _global_symbol_table[ast.Identifier(name)] = \
        dummy_function(name, return_type, *args)
def init_global_symbol_table():
    """Create the module-level root symbol table (idempotent) and seed builtins.

    SymbolTable.__init__ calls back into this function, so the global is
    first set to a truthy sentinel (True) to break that recursion before the
    real table object is constructed.
    """
    global _global_symbol_table
    if not _global_symbol_table:
        _global_symbol_table = True
        _global_symbol_table = SymbolTable()
        _global_symbol_table.parent = None
        add_dummy_function("print", "void", "int")
        add_dummy_function("prints", "void", "string")
        add_dummy_function("putc", "void", "int")
9bf3a64ed0098c06e2a187d46b7342df36eab534 | 1,193 | py | Python | _train_resnet50.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | _train_resnet50.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | _train_resnet50.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | import logging
import shlex
import subprocess
import time
import torch
# from torchvision.models import resnet50
from pytorch_model.resnet import resnet50
_logger = logging.getLogger(__name__)
def get_memory_free() -> bool:
    """Return True when GPU 0's used memory (per nvidia-smi) is below 500 MB."""
    output = subprocess.check_output(
        shlex.split(
            'nvidia-smi --query-gpu=memory.used --format=csv,nounits,noheader'
        )
    )
    # One line of used-MB per visible GPU.
    memory_usage = output.decode().split('\n')
    memory_usage = [int(m) for m in memory_usage if m != '']
    _logger.info('memory usage: %s', memory_usage)
    # Below 500 MB -> return True.
    # NOTE(review): only GPU 0 is checked even though occupy_gpu() uses all
    # GPUs via DataParallel -- confirm whether every entry should be tested.
    return memory_usage[0] < 500
def occupy_gpu():
    """Run ResNet-50 forward passes forever to keep all visible GPUs busy.

    Never returns; intended to reserve GPUs as soon as they become free.
    """
    _logger.info('start running resnet50')
    model = resnet50().cuda()
    model = torch.nn.DataParallel(model)
    device = torch.device('cuda')
    num_gpus = torch.cuda.device_count()
    # 32 images per GPU per forward pass; results are discarded.
    while True:
        x = torch.rand(32 * num_gpus, 3, 224, 224, device=device)
        y = model(x)
def main():
    """Poll every 10 s until the GPU is free, then occupy it indefinitely."""
    logging.basicConfig(level=logging.INFO)
    is_free = False
    while not is_free:
        is_free = get_memory_free()
        # Poll interval; note we also sleep once after the check succeeds,
        # matching the original loop ordering.
        time.sleep(10)
    occupy_gpu()
# Script entry point.
if __name__ == "__main__":
    main()
| 22.509434 | 79 | 0.629505 |
3dc71b2a43a321e29b143f1d86112581a47ec2e4 | 943 | py | Python | issue2/imperative_gluon.py | KexinFeng/incubator-mxnet | 66cdc16f3ab9ebda0fe82dcdd612db615c80b007 | [
"Apache-2.0",
"MIT"
] | null | null | null | issue2/imperative_gluon.py | KexinFeng/incubator-mxnet | 66cdc16f3ab9ebda0fe82dcdd612db615c80b007 | [
"Apache-2.0",
"MIT"
] | null | null | null | issue2/imperative_gluon.py | KexinFeng/incubator-mxnet | 66cdc16f3ab9ebda0fe82dcdd612db615c80b007 | [
"Apache-2.0",
"MIT"
] | null | null | null | # This is bug free, since `_api_internal.invoke(CachedOp.handle, ...)`
# is called.
# `unit_test.py` has bug when `static_alloc` is set in `_bind`.
# It calls `_LIB.MXInvokeCachedOp`
import mxnet as mx
from mxnet.gluon import HybridBlock
from mxnet import autograd as ag
class AddBlock(HybridBlock):
    """Minimal gluon HybridBlock computing element-wise a + b.

    Used below to probe CachedOp invocation behaviour under hybridize().
    """
    def __init__(self):
        super(AddBlock, self).__init__()
    def forward(self, a, b):
        c = a + b
        return c
# Instantiate, initialise parameters and compile the block into a CachedOp.
add = AddBlock()
add.initialize()
add.hybridize(static_alloc=True)
# Two scalar inputs; only y requests gradient storage.
x = mx.np.array([0.4])
y = mx.np.array([0.5])
x.attach_grad(grad_req='null')
y.attach_grad(grad_req='write')
# Record the forward pass manually instead of the ag.record() context manager.
ag.set_recording(True)
# with ag.record():
out = add(x, y)
# out = add(x, y)
ag.set_recording(False)
# print(out)
out.backward()
print(x, y, out)
print(x.grad)
print(y.grad)
# print(out.grad)
# print("\nINPUT 1: {}\nINPUT 2: {}\nOUTPUT: {}\nGRAD 1: {}\n"
#       "GRAD 2: {}\n".format(x, y, out, x.grad, y.grad, out.grad))
| 23 | 71 | 0.651113 |
7b0cf2e7e4d2654a37b6c8a9ad4efe8aa8e12f52 | 191 | py | Python | src/services/message_management/publish.py | b1team/trada | 22ceaf4d50fe3a38ff402315c029e574773ca9e0 | [
"MIT"
] | null | null | null | src/services/message_management/publish.py | b1team/trada | 22ceaf4d50fe3a38ff402315c029e574773ca9e0 | [
"MIT"
] | 1 | 2021-03-12T15:16:03.000Z | 2021-03-12T15:16:03.000Z | src/services/message_management/publish.py | b1team/trada | 22ceaf4d50fe3a38ff402315c029e574773ca9e0 | [
"MIT"
] | null | null | null | import redis
import json
def publish_event(redis_uri, channel, event):
    """JSON-encode *event* and publish it on *channel* via Redis pub/sub.

    Returns the number of subscribers that received the message.
    """
    connection = redis.Redis.from_url(redis_uri)
    payload = json.dumps(event)
    return connection.publish(channel=channel, message=payload)
a5c22b272c1a7bf50883ddfbf6d899017fab7170 | 13,834 | py | Python | MNIST/Deep_AE_overfit.py | Sungyeop/IPRL | 6ee17f415998ac5cc058c63cea06a5cad40b267c | [
"MIT"
] | 2 | 2021-02-08T05:34:45.000Z | 2021-03-02T08:36:06.000Z | MNIST/Deep_AE_overfit.py | Sungyeop/IPRL | 6ee17f415998ac5cc058c63cea06a5cad40b267c | [
"MIT"
] | null | null | null | MNIST/Deep_AE_overfit.py | Sungyeop/IPRL | 6ee17f415998ac5cc058c63cea06a5cad40b267c | [
"MIT"
] | null | null | null | import numpy as np
import copy
import torch
import torchvision
from torch import nn, optim
from torchvision import transforms, datasets
from scipy.spatial.distance import pdist, squareform
from scipy.special import expit
import matplotlib.pyplot as plt
# Training Options
#==============================================================================================================
EPOCH = 500              # Epoch
batch = 100              # mini-batch size
n1 = 256                 # the number of nodes in the first hidden layer (E1)
n2 = 128                 # the number of nodes in the second hidden layer (E2)
n3 = 50                  # the number of nodes in bottleneck layer (Z)
lr = 0.005               # learning rate
per = 10                 # portion of data [%]
view = 15                # the number of sample images
gamma = 2                # constant in kernel bandwidth
alpha = 1.01             # Renyi's alpha-entropy
time_int = 'Iteration'   # Time interval of Information Plane : iteration
# time_int = 'Epoch'     # Time interval of Inforamtion Plane : Epoch
epsilon = 10**(-8)       # divergence regulator
DEVICE = "cpu"
#==============================================================================================================

# Data Load
#==============================================================================================================
trainset = datasets.MNIST(root = './.data/', train = True, download = True, transform = transforms.ToTensor())
testset = datasets.MNIST(root = './.data/', train = False, download = True, transform = transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = trainset, batch_size=batch, shuffle = True, num_workers=0)
test_loader = torch.utils.data.DataLoader(dataset = testset, batch_size=batch, shuffle = True, num_workers=0)
# Keep only the first `per` percent of the training set (deliberately small
# to provoke overfitting for this study).
L = int(len(trainset.data)*per/100)
MNIST_torch = trainset.data[:L]
label_torch = trainset.targets[:L]
trainset.data = MNIST_torch
trainset.targets = label_torch
#==============================================================================================================
class Deep_AE(nn.Module):
    """Symmetric six-layer fully-connected autoencoder for 28x28 images.

    Encoder: 784 -> n1 -> n2 -> n3 (bottleneck); the decoder mirrors it back
    to 784.  Every layer output passes through a sigmoid.
    """

    def __init__(self, n1, n2, n3):
        super(Deep_AE, self).__init__()
        # Attribute names fc1..fc6 are part of the public interface: the
        # training loop reads their weights/biases directly.
        self.fc1 = nn.Linear(28 * 28, n1)
        self.fc2 = nn.Linear(n1, n2)
        self.fc3 = nn.Linear(n2, n3)
        self.fc4 = nn.Linear(n3, n2)
        self.fc5 = nn.Linear(n2, n1)
        self.fc6 = nn.Linear(n1, 28 * 28)

    def forward(self, x):
        out = x.view(-1, 28 * 28)
        for layer in (self.fc1, self.fc2, self.fc3,
                      self.fc4, self.fc5, self.fc6):
            out = torch.sigmoid(layer(out))
        return out
# Model, optimizer and reconstruction loss used by train() below.
autoencoder = Deep_AE(n1,n2,n3).to(DEVICE)
optimizer = torch.optim.Adam(autoencoder.parameters(), lr = lr)
MSE = nn.MSELoss()
def train(autoencoder, train_loader, history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \
          history_W4, history_b4, history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss):
    """Run one training epoch, recording every layer's parameters and the
    train/test reconstruction loss at every iteration.

    All `history_*` lists are mutated in place and also returned.  The
    bookkeeping is deliberately heavy for this information-plane study: each
    batch deep-copies all six weight matrices and evaluates the loss on the
    *entire* test set.
    """
    autoencoder.train()
    for step, (x,label) in enumerate(train_loader):
        x = x.view(-1,28*28).to(DEVICE)
        # Autoencoder target is the input itself.
        y = x.view(-1,28*28).to(DEVICE)
        label = label.to(DEVICE)
        Y = autoencoder(x)

        # Snapshot current parameters as numpy arrays for the information plane.
        W1 = autoencoder.fc1.weight.data.detach().numpy()
        b1 = autoencoder.fc1.bias.data.detach().numpy()
        W2 = autoencoder.fc2.weight.data.detach().numpy()
        b2 = autoencoder.fc2.bias.data.detach().numpy()
        W3 = autoencoder.fc3.weight.data.detach().numpy()
        b3 = autoencoder.fc3.bias.data.detach().numpy()
        W4 = autoencoder.fc4.weight.data.detach().numpy()
        b4 = autoencoder.fc4.bias.data.detach().numpy()
        W5 = autoencoder.fc5.weight.data.detach().numpy()
        b5 = autoencoder.fc5.bias.data.detach().numpy()
        W6 = autoencoder.fc6.weight.data.detach().numpy()
        b6 = autoencoder.fc6.bias.data.detach().numpy()

        # deepcopy: the numpy views above share storage with the live tensors,
        # so a copy is required to freeze this iteration's values.
        history_W1.append(copy.deepcopy(W1))
        history_b1.append(copy.deepcopy(b1))
        history_W2.append(copy.deepcopy(W2))
        history_b2.append(copy.deepcopy(b2))
        history_W3.append(copy.deepcopy(W3))
        history_b3.append(copy.deepcopy(b3))
        history_W4.append(copy.deepcopy(W4))
        history_b4.append(copy.deepcopy(b4))
        history_W5.append(copy.deepcopy(W5))
        history_b5.append(copy.deepcopy(b5))
        history_W6.append(copy.deepcopy(W6))
        history_b6.append(copy.deepcopy(b6))

        trainloss = MSE(Y, y)
        history_trainloss.append(trainloss.detach().numpy())

        # Full test-set reconstruction loss at every iteration (uses the
        # module-level `testset`).
        # NOTE(review): computed without torch.no_grad() -- this builds an
        # unused autograd graph each step; confirm intended.
        test_data = testset.data.view(-1,784).type(torch.FloatTensor)/255.
        output = autoencoder(test_data)
        testloss = MSE(output, test_data)
        history_testloss.append(testloss.detach().numpy())

        optimizer.zero_grad()
        trainloss.backward()
        optimizer.step()

    return (history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \
            history_W4, history_b4, history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss)
def sigmoid(x):
    """Logistic function; delegates to scipy's numerically stable expit."""
    return expit(x)
def Ent(X, gamma, alpha, epsilon=10**(-8)):
    """Matrix-based Renyi alpha-entropy estimate for a sample matrix X (N x d).

    Builds a Gaussian Gram matrix K with bandwidth
    sigma = gamma * N**(-1/(4+d)), normalises it to A = K/N, and returns
    (A, S) where S = 1/(1-alpha) * log2(sum(eig(A)**alpha) + epsilon).

    `epsilon` guards log2(0); it defaults to the same value as the
    module-level constant, so existing three-argument calls are unchanged.
    (Improvement over the original: the hidden dependency on the global
    `epsilon` is now an explicit, overridable parameter, and the dead
    `X_norm = X` alias is removed.)
    """
    N = np.size(X, 0)
    d = np.size(X, 1)
    sigma = gamma*N**(-1/(4+d))
    pairwise_dist = squareform(pdist(X, 'euclidean'))
    K = np.exp(-pairwise_dist**2/(2*sigma**2))
    A = 1/N*K
    # Singular values of the symmetric PSD matrix A are its eigenvalues.
    _, eigenv, _ = np.linalg.svd(A)
    S = 1/(1-alpha)*np.log2(np.sum(eigenv**alpha)+epsilon).real
    return A, S
def MI(X, Y, gamma, alpha):
    """Matrix-based Renyi mutual information: I(X;Y) = S_X + S_Y - S_XY.

    Returns (I, S_XY) where S_XY is the joint entropy computed from the
    trace-normalised Hadamard product of the two Gram matrices.
    """
    A_X, S_X = Ent(X, gamma, alpha)
    A_Y, S_Y = Ent(Y, gamma, alpha)
    A_XY = A_X * A_Y / np.trace(A_X * A_Y)
    _, joint_eigs, _ = np.linalg.svd(A_XY)
    S_XY = 1 / (1 - alpha) * np.log2(np.sum(joint_eigs ** alpha) + epsilon).real
    return S_X + S_Y - S_XY, S_XY
def encoder(test, W1, b1, W2, b2, W3, b3):
    """Encoder forward pass: input -> E1 -> E2 -> bottleneck Z (all sigmoid)."""
    layer_out = test
    activations = []
    for W, b in ((W1, b1), (W2, b2), (W3, b3)):
        layer_out = sigmoid(np.einsum('ij,jk->ik', layer_out, W.T) + b)
        activations.append(layer_out)
    return tuple(activations)
def decoder(Z, W4, b4, W5, b5, W6, b6):
    """Decoder forward pass: bottleneck Z -> D1 -> D2 -> reconstruction (all sigmoid)."""
    layer_out = Z
    activations = []
    for W, b in ((W4, b4), (W5, b5), (W6, b6)):
        layer_out = sigmoid(np.einsum('ij,jk->ik', layer_out, W.T) + b)
        activations.append(layer_out)
    return tuple(activations)
def IP(history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4,
       history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss):
    """Estimate layer-wise mutual information across training snapshots and
    plot the information plane, the data-processing-inequality curves and
    the train/test losses.

    Relies on module-level globals: ``time_int`` ('Epoch' or 'Iteration'),
    ``EPOCH``, ``testset``, ``batch``, ``gamma`` and ``alpha``.

    Fixes vs. the previous revision:
    * ``np.int`` (deprecated since NumPy 1.20, removed in 1.24) replaced
      with the builtin ``int``.
    * An unrecognized ``time_int`` now fails fast with ``ValueError``
      instead of hitting an ``UnboundLocalError`` on ``step`` below.
    """
    if time_int == 'Epoch':
        step = EPOCH
        ind = np.linspace(0, len(history_trainloss) * (1 - 1 / step), step)
    elif time_int == 'Iteration':
        jump = 1
        step = int(len(history_trainloss) / jump)
        ind = np.linspace(0, len(history_trainloss) - jump, step)
    else:
        raise ValueError(
            "time_int must be 'Epoch' or 'Iteration', got %r" % (time_int,))
    # One slot per sampled snapshot for every I(X;T) / I(T;X') pair.
    I_XE1_cont = np.zeros((step,))
    I_E1Y_cont = np.zeros((step,))
    I_XE2_cont = np.zeros((step,))
    I_E2Y_cont = np.zeros((step,))
    I_XZ_cont = np.zeros((step,))
    I_ZY_cont = np.zeros((step,))
    I_XD1_cont = np.zeros((step,))
    I_D1Y_cont = np.zeros((step,))
    I_XD2_cont = np.zeros((step,))
    I_D2Y_cont = np.zeros((step,))
    train_E_cont = np.zeros((step,))
    test_E_cont = np.zeros((step,))
    # MNIST test images, flattened and scaled to [0, 1].
    MNIST_test = testset.data.view(-1, 28 * 28).type(torch.FloatTensor) / 255.
    MNIST_test = MNIST_test.detach().numpy()
    for j in range(step):
        i = int(ind[j])
        # Rebuild the network parameters recorded at snapshot i; biases are
        # reshaped to row vectors so they broadcast in encoder()/decoder().
        W1 = history_W1[i]
        b1 = history_b1[i]
        b1 = np.reshape(b1, (1, len(b1)))
        W2 = history_W2[i]
        b2 = history_b2[i]
        b2 = np.reshape(b2, (1, len(b2)))
        W3 = history_W3[i]
        b3 = history_b3[i]
        b3 = np.reshape(b3, (1, len(b3)))
        W4 = history_W4[i]
        b4 = history_b4[i]
        b4 = np.reshape(b4, (1, len(b4)))
        W5 = history_W5[i]
        b5 = history_b5[i]
        b5 = np.reshape(b5, (1, len(b5)))
        W6 = history_W6[i]
        b6 = history_b6[i]
        b6 = np.reshape(b6, (1, len(b6)))
        train_E = history_trainloss[i]
        test_E = history_testloss[i]
        # Forward pass of `batch` test images through the snapshot network.
        X = MNIST_test[:batch, :]
        E1, E2, Z = encoder(X, W1, b1, W2, b2, W3, b3)
        D1, D2, Y = decoder(Z, W4, b4, W5, b5, W6, b6)
        # Mutual information between the input X / output Y and every layer.
        I_XE1, H_XE1 = MI(X, E1, gamma, alpha)
        I_E1Y, H_E1Y = MI(E1, Y, gamma, alpha)
        I_XE2, H_XE2 = MI(X, E2, gamma, alpha)
        I_E2Y, H_E2Y = MI(E2, Y, gamma, alpha)
        I_XZ, H_XZ = MI(X, Z, gamma, alpha)
        I_ZY, H_ZY = MI(Z, Y, gamma, alpha)
        I_XD1, H_XD1 = MI(X, D1, gamma, alpha)
        I_D1Y, H_D1Y = MI(D1, Y, gamma, alpha)
        I_XD2, H_XD2 = MI(X, D2, gamma, alpha)
        I_D2Y, H_D2Y = MI(D2, Y, gamma, alpha)
        I_XE1_cont[j] = I_XE1
        I_E1Y_cont[j] = I_E1Y
        I_XE2_cont[j] = I_XE2
        I_E2Y_cont[j] = I_E2Y
        I_XZ_cont[j] = I_XZ
        I_ZY_cont[j] = I_ZY
        I_XD1_cont[j] = I_XD1
        I_D1Y_cont[j] = I_D1Y
        I_XD2_cont[j] = I_XD2
        I_D2Y_cont[j] = I_D2Y
        train_E_cont[j] = train_E
        test_E_cont[j] = test_E
    # Information plane
    D = 7
    size = 7
    xx = np.linspace(0, D, 500)
    yy = np.linspace(0, D, 500)
    num = np.linspace(0, step, step)
    fig = plt.figure(figsize=(12, 8))
    suptitle = fig.suptitle('Information Plane of Deep Autoencoder', y=1.01, fontsize='20')
    ax1 = plt.subplot(2, 3, 1)
    plt.plot(xx, yy, 'k--')
    # Keep a handle to one scatter so the shared colorbar can be built from it.
    im = plt.scatter(I_XE1_cont, I_E1Y_cont, c=num, cmap='rainbow', label='E1', s=size)
    plt.ylabel(r"$I(T;X')$", fontsize=13)
    ax1.axes.get_xaxis().set_ticks([])
    plt.legend(fontsize='15')
    ax2 = plt.subplot(2, 3, 2)
    plt.plot(xx, yy, 'k--')
    plt.scatter(I_XE2_cont, I_E2Y_cont, c=num, cmap='rainbow', label='E2', s=size)
    ax2.axes.get_xaxis().set_ticks([])
    ax2.axes.get_yaxis().set_ticks([])
    plt.legend(fontsize='15')
    ax3 = plt.subplot(2, 3, 3)
    plt.plot(xx, yy, 'k--')
    plt.scatter(I_XZ_cont, I_ZY_cont, c=num, cmap='rainbow', label='Z', s=size)
    ax3.axes.get_xaxis().set_ticks([])
    ax3.axes.get_yaxis().set_ticks([])
    plt.legend(fontsize='15')
    ax4 = plt.subplot(2, 3, 4)
    plt.plot(xx, yy, 'k--')
    plt.scatter(I_XZ_cont, I_ZY_cont, c=num, cmap='rainbow', label='Z', s=size)
    plt.xlabel(r'$I(X;T)$', fontsize=13)
    plt.ylabel(r"$I(T;X')$", fontsize=13)
    plt.legend(fontsize='15')
    ax5 = plt.subplot(2, 3, 5)
    plt.plot(xx, yy, 'k--')
    plt.scatter(I_XD1_cont, I_D1Y_cont, c=num, cmap='rainbow', label='D1', s=size)
    plt.xlabel(r'$I(X;T)$', fontsize=13)
    ax5.axes.get_yaxis().set_ticks([])
    plt.legend(fontsize='15')
    ax6 = plt.subplot(2, 3, 6)
    plt.plot(xx, yy, 'k--')
    plt.scatter(I_XD2_cont, I_D2Y_cont, c=num, cmap='rainbow', label='D2', s=size)
    plt.xlabel(r'$I(X;T)$', fontsize=13)
    ax6.axes.get_yaxis().set_ticks([])
    plt.legend(fontsize='15')
    plt.tight_layout()
    b_ax = fig.add_axes([1.02, 0.15, 0.02, 0.7])
    bar = fig.colorbar(im, cax=b_ax)
    bar.set_label('{}'.format(time_int))
    plt.show()
    # DPI & Train/Test Loss
    fig = plt.figure(figsize=(12, 4))
    ax1 = plt.subplot(1, 3, 1)
    plt.plot(I_XE1_cont, label=r'$I(X;E_1)$')
    plt.plot(I_XE2_cont, label=r'$I(X;E_2)$')
    plt.plot(I_XZ_cont, label='I(X;Z)')
    plt.xlabel('{}'.format(time_int))
    plt.title('DPI of Encoder', fontsize=15)
    plt.legend()
    ax2 = plt.subplot(1, 3, 2)
    plt.plot(I_D2Y_cont, label=r'$I(D_2;Y)$')
    plt.plot(I_D1Y_cont, label=r'$I(D_1;Y)$')
    plt.plot(I_ZY_cont, label='I(Z;Y)')
    plt.xlabel('{}'.format(time_int))
    plt.title('DPI of Decoder', fontsize=15)
    plt.legend()
    ax3 = plt.subplot(1, 3, 3)
    plt.plot(np.log10(train_E_cont), label='Train')
    plt.plot(np.log10(test_E_cont), label='Test')
    plt.ylabel('log(Loss)')
    plt.xlabel('{}'.format(time_int))
    plt.title('Train/Test Loss', fontsize=15)
    plt.legend()
    plt.tight_layout()
    plt.show()
def main():
    """Train the autoencoder for EPOCH epochs while recording per-iteration
    parameter snapshots and losses, show sample reconstructions after the
    final epoch, then hand the full history to IP() for mutual-information
    analysis.

    Uses module-level globals: trainset, train_loader, autoencoder, train,
    EPOCH, DEVICE and view.
    """
    # Per-iteration snapshots of every layer's weights/biases plus losses,
    # extended in place by train() each epoch.
    history_W1 = []
    history_b1 = []
    history_W2 = []
    history_b2 = []
    history_W3 = []
    history_b3 = []
    history_W4 = []
    history_b4 = []
    history_W5 = []
    history_b5 = []
    history_W6 = []
    history_b6 = []
    history_trainloss = []
    history_testloss = []
    # First `view` training images, flattened and scaled to [0, 1].
    sample_data = trainset.data[:view].view(-1,28*28)
    sample_data = sample_data.type(torch.FloatTensor)/255.
    print('Training Starts!')
    for epoch in range(1,EPOCH+1):
        history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \
        history_W4, history_b4, history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss = \
        train(autoencoder, train_loader, history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \
        history_W4, history_b4, history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss)
        sample_x = sample_data.to(DEVICE)
        sample_y = autoencoder(sample_x)
        if epoch == EPOCH:
            # After the final epoch, show originals (top row) against
            # reconstructions (bottom row).
            f,a = plt.subplots(2,view,figsize=(view,2))
            for i in range(view):
                img = np.reshape(sample_x.data.numpy()[i], (28,28))
                a[0][i].imshow(img, cmap='gray')
                a[0][i].set_xticks(()); a[0][i].set_yticks(())
            for i in range(view):
                img = np.reshape(sample_y.data.numpy()[i],(28,28))
                a[1][i].imshow(img, cmap='gray')
                a[1][i].set_xticks(()); a[1][i].set_yticks(())
            plt.show()
    print('Training Ends!')
    print('Estimating Mutual Information...')
    IP(history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4,\
    history_W5, history_b5, history_W6, history_b6, history_trainloss, history_testloss)
| 36.21466 | 122 | 0.582117 |
186e18187d344594ae0bcf33fa38b6ae55a6ab39 | 2,179 | py | Python | betconnect/utils.py | betcode-org/betconnect | 055158f8fc89a7491d19396f91373de0dfee81b4 | [
"MIT"
] | 4 | 2022-03-24T11:35:10.000Z | 2022-03-25T20:32:33.000Z | betconnect/utils.py | betcode-org/betconnect | 055158f8fc89a7491d19396f91373de0dfee81b4 | [
"MIT"
] | 6 | 2022-03-24T11:32:07.000Z | 2022-03-29T17:18:53.000Z | betconnect/utils.py | varneyo/betconnect | 5463977a9053fec1d9a759995aed44209ee73d43 | [
"MIT"
] | 1 | 2022-03-29T12:56:24.000Z | 2022-03-29T12:56:24.000Z | from __future__ import annotations
import hashlib
from typing import List, Union, TYPE_CHECKING
from uuid import UUID
if TYPE_CHECKING:
from betconnect.resources import SelectionsForMarket
# "https://stackoverflow.com/questions/19989481/how-to-determine-if-a-string-is-a-valid-v4-uuid"
def is_valid_uuid(uuid_to_test, version=4):
    """
    Check whether *uuid_to_test* is the canonical string form of a UUID.

    Parameters
    ----------
    uuid_to_test : str
    version : {1, 2, 3, 4}

    Returns
    -------
    `True` when the string parses as a UUID of the requested version and
    round-trips unchanged (i.e. it is already lowercase and hyphenated),
    otherwise `False`.

    Examples
    --------
    >>> is_valid_uuid('c9bf9e57-1685-4c89-bafb-ff5af830be8a')
    True
    >>> is_valid_uuid('c9bf9e58')
    False
    """
    try:
        parsed = UUID(uuid_to_test, version=version)
    except ValueError:
        return False
    # The round-trip comparison rejects non-canonical spellings that UUID()
    # itself would accept (uppercase, braces, urn: prefix, ...).
    return str(parsed) == uuid_to_test
def calculate_book_percentage(market_selections: List[SelectionsForMarket]) -> float:
    """
    Calculates the book % for a given market and runner.

    Sums the implied probabilities (1 / max_price) of every selection that
    is currently trading, skipping selections with no price or a price at
    or below 1.01 (no real market there).

    :param List[SelectionsForMarket] market_selections: List of available selections for the market
    :return: A float value. Anything close to 1.0 is good. Anything 1.1-1.15 is a large markup
    """
    book_value = 0
    for selection in market_selections:
        is_trading = selection.trading_status == "Trading"
        if is_trading and selection.max_price and selection.max_price > 1.01:
            book_value += 1 / selection.max_price
    return round(book_value, 3)
def parse_bet_request_id(bet_request_id: Union[str, UUID]) -> UUID:
    """
    Parses a string or UUID to a UUID.

    :param bet_request_id: a string or UUID
    :return: a UUID valid for a bet_request_id
    :raises TypeError: if bet_request_id is neither str nor UUID
        (previously this case silently fell through and returned None)
    :raises ValueError: if the string is not a valid UUID
    """
    if isinstance(bet_request_id, UUID):
        return bet_request_id
    if isinstance(bet_request_id, str):
        return UUID(bet_request_id)
    raise TypeError(
        "bet_request_id must be a str or UUID, got %s"
        % type(bet_request_id).__name__
    )
def create_cheap_hash(txt: str, length: int = 15) -> str:
    """Short, deterministic fingerprint of *txt* for debugging purposes.

    Uniqueness is not required here, just something fast and short, so a
    truncated SHA-1 hexdigest is fine.
    https://stackoverflow.com/questions/14023350
    """
    digest = hashlib.sha1(txt.encode())
    return digest.hexdigest()[:length]
fc4e3a4118e66e2502cc4e7a89b4df23c256f677 | 3,527 | py | Python | docs/conf.py | ajyoung/cheroot | b6edd3a2ff579e3e6d8276b0a262fd7fbb54adce | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | ajyoung/cheroot | b6edd3a2ff579e3e6d8276b0a262fd7fbb54adce | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | ajyoung/cheroot | b6edd3a2ff579e3e6d8276b0a262fd7fbb54adce | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
# Requires Python 3.6+
# pylint: disable=invalid-name
"""Configuration of Sphinx documentation generator."""
from pathlib import Path
import sys
# Make in-tree extension importable in non-tox setups/envs, like RTD.
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str(Path(__file__).parent.resolve()))
# Sphinx extensions; `scm_tag_titles_ext` is the in-tree extension made
# importable by the sys.path tweak above.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'jaraco.packaging.sphinx',
    'sphinx_tabs.tabs',
    'sphinxcontrib.spelling',
    'scm_tag_titles_ext',
]

master_doc = 'index'

# sphinxcontrib-spelling configuration.
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
    'spelling_wordlist.txt',
]

# Settings consumed by the in-tree scm_tag_titles_ext extension.
scm_version_title_settings = {
    'scm': 'git',
    'date_format': '%d %b %Y',
}

# URL building blocks for the extlinks shortcut roles below.
github_url = 'https://github.com'
github_repo_org = 'cherrypy'
github_repo_name = 'cheroot'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
cp_github_repo_url = f'{github_url}/{github_repo_org}/cherrypy'
github_sponsors_url = f'{github_url}/sponsors'

# Shortcut roles, e.g. :issue:`123` or :user:`octocat` (sphinx.ext.extlinks).
extlinks = {
    'issue': (f'{github_repo_url}/issues/%s', '#'),
    'pr': (f'{github_repo_url}/pull/%s', 'PR #'),
    'commit': (f'{github_repo_url}/commit/%s', ''),
    'cp-issue': (f'{cp_github_repo_url}/issues/%s', 'CherryPy #'),
    'cp-pr': (f'{cp_github_repo_url}/pull/%s', 'CherryPy PR #'),
    'gh': (f'{github_url}/%s', 'GitHub: '),
    'user': (f'{github_sponsors_url}/%s', '@'),
}

# Cross-project references (sphinx.ext.intersphinx).
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'python2': ('https://docs.python.org/2', None),
    'cherrypy': ('https://docs.cherrypy.org/en/latest/', None),
    'trustme': ('https://trustme.readthedocs.io/en/latest/', None),
    'ddt': ('https://ddt.readthedocs.io/en/latest/', None),
    'pyopenssl': ('https://www.pyopenssl.org/en/latest/', None),
}

# URLs the `linkcheck` builder should not attempt to fetch.
linkcheck_ignore = [
    r'http://localhost:\d+/',  # local URLs
    r'https://codecov\.io/gh/cherrypy/cheroot/branch/master/graph/badge\.svg',
    r'https://github\.com/cherrypy/cheroot/actions',  # 404 if no auth
    # Too many links to GitHub so they cause
    # "429 Client Error: too many requests for url"
    # Ref: https://github.com/sphinx-doc/sphinx/issues/7388
    r'https://github\.com/cherrypy/cheroot/issues',
    r'https://github\.com/cherrypy/cheroot/pull',
    r'https://github\.com/cherrypy/cherrypy/issues',
    r'https://github\.com/cherrypy/cherrypy/pull',
    # Requires a more liberal 'Accept: ' HTTP request header:
    # Ref: https://github.com/sphinx-doc/sphinx/issues/7247
    r'https://github\.com/cherrypy/cheroot/workflows/[^/]+/badge\.svg',
]
linkcheck_workers = 25

# Treat unresolvable cross-references as warnings (see ignore list below).
nitpicky = True

# NOTE: consider having a separate ignore file
# Ref: https://stackoverflow.com/a/30624034/595220
nitpick_ignore = [
    ('py:const', 'socket.SO_PEERCRED'),
    ('py:class', '_pyio.BufferedWriter'),
    ('py:class', '_pyio.BufferedReader'),
    ('py:class', 'unittest.case.TestCase'),
]

# Ref:
# * https://github.com/djungelorm/sphinx-tabs/issues/26#issuecomment-422160463
sphinx_tabs_valid_builders = ['linkcheck']  # prevent linkcheck warning

# Ref: https://github.com/python-attrs/attrs/pull/571/files\
# #diff-85987f48f1258d9ee486e3191495582dR82
default_role = 'any'
8ea7cc4ea4b3cd2e2107e212d2c00d47db569b2a | 382 | py | Python | tests/fixtures/fabfile.py | kevin1024/fabric_remote | b564638cb54ec511d54945217ba4049dbcb824a1 | [
"Unlicense",
"MIT"
] | 19 | 2015-03-03T10:57:09.000Z | 2019-06-22T03:03:28.000Z | tests/fixtures/fabfile.py | chojayr/fabric_remote | 054496e675c3480979494d011c785bb2e4dbfd17 | [
"MIT",
"Unlicense"
] | 2 | 2017-05-16T12:26:04.000Z | 2021-10-21T01:59:22.000Z | tests/fixtures/fabfile.py | chojayr/fabric_remote | 054496e675c3480979494d011c785bb2e4dbfd17 | [
"MIT",
"Unlicense"
] | 5 | 2015-06-22T21:09:45.000Z | 2017-05-16T12:26:32.000Z | import os
from fabric.api import task, run, local, env
import time
@task
def host_type():
    """
    Fixture task for the test-suite: runs a few local shell commands,
    sleeps to simulate a long-running task, stores a value in the shared
    fabric ``env`` and returns a sentinel string.
    """
    local('uname -a')
    local('ls -l /usr/lib')
    local('echo blah')
    # Artificial delay so callers can observe an in-progress task.
    time.sleep(4)
    local('echo blah')
    local('echo blah')
    local('echo blah')
    # Shared fabric env state, read back by check_foo().
    env['foo'] = 'bar'
    return "shit worked"
@task
def check_foo():
    # Returns the value stored in the fabric env by host_type().
    return env['foo']
| 16.608696 | 44 | 0.58377 |
71e75a13ec6a4a915cdee8cf0fb8744111789294 | 1,266 | py | Python | var/spack/repos/builtin/packages/r-argparse/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-argparse/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-argparse/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RArgparse(RPackage):
    """Command Line Optional and Positional Argument Parser.
    A command line parser to be used with Rscript to write "#!" shebang scripts
    that gracefully accept positional and optional arguments and automatically
    generate usage."""

    # CRAN package name; Spack derives the download URLs from this.
    cran = "argparse"

    # Known releases, pinned by source-tarball sha256 checksum.
    version('2.1.3', sha256='aeda31a54a8d7a0a511cfbf7c5868637e129922671d43938165867437fb6a66e')
    version('2.0.3', sha256='d26139c610ea0adf8d6632699cd34c4595ae3e7963bfc7a00cb3b7504f2059b0')
    version('2.0.1', sha256='949843920d14fc7c162aedab331a936499541736e7dafbb103fbfd79be8147ab')
    version('1.1.1', sha256='441449f0816411a868fd1b15cf4b2bc45931bbd4b67d6592dbe48875905cf93b')

    # Dependencies; `when=` limits a dependency to the version ranges of
    # r-argparse that actually need it.
    depends_on('r-r6', type=('build', 'run'), when='@2.0.0:')
    depends_on('r-findpython', type=('build', 'run'))
    depends_on('r-jsonlite', type=('build', 'run'))
    depends_on('python@3.2:', type=('build', 'run'))
    depends_on('r-proto@1:', type=('build', 'run'), when='@1.0.0:1.9.9')
    depends_on('r-getopt', type=('build', 'run'), when='@1.0.0:1.9.9')
6a9d07a07e1c6e3c00493cf351fd2d489f9be3c8 | 1,909 | py | Python | trac/upgrades/db40.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db40.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db40.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
    """Add notification-subscriber section.

    Migrates the legacy boolean ``[notification] always_notify_*`` options
    into the new ``[notification-subscriber]`` section, backs up the old
    configuration file, and saves the result.
    """
    if 'notification-subscriber' in env.config:
        # An existing section means this upgrade already ran (or subscribers
        # were configured manually); don't clobber it.
        env.log.info("Couldn't add notification-subscriber section because "
                     "it already exists.")
        return

    # CC notification was unconditionally enabled before this upgrade.
    env.config.set('notification-subscriber', 'always_notify_cc',
                   'CarbonCopySubscriber')
    if env.config.getbool('notification', 'always_notify_owner'):
        env.config.set('notification-subscriber', 'always_notify_owner',
                       'TicketOwnerSubscriber')
    if env.config.getbool('notification', 'always_notify_reporter'):
        env.config.set('notification-subscriber', 'always_notify_reporter',
                       'TicketReporterSubscriber')
    if env.config.getbool('notification', 'always_notify_updater'):
        env.config.set('notification-subscriber', 'always_notify_updater',
                       'TicketUpdaterSubscriber')
        env.config.set('notification-subscriber',
                       'always_notify_previous_updater',
                       'TicketPreviousUpdatersSubscriber')

    # Drop the superseded options from the old section.
    env.config.remove('notification', 'always_notify_owner')
    env.config.remove('notification', 'always_notify_reporter')
    env.config.remove('notification', 'always_notify_updater')

    backup_config_file(env, '.db40.bak')
    env.config.save()
a6fab152d4d180e7da42afd6811994f8380fb7df | 3,905 | py | Python | Dr. CovidAI/Dr. CovidAI/flaskblog/routs.py | Shreyas-l/Dr.CovidAI | a09eb779db8d7f38fe4545d3ab71fac8a78f1054 | [
"MIT"
] | null | null | null | Dr. CovidAI/Dr. CovidAI/flaskblog/routs.py | Shreyas-l/Dr.CovidAI | a09eb779db8d7f38fe4545d3ab71fac8a78f1054 | [
"MIT"
] | 1 | 2020-04-13T15:56:55.000Z | 2020-04-13T15:56:55.000Z | Dr. CovidAI/Dr. CovidAI/flaskblog/routs.py | Shreyas-l/CODE19-Pyrocrats | a09eb779db8d7f38fe4545d3ab71fac8a78f1054 | [
"MIT"
] | null | null | null |
from flask import render_template, url_for, flash, redirect, abort, request, jsonify, make_response
#from flaskblog.models import User,Post
#from flaskblog.forms import RegistrationForm, LoginForm,PostForm
from flaskblog import app
from flask_login import login_user, current_user, logout_user, login_required
import requests
import datetime
from flaskblog import pool
def getCountryCases(country):
    """Fetch the latest COVID-19 statistics for *country* from the
    RapidAPI covid-193 endpoint.

    Returns the country's ``cases`` dict (keys observed upstream include
    total/new/active/critical/recovered).

    Raises:
        ValueError: if the country does not appear in the API response.
            (Previously this path crashed with an UnboundLocalError on
            ``cases``.)
    """
    url = "https://covid-193.p.rapidapi.com/statistics"
    headers = {
        'x-rapidapi-host': "covid-193.p.rapidapi.com",
        # SECURITY: hard-coded API key checked into source; move it to an
        # environment variable or configuration file.
        'x-rapidapi-key': "4d9e908890mshba2fda3a9dfb5dfp11e63bjsndf1d7a33c10c"
    }
    response = requests.request("GET", url, headers=headers)
    data = response.json()
    for obj in data["response"]:
        if obj["country"] == country:
            return obj["cases"]
    raise ValueError("No COVID-19 statistics found for country: %s" % country)
def getWorldCases(date):
    """Fetch global COVID-19 totals for *date* (ISO format) from the
    covid-19-statistics RapidAPI endpoint and return its 'data' payload.
    """
    url = "https://covid-19-statistics.p.rapidapi.com/reports/total"
    querystring = {"date": f"{date}"}
    headers = {
        'x-rapidapi-host': "covid-19-statistics.p.rapidapi.com",
        # NOTE(review): hard-coded API key checked into source; move to
        # configuration/environment.
        'x-rapidapi-key': "4d9e908890mshba2fda3a9dfb5dfp11e63bjsndf1d7a33c10c"
    }
    response = requests.request(
        "GET", url, headers=headers, params=querystring)
    return response.json()["data"]
def main():
    """Print yesterday's world and India COVID-19 figures and return the
    eight numbers rendered by the home page template.
    """
    todayDate = datetime.datetime.date(datetime.datetime.utcnow())
    # The world-stats API lags by a day, so query yesterday's report.
    yestDate = todayDate - datetime.timedelta(days=1)
    print(f"Covid-19 Cases updated as of {yestDate} :")
    worldCases = getWorldCases(yestDate)
    print(
        f"World: \nTotal Confirmed: {worldCases['confirmed']}, New Confirmed: {worldCases['confirmed_diff']}, Death Toll: {worldCases['deaths']}, New Deaths: {worldCases['deaths_diff']}")
    country = "India"
    cases = getCountryCases(country)
    print(
        f"Cases in {country}: \nTotal: {cases['total']}, New: {cases['new']}, Active: {cases['active']}, Critical: {cases['critical']}, Recovered: {cases['recovered']}")
    return(worldCases['confirmed'], worldCases['confirmed_diff'], worldCases['deaths'], worldCases['deaths_diff'], cases['total'], cases['new'], cases['active'], cases['recovered'])
# Fetched once at import time; the routes below render these module-level
# snapshots rather than re-querying the APIs per request.
var1, var2, var3, var4, var5, var6, var7, var8 = main()
pool.main()
@app.route('/')
@app.route('/home')
def home():
    # Landing page with the case counts fetched at import time.
    return render_template('index.html', var1=int(var1), var2=int(var2), var3=int(var3), var4=int(var4), var5=int(var5), var6=int(var6), var7=int(var7), var8=int(var8))


# Static NGO directory and per-NGO detail pages.
@app.route('/ngoconnect')
@app.route('/ngo')
def ngo():
    return render_template('ngo.html')


@app.route('/cry')
def cry():
    return render_template('cry.html')


@app.route('/giveindia')
def giveindia():
    return render_template('giveindia.html')


@app.route('/goonj')
def goonj():
    return render_template('goonj.html')


@app.route('/helpageindia')
def helpageindia():
    return render_template('helpageindia.html')


@app.route('/nanhikali')
def nanhikali():
    return render_template('nanhikali.html')


@app.route('/smile')
def smile():
    return render_template('smile.html')
@app.route('/screeningtool')
def screeningtool():
    # Self-assessment tool page; wildcard CORS so the embedded widget can
    # call back from any origin (NOTE(review): restrict in production).
    resp = make_response(render_template('screeningtool.html'))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp


@app.route('/pooling')
def pooling():
    # Build the sample pools and show the details of a fixed demo user (34631).
    Pools = pool.main()
    user_details = pool.fetch_pool_batch(Pools, 34631)
    user_details = user_details[0]
    # print(user_details)
    # Appointment slot: tomorrow's timestamp, truncated to the hour.
    dateStr = str(datetime.datetime.now() + datetime.timedelta(days=1))
    return render_template('pooling.html', poolId=user_details[0], location=user_details[1], probability=user_details[2], assDoctor=user_details[3], assHosp=user_details[4], assLab="OurCareLabs", assPath="Dr. Alistair Baretto", assTime=f'{dateStr[:14]}00')


@app.route('/stageonedata', methods=['POST'])
def stageonedata():
    # Receives the screening tool's stage-one JSON payload and acknowledges it.
    input_json = request.get_json(force=True)
    print("RecievedData", input_json)
    return jsonify({"registeredData": True})
| 30.507813 | 256 | 0.691677 |
d724899859168e9c8bec42cae3ae2d011f5a757f | 3,923 | py | Python | neurogym/envs/delaycomparison.py | ruyuanzhang/neurogym | b4b456f3d41ab2f1648243828c2c907453f51f80 | [
"MIT"
] | 112 | 2020-07-31T14:52:53.000Z | 2022-03-30T16:53:25.000Z | neurogym/envs/delaycomparison.py | ruyuanzhang/neurogym | b4b456f3d41ab2f1648243828c2c907453f51f80 | [
"MIT"
] | 15 | 2020-10-13T15:59:41.000Z | 2021-05-22T03:30:12.000Z | neurogym/envs/delaycomparison.py | ruyuanzhang/neurogym | b4b456f3d41ab2f1648243828c2c907453f51f80 | [
"MIT"
] | 23 | 2020-10-24T04:26:06.000Z | 2022-03-09T07:40:57.000Z | # -*- coding: utf-8 -*-
import numpy as np
import neurogym as ngym
from neurogym import spaces
class DelayComparison(ngym.TrialEnv):
    """Delayed comparison.

    The agent needs to compare the magnitudes of two stimuli that are
    separated by a delay period. The agent reports its decision of the
    stronger stimulus during the decision period.
    """
    metadata = {
        'paper_link': 'https://www.jneurosci.org/content/30/28/9424',
        'paper_name': '''Neuronal Population Coding of Parametric
        Working Memory''',
        'tags': ['perceptual', 'working memory', 'two-alternative',
                 'supervised']
    }

    def __init__(self, dt=100, vpairs=None, rewards=None, timing=None,
                 sigma=1.0):
        """Construct the task.

        Args:
            dt: timestep of the environment in ms.
            vpairs: list of (stronger, weaker) stimulus-strength pairs to
                sample trials from.
            rewards: optional overrides for the reward dictionary.
            timing: optional overrides for the period durations (ms).
            sigma: stimulus noise level, scaled by 1/sqrt(dt) below.
        """
        super().__init__(dt=dt)

        # Pair of stimulus strengthes
        if vpairs is None:
            self.vpairs = [(18, 10), (22, 14), (26, 18), (30, 22), (34, 26)]
        else:
            self.vpairs = vpairs

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'stimulus1': 500,
            'delay': 1000,
            'stimulus2': 500,
            'decision': 100}
        if timing:
            self.timing.update(timing)

        # Whether breaking fixation ends the trial early.
        self.abort = False

        # Input scaling
        self.vall = np.ravel(self.vpairs)
        self.vmin = np.min(self.vall)
        self.vmax = np.max(self.vall)

        # action and observation space
        name = {'fixation': 0, 'stimulus': 1}
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(2,),
                                            dtype=np.float32, name=name)
        name = {'fixation': 0, 'choice': [1, 2]}
        self.action_space = spaces.Discrete(3, name=name)

        self.choices = [1, 2]

    def _new_trial(self, **kwargs):
        """Sample a stimulus pair and ground truth, lay out the trial
        periods, and fill in observations and the target action."""
        trial = {
            'ground_truth': self.rng.choice(self.choices),
            'vpair': self.vpairs[self.rng.choice(len(self.vpairs))]
        }
        trial.update(kwargs)

        v1, v2 = trial['vpair']
        if trial['ground_truth'] == 2:
            # Ground truth 2 means the second stimulus is the stronger one.
            v1, v2 = v2, v1
        trial['v1'] = v1
        trial['v2'] = v2

        # Periods
        periods = ['fixation', 'stimulus1', 'delay', 'stimulus2', 'decision']
        self.add_period(periods)

        self.add_ob(1, where='fixation')
        self.add_ob(self.represent(v1), 'stimulus1', where='stimulus')
        self.add_ob(self.represent(v2), 'stimulus2', where='stimulus')
        self.set_ob(0, 'decision')
        self.add_randn(0, self.sigma, ['stimulus1', 'stimulus2'])

        self.set_groundtruth(trial['ground_truth'], 'decision')

        return trial

    def represent(self, v):
        """Input representation of stimulus value."""
        # Scale to be between 0 and 1
        v_ = (v - self.vmin) / (self.vmax - self.vmin)
        # positive encoding, between 0.5 and 1
        return (1 + v_) / 2

    def _step(self, action):
        """Advance one timestep: score *action* against the current period's
        rules and return (observation, reward, done, info)."""
        # ---------------------------------------------------------------------
        # Reward and inputs
        # ---------------------------------------------------------------------
        new_trial = False
        gt = self.gt_now
        ob = self.ob_now
        # rewards
        reward = 0
        if self.in_period('fixation'):
            if action != 0:
                # Breaking fixation is penalized; optionally ends the trial.
                new_trial = self.abort
                reward = self.rewards['abort']
        elif self.in_period('decision'):
            if action != 0:
                new_trial = True
                if action == gt:
                    reward = self.rewards['correct']
                    self.performance = 1
                else:
                    reward = self.rewards['fail']

        return ob, reward, False, {'new_trial': new_trial, 'gt': gt}
| 31.894309 | 79 | 0.513892 |
b90eda429a3ee4743ecb381d91eeb1a8a043b12f | 4,193 | py | Python | cinder/volume/drivers/macrosan/config.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z | cinder/volume/drivers/macrosan/config.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z | cinder/volume/drivers/macrosan/config.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z | # Copyright (c) 2019 MacroSAN Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume Drivers Config Registration documents for MacroSAN SAN."""
from oslo_config import cfg
macrosan_opts = [
# sdas login_info
cfg.ListOpt('macrosan_sdas_ipaddrs',
help="MacroSAN sdas devices' ip addresses"),
cfg.StrOpt('macrosan_sdas_username',
help="MacroSAN sdas devices' username"),
cfg.StrOpt('macrosan_sdas_password',
secret=True,
help="MacroSAN sdas devices' password"),
# replication login_info
cfg.ListOpt('macrosan_replication_ipaddrs',
help="MacroSAN replication devices' ip addresses"),
cfg.StrOpt('macrosan_replication_username',
help="MacroSAN replication devices' username"),
cfg.StrOpt('macrosan_replication_password',
secret=True,
help="MacroSAN replication devices' password"),
cfg.ListOpt('macrosan_replication_destination_ports',
sample_default="eth-1:0/eth-1:1, eth-2:0/eth-2:1",
help="Slave device"),
# device_features
cfg.StrOpt('macrosan_pool', quotes=True,
help='Pool to use for volume creation'),
cfg.IntOpt('macrosan_thin_lun_extent_size',
default=8,
help="Set the thin lun's extent size"),
cfg.IntOpt('macrosan_thin_lun_low_watermark',
default=5,
help="Set the thin lun's low watermark"),
cfg.IntOpt('macrosan_thin_lun_high_watermark',
default=20,
help="Set the thin lun's high watermark"),
cfg.BoolOpt('macrosan_force_unmap_itl',
default=True,
help="Force disconnect while deleting volume"),
cfg.FloatOpt('macrosan_snapshot_resource_ratio',
default=1.0,
help="Set snapshot's resource ratio"),
cfg.BoolOpt('macrosan_log_timing',
default=True,
help="Whether enable log timing"),
# fc connection
cfg.IntOpt('macrosan_fc_use_sp_port_nr',
default=1,
max=4,
help="The use_sp_port_nr parameter is the number of "
"online FC ports used by the single-ended memory "
"when the FC connection is established in the switch "
"non-all-pass mode. The maximum is 4"),
cfg.BoolOpt('macrosan_fc_keep_mapped_ports',
default=True,
help="In the case of an FC connection, the configuration "
"item associated with the port is maintained."),
# iscsi connection
cfg.ListOpt('macrosan_client',
help="""Macrosan iscsi_clients list.
You can configure multiple clients.
You can configure it in this format:
(host; client_name; sp1_iscsi_port; sp2_iscsi_port),
(host; client_name; sp1_iscsi_port; sp2_iscsi_port)
Important warning, Client_name has the following requirements:
[a-zA-Z0-9.-_:], the maximum number of characters is 31
E.g:
(controller1; device1; eth-1:0; eth-2:0),
(controller2; device2; eth-1:0/eth-1:1; eth-2:0/eth-2:1),
"""),
cfg.StrOpt('macrosan_client_default',
help="This is the default connection ports' name for iscsi. "
"This default configuration is used "
"when no host related information is obtained."
"E.g: eth-1:0/eth-1:1; eth-2:0/eth-2:1")
]
| 45.576087 | 78 | 0.611018 |
5babfc065e03f7fcb31d87ed78c899bab47b0ec1 | 764 | py | Python | setup.py | disqus/menagerie | 97f15624aeabfdf5378ba8232f6051695e4f1d6d | [
"Apache-2.0"
] | 18 | 2015-03-15T13:50:34.000Z | 2020-09-14T09:04:21.000Z | setup.py | disqus/menagerie | 97f15624aeabfdf5378ba8232f6051695e4f1d6d | [
"Apache-2.0"
] | 1 | 2015-03-11T16:44:35.000Z | 2015-03-12T10:42:00.000Z | setup.py | disqus/menagerie | 97f15624aeabfdf5378ba8232f6051695e4f1d6d | [
"Apache-2.0"
] | null | null | null | import sys
from setuptools import find_packages, setup
try:
import multiprocessing # noqa
except ImportError:
pass
install_requires = [
'Django>=1.2,<1.5',
'kazoo>=0.5,<0.9',
]
tests_require = [
'exam==0.6.2',
'nose',
'unittest2',
]
setup_requires = []
if 'nosetests' in sys.argv[1:]:
setup_requires.append('nose')
setup(
name='menagerie',
version='0.1.0',
url='http://github.com/disqus/menagerie',
author='ted kaemming, disqus',
author_email='ted@disqus.com',
packages=find_packages(exclude=('tests',)),
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
test_suite='nose.collector',
license='Apache License 2.0',
zip_safe=False,
)
| 19.589744 | 47 | 0.662304 |
471787d4a711c79ec9062babb411bd653b0297f2 | 1,954 | py | Python | lib/surface/compute/vpn_gateways/list.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/vpn_gateways/list.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/surface/compute/vpn_gateways/list.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list VPN Gateways."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import filter_rewrite
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute.vpn_gateways import vpn_gateways_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.vpn_gateways import flags
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class List(base.ListCommand):
  """List Google Compute Engine Highly Available VPN Gateways."""
  @staticmethod
  def Args(parser):
    # Register the default output columns and the regions scoping argument.
    parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT)
    lister.AddRegionsArg(parser)
  def Run(self, args):
    """Issues the request to list all VPN Gateways."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    helper = vpn_gateways_utils.VpnGatewayHelper(holder)
    project = properties.VALUES.core.project.GetOrFail()
    # Split the filter into a client-side part (written back to args.filter)
    # and a server-side expression passed on with the request.
    args.filter, filter_expr = filter_rewrite.Rewriter().Rewrite(args.filter)
    return helper.List(project=project, filter_expr=filter_expr)
# Attach the standard regional-lister help text for this resource type.
List.detailed_help = base_classes.GetRegionalListerHelp(
    'Highly Available VPN Gateways')
| 38.313725 | 77 | 0.790174 |
c917240c01e1db24b2d3a5c5a56ce45dcc13a604 | 8,271 | py | Python | cirq/experiments/google_v2_supremacy_circuit.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | 1 | 2020-05-23T08:23:26.000Z | 2020-05-23T08:23:26.000Z | cirq/experiments/google_v2_supremacy_circuit.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | cirq/experiments/google_v2_supremacy_circuit.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Callable, Iterable, TypeVar, cast, Sequence
from cirq.circuits import InsertStrategy
from cirq import circuits, devices, google, ops
def generate_supremacy_circuit_google_v2(qubits: Iterable[devices.GridQubit],
                                         cz_depth: int,
                                         seed: int) -> circuits.Circuit:
    """
    Generates Google Random Circuits v2 as in github.com/sboixo/GRCS cz_v2.
    See also https://arxiv.org/abs/1807.10749
    Args:
        qubits: qubit grid in which to generate the circuit.
        cz_depth: number of layers with CZ gates.
        seed: seed for the random instance.
    Returns:
        A circuit corresponding to instance
        inst_{n_rows}x{n_cols}_{cz_depth+1}_{seed}
        The mapping of qubits is cirq.GridQubit(j,k) -> q[j*n_cols+k]
        (as in the QASM mapping)
    """
    non_diagonal_gates = [ops.common_gates.X**(1/2), ops.common_gates.Y**(1/2)]
    # Seeded RNG makes the instance reproducible; consumed only when picking
    # a random non-diagonal gate below, so consumption order matters.
    rand_gen = random.Random(seed).random
    circuit = circuits.Circuit()
    # Add an initial moment of Hadamards
    circuit.append(ops.common_gates.H(qubit) for qubit in qubits)
    layer_index = 0
    if cz_depth:
        layer_index = _add_cz_layer(layer_index, circuit)
        # In the first moment, add T gates when possible
        for qubit in qubits:
            if not circuit.operation_at(qubit, 1):
                circuit.append(ops.common_gates.T(qubit),
                               strategy=InsertStrategy.EARLIEST)
    for moment_index in range(2, cz_depth+1):
        layer_index = _add_cz_layer(layer_index, circuit)
        # Add single qubit gates in the same moment
        for qubit in qubits:
            if not circuit.operation_at(qubit, moment_index):
                last_op = circuit.operation_at(qubit, moment_index-1)
                if last_op:
                    gate = cast(ops.GateOperation, last_op).gate
                    # Add a random non diagonal gate after a CZ
                    if gate == ops.CZ:
                        circuit.append(_choice(rand_gen,
                                               non_diagonal_gates).on(qubit),
                                       strategy=InsertStrategy.EARLIEST)
                    # Add a T gate after a non diagonal gate
                    elif not gate == ops.T:
                        circuit.append(ops.common_gates.T(qubit),
                                       strategy=InsertStrategy.EARLIEST)
    # Add a final moment of Hadamards
    circuit.append(ops.common_gates.H(qubit) for qubit in qubits)
    return circuit
def generate_supremacy_circuit_google_v2_grid(n_rows: int, n_cols: int,
                                              cz_depth: int, seed: int
                                              ) -> circuits.Circuit:
    """
    Generates Google Random Circuits v2 as in github.com/sboixo/GRCS cz_v2.
    See also https://arxiv.org/abs/1807.10749
    Args:
        n_rows: number of rows of a 2D lattice.
        n_cols: number of columns.
        cz_depth: number of layers with CZ gates.
        seed: seed for the random instance.
    Returns:
        A circuit corresponding to instance
        inst_{n_rows}x{n_cols}_{cz_depth+1}_{seed}
        The mapping of qubits is cirq.GridQubit(j,k) -> q[j*n_cols+k]
        (as in the QASM mapping)
    """
    # Build the full rectangular lattice in row-major order, then delegate.
    lattice = [
        devices.GridQubit(row, col)
        for row in range(n_rows)
        for col in range(n_cols)
    ]
    return generate_supremacy_circuit_google_v2(lattice, cz_depth, seed)
def generate_supremacy_circuit_google_v2_bristlecone(n_rows: int,
                                                     cz_depth: int, seed: int
                                                     ) -> circuits.Circuit:
    """
    Generates Google Random Circuits v2 in Bristlecone.
    See also https://arxiv.org/abs/1807.10749
    Args:
        n_rows: number of rows in a Bristlecone lattice.
          Note that we do not include single qubit corners.
        cz_depth: number of layers with CZ gates.
        seed: seed for the random instance.
    Returns:
        A circuit with given size and seed.
    """
    def get_qubits(n_rows):
        # Select the sub-lattice of the Bristlecone device used for an
        # n_rows instance.
        def count_neighbors(qubits, qubit):
            """Counts the qubits that the given qubit can interact with."""
            possibles = [
                devices.GridQubit(qubit.row + 1, qubit.col),
                devices.GridQubit(qubit.row - 1, qubit.col),
                devices.GridQubit(qubit.row, qubit.col + 1),
                devices.GridQubit(qubit.row, qubit.col - 1),
                ]
            return len(list(e for e in possibles if e in qubits))
        assert 1 <= n_rows <= 11
        max_row = n_rows - 1
        dev = google.Bristlecone
        # we need a consistent order of qubits
        qubits = list(dev.qubits)
        qubits.sort()
        # Keep only qubits inside the row/diagonal bounds for this size.
        qubits = [q for q in qubits
                  if q.row <= max_row and q.row + q.col < n_rows + 6
                  and q.row - q.col < n_rows - 5]
        # Drop qubits with at most one neighbour (the single-qubit corners
        # mentioned in the docstring).
        qubits = [q for q in qubits if count_neighbors(qubits, q) > 1]
        return qubits
    qubits = get_qubits(n_rows)
    return generate_supremacy_circuit_google_v2(qubits, cz_depth, seed)
T = TypeVar('T')
def _choice(rand_gen: Callable[[], float], sequence: Sequence[T]) -> T:
"""Choose a random element from a non-empty sequence.
Use this instead of random.choice, with random.random(), for reproducibility
"""
return sequence[int(rand_gen() * len(sequence))]
def _add_cz_layer(layer_index: int, circuit: circuits.Circuit) -> int:
    """Append the next non-empty CZ layer to *circuit*.

    Layer indices whose CZ pattern has no edges on this qubit set are
    skipped. Returns the index of the layer following the appended one.
    """
    cz_layer = None
    while not cz_layer:
        qubits = cast(Iterable[devices.GridQubit], circuit.all_qubits())
        cz_layer = list(_make_cz_layer(qubits, layer_index))
        layer_index += 1
    circuit.append(cz_layer)
    return layer_index
def _make_cz_layer(qubits: Iterable[devices.GridQubit], layer_index: int
                   ) -> Iterable[ops.Operation]:
    """
    Each layer index corresponds to a shift/transpose of this CZ pattern:
        ●───●    ●    ●    ●───●    ●    ●   . . .
        ●    ●    ●───●    ●    ●    ●───●   . . .
        ●───●    ●    ●    ●───●    ●    ●   . . .
        ●    ●    ●───●    ●    ●    ●───●   . . .
        ●───●    ●    ●    ●───●    ●    ●   . . .
        ●    ●    ●───●    ●    ●    ●───●   . . .
        .    .    .    .    .    .    .    .   .
        .    .    .    .    .    .    .    .     .
        .    .    .    .    .    .    .    .       .
    Labelled edges, showing the exact index-to-CZs mapping (mod 8):
         ●─0─●─2─●─4─●─6─●─0─. . .
        3│  7│  3│  7│  3│
         ●─4─●─6─●─0─●─2─●─4─. . .
        1│  5│  1│  5│  1│
         ●─0─●─2─●─4─●─6─●─0─. . .
        7│  3│  7│  3│  7│
         ●─4─●─6─●─0─●─2─●─4─. . .
        5│  1│  5│  1│  5│
         ●─0─●─2─●─4─●─6─●─0─. . .
        3│  7│  3│  7│  3│
         .   .   .   .   .   .
         .   .   .   .   .     .
         .   .   .   .   .       .
    Note that, for small devices, some layers will be empty because the layer
    only contains edges not present on the device.
    """
    # map to an internal layer index to match the cycle order of public circuits
    layer_index_map = [0, 3, 2, 1, 4, 7, 6, 5]
    internal_layer_index = layer_index_map[layer_index % 8]
    # Edge direction: pair each qubit with its neighbour one row or one
    # column over, depending on the parity of the internal index.
    dir_row = internal_layer_index % 2
    dir_col = 1 - dir_row
    # One of four diagonal offsets selects which parallel edges are active.
    shift = (internal_layer_index >> 1) % 4
    for q in qubits:
        q2 = devices.GridQubit(q.row + dir_row, q.col + dir_col)
        if q2 not in qubits:
            continue  # This edge isn't on the device.
        if (q.row * (2 - dir_row) + q.col * (2 - dir_col)) % 4 != shift:
            continue  # No CZ along this edge for this layer.
        yield ops.common_gates.CZ(q, q2)
| 36.117904 | 80 | 0.56015 |
583991e53bd59b60fbc4278b31de7411fbcaa6dd | 5,120 | py | Python | src/util.py | pyomeca/pyosim_aws | 91c225b597d52523a1cf30839082c134fb88a4c9 | [
"Apache-2.0"
] | 2 | 2020-09-14T22:22:06.000Z | 2021-06-07T20:44:09.000Z | src/util.py | pyomeca/pyosim_aws | 91c225b597d52523a1cf30839082c134fb88a4c9 | [
"Apache-2.0"
] | 1 | 2020-04-03T16:41:23.000Z | 2020-04-03T16:41:23.000Z | src/util.py | pyomeca/pyosim_aws | 91c225b597d52523a1cf30839082c134fb88a4c9 | [
"Apache-2.0"
] | 1 | 2019-11-30T23:27:02.000Z | 2019-11-30T23:27:02.000Z | import numpy as np
import pandas as pd
import numpy as np
import altair as alt
def parse_conditions(df, filename_col="filename", prefix="", suffix=""):
    """Expand encoded trial filenames into condition columns.

    The stored filename column is stripped of *prefix*/*suffix* first; the
    remaining columns are decoded from fixed positions of *filename_col*:
    chars 0-3 -> participant (lower-cased), char 4 -> men (H/M = 1, F = 0),
    digits after char 4 up to the next 'H' -> mass, third-from-last char ->
    height, last char -> n_trial. Assignment order matters: pandas
    evaluates ``assign`` kwargs sequentially, so later columns see the
    already-stripped ``filename`` when filename_col == "filename".
    """
    return df.assign(
        filename=lambda d: d[filename_col]
        .str.replace(prefix, "")
        .str.replace(suffix, ""),
        participant=lambda d: d[filename_col].str[:4].str.lower(),
        men=lambda d: d[filename_col].str[4].replace({"H": 1, "F": 0, "M": 1}),
        height=lambda d: d[filename_col].str[-3].astype(int),
        mass=lambda d: d[filename_col].str[5:].str.split("H").str[0].astype(int),
        n_trial=lambda d: d[filename_col].str[-1].astype(int),
    )
def condition_counter(d, drop_on='filename'):
    """Print participant and per-condition counts for the deduplicated data.

    Rows are first deduplicated on *drop_on*; counts are printed for each
    of the men/height/mass groupings and the men-by-mass cross table.
    """
    separator = "-" * 10
    unique = d.drop_duplicates(drop_on)
    print(f"n. participants: {unique['participant'].nunique()}")
    print(separator)
    for grouping in ["men", "height", "mass", ["men", "mass"]]:
        print(unique.groupby(grouping).size().to_string())
        print(separator)
def random_balanced_design(d, params, random_state, participant=False, trial=False):
    """Sample a design balanced across the levels of *params*.

    Every group defined by *params* contributes the same number of rows
    (the size of the smallest group), sampled reproducibly with
    *random_state*. Only the filename (and optionally participant) columns
    are returned. ``trial`` is currently unused, kept for API stability.
    """
    kept_cols = ["filename"]
    if participant:
        kept_cols = ["filename", "participant"]
    grouped = d.drop_duplicates(kept_cols).groupby(params)
    n_per_group = grouped.size().min()
    samples = []
    for _, frame in grouped:
        samples.append(frame.sample(n_per_group, random_state=random_state)[kept_cols])
    return pd.concat(samples)
def get_spm_cluster(spm, labels=None, mult=1):
    """Flatten SPM inference results into a table of significant clusters.

    Each cluster becomes one row with its effect name (optionally renamed
    through *labels*), p-value and endpoints scaled by *mult*.
    """
    label_map = labels if labels else {}
    rows = [
        pd.Series(
            {
                "effect": effect_result.effect,
                "p": cluster.P,
                "start": cluster.endpoints[0] * mult,
                "end": cluster.endpoints[1] * mult,
            }
        )
        for effect_result in spm
        for cluster in effect_result.clusters
    ]
    return pd.concat(rows, axis=1).T.assign(
        effect=lambda d: d["effect"].replace(label_map)
    )
def ridge_plot(d, value, groupby, step=30, overlap=0.8, sort=None):
    """Build an Altair ridgeline plot of the distribution of *value*.

    One density row per level of *groupby*; *step* sets the row height,
    *overlap* how far adjacent rows overlap, and *sort* an optional
    explicit row order. Rows are filled by their group mean of *value*.
    """
    return (
        alt.Chart(d)
        .transform_joinaggregate(mean_value=f"mean({value})", groupby=[groupby])
        .transform_bin(["bin_max", "bin_min"], value)
        .transform_aggregate(
            value="count()", groupby=[groupby, "mean_value", "bin_min", "bin_max"]
        )
        .transform_impute(
            impute="value", groupby=[groupby, "mean_value"], key="bin_min", value=0
        )
        .mark_area(
            interpolate="monotone", fillOpacity=0.8, stroke="lightgray", strokeWidth=0.5
        )
        .encode(
            alt.X("bin_min:Q", bin="binned", title='activation', axis=alt.Axis(format='%', labelFlush=False)),
            alt.Y("value:Q", scale=alt.Scale(range=[step, -step * overlap]), axis=None),
            alt.Fill(
                "mean_value:Q",
                legend=None,
                scale=alt.Scale(
                    domain=[d[value].max(), d[value].min()], scheme="redyellowblue"
                ),
            ),
            alt.Row(
                f"{groupby}:N",
                title=None,
                sort=alt.SortArray(sort) if sort else None,
                header=alt.Header(labelAngle=0, labelAlign="right", format="%B"),
            ),
        )
        .properties(bounds="flush", height=step)
        .configure_facet(spacing=0)
        .configure_view(stroke=None)
    )
def cohend(a, b):
    """Cohen's d effect size between samples *a* and *b*.

    Uses the pooled standard deviation of the two samples and returns
    ``(d, label)`` where label classifies |d| with the usual thresholds:
    >= 0.8 "large", >= 0.5 "medium", >= 0.2 "small", otherwise "no".
    """
    pooled_std = np.sqrt((a.std() ** 2 + b.std() ** 2) / 2)
    d = (a.mean() - b.mean()) / pooled_std
    magnitude = np.abs(d)
    for threshold, label in ((0.8, "large"), (0.5, "medium"), (0.2, "small")):
        if magnitude >= threshold:
            return d, label
    return d, "no"
def describe_clusters(
clusters,
data,
effect,
round=["mean diff", "cohen d"],
):
c = clusters.assign(
**{i: pd.to_numeric(clusters[i]) for i in ["p", "start", "end"]}
)
c[["start", "end"]] = c[["start", "end"]].round(2)
previous_end = {value: None for key, value in effect.items()}
for i, icluster in c.iterrows():
if icluster["end"] - icluster["start"] < 0.05:
c = c.drop(i, axis="index")
continue
s = (
previous_end[effect[icluster["effect"]]]
if previous_end[effect[icluster["effect"]]]
and icluster["start"] - previous_end[effect[icluster["effect"]]] < 0.05
else icluster["start"]
)
e = icluster["end"]
a, b = [
i.values
for _, i in data.query("@s < index < @e").groupby(
effect[icluster["effect"]]
)["value"]
]
columns = data[effect[icluster["effect"]]].unique()
c.loc[i, "diff desc"] = f"{columns[0]} - {columns[1]}"
c.loc[i, "mean diff"] = a.mean() - b.mean()
cohen_d, cohen_effect = cohend(a, b)
c.loc[i, "cohen d"] = cohen_d
c.loc[i, "cohen effect"] = cohen_effect
previous_end[effect[icluster["effect"]]] = e
if c.empty:
return pd.DataFrame()
if round:
c[round] = c[round].round(2)
return c | 33.684211 | 110 | 0.532617 |
46d3dcd26b97747db61943c6fd427a5502ebc4cd | 696 | py | Python | Gentree/cfg.py | hoefkensj/gentree | 2cf19cb534ac3c756cace2a79e0725a1bfda5f81 | [
"MIT"
] | null | null | null | Gentree/cfg.py | hoefkensj/gentree | 2cf19cb534ac3c756cace2a79e0725a1bfda5f81 | [
"MIT"
] | null | null | null | Gentree/cfg.py | hoefkensj/gentree | 2cf19cb534ac3c756cace2a79e0725a1bfda5f81 | [
"MIT"
] | null | null | null | def spawn(file_name):
with open(f'{file_name}.ini' , 'w') as file:
file.write(f'[DEFAULT]\nfilename\t:\t{file_name}\nfiletype\t:\tini')
def create(file_Config, dct_Config, cfg_config):
	"""Fill *cfg_config* from the dict *dct_Config* and persist to *file_Config*."""
	populated = tocfg(dct_Config, cfg_config)
	write(file_Config, populated)
def read(file_Config, cfg_Config):
	"""Load the INI file at *file_Config* into *cfg_Config*.

	Returns the same parser object so calls can be chained.
	"""
	cfg_Config.read(file_Config)
	return cfg_Config
def todct(cfg, dct=None):
	"""Convert a ConfigParser-like mapping into a plain nested dict.

	Each section name maps to a dict of its options. A fresh dict is
	created per call; the previous ``dct={}`` default was a shared mutable
	default, so sections from earlier calls leaked into later results.
	An explicit *dct* is still updated in place and returned, as before.
	"""
	if dct is None:
		dct = {}
	for section in cfg.keys():
		dct[section] = dict(cfg[section])
	return dct
def tocfg(dct, cfg):
	"""Load the nested dict *dct* into the parser *cfg* and return the parser.

	Previously this returned the input dict instead of the parser, so
	``create()`` passed a plain dict to ``write()``, which then failed on
	``dict.write``. Returning the populated parser fixes that pipeline.
	"""
	cfg.read_dict(dct)
	return cfg
def write(file_Config, cfg_Config):
	"""Serialise the parser *cfg_Config* to the INI file at *file_Config*."""
	with open(file_Config, 'w') as fh:
		cfg_Config.write(fh)
def main():
	# Return the spawn function; the __main__ block below calls it.
	return spawn
if __name__ == '__main__':
	# main() returns the spawn function; rebinding then calling it
	# writes 'test.ini' with default contents.
	main = main()
	main('test')
| 19.885714 | 70 | 0.718391 |
acebe039ac554a40c93300e61eb0ac13d0bb90a3 | 3,509 | py | Python | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | from os import path
import re
from typing import NamedTuple
from urllib.parse import urlparse
from json_ref_dict.exceptions import ReferenceParseError
# Splits "<base>/<name>#<pointer>" into named groups; the base part is
# optional and the pointer must start with "/".
JSON_REF_REGEX = r"^((?P<uri_base>.*)\/)?(?P<uri_name>.*)#(?P<pointer>\/.*)"
class URI(NamedTuple):
    """URI for a schema or subschema."""
    # Directory-like part of the document URI (may be empty).
    uri_base: str
    # Document name within the base (may be empty for local references).
    uri_name: str
    # JSON pointer within the document; always begins with "/".
    pointer: str
    @classmethod
    def from_string(cls, string: str) -> "URI":
        """Construct from string."""
        # Normalise so every reference carries an explicit "#/" pointer
        # part before matching the regex.
        if "#" not in string:
            string += "#/"
        if string.endswith("#"):
            string += "/"
        match = re.match(JSON_REF_REGEX, string, re.DOTALL)
        if not match:
            raise ReferenceParseError(
                f"Couldn't parse '{string}' as a valid reference. "
                "References must be of the format "
                "{base_uri}#{json_pointer}, where 'json_pointer' "
                "begins with '/'"
            )
        return URI(**match.groupdict())
    @property
    def root(self):
        """String representation excluding the JSON pointer."""
        return path.join(*filter(None, [self.uri_base, self.uri_name]))
    def _get_relative(self, reference: str) -> "URI":
        """Get a new URI relative to the current root."""
        if not isinstance(reference, str):
            raise TypeError(f"Got invalid value for '$ref': {reference}.")
        if not reference.split("#")[0]:  # Local reference.
            # Same document, new pointer (defaulting to the root pointer).
            reference = reference.split("#")[1] or "/"
            return URI(self.uri_base, self.uri_name, reference)
        # Remote reference.
        return self.from_string(
            path.join(*filter(None, [self.uri_base, reference]))
        )
    def relative(self, reference: str) -> "URI":
        """Get a new URI relative to the current root.

        :raises ReferenceParseError: if relative reference is equal
            to the current reference.
        :return: The URI of the reference relative to the current URI.
        """
        if is_absolute(reference):
            relative_uri = URI.from_string(reference)
        else:
            relative_uri = self._get_relative(reference)
        if relative_uri == self:
            # Guard against infinite resolution loops.
            raise ReferenceParseError(
                f"Reference: '{reference}' from context '{self}' is "
                "self-referential. Cannot resolve."
            )
        return relative_uri
    def get(self, *pointer_segments: str) -> "URI":
        """Get a new URI representing a member of the current URI."""
        return self.__class__(
            uri_base=self.uri_base,
            uri_name=self.uri_name,
            pointer=path.join(self.pointer, *pointer_segments),
        )
    def back(self) -> "URI":
        """Pop a segment from the pointer."""
        segments = self.pointer.split("/")
        pointer = path.join("/", *segments[:-1])
        return self.__class__(
            uri_base=self.uri_base, uri_name=self.uri_name, pointer=pointer
        )
    def __repr__(self) -> str:
        """String representation of the URI."""
        return self.root + f"#{self.pointer}"
def is_absolute(ref: str) -> bool:
    """Return True when *ref* carries a URI scheme (e.g. ``http://...``)."""
    return bool(urlparse(ref).scheme)
def parse_segment(segment: str) -> str:
    """Escape a single JSON pointer segment, as per RFC 6901.

    ``~`` becomes ``~0`` and ``/`` becomes ``~1``; a single-pass
    translation is equivalent to the escape-``~``-first ordering the
    RFC requires (https://tools.ietf.org/html/rfc6901).
    """
    return segment.translate(str.maketrans({"~": "~0", "/": "~1"}))
| 32.794393 | 76 | 0.589057 |
5d8aaf65d57e463c41d33fd140cef0a69eec3eed | 27,160 | py | Python | ckan/logic/schema.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | 1 | 2020-08-05T21:21:56.000Z | 2020-08-05T21:21:56.000Z | ckan/logic/schema.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | null | null | null | ckan/logic/schema.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
from functools import wraps
import inspect
from six import text_type
import ckan.model
import ckan.plugins as plugins
from ckan.logic import get_validator
def validator_args(fn):
    u'''collect validator names from argument names
    and pass them to wrapped function'''
    # Each parameter name of the decorated schema function is treated as the
    # name of a registered validator, resolved via get_validator() at call
    # time. NOTE(review): inspect.getargspec is deprecated and removed in
    # Python 3.11; getfullargspec/signature is the replacement - confirm the
    # supported interpreter range (six suggests py2 support) before switching.
    args = inspect.getargspec(fn).args
    @wraps(fn)
    def wrapper():
        kwargs = {
            arg: get_validator(arg)
            for arg in args}
        return fn(**kwargs)
    return wrapper
@validator_args
def default_resource_schema(
        ignore_empty, unicode_safe, ignore, ignore_missing,
        remove_whitespace, if_empty_guess_format, clean_format, isodate,
        int_validator, extras_unicode_convert, keep_extras):
    '''Schema for validating a resource dict when a dataset is created.'''
    return {
        'id': [ignore_empty, unicode_safe],
        'package_id': [ignore],
        'url': [ignore_missing, unicode_safe, remove_whitespace],
        'description': [ignore_missing, unicode_safe],
        'format': [if_empty_guess_format, ignore_missing, clean_format,
                   unicode_safe],
        'hash': [ignore_missing, unicode_safe],
        'state': [ignore],
        'position': [ignore],
        'name': [ignore_missing, unicode_safe],
        'resource_type': [ignore_missing, unicode_safe],
        'url_type': [ignore_missing, unicode_safe],
        'mimetype': [ignore_missing, unicode_safe],
        'mimetype_inner': [ignore_missing, unicode_safe],
        'cache_url': [ignore_missing, unicode_safe],
        'size': [ignore_missing, int_validator],
        'created': [ignore_missing, isodate],
        'last_modified': [ignore_missing, isodate],
        'cache_last_updated': [ignore_missing, isodate],
        'tracking_summary': [ignore_missing],
        'datastore_active': [ignore_missing],
        '__extras': [ignore_missing, extras_unicode_convert, keep_extras],
    }
@validator_args
def default_update_resource_schema():
    '''Schema for a resource dict on resource update.

    Currently identical to the create-time schema; kept as a separate
    function so update-only rules can be added without affecting creation.
    The previous signature requested an `ignore` validator it never used,
    causing a pointless validator lookup on every call.
    '''
    return default_resource_schema()
@validator_args
def default_tags_schema(
        not_missing, not_empty, unicode_safe, tag_length_validator,
        tag_name_validator, ignore_missing, vocabulary_id_exists,
        ignore):
    '''Schema for validating a single tag dict.'''
    return {
        'name': [not_missing,
                 not_empty,
                 unicode_safe,
                 tag_length_validator,
                 tag_name_validator,
                 ],
        'vocabulary_id': [ignore_missing,
                          unicode_safe,
                          vocabulary_id_exists],
        'revision_timestamp': [ignore],
        'state': [ignore],
        'display_name': [ignore],
    }
@validator_args
def default_create_tag_schema(
        not_missing, not_empty, unicode_safe, vocabulary_id_exists,
        tag_not_in_vocabulary, empty):
    '''Schema for tag_create: vocabulary tags only, id is server-assigned.'''
    schema = default_tags_schema()
    # When creating a tag via the tag_create() logic action function, a
    # vocabulary_id _must_ be given (you cannot create free tags via this
    # function).
    schema['vocabulary_id'] = [not_missing, not_empty, unicode_safe,
                               vocabulary_id_exists, tag_not_in_vocabulary]
    # You're not allowed to specify your own ID when creating a tag.
    schema['id'] = [empty]
    return schema
@validator_args
def default_create_package_schema(
        duplicate_extras_key, ignore, empty_if_not_sysadmin, ignore_missing,
        unicode_safe, package_id_does_not_exist, not_empty, name_validator,
        package_name_validator, if_empty_same_as, email_validator,
        package_version_validator, ignore_not_package_admin,
        boolean_validator, datasets_with_no_organization_cannot_be_private,
        empty, tag_string_convert, owner_org_validator, no_http):
    '''Schema for validating a dataset dict passed to package_create.'''
    return {
        '__before': [duplicate_extras_key, ignore],
        'id': [empty_if_not_sysadmin, ignore_missing, unicode_safe,
               package_id_does_not_exist],
        'name': [
            not_empty, unicode_safe, name_validator, package_name_validator],
        'title': [if_empty_same_as("name"), unicode_safe],
        'author': [ignore_missing, unicode_safe],
        'author_email': [ignore_missing, unicode_safe, email_validator],
        'maintainer': [ignore_missing, unicode_safe],
        'maintainer_email': [ignore_missing, unicode_safe, email_validator],
        'license_id': [ignore_missing, unicode_safe],
        'notes': [ignore_missing, unicode_safe],
        'url': [ignore_missing, unicode_safe],
        'version': [ignore_missing, unicode_safe, package_version_validator],
        'state': [ignore_not_package_admin, ignore_missing],
        'type': [ignore_missing, unicode_safe],
        'owner_org': [owner_org_validator, unicode_safe],
        'log_message': [ignore_missing, unicode_safe, no_http],
        'private': [ignore_missing, boolean_validator,
                    datasets_with_no_organization_cannot_be_private],
        '__extras': [ignore],
        '__junk': [empty],
        'resources': default_resource_schema(),
        'tags': default_tags_schema(),
        'tag_string': [ignore_missing, tag_string_convert],
        'extras': default_extras_schema(),
        'save': [ignore],
        'return_to': [ignore],
        'relationships_as_object': default_relationship_schema(),
        'relationships_as_subject': default_relationship_schema(),
        'groups': {
            'id': [ignore_missing, unicode_safe],
            'name': [ignore_missing, unicode_safe],
            'title': [ignore_missing, unicode_safe],
            '__extras': [ignore],
        }
    }
@validator_args
def default_update_package_schema(
        ignore_missing, package_id_not_changed, name_validator,
        package_name_validator, unicode_safe, owner_org_validator):
    '''Schema for package_update: id and name become optional identifiers.'''
    schema = default_create_package_schema()
    schema['resources'] = default_update_resource_schema()
    # Users can (optionally) supply the package id when updating a package, but
    # only to identify the package to be updated, they cannot change the id.
    schema['id'] = [ignore_missing, package_id_not_changed]
    # Supplying the package name when updating a package is optional (you can
    # supply the id to identify the package instead).
    schema['name'] = [ignore_missing, name_validator, package_name_validator,
                      unicode_safe]
    # Supplying the package title when updating a package is optional, if it's
    # not supplied the title will not be changed.
    schema['title'] = [ignore_missing, unicode_safe]
    schema['owner_org'] = [ignore_missing, owner_org_validator, unicode_safe]
    return schema
@validator_args
def default_show_package_schema(
        keep_extras, ignore_missing, clean_format, unicode_safe, not_empty):
    '''Output schema for package_show: keeps read-only keys intact.'''
    schema = default_create_package_schema()
    # Don't strip ids from package dicts when validating them.
    schema['id'] = []
    schema.update({
        'tags': {'__extras': [keep_extras]}})
    # Add several keys to the 'resources' subschema so they don't get stripped
    # from the resource dicts by validation.
    schema['resources'].update({
        'format': [ignore_missing, clean_format, unicode_safe],
        'created': [ignore_missing],
        'position': [not_empty],
        'last_modified': [],
        'cache_last_updated': [],
        'package_id': [],
        'size': [],
        'state': [],
        'mimetype': [],
        'cache_url': [],
        'name': [],
        'mimetype_inner': [],
        'resource_type': [],
        'url_type': [],
    })
    schema.update({
        'state': [ignore_missing],
        'isopen': [ignore_missing],
        'license_url': [ignore_missing],
    })
    schema['groups'].update({
        'description': [ignore_missing],
        'display_name': [ignore_missing],
        'image_display_url': [ignore_missing],
    })
    # Remove validators for several keys from the schema so validation doesn't
    # strip the keys from the package dicts if the values are 'missing' (i.e.
    # None).
    schema['author'] = []
    schema['author_email'] = []
    schema['maintainer'] = []
    schema['maintainer_email'] = []
    schema['license_id'] = []
    schema['notes'] = []
    schema['url'] = []
    schema['version'] = []
    # Add several keys that are missing from default_create_package_schema(),
    # so validation doesn't strip the keys from the package dicts.
    schema['metadata_created'] = []
    schema['metadata_modified'] = []
    schema['creator_user_id'] = []
    schema['num_resources'] = []
    schema['num_tags'] = []
    schema['organization'] = []
    schema['owner_org'] = []
    schema['private'] = []
    schema['tracking_summary'] = [ignore_missing]
    schema['license_title'] = []
    return schema
@validator_args
def default_group_schema(
        ignore_missing, unicode_safe, ignore, not_empty, name_validator,
        group_name_validator, package_id_or_name_exists,
        no_loops_in_hierarchy, ignore_not_group_admin):
    '''Schema for validating a group (or organization) dict.'''
    return {
        'id': [ignore_missing, unicode_safe],
        'name': [
            not_empty, unicode_safe, name_validator, group_name_validator],
        'title': [ignore_missing, unicode_safe],
        'description': [ignore_missing, unicode_safe],
        'image_url': [ignore_missing, unicode_safe],
        'image_display_url': [ignore_missing, unicode_safe],
        'type': [ignore_missing, unicode_safe],
        'state': [ignore_not_group_admin, ignore_missing],
        'created': [ignore],
        'is_organization': [ignore_missing],
        'approval_status': [ignore_missing, unicode_safe],
        'extras': default_extras_schema(),
        '__extras': [ignore],
        '__junk': [ignore],
        'packages': {
            "id": [not_empty, unicode_safe, package_id_or_name_exists],
            "title": [ignore_missing, unicode_safe],
            "name": [ignore_missing, unicode_safe],
            "__extras": [ignore]
        },
        'users': {
            "name": [not_empty, unicode_safe],
            "capacity": [ignore_missing],
            "__extras": [ignore]
        },
        'groups': {
            "name": [not_empty, no_loops_in_hierarchy, unicode_safe],
            "capacity": [ignore_missing],
            "__extras": [ignore]
        }
    }
@validator_args
def group_form_schema(
        not_empty, unicode_safe, ignore_missing, ignore):
    '''Group schema variant used by the web form (names identify members).'''
    schema = default_group_schema()
    # schema['extras_validation'] = [duplicate_extras_key, ignore]
    schema['packages'] = {
        "name": [not_empty, unicode_safe],
        "title": [ignore_missing],
        "__extras": [ignore]
    }
    schema['users'] = {
        "name": [not_empty, unicode_safe],
        "capacity": [ignore_missing],
        "__extras": [ignore]
    }
    return schema
@validator_args
def default_update_group_schema(
        ignore_missing, group_name_validator, unicode_safe):
    '''Schema for group_update: the group name becomes optional.'''
    schema = default_group_schema()
    schema["name"] = [ignore_missing, group_name_validator, unicode_safe]
    return schema
@validator_args
def default_show_group_schema(
        keep_extras, ignore_missing):
    '''Output schema for group_show: keep read-only keys intact.'''
    schema = default_group_schema()
    # make default show schema behave like when run with no validation
    schema['num_followers'] = []
    schema['created'] = []
    schema['display_name'] = []
    schema['extras'] = {'__extras': [keep_extras]}
    schema['package_count'] = [ignore_missing]
    schema['packages'] = {'__extras': [keep_extras]}
    schema['state'] = []
    schema['users'] = {'__extras': [keep_extras]}
    return schema
@validator_args
def default_extras_schema(
        ignore, not_empty, extra_key_not_in_root_schema, unicode_safe,
        not_missing, ignore_missing):
    '''Schema for a single key/value extra attached to a dataset or group.'''
    return {
        'id': [ignore],
        'key': [not_empty, extra_key_not_in_root_schema, unicode_safe],
        'value': [not_missing],
        'state': [ignore],
        'deleted': [ignore_missing],
        'revision_timestamp': [ignore],
        '__extras': [ignore],
    }
@validator_args
def default_relationship_schema(
        ignore_missing, unicode_safe, not_empty, one_of, ignore):
    '''Schema for a package-to-package relationship dict.'''
    return {
        'id': [ignore_missing, unicode_safe],
        'subject': [ignore_missing, unicode_safe],
        'object': [ignore_missing, unicode_safe],
        'type': [not_empty,
                 one_of(ckan.model.PackageRelationship.get_all_types())],
        'comment': [ignore_missing, unicode_safe],
        'state': [ignore],
    }
@validator_args
def default_create_relationship_schema(
        empty, not_empty, unicode_safe, package_id_or_name_exists):
    '''Schema for creating a relationship: both endpoints must exist.'''
    schema = default_relationship_schema()
    schema['id'] = [empty]
    schema['subject'] = [not_empty, unicode_safe, package_id_or_name_exists]
    schema['object'] = [not_empty, unicode_safe, package_id_or_name_exists]
    return schema
@validator_args
def default_update_relationship_schema(
        ignore_missing, package_id_not_changed):
    '''Schema for updating a relationship: endpoints and type are fixed.'''
    schema = default_relationship_schema()
    schema['id'] = [ignore_missing, package_id_not_changed]
    # Todo: would like to check subject, object & type haven't changed, but
    # no way to do this in schema
    schema['subject'] = [ignore_missing]
    schema['object'] = [ignore_missing]
    schema['type'] = [ignore_missing]
    return schema
@validator_args
def default_user_schema(
        ignore_missing, unicode_safe, name_validator, user_name_validator,
        user_password_validator, user_password_not_empty, email_is_unique,
        ignore_not_sysadmin, not_empty, email_validator,
        user_about_validator, ignore, boolean_validator):
    '''Schema for validating a user dict.'''
    return {
        'id': [ignore_missing, unicode_safe],
        'name': [
            not_empty, name_validator, user_name_validator, unicode_safe],
        'fullname': [ignore_missing, unicode_safe],
        'password': [user_password_validator, user_password_not_empty,
                     ignore_missing, unicode_safe],
        'password_hash': [ignore_missing, ignore_not_sysadmin, unicode_safe],
        'email': [not_empty, email_validator, email_is_unique, unicode_safe],
        'about': [ignore_missing, user_about_validator, unicode_safe],
        'created': [ignore],
        'sysadmin': [ignore_missing, ignore_not_sysadmin],
        'apikey': [ignore],
        'reset_key': [ignore],
        'activity_streams_email_notifications': [ignore_missing,
                                                 boolean_validator],
        'state': [ignore_missing],
    }
@validator_args
def user_new_form_schema(
        user_both_passwords_entered,
        user_password_validator, user_passwords_match,
        email_is_unique):
    '''Schema for the new-user registration form.

    Extends the default user schema with the form's two password fields.
    The previously requested `unicode_safe` validator was never used, so
    it is no longer looked up.
    '''
    schema = default_user_schema()
    # NOTE(review): this replaces the richer email validator chain from
    # default_user_schema with uniqueness only - confirm that is intended.
    schema['email'] = [email_is_unique]
    schema['password1'] = [text_type, user_both_passwords_entered,
                           user_password_validator, user_passwords_match]
    schema['password2'] = [text_type]
    return schema
@validator_args
def user_edit_form_schema(
        ignore_missing, unicode_safe,
        user_password_validator, user_passwords_match, email_is_unique):
    '''Schema for the edit-user form: passwords are optional but must match.

    The previously requested `user_both_passwords_entered` validator was
    never used, so it is no longer looked up.
    '''
    schema = default_user_schema()
    # NOTE(review): this replaces the richer email validator chain from
    # default_user_schema with uniqueness only - confirm that is intended.
    schema['email'] = [email_is_unique]
    schema['password'] = [ignore_missing]
    schema['password1'] = [ignore_missing, unicode_safe,
                           user_password_validator, user_passwords_match]
    schema['password2'] = [ignore_missing, unicode_safe]
    return schema
@validator_args
def default_update_user_schema(
        ignore_missing, name_validator, user_name_validator,
        unicode_safe, user_password_validator, email_is_unique,
        not_empty, email_validator):
    '''Schema for user_update: name and password become optional.'''
    schema = default_user_schema()
    schema['name'] = [
        ignore_missing, name_validator, user_name_validator, unicode_safe]
    schema['email'] = [
        not_empty, email_validator, email_is_unique, unicode_safe]
    schema['password'] = [
        user_password_validator, ignore_missing, unicode_safe]
    return schema
@validator_args
def default_generate_apikey_user_schema(
        not_empty, unicode_safe):
    '''Schema for regenerating a user API key: apikey must be supplied.'''
    schema = default_update_user_schema()
    schema['apikey'] = [not_empty, unicode_safe]
    return schema
@validator_args
def default_user_invite_schema(not_empty):
    '''Schema for user_invite: email, group and role are all required.

    The previously requested `unicode_safe` validator was never used, so
    it is no longer looked up.
    '''
    return {
        'email': [not_empty, text_type],
        'group_id': [not_empty],
        'role': [not_empty],
    }
@validator_args
def default_task_status_schema(
        ignore, not_empty, unicode_safe, ignore_missing):
    '''Schema for a background task_status record.'''
    return {
        'id': [ignore],
        'entity_id': [not_empty, unicode_safe],
        'entity_type': [not_empty, unicode_safe],
        'task_type': [not_empty, unicode_safe],
        'key': [not_empty, unicode_safe],
        'value': [ignore_missing],
        'state': [ignore_missing],
        'last_updated': [ignore_missing],
        'error': [ignore_missing]
    }
@validator_args
def default_vocabulary_schema(
        ignore_missing, unicode_safe, vocabulary_id_exists,
        not_empty, vocabulary_name_validator):
    '''Schema for a tag vocabulary dict.'''
    return {
        'id': [ignore_missing, unicode_safe, vocabulary_id_exists],
        'name': [not_empty, unicode_safe, vocabulary_name_validator],
        'tags': default_tags_schema(),
    }
@validator_args
def default_create_vocabulary_schema(
        empty):
    '''Schema for vocabulary_create: the id is server-assigned.'''
    schema = default_vocabulary_schema()
    schema['id'] = [empty]
    return schema
@validator_args
def default_update_vocabulary_schema(
        ignore_missing, vocabulary_id_not_changed,
        vocabulary_name_validator):
    '''Schema for vocabulary_update: id is fixed, name becomes optional.'''
    schema = default_vocabulary_schema()
    schema['id'] = [ignore_missing, vocabulary_id_not_changed]
    schema['name'] = [ignore_missing, vocabulary_name_validator]
    return schema
@validator_args
def default_create_activity_schema(
        ignore, not_missing, not_empty, unicode_safe,
        convert_user_name_or_id_to_id, object_id_validator,
        activity_type_exists, ignore_empty, ignore_missing):
    """Schema for creating an activity record."""
    required_string = [not_missing, not_empty, unicode_safe]
    schema = {}
    # ``id`` and ``timestamp`` are ignored on input.
    schema['id'] = [ignore]
    schema['timestamp'] = [ignore]
    schema['user_id'] = required_string + [convert_user_name_or_id_to_id]
    schema['object_id'] = required_string + [object_id_validator]
    schema['activity_type'] = required_string + [activity_type_exists]
    schema['data'] = [ignore_empty, ignore_missing]
    return schema
@validator_args
def default_follow_user_schema(
        not_missing, not_empty, unicode_safe, convert_user_name_or_id_to_id,
        ignore_missing):
    """Schema for a follow-user request."""
    schema = {}
    schema['id'] = [not_missing, not_empty, unicode_safe,
                    convert_user_name_or_id_to_id]
    schema['q'] = [ignore_missing]
    return schema
@validator_args
def default_follow_dataset_schema(
        not_missing, not_empty, unicode_safe,
        convert_package_name_or_id_to_id):
    """Schema for a follow-dataset request."""
    schema = {}
    schema['id'] = [not_missing, not_empty, unicode_safe,
                    convert_package_name_or_id_to_id]
    return schema
@validator_args
def member_schema(
        not_missing, group_id_or_name_exists, unicode_safe,
        user_id_or_name_exists, role_exists):
    """Schema for adding a user to a group with a given role."""
    schema = {}
    schema['id'] = [not_missing, group_id_or_name_exists, unicode_safe]
    schema['username'] = [not_missing, user_id_or_name_exists, unicode_safe]
    schema['role'] = [not_missing, role_exists, unicode_safe]
    return schema
@validator_args
def default_follow_group_schema(
        not_missing, not_empty, unicode_safe,
        convert_group_name_or_id_to_id):
    """Schema for a follow-group request."""
    schema = {}
    schema['id'] = [not_missing, not_empty, unicode_safe,
                    convert_group_name_or_id_to_id]
    return schema
@validator_args
def default_package_list_schema(
        ignore_missing, natural_number_validator, is_positive_integer):
    """Pagination schema for ``package_list``."""
    schema = {}
    schema['limit'] = [ignore_missing, natural_number_validator]
    schema['offset'] = [ignore_missing, natural_number_validator]
    schema['page'] = [ignore_missing, is_positive_integer]
    return schema
@validator_args
def default_pagination_schema(ignore_missing, natural_number_validator):
    """Generic optional ``limit``/``offset`` pagination schema."""
    pagination_chain = [ignore_missing, natural_number_validator]
    return {
        'limit': list(pagination_chain),
        'offset': list(pagination_chain),
    }
@validator_args
def default_dashboard_activity_list_schema(
        configured_default, natural_number_validator,
        limit_to_configured_maximum):
    """Pagination schema for the dashboard activity stream."""
    schema = default_pagination_schema()
    schema.update({
        'limit': [
            configured_default('ckan.activity_list_limit', 31),
            natural_number_validator,
            limit_to_configured_maximum('ckan.activity_list_limit_max', 100),
        ],
    })
    return schema
@validator_args
def default_activity_list_schema(
        not_missing, unicode_safe, configured_default,
        natural_number_validator, limit_to_configured_maximum,
        ignore_missing, boolean_validator, ignore_not_sysadmin):
    """Pagination schema for an object's activity stream."""
    schema = default_pagination_schema()
    schema.update({
        'id': [not_missing, unicode_safe],
        'limit': [
            configured_default('ckan.activity_list_limit', 31),
            natural_number_validator,
            limit_to_configured_maximum('ckan.activity_list_limit_max', 100),
        ],
        'include_hidden_activity': [
            ignore_missing, ignore_not_sysadmin, boolean_validator],
    })
    return schema
@validator_args
def default_autocomplete_schema(
        not_missing, unicode_safe, ignore_missing, natural_number_validator):
    """Schema for autocomplete queries."""
    schema = {}
    schema['q'] = [not_missing, unicode_safe]
    schema['ignore_self'] = [ignore_missing]
    schema['limit'] = [ignore_missing, natural_number_validator]
    return schema
@validator_args
def default_package_search_schema(
        ignore_missing, unicode_safe, list_of_strings,
        natural_number_validator, int_validator, convert_to_json_if_string,
        convert_to_list_if_string, limit_to_configured_maximum, default):
    """Schema for ``package_search`` request parameters."""
    optional_string = [ignore_missing, unicode_safe]
    schema = {}
    schema['q'] = list(optional_string)
    schema['fl'] = [ignore_missing, convert_to_list_if_string]
    schema['fq'] = list(optional_string)
    schema['rows'] = [
        default(10), natural_number_validator,
        limit_to_configured_maximum('ckan.search.rows_max', 1000)]
    schema['sort'] = list(optional_string)
    schema['start'] = [ignore_missing, natural_number_validator]
    schema['qf'] = list(optional_string)
    schema['facet'] = list(optional_string)
    schema['facet.mincount'] = [ignore_missing, natural_number_validator]
    schema['facet.limit'] = [ignore_missing, int_validator]
    schema['facet.field'] = [ignore_missing, convert_to_json_if_string,
                             list_of_strings]
    # Not used by Solr, but useful for extensions.
    schema['extras'] = [ignore_missing]
    return schema
@validator_args
def default_resource_search_schema(
        ignore_missing, unicode_safe, natural_number_validator):
    """Schema for ``resource_search`` request parameters."""
    pagination_chain = [ignore_missing, natural_number_validator]
    return {
        'query': [ignore_missing],   # string or list of strings
        'fields': [ignore_missing],  # dict of fields
        'order_by': [ignore_missing, unicode_safe],
        'offset': list(pagination_chain),
        'limit': list(pagination_chain),
    }
def create_schema_for_required_keys(keys):
    """Build a schema that marks every key in ``keys`` as required.

    :param keys: iterable of schema key names
    :returns: dict mapping each key to ``[not_missing]``
    """
    required = get_validator('not_missing')
    schema = {}
    for key in keys:
        schema[key] = [required]
    return schema
def default_create_resource_view_schema(resource_view):
    """Pick the create schema for a resource view plugin.

    Filterable views get the extra filter keys; others use the plain schema.
    """
    filterable = bool(resource_view.info().get('filterable'))
    return (
        default_create_resource_view_schema_filtered()
        if filterable
        else default_create_resource_view_schema_unfiltered()
    )
@validator_args
def default_create_resource_view_schema_unfiltered(
        not_empty, resource_id_exists, unicode_safe, ignore_missing, empty):
    """Create schema for non-filterable resource views."""
    schema = {}
    schema['resource_id'] = [not_empty, resource_id_exists]
    schema['title'] = [not_empty, unicode_safe]
    schema['description'] = [ignore_missing, unicode_safe]
    schema['view_type'] = [not_empty, unicode_safe]
    # ``__extras`` is validated with ``empty``: unexpected keys fail.
    schema['__extras'] = [empty]
    return schema
@validator_args
def default_create_resource_view_schema_filtered(
        ignore_missing, convert_to_list_if_string,
        filter_fields_and_values_should_have_same_length,
        filter_fields_and_values_exist_and_are_valid):
    """Create schema for filterable resource views."""
    schema = default_create_resource_view_schema_unfiltered()
    schema.update({
        'filter_fields': [
            ignore_missing,
            convert_to_list_if_string,
            filter_fields_and_values_should_have_same_length,
            filter_fields_and_values_exist_and_are_valid,
        ],
        'filter_values': [ignore_missing, convert_to_list_if_string],
    })
    return schema
def default_update_resource_view_schema(resource_view):
    """Update schema for a resource view: create schema plus overrides."""
    base = default_create_resource_view_schema(resource_view)
    base.update(default_update_resource_view_schema_changes())
    return base
@validator_args
def default_update_resource_view_schema_changes(
        not_missing, not_empty, unicode_safe, resource_id_exists, ignore,
        ignore_missing):
    """Key overrides applied on top of the create schema for updates."""
    schema = {}
    schema['id'] = [not_missing, not_empty, unicode_safe]
    schema['resource_id'] = [ignore_missing, resource_id_exists]
    schema['title'] = [ignore_missing, unicode_safe]
    schema['view_type'] = [ignore]  # cannot change after create
    schema['package_id'] = [ignore]
    return schema
@validator_args
def default_update_configuration_schema(
        unicode_safe, is_positive_integer, ignore_missing):
    """Schema for config options that can be edited at runtime."""
    optional_string = [ignore_missing, unicode_safe]
    schema = {
        option: list(optional_string)
        for option in (
            'ckan.site_title',
            'ckan.site_logo',
            'ckan.site_url',
            'ckan.site_description',
            'ckan.site_about',
            'ckan.site_intro_text',
            'ckan.site_custom_css',
            'ckan.main_css',
        )
    }
    schema['ckan.homepage_style'] = [ignore_missing, is_positive_integer]
    schema['logo_upload'] = list(optional_string)
    schema['clear_logo_upload'] = list(optional_string)
    return schema
def update_configuration_schema():
    '''
    Return the schema for config options editable during runtime.

    Starts from the keys of
    :py:func:`ckan.logic.schema.default_update_configuration_schema` and
    then lets every plugin implementing
    :py:meth:`ckan.plugins.interfaces.IConfigurer.update_config_schema`
    add or remove keys. These options can be edited via the web interface
    or the :py:func:`ckan.logic.action.update.config_option_update` API
    call.

    :returns: a dictionary mapping runtime-editable configuration option
        keys to lists of validator and converter functions
    :rtype: dictionary
    '''
    schema = default_update_configuration_schema()
    # Give each IConfigurer plugin a chance to amend the schema.
    for configurer in plugins.PluginImplementations(plugins.IConfigurer):
        if hasattr(configurer, 'update_config_schema'):
            schema = configurer.update_config_schema(schema)
    return schema
@validator_args
def job_list_schema(ignore_missing, list_of_strings):
    """Schema for listing background jobs, optionally filtered by queue."""
    schema = {}
    schema[u'queues'] = [ignore_missing, list_of_strings]
    return schema
@validator_args
def job_clear_schema(ignore_missing, list_of_strings):
    """Schema for clearing background job queues."""
    schema = {}
    schema[u'queues'] = [ignore_missing, list_of_strings]
    return schema
| 34.292929 | 79 | 0.677025 |
4fce944e04c4f2868eb1dfd2a8b9ea093f20f8fd | 2,063 | py | Python | source_test.py | Prachigarg1/Prachi | 1a84a1af46ac8a1cd06c3317130dc44072e3c80e | [
"Apache-2.0"
] | 3 | 2015-03-01T07:46:49.000Z | 2020-02-24T18:14:10.000Z | source_test.py | Prachigarg1/Prachi | 1a84a1af46ac8a1cd06c3317130dc44072e3c80e | [
"Apache-2.0"
] | null | null | null | source_test.py | Prachigarg1/Prachi | 1a84a1af46ac8a1cd06c3317130dc44072e3c80e | [
"Apache-2.0"
] | 2 | 2016-01-04T21:01:04.000Z | 2020-02-24T18:14:12.000Z | import scanner
import source
import unittest
import symboltypes
class SourceTestCase(unittest.TestCase):
def testScanSource(self):
test_source = source.ScanScript(_TEST_SCRIPT)
self.assertEquals(
set(['goog.aaa', 'goog.bbb']), test_source.provides)
self.assertEquals(
set(['goog.ccc', 'goog.ddd']), test_source.requires)
self.assertEquals(1, len(test_source.symbols))
symbol = list(test_source.symbols)[0]
self.assertEquals('goog.aaa.bbb', symbol.identifier)
self.assertTrue(symbol.static)
self.assertEquals('goog.aaa', symbol.namespace)
self.assertEquals(symboltypes.FUNCTION, symbol.type)
comment = symbol.comment
self.assertEquals('Testing testing.\n@return {string} Dog.', comment.text)
self.assertEquals(['Testing testing.'], comment.description_sections)
self.assertEquals(1, len(comment.flags))
flag = comment.flags[0]
self.assertEquals('@return', flag.name)
self.assertEquals('{string} Dog.', flag.text)
def testIsIgnorableIdentifier(self):
match = scanner.FindCommentTarget(' aaa.bbb = 3');
self.assertEquals('aaa.bbb', match.group())
self.assertFalse(source._IsIgnorableIdentifier(match))
match = scanner.FindCommentTarget(' aaa.bbb(3)');
self.assertEquals('aaa.bbb', match.group())
self.assertTrue(source._IsIgnorableIdentifier(match))
match = scanner.FindCommentTarget(' aaa.bbb[3])');
self.assertEquals('aaa.bbb', match.group())
self.assertTrue(source._IsIgnorableIdentifier(match))
def testScanPrototypeProperty(self):
test_source = source.ScanScript("""\
goog.provide('abc.Def');
/**
* Test.
*/
abc.Def.prototype.ghi;
""")
symbol = list(test_source.symbols)[0]
self.assertEquals('ghi', symbol.property)
self.assertFalse(symbol.static)
_TEST_SCRIPT = """
goog.provide('goog.aaa');
goog.provide('goog.bbb');
goog.require('goog.ccc');
goog.require('goog.ddd');
/**
* Testing testing.
* @return {string} Dog.
*/
goog.aaa.bbb;
"""
if __name__ == '__main__':
unittest.main()
| 25.7875 | 78 | 0.696558 |
5a7f094825e36fbaa602c11c09a3626698d9be29 | 303 | py | Python | glimmer/util/__init__.py | phip123/glimmer | 5e7e1ac22379e69e869afa1d83a3c0d92ef91e7f | [
"MIT"
] | 1 | 2020-02-27T12:03:31.000Z | 2020-02-27T12:03:31.000Z | glimmer/util/__init__.py | phip123/glimmer | 5e7e1ac22379e69e869afa1d83a3c0d92ef91e7f | [
"MIT"
] | null | null | null | glimmer/util/__init__.py | phip123/glimmer | 5e7e1ac22379e69e869afa1d83a3c0d92ef91e7f | [
"MIT"
] | null | null | null | import dataclasses
import json
import time
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
return super().default(o)
def generate_node_name() -> str:
return str(time.time_ns())[5:-5]
| 20.2 | 44 | 0.679868 |
ab8408e280d388472b888456342ae3a61c59a365 | 22,733 | py | Python | readthedocs/builds/tasks.py | codeday-init/readthedocs.org | 464f20e77b63f550d10dcdaa0de9bab7a7f2e349 | [
"MIT"
] | null | null | null | readthedocs/builds/tasks.py | codeday-init/readthedocs.org | 464f20e77b63f550d10dcdaa0de9bab7a7f2e349 | [
"MIT"
] | null | null | null | readthedocs/builds/tasks.py | codeday-init/readthedocs.org | 464f20e77b63f550d10dcdaa0de9bab7a7f2e349 | [
"MIT"
] | null | null | null | import json
from io import BytesIO
import requests
import structlog
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from readthedocs import __version__
from readthedocs.api.v2.serializers import BuildCommandSerializer
from readthedocs.api.v2.utils import (
delete_versions_from_db,
get_deleted_active_versions,
run_automation_rules,
sync_versions_to_db,
)
from readthedocs.builds.constants import (
BRANCH,
BUILD_STATUS_FAILURE,
BUILD_STATUS_PENDING,
BUILD_STATUS_SUCCESS,
EXTERNAL,
LOCK_EXPIRE,
MAX_BUILD_COMMAND_SIZE,
TAG,
)
from readthedocs.builds.models import Build, Version
from readthedocs.builds.utils import memcache_lock
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.utils import send_email, trigger_build
from readthedocs.integrations.models import HttpExchange
from readthedocs.oauth.notifications import GitBuildStatusFailureNotification
from readthedocs.projects.constants import GITHUB_BRAND, GITLAB_BRAND
from readthedocs.projects.models import Project, WebHookEvent
from readthedocs.storage import build_commands_storage
from readthedocs.worker import app
log = structlog.get_logger(__name__)
class TaskRouter:
    """
    Celery tasks router.
    It allows us to decide which queue is where we want to execute the task
    based on project's settings.
    1. the project is using conda
    2. new project with less than N successful builds
    3. version to be built is external
    It ignores projects that have already set ``build_queue`` attribute.
    https://docs.celeryproject.org/en/stable/userguide/routing.html#manual-routing
    https://docs.celeryproject.org/en/stable/userguide/configuration.html#std:setting-task_routes
    """
    # Successful builds required before a version can use the default queue.
    MIN_SUCCESSFUL_BUILDS = 5
    # How many recent builds are inspected for conda usage.
    N_LAST_BUILDS = 15
    # NOTE(review): TIME_AVERAGE is not referenced anywhere in this class —
    # confirm whether it is still used elsewhere before removing.
    TIME_AVERAGE = 350
    BUILD_DEFAULT_QUEUE = 'build:default'
    BUILD_LARGE_QUEUE = 'build:large'
    def route_for_task(self, task, args, kwargs, **__):
        """Return a queue name for build tasks; ``None`` lets Celery use its default routing."""
        log.debug('Executing TaskRouter.', task=task)
        if task not in (
            'readthedocs.projects.tasks.builds.update_docs_task',
            'readthedocs.projects.tasks.builds.sync_repository_task',
        ):
            log.debug('Skipping routing non-build task.', task=task)
            return
        version = self._get_version(task, args, kwargs)
        if not version:
            log.debug('No Build/Version found. No routing task.', task=task)
            return
        project = version.project
        # Do not override the queue defined in the project itself
        if project.build_queue:
            log.info(
                'Skipping routing task because project has a custom queue.',
                project_slug=project.slug,
                queue=project.build_queue,
            )
            return project.build_queue
        # Use last queue used by the default version for external versions
        # We always want the same queue as the previous default version,
        # so that users will have the same outcome for PR's as normal builds.
        if version.type == EXTERNAL:
            last_build_for_default_version = (
                project.builds
                .filter(version__slug=project.get_default_version(), builder__isnull=False)
                .order_by('-date')
                .first()
            )
            if last_build_for_default_version:
                # The builder hostname encodes which queue served the build.
                if 'default' in last_build_for_default_version.builder:
                    routing_queue = self.BUILD_DEFAULT_QUEUE
                else:
                    routing_queue = self.BUILD_LARGE_QUEUE
                log.info(
                    'Routing task because is a external version.',
                    project_slug=project.slug,
                    queue=routing_queue,
                )
                return routing_queue
        last_builds = version.builds.order_by('-date')[:self.N_LAST_BUILDS]
        # Version has used conda in previous builds
        for build in last_builds.iterator():
            build_tools_python = ''
            conda = None
            if build.config:
                build_tools_python = (
                    build.config
                    .get('build', {})
                    .get('tools', {})
                    .get('python', {})
                    .get('version', '')
                )
                conda = build.config.get('conda', None)
            uses_conda = any([
                conda,
                build_tools_python.startswith('miniconda'),
            ])
            if uses_conda:
                log.info(
                    'Routing task because project uses conda.',
                    project_slug=project.slug,
                    queue=self.BUILD_LARGE_QUEUE,
                )
                return self.BUILD_LARGE_QUEUE
        successful_builds_count = (
            version.builds
            .filter(success=True)
            .order_by('-date')
            .count()
        )
        # We do not have enough builds for this version yet
        if successful_builds_count < self.MIN_SUCCESSFUL_BUILDS:
            log.info(
                'Routing task because it does not have enough successful builds yet.',
                project_slug=project.slug,
                queue=self.BUILD_LARGE_QUEUE,
            )
            return self.BUILD_LARGE_QUEUE
        log.debug(
            'No routing task because no conditions were met.',
            project_slug=project.slug,
        )
        return
    def _get_version(self, task, args, kwargs):
        """Resolve the Version a build task refers to, or ``None`` if it no longer exists."""
        tasks = [
            'readthedocs.projects.tasks.builds.update_docs_task',
            'readthedocs.projects.tasks.builds.sync_repository_task',
        ]
        version = None
        if task in tasks:
            # Both build tasks take the version pk as their first positional arg.
            version_pk = args[0]
            try:
                version = Version.objects.get(pk=version_pk)
            except Version.DoesNotExist:
                log.debug(
                    'Version does not exist. Routing task to default queue.',
                    version_id=version_pk,
                )
        return version
@app.task(queue='web', bind=True)
def archive_builds_task(self, days=14, limit=200, delete=False):
    """
    Task to archive old builds to cold storage.
    :arg days: Find builds older than `days` days.
    :arg limit: Maximum number of builds to archive per run.
    :arg delete: If True, deletes BuildCommand objects after archiving them
    """
    if not settings.RTD_SAVE_BUILD_COMMANDS_TO_STORAGE:
        return
    # Memcache lock prevents overlapping runs of this task.
    lock_id = '{0}-lock'.format(self.name)
    with memcache_lock(lock_id, LOCK_EXPIRE, self.app.oid) as acquired:
        if not acquired:
            log.warning('Archive Builds Task still locked')
            return False
        max_date = timezone.now() - timezone.timedelta(days=days)
        queryset = (
            Build.objects
            .exclude(cold_storage=True)
            .filter(date__lt=max_date)
            .prefetch_related('commands')
            .only('date', 'cold_storage')
            [:limit]
        )
        for build in queryset:
            commands = BuildCommandSerializer(build.commands, many=True).data
            if commands:
                for cmd in commands:
                    # Cap each command's output before serializing to storage.
                    if len(cmd['output']) > MAX_BUILD_COMMAND_SIZE:
                        cmd['output'] = cmd['output'][-MAX_BUILD_COMMAND_SIZE:]
                        cmd['output'] = "... (truncated) ...\n\nCommand output too long. Truncated to last 1MB.\n\n" + cmd['output']  # noqa
                        log.warning('Truncating build command for build.', build_id=build.id)
                output = BytesIO(json.dumps(commands).encode('utf8'))
                filename = '{date}/{id}.json'.format(date=str(build.date.date()), id=build.id)
                try:
                    build_commands_storage.save(name=filename, content=output)
                    if delete:
                        build.commands.all().delete()
                except IOError:
                    # Skip marking this build archived; retry on the next run.
                    log.exception('Cold Storage save failure')
                    continue
            build.cold_storage = True
            build.save()
@app.task(queue='web')
def delete_inactive_external_versions(limit=200, days=30 * 3):
    """
    Delete external versions that have been marked as inactive after ``days``.
    The commit status is updated to link to the build page, as the docs are removed.
    :arg limit: Maximum number of versions to delete per run.
    :arg days: Minimum age (since last modification) before deletion.
    """
    days_ago = timezone.now() - timezone.timedelta(days=days)
    queryset = Version.external.filter(
        active=False,
        modified__lte=days_ago,
    )[:limit]
    for version in queryset:
        try:
            last_build = version.last_build
            if last_build:
                status = BUILD_STATUS_PENDING
                if last_build.finished:
                    status = BUILD_STATUS_SUCCESS if last_build.success else BUILD_STATUS_FAILURE
                send_build_status(
                    build_pk=last_build.pk,
                    commit=last_build.commit,
                    status=status,
                    link_to_build=True,
                )
        except Exception:
            log.exception(
                "Failed to send status",
                project_slug=version.project.slug,
                version_slug=version.slug,
            )
        else:
            # Only delete the version when the status update did not raise.
            log.info(
                "Removing external version.",
                project_slug=version.project.slug,
                version_slug=version.slug,
            )
            version.delete()
@app.task(
    max_retries=1,
    default_retry_delay=60,
    queue='web'
)
def sync_versions_task(project_pk, tags_data, branches_data, **kwargs):
    """
    Sync the version data in the repo (from build server) into our database.
    Creates new Version objects for tags/branches that aren't tracked in the database,
    and deletes Version objects for tags/branches that don't exists in the repository.
    :param project_pk: Primary key of the Project to sync.
    :param tags_data: List of dictionaries with ``verbose_name`` and ``identifier``.
    :param branches_data: Same as ``tags_data`` but for branches.
    :returns: `True` or `False` if the task succeeded.
    """
    project = Project.objects.get(pk=project_pk)
    # If the currently highest non-prerelease version is active, then make
    # the new latest version active as well.
    current_stable = project.get_original_stable_version()
    if current_stable is not None:
        activate_new_stable = current_stable.active
    else:
        activate_new_stable = False
    try:
        # Update All Versions
        added_versions = set()
        result = sync_versions_to_db(
            project=project,
            versions=tags_data,
            type=TAG,
        )
        added_versions.update(result)
        result = sync_versions_to_db(
            project=project,
            versions=branches_data,
            type=BRANCH,
        )
        added_versions.update(result)
        delete_versions_from_db(
            project=project,
            tags_data=tags_data,
            branches_data=branches_data,
        )
        deleted_active_versions = get_deleted_active_versions(
            project=project,
            tags_data=tags_data,
            branches_data=branches_data,
        )
    except Exception:
        log.exception('Sync Versions Error')
        return False
    try:
        # The order of added_versions isn't deterministic.
        # We don't track the commit time or any other metadata.
        # We usually have one version added per webhook.
        run_automation_rules(project, added_versions, deleted_active_versions)
    except Exception:
        # Don't interrupt the request if something goes wrong
        # in the automation rules.
        log.exception(
            'Failed to execute automation rules.',
            project_slug=project.slug,
            versions=added_versions,
        )
    # TODO: move this to an automation rule
    promoted_version = project.update_stable_version()
    new_stable = project.get_stable_version()
    if promoted_version and new_stable and new_stable.active:
        log.info(
            'Triggering new stable build.',
            project_slug=project.slug,
            version_identifier=new_stable.identifier,
        )
        trigger_build(project=project, version=new_stable)
        # Marking the tag that is considered the new stable version as
        # active and building it if it was just added.
        if (
            activate_new_stable and
            promoted_version.slug in added_versions
        ):
            promoted_version.active = True
            promoted_version.save()
            trigger_build(project=project, version=promoted_version)
    return True
@app.task(
    max_retries=3,
    default_retry_delay=60,
    queue='web'
)
def send_build_status(build_pk, commit, status, link_to_build=False):
    """
    Send Build Status to Git Status API for project external versions.
    It tries using these services' account in order:
    1. user's account that imported the project
    2. each user's account from the project's maintainers
    :param build_pk: Build primary key
    :param commit: commit sha of the pull/merge request
    :param status: build status failed, pending, or success to be sent.
    :returns: ``True`` when a status was delivered, ``False`` otherwise.
    """
    # TODO: Send build status for BitBucket.
    build = Build.objects.filter(pk=build_pk).first()
    if not build:
        return
    provider_name = build.project.git_provider_name
    log.bind(
        build_id=build.pk,
        project_slug=build.project.slug,
        commit=commit,
        status=status,
    )
    log.debug('Sending build status.')
    if provider_name in [GITHUB_BRAND, GITLAB_BRAND]:
        # get the service class for the project e.g: GitHubService.
        service_class = build.project.git_service_class()
        users = AdminPermission.admins(build.project)
        if build.project.remote_repository:
            remote_repository = build.project.remote_repository
            remote_repository_relations = (
                remote_repository.remote_repository_relations.filter(
                    account__isnull=False,
                    # Use ``user_in=`` instead of ``user__projects=`` here
                    # because User's are not related to Project's directly in
                    # Read the Docs for Business
                    user__in=AdminPermission.members(build.project),
                ).select_related('account', 'user').only('user', 'account')
            )
            # Try using any of the users' maintainer accounts
            # Try to loop through all remote repository relations for the projects users
            for relation in remote_repository_relations:
                service = service_class(relation.user, relation.account)
                # Send status report using the API.
                success = service.send_build_status(
                    build=build,
                    commit=commit,
                    state=status,
                    link_to_build=link_to_build,
                )
                if success:
                    # First account that succeeds wins; stop here.
                    log.debug(
                        'Build status report sent correctly.',
                        user_username=relation.user.username,
                    )
                    return True
        else:
            log.warning('Project does not have a RemoteRepository.')
            # Try to send build status for projects with no RemoteRepository
            for user in users:
                services = service_class.for_user(user)
                # Try to loop through services for users all social accounts
                # to send successful build status
                for service in services:
                    success = service.send_build_status(build, commit, status)
                    if success:
                        log.debug(
                            'Build status report sent correctly using an user account.',
                            user_username=user.username,
                        )
                        return True
        # Every account failed: notify all project admins about the failure.
        for user in users:
            # Send Site notification about Build status reporting failure
            # to all the users of the project.
            notification = GitBuildStatusFailureNotification(
                context_object=build.project,
                extra_context={'provider_name': provider_name},
                user=user,
                success=False,
            )
            notification.send()
    log.info('No social account or repository permission available.')
    return False
@app.task(queue='web')
def send_build_notifications(version_pk, build_pk, event):
    """Fan out email/webhook notifications for a build of a regular version.

    Does nothing when the version is missing or external, or when the
    build no longer exists.
    """
    version = Version.objects.get_object_or_log(pk=version_pk)
    if not version or version.type == EXTERNAL:
        return
    build = Build.objects.filter(pk=build_pk).first()
    if not build:
        return
    BuildNotificationSender(version=version, build=build, event=event).send()
class BuildNotificationSender:
    """Deliver email and webhook notifications for a single build event."""
    # Seconds ``requests.post`` waits for the remote webhook endpoint.
    webhook_timeout = 2
    def __init__(self, version, build, event):
        self.version = version
        self.build = build
        self.project = version.project
        self.event = event
    def send(self):
        """
        Send email and webhook notifications for `project` about the `build`.
        Email notifications are only send for build:failed events.
        Webhooks choose to what events they subscribe to.
        Failures for individual recipients are logged and do not stop
        delivery to the remaining recipients.
        """
        if self.event == WebHookEvent.BUILD_FAILED:
            email_addresses = (
                self.project.emailhook_notifications.all()
                .values_list('email', flat=True)
            )
            for email in email_addresses:
                try:
                    self.send_email(email)
                except Exception:
                    log.exception(
                        'Failed to send email notification.',
                        email=email,
                        project_slug=self.project.slug,
                        version_slug=self.version.slug,
                        build_id=self.build.id,
                    )
        webhooks = (
            self.project.webhook_notifications
            .filter(events__name=self.event)
        )
        for webhook in webhooks:
            try:
                self.send_webhook(webhook)
            except Exception:
                log.exception(
                    'Failed to send webhook.',
                    webhook_id=webhook.id,
                    project_slug=self.project.slug,
                    version_slug=self.version.slug,
                    build_id=self.build.id,
                )
    def send_email(self, email):
        """Send email notifications for build failures."""
        # We send only what we need from the Django model objects here to avoid
        # serialization problems in the ``readthedocs.core.tasks.send_email_task``
        protocol = 'http' if settings.DEBUG else 'https'
        context = {
            'version': {
                'verbose_name': self.version.verbose_name,
            },
            'project': {
                'name': self.project.name,
            },
            'build': {
                'pk': self.build.pk,
                'error': self.build.error,
            },
            'build_url': '{}://{}{}'.format(
                protocol,
                settings.PRODUCTION_DOMAIN,
                self.build.get_absolute_url(),
            ),
            'unsubscribe_url': '{}://{}{}'.format(
                protocol,
                settings.PRODUCTION_DOMAIN,
                reverse('projects_notifications', args=[self.project.slug]),
            ),
        }
        if self.build.commit:
            title = _('Failed: {project[name]} ({commit})').format(
                commit=self.build.commit[:8],
                **context,
            )
        else:
            title = _('Failed: {project[name]} ({version[verbose_name]})').format(
                **context
            )
        log.info(
            'Sending email notification.',
            email=email,
            project_slug=self.project.slug,
            version_slug=self.version.slug,
            build_id=self.build.id,
        )
        send_email(
            email,
            title,
            template='projects/email/build_failed.txt',
            template_html='projects/email/build_failed.html',
            context=context,
        )
    def send_webhook(self, webhook):
        """
        Send webhook notification.
        The payload is signed using HMAC-SHA256,
        for users to be able to verify the authenticity of the request.
        Webhooks that don't have a payload,
        are from the old implementation, for those we keep sending the
        old default payload.
        An HttpExchange object is created for each transaction.
        """
        payload = webhook.get_payload(
            version=self.version,
            build=self.build,
            event=self.event,
        )
        if not payload:
            # Default payload from old webhooks.
            payload = json.dumps({
                'name': self.project.name,
                'slug': self.project.slug,
                'build': {
                    'id': self.build.id,
                    'commit': self.build.commit,
                    'state': self.build.state,
                    'success': self.build.success,
                    'date': self.build.date.strftime('%Y-%m-%d %H:%M:%S'),
                },
            })
        headers = {
            'content-type': 'application/json',
            'User-Agent': f'Read-the-Docs/{__version__} ({settings.PRODUCTION_DOMAIN})',
            'X-RTD-Event': self.event,
        }
        if webhook.secret:
            # Signature lets the receiver verify the payload's authenticity.
            headers['X-Hub-Signature'] = webhook.sign_payload(payload)
        try:
            log.info(
                'Sending webhook notification.',
                webhook_id=webhook.id,
                project_slug=self.project.slug,
                version_slug=self.version.slug,
                build_id=self.build.id,
            )
            response = requests.post(
                webhook.url,
                data=payload,
                headers=headers,
                timeout=self.webhook_timeout,
            )
            # Record the request/response pair for debugging in the admin.
            HttpExchange.objects.from_requests_exchange(
                response=response,
                related_object=webhook,
            )
        except Exception:
            log.exception(
                'Failed to POST to webhook url.',
                webhook_id=webhook.id,
                webhook_url=webhook.url,
            )
| 35.027735 | 140 | 0.577706 |
dc6241d87643216fbf1b884643596f40aa212846 | 14,434 | py | Python | string_grouper/test/test_string_grouper.py | Nuttenscl/string_grouper-1 | 75a946d0a954a25e3056f90a62c32f189112a78c | [
"MIT"
] | null | null | null | string_grouper/test/test_string_grouper.py | Nuttenscl/string_grouper-1 | 75a946d0a954a25e3056f90a62c32f189112a78c | [
"MIT"
] | null | null | null | string_grouper/test/test_string_grouper.py | Nuttenscl/string_grouper-1 | 75a946d0a954a25e3056f90a62c32f189112a78c | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_MAX_N_MATCHES, DEFAULT_REGEX, \
DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException
class StringGrouperConfigTest(unittest.TestCase):
    """Tests for the StringGrouperConfig named configuration object."""
    def test_config_defaults(self):
        """Empty initialisation should set default values"""
        config = StringGrouperConfig()
        self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
        self.assertEqual(config.max_n_matches, DEFAULT_MAX_N_MATCHES)
        self.assertEqual(config.regex, DEFAULT_REGEX)
        self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
        self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
        self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
    def test_config_immutable(self):
        """Configurations should be immutable"""
        config = StringGrouperConfig()
        with self.assertRaises(Exception) as _:
            config.min_similarity = 0.1
    def test_config_non_default_values(self):
        """Explicitly passed values should override the defaults"""
        config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
        self.assertEqual(0.1, config.min_similarity)
        self.assertEqual(100, config.max_n_matches)
        self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
    """End-to-end unit tests for StringGrouper.

    Covers: n-gram generation (case handling), TF-IDF matrix construction,
    cosine-similarity match building, match/group retrieval, manual
    add_match/remove_match editing, and input type validation.
    """
    def test_n_grams_case_unchanged(self):
        """Should return all ngrams in a string with case"""
        test_series = pd.Series(pd.Series(['aa']))
        ## Explicit do not ignore case
        sg = StringGrouper(test_series, ignore_case=False)
        expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
        self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
    def test_n_grams_ignore_case_to_lower(self):
        """Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(pd.Series(['aa']))
        ## Explicit ignore case
        sg = StringGrouper(test_series, ignore_case=True)
        expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
        self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
    def test_n_grams_ignore_case_to_lower_with_defaults(self):
        """Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(pd.Series(['aa']))
        ## Implicit default case (i.e. default behaviour)
        sg = StringGrouper(test_series)
        expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
        self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
    def test_build_matrix(self):
        """Should create a csr matrix only master"""
        test_series = pd.Series(['foo', 'bar', 'baz'])
        sg = StringGrouper(test_series)
        master, dupe = sg._get_tf_idf_matrices()
        # With no duplicates series, master and dupe matrices are identical.
        c = csr_matrix([[0., 0., 1.]
                           , [1., 0., 0.]
                           , [0., 1., 0.]])
        np.testing.assert_array_equal(c.toarray(), master.toarray())
        np.testing.assert_array_equal(c.toarray(), dupe.toarray())
    def test_build_matrix_master_and_duplicates(self):
        """Should create a csr matrix for master and duplicates"""
        test_series_1 = pd.Series(['foo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foo', 'bar', 'bop'])
        sg = StringGrouper(test_series_1, test_series_2)
        master, dupe = sg._get_tf_idf_matrices()
        master_expected = csr_matrix([[0., 0., 0., 1.],
                                      [1., 0., 0., 0.],
                                      [0., 1., 0., 0.]])
        dupes_expected = csr_matrix([[0., 0., 0., 1.],
                                     [1., 0., 0., 0.],
                                     [0., 0., 1., 0.]])
        np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
        np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
    def test_build_matches(self):
        """Should create the cosine similarity matrix of two series"""
        test_series_1 = pd.Series(['foo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foo', 'bar', 'bop'])
        sg = StringGrouper(test_series_1, test_series_2)
        master, dupe = sg._get_tf_idf_matrices()
        expected_matches = np.array([[1., 0., 0.]
                                    , [0., 1., 0.]
                                    , [0., 0., 0.]])
        np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe).toarray())
    def test_build_matches_list(self):
        """Should create the cosine similarity matrix of two series"""
        test_series_1 = pd.Series(['foo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foo', 'bar', 'bop'])
        sg = StringGrouper(test_series_1, test_series_2)
        sg = sg.fit()
        master = [0, 1]
        dupe_side = [0, 1]
        similarity = [1.0, 1.0]
        expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
        pd.testing.assert_frame_equal(expected_df, sg._matches_list)
    def test_case_insensitive_build_matches_list(self):
        """Should create the cosine similarity matrix of two case insensitive series"""
        test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
        test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
        sg = StringGrouper(test_series_1, test_series_2)
        sg = sg.fit()
        master = [0, 1]
        dupe_side = [0, 1]
        similarity = [1.0, 1.0]
        expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
        pd.testing.assert_frame_equal(expected_df, sg._matches_list)
    def test_get_matches_two_dataframes(self):
        test_series_1 = pd.Series(['foo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foo', 'bar', 'bop'])
        sg = StringGrouper(test_series_1, test_series_2).fit()
        left_side = ['foo', 'bar']
        right_side = ['foo', 'bar']
        similarity = [1.0, 1.0]
        expected_df = pd.DataFrame({'left_side': left_side, 'right_side': right_side, 'similarity': similarity})
        pd.testing.assert_frame_equal(expected_df, sg.get_matches())
    def test_get_matches_single(self):
        # Single-series mode: matches are symmetric, so 'foo' vs 'foo'
        # (indices 0 and 3) appears in both directions.
        test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
        sg = StringGrouper(test_series_1)
        sg = sg.fit()
        left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
        right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
        similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        expected_df = pd.DataFrame({'left_side': left_side, 'right_side': right_side, 'similarity': similarity})
        pd.testing.assert_frame_equal(expected_df, sg.get_matches())
    def test_get_groups_single_df(self):
        """Should return a pd.series object with the same length as the original df. The series object will contain
        a list of the grouped strings"""
        test_series_1 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1)
        sg = sg.fit()
        result = sg.get_groups()
        expected_result = pd.Series(['foooo', 'bar', 'baz', 'foooo'])
        pd.testing.assert_series_equal(expected_result, result)
    def test_get_groups_two_df(self):
        """Should return a pd.series object with the length of the dupes. The series will contain the master string
        that matches the dupe with the highest similarity"""
        test_series_1 = pd.Series(['foooo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1, test_series_2)
        sg = sg.fit()
        result = sg.get_groups()
        expected_result = pd.Series(['foooo', 'bar', 'baz', 'foooo'])
        pd.testing.assert_series_equal(expected_result, result)
    def test_get_groups_two_df_same_similarity(self):
        """Should return a pd.series object with the length of the dupes. If there are two dupes with the same
        similarity, the first one is chosen"""
        test_series_1 = pd.Series(['foooo', 'bar', 'baz', 'foooo'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1, test_series_2)
        sg = sg.fit()
        result = sg.get_groups()
        expected_result = pd.Series(['foooo', 'bar', 'baz', 'foooo'])
        pd.testing.assert_series_equal(expected_result, result)
    def test_get_groups_two_df_no_match(self):
        """Should return a pd.series object with the length of the dupes. If no match is found in dupes,
        the original will be returned"""
        test_series_1 = pd.Series(['foooo', 'bar', 'baz'])
        test_series_2 = pd.Series(['foooo', 'dooz', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1, test_series_2)
        sg = sg.fit()
        result = sg.get_groups()
        expected_result = pd.Series(['foooo', 'dooz', 'bar', 'baz', 'foooo'])
        pd.testing.assert_series_equal(expected_result, result)
    def test_get_groups_raises_exception(self):
        """Should raise an exception if called before the StringGrouper is fit"""
        test_series_1 = pd.Series(['foooo', 'bar', 'baz', 'foooo'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1, test_series_2)
        with self.assertRaises(StringGrouperNotFitException):
            _ = sg.get_groups()
    def test_clean_groups(self):
        """Should clean up groups where the group id index is not in the group"""
        orig_id = [0, 1, 2, 3]
        group_id = [0, 0, 1, 2]
        similarities = [1, 1, 1, 1]
        grouped_id_tuples = pd.DataFrame({'original_id': orig_id,
                                          'group_id': group_id,
                                          'min_similarity': similarities})
        expected_group_id = pd.Series([0, 0, 0, 0]).rename('group_id')
        result = StringGrouper._clean_groups(grouped_id_tuples).group_id
        pd.testing.assert_series_equal(expected_group_id, result)
    def test_add_match_raises_exception_if_string_not_present(self):
        test_series_1 = pd.Series(['foooo', 'no match', 'baz', 'foooo'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1).fit()
        sg2 = StringGrouper(test_series_1, test_series_2).fit()
        with self.assertRaises(ValueError):
            sg.add_match('doesnt exist', 'baz')
        with self.assertRaises(ValueError):
            sg.add_match('baz', 'doesnt exist')
        with self.assertRaises(ValueError):
            sg2.add_match('doesnt exist', 'baz')
        with self.assertRaises(ValueError):
            sg2.add_match('baz', 'doesnt exist')
    def test_add_match_single_occurence(self):
        """Should add the match if there are no exact duplicates"""
        test_series_1 = pd.Series(['foooo', 'no match', 'baz', 'foooo'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1).fit()
        sg.add_match('no match', 'baz')
        matches = sg.get_matches()
        matches = matches[(matches.left_side == 'no match') & (matches.right_side == 'baz')]
        self.assertEqual(1, matches.shape[0])
        sg2 = StringGrouper(test_series_1, test_series_2).fit()
        sg2.add_match('no match', 'bar')
        matches = sg2.get_matches()
        matches = matches[(matches.left_side == 'no match') & (matches.right_side == 'bar')]
        self.assertEqual(1, matches.shape[0])
    def test_add_match_single_group_matches_symmetric(self):
        """New matches that are added to a SG with only a master series should be symmetric"""
        test_series_1 = pd.Series(['foooo', 'no match', 'baz', 'foooo'])
        sg = StringGrouper(test_series_1).fit()
        sg.add_match('no match', 'baz')
        matches = sg.get_matches()
        matches_1 = matches[(matches.left_side == 'no match') & (matches.right_side == 'baz')]
        self.assertEqual(1, matches_1.shape[0])
        matches_2 = matches[(matches.left_side == 'baz') & (matches.right_side == 'no match')]
        self.assertEqual(1, matches_2.shape[0])
    def test_add_match_multiple_occurences(self):
        """Should add multiple matches if there are exact duplicates"""
        test_series_1 = pd.Series(['foooo', 'no match', 'baz', 'foooo'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1, test_series_2).fit()
        sg.add_match('foooo', 'baz')
        matches = sg.get_matches()
        matches = matches[(matches.left_side == 'foooo') & (matches.right_side == 'baz')]
        self.assertEqual(2, matches.shape[0])
    def test_remove_match(self):
        """Should remove a match"""
        test_series_1 = pd.Series(['foooo', 'no match', 'baz', 'foooob'])
        test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
        sg = StringGrouper(test_series_1).fit()
        sg.remove_match('foooo', 'foooob')
        matches = sg.get_matches()
        matches_1 = matches[(matches.left_side == 'foooo') & (matches.right_side == 'foooob')]
        # In the case of only a master series, the matches are recursive, so both variants are to be removed
        matches_2 = matches[(matches.left_side == 'foooob') & (matches.right_side == 'foooo')]
        self.assertEqual(0, matches_1.shape[0])
        self.assertEqual(0, matches_2.shape[0])
        sg2 = StringGrouper(test_series_1, test_series_2).fit()
        sg2.remove_match('foooo', 'foooob')
        matches = sg2.get_matches()
        matches = matches[(matches.left_side == 'foooo') & (matches.right_side == 'foooob')]
        self.assertEqual(0, matches.shape[0])
    def test_string_grouper_type_error(self):
        """StringGrouper should raise an typeerror master or duplicates are not a series of strings"""
        with self.assertRaises(TypeError):
            _ = StringGrouper('foo', 'bar')
        with self.assertRaises(TypeError):
            _ = StringGrouper(pd.Series(['foo', 'bar']), pd.Series(['foo', 1]))
        with self.assertRaises(TypeError):
            _ = StringGrouper(pd.Series(['foo', np.nan]), pd.Series(['foo', 'j']))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 50.292683 | 115 | 0.625606 |
78d15aa0a9e793ee716592c60b727a51047aaeba | 8,621 | py | Python | env/lib/python3.8/site-packages/prompt_toolkit/shortcuts/dialogs.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 1,318 | 2019-07-11T10:34:39.000Z | 2022-03-29T15:05:19.000Z | env/lib/python3.8/site-packages/prompt_toolkit/shortcuts/dialogs.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | env/lib/python3.8/site-packages/prompt_toolkit/shortcuts/dialogs.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 66 | 2019-11-11T15:33:12.000Z | 2022-03-01T07:55:55.000Z | import functools
from asyncio import get_event_loop
from typing import Any, Callable, List, Optional, Tuple, TypeVar
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import Completer
from prompt_toolkit.eventloop import run_in_executor_with_context
from prompt_toolkit.filters import FilterOrBool
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
from prompt_toolkit.key_binding.defaults import load_key_bindings
from prompt_toolkit.key_binding.key_bindings import KeyBindings, merge_key_bindings
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.containers import AnyContainer, HSplit
from prompt_toolkit.layout.dimension import Dimension as D
from prompt_toolkit.styles import BaseStyle
from prompt_toolkit.widgets import (
Box,
Button,
CheckboxList,
Dialog,
Label,
ProgressBar,
RadioList,
TextArea,
)
# Public API of this module: one convenience constructor per dialog kind.
__all__ = [
    "yes_no_dialog",
    "button_dialog",
    "input_dialog",
    "message_dialog",
    "radiolist_dialog",
    "checkboxlist_dialog",
    "progress_dialog",
]
def yes_no_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    yes_text: str = "Yes",
    no_text: str = "No",
    style: Optional[BaseStyle] = None,
) -> Application[bool]:
    """
    Display a Yes/No dialog.
    Return a boolean.
    """

    def make_handler(answer: bool) -> Callable[[], None]:
        # Exiting the application delivers `answer` as the dialog result.
        def handler() -> None:
            get_app().exit(result=answer)

        return handler

    message = Label(text=text, dont_extend_height=True)
    dialog = Dialog(
        title=title,
        body=message,
        buttons=[
            Button(text=yes_text, handler=make_handler(True)),
            Button(text=no_text, handler=make_handler(False)),
        ],
        with_background=True,
    )
    return _create_app(dialog, style)
_T = TypeVar("_T")
def button_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    buttons: Optional[List[Tuple[str, _T]]] = None,
    style: Optional[BaseStyle] = None,
) -> Application[_T]:
    """
    Display a dialog with button choices (given as a list of tuples).
    Return the value associated with button.

    :param buttons: List of ``(label, result)`` tuples. One button is created
        per entry; clicking it exits the dialog with ``result``.
    """
    # The previous default was the mutable literal ``[]``. Use the None
    # sentinel instead (same observable behaviour, no shared mutable default),
    # consistent with radiolist_dialog/checkboxlist_dialog below.
    if buttons is None:
        buttons = []

    def button_handler(v: _T) -> None:
        get_app().exit(result=v)

    dialog = Dialog(
        title=title,
        body=Label(text=text, dont_extend_height=True),
        buttons=[
            Button(text=t, handler=functools.partial(button_handler, v))
            for t, v in buttons
        ],
        with_background=True,
    )
    return _create_app(dialog, style)
def input_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    ok_text: str = "OK",
    cancel_text: str = "Cancel",
    completer: Optional[Completer] = None,
    password: FilterOrBool = False,
    style: Optional[BaseStyle] = None,
) -> Application[str]:
    """
    Display a text input box.
    Return the given text, or None when cancelled.

    :param completer: Optional completer for the text field.
    :param password: When true(ish), mask the typed characters.
    """
    def accept(buf: Buffer) -> bool:
        # Pressing Enter in the text field moves focus to the OK button
        # instead of submitting directly, so the user confirms explicitly.
        get_app().layout.focus(ok_button)
        return True  # Keep text.
    def ok_handler() -> None:
        # Exit with the current contents of the text field as the result.
        get_app().exit(result=textfield.text)
    ok_button = Button(text=ok_text, handler=ok_handler)
    cancel_button = Button(text=cancel_text, handler=_return_none)
    textfield = TextArea(
        multiline=False, password=password, completer=completer, accept_handler=accept
    )
    dialog = Dialog(
        title=title,
        body=HSplit(
            [
                Label(text=text, dont_extend_height=True),
                textfield,
            ],
            padding=D(preferred=1, max=1),
        ),
        buttons=[ok_button, cancel_button],
        with_background=True,
    )
    return _create_app(dialog, style)
def message_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    ok_text: str = "Ok",
    style: Optional[BaseStyle] = None,
) -> Application[None]:
    """
    Display a simple message box and wait until the user presses enter.
    """
    # A single OK button that just closes the dialog (result is None).
    ok_button = Button(text=ok_text, handler=_return_none)
    message_body = Label(text=text, dont_extend_height=True)
    return _create_app(
        Dialog(
            title=title,
            body=message_body,
            buttons=[ok_button],
            with_background=True,
        ),
        style,
    )
def radiolist_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    ok_text: str = "Ok",
    cancel_text: str = "Cancel",
    values: Optional[List[Tuple[_T, AnyFormattedText]]] = None,
    style: Optional[BaseStyle] = None,
) -> Application[_T]:
    """
    Display a simple list of element the user can choose amongst.
    Only one element can be selected at a time using Arrow keys and Enter.
    The focus can be moved between the list and the Ok/Cancel button with tab.

    :param values: List of ``(result, label)`` tuples; the selected entry's
        ``result`` is returned when OK is pressed.
    """
    # None sentinel instead of a mutable default argument.
    if values is None:
        values = []
    def ok_handler() -> None:
        # Exit with the currently selected radio value.
        get_app().exit(result=radio_list.current_value)
    radio_list = RadioList(values)
    dialog = Dialog(
        title=title,
        body=HSplit(
            [Label(text=text, dont_extend_height=True), radio_list],
            padding=1,
        ),
        buttons=[
            Button(text=ok_text, handler=ok_handler),
            Button(text=cancel_text, handler=_return_none),
        ],
        with_background=True,
    )
    return _create_app(dialog, style)
def checkboxlist_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    ok_text: str = "Ok",
    cancel_text: str = "Cancel",
    values: Optional[List[Tuple[_T, AnyFormattedText]]] = None,
    style: Optional[BaseStyle] = None,
) -> Application[List[_T]]:
    """
    Display a simple list of element the user can choose multiple values amongst.
    Several elements can be selected at a time using Arrow keys and Enter.
    The focus can be moved between the list and the Ok/Cancel button with tab.

    :param values: List of ``(result, label)`` tuples; the list of checked
        ``result`` values is returned when OK is pressed.
    """
    # None sentinel instead of a mutable default argument.
    if values is None:
        values = []
    def ok_handler() -> None:
        # Exit with the list of all currently checked values.
        get_app().exit(result=cb_list.current_values)
    cb_list = CheckboxList(values)
    dialog = Dialog(
        title=title,
        body=HSplit(
            [Label(text=text, dont_extend_height=True), cb_list],
            padding=1,
        ),
        buttons=[
            Button(text=ok_text, handler=ok_handler),
            Button(text=cancel_text, handler=_return_none),
        ],
        with_background=True,
    )
    return _create_app(dialog, style)
def progress_dialog(
    title: AnyFormattedText = "",
    text: AnyFormattedText = "",
    run_callback: Callable[[Callable[[int], None], Callable[[str], None]], None] = (
        lambda *a: None
    ),
    style: Optional[BaseStyle] = None,
) -> Application[None]:
    """
    Display a progress bar dialog while running *run_callback* in a
    background executor.

    :param run_callback: A function that receives as input a `set_percentage`
        function and it does the work.  It also receives a `log_text`
        function for appending output to the dialog's text area.
    """
    loop = get_event_loop()
    progressbar = ProgressBar()
    text_area = TextArea(
        focusable=False,
        # Prefer this text area as big as possible, to avoid having a window
        # that keeps resizing when we add text to it.
        height=D(preferred=10 ** 10),
    )
    dialog = Dialog(
        body=HSplit(
            [
                Box(Label(text=text)),
                Box(text_area, padding=D.exact(1)),
                progressbar,
            ]
        ),
        title=title,
        with_background=True,
    )
    app = _create_app(dialog, style)
    def set_percentage(value: int) -> None:
        # Called from the worker thread; mutating an int attribute and
        # invalidating the app are the only UI interactions done here.
        progressbar.percentage = int(value)
        app.invalidate()
    def log_text(text: str) -> None:
        # Buffer mutation must happen on the event loop thread, hence
        # call_soon_threadsafe.
        loop.call_soon_threadsafe(text_area.buffer.insert_text, text)
        app.invalidate()
    # Run the callback in the executor. When done, set a return value for the
    # UI, so that it quits.
    def start() -> None:
        try:
            run_callback(set_percentage, log_text)
        finally:
            app.exit()
    def pre_run() -> None:
        run_in_executor_with_context(start)
    app.pre_run_callables.append(pre_run)
    return app
def _create_app(dialog: AnyContainer, style: Optional[BaseStyle]) -> Application[Any]:
    """Wrap *dialog* in a full-screen Application with tab-cycling focus."""
    # Tab / Shift-Tab move focus between the dialog's widgets.
    tab_bindings = KeyBindings()
    tab_bindings.add("tab")(focus_next)
    tab_bindings.add("s-tab")(focus_previous)
    all_bindings = merge_key_bindings([load_key_bindings(), tab_bindings])
    return Application(
        layout=Layout(dialog),
        key_bindings=all_bindings,
        mouse_support=True,
        style=style,
        full_screen=True,
    )
def _return_none() -> None:
    """Button handler that exits the application with the default result (None)."""
    get_app().exit()
| 27.195584 | 86 | 0.639021 |
68926399a9e5e6274010cb2c2e60d02aa23f9080 | 3,151 | py | Python | example/speech-demo/speechSGD.py | Liuxg16/BrainMatrix | 0ec70edd4e12dd3719d20dd14d4e24438c60326f | [
"Apache-2.0"
] | 9 | 2018-06-12T12:12:56.000Z | 2020-11-26T01:45:15.000Z | example/speech-demo/speechSGD.py | achao2013/mxnet-quantify | ae77c896da6db35530390e3cf8e524d553bba112 | [
"Apache-2.0"
] | 1 | 2020-01-26T19:53:49.000Z | 2020-01-26T19:53:49.000Z | example/speech-demo/speechSGD.py | achao2013/mxnet-quantify | ae77c896da6db35530390e3cf8e524d553bba112 | [
"Apache-2.0"
] | 14 | 2016-11-18T07:21:41.000Z | 2019-09-30T08:48:22.000Z | import mxnet as mx
from mxnet.base import _LIB, check_call
from mxnet.base import c_array, mx_uint, mx_float, c_str
from mxnet.base import OptimizerHandle, OptimizerCreator
from mxnet.ndarray import NDArray, zeros, clip, sqrt
from mxnet.random import normal
@mx.optimizer.register
class speechSGD(mx.optimizer.Optimizer):
    """A very simple SGD optimizer with momentum and weight regularization.

    Parameters
    ----------
    learning_rate : float, optional
        learning_rate of SGD
    momentum : float, optional
        momentum value
    wd : float, optional
        L2 regularization coefficient add to all the weights
    rescale_grad : float, optional
        rescaling factor of gradient.
    clip_gradient : float, optional
        clip gradient in range [-clip_gradient, clip_gradient]
    param_idx2name : dict of string/int to float, optional
        special treat weight decay in parameter ends with bias, gamma, and beta
    """
    def __init__(self, momentum=0.0, **kwargs):
        super(speechSGD, self).__init__(**kwargs)
        self.momentum = momentum

    def create_state(self, index, weight):
        """Create additional optimizer state such as momentum.

        Parameters
        ----------
        weight : NDArray
            The weight data

        Returns
        -------
        NDArray or None
            A zero-initialized momentum buffer matching `weight`, or None
            when momentum is disabled.
        """
        if self.momentum == 0.0:
            return None
        else:
            return zeros(weight.shape, weight.context, dtype=weight.dtype)

    def _get_lr(self, index):
        """Get the learning rate and scheduler momentum for `index`.

        Parameters
        ----------
        index : int
            The index for weight

        Returns
        -------
        (lr, mom) : (float, float)
            Learning rate for this index and the momentum supplied by the
            scheduler (0.0 when no scheduler is configured).
        """
        # NOTE: unlike the stock Optimizer._get_lr, the scheduler here is
        # expected to return a (lr, momentum) pair.
        mom = 0.0
        if self.lr_scheduler is not None:
            (lr, mom) = self.lr_scheduler(self.num_update)
        else:
            lr = self.lr
        if index in self.lr_mult:
            lr *= self.lr_mult[index]
        elif index in self.idx2name:
            lr *= self.lr_mult.get(self.idx2name[index], 1.0)
        return lr, mom

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        (lr, momentum) = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)

        if state:
            mom = state
            mom[:] *= momentum
            mom[:] += -lr * (1.0 - momentum) * (grad + wd * weight)
            weight[:] += mom
        else:
            assert self.momentum == 0.0
            # Fix: use the per-index weight decay computed above
            # (self._get_wd honours per-parameter wd multipliers), not the
            # global self.wd — consistent with the momentum branch.
            weight[:] += -lr * (grad + wd * weight)
| 27.640351 | 79 | 0.584259 |
74f1fc0f5f520119c8b56d1fd6771a966ec9a0ec | 3,393 | py | Python | src/apex/algo/iud_removal.py | kpwhri/apex_iud_nlp | f59afbd5d19d6bae21264f6de7ee24382ccb694e | [
"MIT"
] | 1 | 2021-09-23T18:24:15.000Z | 2021-09-23T18:24:15.000Z | src/apex/algo/iud_removal.py | kpwhri/apex_iud_nlp | f59afbd5d19d6bae21264f6de7ee24382ccb694e | [
"MIT"
] | null | null | null | src/apex/algo/iud_removal.py | kpwhri/apex_iud_nlp | f59afbd5d19d6bae21264f6de7ee24382ccb694e | [
"MIT"
] | 1 | 2021-09-23T18:24:19.000Z | 2021-09-23T18:24:19.000Z | from apex.algo.shared import IUD, boilerplate, safe_may, hypothetical, in_place
from apex.algo.pattern import Document, Pattern
from apex.algo.result import Status, Result
# --- regex fragments -------------------------------------------------------
# Terms that negate a removal mention (e.g. "should remove" is not a removal).
negation = r'(ready|should|sometimes|must|decline|\bnot\b)'
# Other removable objects that must not be confused with an IUD.
other = r'(fibroid|v25.1\d|tampon)'
# "X removed" where X is an insertion instrument, not the IUD itself.
tool_remove = r'(introducer|inserter|tenaculum|instruments?)( (was|were))? removed'
# Up to three arbitrary intervening words.
words_3 = r'( \w+){0,3}'
# --- document patterns -----------------------------------------------------
# Boilerplate "removal by <provider>" phrasing; sections matching this are
# skipped entirely in determine_iud_removal.
REMOVE_BY = Pattern(f'(remov\\w+|replac\\w+){IUD} by')
# Generic removal/replacement mention, heavily guarded by negating contexts.
REMOVE = Pattern(r'(remov\w+|replac\w+)',
                 negates=[negation, boilerplate, hypothetical, in_place,
                          other, tool_remove, safe_may, 'rtc', 'easiest time', r'\bplan\b'])
# "IUD was removed" — probable removal statement.
PROB_REMOVE = Pattern(f'{IUD} was removed')
# Definite removal: removal plus difficulty/ease qualifiers or grasping verbs.
DEF_REMOVE = Pattern(f'{IUD} ('
                     f'(was )?remov(ed|al|e) '
                     f'({words_3} (difficulty|problem|traction|easy)|easily|quickly)'
                     f'|((easily|quickly|(un)?complicat) remov(ed|al|e))'
                     f'|{words_3} ?grasp(ed|ing)? {words_3} remov(ed|al|e)'
                     f')', negates=['risk', 'to (have|get)', 'come in'])
# Removal immediately followed by replacement.
DEF_REPLACE = Pattern(f'{IUD} remov(ed|al) {words_3} replac')
# Instruments/techniques characteristic of an actual removal procedure.
TOOL = Pattern(r'((ring )?forceps?|hook|fashion|(alligator )? clamp|'
               r'strings? (grasp|clasp)|(grasp|clasp)\w* strings?|'
               r'(with|gentle|more|\bw) traction|technique)',
               negates=[r'\bplaced\b', 'insertion', 'trimmed',
                        'unsuccessful', 'unable', r'not (recover|retriev|remove)\w+'])
# Coded/plan-style removal entry.
PLAN = Pattern(r'\brem intrauterine device\b',
               negates=[])
# All positive-evidence patterns, in the order they are checked.
ALL = (REMOVE, DEF_REMOVE, PROB_REMOVE, DEF_REPLACE, TOOL, PLAN)
class RemoveStatus(Status):
    """Outcome categories for IUD-removal detection in a document."""
    NONE = -1        # IUD mentioned, but no removal language found
    REMOVE = 1       # probable removal mention
    TOOL_REMOVE = 2  # removal instruments/technique described
    PLAN = 3         # coded/planned removal entry
    DEF_REMOVE = 4   # definite removal statement
    DEF_REPLACE = 5  # removal followed by replacement
    SKIP = 99        # document has no relevant patterns at all
def confirm_iud_removal(document: Document, expected=None):
    """Yield a Result for every positive removal finding in *document*."""
    # Positive statuses: REMOVE, TOOL_REMOVE, PLAN, DEF_REMOVE, DEF_REPLACE
    # (i.e. numeric values 1 through 5); NONE/SKIP are filtered out.
    positive_values = {
        RemoveStatus.REMOVE.value,
        RemoveStatus.TOOL_REMOVE.value,
        RemoveStatus.PLAN.value,
        RemoveStatus.DEF_REMOVE.value,
        RemoveStatus.DEF_REPLACE.value,
    }
    for status, snippet in determine_iud_removal(document):
        if status.value in positive_values:
            yield Result(status, status.value, expected, snippet)
def determine_iud_removal(document: Document):
    """Scan *document* for IUD-removal evidence, yielding (status, text) pairs.

    A section may yield several statuses (e.g. both DEF_REMOVE and TOOL_REMOVE)
    since each positive pattern is checked independently.
    """
    # Cheap pre-filter: skip documents with no removal-related language at all.
    if document.has_patterns(*ALL, ignore_negation=True):
        section_text = []
        for section in document.select_sentences_with_patterns(IUD):
            # "removal by <provider>" boilerplate — not evidence of a removal.
            if section.has_pattern(REMOVE_BY):
                continue
            # these definitely have correct language
            if section.has_patterns(*ALL):
                # require either REMOVE/PLAN since this could have other refs
                if section.has_patterns(DEF_REMOVE):
                    yield RemoveStatus.DEF_REMOVE, section.text
                if section.has_patterns(DEF_REPLACE):
                    yield RemoveStatus.DEF_REPLACE, section.text
                if section.has_patterns(PROB_REMOVE):
                    yield RemoveStatus.REMOVE, section.text
                if section.has_patterns(TOOL):
                    yield RemoveStatus.TOOL_REMOVE, section.text
                if section.has_patterns(REMOVE):
                    yield RemoveStatus.REMOVE, section.text
                if section.has_patterns(PLAN):
                    yield RemoveStatus.PLAN, section.text
            else:
                # IUD mentioned without removal language: collect for NONE.
                section_text.append(section.text)
        if section_text:
            yield RemoveStatus.NONE, ' '.join(section_text)
        else:
            yield RemoveStatus.SKIP, document.text
    else:
        yield RemoveStatus.SKIP, document.text
| 42.949367 | 92 | 0.604775 |
0722e739893b1f562b46b1b26325fbdd6c1a7fd8 | 4,161 | py | Python | server/src/weblab/core/login/web/uned_sso.py | romainrossi/weblabdeusto | 494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f | [
"BSD-2-Clause"
] | 15 | 2015-03-12T12:15:41.000Z | 2021-12-20T17:53:24.000Z | server/src/weblab/core/login/web/uned_sso.py | romainrossi/weblabdeusto | 494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f | [
"BSD-2-Clause"
] | 44 | 2015-01-07T09:22:05.000Z | 2017-01-31T22:44:21.000Z | server/src/weblab/core/login/web/uned_sso.py | romainrossi/weblabdeusto | 494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f | [
"BSD-2-Clause"
] | 22 | 2015-01-13T13:55:48.000Z | 2021-12-16T17:07:00.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import urllib
import base64
import voodoo.log as log
from voodoo.log import logged
from flask import request, make_response, redirect
from weblab.core.login.web import weblab_api
from weblab.core.login.web import ExternalSystemManager
import weblab.core.login.exc as LoginErrors
from weblab.data.dto.users import User
from weblab.data.dto.users import StudentRole
# M2Crypto is an optional dependency; record its availability so the
# handlers below can fail gracefully instead of crashing at import time.
try:
    from M2Crypto import BIO, RSA, EVP
except ImportError:
    M2CRYPTO_AVAILABLE = False
else:
    M2CRYPTO_AVAILABLE = True
# Lazily initialised from the WebLab configuration on the first request
# (see the uned_sso view below).
PEM_FILE_PATH = None
UNED_SSO = None
def process_cookie(original_message):
    """Verify the signed UNED SSO cookie and extract the identity fields.

    The (URL-quoted) cookie has the form ``<payload>#<base64 signature>``,
    where the payload itself is a ``#``-separated list of ``KEY:base64(value)``
    entries.  The signature is verified as SHA-1/RSA against the public key
    stored at PEM_FILE_PATH.

    :returns: ``(user_id, email)`` decoded from the payload ('' if absent).
    :raises Exception: if the PEM file cannot be read or the signature does
        not verify.
    """
    unquoted_message = urllib.unquote(original_message)
    # Split payload and signature on the *last* '#'.
    payload = unquoted_message[:unquoted_message.rfind('#')]
    base64_signature = unquoted_message[unquoted_message.rfind('#')+1:]
    signature = base64.decodestring(base64_signature)

    try:
        # Fix: use a context manager so the handle is always closed (it was
        # previously leaked), and catch only I/O errors instead of a bare
        # ``except:`` that also swallowed KeyboardInterrupt/SystemExit.
        with open(PEM_FILE_PATH or '') as pem_file:
            pem = pem_file.read()
    except IOError:
        raise Exception("Could not open PEM file")

    bio = BIO.MemoryBuffer(pem)
    rsa = RSA.load_pub_key_bio(bio)
    pubkey = EVP.PKey()
    pubkey.assign_rsa(rsa)
    pubkey.reset_context(md='sha1')
    pubkey.verify_init()
    pubkey.verify_update(payload)

    return_value = pubkey.verify_final(signature)
    if not return_value:
        raise Exception("UNED cookie not verified")

    user_id = ''
    email = ''
    for elem in payload.split('#'):
        if elem.startswith('ID:'):
            user_id = base64.decodestring(elem.split(':')[1])
        elif elem.startswith('EMAIL:'):
            email = base64.decodestring(elem.split(':')[1])
    return user_id, email
class UnedSSOManager(ExternalSystemManager):
    """External-login adapter that maps a verified UNED SSO cookie to a User."""
    NAME = 'UNED-SSO'
    @logged(log.level.Warning)
    def get_user(self, credentials):
        """Build a WebLab User from the signed SSO cookie in *credentials*."""
        if not M2CRYPTO_AVAILABLE:
            raise Exception("M2Crypto module not available")
        user_id, email = process_cookie(credentials)
        # Local login is derived from the e-mail address.
        login = '%s@uned' % email
        full_name = user_id # We don't know the full name
        return User(login, full_name, email, StudentRole())
    def get_user_id(self, credentials):
        """Return the identifier used to look the user up (the e-mail)."""
        if not M2CRYPTO_AVAILABLE:
            raise Exception("M2Crypto module not available")
        return self.get_user(credentials).email
@weblab_api.route_login_web('/unedsso/')
def uned_sso():
    """Login endpoint for the UNED single-sign-on flow.

    With a valid 'usuarioUNEDv2' cookie, logs the user in and redirects to
    the client; without one, redirects to the UNED SSO portal.
    """
    # Initialize global variables if not previously done
    global PEM_FILE_PATH, UNED_SSO
    if PEM_FILE_PATH is None:
        PEM_FILE_PATH = weblab_api.config.get_value('uned_sso_public_key_path', '')
        UNED_SSO = weblab_api.config.get_value('uned_sso', False)
    # Reject user if UNED_SSO is not available
    if not UNED_SSO:
        return make_response("<html><body>UNED SSO system disabled</body></html>", content_type = 'text/html')
    if not M2CRYPTO_AVAILABLE:
        return make_response("<html><body>M2Crypto module not available</body></html>", content_type = 'text/html')
    payload = request.cookies.get('usuarioUNEDv2', '')
    if payload:
        try:
            session_id = weblab_api.api.extensible_login(UnedSSOManager.NAME, payload)
        except LoginErrors.InvalidCredentialsError:
            # Cookie verified but user unknown, or cookie invalid: tell apart
            # the two cases by re-parsing the cookie ourselves.
            try:
                _, email = process_cookie(payload)
            except:
                return "Invalid cookie found!"
            else:
                return "%s: you were verified, but you are not registered in this WebLab-Deusto instance. Contact the administrator." % email
        else:
            # Successful login: hand the session over to the web client.
            base_client_url = weblab_api.ctx.core_server_url + "client/"
            url = '%s#session_id=%s;%s.%s' % (base_client_url, session_id.id, session_id.id, weblab_api.ctx.route)
            return redirect(url)
    else:
        # No cookie yet: bounce through the UNED SSO portal and come back.
        return redirect('https://sso.uned.es/sso/index.aspx?URL=' + request.url)
| 31.285714 | 141 | 0.68349 |
67a0e964273a6482ebf3794e3fdecba36a464a0d | 3,779 | py | Python | lib-python/2.7/json/tests/test_unicode.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | lib-python/2.7/json/tests/test_unicode.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 75 | 2016-01-14T16:03:02.000Z | 2020-04-29T22:51:53.000Z | lib-python/2.7/json/tests/test_unicode.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | from collections import OrderedDict
from json.tests import PyTest, CTest
class TestUnicode(object):
    """Unicode handling tests for the json module (Python 2).

    Mixin: the concrete classes below bind self.json/self.dumps/self.loads
    to either the pure-Python or the C implementation.
    """
    def test_encoding1(self):
        encoder = self.json.JSONEncoder(encoding='utf-8')
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = encoder.encode(u)
        js = encoder.encode(s)
        self.assertEqual(ju, js)
    def test_encoding2(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = self.dumps(u, encoding='utf-8')
        js = self.dumps(s, encoding='utf-8')
        self.assertEqual(ju, js)
    def test_encoding3(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u)
        self.assertEqual(j, '"\\u03b1\\u03a9"')
    def test_encoding4(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u])
        self.assertEqual(j, '["\\u03b1\\u03a9"]')
    def test_encoding5(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u, ensure_ascii=False)
        self.assertEqual(j, u'"{0}"'.format(u))
    def test_encoding6(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u], ensure_ascii=False)
        self.assertEqual(j, u'["{0}"]'.format(u))
    def test_big_unicode_encode(self):
        # Astral-plane character: must encode as a UTF-16 surrogate pair.
        u = u'\U0001d120'
        self.assertEqual(self.dumps(u), '"\\ud834\\udd20"')
        self.assertEqual(self.dumps(u, ensure_ascii=False), u'"\U0001d120"')
    def test_big_unicode_decode(self):
        u = u'z\U0001d120x'
        self.assertEqual(self.loads('"' + u + '"'), u)
        self.assertEqual(self.loads('"z\\ud834\\udd20x"'), u)
    def test_unicode_decode(self):
        # Round-trip every BMP code point below the surrogate range.
        for i in range(0, 0xd7ff):
            u = unichr(i)
            s = '"\\u{0:04x}"'.format(i)
            self.assertEqual(self.loads(s), u)
    def test_object_pairs_hook_with_unicode(self):
        s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
             (u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook = lambda x: x), p)
        od = self.loads(s, object_pairs_hook = OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        # the object_pairs_hook takes priority over the object_hook
        self.assertEqual(self.loads(s,
                                    object_pairs_hook = OrderedDict,
                                    object_hook = lambda x: None),
                         OrderedDict(p))
    def test_default_encoding(self):
        self.assertEqual(self.loads(u'{"a": "\xe9"}'.encode('utf-8')),
            {'a': u'\xe9'})
    def test_unicode_preservation(self):
        self.assertEqual(type(self.loads(u'""')), unicode)
        self.assertEqual(type(self.loads(u'"a"')), unicode)
        self.assertEqual(type(self.loads(u'["a"]')[0]), unicode)
        # Issue 10038.
        self.assertEqual(type(self.loads('"foo"')), unicode)
    def test_encode_not_utf_8(self):
        self.assertEqual(self.dumps('\xb1\xe6', encoding='iso8859-2'),
                         '"\\u0105\\u0107"')
        self.assertEqual(self.dumps(['\xb1\xe6'], encoding='iso8859-2'),
                         '["\\u0105\\u0107"]')
    def test_bad_encoding(self):
        self.assertRaises(UnicodeEncodeError, self.loads, '"a"', u"rat\xe9")
        self.assertRaises(TypeError, self.loads, '"a"', 1)
class TestPyUnicode(TestUnicode, PyTest): pass
class TestCUnicode(TestUnicode, CTest): pass  # run the suite against the C-accelerated implementation
| 39.364583 | 78 | 0.582429 |
edb29b31a7494fb90c7c359bea71cde8036f8dd3 | 1,026 | py | Python | test/unit/app/command/test_dispatcher.py | Tastyep/Pi-OpenCast | 1ed07130c26e6dd70184446af3c9143094d0c9a0 | [
"MIT"
] | 31 | 2019-10-05T14:23:10.000Z | 2022-02-27T19:38:55.000Z | test/unit/app/command/test_dispatcher.py | Tastyep/RaspberryCast | 8ae8cf986b373b04c43d248ee72c77b1a29daa43 | [
"MIT"
] | 199 | 2020-08-30T16:33:03.000Z | 2022-03-28T04:10:06.000Z | test/unit/app/command/test_dispatcher.py | Tastyep/RaspberryCast | 8ae8cf986b373b04c43d248ee72c77b1a29daa43 | [
"MIT"
] | 1 | 2019-10-05T16:00:49.000Z | 2019-10-05T16:00:49.000Z | from test.util import TestCase
from unittest.mock import Mock
from OpenCast.app.command.dispatcher import CommandDispatcher
class CommandDispatcherTest(TestCase):
    """Exercises CommandDispatcher's observe/dispatch contract."""

    def setUp(self):
        def run_inline(handler, *args):
            # Execute submitted work synchronously so assertions are
            # deterministic — no real executor threads involved.
            handler(*args)

        self.executor = Mock()
        self.executor.submit = Mock(side_effect=run_inline)
        self.dispatcher = CommandDispatcher(self.executor)
        self.cmd = Mock()
        self.handler = Mock()

    def test_observe_command(self):
        # A registered handler receives the dispatched command exactly once.
        self.dispatcher.observe(type(self.cmd), self.handler)
        self.dispatcher.dispatch(self.cmd)
        self.handler.assert_called_once_with(self.cmd)

    def test_multiple_observe_command(self):
        # Every observer registered for the same command type is notified.
        extra_handler = Mock()
        self.dispatcher.observe(type(self.cmd), self.handler)
        self.dispatcher.observe(type(self.cmd), extra_handler)
        self.dispatcher.dispatch(self.cmd)
        self.handler.assert_called_once_with(self.cmd)
        extra_handler.assert_called_once_with(self.cmd)
52e0780f563481ab3430cb918939306d787076df | 7,893 | py | Python | research/cv/fairmot/fairmot_eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/fairmot/fairmot_eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/fairmot/fairmot_eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval fairmot."""
import os
import os.path as osp
import logging
from src.backbone_dla_conv import DLASegConv
from src.infer_net import InferNet
from src.opts import Opts
from src.fairmot_pose import WithNetCell
from src.tracking_utils import visualization as vis
from src.tracker.multitracker import JDETracker
from src.tracking_utils.log import logger
from src.tracking_utils.utils import mkdir_if_missing
from src.tracking_utils.evaluation import Evaluator
from src.tracking_utils.timer import Timer
import src.utils.jde as datasets
from mindspore import Tensor, context
from mindspore import dtype as mstype
from mindspore.train.serialization import load_checkpoint
import cv2
import motmetrics as mm
import numpy as np
def write_results(filename, results, data_type):
    """Dump tracking results to *filename* in MOT or KITTI text format.

    *results* is an iterable of (frame_id, tlwhs, track_ids) triples; boxes
    with a negative track id are skipped.  KITTI numbering is zero-based,
    hence the frame_id decrement for that format.
    """
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                out.write(save_format.format(frame=frame_id, id=track_id,
                                             x1=x1, y1=y1,
                                             x2=x1 + w, y2=y1 + h,
                                             w=w, h=h))
    logger.info('save results to %s', filename)
def eval_seq(opt, net, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    """Run the tracker over one image sequence and write per-frame results.

    Returns (frame_count, average_time_per_frame, timer_calls).  Side
    effects: writes *result_filename* (via write_results) and optionally
    shows/saves annotated frames.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    # for path, img, img0 in dataloader:
    for _, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        # Add a batch dimension and move both images onto MindSpore tensors.
        blob = np.expand_dims(img, 0)
        blob = Tensor(blob, mstype.float32)
        img0 = Tensor(img0, mstype.float32)
        height, width = img0.shape[0], img0.shape[1]
        inp_height, inp_width = [blob.shape[2], blob.shape[3]]
        # Affine-transform metadata mapping network output back to the
        # original image (center, scale, output grid size).
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {'c': c, 's': s, 'out_height': inp_height // opt.down_ratio,
                'out_width': inp_width // opt.down_ratio}
        id_feature, dets = net(blob)
        online_targets = tracker.update(id_feature.asnumpy(), dets, meta)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Drop implausibly wide ("vertical" aspect > 1.6) or tiny boxes.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # Result frame ids are 1-based per MOT convention.
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # NOTE(review): img0 was converted to a Tensor above but is passed
            # to plot_tracking/cv2 here — presumably those accept it; verify.
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt, data_root, seqs=None, exp_name='MOT17_test_public_dla34',
         save_images=True, save_videos=False, show_image=False):
    """Evaluate the FairMOT tracker over all sequences in *seqs*.

    Builds the DLA-34 backbone + inference net once, runs eval_seq per
    sequence, accumulates MOT metrics, and prints/saves a summary.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    backbone_net = DLASegConv(opt.heads,
                              down_ratio=4,
                              final_kernel=1,
                              last_level=5,
                              head_conv=256)
    load_checkpoint(opt.load_model, net=backbone_net)
    infer_net = InferNet()
    net = WithNetCell(backbone_net, infer_net)
    net.set_train(False)
    for sequence in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, sequence) \
            if save_images or save_videos else None
        logger.info('start seq: %s', sequence)
        dataloader = datasets.LoadImages(osp.join(data_root, sequence, 'img1'), (1088, 608))
        result_filename = os.path.join(result_root, '{}.txt'.format(sequence))
        # FIX: read seqinfo.ini via a context manager; the original
        # open(...).read() leaked the file handle.
        with open(os.path.join(data_root, sequence, 'seqinfo.ini')) as seqinfo:
            meta_info = seqinfo.read()
        frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, net, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        logger.info('Evaluate seq: %s', sequence)
        accs.append(Evaluator(data_root, sequence, data_type).eval_file(result_filename))
        if save_videos:
            print(output_dir)
            output_video_path = osp.join(output_dir, '{}.mp4'.format(sequence))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    # Aggregate timing over all sequences.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
    # Script entry point: configure MindSpore for Ascend graph mode and
    # evaluate the four MOT20 training sequences.
    opts = Opts().init()
    context.set_context(
        mode=context.GRAPH_MODE,
        # mode=context.PYNATIVE_MODE,
        device_target="Ascend",
        device_id=opts.id,
        save_graphs=False)
    # Whitespace-separated sequence names; .split() below ignores the layout.
    seqs_str = '''MOT20-01
                  MOT20-02
                  MOT20-03
                  MOT20-05 '''
    data_roots = os.path.join(opts.data_dir, 'MOT20/train')
    seq = [seq.strip() for seq in seqs_str.split()]
    main(opts,
         data_root=data_roots,
         seqs=seq,
         exp_name='MOT20_distribute_dla34_conv',
         show_image=False,
         save_images=False,
         save_videos=False)
| 41.324607 | 112 | 0.627518 |
e519553538862e6f450cbc1e5368deb31e500460 | 305 | py | Python | config.py | lucienraeg/ai-social | bdc7e3022f6da2bf784a3b59664f8046445f3346 | [
"MIT"
] | null | null | null | config.py | lucienraeg/ai-social | bdc7e3022f6da2bf784a3b59664f8046445f3346 | [
"MIT"
] | null | null | null | config.py | lucienraeg/ai-social | bdc7e3022f6da2bf784a3b59664f8046445f3346 | [
"MIT"
] | null | null | null | import os
# FIX: the original used os.path.dirname(__name__), which resolves to the
# current working directory, not the directory containing this file.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Flask app configuration, sourced from the environment with defaults.

    SECRET_KEY falls back to a development default; SQLALCHEMY_DATABASE_URI
    falls back to a SQLite file next to this module.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'abcd1234'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    # Disable the SQLAlchemy event system; it is unused and adds overhead.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
d1482e103e0ef0be6eda027fce71e50510487966 | 3,154 | py | Python | Spyder/Sesiones/Sesion 3.1 y Sesion 4.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | Spyder/Sesiones/Sesion 3.1 y Sesion 4.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | Spyder/Sesiones/Sesion 3.1 y Sesion 4.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 20:30:28 2021
@author: Alex
"""
reset -f
import os #sistema operativo
import pandas as pd #gestionar datframes
import numpy as np #numeric python (vectores, matrices,...)
import matplotlib.pyplot as plt #graficos
#change working directory
os.chdir('C:/Programacion Estadistica PEP/code_and_data')
os.getcwd()
wbr = pd.read_csv('WBR_11_12_denormalized_temp.csv', sep=';', decimal=',')
wbr.shape
wbr.head
wbr.tail()
#QC OK
wbr.cnt.describe()
x=wbr['cnt']
plt.hist(x,edgecolor='black', bins=10) #borde de color negro
plt.xticks(np.arange(0,10000, step=1000)) #eje X
plt.title('Figure 1. Daily Bicycle rentals in Washington') #poner titulo
plt.ylabel('Frequency') #dar nombre al eje y
plt.xlabel('Number of rented bicycles') #dar nombre al eje x
plt.show()
#Guardar los resultados descriptivos en una lista
res = wbr.cnt.describe()
res[1]
m = res[1]
sd = res[2]
n = res[0]
#Subsetting (Elegir variables)
my_vars=['temp_celsius','cnt']
#Extraer variables y guardarlas
wbr_minimal=wbr[my_vars]
#Explore year
mytable = wbr.groupby(['yr']).size()
print(mytable)
wbr_2011 = wbr[wbr.yr == 0]
wbr_2011.cnt.describe()
plt.hist(wbr_2011.cnt)
#Ejercicio 1 (Ventas del invierno 2012)
wbr_winter_12 = wbr[(wbr.yr == 1) & (wbr.season == 1)] #subseting con dos condiciones PONER PARENTESIS
wbr_winter_12.shape
wbr_winter_12.cnt.describe()
plt.hist(wbr_winter_12.cnt)
#Ejercicio 1B (Describir ventas invierno y otoño)
wbr_fall_winter = wbr[(wbr.season ==1) | (wbr.season ==4)]
wbr_fall_winter.shape
wbr_fall_winter.cnt.describe()
plt.hist(wbr_fall_winter.cnt)
#Ejercicio 2 ()
import os #sistema operativo
import pandas as pd #gestionar datframes
import numpy as np #numeric python (vectores, matrices,...)
import matplotlib.pyplot as plt #graficos
os.chdir('C:/Programacion Estadistica PEP/code_and_data')
os.getcwd()
wbr_ue = pd.read_csv('wbr_ue.csv', sep=';', decimal=',')
wbr_ue.temp_celsius.describe()
#Limpiar el dataset
wbr_ue['temp_celsius_c'] = wbr_ue.temp_celsius.replace(99, np.nan)
wbr_ue.temp_celsius_c.describe()
plt.hist(wbr_ue.temp_celsius_c)
#Para las evita que salga error por los nan (dropna)
wbr_ue.temp_celsius_c.dropna()
#Nueva columna
wbr['cs_ratio'] = (wbr.casual)/(wbr.registered)
plt.hist(wbr.cs_ratio)
wbr['cnt'] = (wbr.casual) + (wbr.registered)
#Recodificar variable (darle nombre a las estaciones del año)
wbr.loc[(wbr['season'] ==1), "season_cat"] = "Winter"
wbr.loc[(wbr['season'] ==2), "season_cat"] = "Spring"
wbr.loc[(wbr['season'] ==3), "season_cat"] = "Summer"
wbr.loc[(wbr['season'] ==4), "season_cat"] = "Fall"
#Despues de recodificar hacer QC
pd.crosstab(wbr.season, wbr.season_cat)
#Recodificar 2
wbr.cnt.describe()
wbr.loc [(wbr['cnt']<2567), "cnt_cat2"] = "1: Low rentals"
wbr.loc [(wbr['cnt']>=2567) & (wbr['cnt']<6442), "cnt_cat2"] = "2: Average rentals"
wbr.loc [(wbr['cnt']>=6442), "cnt_cat2"] = "3: High rentals"
#QC?
plt.scatter(wbr.cnt, wbr.cnt_cat2)
| 29.476636 | 102 | 0.676918 |
54d6c67cd8872a0c1a45deeef793038f7270948c | 2,243 | py | Python | site_search/tests/test_view.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | 1 | 2019-06-06T12:56:30.000Z | 2019-06-06T12:56:30.000Z | site_search/tests/test_view.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | null | null | null | site_search/tests/test_view.py | AccentDesign/djangocms-site-search | 90ed1e5ab5fe96be8f1a4a74994f18164a7363aa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from cms.api import create_page
from ..helpers import get_request
from ..views import SearchResultsView
class URLTestCase(TestCase):
    """Smoke test: the search results URL resolves and renders."""

    @override_settings(ROOT_URLCONF='site_search.tests.test_urls')
    def test_search_url_view(self):
        # The named route must be reachable and return HTTP 200.
        response = self.client.get(reverse('search:search_results'))
        self.assertEqual(response.status_code, 200)
class ViewTestCase(TestCase):
    """Tests SearchResultsView queryset filtering against CMS pages."""

    def setUp(self):
        # Wire a view instance to a fake request carrying the search query.
        self.view = SearchResultsView()
        self.request = get_request('en')
        self.request.GET = self.request.GET.copy()
        self.request.GET['q'] = 'test_page'
        self.view.request = self.request
        self.user = User.objects.create_user(
            username='jacob', email='jacob@…', password='top_secret')

    def _create_page(self, **data):
        # Helper: create a CMS page whose title matches the search query.
        return create_page(
            title='test_page',
            reverse_id='testpage',
            template='test.html',
            language='en',
            **data
        )

    def test_view_returns_ok_response(self):
        response = SearchResultsView.as_view()(self.request)
        self.assertEqual(response.status_code, 200)

    def test_view_returns_object_list(self):
        # A published, matching page appears in the results.
        page = self._create_page()
        page.publish('en')
        self.assertEqual(len(self.view.get_queryset()), 1)

    def test_view_returns_blank_object_list_for_blank_search(self):
        # An empty query string yields no results rather than everything.
        page = self._create_page()
        page.publish('en')
        self.view.request.GET['q'] = ''
        self.assertEqual(len(self.view.get_queryset()), 0)

    def test_view_excludes_future_publications(self):
        # Pages dated in the future must not be searchable yet.
        page = self._create_page(
            publication_date=datetime.today() + timedelta(days=1))
        page.publish('en')
        self.assertEqual(len(self.view.get_queryset()), 0)

    def test_view_includes_past_publications(self):
        # Pages dated in the past are searchable.
        page = self._create_page(
            publication_date=datetime.today() - timedelta(days=1))
        page.publish('en')
        self.assertEqual(len(self.view.get_queryset()), 1)
dbf209ae724d0d600822b26749e7de200d0d36a6 | 548 | py | Python | jalabel_site/jalabel_app/models.py | JaewonAC/image-server | a93b44566800b74d07133c8098b3290f16ef62bb | [
"MIT"
] | null | null | null | jalabel_site/jalabel_app/models.py | JaewonAC/image-server | a93b44566800b74d07133c8098b3290f16ef62bb | [
"MIT"
] | 5 | 2021-03-19T01:41:58.000Z | 2021-09-22T18:51:03.000Z | jalabel_site/jalabel_app/models.py | JaewonAC/image-server | a93b44566800b74d07133c8098b3290f16ef62bb | [
"MIT"
] | null | null | null | from django.db import models
from sorl import thumbnail
from django.urls import reverse
class FixtureData(models.Model):
    """A labeled fixture image with its RLE mask and bookkeeping dates."""
    # Lot identifier for the fixture batch.
    lot_number = models.CharField(max_length=20)
    # Source image; sorl-thumbnail field so templates can request thumbnails.
    image_data = thumbnail.ImageField(upload_to='fixture/', blank=True)
    message = models.CharField(max_length=20, default='unknown')
    # NOTE(review): unique upload timestamp — presumably used as a natural
    # key; two uploads in the same instant would violate it. Verify intent.
    uploaded_date = models.DateTimeField(unique=True)
    last_modified = models.DateTimeField(auto_now=True)
    # Run-length-encoded mask stored as a file (despite the ImageField type).
    rle_csv = models.ImageField(upload_to='fixture_rle/', blank=True)

    def get_absolute_url(self):
        # NOTE(review): relative URL — fragile; a reverse() lookup would be
        # the conventional fix, but the target route is not visible here.
        return '../add/'
7be396d7d8b068a57463a067f6758a1aae7be8ba | 1,025 | py | Python | easy/28_implement_strstr.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | easy/28_implement_strstr.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | easy/28_implement_strstr.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | class Solution(object):
def strStr(self, text, pattern):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if pattern == "":
return 0
starter = 0 # the starting index of the patterin in the text
index = 0 # index for text
subindex = 0 # index for pattern
while index <= len(text) - 1:
if text[index] == pattern[subindex]:
index += 1
subindex += 1
if subindex == len(pattern): # check for if we checked all index of patter
# starter index of the text where pattern occured 1st time
return starter
else: # mismatch found
starter += 1 # shift the starter to next index
index = starter
subindex = 0 # reset the subindex
return -1
haystack = "mississippi"
needle = "issip"
obj = Solution()
result = obj.strStr(haystack, needle)
print(result)
| 26.282051 | 91 | 0.52 |
3a63c6e0cca065115f1e4bd60c0289000dcc6ce2 | 81,176 | py | Python | Lib/fontTools/feaLib/parser.py | tomarcher101/fonttools | 0b9348080853ab637c4826a4aa293a2b4d60449c | [
"Apache-2.0",
"MIT"
] | null | null | null | Lib/fontTools/feaLib/parser.py | tomarcher101/fonttools | 0b9348080853ab637c4826a4aa293a2b4d60449c | [
"Apache-2.0",
"MIT"
] | 1 | 2020-05-22T09:01:31.000Z | 2020-05-22T09:47:18.000Z | Lib/fontTools/feaLib/parser.py | tomarcher101/fonttools | 0b9348080853ab637c4826a4aa293a2b4d60449c | [
"Apache-2.0",
"MIT"
] | null | null | null | from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
from fontTools.misc.encodingTools import getEncoding
from fontTools.misc.py23 import *
import fontTools.feaLib.ast as ast
import logging
import os
import re
log = logging.getLogger(__name__)
class Parser(object):
"""Initializes a Parser object.
Example:
.. code:: python
from fontTools.feaLib.parser import Parser
parser = Parser(file, font.getReverseGlyphMap())
parsetree = parser.parse()
Note: the ``glyphNames`` iterable serves a double role to help distinguish
glyph names from ranges in the presence of hyphens and to ensure that glyph
names referenced in a feature file are actually part of a font's glyph set.
If the iterable is left empty, no glyph name in glyph set checking takes
place, and all glyph tokens containing hyphens are treated as literal glyph
names, not as ranges. (Adding a space around the hyphen can, in any case,
help to disambiguate ranges from glyph names containing hyphens.)
By default, the parser will follow ``include()`` statements in the feature
file. To turn this off, pass ``followIncludes=False``.
"""
extensions = {}
ast = ast
SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20+1)}
CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99+1)}
    def __init__(self, featurefile, glyphNames=(), followIncludes=True,
                 **kwargs):
        # Accept the deprecated 'glyphMap' keyword as an alias for
        # 'glyphNames'; passing both is an error.
        if "glyphMap" in kwargs:
            from fontTools.misc.loggingTools import deprecateArgument
            deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
            if glyphNames:
                raise TypeError("'glyphNames' and (deprecated) 'glyphMap' are "
                                "mutually exclusive")
            glyphNames = kwargs.pop("glyphMap")
        # Any remaining keyword arguments are unsupported.
        if kwargs:
            raise TypeError("unsupported keyword argument%s: %s"
                            % ("" if len(kwargs) == 1 else "s",
                               ", ".join(repr(k) for k in kwargs)))
        self.glyphNames_ = set(glyphNames)
        self.doc_ = self.ast.FeatureFile()
        # Per-kind symbol tables for named anchors, glyph classes, lookups
        # and value records defined by the feature file.
        self.anchors_ = SymbolTable()
        self.glyphclasses_ = SymbolTable()
        self.lookups_ = SymbolTable()
        self.valuerecords_ = SymbolTable()
        self.symbol_tables_ = {
            self.anchors_, self.valuerecords_
        }
        # One-token lookahead state maintained by advance_lexer_().
        self.next_token_type_, self.next_token_ = (None, None)
        self.cur_comments_ = []
        self.next_token_location_ = None
        lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
        self.lexer_ = lexerClass(featurefile)
        self.advance_lexer_(comments=True)
    def parse(self):
        """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile`
        object representing the root of the abstract syntax tree containing the
        parsed contents of the file."""
        statements = self.doc_.statements
        # Top-level dispatch loop: each iteration consumes one statement,
        # keyed on the current keyword or token type.
        while self.next_token_type_ is not None or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_,
                                     location=self.cur_token_location_))
            elif self.is_cur_keyword_("include"):
                statements.append(self.parse_include_())
            elif self.cur_token_type_ is Lexer.GLYPHCLASS:
                statements.append(self.parse_glyphclass_definition_())
            elif self.is_cur_keyword_(("anon", "anonymous")):
                statements.append(self.parse_anonymous_())
            elif self.is_cur_keyword_("anchorDef"):
                statements.append(self.parse_anchordef_())
            elif self.is_cur_keyword_("languagesystem"):
                statements.append(self.parse_languagesystem_())
            elif self.is_cur_keyword_("lookup"):
                statements.append(self.parse_lookup_(vertical=False))
            elif self.is_cur_keyword_("markClass"):
                statements.append(self.parse_markClass_())
            elif self.is_cur_keyword_("feature"):
                statements.append(self.parse_feature_block_())
            elif self.is_cur_keyword_("table"):
                statements.append(self.parse_table_())
            elif self.is_cur_keyword_("valueRecordDef"):
                statements.append(
                    self.parse_valuerecord_definition_(vertical=False))
            # Registered extension keywords get a callback with the parser.
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions:
                statements.append(self.extensions[self.cur_token_](self))
            # A stray semicolon is tolerated as an empty statement.
            elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    "Expected feature, languagesystem, lookup, markClass, "
                    "table, or glyph class definition, got {} \"{}\"".format(self.cur_token_type_, self.cur_token_),
                    self.cur_token_location_)
        return self.doc_
    def parse_anchor_(self):
        # Parses an anchor in any of the four formats given in the feature
        # file specification (2.e.vii).  Returns an ast.Anchor, or None for
        # the explicit <anchor NULL> form.
        self.expect_symbol_("<")
        self.expect_keyword_("anchor")
        location = self.cur_token_location_
        if self.next_token_ == "NULL":  # Format D
            self.expect_keyword_("NULL")
            self.expect_symbol_(">")
            return None
        if self.next_token_type_ == Lexer.NAME:  # Format E
            # Reference to a previously declared anchorDef by name.
            name = self.expect_name_()
            anchordef = self.anchors_.resolve(name)
            if anchordef is None:
                raise FeatureLibError(
                    'Unknown anchor "%s"' % name,
                    self.cur_token_location_)
            self.expect_symbol_(">")
            return self.ast.Anchor(anchordef.x, anchordef.y,
                                   name=name,
                                   contourpoint=anchordef.contourpoint,
                                   xDeviceTable=None, yDeviceTable=None,
                                   location=location)
        # Format A: bare <anchor x y>; B and C add contourpoint/device tables.
        x, y = self.expect_number_(), self.expect_number_()
        contourpoint = None
        if self.next_token_ == "contourpoint":  # Format B
            self.expect_keyword_("contourpoint")
            contourpoint = self.expect_number_()
        if self.next_token_ == "<":  # Format C
            xDeviceTable = self.parse_device_()
            yDeviceTable = self.parse_device_()
        else:
            xDeviceTable, yDeviceTable = None, None
        self.expect_symbol_(">")
        return self.ast.Anchor(x, y, name=None,
                               contourpoint=contourpoint,
                               xDeviceTable=xDeviceTable,
                               yDeviceTable=yDeviceTable,
                               location=location)
    def parse_anchor_marks_(self):
        # Parses a sequence of ``[<anchor> mark @MARKCLASS]*.``
        # Returns a list of (Anchor-or-None, MarkClass) pairs.
        anchorMarks = []  # [(self.ast.Anchor, markClassName)*]
        while self.next_token_ == "<":
            anchor = self.parse_anchor_()
            if anchor is None and self.next_token_ != "mark":
                continue  # <anchor NULL> without mark, eg. in GPOS type 5
            self.expect_keyword_("mark")
            markClass = self.expect_markClass_reference_()
            anchorMarks.append((anchor, markClass))
        return anchorMarks
    def parse_anchordef_(self):
        # Parses a named anchor definition (`section 2.e.viii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.vii>`_).
        assert self.is_cur_keyword_("anchorDef")
        location = self.cur_token_location_
        x, y = self.expect_number_(), self.expect_number_()
        contourpoint = None
        if self.next_token_ == "contourpoint":
            self.expect_keyword_("contourpoint")
            contourpoint = self.expect_number_()
        name = self.expect_name_()
        self.expect_symbol_(";")
        anchordef = self.ast.AnchorDefinition(name, x, y,
                                              contourpoint=contourpoint,
                                              location=location)
        # Register so later <anchor NAME> references (Format E) can resolve.
        self.anchors_.define(name, anchordef)
        return anchordef
    def parse_anonymous_(self):
        # Parses an anonymous data block (`section 10 <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#10>`_).
        assert self.is_cur_keyword_(("anon", "anonymous"))
        tag = self.expect_tag_()
        # The lexer scans the raw block content up to the closing "} TAG;".
        _, content, location = self.lexer_.scan_anonymous_block(tag)
        self.advance_lexer_()
        self.expect_symbol_('}')
        end_tag = self.expect_tag_()
        assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()"
        self.expect_symbol_(';')
        return self.ast.AnonymousBlock(tag, content, location=location)
    def parse_attach_(self):
        # Parses a GDEF Attach statement (`section 9.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.b>`_)
        assert self.is_cur_keyword_("Attach")
        location = self.cur_token_location_
        glyphs = self.parse_glyphclass_(accept_glyphname=True)
        # One or more contour point indices, terminated by ";".
        contourPoints = {self.expect_number_()}
        while self.next_token_ != ";":
            contourPoints.add(self.expect_number_())
        self.expect_symbol_(";")
        return self.ast.AttachStatement(glyphs, contourPoints,
                                        location=location)
    def parse_enumerate_(self, vertical):
        # Parse an enumerated pair positioning rule (`section 6.b.ii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_).
        # Consumes the "enum"/"enumerate" keyword, then delegates to the
        # ordinary positioning-rule parser with enumerated=True.
        assert self.cur_token_ in {"enumerate", "enum"}
        self.advance_lexer_()
        return self.parse_position_(enumerated=True, vertical=vertical)
    def parse_GlyphClassDef_(self):
        # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'
        # Any of the four comma-separated positions may be left empty,
        # in which case that category is None.
        assert self.is_cur_keyword_("GlyphClassDef")
        location = self.cur_token_location_
        if self.next_token_ != ",":
            baseGlyphs = self.parse_glyphclass_(accept_glyphname=False)
        else:
            baseGlyphs = None
        self.expect_symbol_(",")
        if self.next_token_ != ",":
            ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False)
        else:
            ligatureGlyphs = None
        self.expect_symbol_(",")
        if self.next_token_ != ",":
            markGlyphs = self.parse_glyphclass_(accept_glyphname=False)
        else:
            markGlyphs = None
        self.expect_symbol_(",")
        if self.next_token_ != ";":
            componentGlyphs = self.parse_glyphclass_(accept_glyphname=False)
        else:
            componentGlyphs = None
        self.expect_symbol_(";")
        return self.ast.GlyphClassDefStatement(baseGlyphs, markGlyphs,
                                               ligatureGlyphs, componentGlyphs,
                                               location=location)
    def parse_glyphclass_definition_(self):
        # Parses glyph class definitions such as '@UPPERCASE = [A-Z];'
        # The current token is the @CLASSNAME being defined.
        location, name = self.cur_token_location_, self.cur_token_
        self.expect_symbol_("=")
        glyphs = self.parse_glyphclass_(accept_glyphname=False)
        self.expect_symbol_(";")
        glyphclass = self.ast.GlyphClassDefinition(name, glyphs,
                                                   location=location)
        # Register so later @name references can resolve.
        self.glyphclasses_.define(name, glyphclass)
        return glyphclass
def split_glyph_range_(self, name, location):
# Since v1.20, the OpenType Feature File specification allows
# for dashes in glyph names. A sequence like "a-b-c-d" could
# therefore mean a single glyph whose name happens to be
# "a-b-c-d", or it could mean a range from glyph "a" to glyph
# "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a
# range from glyph "a-b-c" to glyph "d".Technically, this
# example could be resolved because the (pretty complex)
# definition of glyph ranges renders most of these splits
# invalid. But the specification does not say that a compiler
# should try to apply such fancy heuristics. To encourage
# unambiguous feature files, we therefore try all possible
# splits and reject the feature file if there are multiple
# splits possible. It is intentional that we don't just emit a
# warning; warnings tend to get ignored. To fix the problem,
# font designers can trivially add spaces around the intended
# split point, and we emit a compiler error that suggests
# how exactly the source should be rewritten to make things
# unambiguous.
parts = name.split("-")
solutions = []
for i in range(len(parts)):
start, limit = "-".join(parts[0:i]), "-".join(parts[i:])
if start in self.glyphNames_ and limit in self.glyphNames_:
solutions.append((start, limit))
if len(solutions) == 1:
start, limit = solutions[0]
return start, limit
elif len(solutions) == 0:
raise FeatureLibError(
"\"%s\" is not a glyph in the font, and it can not be split "
"into a range of known glyphs" % name, location)
else:
ranges = " or ".join(["\"%s - %s\"" % (s, l) for s, l in solutions])
raise FeatureLibError(
"Ambiguous glyph range \"%s\"; "
"please use %s to clarify what you mean" % (name, ranges),
location)
    def parse_glyphclass_(self, accept_glyphname):
        # Parses a glyph class, either named or anonymous, or (if
        # ``bool(accept_glyphname)``) a glyph name.
        if (accept_glyphname and
                self.next_token_type_ in (Lexer.NAME, Lexer.CID)):
            glyph = self.expect_glyph_()
            self.check_glyph_name_in_glyph_set(glyph)
            return self.ast.GlyphName(glyph, location=self.cur_token_location_)
        if self.next_token_type_ is Lexer.GLYPHCLASS:
            # Named class reference (@NAME); must already be defined.
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_)
            if isinstance(gc, self.ast.MarkClass):
                return self.ast.MarkClassName(
                    gc, location=self.cur_token_location_)
            else:
                return self.ast.GlyphClassName(
                    gc, location=self.cur_token_location_)
        # Anonymous class: "[" (glyph | range | @class)* "]".
        self.expect_symbol_("[")
        location = self.cur_token_location_
        glyphs = self.ast.GlyphClass(location=location)
        while self.next_token_ != "]":
            if self.next_token_type_ is Lexer.NAME:
                glyph = self.expect_glyph_()
                location = self.cur_token_location_
                # A dashed token that is not itself a glyph name is treated
                # as a range and disambiguated against the glyph set.
                if '-' in glyph and self.glyphNames_ and glyph not in self.glyphNames_:
                    start, limit = self.split_glyph_range_(glyph, location)
                    self.check_glyph_name_in_glyph_set(start, limit)
                    glyphs.add_range(
                        start, limit,
                        self.make_glyph_range_(location, start, limit))
                elif self.next_token_ == "-":
                    # Explicit "start - limit" range with a separate dash.
                    start = glyph
                    self.expect_symbol_("-")
                    limit = self.expect_glyph_()
                    self.check_glyph_name_in_glyph_set(start, limit)
                    glyphs.add_range(
                        start, limit,
                        self.make_glyph_range_(location, start, limit))
                else:
                    # With no glyph set to check against, a dashed name can
                    # only be warned about, not resolved.
                    if '-' in glyph and not self.glyphNames_:
                        log.warning(str(FeatureLibError(
                            f"Ambiguous glyph name that looks like a range: {glyph!r}",
                            location
                        )))
                    self.check_glyph_name_in_glyph_set(glyph)
                    glyphs.append(glyph)
            elif self.next_token_type_ is Lexer.CID:
                # CID glyph or CID range; names are canonicalized as cidNNNNN.
                glyph = self.expect_glyph_()
                if self.next_token_ == "-":
                    range_location = self.cur_token_location_
                    range_start = self.cur_token_
                    self.expect_symbol_("-")
                    range_end = self.expect_cid_()
                    self.check_glyph_name_in_glyph_set(
                        f"cid{range_start:05d}",
                        f"cid{range_end:05d}",
                    )
                    glyphs.add_cid_range(range_start, range_end,
                                         self.make_cid_range_(range_location,
                                                              range_start, range_end))
                else:
                    glyph_name = f"cid{self.cur_token_:05d}"
                    self.check_glyph_name_in_glyph_set(glyph_name)
                    glyphs.append(glyph_name)
            elif self.next_token_type_ is Lexer.GLYPHCLASS:
                # Nested named class inside the brackets.
                self.advance_lexer_()
                gc = self.glyphclasses_.resolve(self.cur_token_)
                if gc is None:
                    raise FeatureLibError(
                        "Unknown glyph class @%s" % self.cur_token_,
                        self.cur_token_location_)
                if isinstance(gc, self.ast.MarkClass):
                    gc = self.ast.MarkClassName(
                        gc, location=self.cur_token_location_)
                else:
                    gc = self.ast.GlyphClassName(
                        gc, location=self.cur_token_location_)
                glyphs.add_class(gc)
            else:
                raise FeatureLibError(
                    "Expected glyph name, glyph range, "
                    f"or glyph class reference, found {self.next_token_!r}",
                    self.next_token_location_)
        self.expect_symbol_("]")
        return glyphs
def parse_class_name_(self):
# Parses named class - either a glyph class or mark class.
name = self.expect_class_name_()
gc = self.glyphclasses_.resolve(name)
if gc is None:
raise FeatureLibError(
"Unknown glyph class @%s" % name,
self.cur_token_location_)
if isinstance(gc, self.ast.MarkClass):
return self.ast.MarkClassName(
gc, location=self.cur_token_location_)
else:
return self.ast.GlyphClassName(
gc, location=self.cur_token_location_)
    def parse_glyph_pattern_(self, vertical):
        # Parses a glyph pattern, including lookups and context, e.g.::
        #
        #    a b
        #    a b c' d e
        #    a b c' lookup ChangeC d e
        #
        # Returns (prefix, glyphs, lookups, values, suffix, hasMarks) where
        # `glyphs` is the run of marked (') items and prefix/suffix are the
        # unmarked context around it.
        prefix, glyphs, lookups, values, suffix = ([], [], [], [], [])
        hasMarks = False
        while self.next_token_ not in {"by", "from", ";", ","}:
            gc = self.parse_glyphclass_(accept_glyphname=True)
            marked = False
            if self.next_token_ == "'":
                self.expect_symbol_("'")
                hasMarks = marked = True
            if marked:
                if suffix:
                    # makeotf also reports this as an error, while FontForge
                    # silently inserts ' in all the intervening glyphs.
                    # https://github.com/fonttools/fonttools/pull/1096
                    raise FeatureLibError(
                        "Unsupported contextual target sequence: at most "
                        "one run of marked (') glyph/class names allowed",
                        self.cur_token_location_)
                glyphs.append(gc)
            elif glyphs:
                # First unmarked item after the marked run starts the suffix.
                suffix.append(gc)
            else:
                prefix.append(gc)
            # One values slot per pattern item, None when absent.
            if self.is_next_value_():
                values.append(self.parse_valuerecord_(vertical))
            else:
                values.append(None)
            # Explicit "lookup <name>" references may only follow a marked item.
            lookuplist = None
            while self.next_token_ == "lookup":
                if lookuplist is None:
                    lookuplist = []
                self.expect_keyword_("lookup")
                if not marked:
                    raise FeatureLibError(
                        "Lookups can only follow marked glyphs",
                        self.cur_token_location_)
                lookup_name = self.expect_name_()
                lookup = self.lookups_.resolve(lookup_name)
                if lookup is None:
                    raise FeatureLibError(
                        'Unknown lookup "%s"' % lookup_name,
                        self.cur_token_location_)
                lookuplist.append(lookup)
            if marked:
                lookups.append(lookuplist)
        if not glyphs and not suffix:  # eg., "sub f f i by"
            assert lookups == []
            # Nothing was marked: the whole pattern accumulated in `prefix`,
            # so report it as the glyph run with empty context.
            return ([], prefix, [None] * len(prefix), values, [], hasMarks)
        else:
            assert not any(values[:len(prefix)]), values
            # Values may sit either on the marked run (format1) or on the
            # suffix (format2); prefer the suffix values only when they are
            # actual ValueRecords.
            format1 = values[len(prefix):][:len(glyphs)]
            format2 = values[(len(prefix) + len(glyphs)):][:len(suffix)]
            values = format2 if format2 and isinstance(format2[0], self.ast.ValueRecord) else format1
            return (prefix, glyphs, lookups, values, suffix, hasMarks)
def parse_chain_context_(self):
location = self.cur_token_location_
prefix, glyphs, lookups, values, suffix, hasMarks = \
self.parse_glyph_pattern_(vertical=False)
chainContext = [(prefix, glyphs, suffix)]
hasLookups = any(lookups)
while self.next_token_ == ",":
self.expect_symbol_(",")
prefix, glyphs, lookups, values, suffix, hasMarks = \
self.parse_glyph_pattern_(vertical=False)
chainContext.append((prefix, glyphs, suffix))
hasLookups = hasLookups or any(lookups)
self.expect_symbol_(";")
return chainContext, hasLookups
    def parse_ignore_(self):
        # Parses an ignore sub/pos rule and returns the matching AST
        # statement; explicit lookup references are not allowed here.
        assert self.is_cur_keyword_("ignore")
        location = self.cur_token_location_
        self.advance_lexer_()
        if self.cur_token_ in ["substitute", "sub"]:
            chainContext, hasLookups = self.parse_chain_context_()
            if hasLookups:
                raise FeatureLibError(
                    "No lookups can be specified for \"ignore sub\"",
                    location)
            return self.ast.IgnoreSubstStatement(chainContext,
                                                 location=location)
        if self.cur_token_ in ["position", "pos"]:
            chainContext, hasLookups = self.parse_chain_context_()
            if hasLookups:
                raise FeatureLibError(
                    "No lookups can be specified for \"ignore pos\"",
                    location)
            return self.ast.IgnorePosStatement(chainContext,
                                               location=location)
        # Anything other than sub/pos after "ignore" is an error.
        raise FeatureLibError(
            "Expected \"substitute\" or \"position\"",
            self.cur_token_location_)
def parse_include_(self):
assert self.cur_token_ == "include"
location = self.cur_token_location_
filename = self.expect_filename_()
# self.expect_symbol_(";")
return ast.IncludeStatement(filename, location=location)
def parse_language_(self):
assert self.is_cur_keyword_("language")
location = self.cur_token_location_
language = self.expect_language_tag_()
include_default, required = (True, False)
if self.next_token_ in {"exclude_dflt", "include_dflt"}:
include_default = (self.expect_name_() == "include_dflt")
if self.next_token_ == "required":
self.expect_keyword_("required")
required = True
self.expect_symbol_(";")
return self.ast.LanguageStatement(language,
include_default, required,
location=location)
def parse_ligatureCaretByIndex_(self):
assert self.is_cur_keyword_("LigatureCaretByIndex")
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
carets = [self.expect_number_()]
while self.next_token_ != ";":
carets.append(self.expect_number_())
self.expect_symbol_(";")
return self.ast.LigatureCaretByIndexStatement(glyphs, carets,
location=location)
def parse_ligatureCaretByPos_(self):
assert self.is_cur_keyword_("LigatureCaretByPos")
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
carets = [self.expect_number_()]
while self.next_token_ != ";":
carets.append(self.expect_number_())
self.expect_symbol_(";")
return self.ast.LigatureCaretByPosStatement(glyphs, carets,
location=location)
    def parse_lookup_(self, vertical):
        # Parses a ``lookup`` - either a lookup block, or a lookup reference
        # inside a feature.
        assert self.is_cur_keyword_("lookup")
        location, name = self.cur_token_location_, self.expect_name_()
        # "lookup <name>;" is a reference to a previously defined lookup.
        if self.next_token_ == ";":
            lookup = self.lookups_.resolve(name)
            if lookup is None:
                raise FeatureLibError("Unknown lookup \"%s\"" % name,
                                      self.cur_token_location_)
            self.expect_symbol_(";")
            return self.ast.LookupReferenceStatement(lookup,
                                                     location=location)
        # Otherwise this introduces a new lookup block definition.
        use_extension = False
        if self.next_token_ == "useExtension":
            self.expect_keyword_("useExtension")
            use_extension = True
        block = self.ast.LookupBlock(name, use_extension, location=location)
        self.parse_block_(block, vertical)
        # Register the block so later references can resolve it.
        self.lookups_.define(name, block)
        return block
    def parse_lookupflag_(self):
        # Parses a ``lookupflag`` statement, either specified by number or
        # in words.
        assert self.is_cur_keyword_("lookupflag")
        location = self.cur_token_location_
        # format B: "lookupflag 6;"
        if self.next_token_type_ == Lexer.NUMBER:
            value = self.expect_number_()
            self.expect_symbol_(";")
            return self.ast.LookupFlagStatement(value, location=location)
        # format A: "lookupflag RightToLeft MarkAttachmentType @M;"
        value_seen = False
        value, markAttachment, markFilteringSet = 0, None, None
        # Bit values per the OpenType LookupFlag field.
        flags = {
            "RightToLeft": 1, "IgnoreBaseGlyphs": 2,
            "IgnoreLigatures": 4, "IgnoreMarks": 8
        }
        seen = set()
        while self.next_token_ != ";":
            # Each flag word may appear at most once per statement.
            if self.next_token_ in seen:
                raise FeatureLibError(
                    "%s can be specified only once" % self.next_token_,
                    self.next_token_location_)
            seen.add(self.next_token_)
            if self.next_token_ == "MarkAttachmentType":
                self.expect_keyword_("MarkAttachmentType")
                markAttachment = self.parse_class_name_()
            elif self.next_token_ == "UseMarkFilteringSet":
                self.expect_keyword_("UseMarkFilteringSet")
                markFilteringSet = self.parse_class_name_()
            elif self.next_token_ in flags:
                value_seen = True
                # OR the bits together as flag words accumulate.
                value = value | flags[self.expect_name_()]
            else:
                raise FeatureLibError(
                    '"%s" is not a recognized lookupflag' % self.next_token_,
                    self.next_token_location_)
        self.expect_symbol_(";")
        # An empty "lookupflag;" statement is invalid.
        if not any([value_seen, markAttachment, markFilteringSet]):
            raise FeatureLibError(
                'lookupflag must have a value', self.next_token_location_)
        return self.ast.LookupFlagStatement(value,
                                            markAttachment=markAttachment,
                                            markFilteringSet=markFilteringSet,
                                            location=location)
    def parse_markClass_(self):
        # Parses a "markClass <glyphs> <anchor> @NAME;" definition and
        # returns the new MarkClassDefinition AST node.
        assert self.is_cur_keyword_("markClass")
        location = self.cur_token_location_
        glyphs = self.parse_glyphclass_(accept_glyphname=True)
        anchor = self.parse_anchor_()
        name = self.expect_class_name_()
        self.expect_symbol_(";")
        # Mark classes accumulate definitions: reuse an existing class of
        # the same name, creating and registering one on first sight.
        markClass = self.doc_.markClasses.get(name)
        if markClass is None:
            markClass = self.ast.MarkClass(name)
            self.doc_.markClasses[name] = markClass
            self.glyphclasses_.define(name, markClass)
        mcdef = self.ast.MarkClassDefinition(markClass, anchor, glyphs,
                                             location=location)
        markClass.addDefinition(mcdef)
        return mcdef
    def parse_position_(self, enumerated, vertical):
        # Parses a "position"/"pos" rule, dispatching to the specialized
        # parsers for attachment positioning, then handling chained
        # contextual (type 8), pair (type 2) and single (type 1) rules.
        assert self.cur_token_ in {"position", "pos"}
        if self.next_token_ == "cursive":  # GPOS type 3
            return self.parse_position_cursive_(enumerated, vertical)
        elif self.next_token_ == "base":   # GPOS type 4
            return self.parse_position_base_(enumerated, vertical)
        elif self.next_token_ == "ligature":   # GPOS type 5
            return self.parse_position_ligature_(enumerated, vertical)
        elif self.next_token_ == "mark":   # GPOS type 6
            return self.parse_position_mark_(enumerated, vertical)
        location = self.cur_token_location_
        prefix, glyphs, lookups, values, suffix, hasMarks = \
            self.parse_glyph_pattern_(vertical)
        self.expect_symbol_(";")
        if any(lookups):
            # GPOS type 8: Chaining contextual positioning; explicit lookups
            if any(values):
                raise FeatureLibError(
                    "If \"lookup\" is present, no values must be specified",
                    location)
            return self.ast.ChainContextPosStatement(
                prefix, glyphs, suffix, lookups, location=location)
        # Pair positioning, format A: "pos V 10 A -10;"
        # Pair positioning, format B: "pos V A -20;"
        if not prefix and not suffix and len(glyphs) == 2 and not hasMarks:
            if values[0] is None:  # Format B: "pos V A -20;"
                # In format B the single value belongs to the first glyph.
                values.reverse()
            return self.ast.PairPosStatement(
                glyphs[0], values[0], glyphs[1], values[1],
                enumerated=enumerated,
                location=location)
        # "enumerate" is meaningful only for pair positioning.
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is only allowed with pair positionings', location)
        return self.ast.SinglePosStatement(list(zip(glyphs, values)),
                                           prefix, suffix, forceChain=hasMarks,
                                           location=location)
def parse_position_cursive_(self, enumerated, vertical):
location = self.cur_token_location_
self.expect_keyword_("cursive")
if enumerated:
raise FeatureLibError(
'"enumerate" is not allowed with '
'cursive attachment positioning',
location)
glyphclass = self.parse_glyphclass_(accept_glyphname=True)
entryAnchor = self.parse_anchor_()
exitAnchor = self.parse_anchor_()
self.expect_symbol_(";")
return self.ast.CursivePosStatement(
glyphclass, entryAnchor, exitAnchor, location=location)
def parse_position_base_(self, enumerated, vertical):
location = self.cur_token_location_
self.expect_keyword_("base")
if enumerated:
raise FeatureLibError(
'"enumerate" is not allowed with '
'mark-to-base attachment positioning',
location)
base = self.parse_glyphclass_(accept_glyphname=True)
marks = self.parse_anchor_marks_()
self.expect_symbol_(";")
return self.ast.MarkBasePosStatement(base, marks, location=location)
def parse_position_ligature_(self, enumerated, vertical):
location = self.cur_token_location_
self.expect_keyword_("ligature")
if enumerated:
raise FeatureLibError(
'"enumerate" is not allowed with '
'mark-to-ligature attachment positioning',
location)
ligatures = self.parse_glyphclass_(accept_glyphname=True)
marks = [self.parse_anchor_marks_()]
while self.next_token_ == "ligComponent":
self.expect_keyword_("ligComponent")
marks.append(self.parse_anchor_marks_())
self.expect_symbol_(";")
return self.ast.MarkLigPosStatement(ligatures, marks, location=location)
def parse_position_mark_(self, enumerated, vertical):
location = self.cur_token_location_
self.expect_keyword_("mark")
if enumerated:
raise FeatureLibError(
'"enumerate" is not allowed with '
'mark-to-mark attachment positioning',
location)
baseMarks = self.parse_glyphclass_(accept_glyphname=True)
marks = self.parse_anchor_marks_()
self.expect_symbol_(";")
return self.ast.MarkMarkPosStatement(baseMarks, marks,
location=location)
def parse_script_(self):
assert self.is_cur_keyword_("script")
location, script = self.cur_token_location_, self.expect_script_tag_()
self.expect_symbol_(";")
return self.ast.ScriptStatement(script, location=location)
    def parse_substitute_(self):
        # Parses a GSUB rule ("sub"/"substitute" or the reverse-chaining
        # "rsub"/"reversesub") and returns the AST statement whose type
        # matches the rule's shape (GSUB lookup types 1-4, 6, 8).
        assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
        location = self.cur_token_location_
        reverse = self.cur_token_ in {"reversesub", "rsub"}
        old_prefix, old, lookups, values, old_suffix, hasMarks = \
            self.parse_glyph_pattern_(vertical=False)
        # Value records belong to GPOS, never to substitutions.
        if any(values):
            raise FeatureLibError(
                "Substitution statements cannot contain values", location)
        new = []
        if self.next_token_ == "by":
            keyword = self.expect_keyword_("by")
            while self.next_token_ != ";":
                gc = self.parse_glyphclass_(accept_glyphname=True)
                new.append(gc)
        elif self.next_token_ == "from":
            keyword = self.expect_keyword_("from")
            new = [self.parse_glyphclass_(accept_glyphname=False)]
        else:
            keyword = None
        self.expect_symbol_(";")
        # A rule with neither replacements nor lookup references is invalid.
        if len(new) == 0 and not any(lookups):
            raise FeatureLibError(
                'Expected "by", "from" or explicit lookup references',
                self.cur_token_location_)
        # GSUB lookup type 3: Alternate substitution.
        # Format: "substitute a from [a.1 a.2 a.3];"
        if keyword == "from":
            if reverse:
                raise FeatureLibError(
                    'Reverse chaining substitutions do not support "from"',
                    location)
            if len(old) != 1 or len(old[0].glyphSet()) != 1:
                raise FeatureLibError(
                    'Expected a single glyph before "from"',
                    location)
            if len(new) != 1:
                raise FeatureLibError(
                    'Expected a single glyphclass after "from"',
                    location)
            return self.ast.AlternateSubstStatement(
                old_prefix, old[0], old_suffix, new[0], location=location)
        num_lookups = len([l for l in lookups if l is not None])
        # GSUB lookup type 1: Single substitution.
        # Format A: "substitute a by a.sc;"
        # Format B: "substitute [one.fitted one.oldstyle] by one;"
        # Format C: "substitute [a-d] by [A.sc-D.sc];"
        if (not reverse and len(old) == 1 and len(new) == 1 and
                num_lookups == 0):
            glyphs = list(old[0].glyphSet())
            replacements = list(new[0].glyphSet())
            # A single replacement applies to every input glyph (format B).
            if len(replacements) == 1:
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    'but found a glyph class with %d elements' %
                    (len(glyphs), len(replacements)), location)
            return self.ast.SingleSubstStatement(
                old, new,
                old_prefix, old_suffix,
                forceChain=hasMarks,
                location=location
            )
        # GSUB lookup type 2: Multiple substitution.
        # Format: "substitute f_f_i by f f i;"
        if (not reverse and
                len(old) == 1 and len(old[0].glyphSet()) == 1 and
                len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1 and
                num_lookups == 0):
            return self.ast.MultipleSubstStatement(
                old_prefix, tuple(old[0].glyphSet())[0], old_suffix,
                tuple([list(n.glyphSet())[0] for n in new]),
                forceChain=hasMarks, location=location)
        # GSUB lookup type 4: Ligature substitution.
        # Format: "substitute f f i by f_f_i;"
        if (not reverse and
                len(old) > 1 and len(new) == 1 and
                len(new[0].glyphSet()) == 1 and
                num_lookups == 0):
            return self.ast.LigatureSubstStatement(
                old_prefix, old, old_suffix,
                list(new[0].glyphSet())[0], forceChain=hasMarks,
                location=location)
        # GSUB lookup type 8: Reverse chaining substitution.
        if reverse:
            if len(old) != 1:
                raise FeatureLibError(
                    "In reverse chaining single substitutions, "
                    "only a single glyph or glyph class can be replaced",
                    location)
            if len(new) != 1:
                raise FeatureLibError(
                    'In reverse chaining single substitutions, '
                    'the replacement (after "by") must be a single glyph '
                    'or glyph class', location)
            if num_lookups != 0:
                raise FeatureLibError(
                    "Reverse chaining substitutions cannot call named lookups",
                    location)
            # Glyph classes are matched pairwise in sorted order.
            glyphs = sorted(list(old[0].glyphSet()))
            replacements = sorted(list(new[0].glyphSet()))
            if len(replacements) == 1:
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    'but found a glyph class with %d elements' %
                    (len(glyphs), len(replacements)), location)
            return self.ast.ReverseChainSingleSubstStatement(
                old_prefix, old_suffix, old, new, location=location)
        if len(old) > 1 and len(new) > 1:
            raise FeatureLibError(
                'Direct substitution of multiple glyphs by multiple glyphs '
                'is not supported',
                location)
        # If there are remaining glyphs to parse, this is an invalid GSUB statement
        if len(new) != 0:
            raise FeatureLibError(
                'Invalid substitution statement',
                location
            )
        # GSUB lookup type 6: Chaining contextual substitution.
        rule = self.ast.ChainContextSubstStatement(
            old_prefix, old, old_suffix, lookups, location=location)
        return rule
def parse_subtable_(self):
assert self.is_cur_keyword_("subtable")
location = self.cur_token_location_
self.expect_symbol_(";")
return self.ast.SubtableStatement(location=location)
    def parse_size_parameters_(self):
        # Parses a ``parameters`` statement used in ``size`` features. See
        # `section 8.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.b>`_.
        assert self.is_cur_keyword_("parameters")
        location = self.cur_token_location_
        DesignSize = self.expect_decipoint_()
        SubfamilyID = self.expect_number_()
        RangeStart = 0
        RangeEnd = 0
        # The size range is optional only when SubfamilyID is 0; otherwise
        # (or when more numbers follow) both bounds must be given.
        if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or \
                SubfamilyID != 0:
            RangeStart = self.expect_decipoint_()
            RangeEnd = self.expect_decipoint_()
        self.expect_symbol_(";")
        return self.ast.SizeParameters(DesignSize, SubfamilyID,
                                       RangeStart, RangeEnd,
                                       location=location)
def parse_size_menuname_(self):
assert self.is_cur_keyword_("sizemenuname")
location = self.cur_token_location_
platformID, platEncID, langID, string = self.parse_name_()
return self.ast.FeatureNameStatement("size", platformID,
platEncID, langID, string,
location=location)
def parse_table_(self):
assert self.is_cur_keyword_("table")
location, name = self.cur_token_location_, self.expect_tag_()
table = self.ast.TableBlock(name, location=location)
self.expect_symbol_("{")
handler = {
"GDEF": self.parse_table_GDEF_,
"head": self.parse_table_head_,
"hhea": self.parse_table_hhea_,
"vhea": self.parse_table_vhea_,
"name": self.parse_table_name_,
"BASE": self.parse_table_BASE_,
"OS/2": self.parse_table_OS_2_,
}.get(name)
if handler:
handler(table)
else:
raise FeatureLibError('"table %s" is not supported' % name.strip(),
location)
self.expect_symbol_("}")
end_tag = self.expect_tag_()
if end_tag != name:
raise FeatureLibError('Expected "%s"' % name.strip(),
self.cur_token_location_)
self.expect_symbol_(";")
return table
    def parse_table_GDEF_(self, table):
        # Parses the body of a "table GDEF { ... }" block, appending one
        # statement per recognized keyword to table.statements.
        statements = table.statements
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.is_cur_keyword_("Attach"):
                statements.append(self.parse_attach_())
            elif self.is_cur_keyword_("GlyphClassDef"):
                statements.append(self.parse_GlyphClassDef_())
            elif self.is_cur_keyword_("LigatureCaretByIndex"):
                statements.append(self.parse_ligatureCaretByIndex_())
            elif self.is_cur_keyword_("LigatureCaretByPos"):
                statements.append(self.parse_ligatureCaretByPos_())
            elif self.cur_token_ == ";":
                # Stray semicolons are tolerated.
                continue
            else:
                raise FeatureLibError(
                    "Expected Attach, LigatureCaretByIndex, "
                    "or LigatureCaretByPos",
                    self.cur_token_location_)
def parse_table_head_(self, table):
statements = table.statements
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(
self.cur_token_, location=self.cur_token_location_))
elif self.is_cur_keyword_("FontRevision"):
statements.append(self.parse_FontRevision_())
elif self.cur_token_ == ";":
continue
else:
raise FeatureLibError("Expected FontRevision",
self.cur_token_location_)
    def parse_table_hhea_(self, table):
        # Parses the body of a "table hhea { ... }" block; each recognized
        # field name is followed by a single number.
        statements = table.statements
        fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
                # Field keys are stored lowercased.
                key = self.cur_token_.lower()
                value = self.expect_number_()
                statements.append(
                    self.ast.HheaField(key, value,
                                       location=self.cur_token_location_))
                # Exactly one number per field; reject extra tokens.
                if self.next_token_ != ";":
                    raise FeatureLibError("Incomplete statement", self.next_token_location_)
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError("Expected CaretOffset, Ascender, "
                                      "Descender or LineGap",
                                      self.cur_token_location_)
    def parse_table_vhea_(self, table):
        # Parses the body of a "table vhea { ... }" block; each recognized
        # field name is followed by a single number.
        statements = table.statements
        fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
                # Field keys are stored lowercased.
                key = self.cur_token_.lower()
                value = self.expect_number_()
                statements.append(
                    self.ast.VheaField(key, value,
                                       location=self.cur_token_location_))
                # Exactly one number per field; reject extra tokens.
                if self.next_token_ != ";":
                    raise FeatureLibError("Incomplete statement", self.next_token_location_)
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError("Expected VertTypoAscender, "
                                      "VertTypoDescender or VertTypoLineGap",
                                      self.cur_token_location_)
def parse_table_name_(self, table):
statements = table.statements
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(
self.cur_token_, location=self.cur_token_location_))
elif self.is_cur_keyword_("nameid"):
statement = self.parse_nameid_()
if statement:
statements.append(statement)
elif self.cur_token_ == ";":
continue
else:
raise FeatureLibError("Expected nameid",
self.cur_token_location_)
    def parse_name_(self):
        """Parses a name record. See `section 9.e <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_.

        Returns (platformID, platEncID, langID, unescaped_string). The
        numeric ids are optional in the source; defaults depend on the
        platform (Macintosh=1 or Windows=3).
        """
        platEncID = None
        langID = None
        if self.next_token_type_ in Lexer.NUMBERS:
            platformID = self.expect_any_number_()
            location = self.cur_token_location_
            if platformID not in (1, 3):
                raise FeatureLibError("Expected platform id 1 or 3", location)
            # Encoding and language ids must be given together or not at all.
            if self.next_token_type_ in Lexer.NUMBERS:
                platEncID = self.expect_any_number_()
                langID = self.expect_any_number_()
        else:
            # No ids given at all: default to the Windows platform.
            platformID = 3
            location = self.cur_token_location_
        if platformID == 1:                # Macintosh
            platEncID = platEncID or 0     # Roman
            langID = langID or 0           # English
        else:                              # 3, Windows
            platEncID = platEncID or 1     # Unicode
            langID = langID or 0x0409      # English
        string = self.expect_string_()
        self.expect_symbol_(";")
        encoding = getEncoding(platformID, platEncID, langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", location)
        # Resolve \XX / \XXXX escapes using the record's encoding.
        unescaped = self.unescape_string_(string, encoding)
        return platformID, platEncID, langID, unescaped
    def parse_nameid_(self):
        # Parses a "nameid <id> ...;" record inside a "table name" block.
        # Returns a NameRecord, or None when the record is skipped.
        assert self.cur_token_ == "nameid", self.cur_token_
        location, nameID = self.cur_token_location_, self.expect_any_number_()
        if nameID > 32767:
            raise FeatureLibError("Name id value cannot be greater than 32767",
                                  self.cur_token_location_)
        # Name ids 1-6 are reserved; consume and discard the record.
        if 1 <= nameID <= 6:
            log.warning("Name id %d cannot be set from the feature file. "
                        "Ignoring record" % nameID)
            self.parse_name_()  # skip to the next record
            return None
        platformID, platEncID, langID, string = self.parse_name_()
        return self.ast.NameRecord(nameID, platformID, platEncID,
                                   langID, string, location=location)
def unescape_string_(self, string, encoding):
if encoding == "utf_16_be":
s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string)
else:
unescape = lambda m: self.unescape_byte_(m, encoding)
s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string)
# We now have a Unicode string, but it might contain surrogate pairs.
# We convert surrogates to actual Unicode by round-tripping through
# Python's UTF-16 codec in a special mode.
utf16 = tobytes(s, "utf_16_be", "surrogatepass")
return tounicode(utf16, "utf_16_be")
@staticmethod
def unescape_unichr_(match):
n = match.group(0)[1:]
return unichr(int(n, 16))
@staticmethod
def unescape_byte_(match, encoding):
n = match.group(0)[1:]
return bytechr(int(n, 16)).decode(encoding)
    def parse_table_BASE_(self, table):
        # Parses the body of a "table BASE { ... }" block. A BaseTagList
        # must be seen before the corresponding BaseScriptList, because the
        # tag count sizes each script record.
        statements = table.statements
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.is_cur_keyword_("HorizAxis.BaseTagList"):
                horiz_bases = self.parse_base_tag_list_()
            elif self.is_cur_keyword_("HorizAxis.BaseScriptList"):
                # NOTE(review): if a BaseScriptList appears without a
                # preceding BaseTagList this raises UnboundLocalError rather
                # than a FeatureLibError -- confirm whether intended.
                horiz_scripts = self.parse_base_script_list_(len(horiz_bases))
                statements.append(
                        self.ast.BaseAxis(horiz_bases,
                                          horiz_scripts, False,
                                          location=self.cur_token_location_))
            elif self.is_cur_keyword_("VertAxis.BaseTagList"):
                vert_bases = self.parse_base_tag_list_()
            elif self.is_cur_keyword_("VertAxis.BaseScriptList"):
                vert_scripts = self.parse_base_script_list_(len(vert_bases))
                statements.append(
                        self.ast.BaseAxis(vert_bases,
                                          vert_scripts, True,
                                          location=self.cur_token_location_))
            elif self.cur_token_ == ";":
                continue
        # NOTE(review): unlike the other table parsers, unrecognized
        # statements are silently skipped here instead of raising.
    def parse_table_OS_2_(self, table):
        # Parses the body of a "table OS/2 { ... }" block. Field values may
        # be a single number, a 10-number Panose tuple, a list of range
        # bits, or a Vendor string.
        statements = table.statements
        numbers = ("FSType", "TypoAscender", "TypoDescender", "TypoLineGap",
                   "winAscent", "winDescent", "XHeight", "CapHeight",
                   "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize")
        ranges = ("UnicodeRange", "CodePageRange")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.cur_token_type_ is Lexer.NAME:
                # Field keys are stored lowercased.
                key = self.cur_token_.lower()
                value = None
                if self.cur_token_ in numbers:
                    value = self.expect_number_()
                elif self.is_cur_keyword_("Panose"):
                    # Panose is always exactly ten numbers.
                    value = []
                    for i in range(10):
                        value.append(self.expect_number_())
                elif self.cur_token_ in ranges:
                    value = []
                    while self.next_token_ != ";":
                        value.append(self.expect_number_())
                elif self.is_cur_keyword_("Vendor"):
                    value = self.expect_string_()
                # NOTE(review): an unrecognized NAME still appends a field
                # with value None instead of raising -- confirm intended.
                statements.append(
                    self.ast.OS2Field(key, value,
                                      location=self.cur_token_location_))
            elif self.cur_token_ == ";":
                continue
def parse_base_tag_list_(self):
# Parses BASE table entries. (See `section 9.a <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.a>`_)
assert self.cur_token_ in ("HorizAxis.BaseTagList",
"VertAxis.BaseTagList"), self.cur_token_
bases = []
while self.next_token_ != ";":
bases.append(self.expect_script_tag_())
self.expect_symbol_(";")
return bases
def parse_base_script_list_(self, count):
assert self.cur_token_ in ("HorizAxis.BaseScriptList",
"VertAxis.BaseScriptList"), self.cur_token_
scripts = [(self.parse_base_script_record_(count))]
while self.next_token_ == ",":
self.expect_symbol_(",")
scripts.append(self.parse_base_script_record_(count))
self.expect_symbol_(";")
return scripts
def parse_base_script_record_(self, count):
script_tag = self.expect_script_tag_()
base_tag = self.expect_script_tag_()
coords = [self.expect_number_() for i in range(count)]
return script_tag, base_tag, coords
def parse_device_(self):
result = None
self.expect_symbol_("<")
self.expect_keyword_("device")
if self.next_token_ == "NULL":
self.expect_keyword_("NULL")
else:
result = [(self.expect_number_(), self.expect_number_())]
while self.next_token_ == ",":
self.expect_symbol_(",")
result.append((self.expect_number_(), self.expect_number_()))
result = tuple(result) # make it hashable
self.expect_symbol_(">")
return result
def is_next_value_(self):
return self.next_token_type_ is Lexer.NUMBER or self.next_token_ == "<"
    def parse_valuerecord_(self, vertical):
        # Parses a GPOS value record: either a bare number (an advance in
        # the direction given by `vertical`), or a "<...>" record that is
        # NULL, a named valueRecordDef reference, or four placement/advance
        # numbers optionally followed by four device records.
        if self.next_token_type_ is Lexer.NUMBER:
            number, location = self.expect_number_(), self.cur_token_location_
            if vertical:
                val = self.ast.ValueRecord(yAdvance=number,
                                           vertical=vertical,
                                           location=location)
            else:
                val = self.ast.ValueRecord(xAdvance=number,
                                           vertical=vertical,
                                           location=location)
            return val
        self.expect_symbol_("<")
        location = self.cur_token_location_
        if self.next_token_type_ is Lexer.NAME:
            name = self.expect_name_()
            # "<NULL>" denotes an empty value record.
            if name == "NULL":
                self.expect_symbol_(">")
                return self.ast.ValueRecord()
            # Otherwise the name must refer to a valueRecordDef.
            vrd = self.valuerecords_.resolve(name)
            if vrd is None:
                raise FeatureLibError("Unknown valueRecordDef \"%s\"" % name,
                                      self.cur_token_location_)
            value = vrd.value
            xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
            xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
        else:
            xPlacement, yPlacement, xAdvance, yAdvance = (
                self.expect_number_(), self.expect_number_(),
                self.expect_number_(), self.expect_number_())
        if self.next_token_ == "<":
            xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
                self.parse_device_(), self.parse_device_(),
                self.parse_device_(), self.parse_device_())
            # Device deltas must fit in a signed byte; check the extremes
            # across all four device records at once.
            allDeltas = sorted([
                delta
                for size, delta
                in (xPlaDevice if xPlaDevice else ()) +
                (yPlaDevice if yPlaDevice else ()) +
                (xAdvDevice if xAdvDevice else ()) +
                (yAdvDevice if yAdvDevice else ())])
            if allDeltas[0] < -128 or allDeltas[-1] > 127:
                raise FeatureLibError(
                    "Device value out of valid range (-128..127)",
                    self.cur_token_location_)
        else:
            xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
                None, None, None, None)
        self.expect_symbol_(">")
        return self.ast.ValueRecord(
            xPlacement, yPlacement, xAdvance, yAdvance,
            xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice,
            vertical=vertical, location=location)
def parse_valuerecord_definition_(self, vertical):
# Parses a named value record definition. (See section `2.e.v <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.v>`_)
assert self.is_cur_keyword_("valueRecordDef")
location = self.cur_token_location_
value = self.parse_valuerecord_(vertical)
name = self.expect_name_()
self.expect_symbol_(";")
vrd = self.ast.ValueRecordDefinition(name, value, location=location)
self.valuerecords_.define(name, vrd)
return vrd
def parse_languagesystem_(self):
assert self.cur_token_ == "languagesystem"
location = self.cur_token_location_
script = self.expect_script_tag_()
language = self.expect_language_tag_()
self.expect_symbol_(";")
return self.ast.LanguageSystemStatement(script, language,
location=location)
def parse_feature_block_(self):
assert self.cur_token_ == "feature"
location = self.cur_token_location_
tag = self.expect_tag_()
vertical = (tag in {"vkrn", "vpal", "vhal", "valt"})
stylisticset = None
cv_feature = None
size_feature = False
if tag in self.SS_FEATURE_TAGS:
stylisticset = tag
elif tag in self.CV_FEATURE_TAGS:
cv_feature = tag
elif tag == "size":
size_feature = True
use_extension = False
if self.next_token_ == "useExtension":
self.expect_keyword_("useExtension")
use_extension = True
block = self.ast.FeatureBlock(tag, use_extension=use_extension,
location=location)
self.parse_block_(block, vertical, stylisticset, size_feature,
cv_feature)
return block
def parse_feature_reference_(self):
assert self.cur_token_ == "feature", self.cur_token_
location = self.cur_token_location_
featureName = self.expect_tag_()
self.expect_symbol_(";")
return self.ast.FeatureReferenceStatement(featureName,
location=location)
    def parse_featureNames_(self, tag):
        """Parses a ``featureNames`` statement found in stylistic set features.
        See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_."""
        assert self.cur_token_ == "featureNames", self.cur_token_
        block = self.ast.NestedBlock(tag, self.cur_token_,
                                     location=self.cur_token_location_)
        self.expect_symbol_("{")
        # The block body gets its own symbol-table scope.
        for symtab in self.symbol_tables_:
            symtab.enter_scope()
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                block.statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.is_cur_keyword_("name"):
                location = self.cur_token_location_
                platformID, platEncID, langID, string = self.parse_name_()
                block.statements.append(
                    self.ast.FeatureNameStatement(tag, platformID,
                                                  platEncID, langID, string,
                                                  location=location))
            elif self.cur_token_ == ";":
                # Stray semicolons are tolerated.
                continue
            else:
                raise FeatureLibError('Expected "name"',
                                      self.cur_token_location_)
        self.expect_symbol_("}")
        for symtab in self.symbol_tables_:
            symtab.exit_scope()
        self.expect_symbol_(";")
        return block
def parse_cvParameters_(self, tag):
# Parses a ``cvParameters`` block found in Character Variant features.
# See section `8.d <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.d>`_.
assert self.cur_token_ == "cvParameters", self.cur_token_
block = self.ast.NestedBlock(tag, self.cur_token_,
location=self.cur_token_location_)
self.expect_symbol_("{")
for symtab in self.symbol_tables_:
symtab.enter_scope()
statements = block.statements
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(
self.cur_token_, location=self.cur_token_location_))
elif self.is_cur_keyword_({"FeatUILabelNameID",
"FeatUITooltipTextNameID",
"SampleTextNameID",
"ParamUILabelNameID"}):
statements.append(self.parse_cvNameIDs_(tag, self.cur_token_))
elif self.is_cur_keyword_("Character"):
statements.append(self.parse_cvCharacter_(tag))
elif self.cur_token_ == ";":
continue
else:
raise FeatureLibError(
"Expected statement: got {} {}".format(
self.cur_token_type_, self.cur_token_),
self.cur_token_location_)
self.expect_symbol_("}")
for symtab in self.symbol_tables_:
symtab.exit_scope()
self.expect_symbol_(";")
return block
def parse_cvNameIDs_(self, tag, block_name):
assert self.cur_token_ == block_name, self.cur_token_
block = self.ast.NestedBlock(tag, block_name,
location=self.cur_token_location_)
self.expect_symbol_("{")
for symtab in self.symbol_tables_:
symtab.enter_scope()
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
block.statements.append(self.ast.Comment(
self.cur_token_, location=self.cur_token_location_))
elif self.is_cur_keyword_("name"):
location = self.cur_token_location_
platformID, platEncID, langID, string = self.parse_name_()
block.statements.append(
self.ast.CVParametersNameStatement(
tag, platformID, platEncID, langID, string,
block_name, location=location))
elif self.cur_token_ == ";":
continue
else:
raise FeatureLibError('Expected "name"',
self.cur_token_location_)
self.expect_symbol_("}")
for symtab in self.symbol_tables_:
symtab.exit_scope()
self.expect_symbol_(";")
return block
def parse_cvCharacter_(self, tag):
assert self.cur_token_ == "Character", self.cur_token_
location, character = self.cur_token_location_, self.expect_any_number_()
self.expect_symbol_(";")
if not (0xFFFFFF >= character >= 0):
raise FeatureLibError("Character value must be between "
"{:#x} and {:#x}".format(0, 0xFFFFFF),
location)
return self.ast.CharacterStatement(character, tag, location=location)
def parse_FontRevision_(self):
# Parses a ``FontRevision`` statement found in the head table. See
# `section 9.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.c>`_.
assert self.cur_token_ == "FontRevision", self.cur_token_
location, version = self.cur_token_location_, self.expect_float_()
self.expect_symbol_(";")
if version <= 0:
raise FeatureLibError("Font revision numbers must be positive",
location)
return self.ast.FontRevisionStatement(version, location=location)
    def parse_block_(self, block, vertical, stylisticset=None,
                     size_feature=False, cv_feature=None):
        """Parse the brace-delimited body of a feature or lookup block.

        Statements are appended to ``block.statements``.  The optional
        flags enable statements that are only legal in certain contexts:
        ``stylisticset`` (a tag) allows ``featureNames``, ``cv_feature``
        (a tag) allows ``cvParameters``, and ``size_feature`` allows
        ``parameters`` / ``sizemenuname``.
        """
        self.expect_symbol_("{")
        for symtab in self.symbol_tables_:
            symtab.enter_scope()
        statements = block.statements
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(self.ast.Comment(
                    self.cur_token_, location=self.cur_token_location_))
            elif self.cur_token_type_ is Lexer.GLYPHCLASS:
                statements.append(self.parse_glyphclass_definition_())
            elif self.is_cur_keyword_("anchorDef"):
                statements.append(self.parse_anchordef_())
            elif self.is_cur_keyword_({"enum", "enumerate"}):
                statements.append(self.parse_enumerate_(vertical=vertical))
            elif self.is_cur_keyword_("feature"):
                statements.append(self.parse_feature_reference_())
            elif self.is_cur_keyword_("ignore"):
                statements.append(self.parse_ignore_())
            elif self.is_cur_keyword_("language"):
                statements.append(self.parse_language_())
            elif self.is_cur_keyword_("lookup"):
                statements.append(self.parse_lookup_(vertical))
            elif self.is_cur_keyword_("lookupflag"):
                statements.append(self.parse_lookupflag_())
            elif self.is_cur_keyword_("markClass"):
                statements.append(self.parse_markClass_())
            elif self.is_cur_keyword_({"pos", "position"}):
                statements.append(
                    self.parse_position_(enumerated=False, vertical=vertical))
            elif self.is_cur_keyword_("script"):
                statements.append(self.parse_script_())
            elif (self.is_cur_keyword_({"sub", "substitute",
                                        "rsub", "reversesub"})):
                statements.append(self.parse_substitute_())
            elif self.is_cur_keyword_("subtable"):
                statements.append(self.parse_subtable_())
            elif self.is_cur_keyword_("valueRecordDef"):
                statements.append(self.parse_valuerecord_definition_(vertical))
            elif stylisticset and self.is_cur_keyword_("featureNames"):
                statements.append(self.parse_featureNames_(stylisticset))
            elif cv_feature and self.is_cur_keyword_("cvParameters"):
                statements.append(self.parse_cvParameters_(cv_feature))
            elif size_feature and self.is_cur_keyword_("parameters"):
                statements.append(self.parse_size_parameters_())
            elif size_feature and self.is_cur_keyword_("sizemenuname"):
                statements.append(self.parse_size_menuname_())
            # Registered extension keywords are checked only after all
            # built-in keywords, so extensions cannot shadow them.
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions:
                statements.append(self.extensions[self.cur_token_](self))
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    "Expected glyph class definition or statement: got {} {}".format(self.cur_token_type_, self.cur_token_),
                    self.cur_token_location_)
        self.expect_symbol_("}")
        for symtab in self.symbol_tables_:
            symtab.exit_scope()
        # The closing tag must repeat the block's own name, e.g. "} kern;".
        name = self.expect_name_()
        if name != block.name.strip():
            raise FeatureLibError("Expected \"%s\"" % block.name.strip(),
                                  self.cur_token_location_)
        self.expect_symbol_(";")
        # A multiple substitution may have a single destination, in which case
        # it will look just like a single substitution. So if there are both
        # multiple and single substitutions, upgrade all the single ones to
        # multiple substitutions.
        # Check if we have a mix of non-contextual singles and multiples.
        has_single = False
        has_multiple = False
        for s in statements:
            if isinstance(s, self.ast.SingleSubstStatement):
                has_single = not any([s.prefix, s.suffix, s.forceChain])
            elif isinstance(s, self.ast.MultipleSubstStatement):
                has_multiple = not any([s.prefix, s.suffix, s.forceChain])
        # Upgrade all single substitutions to multiple substitutions.
        if has_single and has_multiple:
            statements = []
            for s in block.statements:
                if isinstance(s, self.ast.SingleSubstStatement):
                    glyphs = s.glyphs[0].glyphSet()
                    replacements = s.replacements[0].glyphSet()
                    if len(replacements) == 1:
                        # One replacement for N glyphs: repeat it N times.
                        replacements *= len(glyphs)
                    for i, glyph in enumerate(glyphs):
                        statements.append(
                            self.ast.MultipleSubstStatement(
                                s.prefix, glyph, s.suffix, [replacements[i]],
                                s.forceChain, location=s.location))
                else:
                    statements.append(s)
            block.statements = statements
def is_cur_keyword_(self, k):
if self.cur_token_type_ is Lexer.NAME:
if isinstance(k, type("")): # basestring is gone in Python3
return self.cur_token_ == k
else:
return self.cur_token_ in k
return False
def expect_class_name_(self):
self.advance_lexer_()
if self.cur_token_type_ is not Lexer.GLYPHCLASS:
raise FeatureLibError("Expected @NAME", self.cur_token_location_)
return self.cur_token_
def expect_cid_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.CID:
return self.cur_token_
raise FeatureLibError("Expected a CID", self.cur_token_location_)
def expect_filename_(self):
self.advance_lexer_()
if self.cur_token_type_ is not Lexer.FILENAME:
raise FeatureLibError("Expected file name",
self.cur_token_location_)
return self.cur_token_
def expect_glyph_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME:
self.cur_token_ = self.cur_token_.lstrip("\\")
if len(self.cur_token_) > 63:
raise FeatureLibError(
"Glyph names must not be longer than 63 characters",
self.cur_token_location_)
return self.cur_token_
elif self.cur_token_type_ is Lexer.CID:
return "cid%05d" % self.cur_token_
raise FeatureLibError("Expected a glyph name or CID",
self.cur_token_location_)
def check_glyph_name_in_glyph_set(self, *names):
"""Raises if glyph name (just `start`) or glyph names of a
range (`start` and `end`) are not in the glyph set.
If no glyph set is present, does nothing.
"""
if self.glyphNames_:
missing = [name for name in names if name not in self.glyphNames_]
if missing:
raise FeatureLibError(
"The following glyph names are referenced but are missing from the "
f"glyph set: {', '.join(missing)}",
self.cur_token_location_
)
def expect_markClass_reference_(self):
name = self.expect_class_name_()
mc = self.glyphclasses_.resolve(name)
if mc is None:
raise FeatureLibError("Unknown markClass @%s" % name,
self.cur_token_location_)
if not isinstance(mc, self.ast.MarkClass):
raise FeatureLibError("@%s is not a markClass" % name,
self.cur_token_location_)
return mc
def expect_tag_(self):
self.advance_lexer_()
if self.cur_token_type_ is not Lexer.NAME:
raise FeatureLibError("Expected a tag", self.cur_token_location_)
if len(self.cur_token_) > 4:
raise FeatureLibError("Tags can not be longer than 4 characters",
self.cur_token_location_)
return (self.cur_token_ + " ")[:4]
def expect_script_tag_(self):
tag = self.expect_tag_()
if tag == "dflt":
raise FeatureLibError(
'"dflt" is not a valid script tag; use "DFLT" instead',
self.cur_token_location_)
return tag
def expect_language_tag_(self):
tag = self.expect_tag_()
if tag == "DFLT":
raise FeatureLibError(
'"DFLT" is not a valid language tag; use "dflt" instead',
self.cur_token_location_)
return tag
def expect_symbol_(self, symbol):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol:
return symbol
raise FeatureLibError("Expected '%s'" % symbol,
self.cur_token_location_)
def expect_keyword_(self, keyword):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
return self.cur_token_
raise FeatureLibError("Expected \"%s\"" % keyword,
self.cur_token_location_)
def expect_name_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME:
return self.cur_token_
raise FeatureLibError("Expected a name", self.cur_token_location_)
def expect_number_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NUMBER:
return self.cur_token_
raise FeatureLibError("Expected a number", self.cur_token_location_)
def expect_any_number_(self):
self.advance_lexer_()
if self.cur_token_type_ in Lexer.NUMBERS:
return self.cur_token_
raise FeatureLibError("Expected a decimal, hexadecimal or octal number",
self.cur_token_location_)
def expect_float_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.FLOAT:
return self.cur_token_
raise FeatureLibError("Expected a floating-point number",
self.cur_token_location_)
def expect_decipoint_(self):
if self.next_token_type_ == Lexer.FLOAT:
return self.expect_float_()
elif self.next_token_type_ is Lexer.NUMBER:
return self.expect_number_() / 10
else:
raise FeatureLibError("Expected an integer or floating-point number",
self.cur_token_location_)
def expect_string_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.STRING:
return self.cur_token_
raise FeatureLibError("Expected a string", self.cur_token_location_)
    def advance_lexer_(self, comments=False):
        """Move the parser one token ahead.

        When ``comments`` is True and queued comment tokens exist, the next
        queued comment becomes the current token instead of advancing the
        lexer.  Otherwise the previously peeked ``next_token_`` becomes the
        current token and a new lookahead token is fetched; comment tokens
        encountered while fetching are queued in ``cur_comments_`` rather
        than surfaced as the lookahead.
        """
        if comments and self.cur_comments_:
            self.cur_token_type_ = Lexer.COMMENT
            self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
            return
        else:
            # Shift the lookahead token into the "current" slot.
            self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
                self.next_token_type_, self.next_token_, self.next_token_location_)
            while True:
                try:
                    (self.next_token_type_, self.next_token_,
                     self.next_token_location_) = next(self.lexer_)
                except StopIteration:
                    # NOTE(review): next_token_location_ keeps its previous
                    # value at end of input — presumably harmless since the
                    # None token type ends parsing; confirm.
                    self.next_token_type_, self.next_token_ = (None, None)
                if self.next_token_type_ != Lexer.COMMENT:
                    break
                # Buffer comments so callers only see them on request.
                self.cur_comments_.append((self.next_token_, self.next_token_location_))
@staticmethod
def reverse_string_(s):
"""'abc' --> 'cba'"""
return ''.join(reversed(list(s)))
def make_cid_range_(self, location, start, limit):
"""(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]"""
result = list()
if start > limit:
raise FeatureLibError(
"Bad range: start should be less than limit", location)
for cid in range(start, limit + 1):
result.append("cid%05d" % cid)
return result
    def make_glyph_range_(self, location, start, limit):
        """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]

        Expands a glyph range by finding the common prefix and suffix of
        ``start`` and ``limit``, then iterating the varying middle part:
        a single letter (A-Z or a-z) or a 1-3 digit zero-padded number.

        :raises FeatureLibError: if the names cannot form a valid range.
        """
        result = list()
        if len(start) != len(limit):
            raise FeatureLibError(
                "Bad range: \"%s\" and \"%s\" should have the same length" %
                (start, limit), location)
        rev = self.reverse_string_
        # commonprefix on the reversed strings yields the common suffix.
        prefix = os.path.commonprefix([start, limit])
        suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
        if len(suffix) > 0:
            start_range = start[len(prefix):-len(suffix)]
            limit_range = limit[len(prefix):-len(suffix)]
        else:
            start_range = start[len(prefix):]
            limit_range = limit[len(prefix):]
        if start_range >= limit_range:
            raise FeatureLibError(
                "Start of range must be smaller than its end",
                location)
        uppercase = re.compile(r'^[A-Z]$')
        if uppercase.match(start_range) and uppercase.match(limit_range):
            for c in range(ord(start_range), ord(limit_range) + 1):
                result.append("%s%c%s" % (prefix, c, suffix))
            return result
        lowercase = re.compile(r'^[a-z]$')
        if lowercase.match(start_range) and lowercase.match(limit_range):
            for c in range(ord(start_range), ord(limit_range) + 1):
                result.append("%s%c%s" % (prefix, c, suffix))
            return result
        digits = re.compile(r'^[0-9]{1,3}$')
        if digits.match(start_range) and digits.match(limit_range):
            for i in range(int(start_range, 10), int(limit_range, 10) + 1):
                # Zero-pad to the same width as the start of the range.
                number = ("000" + str(i))[-len(start_range):]
                result.append("%s%s%s" % (prefix, number, suffix))
            return result
        raise FeatureLibError("Bad range: \"%s-%s\"" % (start, limit),
                              location)
class SymbolTable(object):
    """A simple scoped symbol table: a stack of name -> item mappings.

    Lookups search the innermost scope first and proceed outwards.
    """

    def __init__(self):
        # The outermost (global) scope always exists.
        self.scopes_ = [{}]

    def enter_scope(self):
        """Push a new, empty innermost scope."""
        self.scopes_.append({})

    def exit_scope(self):
        """Pop the innermost scope, discarding its definitions."""
        self.scopes_.pop()

    def define(self, name, item):
        """Bind ``name`` to ``item`` in the innermost scope."""
        self.scopes_[-1][name] = item

    def resolve(self, name):
        """Return the innermost binding of ``name``, or None if undefined."""
        for scope in reversed(self.scopes_):
            # Bug fix: test membership rather than truthiness, so that
            # falsy items (e.g. 0 or "") stored in an inner scope are
            # found instead of being skipped over.
            if name in scope:
                return scope[name]
        return None
| 45.223398 | 160 | 0.575613 |
7180c75b394d74566e586ce53a8223ddcc0fb8b4 | 23,943 | py | Python | .venv/lib/python2.7/site-packages/smmap/mman.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | null | null | null | .venv/lib/python2.7/site-packages/smmap/mman.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | 11 | 2017-01-22T11:05:33.000Z | 2017-01-22T11:14:48.000Z | .venv/lib/python2.7/site-packages/smmap/mman.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | null | null | null | """Module containing a memory manager which provides a sliding window on a number of memory mapped files"""
from .util import (
MapWindow,
MapRegion,
MapRegionList,
is_64_bit,
string_types,
buffer,
)
import sys
from functools import reduce
__all__ = ["StaticWindowMapManager", "SlidingWindowMapManager", "WindowCursor"]
#{ Utilities
#}END utilities
class WindowCursor(object):
    """
    Pointer into the mapped region of the memory manager, keeping the map
    alive until it is destroyed and no other client uses it.
    Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
    **Note:** The current implementation is suited for static and sliding window managers, but it also means
    that it must be suited for the somewhat quite different sliding manager. It could be improved, but
    I see no real need to do so."""
    __slots__ = (
        '_manager',  # the manager keeping all file regions
        '_rlist',  # a regions list with regions for our file
        '_region',  # our current region or None
        '_ofs',  # relative offset from the actually mapped area to our start area
        '_size'  # maximum size we should provide
    )
    def __init__(self, manager=None, regions=None):
        self._manager = manager
        self._rlist = regions
        self._region = None
        self._ofs = 0
        self._size = 0
    def __del__(self):
        self._destroy()
    def _destroy(self):
        """Destruction code to decrement counters"""
        self.unuse_region()
        if self._rlist is not None:
            # Actual client count, which doesn't include the reference kept by the manager, nor ours
            # as we are about to be deleted
            try:
                if len(self._rlist) == 0:
                    # Free all resources associated with the mapped file
                    self._manager._fdict.pop(self._rlist.path_or_fd())
                # END remove regions list from manager
            except (TypeError, KeyError):
                # sometimes, during shutdown, getrefcount is None. Its possible
                # to re-import it, however, its probably better to just ignore
                # this python problem (for now).
                # The next step is to get rid of the error prone getrefcount alltogether.
                pass
            # END exception handling
        # END handle regions
    def _copy_from(self, rhs):
        """Copy all data from rhs into this instance, handles usage count"""
        self._manager = rhs._manager
        self._rlist = type(rhs._rlist)(rhs._rlist)
        self._region = rhs._region
        self._ofs = rhs._ofs
        self._size = rhs._size
        # Keep the shared regions alive: every copied reference counts as
        # one more client on each region.
        for region in self._rlist:
            region.increment_client_count()
        if self._region is not None:
            self._region.increment_client_count()
        # END handle regions
    def __copy__(self):
        """copy module interface"""
        cpy = type(self)()
        cpy._copy_from(self)
        return cpy
    #{ Interface
    def assign(self, rhs):
        """Assign rhs to this instance. This is required in order to get a real copy.
        Alternativly, you can copy an existing instance using the copy module"""
        self._destroy()
        self._copy_from(rhs)
    def use_region(self, offset=0, size=0, flags=0):
        """Assure we point to a window which allows access to the given offset into the file

        :param offset: absolute offset in bytes into the file
        :param size: amount of bytes to map. If 0, all available bytes will be mapped
        :param flags: additional flags to be given to os.open in case a file handle is initially opened
            for mapping. Has no effect if a region can actually be reused.
        :return: this instance - it should be queried for whether it points to a valid memory region.
            This is not the case if the mapping failed because we reached the end of the file

        **Note:** The size actually mapped may be smaller than the given size. If that is the case,
        either the file has reached its end, or the map was created between two existing regions"""
        need_region = True
        man = self._manager
        fsize = self._rlist.file_size()
        size = min(size or fsize, man.window_size() or fsize)   # clamp size to window size
        if self._region is not None:
            if self._region.includes_ofs(offset):
                need_region = False
            else:
                self.unuse_region()
            # END handle existing region
        # END check existing region
        # offset too large ?
        if offset >= fsize:
            return self
        # END handle offset
        if need_region:
            self._region = man._obtain_region(self._rlist, offset, size, flags, False)
            self._region.increment_client_count()
        # END need region handling
        self._ofs = offset - self._region._b
        self._size = min(size, self._region.ofs_end() - offset)
        return self
    def unuse_region(self):
        """Unuse the current region. Does nothing if we have no current region

        **Note:** the cursor unuses the region automatically upon destruction. It is recommended
        to un-use the region once you are done reading from it in persistent cursors as it
        helps to free up resource more quickly"""
        if self._region is not None:
            self._region.increment_client_count(-1)
        self._region = None
        # note: should reset ofs and size, but we spare that for performance. Its not
        # allowed to query information if we are not valid !
    def buffer(self):
        """Return a buffer object which allows access to our memory region from our offset
        to the window size. Please note that it might be smaller than you requested when calling use_region()

        **Note:** You can only obtain a buffer if this instance is_valid() !

        **Note:** buffers should not be cached past the duration of your access as it will
        prevent resources from being freed even though they might not be accounted for anymore !"""
        return buffer(self._region.buffer(), self._ofs, self._size)
    def map(self):
        """
        :return: the underlying raw memory map. Please not that the offset and size is likely to be different
            to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
            file in case of StaticWindowMapManager"""
        return self._region.map()
    def is_valid(self):
        """:return: True if we have a valid and usable region"""
        return self._region is not None
    def is_associated(self):
        """:return: True if we are associated with a specific file already"""
        return self._rlist is not None
    def ofs_begin(self):
        """:return: offset to the first byte pointed to by our cursor

        **Note:** only if is_valid() is True"""
        return self._region._b + self._ofs
    def ofs_end(self):
        """:return: offset to one past the last available byte"""
        # unroll method calls for performance !
        return self._region._b + self._ofs + self._size
    def size(self):
        """:return: amount of bytes we point to"""
        return self._size
    def region(self):
        """:return: our mapped region, or None if nothing is mapped yet

        :raise AssertionError: if we have no current region. This is only useful for debugging"""
        return self._region
    def includes_ofs(self, ofs):
        """:return: True if the given absolute offset is contained in the cursors
            current region

        **Note:** cursor must be valid for this to work"""
        # unroll methods
        return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
    def file_size(self):
        """:return: size of the underlying file"""
        return self._rlist.file_size()
    def path_or_fd(self):
        """:return: path or file descriptor of the underlying mapped file"""
        return self._rlist.path_or_fd()
    def path(self):
        """:return: path of the underlying mapped file

        :raise ValueError: if attached path is not a path"""
        if isinstance(self._rlist.path_or_fd(), int):
            raise ValueError("Path queried although mapping was applied to a file descriptor")
        # END handle type
        return self._rlist.path_or_fd()
    def fd(self):
        """:return: file descriptor used to create the underlying mapping.

        **Note:** it is not required to be valid anymore

        :raise ValueError: if the mapping was not created by a file descriptor"""
        if isinstance(self._rlist.path_or_fd(), string_types()):
            raise ValueError("File descriptor queried although mapping was generated from path")
        # END handle type
        return self._rlist.path_or_fd()
    #} END interface
class StaticWindowMapManager(object):
    """Provides a manager which will produce single size cursors that are allowed
    to always map the whole file.

    Clients must be written to specifically know that they are accessing their data
    through a StaticWindowMapManager, as they otherwise have to deal with their window size.

    These clients would have to use a SlidingWindowMapBuffer to hide this fact.

    This type will always use a maximum window size, and optimize certain methods to
    accommodate this fact"""
    __slots__ = [
        '_fdict',  # mapping of path -> StorageHelper (of some kind)
        '_window_size',  # maximum size of a window
        '_max_memory_size',  # maximum amount of memory we may allocate
        '_max_handle_count',  # maximum amount of handles to keep open
        '_memory_size',  # currently allocated memory size
        '_handle_count',  # amount of currently allocated file handles
    ]
    #{ Configuration
    MapRegionListCls = MapRegionList
    MapWindowCls = MapWindow
    MapRegionCls = MapRegion
    WindowCursorCls = WindowCursor
    #} END configuration
    _MB_in_bytes = 1024 * 1024
    def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
        """initialize the manager with the given parameters.
        :param window_size: if -1, a default window size will be chosen depending on
            the operating system's architecture. It will internally be quantified to a multiple of the page size
            If 0, the window may have any size, which basically results in mapping the whole file at one
        :param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
            If 0, a viable default will be set depending on the system's architecture.
            It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
        :param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
            Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
            the manager will free as many handles as possible"""
        self._fdict = dict()
        self._window_size = window_size
        self._max_memory_size = max_memory_size
        self._max_handle_count = max_open_handles
        self._memory_size = 0
        self._handle_count = 0
        if window_size < 0:
            # Default window size: 64 MiB on 32-bit, 1 GiB on 64-bit systems.
            coeff = 64
            if is_64_bit():
                coeff = 1024
            # END handle arch
            self._window_size = coeff * self._MB_in_bytes
        # END handle max window size
        if max_memory_size == 0:
            # Default memory budget: 1 GiB on 32-bit, 8 GiB on 64-bit systems.
            coeff = 1024
            if is_64_bit():
                coeff = 8192
            # END handle arch
            self._max_memory_size = coeff * self._MB_in_bytes
        # END handle max memory size
    #{ Internal Methods
    def _collect_lru_region(self, size):
        """Unmap the region which was least-recently used and has no client

        :param size: size of the region we want to map next (assuming its not already mapped partially or full
            if 0, we try to free any available region
        :return: Amount of freed regions

        **Note:** We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
        If the system runs out of memory, it will tell.

        **todo:** implement a case where all unused regions are discarded efficiently. Currently its only brute force"""
        num_found = 0
        while (size == 0) or (self._memory_size + size > self._max_memory_size):
            lru_region = None
            lru_list = None
            for regions in self._fdict.values():
                for region in regions:
                    # check client count - if it's 1, it's just us
                    if (region.client_count() == 1 and
                            (lru_region is None or region._uc < lru_region._uc)):
                        lru_region = region
                        lru_list = regions
                    # END update lru_region
                # END for each region
            # END for each regions list
            if lru_region is None:
                break
            # END handle region not found
            num_found += 1
            del(lru_list[lru_list.index(lru_region)])
            lru_region.increment_client_count(-1)
            self._memory_size -= lru_region.size()
            self._handle_count -= 1
        # END while there is more memory to free
        return num_found
    def _obtain_region(self, a, offset, size, flags, is_recursive):
        """Utility to create a new region - for more information on the parameters,
        see MapCursor.use_region.
        :param a: A regions (a)rray
        :return: The newly created region"""
        if self._memory_size + size > self._max_memory_size:
            self._collect_lru_region(size)
        # END handle collection
        r = None
        if a:
            # The static manager maps whole files, so there is at most one
            # region per file, which can always be reused.
            assert len(a) == 1
            r = a[0]
        else:
            try:
                r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
            except Exception:
                # apparently we are out of system resources or hit a limit
                # As many more operations are likely to fail in that condition (
                # like reading a file from disk, etc) we free up as much as possible
                # As this invalidates our insert position, we have to recurse here
                if is_recursive:
                    # we already tried this, and still have no success in obtaining
                    # a mapping. This is an exception, so we propagate it
                    raise
                # END handle existing recursion
                self._collect_lru_region(0)
                return self._obtain_region(a, offset, size, flags, True)
            # END handle exceptions
            self._handle_count += 1
            self._memory_size += r.size()
            a.append(r)
        # END handle array
        assert r.includes_ofs(offset)
        return r
    #}END internal methods
    #{ Interface
    def make_cursor(self, path_or_fd):
        """
        :return: a cursor pointing to the given path or file descriptor.
            It can be used to map new regions of the file into memory

        **Note:** if a file descriptor is given, it is assumed to be open and valid,
        but may be closed afterwards. To refer to the same file, you may reuse
        your existing file descriptor, but keep in mind that new windows can only
        be mapped as long as it stays valid. This is why the using actual file paths
        are preferred unless you plan to keep the file descriptor open.

        **Note:** file descriptors are problematic as they are not necessarily unique, as two
        different files opened and closed in succession might have the same file descriptor id.

        **Note:** Using file descriptors directly is faster once new windows are mapped as it
        prevents the file to be opened again just for the purpose of mapping it."""
        regions = self._fdict.get(path_or_fd)
        if regions is None:
            regions = self.MapRegionListCls(path_or_fd)
            self._fdict[path_or_fd] = regions
        # END obtain region for path
        return self.WindowCursorCls(self, regions)
    def collect(self):
        """Collect all available free-to-collect mapped regions
        :return: Amount of freed handles"""
        return self._collect_lru_region(0)
    def num_file_handles(self):
        """:return: amount of file handles in use. Each mapped region uses one file handle"""
        return self._handle_count
    def num_open_files(self):
        """Amount of opened files in the system"""
        return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
    def window_size(self):
        """:return: size of each window when allocating new regions"""
        return self._window_size
    def mapped_memory_size(self):
        """:return: amount of bytes currently mapped in total"""
        return self._memory_size
    def max_file_handles(self):
        """:return: maximium amount of handles we may have opened"""
        return self._max_handle_count
    def max_mapped_memory_size(self):
        """:return: maximum amount of memory we may allocate"""
        return self._max_memory_size
    #} END interface
    #{ Special Purpose Interface
    def force_map_handle_removal_win(self, base_path):
        """ONLY AVAILABLE ON WINDOWS
        On windows removing files is not allowed if anybody still has it opened.
        If this process is ourselves, and if the whole process uses this memory
        manager (as far as the parent framework is concerned) we can enforce
        closing all memory maps whose path matches the given base path to
        allow the respective operation after all.
        The respective system must NOT access the closed memory regions anymore !
        This really may only be used if you know that the items which keep
        the cursors alive will not be using it anymore. They need to be recreated !
        :return: Amount of closed handles

        **Note:** does nothing on non-windows platforms"""
        if sys.platform != 'win32':
            # NOTE(review): returns None here rather than 0 — callers
            # should not rely on the return value off Windows.
            return
        # END early bailout
        num_closed = 0
        for path, rlist in self._fdict.items():
            if path.startswith(base_path):
                for region in rlist:
                    region.release()
                    num_closed += 1
            # END path matches
        # END for each path
        return num_closed
    #} END special purpose interface
class SlidingWindowMapManager(StaticWindowMapManager):

    """Maintains a list of ranges of mapped memory regions in one or more files and allows to easily
    obtain additional regions assuring there is no overlap.
    Once a certain memory limit is reached globally, or if there cannot be more open file handles
    which result from each mmap call, the least recently used, and currently unused mapped regions
    are unloaded automatically.

    **Note:** currently not thread-safe !

    **Note:** in the current implementation, we will automatically unload windows if we either cannot
    create more memory maps (as the open file handles limit is hit) or if we have allocated more than
    a safe amount of memory already, which would possibly cause memory allocations to fail as our address
    space is full."""

    __slots__ = tuple()

    def __init__(self, window_size=-1, max_memory_size=0, max_open_handles=sys.maxsize):
        """Adjusts the default window size to -1"""
        super(SlidingWindowMapManager, self).__init__(window_size, max_memory_size, max_open_handles)

    def _obtain_region(self, a, offset, size, flags, is_recursive):
        """Find or create a region in region-list ``a`` covering ``offset``.

        :param a: list of regions for one file; assumed sorted by offset
            (see the "we assume the list remains sorted" comment below)
        :param offset: absolute byte offset into the file to cover
        :param size: minimum amount of bytes the window should span
        :param flags: flags forwarded to the region constructor
        :param is_recursive: internal guard; True on the single retry after
            freeing LRU regions, to avoid infinite recursion
        :return: region instance covering ``offset``"""
        # bisect to find an existing region. The c++ implementation cannot
        # do that as it uses a linked list for regions.
        r = None
        lo = 0
        hi = len(a)
        while lo < hi:
            mid = (lo + hi) // 2
            ofs = a[mid]._b
            if ofs <= offset:
                if a[mid].includes_ofs(offset):
                    r = a[mid]
                    break
                # END have region
                lo = mid + 1
            else:
                hi = mid
            # END handle position
        # END while bisecting

        if r is None:
            window_size = self._window_size
            left = self.MapWindowCls(0, 0)
            mid = self.MapWindowCls(offset, size)
            right = self.MapWindowCls(a.file_size(), 0)

            # we want to honor the max memory size, and assure we have enough
            # memory available
            # Save calls !
            if self._memory_size + window_size > self._max_memory_size:
                self._collect_lru_region(window_size)
            # END handle collection

            # we assume the list remains sorted by offset
            insert_pos = 0
            len_regions = len(a)
            if len_regions == 1:
                if a[0]._b <= offset:
                    insert_pos = 1
                # END maintain sort
            else:
                # find insert position
                insert_pos = len_regions
                for i, region in enumerate(a):
                    if region._b > offset:
                        insert_pos = i
                        break
                    # END if insert position is correct
                # END for each region
            # END obtain insert pos

            # adjust the actual offset and size values to create the largest
            # possible mapping
            if insert_pos == 0:
                if len_regions:
                    right = self.MapWindowCls.from_region(a[insert_pos])
                # END adjust right side
            else:
                if insert_pos != len_regions:
                    right = self.MapWindowCls.from_region(a[insert_pos])
                # END adjust right window
                left = self.MapWindowCls.from_region(a[insert_pos - 1])
            # END adjust surrounding windows

            # Grow the window toward both neighbors (bounded by window_size),
            # then align it; alignment may push it past the right neighbor.
            mid.extend_left_to(left, window_size)
            mid.extend_right_to(right, window_size)
            mid.align()

            # it can happen that we align beyond the end of the file
            if mid.ofs_end() > right.ofs:
                mid.size = right.ofs - mid.ofs
            # END readjust size

            # insert new region at the right offset to keep the order
            try:
                if self._handle_count >= self._max_handle_count:
                    raise Exception
                # END assert own imposed max file handles
                r = self.MapRegionCls(a.path_or_fd(), mid.ofs, mid.size, flags)
            except Exception:
                # apparently we are out of system resources or hit a limit
                # As many more operations are likely to fail in that condition (
                # like reading a file from disk, etc) we free up as much as possible
                # As this invalidates our insert position, we have to recurse here
                if is_recursive:
                    # we already tried this, and still have no success in obtaining
                    # a mapping. This is an exception, so we propagate it
                    raise
                # END handle existing recursion
                self._collect_lru_region(0)
                return self._obtain_region(a, offset, size, flags, True)
            # END handle exceptions

            self._handle_count += 1
            self._memory_size += r.size()
            a.insert(insert_pos, r)
        # END create new region
        return r
| 41.209983 | 124 | 0.618302 |
2b25616364b32726b07c55ee114a1319f7fac20e | 92,633 | py | Python | src/pudl/transform/eia861.py | kevinsung/pudl | fe2086a12282b84523883a474bad9905a4b662e5 | [
"MIT"
] | 285 | 2017-05-26T02:42:04.000Z | 2022-03-25T09:06:11.000Z | src/pudl/transform/eia861.py | kevinsung/pudl | fe2086a12282b84523883a474bad9905a4b662e5 | [
"MIT"
] | 1,398 | 2017-05-27T15:46:08.000Z | 2022-03-31T19:50:07.000Z | src/pudl/transform/eia861.py | kevinsung/pudl | fe2086a12282b84523883a474bad9905a4b662e5 | [
"MIT"
] | 76 | 2017-06-22T17:31:28.000Z | 2022-01-23T22:17:57.000Z | """Module to perform data cleaning functions on EIA861 data tables.
All transformations include:
- Replace . values with NA.
"""
import logging
import pandas as pd
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
# Manual fixes for Balancing Authority IDs reported to EIA Form 861. Each row
# supplies the BA ID that should be associated with a particular
# (report_date, balancing_authority_name_eia, utility_id_eia) combination;
# the frame is indexed on those three columns. pd.NA IDs mark records whose
# BA could not be identified.
BA_ID_NAME_FIXES = (
    pd.DataFrame([
        # report_date, util_id, ba_id, ba_name
        ('2001-01-01', 40577, 99999, 'Multiple Control Areas'),
        ('2002-01-01', 40577, 99999, 'Multiple Control Areas'),
        ('2002-01-01', 2759, 13781, 'Xcel Energy'),
        ('2002-01-01', 1004, 40604, 'Heartland Consumer Power Dist.'),
        ('2002-01-01', 5659, 20847, 'Wisconsin Electric Power'),
        ('2002-01-01', 5588, 9417, 'Interstate Power & Light'),
        ('2002-01-01', 6112, 9417, 'INTERSTATE POWER & LIGHT'),
        ('2002-01-01', 6138, 13781, 'Xcel Energy'),
        ('2002-01-01', 6276, pd.NA, 'Vectren Energy Delivery'),
        ('2002-01-01', 6501, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 6579, 4716, 'Dairyland Power Coop'),
        ('2002-01-01', 6848, pd.NA, pd.NA),
        ('2002-01-01', 7140, 18195, 'Southern Co Services Inc'),
        ('2002-01-01', 7257, 22500, 'Westar Energy'),
        ('2002-01-01', 7444, 14232, 'Minnkota Power Cooperative'),
        ('2002-01-01', 8490, 22500, 'Westar'),
        ('2002-01-01', 8632, 12825, 'NorthWestern Energy'),
        ('2002-01-01', 8770, 22500, 'Westar Energy'),
        ('2002-01-01', 8796, 13434, 'ISO New England'),
        ('2002-01-01', 9699, pd.NA, 'Tri-State G&T'),
        ('2002-01-01', 10040, 13781, 'Xcel Energy'),
        ('2002-01-01', 10171, 56669, 'Midwest Indep System Operator'),
        ('2002-01-01', 11053, 9417, 'INTERSTATE POWER & LIGHT'),
        ('2002-01-01', 11148, 2775, 'California ISO'),
        ('2002-01-01', 11522, 1, 'Maritimes-Canada'),
        ('2002-01-01', 11731, 13781, 'XCEL Energy'),
        ('2002-01-01', 11788, 9417, 'Interstate Power & Light'),
        ('2002-01-01', 12301, 14232, 'Minnkota Power Cooperative'),
        ('2002-01-01', 12698, 20391, 'Aquila Networks - MPS'),
        ('2002-01-01', 12706, 18195, 'Southern Co Services Inc'),
        ('2002-01-01', 3258, 9417, 'Interstate Power & Light'),
        ('2002-01-01', 3273, 15473, 'Public Regulatory Commission'),
        ('2002-01-01', 3722, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 1417, 12825, 'NorthWestern Energy'),
        ('2002-01-01', 1683, 12825, 'Northwestern Energy'),
        ('2002-01-01', 1890, 5416, 'Duke Energy Corporation'),
        ('2002-01-01', 4319, 20447, 'Okla. Municipal Pwr. Authority'),
        ('2002-01-01', 18446, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 19108, pd.NA, 'NC Rural Electrification Auth.'),
        ('2002-01-01', 19545, 28503, 'Western Area Power Admin'),
        ('2002-01-01', 12803, 18195, 'Southern Illinois Power'),
        ('2002-01-01', 13382, 8283, 'Harrison County Rural Electric'),
        ('2002-01-01', 13423, 829, 'Town of New Carlisle'),
        ('2002-01-01', 13815, 13781, 'Xcel Energy'),
        ('2002-01-01', 14649, 18195, 'GSOC (Georgia System Operation'),
        ('2002-01-01', 15672, 924, 'Associated Electric Coop Inc'),
        ('2002-01-01', 16023, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 16463, pd.NA, 'Central Louisiana Electric Co.'),
        ('2002-01-01', 16922, 22500, 'Westar Energy'),
        ('2002-01-01', 16992, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 17643, 924, 'Associated Electric Coop Inc'),
        ('2002-01-01', 17706, 9417, 'Interstate Power & Light'),
        ('2002-01-01', 20811, 19876, 'Dominion NC Power'),
        ('2002-01-01', 3227, 15466, 'Xcel Energy'),
        ('2002-01-01', 20227, 14063, 'OG&E'),
        ('2002-01-01', 17787, 13337, 'Mun. Energy Agcy of Nebraska'),
        ('2002-01-01', 19264, 17718, 'Excel Energy'),
        ('2002-01-01', 11701, 19578, 'We Energies'),
        ('2002-01-01', 28802, 14725, 'PJM Interconnection'),
        ('2002-01-01', 20546, 1692, 'Big Rivers Electric Corp.'),
        ('2002-01-01', 6223, 1, 'Maritimes-Canada'),
        ('2002-01-01', 14405, 19876, 'VA Power'),
        ('2002-01-01', 14405, 14725, 'PJM'),
        ('2002-01-01', 12698, 20391, 'Aquila Networks - L&P'),
        ('2002-01-01', 16267, 12698, 'Aquila'),
        ('2002-01-01', 15871, 5723, 'ERC of Texas'),
        ('2002-01-01', 6753, 28503, 'Regional Office'),
        ('2002-01-01', 5571, 14328, 'Pacific Gas and Electric Co.'),
        ('2002-01-01', 367, pd.NA, 'Western Area Power Admin'),
        ('2002-01-01', 3247, 13501, 'NYISO'),
        ('2002-01-01', 11014, 5723, 'Ercot'),
        ('2002-01-01', 20845, 12427, 'Michigan Power Pool 12427'),
        ('2002-01-01', 17267, pd.NA, 'Watertown, SD'),
        ('2002-01-01', 12811, pd.NA, 'First Energy Corp.'),
        ('2002-01-01', 17368, 13501, 'NYISO'),
        ('2002-01-01', 5877, 13501, 'NYISO'),
        ('2002-01-01', 3240, pd.NA, 'Pacific NW Generating Cooperat'),
        ('2002-01-01', 3037, pd.NA, 'Trans Electric'),
        ('2002-01-01', 12199, 28503, 'WAPA-Rocky Mountain'),
        ('2002-01-01', 8936, 14378, 'Pacificorp'),
        ('2002-01-01', 40604, pd.NA, 'Watertown, SD Office'),
        ('2002-01-01', 19108, pd.NA, 'USDA- Rural Utility Service'),
        ('2002-01-01', 8199, 20391, 'Aquila'),
        ('2002-01-01', 12698, 20391, 'Aquila Networks - WPC'),
        ('2002-01-01', 12698, 20391, 'Aquila Networks - WPK'),
        ('2002-01-01', 20387, 14725, 'PJM West'),
        ('2002-01-01', 588, 20447, 'Western Farmers Elec Coop Inc'),
        ('2002-01-01', 17561, 5723, 'ERCOT ISO'),
        ('2002-01-01', 17320, 13781, 'Xcel Energy'),
        ('2002-01-01', 13676, 17716, 'Southwestern Power Admin.'),
        ('2002-01-01', 5703, 13501, 'NTISO'),
        ('2002-01-01', 113, 13501, 'NYISO'),
        ('2002-01-01', 4486, pd.NA, 'REMC of Western Indiana'),
        ('2002-01-01', 1039, 13501, 'NYISO'),
        ('2002-01-01', 5609, pd.NA, 'NMISA'),
        ('2002-01-01', 3989, pd.NA, 'WAPA'),
        ('2002-01-01', 13539, 13501, 'NY Independent System Operator'),
        ('2002-01-01', 15263, 14725, 'PJM West'),
        ('2002-01-01', 12796, 14725, 'PJM West'),
        ('2002-01-01', 3539, 13434, 'ISO New England'),
        ('2002-01-01', 3575, 13434, 'ISO New England'),
        ('2002-01-01', 3559, 13434, 'ISO New England'),
        ('2002-01-01', 18193, pd.NA, pd.NA),
        ('2002-01-01', 838, 3413, 'Chelan PUD'),
        ('2002-01-01', 1049, 1738, 'Bonneville'),
        ('2002-01-01', 9248, 14725, 'PJM'),
        ('2002-01-01', 15026, 803, 'APS Control Area'),
        ('2002-01-01', 798, 16572, 'Salt River Project'),
        ('2002-01-01', 5603, 13501, 'ISO - NEW YORK'),
        ('2002-01-01', 12260, 19876, 'Dominion Virginia Power'),
        ('2002-01-01', 14788, 17716, 'Southwest Power Administration'),
        ('2002-01-01', 12909, 22500, 'Westar Energy'),
        ('2002-01-01', 5605, 9417, 'Interstate Power and Light'),
        ('2002-01-01', 10908, 9417, 'Interstate Power and Light'),
        ('2003-01-01', 3258, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 6501, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 10650, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 16992, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 3722, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 11788, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 5588, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 11053, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 16023, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 17706, 9417, 'Interstate Power & Light'),
        ('2003-01-01', 18446, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 5309, 18195, 'Southern Company Services Inc'),
        ('2004-01-01', 192, 192, 'Ryant T. Rose'),
        ('2004-01-01', 6501, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 16992, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 8192, 14725, 'PJM-West'),
        ('2004-01-01', 192, 192, 'Phillip K. Peter, Sr.'),
        ('2004-01-01', 192, 192, 'Nelson Kinegak'),
        ('2004-01-01', 1004, 40604, 'Heartland Consumer Power Dist.'),
        ('2004-01-01', 3258, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 3722, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 19879, pd.NA, 'Kevin Smalls St Croix Districe'),
        ('2004-01-01', 11788, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 4191, 13434, 'NEISO'),
        ('2004-01-01', 10650, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 11053, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 18446, 9417, 'Interstate Power & Light'),
        ('2004-01-01', 27000, pd.NA, 'Multiple Operators'),
        ('2004-01-01', 19879, pd.NA, 'Corey Hodge - St Thomass/St Jo'),
        ('2004-01-01', 13382, 8283, 'Harrison County Rural Electric'),
        ('2004-01-01', 10784, pd.NA, 'Hawkeye Tri-county REC'),
        ('2004-01-01', 16922, pd.NA, 'The Brown Atchison Electric Co'),
        ('2004-01-01', 15026, 803, 'APS Control Area'),
        ('2005-01-01', 192, 192, 'Ryant T. Rose'),
        ('2005-01-01', 192, 192, 'Phillip K. Peter, Sr.'),
        # NOTE(review): BA ID 182 differs from the 2004 "Nelson Kinegak"
        # entry (192) — possibly a typo; confirm against EIA 861 source data.
        ('2005-01-01', 192, 182, 'Nelson Kinegak'),
        ('2005-01-01', 3258, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 1004, 40604, 'Heartland Consumer Power Dist.'),
        ('2005-01-01', 5309, 18195, 'Southern Company Services Inc'),
        ('2005-01-01', 6501, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 10623, 6455, 'Florida Power Corp'),
        ('2005-01-01', 10650, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 13382, 8283, 'Harrison County Rural Electric'),
        ('2005-01-01', 16922, pd.NA, 'The Brown Atchison Electric Co'),
        ('2005-01-01', 3722, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 4191, 13434, 'NEISO'),
        ('2005-01-01', 11788, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 8192, 14725, 'PJM-West'),
        ('2005-01-01', 11053, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 13815, 13781, 'Northern States Power Co'),
        ('2005-01-01', 15026, 803, 'APS Control Area'),
        ('2005-01-01', 18446, 9417, 'Interstate Power & Light'),
        ('2005-01-01', 19879, pd.NA, 'Kevin Smalls St Croix Districe'),
        ('2005-01-01', 19879, pd.NA, 'Corey Hodge - St Thomass/St Jo'),
        ('2005-01-01', 27000, pd.NA, 'Multiple Operators'),
        ('2005-01-01', 10610, 13501, 'ISO New York'),
        ('2006-01-01', 10610, 13501, 'ISO New York'),
        ('2008-01-01', 10610, 13501, 'ISO New York'),
        ('2009-01-01', 10610, 13501, 'ISO New York'),
        ('2010-01-01', 6389, 3755, 'Cleveland Electric Illum Co'),
        ('2010-01-01', 6389, 13998, 'Ohio Edison Co'),
        ('2010-01-01', 6389, 18997, 'Toledo Edison Co'),
        ('2010-01-01', 6949, 10000, 'Kansas City Power & Light Co'),
        ('2010-01-01', 14127, 14127, 'Omaha Public Power District'),
        ('2010-01-01', 11196, 13434, 'ISO New England'),
        ('2010-01-01', 97, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 3258, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 3405, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 3755, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 7292, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 8847, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 11701, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 13032, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 13998, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 14716, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 17141, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 18997, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 21249, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 40582, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 54862, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 56162, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 56496, 56669, 'Midwest Independent System Operator'),
        ('2010-01-01', 10610, 13501, 'ISO New York'),
        ('2011-01-01', 1968, 56669, 'Midwest Independent System Operator'),
        ('2011-01-01', 20806, 56669, 'Midwest Independent System Operator'),
        ('2011-01-01', 29296, 56669, 'Midwest Independent System Operator'),
        ('2012-01-01', 1968, 56669, 'Midwest Independent System Operator'),
        ('2012-01-01', 20806, 56669, 'Midwest Independent System Operator'),
        ('2012-01-01', 29296, 56669, 'Midwest Independent System Operator'),
    ], columns=[
        "report_date",  # We have this
        "utility_id_eia",  # We have this
        "balancing_authority_id_eia",  # We need to set this
        "balancing_authority_name_eia",  # We have this
    ])
    .assign(report_date=lambda x: pd.to_datetime(x.report_date))
    .astype(pudl.helpers.get_pudl_dtypes({
        "utility_id_eia": "eia",
        "balancing_authority_id_eia": "eia",
        "balancing_authority_name_eia": "eia",
    }))
    .dropna(subset=["report_date", "balancing_authority_name_eia", "utility_id_eia"])
    .set_index(["report_date", "balancing_authority_name_eia", "utility_id_eia"])
)

# Mapping of (state, misspelled-or-truncated county name as reported to EIA)
# to the canonical FIPS county name, used to repair county fields.
EIA_FIPS_COUNTY_FIXES = pd.DataFrame([
    ("AK", "Aleutians Ea", "Aleutians East"),
    ("AK", "Aleutian Islands", "Aleutians East"),
    ("AK", "Aleutians East Boro", "Aleutians East Borough"),
    ("AK", "Prince of Wales Ketchikan", "Prince of Wales-Hyder"),
    ("AK", "Prince Wales", "Prince of Wales-Hyder"),
    ("AK", "Ketchikan Gateway Bo", "Ketchikan Gateway Borough"),
    ("AK", "Prince of Wale", "Prince of Wales-Hyder"),
    ("AK", "Wrangell Petersburg", "Wrangell"),
    ("AK", "Wrangell Pet", "Wrangell"),
    ("AK", "Borough, Kodiak Island", "Kodiak Island Borough"),
    ("AK", "Matanuska Susitna Borough", "Matanuska-Susitna"),
    ("AK", "Matanuska Susitna", "Matanuska-Susitna"),
    ("AK", "Skagway-Yakutat", "Skagway"),
    ("AK", "Skagway Yaku", "Skagway"),
    ("AK", "Skagway Hoonah Angoon", "Hoonah-Angoon"),
    ("AK", "Angoon", "Hoonah-Angoon"),
    ("AK", "Hoonah", "Hoonah-Angoon"),
    ("AK", "Yukon Koyukuk", "Yukon-Koyukuk"),
    ("AK", "Yukon Koyuku", "Yukon-Koyukuk"),
    ("AK", "Yukon-Koyuku", "Yukon-Koyukuk"),
    ("AK", "Valdez Cordova", "Valdez-Cordova"),
    ("AK", "Cordova", "Valdez-Cordova"),
    ("AK", "Valdez Cordo", "Valdez-Cordova"),
    ("AK", "Lake and Pen", "Lake and Peninsula"),
    ("AK", "Lake & Peninsula Borough", "Lake and Peninsula"),
    ("AK", "Kodiak Islan", "Kodiak Island"),
    ("AK", "Kenai Penins", "Kenai Peninsula"),
    ("AK", "NW Arctic Borough", "Northwest Arctic"),
    ("AL", "De Kalb", "DeKalb"),
    ("AR", "Saint Franci", "St. Francis"),
    ("CA", "San Bernadino", "San Bernardino"),
    ("CA", "San Bernardi", "San Bernardino"),
    ("CT", "Shelton", "Fairfield"),
    ("FL", "De Soto", "DeSoto"),
    ("FL", "Miami Dade", "Miami-Dade"),
    ("FL", "Dade", "Miami-Dade"),
    ("FL", "St. Lucic", "St. Lucie"),
    ("FL", "St. Loucie", "St. Lucie"),
    ("GA", "De Kalb", "DeKalb"),
    ("GA", "Chattahooche", "Chattahoochee"),
    ("IA", "Pottawattami", "Pottawattamie"),
    ("IA", "Kossuh", "Kossuth"),
    ("IA", "Lousia", "Louisa"),
    ("IA", "Poweshick", "Poweshiek"),
    ("IA", "Humbolt", "Humboldt"),
    ("IA", "Harris", "Harrison"),
    ("IA", "O Brien", "O'Brien"),
    ("IL", "JoDavies", "Jo Daviess"),
    ("IL", "La Salle", "LaSalle"),
    ("IL", "Green", "Greene"),
    ("IL", "DeWitt", "De Witt"),
    ("IL", "Dewitt", "De Witt"),
    ("IL", "Du Page", "DuPage"),
    ("IL", "Burke", "Christian"),
    ("IL", "McCoupin", "Macoupin"),
    ("IN", "De Kalb County", "DeKalb County"),
    ("IN", "De Kalb", "DeKalb County"),
    ("IN", "La Porte", "LaPorte"),
    ("IN", "Putman", "Putnam"),
    ("IN", "Pyke", "Pike"),
    ("IN", "Sulliva", "Sullivan"),
    ("KS", "Leaveworth", "Leavenworth"),
    ("KY", "Spenser", "Spencer"),
    ("LA", "Jefferson Da", "Jefferson Davis"),
    ("LA", "Pointe Coupe", "Pointe Coupee"),
    ("LA", "West Baton R", "West Baton Rouge"),
    ("LA", "DeSoto", "De Soto"),
    ("LA", "Burke", "Iberia"),
    ("LA", "West Feleciana", "West Feliciana"),
    ("MA", "North Essex", "Essex"),
    ("MI", "Grand Traver", "Grand Traverse"),
    ("MI", "Antim", "Antrim"),
    ("MD", "Balto. City", "Baltimore City"),
    ("MD", "Prince Georg", "Prince George's County"),
    ("MD", "Worchester", "Worcester"),
    ("MN", "Fairbault", "Faribault"),
    ("MN", "Lac Qui Parl", "Lac Qui Parle"),
    ("MN", "Lake of The", "Lake of the Woods"),
    ("MN", "Ottertail", "Otter Tail"),
    ("MN", "Yellow Medic", "Yellow Medicine"),
    ("MO", "De Kalb", "DeKalb"),
    ("MO", "Cape Girarde", "Cape Girardeau"),
    ("MS", "Clark", "Clarke"),
    ("MS", "Clark", "Clarke"),
    ("MS", "De Soto", "DeSoto"),
    ("MS", "Jefferson Da", "Jefferson Davis"),
    ("MS", "Homoshitto", "Amite"),
    ("MT", "Anaconda-Dee", "Deer Lodge"),
    ("MT", "Butte-Silver", "Silver Bow"),
    ("MT", "Golden Valle", "Golden Valley"),
    ("MT", "Lewis and Cl", "Lewis and Clark"),
    ("NC", "Hartford", "Hertford"),
    ("NC", "Gilford", "Guilford"),
    ("NC", "North Hampton", "Northampton"),
    ("ND", "La Moure", "LaMoure"),
    ("NH", "Plaquemines", "Coos"),
    ("NH", "New Hampshire", "Coos"),
    ("OK", "Cimmaron", "Cimarron"),
    ("NY", "Westcherster", "Westchester"),
    ("OR", "Unioin", "Union"),
    ("PA", "Northumberla", "Northumberland"),
    ("PR", "Aquadilla", "Aguadilla"),
    ("PR", "Sabana Grand", "Sabana Grande"),
    ("PR", "San Sebastia", "San Sebastian"),
    ("PR", "Trujillo Alt", "Trujillo Alto"),
    ("RI", "Portsmouth", "Newport"),
    ("TX", "Collingswort", "Collingsworth"),
    ("TX", "De Witt", "DeWitt"),
    ("TX", "Hayes", "Hays"),
    ("TX", "San Augustin", "San Augustine"),
    ("VA", "Alexandria C", "Alexandria City"),
    ("VA", "City of Suff", "Suffolk City"),
    ("VA", "City of Manassas", "Manassas City"),
    ("VA", "Charlottesvi", "Charlottesville City"),
    ("VA", "Chesapeake C", "Chesapeake City"),
    ("VA", "Clifton Forg", "Alleghany"),
    ("VA", "Colonial Hei", "Colonial Heights City"),
    ("VA", "Covington Ci", "Covington City"),
    ("VA", "Fredericksbu", "Fredericksburg City"),
    ("VA", "Hopewell Cit", "Hopewell City"),
    ("VA", "Isle of Wigh", "Isle of Wight"),
    ("VA", "King and Que", "King and Queen"),
    ("VA", "Lexington Ci", "Lexington City"),
    ("VA", "Manassas Cit", "Manassas City"),
    ("VA", "Manassas Par", "Manassas Park City"),
    ("VA", "Northumberla", "Northumberland"),
    ("VA", "Petersburg C", "Petersburg City"),
    ("VA", "Poquoson Cit", "Poquoson City"),
    ("VA", "Portsmouth C", "Portsmouth City"),
    ("VA", "Prince Edwar", "Prince Edward"),
    ("VA", "Prince Georg", "Prince George"),
    ("VA", "Prince Willi", "Prince William"),
    ("VA", "Richmond Cit", "Richmond City"),
    ("VA", "Staunton Cit", "Staunton City"),
    ("VA", "Virginia Bea", "Virginia Beach City"),
    ("VA", "Waynesboro C", "Waynesboro City"),
    ("VA", "Winchester C", "Winchester City"),
    ("WA", "Wahkiakurn", "Wahkiakum"),
], columns=["state", "eia_county", "fips_county"])

# Balancing authority codes to assign by (name, ID); pd.NA codes mark
# utilities for which no code should be filled in.
BA_NAME_FIXES = pd.DataFrame([
    ("Omaha Public Power District", 14127, "OPPD"),
    ("Kansas City Power & Light Co", 10000, "KCPL"),
    ("Toledo Edison Co", 18997, pd.NA),
    ("Ohio Edison Co", 13998, pd.NA),
    ("Cleveland Electric Illum Co", 3755, pd.NA),
], columns=["balancing_authority_name_eia",
            "balancing_authority_id_eia",
            "balancing_authority_code_eia",
            ]
)

# Mapping of misspelled / non-standard NERC region strings (as reported) to
# the canonical NERC acronym; consumed by _clean_nerc() below.
NERC_SPELLCHECK = {
    'GUSTAVUSAK': 'ASCC',
    'AK': 'ASCC',
    'HI': 'HICC',
    'ERCTO': 'ERCOT',
    'RFO': 'RFC',
    'RF': 'RFC',
    'SSP': 'SPP',
    'VACAR': 'SERC',  # VACAR is a subregion of SERC
    'GATEWAY': 'SERC',  # GATEWAY is a subregion of SERC
    'TERR': 'GU',
    # NOTE(review): integer key among string keys — presumably a utility or
    # record ID that was reported in the nerc_region field; confirm.
    25470: 'MRO',
    'TX': 'TRE',
    'NY': 'NPCC',
    'NEW': 'NPCC',
    'YORK': 'NPCC',
}
###############################################################################
# EIA Form 861 Transform Helper functions
###############################################################################
def _filter_class_cols(df, class_list):
regex = f"^({'_|'.join(class_list)}).*$"
return df.filter(regex=regex)
def _filter_non_class_cols(df, class_list):
regex = f"^(?!({'_|'.join(class_list)})).*$"
return df.filter(regex=regex)
def _ba_code_backfill(df):
"""
Backfill Balancing Authority Codes based on codes in later years.
Note:
The BA Code to ID mapping can change from year to year. If a Balancing Authority
is bought by another entity, the code may change, but the old EIA BA ID will be
retained.
Args:
ba_eia861 (pandas.DataFrame): The transformed EIA 861 Balancing Authority
dataframe (balancing_authority_eia861).
Returns:
pandas.DataFrame: The balancing_authority_eia861 dataframe, but with many fewer
NA values in the balancing_authority_code_eia column.
"""
start_len = len(df)
start_nas = len(df.loc[df.balancing_authority_code_eia.isnull()])
logger.info(
f"Started with {start_nas} missing BA Codes out of {start_len} "
f"records ({start_nas/start_len:.2%})")
ba_ids = (
df[["balancing_authority_id_eia",
"balancing_authority_code_eia",
"report_date"]]
.drop_duplicates()
.sort_values(["balancing_authority_id_eia", "report_date"])
)
ba_ids["ba_code_filled"] = (
ba_ids.groupby("balancing_authority_id_eia")[
"balancing_authority_code_eia"].fillna(method="bfill")
)
ba_eia861_filled = df.merge(ba_ids, how="left")
ba_eia861_filled = (
ba_eia861_filled.assign(
balancing_authority_code_eia=lambda x: x.ba_code_filled)
.drop("ba_code_filled", axis="columns")
)
end_len = len(ba_eia861_filled)
if start_len != end_len:
raise AssertionError(
f"Number of rows in the dataframe changed {start_len}!={end_len}!"
)
end_nas = len(
ba_eia861_filled.loc[ba_eia861_filled.balancing_authority_code_eia.isnull()])
logger.info(
f"Ended with {end_nas} missing BA Codes out of {end_len} "
f"records ({end_nas/end_len:.2%})")
return ba_eia861_filled
def _tidy_class_dfs(df, df_name, idx_cols, class_list, class_type, keep_totals=False):
    """Stack per-class wide-format columns into a tidy (long) dataframe.

    Args:
        df (pandas.DataFrame): wide-format dataframe whose data columns are
            prefixed with the class names in ``class_list``.
        df_name (str): table name, used only in log messages.
        idx_cols (list): primary key columns of the wide-format table.
        class_list (list): column-name prefixes (e.g. customer classes) whose
            columns will be stacked.
        class_type (str): name of the new categorical column holding the class
            (e.g. 'customer_class' or 'tech_class').
        keep_totals (bool): if False (default), rows whose class is 'total'
            are dropped from the output.

    Returns:
        tuple: (tidy dataframe, new primary key list ``idx_cols + [class_type]``)
    """
    # Clean up values just enough to use primary key columns as a multi-index:
    logger.debug(
        f"Cleaning {df_name} table index columns so we can tidy data.")
    if 'balancing_authority_code_eia' in idx_cols:
        # BA code is part of the key, so missing codes must be filled to a
        # sentinel value before indexing.
        df = (
            df.assign(
                balancing_authority_code_eia=(
                    lambda x: x.balancing_authority_code_eia.fillna("UNK")))
        )
    raw_df = (
        df.dropna(subset=["utility_id_eia"])
        .astype(pudl.helpers.get_pudl_dtypes({"utility_id_eia": "eia"}))
        .set_index(idx_cols)
    )
    # Split the table into index, data, and "denormalized" columns for processing:
    # Separate customer classes and reported data into a hierarchical index
    logger.debug(f"Stacking EIA861 {df_name} data columns by {class_type}.")
    data_cols = _filter_class_cols(raw_df, class_list)

    # Create a regex identifier that splits the column headers based on the strings
    # deliniated in the class_list not just an underscore. This enables prefixes with
    # underscores such as fuel_cell as opposed to single-word prefixes followed by
    # underscores. Final string looks like: '(?<=customer_test)_|(?<=unbundled)_'
    # This ensures that the underscore AFTER the desired string (that can also include
    # underscores) is where the column headers are split, not just the first underscore.
    class_list_regex = '|'.join(['(?<=' + col + ')_' for col in class_list])

    data_cols.columns = (
        data_cols.columns.str.split(fr"{class_list_regex}", n=1, expand=True)
        .set_names([class_type, None])
    )
    # Now stack the customer classes into their own categorical column,
    data_cols = (
        data_cols.stack(level=0, dropna=False)
        .reset_index()
    )
    denorm_cols = _filter_non_class_cols(raw_df, class_list).reset_index()
    # Merge the index, data, and denormalized columns back together
    tidy_df = pd.merge(denorm_cols, data_cols, on=idx_cols)
    # Compare reported totals with sum of component columns
    if 'total' in class_list:
        _compare_totals(data_cols, idx_cols, class_type, df_name)
    if keep_totals is False:
        tidy_df = tidy_df.query(f"{class_type}!='total'")
    return tidy_df, idx_cols + [class_type]
def _drop_dupes(df, df_name, subset):
tidy_nrows = len(df)
deduped_df = df.drop_duplicates(subset=subset)
deduped_nrows = len(df)
logger.info(
f"Dropped {tidy_nrows-deduped_nrows} duplicate records from EIA 861 "
f"{df_name} table, out of a total of {tidy_nrows} records "
f"({(tidy_nrows-deduped_nrows)/tidy_nrows:.4%} of all records). "
)
return deduped_df
def _check_for_dupes(df, df_name, subset):
dupes = (
df.duplicated(
subset=subset, keep=False)
)
if dupes.any():
raise AssertionError(
f"Found {len(df[dupes])} duplicate rows in the {df_name} table, "
f"when zero were expected!"
)
def _early_transform(df):
    """Fix EIA na values and convert year column to date."""
    return pudl.helpers.convert_to_date(pudl.helpers.fix_eia_na(df))
def _compare_totals(data_cols, idx_cols, class_type, df_name):
    """Compare reported totals with sum of component columns.

    Args:
        data_cols (pd.DataFrame): A DataFrame containing only the columns with
            normalized information.
        idx_cols (list): A list of the primary keys for the given denormalized
            DataFrame.
        class_type (str): The name (either 'customer_class' or 'tech_class') of
            the column for which you'd like to compare totals to components.
        df_name (str): The name of the dataframe.
    """
    # Convert column dtypes so that numeric cols can be adequately summed
    data_cols = pudl.helpers.convert_cols_dtypes(data_cols, 'eia')
    # Drop data cols that are non numeric (preserve primary keys)
    logger.debug(f'{idx_cols}, {class_type}')
    data_cols = (
        data_cols.set_index(idx_cols + [class_type])
        .select_dtypes('number')
        .reset_index()
    )
    logger.debug(f'{data_cols.columns.tolist()}')
    # Create list of data columns to be summed
    # (may include non-numeric that will be excluded)
    data_col_list = set(data_cols.columns.tolist()) - \
        set(idx_cols + [class_type])
    logger.debug(f'{data_col_list}')
    # Distinguish reported totals from segments
    data_totals_df = data_cols.loc[data_cols[class_type] == 'total']
    data_no_tots_df = data_cols.loc[data_cols[class_type] != 'total']
    # Calculate sum of segments for comparison against reported total
    data_sums_df = data_no_tots_df.groupby(
        idx_cols + [class_type], observed=True).sum()
    sum_total_df = pd.merge(data_totals_df, data_sums_df, on=idx_cols,
                            how='outer', suffixes=('_total', '_sum'))
    # Check each data column's calculated sum against the reported total
    for col in data_col_list:
        col_df = (sum_total_df.loc[sum_total_df[col + '_total'].notnull()])
        if len(col_df) > 0:
            col_df = (
                col_df.assign(
                    compare_totals=lambda x: (x[col + '_total'] == x[col + '_sum']))
            )
            # NOTE(review): despite its name, bad_math is the fraction of rows
            # where the reported total DOES equal the sum of its parts, which
            # matches the wording of the log message below.
            bad_math = (col_df['compare_totals']).sum() / len(col_df)
            logger.debug(
                f"{df_name}: for column {col}, {bad_math:.0%} "
                "of non-null reported totals = the sum of parts."
            )
        else:
            logger.debug(
                f'{df_name}: for column {col} all total values are NaN')
def _clean_nerc(df, idx_cols):
    """Clean NERC region entries and make new rows for multiple nercs.

    This function examines reported NERC regions and makes sure the output column of the
    same name has reliable, singular NERC region acronyms. To do so, this function
    identifies entries where there are two or more NERC regions specified in a single
    cell (such as SPP & ERCOT) and makes new, duplicate rows for each NERC region. It
    also converts non-recognized reported nerc regions to 'UNK'.

    Args:
        df (pandas.DataFrame): A DataFrame with the column 'nerc_region' to be cleaned.
        idx_cols (list): A list of the primary keys.

    Returns:
        pandas.DataFrame: A DataFrame with correct and clean nerc regions.
    """
    idx_no_nerc = idx_cols.copy()
    if 'nerc_region' in idx_no_nerc:
        idx_no_nerc.remove('nerc_region')

    # Split raw df into primary keys plus nerc region and other value cols
    nerc_df = df[idx_cols].copy()
    other_df = df.drop('nerc_region', axis=1).set_index(idx_no_nerc)

    # Make all values upper-case
    # Replace all NA values with UNK
    # Make nerc values into lists to see how many separate values are stuffed into one
    # row (ex: 'SPP & ERCOT' --> ['SPP', 'ERCOT'])
    nerc_df = (
        nerc_df.assign(
            nerc_region=(lambda x: (
                x.nerc_region
                .str.upper()
                .fillna('UNK')
                .str.findall(r'[A-Z]+')))
        )
    )

    # Record a list of the reported nerc regions not included in the recognized
    # regions list (these eventually become UNK)
    nerc_col = nerc_df['nerc_region'].tolist()
    nerc_list = list(set([item for sublist in nerc_col for item in sublist]))
    non_nerc_list = [
        nerc_entity for nerc_entity in nerc_list
        if nerc_entity not in pc.RECOGNIZED_NERC_REGIONS + list(NERC_SPELLCHECK.keys())]
    # Consistency fix: use the module logger rather than a bare print() so this
    # message goes to the same destination as the rest of the transform logging.
    logger.info(
        "The following reported NERC regions are not currently recognized and "
        f"become UNK values: {non_nerc_list}")

    # Function to turn instances of 'SPP_UNK' or 'SPP_SPP' into 'SPP'
    def _remove_nerc_duplicates(entity_list):
        if len(entity_list) > 1:
            if 'UNK' in entity_list:
                entity_list.remove('UNK')
            if all(x == entity_list[0] for x in entity_list):
                entity_list = [entity_list[0]]
        return entity_list

    # Go through the nerc regions, spellcheck errors, delete those that aren't
    # recognized, and piece them back together (with _ separator if more than one
    # recognized)
    nerc_df['nerc_region'] = (
        nerc_df['nerc_region']
        .apply(lambda x: (
            [i if i not in NERC_SPELLCHECK.keys()
             else NERC_SPELLCHECK[i] for i in x]))
        .apply(lambda x: sorted(
            [i if i in pc.RECOGNIZED_NERC_REGIONS else 'UNK' for i in x]))
        .apply(lambda x: _remove_nerc_duplicates(x))
        .str.join('_')
    )

    # Merge all data back together
    full_df = pd.merge(nerc_df, other_df, on=idx_no_nerc)
    return full_df
def _compare_nerc_physical_w_nerc_operational(df):
"""Show df rows where physical nerc region does not match operational region.
In the Utility Data table, there is the 'nerc_region' index column, otherwise
interpreted as nerc region in which the utility is physically located and the
'nerc_regions_of_operation' column that depicts the nerc regions the utility
operates in. In most cases, these two columns are the same, however there are
certain instances where this is not true. There are also instances where a
utility operates in multiple nerc regions in which case one row will match and
another row will not. The output of this function in a table that shows only the
utilities where the physical nerc region does not match the operational region
ever, meaning there is no additional row for the same utlity during the same
year where there is a match between the cols.
Args:
df (pandas.DataFrame): The utility_data_nerc_eia861 table output from the
utility_data() function.
Returns:
pandas.DataFrame: A DataFrame with rows for utilities where NO listed operating
nerc region matches the "physical location" nerc region column that's a part of
the index.
"""
# Set NA states to UNK
df['state'] = df['state'].fillna('UNK')
# Create column indicating whether the nerc region matches the nerc region of
# operation (TRUE)
df['nerc_match'] = df['nerc_region'] == df['nerc_regions_of_operation']
# Group by utility, state, and report date to see which groups have at least one
# TRUE value
grouped_nerc_match_bools = (
df.groupby(['utility_id_eia', 'state', 'report_date'])
[['nerc_match']].any()
.reset_index()
.rename(columns={'nerc_match': 'nerc_group_match'})
)
# Merge back with original df to show cases where there are multiple non-matching
# nerc values per utility id, year, and state.
expanded_nerc_match_bools = (
pd.merge(df,
grouped_nerc_match_bools,
on=['utility_id_eia', 'state', 'report_date'],
how='left')
)
# Keep only rows where there are no matches for the whole group.
expanded_nerc_match_bools_false = (
expanded_nerc_match_bools[~expanded_nerc_match_bools['nerc_group_match']]
)
return expanded_nerc_match_bools_false
def _pct_to_mw(df, pct_col):
"""Turn pct col into mw capacity using total capacity col."""
mw_value = df['total_capacity_mw'] * df[pct_col] / 100
return mw_value
def _make_yn_bool(df_object):
"""Turn Y/N reporting into True or False boolean statements for df or series."""
return df_object.replace({"N": False, "Y": True})
def _thousand_to_one(df_object):
"""Turn reporting in thousands of dollars to regular dollars for df or series."""
return df_object * 1000
###############################################################################
# EIA Form 861 Table Transform Functions
###############################################################################
def service_territory(tfr_dfs):
    """Transform the EIA 861 utility service territory table.

    Transformations include:

    * Homogenize spelling of county names.
    * Add field for state/county FIPS code.

    Args:
        tfr_dfs (dict): A dictionary of DataFrame objects in which pages from EIA861
            form (keys) correspond to normalized DataFrames of values from that page
            (values).

    Returns:
        dict: a dictionary of pandas.DataFrame objects in which pages from EIA861
        form (keys) correspond to normalized DataFrames of values from that page
        (values).
    """
    # No data tidying required.
    # NA counties get read in as floats, which breaks the county parsing done
    # by addfips downstream, so coerce the column to string first.
    st_df = tfr_dfs["service_territory_eia861"].astype({"county": str})
    # Standardize county spellings to the canonical US Census names.
    st_df = pudl.helpers.clean_eia_counties(st_df, fixes=EIA_FIPS_COUNTY_FIXES)
    # Attach state & county FIPS IDs derived from the cleaned names.
    st_df = pudl.helpers.add_fips_ids(st_df)
    tfr_dfs["service_territory_eia861"] = st_df
    return tfr_dfs
def balancing_authority(tfr_dfs):
    """
    Transform the EIA 861 Balancing Authority table.

    Transformations include:

    * Fill in balancing authority IDs based on date, utility ID, and BA Name.
    * Backfill balancing authority codes based on BA ID.
    * Fix BA code and ID typos.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by table
            name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    # No data tidying required
    # All columns are already type compatible.
    # Value transformations:
    # * Backfill BA codes on a per BA ID basis
    # * Fix data entry errors
    # Index on (date, BA name, utility ID) so BA_ID_NAME_FIXES — which is keyed
    # on the same triple — can be applied via .loc below.
    df = (
        tfr_dfs["balancing_authority_eia861"]
        .pipe(pudl.helpers.convert_cols_dtypes, "eia", "balancing_authority_eia861")
        .set_index(["report_date", "balancing_authority_name_eia", "utility_id_eia"])
    )
    # Fill in BA IDs based on date, utility ID, and BA Name:
    df.loc[BA_ID_NAME_FIXES.index,
           "balancing_authority_id_eia"] = BA_ID_NAME_FIXES.balancing_authority_id_eia
    # Backfill BA Codes based on BA IDs:
    df = df.reset_index().pipe(_ba_code_backfill)
    # Typo: NEVP, BA ID is 13407, but in 2014-2015 in UT, entered as 13047
    df.loc[
        (df.balancing_authority_code_eia == "NEVP") &
        (df.balancing_authority_id_eia == 13047),
        "balancing_authority_id_eia"
    ] = 13407
    # Typo: Turlock Irrigation District is TIDC, not TID.
    df.loc[
        (df.balancing_authority_code_eia == "TID") &
        (df.balancing_authority_id_eia == 19281),
        "balancing_authority_code_eia"
    ] = "TIDC"
    tfr_dfs["balancing_authority_eia861"] = df
    return tfr_dfs
def balancing_authority_assn(tfr_dfs):
    """
    Compile a balancing authority, utility, state association table.

    For the years up through 2012, the only BA-Util information that's available comes
    from the balancing_authority_eia861 table, and it does not include any state-level
    information. However, there is utility-state association information in the
    sales_eia861 and other data tables.

    For the years from 2013 onward, there's explicit BA-Util-State information in the
    data tables (e.g. sales_eia861). These observed associations can be compiled to
    give us a picture of which BA-Util-State associations exist. However, we need to
    merge in the balancing authority IDs since the data tables only contain the
    balancing authority codes.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 dataframes. This must
            include any dataframes from which we want to compile BA-Util-State
            associations, which means this function has to be called after all the
            basic transform functions that depend on only a single raw table.

    Returns:
        dict: a dictionary of transformed dataframes. This function both compiles the
        association table, and finishes the normalization of the balancing authority
        table. It may be that once the harvesting process incorporates the EIA 861,
        some or all of this functionality should be pulled into the phase-2 transform
        functions.
    """
    # These aren't really "data" tables, and should not be searched for associations
    non_data_dfs = [
        "balancing_authority_eia861",
        "service_territory_eia861",
    ]
    # The dataframes from which to compile BA-Util-State associations
    data_dfs = [tfr_dfs[table]
                for table in tfr_dfs if table not in non_data_dfs]
    logger.info("Building an EIA 861 BA-Util-State association table.")
    # Helpful shorthand query strings....
    early_years = "report_date<='2012-12-31'"
    late_years = "report_date>='2013-01-01'"
    early_dfs = [df.query(early_years) for df in data_dfs]
    late_dfs = [df.query(late_years) for df in data_dfs]
    # The old BA table lists utilities directly, but has no state information.
    early_date_ba_util = _harvest_associations(
        dfs=[tfr_dfs["balancing_authority_eia861"].query(early_years), ],
        cols=["report_date",
              "balancing_authority_id_eia",
              "utility_id_eia"],
    )
    # State-utility associations are brought in from observations in data_dfs
    early_date_util_state = _harvest_associations(
        dfs=early_dfs,
        cols=["report_date",
              "utility_id_eia",
              "state"],
    )
    # Outer merge so associations present in only one of the two sources survive.
    early_date_ba_util_state = (
        early_date_ba_util
        .merge(early_date_util_state, how="outer")
        .drop_duplicates()
    )
    # New BA table has no utility information, but has BA Codes...
    late_ba_code_id = _harvest_associations(
        dfs=[tfr_dfs["balancing_authority_eia861"].query(late_years), ],
        cols=["report_date",
              "balancing_authority_code_eia",
              "balancing_authority_id_eia"],
    )
    # BA Code allows us to bring in utility+state data from data_dfs:
    late_date_ba_code_util_state = _harvest_associations(
        dfs=late_dfs,
        cols=["report_date",
              "balancing_authority_code_eia",
              "utility_id_eia",
              "state"],
    )
    # We merge on ba_code then drop it, b/c only BA ID exists in all years consistently:
    late_date_ba_util_state = (
        late_date_ba_code_util_state
        .merge(late_ba_code_id, how="outer")
        .drop("balancing_authority_code_eia", axis="columns")
        .drop_duplicates()
    )
    # Stack the early and late eras; rows with no BA ID are unusable downstream.
    tfr_dfs["balancing_authority_assn_eia861"] = (
        pd.concat([early_date_ba_util_state, late_date_ba_util_state])
        .dropna(subset=["balancing_authority_id_eia", ])
        .astype(pudl.helpers.get_pudl_dtypes({"utility_id_eia": "eia"}))
    )
    return tfr_dfs
def utility_assn(tfr_dfs):
    """Harvest a Utility-Date-State Association Table."""
    # These tables describe entities rather than report data, so they should
    # not contribute observed associations.
    skip_tables = [
        "balancing_authority_eia861",
        "service_territory_eia861",
    ]
    # Every other transformed table can contribute Util-State-Date observations.
    data_dfs = [
        df for name, df in tfr_dfs.items() if name not in skip_tables
    ]
    logger.info("Building an EIA 861 Util-State-Date association table.")
    tfr_dfs["utility_assn_eia861"] = _harvest_associations(
        data_dfs, ["report_date", "utility_id_eia", "state"])
    return tfr_dfs
def _harvest_associations(dfs, cols):
"""
Compile all unique, non-null combinations of values ``cols`` within ``dfs``.
Find all unique, non-null combinations of the columns ``cols`` in the dataframes
``dfs`` within records that are selected by ``query``. All of ``cols`` must be
present in each of the ``dfs``.
Args:
dfs (iterable of pandas.DataFrame): The DataFrames in which to search for
cols (iterable of str): Labels of columns for which to find unique, non-null
combinations of values.
Raises:
ValueError: if no associations for cols are found in dfs.
Returns:
pandas.DataFrame: A dataframe containing all the unique, non-null combinations
of values found in ``cols``.
"""
assn = pd.DataFrame()
for df in dfs:
if set(df.columns).issuperset(set(cols)):
assn = assn.append(df[cols])
assn = assn.dropna().drop_duplicates()
if assn.empty:
raise ValueError(
"These dataframes contain no associations for the columns: "
f"{cols}"
)
return assn
def normalize_balancing_authority(tfr_dfs):
    """
    Finish the normalization of the balancing_authority_eia861 table.

    The balancing_authority_assn_eia861 table depends on information that is only
    available in the un-normalized form of the balancing_authority_eia861 table,
    and also on having access to a bunch of transformed data tables, so it can
    compile the observed combinations of report dates, balancing authorities,
    states, and utilities. This means we have to hold off on the final
    normalization of the balancing_authority_eia861 table until the rest of the
    transform process is over.

    Raises:
        ValueError: if any BA ID that is missing a BA code in one record has a
            non-null code in another record (i.e. the backfill missed something).
    """
    logger.info("Completing normalization of balancing_authority_eia861.")
    keep_cols = [
        "report_date",
        "balancing_authority_id_eia",
        "balancing_authority_code_eia",
        "balancing_authority_name_eia",
    ]
    # One record per (report_date, BA ID) with only the BA attribute columns.
    ba_normed = (
        tfr_dfs["balancing_authority_eia861"]
        .loc[:, keep_cols]
        .drop_duplicates(subset=["report_date", "balancing_authority_id_eia"])
    )
    # Sanity check: any BA ID lacking a code should lack it in every record.
    ids_without_codes = (
        ba_normed.loc[ba_normed.balancing_authority_code_eia.isnull(),
                      "balancing_authority_id_eia"]
        .drop_duplicates()
        .dropna()
    )
    fillable = ba_normed[
        ba_normed.balancing_authority_id_eia.isin(ids_without_codes)
        & ba_normed.balancing_authority_code_eia.notnull()
    ]
    if not fillable.empty:
        raise ValueError(
            f"Found {len(fillable)} unfilled but fillable BA Codes!"
        )
    tfr_dfs["balancing_authority_eia861"] = ba_normed
    return tfr_dfs
def sales(tfr_dfs):
    """Transform the EIA 861 Sales table.

    Transformations include:

    * Remove rows with utility ids 88888 and 99999.
    * Tidy data by customer class.
    * Drop primary key duplicates.
    * Convert 1000s of dollars into dollars.
    * Convert data_observed field I/O into boolean.
    * Map full spelling onto code values.
    """
    idx_cols = [
        "utility_id_eia",
        "state",
        "report_date",
        "balancing_authority_code_eia",
    ]
    # Pre-tidy clean specific to sales table: 88888/99999 are placeholder IDs.
    raw_sales = (
        tfr_dfs["sales_eia861"].copy()
        .query("utility_id_eia not in (88888, 99999)")
    )
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Sales table.")
    # Reshape wide-by-customer-class columns into tall form; this also extends
    # the primary key with customer_class.
    tidy_sales, idx_cols = _tidy_class_dfs(
        raw_sales,
        df_name='Sales',
        idx_cols=idx_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    # Reporting errors produce duplicate primary keys with conflicting values,
    # and there's no way to tell which record is "correct" — keep the first.
    deduped_sales = _drop_dupes(
        df=tidy_sales,
        df_name='Sales',
        subset=idx_cols
    )
    ###########################################################################
    # Transform Values:
    ###########################################################################
    logger.info("Performing value transformations on EIA 861 Sales table.")
    # O="observed" => True, I="imputed" => False
    observed_map = {
        "O": True,
        "I": False,
    }
    # Form codes A-C all correspond to the retail business model; D is energy
    # services. Combined with service_type (energy, delivery, bundled) this
    # becomes part of the table's primary key.
    business_model_map = {
        "A": "retail",
        "B": "retail",
        "C": "retail",
        "D": "energy_services",
    }
    transformed_sales = deduped_sales.assign(
        # Revenue is reported in thousands of dollars; convert to dollars.
        sales_revenue=lambda x: _thousand_to_one(x.sales_revenue),
        data_observed=lambda x: x.data_observed.replace(observed_map),
        business_model=lambda x: x.business_model.replace(business_model_map),
        service_type=lambda x: x.service_type.str.lower(),
    )
    tfr_dfs["sales_eia861"] = transformed_sales
    return tfr_dfs
def advanced_metering_infrastructure(tfr_dfs):
    """
    Transform the EIA 861 Advanced Metering Infrastructure table.

    Transformations include:

    * Tidy data by customer class.
    * Drop total_meters columns (it's calculable with other fields).

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    pk_cols = [
        "utility_id_eia",
        "state",
        "balancing_authority_code_eia",
        "report_date",
    ]
    raw_ami = tfr_dfs["advanced_metering_infrastructure_eia861"].copy()
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Advanced Metering Infrastructure table.")
    # Reshape wide-by-customer-class columns into tall form; extends the PK.
    tidy_ami, pk_cols = _tidy_class_dfs(
        raw_ami,
        df_name='Advanced Metering Infrastructure',
        idx_cols=pk_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    # No duplicates are expected, but verify just in case.
    _check_for_dupes(tidy_ami, 'Advanced Metering Infrastructure', pk_cols)
    # total_meters is derivable from the per-class meter counts, so drop it.
    tfr_dfs["advanced_metering_infrastructure_eia861"] = (
        tidy_ami.drop(columns=['total_meters'])
    )
    return tfr_dfs
def demand_response(tfr_dfs):
    """
    Transform the EIA 861 Demand Response table.

    Transformations include:

    * Fill in NA balancing authority codes with UNK (because it's part of the
      primary key).
    * Tidy subset of the data by customer class.
    * Drop duplicate rows based on primary keys.
    * Convert 1000s of dollars into dollars.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by table
            name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    # Primary key columns for the demand response table.
    idx_cols = [
        "utility_id_eia",
        "state",
        "balancing_authority_code_eia",
        "report_date",
    ]
    raw_dr = tfr_dfs["demand_response_eia861"].copy()
    # fill na BA values with 'UNK' since the BA code is part of the primary key
    raw_dr['balancing_authority_code_eia'] = (
        raw_dr['balancing_authority_code_eia'].fillna('UNK')
    )
    # Split data into tidy-able and not: water_heater is not reported by
    # customer class, so it becomes its own table rather than being melted.
    raw_dr_water_heater = raw_dr[idx_cols + ['water_heater']].copy()
    raw_dr_water_heater = _drop_dupes(
        df=raw_dr_water_heater,
        df_name='Demand Response Water Heater',
        subset=idx_cols
    )
    raw_dr = raw_dr.drop(['water_heater'], axis=1)
    ###########################################################################
    # Tidy Data: reshape wide-by-customer-class columns into tall form.
    ###########################################################################
    logger.info("Tidying the EIA 861 Demand Response table.")
    tidy_dr, idx_cols = _tidy_class_dfs(
        raw_dr,
        df_name='Demand Response',
        idx_cols=idx_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    # shouldn't be duplicates but there are some strange values from IN.
    # these values have Nan BA values and should be deleted earlier.
    # thinking this might have to do with DR table weirdness between 2012 and 2013
    # will come back to this after working on the DSM table. Dropping dupes for now.
    deduped_dr = _drop_dupes(
        df=tidy_dr,
        df_name='Demand Response',
        subset=idx_cols
    )
    ###########################################################################
    # Transform Values:
    # * Turn 1000s of dollars back into dollars
    ###########################################################################
    logger.info(
        "Performing value transformations on EIA 861 Demand Response table.")
    transformed_dr = (
        deduped_dr.assign(
            customer_incentives_cost=lambda x: (
                _thousand_to_one(x.customer_incentives_cost)),
            other_costs=lambda x: (
                _thousand_to_one(x.other_costs))
        )
    )
    # The single raw table becomes two outputs: the tidy DR data and the
    # per-utility water heater counts split off above.
    tfr_dfs["demand_response_eia861"] = transformed_dr
    tfr_dfs["demand_response_water_heater_eia861"] = raw_dr_water_heater
    return tfr_dfs
def demand_side_management(tfr_dfs):
    """
    Transform the EIA 861 Demand Side Management table.

    In 2013, the EIA changed the contents of the 861 form so that information
    pertaining to demand side management was no longer housed in a single table, but
    rather two separate ones pertaining to energy efficiency and demand response.
    While the pre and post 2013 tables contain similar information, one column in the
    pre-2013 demand side management table may not have an obvious column equivalent
    in the post-2013 energy efficiency or demand response data. We've addressed this
    by keeping the demand side management and energy efficiency and demand response
    tables separate. Use the DSM table for pre 2013 data and the EE / DR tables for
    post 2013 data. Despite the uncertainty of comparing across these years, the data
    are similar and we hope to provide a cohesive dataset in the future with all
    years and comparable columns combined.

    Transformations include:

    * Clean up NERC codes and ensure one per row.
    * Remove demand_side_management and data_observed columns (they are all the same).
    * Tidy subset of the data by customer class.
    * Convert Y/N columns to booleans.
    * Convert 1000s of dollars into dollars.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by table
            name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    # Primary key columns; NERC region is part of the PK for this table.
    idx_cols = [
        'utility_id_eia',
        'state',
        'nerc_region',
        'report_date',
    ]
    # Sales columns are not reported by customer class, so they are split off.
    sales_cols = [
        'sales_for_resale_mwh',
        'sales_to_ultimate_consumers_mwh'
    ]
    # Y/N columns to be converted to booleans.
    bool_cols = [
        'energy_savings_estimates_independently_verified',
        'energy_savings_independently_verified',
        'major_program_changes',
        'price_responsive_programs',
        'short_form',
        'time_responsive_programs',
    ]
    # Columns reported in thousands of dollars.
    cost_cols = [
        'annual_indirect_program_cost',
        'annual_total_cost',
        'energy_efficiency_annual_cost',
        'energy_efficiency_annual_incentive_payment',
        'load_management_annual_cost',
        'load_management_annual_incentive_payment',
    ]
    raw_dsm = tfr_dfs['demand_side_management_eia861'].copy()
    ###########################################################################
    # Transform Data Round 1 (must be done to avoid issues with nerc_region col in
    # _tidy_class_dfs())
    # * Clean NERC region col
    # * Drop data_status and demand_side_management cols (they don't contain anything)
    # * Drop the 88888 placeholder utility ID
    ###########################################################################
    transformed_dsm1 = (
        _clean_nerc(raw_dsm, idx_cols)
        .drop(['demand_side_management', 'data_status'], axis=1)
        .query("utility_id_eia not in [88888]")
    )
    # Separate dsm data into sales vs. other table (the latter of which can be tidied)
    dsm_sales = transformed_dsm1[idx_cols + sales_cols].copy()
    dsm_ee_dr = transformed_dsm1.drop(sales_cols, axis=1)
    ###########################################################################
    # Tidy Data: reshape wide-by-customer-class columns into tall form.
    ###########################################################################
    tidy_dsm, dsm_idx_cols = (
        pudl.transform.eia861._tidy_class_dfs(
            dsm_ee_dr,
            df_name='Demand Side Management',
            idx_cols=idx_cols,
            class_list=pc.CUSTOMER_CLASSES,
            class_type='customer_class',
            keep_totals=True
        )
    )
    ###########################################################################
    # Transform Data Round 2
    # * Make booleans (Y=True, N=False)
    # * Turn 1000s of dollars back into dollars
    ###########################################################################
    # Split tidy dsm data into transformable chunks, indexed by the PK so they
    # can be merged back together afterwards.
    tidy_dsm_bool = (
        tidy_dsm[dsm_idx_cols + bool_cols].copy()
        .set_index(dsm_idx_cols)
    )
    tidy_dsm_cost = (
        tidy_dsm[dsm_idx_cols + cost_cols].copy()
        .set_index(dsm_idx_cols)
    )
    tidy_dsm_ee_dr = (
        tidy_dsm.drop(bool_cols + cost_cols, axis=1)
    )
    # Calculate transformations for each chunk
    # NOTE(review): a missing short_form value is treated as False — confirm
    # that NA really means "did not file the short form" here.
    transformed_dsm2_bool = (
        _make_yn_bool(tidy_dsm_bool)
        .reset_index()
        .assign(short_form=lambda x: x.short_form.fillna(False))
    )
    transformed_dsm2_cost = _thousand_to_one(tidy_dsm_cost).reset_index()
    # Merge transformed chunks back together
    transformed_dsm2 = (
        pd.merge(transformed_dsm2_bool, transformed_dsm2_cost,
                 on=dsm_idx_cols, how='outer')
    )
    transformed_dsm2 = (
        pd.merge(transformed_dsm2, tidy_dsm_ee_dr,
                 on=dsm_idx_cols, how='outer')
    )
    # Split into final tables: EE/DR metrics vs. per-utility misc attributes.
    ee_cols = [col for col in transformed_dsm2 if 'energy_efficiency' in col]
    dr_cols = [col for col in transformed_dsm2 if 'load_management' in col]
    program_cols = ['price_responsiveness_customers',
                    'time_responsiveness_customers']
    total_cost_cols = ['annual_indirect_program_cost', 'annual_total_cost']
    dsm_ee_dr = (
        transformed_dsm2[
            dsm_idx_cols
            + ee_cols
            + dr_cols
            + program_cols
            + total_cost_cols].copy()
    )
    dsm_misc = (
        transformed_dsm2.drop(
            ee_cols
            + dr_cols
            + program_cols
            + total_cost_cols
            + ['customer_class'], axis=1)
    )
    # Misc attributes don't vary by customer class, so dropping that column
    # leaves duplicates which are removed here.
    dsm_misc = _drop_dupes(
        df=dsm_misc,
        df_name='Demand Side Management Misc.',
        subset=['utility_id_eia', 'state', 'nerc_region', 'report_date']
    )
    # Replace the single raw table with the three normalized outputs.
    del tfr_dfs['demand_side_management_eia861']
    tfr_dfs['demand_side_management_sales_eia861'] = dsm_sales
    tfr_dfs['demand_side_management_ee_dr_eia861'] = dsm_ee_dr
    tfr_dfs['demand_side_management_misc_eia861'] = dsm_misc
    return tfr_dfs
def distributed_generation(tfr_dfs):
    """
    Transform the EIA 861 Distributed Generation table.

    Transformations include:

    * Map full spelling onto code values.
    * Convert pre-2010 percent values in mw values.
    * Remove total columns calculable with other fields.
    * Tidy subset of the data by tech class.
    * Tidy subset of the data by fuel class.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by table
            name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'report_date',
    ]
    # Columns that are neither tech- nor fuel-specific.
    misc_cols = [
        'backup_capacity_mw',
        'backup_capacity_pct',
        'distributed_generation_owned_capacity_mw',
        'distributed_generation_owned_capacity_pct',
        'estimated_or_actual_capacity_data',
        'generators_number',
        'generators_num_less_1_mw',
        'total_capacity_mw',
        'total_capacity_less_1_mw',
        'utility_name_eia',
    ]
    # Capacity broken out by technology type.
    tech_cols = [
        'all_storage_capacity_mw',
        'combustion_turbine_capacity_mw',
        'combustion_turbine_capacity_pct',
        'estimated_or_actual_tech_data',
        'hydro_capacity_mw',
        'hydro_capacity_pct',
        'internal_combustion_capacity_mw',
        'internal_combustion_capacity_pct',
        'other_capacity_mw',
        'other_capacity_pct',
        'pv_capacity_mw',
        'steam_capacity_mw',
        'steam_capacity_pct',
        'total_capacity_mw',
        'wind_capacity_mw',
        'wind_capacity_pct',
    ]
    # Fuel mix percentages.
    fuel_cols = [
        'oil_fuel_pct',
        'estimated_or_actual_fuel_data',
        'gas_fuel_pct',
        'other_fuel_pct',
        'renewable_fuel_pct',
        'water_fuel_pct',
        'wind_fuel_pct',
        'wood_fuel_pct',
    ]
    # Pre-tidy transform: map estimated-or-actual A/E codes to 'Actual'/'Estimated'
    raw_dg = (
        tfr_dfs['distributed_generation_eia861'].copy()
        .assign(
            estimated_or_actual_capacity_data=lambda x: (
                x.estimated_or_actual_capacity_data.map(pc.ESTIMATED_OR_ACTUAL)),
            estimated_or_actual_fuel_data=lambda x: (
                x.estimated_or_actual_fuel_data.map(pc.ESTIMATED_OR_ACTUAL)),
            estimated_or_actual_tech_data=lambda x: (
                x.estimated_or_actual_tech_data.map(pc.ESTIMATED_OR_ACTUAL))
        )
    )
    # Split into three tables: Capacity/tech-related, fuel-related, and misc.
    raw_dg_tech = raw_dg[idx_cols + tech_cols].copy()
    raw_dg_fuel = raw_dg[idx_cols + fuel_cols].copy()
    raw_dg_misc = raw_dg[idx_cols + misc_cols].copy()
    ###########################################################################
    # Transform Values:
    # * Turn pct values into mw values
    # * Remove old pct cols and totals cols
    # Explanation: Pre 2010 reporting asks for components as a percent of total
    # capacity whereas after 2010, the forms ask for the component portion as a
    # mw value. In order to coalesce similar data, we've used total values to
    # turn percent values from pre 2010 into mw values like those post-2010.
    ###########################################################################
    # Separate datasets into years with only pct values (pre-2010) and years
    # with only mw values (post-2010).
    df_pre_2010_tech = raw_dg_tech[raw_dg_tech['report_date'] < '2010-01-01']
    df_post_2010_tech = raw_dg_tech[raw_dg_tech['report_date'] >= '2010-01-01']
    df_pre_2010_misc = raw_dg_misc[raw_dg_misc['report_date'] < '2010-01-01']
    df_post_2010_misc = raw_dg_misc[raw_dg_misc['report_date'] >= '2010-01-01']
    logger.info(
        'Converting pct values into mw values for distributed generation misc table')
    # NOTE: DataFrame.append() was removed in pandas 2.0; use pd.concat() instead.
    transformed_dg_misc = (
        pd.concat([
            df_pre_2010_misc.assign(
                distributed_generation_owned_capacity_mw=lambda x: _pct_to_mw(
                    x, 'distributed_generation_owned_capacity_pct'),
                backup_capacity_mw=lambda x: _pct_to_mw(x, 'backup_capacity_pct'),
            ),
            df_post_2010_misc,
        ])
        .drop(['distributed_generation_owned_capacity_pct',
               'backup_capacity_pct',
               'total_capacity_mw'], axis=1)
    )
    logger.info(
        'Converting pct values into mw values for distributed generation tech table')
    transformed_dg_tech = (
        pd.concat([
            df_pre_2010_tech.assign(
                combustion_turbine_capacity_mw=lambda x: (
                    _pct_to_mw(x, 'combustion_turbine_capacity_pct')),
                hydro_capacity_mw=lambda x: _pct_to_mw(x, 'hydro_capacity_pct'),
                internal_combustion_capacity_mw=lambda x: (
                    _pct_to_mw(x, 'internal_combustion_capacity_pct')),
                other_capacity_mw=lambda x: _pct_to_mw(x, 'other_capacity_pct'),
                steam_capacity_mw=lambda x: _pct_to_mw(x, 'steam_capacity_pct'),
                wind_capacity_mw=lambda x: _pct_to_mw(x, 'wind_capacity_pct'),
            ),
            df_post_2010_tech,
        ])
        .drop([
            'combustion_turbine_capacity_pct',
            'hydro_capacity_pct',
            'internal_combustion_capacity_pct',
            'other_capacity_pct',
            'steam_capacity_pct',
            'wind_capacity_pct',
            'total_capacity_mw'], axis=1)
    )
    ###########################################################################
    # Tidy Data: reshape wide-by-class columns into tall form.
    ###########################################################################
    logger.info('Tidying Distributed Generation Tech Table')
    tidy_dg_tech, tech_idx_cols = _tidy_class_dfs(
        df=transformed_dg_tech,
        df_name='Distributed Generation Tech Component Capacity',
        idx_cols=idx_cols,
        class_list=pc.TECH_CLASSES,
        class_type='tech_class',
    )
    logger.info('Tidying Distributed Generation Fuel Table')
    tidy_dg_fuel, fuel_idx_cols = _tidy_class_dfs(
        df=raw_dg_fuel,
        df_name='Distributed Generation Fuel Percent',
        idx_cols=idx_cols,
        class_list=pc.FUEL_CLASSES,
        class_type='fuel_class',
    )
    # Drop original distributed generation table from tfr_dfs and replace it
    # with the three normalized outputs.
    del tfr_dfs['distributed_generation_eia861']
    tfr_dfs["distributed_generation_tech_eia861"] = tidy_dg_tech
    tfr_dfs["distributed_generation_fuel_eia861"] = tidy_dg_fuel
    tfr_dfs["distributed_generation_misc_eia861"] = transformed_dg_misc
    return tfr_dfs
def distribution_systems(tfr_dfs):
    """
    Transform the EIA 861 Distribution Systems table.

    Transformations include:

    * No additional transformations.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    # Nothing to tidy or transform here — just copy and sanity-check.
    raw_ds = tfr_dfs['distribution_systems_eia861'].copy()
    pk_cols = ["utility_id_eia", "state", "report_date"]
    # No duplicates are expected, but verify just in case.
    _check_for_dupes(raw_ds, 'Distribution Systems', pk_cols)
    tfr_dfs["distribution_systems_eia861"] = raw_ds
    return tfr_dfs
def dynamic_pricing(tfr_dfs):
    """
    Transform the EIA 861 Dynamic Pricing table.

    Transformations include:

    * Tidy subset of the data by customer class.
    * Convert Y/N columns to booleans.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    pk_cols = [
        "utility_id_eia",
        "state",
        "balancing_authority_code_eia",
        "report_date",
    ]
    # Per-class program participation flags, reported as Y/N.
    flag_cols = [
        'critical_peak_pricing',
        'critical_peak_rebate',
        'real_time_pricing_program',
        'time_of_use_pricing_program',
        'variable_peak_pricing_program'
    ]
    raw_dp = tfr_dfs["dynamic_pricing_eia861"].copy()
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Dynamic Pricing table.")
    tidy_dp, pk_cols = _tidy_class_dfs(
        raw_dp,
        df_name='Dynamic Pricing',
        idx_cols=pk_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    # No duplicates are expected, but verify just in case.
    _check_for_dupes(tidy_dp, 'Dynamic Pricing', pk_cols)
    ###########################################################################
    # Transform Values:
    ###########################################################################
    logger.info(
        "Performing value transformations on EIA 861 Dynamic Pricing table.")
    yn_map = {'Y': True, 'N': False}
    for col in flag_cols:
        # Map Y/N to booleans; anything else (e.g. 'X' or NaN) becomes pd.NA.
        tidy_dp[col] = (
            tidy_dp[col].replace(yn_map)
            .apply(lambda x: x if x in [True, False] else pd.NA)
        )
    tfr_dfs["dynamic_pricing_eia861"] = tidy_dp
    return tfr_dfs
def energy_efficiency(tfr_dfs):
    """
    Transform the EIA 861 Energy Efficiency table.

    Transformations include:

    * Tidy subset of the data by customer class.
    * Drop website column (almost no valid information).
    * Convert 1000s of dollars into dollars.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'balancing_authority_code_eia',
        'report_date',
    ]
    raw_ee = tfr_dfs["energy_efficiency_eia861"].copy()
    # No duplicates to speak of but take measures to check just in case
    _check_for_dupes(raw_ee, 'Energy Efficiency', idx_cols)
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Energy Efficiency table.")
    # wide-to-tall by customer class (must be done before wide-to-tall by fuel
    # class).  Call the module-local _tidy_class_dfs() directly, consistent
    # with every other transform in this module (the old fully-qualified
    # pudl.transform.eia861._tidy_class_dfs reference was redundant).
    tidy_ee, _ = _tidy_class_dfs(
        raw_ee,
        df_name='Energy Efficiency',
        idx_cols=idx_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
        keep_totals=True
    )
    ###########################################################################
    # Transform Values:
    # * Turn 1000s of dollars back into dollars
    # * Get rid of website column
    ###########################################################################
    logger.info("Transforming the EIA 861 Energy Efficiency table.")
    transformed_ee = (
        tidy_ee.assign(
            customer_incentives_incremental_cost=lambda x: (
                _thousand_to_one(x.customer_incentives_incremental_cost)),
            customer_incentives_incremental_life_cycle_cost=lambda x: (
                _thousand_to_one(x.customer_incentives_incremental_life_cycle_cost)),
            customer_other_costs_incremental_life_cycle_cost=lambda x: (
                _thousand_to_one(x.customer_other_costs_incremental_life_cycle_cost)),
            other_costs_incremental_cost=lambda x: (
                _thousand_to_one(x.other_costs_incremental_cost)),
        ).drop(['website'], axis=1)
    )
    tfr_dfs["energy_efficiency_eia861"] = transformed_ee
    return tfr_dfs
def green_pricing(tfr_dfs):
    """
    Transform the EIA 861 Green Pricing table.

    Transformations include:

    * Tidy subset of the data by customer class.
    * Convert 1000s of dollars into dollars.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    gp_index_cols = [
        "utility_id_eia",
        "state",
        "report_date",
    ]
    gp_df = tfr_dfs["green_pricing_eia861"].copy()
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Green Pricing table.")
    gp_df, gp_index_cols = _tidy_class_dfs(
        gp_df,
        df_name='Green Pricing',
        idx_cols=gp_index_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    _check_for_dupes(gp_df, 'Green Pricing', gp_index_cols)
    ###########################################################################
    # Transform Values:
    # * Turn 1000s of dollars back into dollars
    ###########################################################################
    logger.info(
        "Performing value transformations on EIA 861 Green Pricing table.")
    for dollar_column in ("green_pricing_revenue", "rec_revenue"):
        gp_df[dollar_column] = _thousand_to_one(gp_df[dollar_column])
    tfr_dfs["green_pricing_eia861"] = gp_df
    return tfr_dfs
def mergers(tfr_dfs):
    """
    Transform the EIA 861 Mergers table.

    Transformations include:

    * Map full spelling onto code values.
    * Retain preceeding zeros in zipcode field.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    mergers_df = tfr_dfs["mergers_eia861"].copy()
    # No data tidying required.
    ###########################################################################
    # Transform Values:
    # * Turn ownership column from single-letter code to full ownership category.
    # * Retain preceeding zeros in zip codes
    ###########################################################################
    mergers_df["entity_type"] = mergers_df["entity_type"].map(pc.ENTITY_TYPE_DICT)
    # No duplicates to speak of but take measures to check just in case
    _check_for_dupes(mergers_df, 'Mergers', [
        "utility_id_eia", "state", "report_date"])
    tfr_dfs["mergers_eia861"] = mergers_df
    return tfr_dfs
def net_metering(tfr_dfs):
    """
    Transform the EIA 861 Net Metering table.

    Transformations include:

    * Remove rows with utility ids 99999.
    * Tidy subset of the data by customer class.
    * Tidy subset of the data by tech class.

    The input table is split into two output tables: one tidied by customer
    and tech class, and one holding the miscellaneous (non-class) columns.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'balancing_authority_code_eia',
        'report_date',
    ]
    # Columns that are NOT reported by customer/tech class.
    misc_cols = [
        'pv_current_flow_type'
    ]
    # Pre-tidy clean specific to net_metering table
    raw_nm = (
        tfr_dfs["net_metering_eia861"].copy()
        .query("utility_id_eia not in [99999]")
    )
    # Separate customer class data from misc data (in this case just one col: current flow)
    # Could easily add this to tech_class if desired.
    raw_nm_customer_fuel_class = (
        raw_nm.drop(misc_cols, axis=1).copy())
    raw_nm_misc = raw_nm[idx_cols + misc_cols].copy()
    # Check for duplicates before idx cols get changed
    _check_for_dupes(
        raw_nm_misc, 'Net Metering Current Flow Type PV', idx_cols)
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Net Metering table.")
    # wide-to-tall by customer class (must be done before wide-to-tall by fuel class)
    # NOTE: _tidy_class_dfs() also returns an updated idx_cols list (with the
    # new class column appended), which is then fed into the second tidy pass
    # below -- the two calls are order-sensitive.
    tidy_nm_customer_class, idx_cols = _tidy_class_dfs(
        raw_nm_customer_fuel_class,
        df_name='Net Metering',
        idx_cols=idx_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
    )
    # wide-to-tall by fuel class
    tidy_nm_customer_fuel_class, idx_cols = _tidy_class_dfs(
        tidy_nm_customer_class,
        df_name='Net Metering',
        idx_cols=idx_cols,
        class_list=pc.TECH_CLASSES,
        class_type='tech_class',
        keep_totals=True,
    )
    # No duplicates to speak of but take measures to check just in case
    _check_for_dupes(
        tidy_nm_customer_fuel_class, 'Net Metering Customer & Fuel Class', idx_cols)
    # No transformation needed
    # Drop original net_metering_eia861 table from tfr_dfs and register the
    # two derived tables in its place.
    del tfr_dfs['net_metering_eia861']
    tfr_dfs["net_metering_customer_fuel_class_eia861"] = tidy_nm_customer_fuel_class
    tfr_dfs["net_metering_misc_eia861"] = raw_nm_misc
    return tfr_dfs
def non_net_metering(tfr_dfs):
    """
    Transform the EIA 861 Non-Net Metering table.

    Transformations include:

    * Remove rows with utility ids 99999.
    * Drop duplicate rows.
    * Tidy subset of the data by customer class.
    * Tidy subset of the data by tech class.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'balancing_authority_code_eia',
        'report_date',
    ]
    # Columns that are NOT reported by customer/tech class.
    misc_cols = [
        'backup_capacity_mw',
        'generators_number',
        'pv_current_flow_type',
        'utility_owned_capacity_mw'
    ]
    # Pre-tidy clean specific to non_net_metering table.
    # BUGFIX: utility_id_eia is numeric, so the filter value must be a list of
    # ints (as in net_metering()); the previous query used the string '99999'.
    raw_nnm = (
        tfr_dfs["non_net_metering_eia861"].copy()
        .query("utility_id_eia not in [99999]")
    )
    # there are ~80 fully duplicate records in the 2018 table. We need to
    # remove those duplicates, but fail loudly if the duplicate count is
    # unexpectedly large.
    og_len = len(raw_nnm)
    raw_nnm = raw_nnm.drop_duplicates(keep='first')
    diff_len = og_len - len(raw_nnm)
    if diff_len > 100:
        raise ValueError(
            f"""Too many duplicate dropped records in raw non-net metering
            table: {diff_len}""")
    # Separate customer class data from misc data
    raw_nnm_customer_fuel_class = raw_nnm.drop(misc_cols, axis=1).copy()
    raw_nnm_misc = (raw_nnm[idx_cols + misc_cols]).copy()
    # Check for duplicates before idx cols get changed
    _check_for_dupes(
        raw_nnm_misc, 'Non Net Metering Misc.', idx_cols)
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Non Net Metering table.")
    # wide-to-tall by customer class (must be done before wide-to-tall by fuel
    # class); _tidy_class_dfs() returns idx_cols updated with the class column.
    tidy_nnm_customer_class, idx_cols = _tidy_class_dfs(
        raw_nnm_customer_fuel_class,
        df_name='Non Net Metering',
        idx_cols=idx_cols,
        class_list=pc.CUSTOMER_CLASSES,
        class_type='customer_class',
        keep_totals=True
    )
    # wide-to-tall by fuel class
    tidy_nnm_customer_fuel_class, idx_cols = _tidy_class_dfs(
        tidy_nnm_customer_class,
        df_name='Non Net Metering',
        idx_cols=idx_cols,
        class_list=pc.TECH_CLASSES,
        class_type='tech_class',
        keep_totals=True
    )
    # No duplicates to speak of (deleted 2018 duplicates above) but take
    # measures to check just in case
    _check_for_dupes(
        tidy_nnm_customer_fuel_class,
        'Non Net Metering Customer & Fuel Class', idx_cols)
    # Delete total_capacity_mw col for redundancy (must delete x not y)
    tidy_nnm_customer_fuel_class = (
        tidy_nnm_customer_fuel_class.drop(columns='capacity_mw_x')
        .rename(columns={'capacity_mw_y': 'capacity_mw'})
    )
    # No transformation needed
    # Drop original non_net_metering_eia861 table from tfr_dfs and register
    # the two derived tables in its place.
    del tfr_dfs['non_net_metering_eia861']
    tfr_dfs["non_net_metering_customer_fuel_class_eia861"] = (
        tidy_nnm_customer_fuel_class)
    tfr_dfs["non_net_metering_misc_eia861"] = raw_nnm_misc
    return tfr_dfs
def operational_data(tfr_dfs):
    """
    Transform the EIA 861 Operational Data table.

    Transformations include:

    * Remove rows with utility ids 88888.
    * Remove rows with NA utility id.
    * Clean up NERC codes and ensure one per row.
    * Convert data_observed field I/O into boolean.
    * Tidy subset of the data by revenue class.
    * Convert 1000s of dollars into dollars.

    The input table is split into two output tables: a tall revenue table and
    a miscellaneous (non-revenue) table.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'nerc_region',
        'report_date',
    ]
    # Pre-tidy clean specific to operational data table:
    # drop placeholder utility 88888 and rows with no utility id at all.
    raw_od = tfr_dfs["operational_data_eia861"].copy()
    raw_od = (
        raw_od[(raw_od['utility_id_eia'] != 88888) &
               (raw_od['utility_id_eia'].notnull())]
    )
    ###########################################################################
    # Transform Data Round 1:
    # * Clean up reported NERC regions:
    #   * Fix puncuation/case
    #   * Replace na with 'UNK'
    #   * Make sure NERC regions are a verified NERC region
    #   * Add underscore between double entires (SPP_ERCOT)
    # * Re-code data_observed to boolean:
    #   * O="observed" => True
    #   * I="imputed" => False
    ###########################################################################
    transformed_od = (
        _clean_nerc(raw_od, idx_cols)
        .assign(
            data_observed=lambda x: x.data_observed.replace({
                "O": True,
                "I": False}))
    )
    # Split data into 2 tables:
    #  * Revenue (wide-to-tall)
    #  * Misc. (other)
    revenue_cols = [col for col in transformed_od if 'revenue' in col]
    transformed_od_misc = (transformed_od.drop(revenue_cols, axis=1))
    transformed_od_rev = (transformed_od[idx_cols + revenue_cols].copy())
    # Wide-to-tall revenue columns; _tidy_class_dfs() also appends the new
    # revenue_class column to idx_cols.
    tidy_od_rev, idx_cols = (
        _tidy_class_dfs(
            transformed_od_rev,
            df_name='Operational Data Revenue',
            idx_cols=idx_cols,
            class_list=pc.REVENUE_CLASSES,
            class_type='revenue_class'
        )
    )
    ###########################################################################
    # Transform Data Round 2:
    # * Turn 1000s of dollars back into dollars
    ###########################################################################
    # Transform revenue 1000s into dollars
    transformed_od_rev = (
        tidy_od_rev.assign(revenue=lambda x: (
            _thousand_to_one(x.revenue))
        )
    )
    # Drop original operational_data_eia861 table from tfr_dfs and register
    # the two derived tables in its place.
    del tfr_dfs['operational_data_eia861']
    tfr_dfs["operational_data_revenue_eia861"] = transformed_od_rev
    tfr_dfs["operational_data_misc_eia861"] = transformed_od_misc
    return tfr_dfs
def reliability(tfr_dfs):
    """
    Transform the EIA 861 Reliability table.

    Transformations include:

    * Tidy subset of the data by reliability standard.
    * Convert Y/N columns to booleans.
    * Map full spelling onto code values.
    * Drop duplicate rows.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'report_date'
    ]
    # Pre-tidy clean specific to reliability table
    raw_r = tfr_dfs["reliability_eia861"].copy()
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Reliability table.")
    # wide-to-tall by standards
    tidy_r, idx_cols = _tidy_class_dfs(
        df=raw_r,
        df_name='Reliability',
        idx_cols=idx_cols,
        class_list=pc.RELIABILITY_STANDARDS,
        class_type='standard',
        keep_totals=False,
    )
    ###########################################################################
    # Transform Data:
    # * Re-code outages_recorded_automatically and inactive_accounts_included to
    #   boolean:
    #   * Y/y="Yes" => True
    #   * N/n="No" => False
    # * Expand momentary_interruption_definition:
    #   * 'L' => 'Less than one minute'
    #   * 'F' => 'Less than or equal to five minutes'
    #   * 'O' => 'Other'
    ###########################################################################
    transformed_r = (
        tidy_r.assign(
            outages_recorded_automatically=lambda x: (
                _make_yn_bool(x.outages_recorded_automatically.str.upper())),
            # BUGFIX: upper-case before recoding, as documented above and as
            # already done for outages_recorded_automatically, so lowercase
            # 'y'/'n' entries are recognized too.
            inactive_accounts_included=lambda x: (
                _make_yn_bool(x.inactive_accounts_included.str.upper())),
            momentary_interruption_definition=lambda x: (
                x.momentary_interruption_definition.map(
                    pc.MOMENTARY_INTERRUPTION_DEF))
        )
    )
    # Drop duplicate entries for utilities 13027, 3408 and 9697
    transformed_r = _drop_dupes(
        df=transformed_r,
        df_name='Reliability',
        subset=idx_cols
    )
    tfr_dfs["reliability_eia861"] = transformed_r
    return tfr_dfs
def utility_data(tfr_dfs):
    """
    Transform the EIA 861 Utility Data table.

    Transformations include:

    * Remove rows with utility ids 88888.
    * Clean up NERC codes and ensure one per row.
    * Tidy subset of the data by NERC region.
    * Tidy subset of the data by RTO.
    * Convert Y/N columns to booleans.

    The input table is split into three output tables: NERC regions of
    operation, RTOs of operation, and the remaining miscellaneous columns.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by
            table name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    idx_cols = [
        'utility_id_eia',
        'state',
        'report_date',
        'nerc_region'
    ]
    # Pre-tidy clean specific to utility data table: drop placeholder
    # utility 88888.
    raw_ud = (
        tfr_dfs["utility_data_eia861"].copy()
        .query("utility_id_eia not in [88888]")
    )
    ##############################################################################
    # Transform Data Round 1 (must be done to avoid issues with nerc_region col in
    # _tidy_class_dfs())
    # * Clean NERC region col
    ##############################################################################
    transformed_ud = _clean_nerc(raw_ud, idx_cols)
    # Establish columns that are nerc regions vs. rtos
    nerc_cols = [col for col in raw_ud if 'nerc_region_operation' in col]
    rto_cols = [col for col in raw_ud if 'rto_operation' in col]
    # Make separate tables for nerc vs. rto vs. misc data
    raw_ud_nerc = transformed_ud[idx_cols + nerc_cols].copy()
    raw_ud_rto = transformed_ud[idx_cols + rto_cols].copy()
    raw_ud_misc = transformed_ud.drop(nerc_cols + rto_cols, axis=1).copy()
    ###########################################################################
    # Tidy Data:
    ###########################################################################
    logger.info("Tidying the EIA 861 Utility Data tables.")
    # Each _tidy_class_dfs() call returns its own updated idx_cols list; keep
    # them separate since the NERC and RTO tables gain different class cols.
    tidy_ud_nerc, nerc_idx_cols = _tidy_class_dfs(
        df=raw_ud_nerc,
        df_name='Utility Data NERC Regions',
        idx_cols=idx_cols,
        class_list=[x.lower() for x in pc.RECOGNIZED_NERC_REGIONS],
        class_type='nerc_regions_of_operation',
    )
    tidy_ud_rto, rto_idx_cols = _tidy_class_dfs(
        df=raw_ud_rto,
        df_name='Utility Data RTOs',
        idx_cols=idx_cols,
        class_list=pc.RTO_CLASSES,
        class_type='rtos_of_operation'
    )
    ###########################################################################
    # Transform Data Round 2:
    # * Re-code operating_in_XX to boolean:
    #   * Y = "Yes" => True
    #   * N = "No" => False
    #   * Blank => False
    # * Make nerc_regions uppercase
    ###########################################################################
    # Transform NERC region table
    transformed_ud_nerc = (
        tidy_ud_nerc.assign(
            nerc_region_operation=lambda x: (
                _make_yn_bool(x.nerc_region_operation.fillna(False))),
            nerc_regions_of_operation=lambda x: (
                x.nerc_regions_of_operation.str.upper()
            )
        )
    )
    # Only keep true values and drop bool col
    transformed_ud_nerc = (
        transformed_ud_nerc[transformed_ud_nerc.nerc_region_operation]
        .drop(['nerc_region_operation'], axis=1)
    )
    # Transform RTO table
    transformed_ud_rto = (
        tidy_ud_rto.assign(
            rto_operation=lambda x: (
                x.rto_operation
                .fillna(False)
                .replace({"N": False, "Y": True})),
            rtos_of_operation=lambda x: (
                x.rtos_of_operation.str.upper()
            )
        )
    )
    # Only keep true values and drop bool col
    transformed_ud_rto = (
        transformed_ud_rto[transformed_ud_rto.rto_operation]
        .drop(['rto_operation'], axis=1)
    )
    # Transform MISC table by first separating bool cols from non bool cols
    # and then making them into boolean values.
    transformed_ud_misc_bool = (
        raw_ud_misc
        .drop(['entity_type', 'utility_name_eia'], axis=1)
        .set_index(idx_cols)
        .fillna(False)
        .replace({"N": False, "Y": True})
    )
    # Merge misc. bool cols back together with misc. non bool cols
    transformed_ud_misc = (
        pd.merge(
            raw_ud_misc[idx_cols + ['entity_type', 'utility_name_eia']],
            transformed_ud_misc_bool,
            on=idx_cols,
            how='outer'
        )
    )
    # Drop original utility_data_eia861 table from tfr_dfs and register the
    # three derived tables in its place.
    del tfr_dfs['utility_data_eia861']
    tfr_dfs["utility_data_nerc_eia861"] = transformed_ud_nerc
    tfr_dfs["utility_data_rto_eia861"] = transformed_ud_rto
    tfr_dfs["utility_data_misc_eia861"] = transformed_ud_misc
    return tfr_dfs
##############################################################################
# Coordinating Transform Function
##############################################################################


def transform(raw_dfs, eia861_tables=pc.pudl_tables["eia861"]):
    """
    Transform EIA 861 DataFrames.

    Args:
        raw_dfs (dict): a dictionary of tab names (keys) and DataFrames
            (values). This can be generated by pudl.
        eia861_tables (tuple): A tuple containing the names of the EIA 861
            tables that can be pulled into PUDL.

    Returns:
        dict: A dictionary of DataFrame objects in which pages from EIA 861 form
        (keys) corresponds to a normalized DataFrame of values from that page
        (values).
    """
    # Mapping from table name to the transform function that handles it.
    table_transformers = {
        "balancing_authority_eia861": balancing_authority,
        "service_territory_eia861": service_territory,
        "sales_eia861": sales,
        "advanced_metering_infrastructure_eia861": advanced_metering_infrastructure,
        "demand_response_eia861": demand_response,
        "demand_side_management_eia861": demand_side_management,
        "distributed_generation_eia861": distributed_generation,
        "distribution_systems_eia861": distribution_systems,
        "dynamic_pricing_eia861": dynamic_pricing,
        "energy_efficiency_eia861": energy_efficiency,
        "green_pricing_eia861": green_pricing,
        "mergers_eia861": mergers,
        "net_metering_eia861": net_metering,
        "non_net_metering_eia861": non_net_metering,
        "operational_data_eia861": operational_data,
        "reliability_eia861": reliability,
        "utility_data_eia861": utility_data,
    }
    # Holds transformed and pre-transformed dataframes; a pre-transformed
    # dataframe may later be split into two or more output dataframes.
    tfr_dfs = {}
    if not raw_dfs:
        logger.info(
            "No raw EIA 861 dataframes found. Not transforming EIA 861.")
        return tfr_dfs
    # Run the dedicated transform function for each requested table.
    for table_name in eia861_tables:
        if table_name not in table_transformers:
            raise ValueError(f"Unrecognized EIA 861 table: {table_name}")
        logger.info(f"Transforming raw EIA 861 DataFrames for {table_name} "
                    f"concatenated across all years.")
        tfr_dfs[table_name] = _early_transform(raw_dfs[table_name])
        tfr_dfs = table_transformers[table_name](tfr_dfs)
    # This is more like harvesting stuff, and should probably be relocated:
    tfr_dfs = balancing_authority_assn(tfr_dfs)
    tfr_dfs = utility_assn(tfr_dfs)
    tfr_dfs = normalize_balancing_authority(tfr_dfs)
    tfr_dfs = pudl.helpers.convert_dfs_dict_dtypes(tfr_dfs, 'eia')
    return tfr_dfs
| 37.564071 | 131 | 0.60744 |
9561a03377388a344ac779b6644678bd5602e472 | 5,714 | py | Python | train.py | yoyoismee/Sarkhans-Constitution-Generator | d7511521c9f73b2fd3725f8e2ffa29761dfa0137 | [
"WTFPL",
"MIT"
] | 1 | 2018-04-05T06:01:40.000Z | 2018-04-05T06:01:40.000Z | train.py | yoyoismee/Sarkhans-Constitution-Generator | d7511521c9f73b2fd3725f8e2ffa29761dfa0137 | [
"WTFPL",
"MIT"
] | null | null | null | train.py | yoyoismee/Sarkhans-Constitution-Generator | d7511521c9f73b2fd3725f8e2ffa29761dfa0137 | [
"WTFPL",
"MIT"
] | null | null | null |
import numpy as np
import argparse
import pickle
# Parse the path of the training corpus from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('file',
                    type=str,
                    help='input file')
data_dir = parser.parse_args().file

# Load the raw text corpus into memory.
with open(data_dir, "r") as f:
    text = f.read()

# Corpus statistics: scenes are separated by blank lines, sentences by
# newlines.  These counts are informational only; nothing below uses them.
scenes = text.split('\n\n')
sentence_count_scene = [scene.count('\n') for scene in scenes]
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
word_count_sentence = [len(sentence.split()) for sentence in sentences]
def create_lookup_tables(text):
    """Build word<->index lookup dictionaries over the vocabulary of *text*.

    Args:
        text: an iterable of word tokens.

    Returns:
        A (word_to_int, int_to_word) pair of dicts; every distinct word gets a
        unique integer id and the two dicts are inverses of each other.
    """
    word_to_int = {word: idx for idx, word in enumerate(set(text))}
    int_to_word = {idx: word for word, idx in word_to_int.items()}
    return word_to_int, int_to_word
###
# Replace punctuation with word-like tokens so that e.g. "end." and "end"
# map to the same vocabulary entry.  (Some token names contain typos --
# "|qoute|", "|patenthesesL|" -- but they are internal markers only and must
# match whatever the companion generation script expects.)
token_dict = {".": "|dot|", ",": "|comma|", "\"": "|qoute|", ";": "|semicolon|",
              "!": "|exclamation|", "?": "|question|", "(": "|patenthesesL|",
              ")": "|patenthesesR|", "--": "|dash|", "\n": "|return|", }
for key, token in token_dict.items():
    text = text.replace(key, ' {} '.format(token))

# Normalize case and split into a flat list of word tokens.
text = text.lower()
text = text.split()

# Encode the corpus as integer ids and persist everything for later scripts.
vocab_to_int, int_to_vocab = create_lookup_tables(text)
int_text = [vocab_to_int[word] for word in text]
pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('data.pj', 'wb'))
###
# Reload immediately so the rest of the script runs from the pickled state.
int_text, vocab_to_int, int_to_vocab, token_dict = pickle.load(open('data.pj', mode='rb'))
import warnings
import tensorflow as tf
def get_inputs():
    """Create the TF placeholders for word-id inputs, targets and learning rate."""
    inputs = tf.placeholder(tf.int32, [None, None], name="input")
    targets = tf.placeholder(tf.int32, [None, None], name="targets")
    learning_rate = tf.placeholder(tf.float32, name="learningrate")
    return inputs, targets, learning_rate
def get_init_cell(batch_size, rnn_size):
    """Build a single-layer LSTM cell and its (named) zero initial state."""
    base_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    multi_cell = tf.contrib.rnn.MultiRNNCell([base_cell])
    zero_state = multi_cell.zero_state(batch_size, tf.float32)
    # Name the state tensor so it can be retrieved from a restored graph.
    initial_state = tf.identity(zero_state, name='initial_state')
    return multi_cell, initial_state
def get_embed(input_data, vocab_size, embed_dim):
    """Look up trainable embeddings (uniform in [-1, 1)) for the input word ids."""
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
def build_rnn(cell, inputs):
    """Run *cell* over *inputs* with dynamic_rnn; name the final state tensor."""
    rnn_output, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the state so the generation script can fetch it from the graph.
    named_state = tf.identity(state, name="final_state")
    return rnn_output, named_state
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """Build the full network: embedding -> RNN -> fully-connected logits.

    Args:
        cell: the (multi-)RNN cell from get_init_cell().
        rnn_size: unused; kept for interface compatibility with callers.
        input_data: int32 placeholder of word ids, shape [batch, time].
        vocab_size: number of distinct words (output dimension).
        embed_dim: embedding vector size.

    Returns:
        (logits, final_state) where logits has shape [batch, time, vocab_size].
    """
    embeded = get_embed(embed_dim=embed_dim, input_data=input_data,
                        vocab_size=vocab_size)
    outputs, final_state = build_rnn(cell=cell, inputs=embeded)
    logits = tf.contrib.layers.fully_connected(
        inputs=outputs, num_outputs=vocab_size, activation_fn=None)
    # Removed a leftover debug print of final_state.shape.
    return logits, final_state
def get_batches(int_text, batch_size, seq_length):
    """Split the encoded corpus into (input, target) training batches.

    Args:
        int_text: list of word ids (the encoded corpus).
        batch_size: number of sequences per batch.
        seq_length: number of word ids per sequence.

    Returns:
        np.ndarray of shape (n_batches, 2, batch_size, seq_length) where
        [:, 0] holds the inputs and [:, 1] holds the targets (inputs shifted
        right by one word).  The very last target word wraps around to the
        first word of the corpus.
    """
    n_batch = len(int_text) // (batch_size * seq_length)
    # Number of word ids actually consumed; any remainder is dropped.
    max_target = n_batch * batch_size * seq_length
    out = np.zeros((n_batch, 2, batch_size, seq_length))
    for i in range(n_batch):
        for j in range(batch_size):
            start = j * n_batch * seq_length + i * seq_length
            out[i][0][j] = int_text[start:start + seq_length]
            # Targets are the inputs shifted one step to the right.
            target = list(int_text[start + 1:start + 1 + seq_length])
            if len(target) < seq_length:
                # BUGFIX: when len(int_text) is an exact multiple of
                # batch_size * seq_length, the final target slice is one
                # element short.  The old code relied on NumPy broadcasting
                # of the short slice, which only works for seq_length == 2
                # and raises ValueError otherwise.  Pad explicitly instead.
                target.append(int_text[0])
            out[i][1][j] = target
            if start + seq_length + 1 > max_target:
                # The final target word wraps to the start of the corpus.
                out[i][1][j][-1] = int_text[0]
    return out
# Training hyperparameters.
# Number of Epochs
num_epochs = 99
# Batch Size
batch_size = 256
# RNN Size (LSTM hidden units)
rnn_size = 256
# Embedding Dimension Size
embed_dim = 100
# Sequence Length (words per training sequence)
seq_length = 15
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
# Checkpoint path prefix used by tf.train.Saver below.
save_dir = './tmp'
from tensorflow.contrib import seq2seq
# Build the training graph: inputs -> embedding -> LSTM -> logits.
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)

    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')

    # Loss function: mean cross-entropy over all time steps (uniform weights).
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)

    # Gradient Clipping to [-1, 1]; None gradients (unused vars) are skipped.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)

batches = get_batches(int_text, batch_size, seq_length)

# Train: the LSTM state is carried across batches within each epoch and
# re-initialized from the zero state at the start of every epoch.
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(num_epochs):
        state = sess.run(initial_state, {input_text: batches[0][0]})
        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)

            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')

# Persist the sequence length and checkpoint path for the generation script.
pickle.dump((seq_length, save_dir), open('params.pj', 'wb'))
cce98f819f43e65e8890d86e943f7876d8f7de17 | 1,815 | py | Python | demos/ant.core/10-scan.py | asantacreu/python-ant | 894e9281d0a5811b8acec1ad9ac2efbc9fb4b0f3 | [
"MIT"
] | 3 | 2021-05-09T18:51:02.000Z | 2022-03-23T16:53:08.000Z | demos/ant.core/10-scan.py | asantacreu/python-ant | 894e9281d0a5811b8acec1ad9ac2efbc9fb4b0f3 | [
"MIT"
] | null | null | null | demos/ant.core/10-scan.py | asantacreu/python-ant | 894e9281d0a5811b8acec1ad9ac2efbc9fb4b0f3 | [
"MIT"
] | 1 | 2018-09-12T15:19:41.000Z | 2018-09-12T15:19:41.000Z | """
Extending on demo-04, re-implements the event callbackso we can parse the results of the scan.
"""
import sys
import time
from ant.core import driver
from ant.core import node
from ant.core import event
from ant.core import message
from ant.core.constants import *
from config import *
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
# A run-the-mill event listener
class ScanListener(event.EventCallback):
def process(self, msg):
if isinstance(msg, message.ChannelBroadcastDataMessage):
if len(msg.payload) > 9:
flagByte = ord(msg.payload[9])
if flagByte == 0x80:
deviceNumberLSB = ord(msg.payload[10])
deviceNumberMSB = ord(msg.payload[11])
deviceNumber = "{}".format(deviceNumberLSB + (deviceNumberMSB<<8))
deviceType = "{}".format(ord(msg.payload[12]))
print 'New Device Found: %s of type %s' % (deviceNumber,deviceType)
# Initialize the USB stick driver and start an ANT node on top of it.
stick = driver.USB1Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()

# Setup channel: two-way receive on the ANT+ network with extended messages
# enabled so broadcast payloads include the sender's device number/type.
key = node.NetworkKey('N:ANT+', NETKEY)
antnode.setNetworkKey(0, key)
channel = antnode.getFreeChannel()
channel.name = 'C:HRM'
channel.assign('N:ANT+', CHANNEL_TYPE_TWOWAY_RECEIVE, 0x01)
# NOTE(review): setID(120, 0, 0) -- 120 presumably selects the heart-rate
# device type (channel is named C:HRM) with wildcard device number and
# transmission type; confirm against the ANT+ device profile.
channel.setID(120, 0, 0)
channel.enableExtendedMessages(0x01)
channel.setSearchTimeout(TIMEOUT_NEVER)
channel.setPeriod(8070)
channel.setFrequency(57)
channel.open()

# Setup callback
# Note: We could also register an event listener for non-channel events by
# calling registerEventListener() on antnode rather than channel.
channel.registerCallback(ScanListener())

# Wait while the listener prints any devices it hears.
print "Listening for HR monitor events (120 seconds)..."
time.sleep(120)

# Shutdown
channel.close()
channel.unassign()
antnode.stop()
| 27.923077 | 94 | 0.69697 |
a9d1eaa34095398289eef8cb0ff46c746d022ae1 | 9,462 | py | Python | research/lstm_object_detection/inputs/seq_dataset_builder_test.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | research/lstm_object_detection/inputs/seq_dataset_builder_test.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | research/lstm_object_detection/inputs/seq_dataset_builder_test.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
import os
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from lstm_object_detection.inputs import seq_dataset_builder
from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
from object_detection.builders import preprocessor_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import preprocessor_pb2
class DatasetBuilderTest(tf.test.TestCase):
  def _create_tf_record(self):
    """Write a single-frame SequenceExample TFRecord; return its file path.

    The example holds one randomly generated 16x16 JPEG-encoded frame with a
    single full-image bounding box of class label 2.
    """
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)

    # Random image, JPEG-encoded via a TF op (needs a session to evaluate).
    image_tensor = np.random.randint(255, size=(16, 16, 3)).astype(np.uint8)
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()

    # context carries per-sequence metadata; feature_lists carry per-frame data.
    sequence_example = example_pb2.SequenceExample(
        context=feature_pb2.Features(
            feature={
                'image/format':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=['jpeg'.encode('utf-8')])),
                'image/height':
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[16])),
                'image/width':
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[16])),
            }),
        feature_lists=feature_pb2.FeatureLists(
            feature_list={
                'image/encoded':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            bytes_list=feature_pb2.BytesList(
                                value=[encoded_jpeg])),
                    ]),
                'image/object/bbox/xmin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            float_list=feature_pb2.FloatList(value=[0.0])),
                    ]),
                'image/object/bbox/xmax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            float_list=feature_pb2.FloatList(value=[1.0]))
                    ]),
                'image/object/bbox/ymin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            float_list=feature_pb2.FloatList(value=[0.0])),
                    ]),
                'image/object/bbox/ymax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            float_list=feature_pb2.FloatList(value=[1.0]))
                    ]),
                'image/object/class/label':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(
                            int64_list=feature_pb2.Int64List(value=[2]))
                    ]),
            }))
    writer.write(sequence_example.SerializeToString())
    writer.close()

    return path
def _get_model_configs_from_proto(self):
"""Creates a model text proto for testing.
Returns:
A dictionary of model configs.
"""
model_text_proto = """
[lstm_object_detection.protos.lstm_model] {
train_unroll_length: 4
eval_unroll_length: 4
}
model {
ssd {
feature_extractor {
type: 'lstm_mobilenet_v1_fpn'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
negative_class_weight: 2.0
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 32
width: 32
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}
}"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(model_text_proto, pipeline_config)
configs = {}
configs['model'] = pipeline_config.model
configs['lstm_model'] = pipeline_config.Extensions[
internal_pipeline_pb2.lstm_model]
return configs
def _get_data_augmentation_preprocessor_proto(self):
preprocessor_text_proto = """
random_horizontal_flip {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
return preprocessor_proto
def _create_training_dict(self, tensor_dict):
image_dict = {}
all_dict = {}
all_dict['batch'] = tensor_dict.pop('batch')
for i, _ in enumerate(tensor_dict[fields.InputDataFields.image]):
for key, val in tensor_dict.items():
image_dict[key] = val[i]
image_dict[fields.InputDataFields.image] = tf.to_float(
tf.expand_dims(image_dict[fields.InputDataFields.image], 0))
suffix = str(i)
for key, val in image_dict.items():
all_dict[key + suffix] = val
return all_dict
def _get_input_proto(self, input_reader):
return """
external_input_reader {
[lstm_object_detection.protos.GoogleInputReader.google_input_reader] {
%s: {
input_path: '{0}'
data_type: TF_SEQUENCE_EXAMPLE
video_length: 4
}
}
}
""" % input_reader
def test_video_input_reader(self):
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(
self._get_input_proto('tf_record_video_input_reader'),
input_reader_proto)
configs = self._get_model_configs_from_proto()
tensor_dict = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1)
all_dict = self._create_training_dict(tensor_dict)
self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape)
self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1])
def test_build_with_data_augmentation(self):
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(
self._get_input_proto('tf_record_video_input_reader'),
input_reader_proto)
configs = self._get_model_configs_from_proto()
data_augmentation_options = [
preprocessor_builder.build(
self._get_data_augmentation_preprocessor_proto())
]
tensor_dict = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1,
data_augmentation_options=data_augmentation_options)
all_dict = self._create_training_dict(tensor_dict)
self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape)
self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1])
def test_raises_error_without_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
configs = self._get_model_configs_from_proto()
with self.assertRaises(ValueError):
_ = seq_dataset_builder.build(
input_reader_proto,
configs['model'],
configs['lstm_model'],
unroll_length=1)
if __name__ == '__main__':
tf.test.main()
| 33.434629 | 81 | 0.577679 |
d151ff6390424787fd5aeb7815b3f96f08a97468 | 3,899 | py | Python | ubimaior/commands.py | forthelols/ubimaior | 30ee55673022942e1cdf5c3a005696122ddbf231 | [
"Apache-2.0",
"MIT"
] | null | null | null | ubimaior/commands.py | forthelols/ubimaior | 30ee55673022942e1cdf5c3a005696122ddbf231 | [
"Apache-2.0",
"MIT"
] | 316 | 2018-04-22T09:27:23.000Z | 2022-02-28T00:54:47.000Z | ubimaior/commands.py | forthelols/ubimaior | 30ee55673022942e1cdf5c3a005696122ddbf231 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Command line tools to view, query and manipulate hierarchical configurations."""
import collections
import itertools
import click
import ubimaior
import ubimaior.configurations
import ubimaior.formats
# TODO: Add options to override 'schema' and 'scopes'
@click.group()
@click.option(
"--configuration",
show_default=True,
type=click.Path(exists=True, dir_okay=False),
help="Configuration file for ubimaior.",
)
@click.option(
"--format",
"fmt",
type=click.Choice(ubimaior.formats.FORMATTERS),
help="Format of the configuration files.",
)
@click.pass_context
def main(ctx, configuration, fmt):
"""Manages hierarchical configuration files"""
# Set defaults from a configuration file (needed). If the file is not passed from
# the command line, the default is to iteratively search for .ubimaior.yaml starting from the
# present working directory and proceeding up to root.
if not configuration:
try:
configuration = ubimaior.configurations.search_file_in_path(".ubimaior.yaml")
except IOError as exc:
raise click.ClickException("ubimaior configuration " + str(exc))
ctx.ensure_object(dict)
ctx.obj["configuration"] = configuration
ubimaior.configurations.setup_from_file(configuration)
if fmt:
ctx.obj["format"] = fmt
ubimaior.configurations.set_default_format(fmt)
@main.command()
@click.option(
"--validate",
type=click.BOOL,
default=False,
show_default=True,
is_flag=True,
help="Validates the configuration against its schema.",
)
@click.option(
"--blame",
type=click.BOOL,
default=False,
show_default=True,
is_flag=True,
help="Show provenance of each attribute or value in the configuration.",
)
@click.argument("name")
def show(validate, blame, name):
"""Display the merged configuration files."""
cfg = ubimaior.load(name)
settings = ubimaior.configurations.retrieve_settings()
if validate:
schema = settings["schema"]
if not schema:
raise click.ClickException("validation schema not found")
ubimaior.configurations.validate(cfg, schema)
formatter = ubimaior.formats.FORMATTERS[settings.format]
styles = None
if settings.format in {"yaml", "json"}:
styles = collections.defaultdict(lambda: lambda x: click.style(x, bold=True))
attribute = ubimaior.formats.TokenTypes.ATTRIBUTE
styles[attribute] = lambda x: click.style(x, fg="yellow", bold=True)
cfg_lines, provenance = formatter.pprint(cfg, formatters=styles)
if blame:
scopes = format_provenance(provenance)
formatted_cfg_str = [p + line for p, line in zip(scopes, cfg_lines)]
formatted_cfg_str = "\n".join(formatted_cfg_str)
else:
formatted_cfg_str = "\n".join(x for x in cfg_lines)
click.echo_via_pager(formatted_cfg_str)
def format_provenance(provenance):
"""Format the provenance in a form that is ready to be displayed.
Args:
provenance (list): list of scopes
Returns:
list of formatted string (one for each provenance item)
"""
# Construct a color map for the scopes
colors = ["red", "green", "blue", "magenta", "cyan", "white", "black"]
items = sorted(set(itertools.chain.from_iterable(provenance)))
color_map = dict(zip(items, colors))
# Style all the scopes according to the colormap
scopes = [
click.style("[[", bold=True)
+ ",".join(click.style(x, fg=color_map[x]) for x in scope)
+ click.style("]]", bold=True)
+ " "
for scope in provenance
]
raw_lengths = [len(",".join(x)) for x in provenance]
max_width = max(raw_lengths)
spaces_to_add = [max_width - x for x in raw_lengths]
return [val + " " * spacer + "|" for val, spacer in zip(scopes, spaces_to_add)]
| 31.699187 | 97 | 0.674532 |
9ebf9455d47452b610940fec6a73a154a7760b84 | 9,970 | py | Python | Map_Match.py | NeowithU/Trajectory | d575aa9182cb1c7c155e4a0ec067a37bbb617ffd | [
"MIT"
] | null | null | null | Map_Match.py | NeowithU/Trajectory | d575aa9182cb1c7c155e4a0ec067a37bbb617ffd | [
"MIT"
] | null | null | null | Map_Match.py | NeowithU/Trajectory | d575aa9182cb1c7c155e4a0ec067a37bbb617ffd | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
__author__ = 'Neo'
import os
import math
import datetime
import utilities
import unicodecsv
INER_DATA_DIR = "Intermediate"
GRIDE_FILE = 'grids_dict'
TEST_FOLDER = 'B61962'
LONGITUDE_POSITION_IN_CSV = 2
LATITUDE_POSITION_IN_CSV = 3
RADIUS = 6371000
MAXDIST = 99999999
STEP = 0.02
# Map_Match 只有一个match方法,输入为切割好的轨迹所在文件夹的名称
class Map_Match:
__min_lat = 0
__max_lat = 0
__min_lon = 0
__max_lon = 0
__num_lat = 0
__num_lon = 0
__num_grids = 0
__grids = {}
# 输入是文件夹名称
# 文件夹内每个文件是剪好的一条轨迹
# 循环打开每个文件,对每个文件做轨迹匹配
# 匹配完在每个csv文件每行末尾增加字段:路段名称,道路类型,匹配距离
# 路段名称的格式是 分块id_道路id_分块内部序号,其中道路id与osm提供的id是一致的.
def match(self, folder_name):
os.chdir('Raw/' + folder_name)
for file in os.listdir('.'):
self.__match_per_freight(file, self.__get_output_file_name(file))
os.chdir('..')
os.chdir('..')
def __init__(self):
s_time = datetime.datetime.now()
self.__get_range_of_map()
e_time = datetime.datetime.now()
cost_time = e_time - s_time
log = "get_range_of_map cost %s\n" % str(cost_time)
utilities.write_log('matching.log', log)
s_time = datetime.datetime.now()
self.__get_grids()
e_time = datetime.datetime.now()
cost_time = e_time - s_time
log = "get_grids cost %s\n" % str(cost_time)
utilities.write_log('matching.log', log)
def __get_output_file_name(self, input_file_name):
name = input_file_name.split('.')
name[0] += '+'
return name[0] + '.' + name[1]
def __match_per_freight(self, input_file, output_file):
rows_list = []
with open(input_file) as input_csv:
reader = unicodecsv.reader(input_csv)
for row in reader:
point = self.__construct_point(float(row[LATITUDE_POSITION_IN_CSV]),
float(row[LONGITUDE_POSITION_IN_CSV]))
matched_segment, segment_type, distance = self.__match_point_naive(point)
row.extend([matched_segment, segment_type, distance])
rows_list.append(row)
if not os.path.exists(output_file):
f = open(output_file, 'w')
f.close()
with open(output_file, 'a') as output_csv:
writer = unicodecsv.writer(output_csv)
writer.writerows(rows_list)
def __construct_point(self, x, y):
point = dict()
point["x"] = x
point["y"] = y
return point
def __match_point_naive(self, point):
point_id = self.__find_grid_id(point["x"], point["y"])
neighbor_grid = self.__find_neighbor(point_id)
min_dist = MAXDIST
min_route = None
min_type = ''
for grid_id in neighbor_grid:
routes = self.__grids[str(grid_id)]
for route in routes:
dist = self.__cal_point_route(point, route)
if dist < min_dist:
min_route = route.keys()[0]
min_type = route.values()[0]['highway']
min_dist = dist
return min_route, min_type, min_dist
def __get_grids(self):
self.__grids = utilities.read_json(GRIDE_FILE, INER_DATA_DIR)
def __find_grid_id(self, x, y):
loc_x = int((x - self.__min_lat) / STEP)
if loc_x == self.__num_lat:
loc_x -= 1
loc_y = int((y - self.__min_lon) / STEP)
if loc_y == self.__num_lon:
loc_y -= 1
loc = loc_x * self.__num_lon + loc_y
return loc
def __get_range_of_map(self):
range_of_map = utilities.read_json('map_info', INER_DATA_DIR)
self.__min_lat, self.__max_lat, self.__min_lon, \
self.__max_lon, self.__num_lat, self.__num_lon, self.__num_grids = range_of_map
def __cal_probe_distance(self, s_lat, s_lon, e_lat, e_lon):
s_lat = math.radians(s_lat)
s_lon = math.radians(s_lon)
e_lat = math.radians(e_lat)
e_lon = math.radians(e_lon)
theta_lat = s_lat - e_lat
theta_long = s_lon - e_lon
first = pow(math.sin(theta_lat / 2.0), 2)
second = math.cos(s_lat) * math.cos(e_lat) * pow(math.sin(theta_long / 2.0), 2)
angle = 2 * math.asin(math.sqrt(first + second))
return math.floor(RADIUS * angle + 0.5)
def __find_neighbor(self, grid_id):
right_up = self.__num_lon - 1
left_down = self.__num_lon * (self.__num_lat - 1)
right_down = self.__num_lon * self.__num_lat - 1
up = range(1, right_up)
down = range(left_down + 1, right_down)
left = range(self.__num_lon, left_down, self.__num_lon)
right = range(self.__num_lon + right_up, right_down, self.__num_lon)
if grid_id in up:
ret_id = self.__find_neighbor_up(grid_id)
elif grid_id in down:
ret_id = self.__find_neighbor_down(grid_id)
elif grid_id in left:
ret_id = self.__find_neighbor_left(grid_id)
elif grid_id in right:
ret_id = self.__find_neighbor_right(grid_id)
elif grid_id == 0:
ret_id = self.__find_neighbor_left_up(grid_id)
elif grid_id == left_down:
ret_id = self.__find_neighbor_left_down(grid_id)
elif grid_id == right_up:
ret_id = self.__find_neighbor_right_up(grid_id)
elif grid_id == right_down:
ret_id = self.__find_neighbor_right_down(grid_id)
else:
ret_id = self.__find_neighbor_inside(grid_id)
return ret_id
def __cal_point_route(self, point, segment):
s_x = float(segment.values()[0]["snode"][0])
s_y = float(segment.values()[0]["snode"][1])
e_x = float(segment.values()[0]["enode"][0])
e_y = float(segment.values()[0]["enode"][1])
p_x, p_y = self.__get_project_point(point, s_x, s_y, e_x, e_y)
if (p_x - s_x) * (p_x - e_x) < 0 and (p_y - s_y) * (p_y - e_y) < 0:
return self.__cal_probe_distance(point["x"], point["y"], p_x, p_y)
else:
return min(self.__cal_probe_distance(point["x"], point["y"], s_x, s_y),
self.__cal_probe_distance(point["x"], point["y"], e_x, e_y))
def __get_project_point(self, point, x1, y1, x2, y2):
x0 = point["x"]
y0 = point["y"]
fenzi = (x1-x0) * (x1-x2) + (y1-y0) * (y1-y2)
fenmu = math.pow(x1-x2, 2) + math.pow(y1-y2, 2)
if fenmu == 0.0:
return x1, y1
temp = fenzi / fenmu
ret_x = x1 + temp * (x2-x1)
ret_y = y1 + temp * (y2-y1)
return ret_x, ret_y
def __find_neighbor_left_up(self, grid_id):
ret_id = list()
ret_id.append(grid_id)
ret_id.append(1)
ret_id.append(self.__num_lon)
ret_id.append(self.__num_lon + 1)
return ret_id
def __find_neighbor_right_up(self, grid_id):
ret_id = list()
ret_id.append(grid_id)
ret_id.append(grid_id - 1)
ret_id.append(grid_id + self.__num_lon)
ret_id.append(grid_id + self.__num_lon - 1)
return ret_id
def __find_neighbor_left_down(self, grid_id):
ret_id = list()
ret_id.append(grid_id)
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id - self.__num_lon + 1)
ret_id.append(grid_id+1)
return ret_id
def __find_neighbor_right_down(self, grid_id):
ret_id = list()
ret_id.append(grid_id)
ret_id.append(grid_id - self.__num_lon - 1)
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id - 1)
return ret_id
def __find_neighbor_up(self, grid_id):
ret_id = list()
ret_id.append(grid_id - 1)
ret_id.append(grid_id)
ret_id.append(grid_id+1)
ret_id.append(grid_id + self.__num_lon - 1)
ret_id.append(grid_id + self.__num_lon)
ret_id.append(grid_id + self.__num_lon + 1)
return ret_id
def __find_neighbor_down(self, grid_id):
ret_id = list()
ret_id.append(grid_id - 1)
ret_id.append(grid_id)
ret_id.append(grid_id + 1)
ret_id.append(grid_id - self.__num_lon - 1)
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id - self.__num_lon + 1)
return ret_id
def __find_neighbor_left(self, grid_id):
ret_id = list()
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id)
ret_id.append(grid_id + self.__num_lon)
ret_id.append(grid_id - self.__num_lon + 1)
ret_id.append(grid_id + 1)
ret_id.append(grid_id + self.__num_lon + 1)
return ret_id
def __find_neighbor_right(self, grid_id):
ret_id = list()
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id)
ret_id.append(grid_id + self.__num_lon)
ret_id.append(grid_id - self.__num_lon - 1)
ret_id.append(grid_id - 1)
ret_id.append(grid_id + self.__num_lon - 1)
return ret_id
def __find_neighbor_inside(self, grid_id):
ret_id = list()
ret_id.append(grid_id - 1)
ret_id.append(grid_id)
ret_id.append(grid_id + 1)
ret_id.append(grid_id - self.__num_lon - 1)
ret_id.append(grid_id - self.__num_lon)
ret_id.append(grid_id - self.__num_lon + 1)
ret_id.append(grid_id + self.__num_lon - 1)
ret_id.append(grid_id + self.__num_lon)
ret_id.append(grid_id + self.__num_lon + 1)
return ret_id
if __name__ == "__main__":
map_matching = Map_Match()
utilities.write_log('matching.log', '\n')
s_time = datetime.datetime.now()
map_matching.match(TEST_FOLDER)
e_time = datetime.datetime.now()
cost_time = e_time - s_time
log = "Map matching cost %s\n" % str(cost_time)
utilities.write_log('matching.log', log)
# map_matching.match('B61962')
| 35.35461 | 89 | 0.607121 |
18838dee89bfee526fe0ef285d6eb83b1e1b4e57 | 167 | py | Python | koila/interfaces/components/meminfo.py | techthiyanes/koila | b665482ff99a02bfeeceaa1323589fb89495a30c | [
"MIT"
] | null | null | null | koila/interfaces/components/meminfo.py | techthiyanes/koila | b665482ff99a02bfeeceaa1323589fb89495a30c | [
"MIT"
] | null | null | null | koila/interfaces/components/meminfo.py | techthiyanes/koila | b665482ff99a02bfeeceaa1323589fb89495a30c | [
"MIT"
] | null | null | null | from typing import Protocol
from .datatypes import DataType
from .multidim import MultiDimensional
class MemoryInfo(MultiDimensional, DataType, Protocol):
pass
| 18.555556 | 55 | 0.814371 |
0c7d8819125e36c3bef0c46f0c977fa9b3ccbea8 | 4,860 | py | Python | docs/conf.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | 7 | 2021-08-18T04:02:25.000Z | 2022-01-28T19:22:42.000Z | docs/conf.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# relaton_bib documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import relaton_bib
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'relaton-bib'
copyright = "2021, Aliaksandr Babrykovich"
author = "Aliaksandr Babrykovich"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = relaton_bib.__version__
# The full version, including alpha/beta/rc tags.
release = relaton_bib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'relaton_bibdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'relaton_bib.tex',
'relaton-bib Documentation',
'Aliaksandr Babrykovich', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'relaton_bib',
'relaton-bib Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'relaton_bib',
'relaton-bib Documentation',
author,
'relaton_bib',
'One line description of project.',
'Miscellaneous'),
]
| 29.815951 | 77 | 0.688477 |
7551bdeedb6cd27685141b815d4a2990a4c0dcbd | 5,391 | py | Python | tests/presentation/_rpc.py | WshgL/pyuavcan | f2b8d2d743f09ad4af8d62fc96d8f0b013aeb8b0 | [
"MIT"
] | null | null | null | tests/presentation/_rpc.py | WshgL/pyuavcan | f2b8d2d743f09ad4af8d62fc96d8f0b013aeb8b0 | [
"MIT"
] | null | null | null | tests/presentation/_rpc.py | WshgL/pyuavcan | f2b8d2d743f09ad4af8d62fc96d8f0b013aeb8b0 | [
"MIT"
] | null | null | null | # Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import typing
import asyncio
import pytest
import pyuavcan
from .conftest import TransportFactory
pytestmark = pytest.mark.asyncio
async def _unittest_slow_presentation_rpc(
compiled: typing.List[pyuavcan.dsdl.GeneratedPackageInfo], transport_factory: TransportFactory
) -> None:
assert compiled
import uavcan.register
import uavcan.primitive
import uavcan.time
from pyuavcan.transport import Priority, Timestamp
asyncio.get_running_loop().slow_callback_duration = 5.0
tran_a, tran_b, _ = transport_factory(123, 42)
assert tran_a.local_node_id == 123
assert tran_b.local_node_id == 42
pres_a = pyuavcan.presentation.Presentation(tran_a)
pres_b = pyuavcan.presentation.Presentation(tran_b)
assert pres_a.transport is tran_a
server = pres_a.get_server_with_fixed_service_id(uavcan.register.Access_1_0)
assert server is pres_a.get_server_with_fixed_service_id(uavcan.register.Access_1_0)
client0 = pres_b.make_client_with_fixed_service_id(uavcan.register.Access_1_0, 123)
client1 = pres_b.make_client_with_fixed_service_id(uavcan.register.Access_1_0, 123)
client_dead = pres_b.make_client_with_fixed_service_id(uavcan.register.Access_1_0, 111)
assert client0 is not client1
assert client0._maybe_impl is not None # pylint: disable=protected-access
assert client1._maybe_impl is not None # pylint: disable=protected-access
assert client0._maybe_impl is client1._maybe_impl # pylint: disable=protected-access
assert client0._maybe_impl is not client_dead._maybe_impl # pylint: disable=protected-access
assert client0._maybe_impl.proxy_count == 2 # pylint: disable=protected-access
assert client_dead._maybe_impl is not None # pylint: disable=protected-access
assert client_dead._maybe_impl.proxy_count == 1 # pylint: disable=protected-access
with pytest.raises(TypeError):
# noinspection PyTypeChecker
pres_a.make_publisher_with_fixed_subject_id(uavcan.register.Access_1_0) # type: ignore
with pytest.raises(TypeError):
# noinspection PyTypeChecker
pres_a.make_subscriber_with_fixed_subject_id(uavcan.register.Access_1_0) # type: ignore
assert client0.response_timeout == pytest.approx(1.0)
client0.response_timeout = 0.1
assert client0.response_timeout == pytest.approx(0.1)
client0.priority = Priority.SLOW
last_request = uavcan.register.Access_1_0.Request()
last_metadata = pyuavcan.presentation.ServiceRequestMetadata(
timestamp=Timestamp(0, 0), priority=Priority(0), transfer_id=0, client_node_id=0
)
response: typing.Optional[uavcan.register.Access_1_0.Response] = None
async def server_handler(
request: uavcan.register.Access_1_0.Request, metadata: pyuavcan.presentation.ServiceRequestMetadata
) -> typing.Optional[uavcan.register.Access_1_0.Response]:
nonlocal last_metadata
print("SERVICE REQUEST:", request, metadata)
assert isinstance(request, server.dtype.Request) and isinstance(request, uavcan.register.Access_1_0.Request)
assert repr(last_request) == repr(request)
last_metadata = metadata
return response
server.serve_in_background(server_handler)
last_request = uavcan.register.Access_1_0.Request(
name=uavcan.register.Name_1_0("Hello world!"),
value=uavcan.register.Value_1_0(string=uavcan.primitive.String_1_0("Profanity will not be tolerated")),
)
result_a = await client0.call(last_request)
assert result_a is None, "Expected to fail"
assert last_metadata.client_node_id == 42
assert last_metadata.transfer_id == 0
assert last_metadata.priority == Priority.SLOW
client0.response_timeout = 2.0 # Increase the timeout back because otherwise the test fails on slow systems.
last_request = uavcan.register.Access_1_0.Request(name=uavcan.register.Name_1_0("security.uber_secure_password"))
response = uavcan.register.Access_1_0.Response(
timestamp=uavcan.time.SynchronizedTimestamp_1_0(123456789),
mutable=True,
persistent=False,
value=uavcan.register.Value_1_0(string=uavcan.primitive.String_1_0("hunter2")),
)
client0.priority = Priority.IMMEDIATE
result_b = (await client0.call(last_request))[0] # type: ignore
assert repr(result_b) == repr(response)
assert last_metadata.client_node_id == 42
assert last_metadata.transfer_id == 1
assert last_metadata.priority == Priority.IMMEDIATE
server.close()
client0.close()
client1.close()
client_dead.close()
# Double-close has no effect (no error either):
server.close()
client0.close()
client1.close()
client_dead.close()
# Allow the tasks to finish
await asyncio.sleep(0.1)
# Make sure the transport sessions have been closed properly, this is supremely important.
assert list(pres_a.transport.input_sessions) == []
assert list(pres_b.transport.input_sessions) == []
assert list(pres_a.transport.output_sessions) == []
assert list(pres_b.transport.output_sessions) == []
pres_a.close()
pres_b.close()
await asyncio.sleep(1) # Let all pending tasks finalize properly to avoid stack traces in the output.
| 42.117188 | 117 | 0.750696 |
ea1870d84423572e880e2bb427e88af888cf671a | 79,765 | py | Python | indrops.py | kenichi-shimada/indrops_langenau | 03f30bf034f22f208a97fe68eab61131ea4e50bd | [
"Xnet",
"Linux-OpenIB",
"X11"
] | 1 | 2022-01-23T19:50:02.000Z | 2022-01-23T19:50:02.000Z | indrops.py | kenichi-shimada/indrops_langenau | 03f30bf034f22f208a97fe68eab61131ea4e50bd | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | indrops.py | kenichi-shimada/indrops_langenau | 03f30bf034f22f208a97fe68eab61131ea4e50bd | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | import os, subprocess
import itertools
import operator
from collections import defaultdict, OrderedDict
import errno
# cPickle is a faster version of pickle that isn't installed in python3
# inserted try statement just in case
try:
import cPickle as pickle
except:
import pickle
from io import BytesIO
import numpy as np
import re
import shutil
import gzip
# product: product(A, B) returns the same as ((x,y) for x in A for y in B).
# combination: Return r length subsequences of elements from the input iterable.
from itertools import product, combinations
import time
import yaml
import pysam
import tempfile
import string
from contextlib import contextmanager
# -----------------------
#
# Helper functions
#
# -----------------------
def string_hamming_distance(str1, str2):
"""
Fast hamming distance over 2 strings known to be of same length.
In information theory, the Hamming distance between two strings of equal
length is the number of positions at which the corresponding symbols
are different.
eg "karolin" and "kathrin" is 3.
"""
return sum(itertools.imap(operator.ne, str1, str2))
def rev_comp(seq):
tbl = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}
return ''.join(tbl[s] for s in seq[::-1])
def to_fastq(name, seq, qual):
"""
Return string that can be written to fastQ file
"""
return '@'+name+'\n'+seq+'\n+\n'+qual+'\n'
def to_fastq_lines(bc, umi, seq, qual, read_name=''):
"""
Return string that can be written to fastQ file
"""
reformated_name = read_name.replace(':', '_')
name = '%s:%s:%s' % (bc, umi, reformated_name)
return to_fastq(name, seq, qual)
def from_fastq(handle):
while True:
name = next(handle).rstrip()[1:] #Read name
seq = next(handle).rstrip() #Read seq
next(handle) #+ line
qual = next(handle).rstrip() #Read qual
if not name or not seq or not qual:
break
yield name, seq, qual
def seq_neighborhood(seq, n_subs=1):
"""
Given a sequence, yield all sequences within n_subs substitutions of
that sequence by looping through each combination of base pairs within
each combination of positions.
"""
for positions in combinations(range(len(seq)), n_subs):
# yields all unique combinations of indices for n_subs mutations
for subs in product(*("ATGCN",)*n_subs):
# yields all combinations of possible nucleotides for strings of length
# n_subs
seq_copy = list(seq)
for p, s in zip(positions, subs):
seq_copy[p] = s
yield ''.join(seq_copy)
def build_barcode_neighborhoods(barcode_file, expect_reverse_complement=True):
"""
Given a set of barcodes, produce sequences which can unambiguously be
mapped to these barcodes, within 2 substitutions. If a sequence maps to
multiple barcodes, get rid of it. However, if a sequences maps to a bc1 with
1change and another with 2changes, keep the 1change mapping.
"""
# contains all mutants that map uniquely to a barcode
clean_mapping = dict()
# contain single or double mutants
mapping1 = defaultdict(set)
mapping2 = defaultdict(set)
#Build the full neighborhood and iterate through barcodes
with open(barcode_file, 'rU') as f:
# iterate through each barcode (rstrip cleans string of whitespace)
for line in f:
barcode = line.rstrip()
if expect_reverse_complement:
barcode = rev_comp(line.rstrip())
# each barcode obviously maps to itself uniquely
clean_mapping[barcode] = barcode
# for each possible mutated form of a given barcode, either add
# the origin barcode into the set corresponding to that mutant or
# create a new entry for a mutant not already in mapping1
# eg: barcodes CATG and CCTG would be in the set for mutant CTTG
# but only barcode CATG could generate mutant CANG
for n in seq_neighborhood(barcode, 1):
mapping1[n].add(barcode)
# same as above but with double mutants
for n in seq_neighborhood(barcode, 2):
mapping2[n].add(barcode)
# take all single-mutants and find those that could only have come from one
# specific barcode
for k, v in mapping1.items():
if k not in clean_mapping:
if len(v) == 1:
clean_mapping[k] = list(v)[0]
for k, v in mapping2.items():
if k not in clean_mapping:
if len(v) == 1:
clean_mapping[k] = list(v)[0]
del mapping1
del mapping2
return clean_mapping
def check_dir(path):
    """Create directory `path` (including parents) if it does not exist.

    A pre-existing directory is silently accepted; any other OSError
    (permissions, bad path component, ...) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        # Only swallow the "already exists" case.
        if err.errno == errno.EEXIST:
            pass
        else:
            raise
def print_to_stderr(msg, newline=True):
    """
    Wrapper to eventually write to stderr.

    `msg` is coerced to str; a trailing newline is appended unless
    `newline` is False.
    """
    tail = '\n' if newline else ''
    sys.stderr.write(str(msg) + tail)
def worker_filter(iterable, worker_index, total_workers):
    """Lazily yield this worker's round-robin share of `iterable`.

    Item at position i is yielded iff (i - worker_index) is a multiple of
    `total_workers`, i.e. workers 0..total_workers-1 partition the input.
    """
    for position, item in enumerate(iterable):
        if (position - worker_index) % total_workers == 0:
            yield item
class FIFO():
    """
    A context manager that owns a named pipe (FIFO) on disk.

    When no explicit `filename` is supplied, a throwaway temporary
    directory is created to host the pipe; both the pipe and the
    directory are removed on exit.
    """
    def __init__(self, filename="", suffix="", prefix="tmp_fifo_dir", dir=None):
        if not filename:
            # No caller-supplied path: host the pipe inside a fresh temp dir.
            self.tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
            filename = os.path.join(self.tmpdir, 'fifo')
        self.filename = filename

    def __enter__(self):
        # Remove any stale file occupying our path, then create the pipe.
        if os.path.exists(self.filename):
            os.unlink(self.filename)
        os.mkfifo(self.filename)
        return self

    def __exit__(self, type, value, traceback):
        os.remove(self.filename)
        # Only clean up the temp dir if we created one in __init__.
        if hasattr(self, 'tmpdir'):
            shutil.rmtree(self.tmpdir)
# -----------------------
#
# Core objects
#
# -----------------------
class IndropsProject():
    """
    Top-level description of an inDrops sequencing project.

    Parses the project YAML file and materializes one IndropsLibrary per
    library, plus one filtering/demultiplexing "part" per
    (run, split-affix, library) combination. Lazy cached properties expose
    tool paths, pipeline parameters, barcode mutation neighborhoods and
    stable barcode names derived from the configuration.

    Fix in this revision: the cache guard of
    `gel_barcode1_revcomp_list_neighborhood` tested the wrong attribute
    name (`_gel_barcode1_list_neighborhood`), so the expensive neighborhood
    was rebuilt on every access; it now checks the attribute it assigns.
    """

    def __init__(self, project_yaml_file_handle):
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load if configs come from
        # outside. Left unchanged to avoid altering accepted YAML tags.
        self.yaml = yaml.load(project_yaml_file_handle)
        self.name = self.yaml['project_name']
        self.project_dir = self.yaml['project_dir']
        # library name -> IndropsLibrary ; run name -> list of parts.
        self.libraries = OrderedDict()
        self.runs = OrderedDict()
        for run in self.yaml['sequencing_runs']:
            """
            After filtering, each sequencing run generates between 1 ... X files with filtered reads.
            X = (N x M)
               - N: The run is often split into several files (a typical NextSeq run is split into L001,
                    L002, L003, L004 which match different lanes, but this can also be done artificially.
               - M: The same run might contain several libraries. The demultiplexing can be handled by the script (or externally).
                    If demultiplexing is done externally, there will be a different .fastq file for each library.
            """
            version = run['version']
            # Filename template for filtered parts; placeholders are filled
            # per (library, run, split) below.
            filtered_filename = '{library_name}_{run_name}'
            if run['version'] == 'v3':
                filtered_filename += '_{library_index}'
            # Prepare to iterate over run split into several files
            if 'split_affixes' in run:
                filtered_filename += '_{split_affix}'
                split_affixes = run['split_affixes']
            else:
                split_affixes = ['']
            filtered_filename += '.fastq'
            # Prepare to iterate over libraries
            if 'libraries' in run:
                run_libraries = run['libraries']
            elif 'library_name' in run:
                run_libraries = [{'library_name' : run['library_name'], 'library_prefix':''}]
            else:
                raise Exception('No library name or libraries specified.')
            if run['version']=='v1' or run['version']=='v2':
                # v1/v2 runs are already demultiplexed: one fastq pair per
                # (split, library); each becomes a V1V2Filtering part.
                for affix in split_affixes:
                    for lib in run_libraries:
                        lib_name = lib['library_name']
                        if lib_name not in self.libraries:
                            self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])
                        else:
                            # A library spanning several runs must keep one chemistry version.
                            assert self.libraries[lib_name].version == run['version']
                        # v1 and v2 swap which read carries the biological sequence.
                        if version == 'v1':
                            metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))
                            bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))
                        elif version == 'v2':
                            metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))
                            bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))
                        filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix, library_name=lib_name)
                        filtered_part_path = os.path.join(self.project_dir, lib_name, 'filtered_parts', filtered_part_filename)
                        part = V1V2Filtering(filtered_fastq_filename=filtered_part_path,
                                             project=self,
                                             bioread_filename=bioread_filename,
                                             metaread_filename=metaread_filename,
                                             run_name=run['name'],
                                             library_name=lib_name,
                                             part_name=affix)
                        if run['name'] not in self.runs:
                            self.runs[run['name']] = []
                        self.runs[run['name']].append(part)
                        self.libraries[lib_name].parts.append(part)
            elif run['version'] == 'v3':
                # v3 runs still need demultiplexing: one V3Demultiplexer part
                # per split, shared by all libraries in the run.
                for affix in split_affixes:
                    filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix,
                                                                      library_name='{library_name}', library_index='{library_index}')
                    part_filename = os.path.join(self.project_dir, '{library_name}', 'filtered_parts', filtered_part_filename)
                    input_filename = os.path.join(run['dir'], run['fastq_path'].format(split_affix=affix, read='{read}'))
                    part = V3Demultiplexer(run['libraries'], project=self, part_filename=part_filename, input_filename=input_filename, run_name=run['name'], part_name=affix)
                    if run['name'] not in self.runs:
                        self.runs[run['name']] = []
                    self.runs[run['name']].append(part)
                    for lib in run_libraries:
                        lib_name = lib['library_name']
                        lib_index = lib['library_index']
                        if lib_name not in self.libraries:
                            self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])
                        self.libraries[lib_name].parts.append(part.libraries[lib_index])

    @property
    def paths(self):
        """Lazily-built anonymous namespace of executable/reference paths.

        Defaults are read from default_parameters.yaml next to this script,
        overridden by the project YAML's 'paths' section, then extended
        with derived tool and helper-script locations.
        """
        if not hasattr(self, '_paths'):
            script_dir = os.path.dirname(os.path.realpath(__file__))
            #Read defaults
            with open(os.path.join(script_dir, 'default_parameters.yaml'), 'r') as f:
                paths = yaml.load(f)['paths']
            # Update with user provided values
            paths.update(self.yaml['paths'])
            paths['python'] = os.path.join(paths['python_dir'], 'python')
            paths['java'] = os.path.join(paths['java_dir'], 'java')
            paths['bowtie'] = os.path.join(paths['bowtie_dir'], 'bowtie')
            paths['samtools'] = os.path.join(paths['samtools_dir'], 'samtools')
            paths['trimmomatic_jar'] = os.path.join(script_dir, 'bins', 'trimmomatic-0.33.jar')
            paths['rsem_tbam2gbam'] = os.path.join(paths['rsem_dir'], 'rsem-tbam2gbam')
            paths['rsem_prepare_reference'] = os.path.join(paths['rsem_dir'], 'rsem-prepare-reference')
            # Expose dict entries as attributes of a throwaway object.
            self._paths = type('Paths_anonymous_object',(object,),paths)()
            self._paths.trim_polyA_and_filter_low_complexity_reads_py = os.path.join(script_dir, 'trim_polyA_and_filter_low_complexity_reads.py')
            self._paths.quantify_umifm_from_alignments_py = os.path.join(script_dir, 'quantify_umifm_from_alignments.py')
            self._paths.count_barcode_distribution_py = os.path.join(script_dir, 'count_barcode_distribution.py')
            self._paths.gel_barcode1_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode1_list.txt')
            self._paths.gel_barcode2_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode2_list.txt')
        return self._paths

    @property
    def parameters(self):
        """Pipeline parameters: defaults overlaid, section by section, with
        the project YAML's optional 'parameters' block."""
        if not hasattr(self, '_parameters'):
            #Read defaults
            with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_parameters.yaml'), 'r') as f:
                self._parameters = yaml.load(f)['parameters']
            # Update with user provided values
            if 'parameters' in self.yaml:
                for k, d in self.yaml['parameters'].items():
                    self._parameters[k].update(d)
        return self._parameters

    @property
    def gel_barcode1_revcomp_list_neighborhood(self):
        """Cached mutation neighborhood of reverse-complemented BC1 barcodes."""
        # BUGFIX: previously checked hasattr(self, '_gel_barcode1_list_neighborhood')
        # (an attribute that is never set), so the neighborhood was rebuilt
        # on every access. Check the attribute we actually assign.
        if not hasattr(self, '_gel_barcode1_revcomp_list_neighborhood'):
            self._gel_barcode1_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode1_list, True)
        return self._gel_barcode1_revcomp_list_neighborhood

    @property
    def gel_barcode2_revcomp_list_neighborhood(self):
        """Cached mutation neighborhood of reverse-complemented BC2 barcodes."""
        if not hasattr(self, '_gel_barcode2_revcomp_list_neighborhood'):
            self._gel_barcode2_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, True)
        return self._gel_barcode2_revcomp_list_neighborhood

    @property
    def gel_barcode2_list_neighborhood(self):
        """Cached mutation neighborhood of BC2 barcodes (forward orientation)."""
        if not hasattr(self, '_gel_barcode2_list_neighborhood'):
            self._gel_barcode2_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, False)
        return self._gel_barcode2_list_neighborhood

    @property
    def stable_barcode_names(self):
        """Maps observed barcode pairs ('bc1-bc2') to stable human-readable
        names ('bcAAAA', 'bcAAAB', ...) for each chemistry version.
        v1/v2 share one naming scheme; v3 uses a different bc1 orientation."""
        if not hasattr(self, '_stable_barcode_names'):
            with open(self.paths.gel_barcode1_list) as f:
                rev_bc1s = [rev_comp(line.rstrip()) for line in f]
            with open(self.paths.gel_barcode2_list) as f:
                bc2s = [line.rstrip() for line in f]
            rev_bc2s = [rev_comp(bc2) for bc2 in bc2s]
            # V1, V2 names:
            v1v2_names = {}
            barcode_iter = product(rev_bc1s, rev_bc2s)
            name_iter = product(string.ascii_uppercase, repeat=4)
            for barcode, name in zip(barcode_iter, name_iter):
                v1v2_names['-'.join(barcode)] = 'bc' + ''.join(name)
            # V3 names:
            v3_names = {}
            barcode_iter = product(bc2s, rev_bc2s)
            name_iter = product(string.ascii_uppercase, repeat=4)
            for barcode, name in zip(barcode_iter, name_iter):
                v3_names['-'.join(barcode)] = 'bc' + ''.join(name)
            self._stable_barcode_names = {
                'v1' : v1v2_names,
                'v2' : v1v2_names,
                'v3': v3_names,
            }
        return self._stable_barcode_names

    def build_transcriptome(self, gzipped_genome_softmasked_fasta_filename, gzipped_transcriptome_gtf):
        """Build the bowtie/RSEM reference from a gzipped soft-masked genome
        FASTA and a gzipped GTF: filter the GTF to accepted biotypes, embed
        gene names into transcript ids, run rsem-prepare-reference, and
        record soft-masked (lowercase) regions of each transcript.
        """
        import pyfasta
        index_dir = os.path.dirname(self.paths.bowtie_index)
        check_dir(index_dir)
        # Drop the trailing '.gz' to get the uncompressed genome path.
        genome_filename = os.path.join(index_dir, '.'.join(gzipped_genome_softmasked_fasta_filename.split('.')[:-1]))
        gtf_filename = os.path.join(index_dir, gzipped_transcriptome_gtf.split('/')[-1])
        gtf_prefix = '.'.join(gtf_filename.split('.')[:-2])
        gtf_with_genenames_in_transcript_id = gtf_prefix + '.annotated.gtf'
        # accepted_gene_biotypes_for_NA_transcripts = set(["protein_coding","IG_V_gene","IG_J_gene","TR_J_gene","TR_D_gene","TR_V_gene","IG_C_gene","IG_D_gene","TR_C_gene"])
        accepted_gene_biotypes_for_NA_transcripts = set(["protein_coding","IG_V_gene","IG_J_gene","IG_C_pseudogene","IG_J_pseudogene","IG_pseudogene","IG_V_pseudogene","lincRNA","processed_transcript","TR_J_gene","TR_D_gene","TR_V_gene","IG_C_gene","IG_D_gene","TR_C_gene"])
        # Kept for reference: the TSL-based filter below is currently disabled.
        tsl1_or_tsl2_strings = ['transcript_support_level "1"', 'transcript_support_level "1 ', 'transcript_support_level "2"', 'transcript_support_level "2 ']
        tsl_NA = 'transcript_support_level "NA'
        print_to_stderr('Filtering GTF')
        # NOTE(review): iterating Popen stdout as text assumes Python 2
        # (py3 would yield bytes and the 'in' tests would fail).
        output_gtf = open(gtf_with_genenames_in_transcript_id, 'w')
        for line in subprocess.Popen(["gzip", "--stdout", "-d", gzipped_transcriptome_gtf], stdout=subprocess.PIPE).stdout:
            if 'transcript_id' not in line:
                continue
            line_valid_for_output = False
            # for string in tsl1_or_tsl2_strings:
            #     if string in line:
            #         line_valid_for_output = True
            #         break
            gene_biotype = re.search(r'gene_biotype \"(.*?)\";', line)
            if gene_biotype and gene_biotype.group(1) in accepted_gene_biotypes_for_NA_transcripts:
                line_valid_for_output = True
            if line_valid_for_output:
                gene_name = re.search(r'gene_name \"(.*?)\";', line)
                if gene_name:
                    gene_name = gene_name.group(1)
                    # Rewrite 'transcript_id "X";' as 'transcript_id "X|GENE";'.
                    out_line = re.sub(r'(?<=transcript_id ")(.*?)(?=";)', r'\1|'+gene_name, line)
                    output_gtf.write(out_line)
        output_gtf.close()
        print_to_stderr('Gunzipping Genome')
        p_gzip = subprocess.Popen(["gzip", "-dfc", gzipped_genome_softmasked_fasta_filename], stdout=open(genome_filename, 'wb'))
        if p_gzip.wait() != 0:
            # BUGFIX: this guards the gzip step; the old message blamed
            # rsem-prepare-reference, which was misleading.
            raise Exception(" Error in gunzipping genome ")
        # p_rsem = subprocess.Popen([self.paths.rsem_prepare_reference, '--bowtie', '--bowtie-path', self.paths.bowtie_dir,
        #                 '--gtf', gtf_with_genenames_in_transcript_id,
        #                 '--polyA', '--polyA-length', '5', genome_filename, self.paths.bowtie_index])
        # Above code errors out with rsem 1.2.4. Made the following changes: removed --bowtie and --polyA
        p_rsem = subprocess.Popen([self.paths.rsem_prepare_reference, '--bowtie-path', self.paths.bowtie_dir,
                        '--gtf', gtf_with_genenames_in_transcript_id,
                        '--polyA-length', '5', genome_filename, self.paths.bowtie_index])
        if p_rsem.wait() != 0:
            raise Exception(" Error in rsem-prepare reference ")
        print_to_stderr('Finding soft masked regions in transcriptome')
        transcripts_fasta = pyfasta.Fasta(self.paths.bowtie_index + '.transcripts.fa')
        soft_mask = {}
        for tx, seq in transcripts_fasta.items():
            seq = str(seq)
            # Lowercase runs mark repeat-masked intervals; store (start, end) pairs.
            soft_mask[tx] = set((m.start(), m.end()) for m in re.finditer(r'[atcgn]+', seq))
        # NOTE(review): text-mode 'w' works for Python 2 protocol-0 pickles;
        # Python 3 would require 'wb'.
        with open(self.paths.bowtie_index + '.soft_masked_regions.pickle', 'w') as out:
            pickle.dump(soft_mask, out)
class IndropsLibrary():
def __init__(self, name='', project=None, version=''):
self.project = project
self.name = name
self.parts = []
self.version = version
self.paths = {}
for lib_dir in ['filtered_parts', 'quant_dir']:
dir_path = os.path.join(self.project.project_dir, self.name, lib_dir)
check_dir(dir_path)
self.paths[lib_dir] = dir_path
self.paths = type('Paths_anonymous_object',(object,),self.paths)()
self.paths.abundant_barcodes_names_filename = os.path.join(self.project.project_dir, self.name, 'abundant_barcodes.pickle')
self.paths.filtering_statistics_filename = os.path.join(self.project.project_dir, self.name, self.name+'.filtering_stats.csv')
self.paths.barcode_abundance_histogram_filename = os.path.join(self.project.project_dir, self.name, self.name+'.barcode_abundance.png')
self.paths.missing_quants_filename = os.path.join(self.project.project_dir, self.name, self.name+'.missing_barcodes.pickle')
@property
def barcode_counts(self):
if not hasattr(self, '_barcode_counts'):
self._barcode_counts = defaultdict(int)
for part in self.parts:
for k, v in part.part_barcode_counts.items():
self._barcode_counts[k] += v
return self._barcode_counts
@property
def abundant_barcodes(self):
if not hasattr(self, '_abundant_barcodes'):
with open(self.paths.abundant_barcodes_names_filename) as f:
self._abundant_barcodes = pickle.load(f)
return self._abundant_barcodes
def sorted_barcode_names(self, min_reads=0):
return [name for bc,(name,abun) in sorted(self.abundant_barcodes.items(), key=lambda i:-i[1][1]) if abun>min_reads]
    def identify_abundant_barcodes(self, make_histogram=True, absolute_min_reads=10000):
        """
        Identify which barcodes are above the absolute minimal abundance,
        and make a histogram summarizing the barcode distribution.

        Side effects: caches/persists the abundant-barcode mapping (pickle),
        writes a CSV filtering summary, and optionally saves a PNG histogram.
        """
        # Keep every barcode whose total read count clears the hard floor.
        keep_barcodes = []
        for k, v in self.barcode_counts.items():
            if v > absolute_min_reads:
                keep_barcodes.append(k)
        abundant_barcodes = {}
        print_to_stderr(" %d barcodes above absolute minimum threshold" % len(keep_barcodes))
        for bc in keep_barcodes:
            # raw barcode -> (stable human-readable name, read count)
            abundant_barcodes[bc] = (self.project.stable_barcode_names[self.version][bc], self.barcode_counts[bc])
        # Cache in memory and persist for the `abundant_barcodes` property.
        # NOTE(review): text-mode 'w' is fine for Python 2 protocol-0 pickles;
        # Python 3 would require 'wb'.
        self._abundant_barcodes = abundant_barcodes
        with open(self.paths.abundant_barcodes_names_filename, 'w') as f:
            pickle.dump(abundant_barcodes, f)
        # Create table about the filtering process
        with open(self.paths.filtering_statistics_filename, 'w') as filtering_stats:
            header = ['Run', 'Part', 'Valid Structure', 'Surviving Trimmomatic', 'Surviving polyA trim and complexity filter']
            # Structure-failure columns differ between chemistry versions.
            if self.version == 'v1' or self.version == 'v2':
                structure_parts = ['W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'Umi_error']
                header += ['W1 in R2', 'empty read', 'No W1 in R1', 'No polyT', 'BC1', 'BC2', 'UMI_contains_N']
            elif self.version == 'v3':
                structure_parts = ['Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']
                header += ['Invalid BC1', 'Invalid BC2', 'UMI_contains_N']
            trimmomatic_parts = ['dropped']
            header += ['Dropped by Trimmomatic']
            complexity_filter_parts = ['rejected_because_too_short', 'rejected_because_complexity_too_low']
            header += ['Too short after polyA trim', 'Read complexity too low']
            filtering_stats.write(','.join(header)+'\n')
            # One CSV row per filtered part, pulled from its YAML metrics file.
            for part in self.parts:
                with open(part.filtering_metrics_filename) as f:
                    part_stats = yaml.load(f)
                # line = [part.run_name, part.part_name, part_stats['read_structure']['Total'], part_stats['read_structure']['Valid'], part_stats['trimmomatic']['output'], part_stats['complexity_filter']['output']]
                line = [part.run_name, part.part_name, part_stats['read_structure']['Valid'], part_stats['trimmomatic']['output'], part_stats['complexity_filter']['output']]
                line += [part_stats['read_structure'][k] for k in structure_parts]
                line += [part_stats['trimmomatic'][k] for k in trimmomatic_parts]
                line += [part_stats['complexity_filter'][k] for k in complexity_filter_parts]
                line = [str(l) for l in line]
                filtering_stats.write(','.join(line)+'\n')
        print_to_stderr("Created Library filtering summary:")
        print_to_stderr("  " + self.paths.filtering_statistics_filename)
        tot_reads = 0
        for bc, count in self.barcode_counts.items():
            tot_reads += count
        # NOTE(review): bare Python 2 print statement writing to stdout --
        # looks like leftover debug output; the rest of the file routes
        # messages through print_to_stderr.
        print tot_reads
        # Make the histogram figure
        if not make_histogram:
            return
        # Histogram of reads-per-barcode, weighted so that each bar reflects
        # the number of reads (not barcodes) falling in that abundance bin.
        count_freq = defaultdict(int)
        for bc, count in self.barcode_counts.items():
            count_freq[count] += 1
        # NOTE(review): passing .keys()/.values() directly to np.array relies
        # on Python 2 returning lists (py3 would need list(...) wrappers).
        x = np.array(count_freq.keys())
        y = np.array(count_freq.values())
        w = x*y
        # need to use non-interactive Agg backend (no display required)
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        ax = plt.subplot(111)
        ax.hist(x, bins=np.logspace(0, 6, 50), weights=w)
        ax.set_xscale('log')
        ax.set_xlabel('Reads per barcode')
        ax.set_ylabel('#reads coming from bin')
        plt.savefig(self.paths.barcode_abundance_histogram_filename)
        plt.close()
        print_to_stderr("Created Barcode Abundance Histogram at:")
        print_to_stderr("  " + self.paths.barcode_abundance_histogram_filename)
def sort_reads_by_barcode(self, index=0):
self.parts[index].sort_reads_by_barcode(self.abundant_barcodes)
def get_reads_for_barcode(self, barcode, run_filter=[]):
for part in self.parts:
if (not run_filter) or (part.run_name in run_filter):
for line in part.get_reads_for_barcode(barcode):
yield line
    def quantify_expression(self, analysis_prefix='', min_reads=750, min_counts=0, total_workers=1, worker_index=0, no_bam=False, run_filter=[]):
        """
        Quantify expression for this worker's share of abundant barcodes,
        resuming cleanly after interruption, then (unless no_bam) merge and
        index the per-barcode genomic BAMs into one worker-level BAM.

        Barcodes are round-robin partitioned across `total_workers`; output
        files are suffixed with (worker_index, total_workers).
        """
        if analysis_prefix:
            analysis_prefix += '.'
        sorted_barcode_names = self.sorted_barcode_names(min_reads=min_reads)
        # Identify which barcodes belong to this worker
        barcodes_for_this_worker = []
        i = worker_index
        while i < len(sorted_barcode_names):
            barcodes_for_this_worker.append(sorted_barcode_names[i])
            i += total_workers
        counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
        ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
        ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
        metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
        ignored_for_output_filename = counts_output_filename+'.ignored'
        merged_bam_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers))
        merged_bam_index_filename = merged_bam_filename + '.bai'
        get_barcode_genomic_bam_filename = lambda bc: os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix, bc))
        # If we wanted BAM output, and the merge BAM and merged BAM index are present, then we are done
        if (not no_bam) and (os.path.isfile(merged_bam_filename) and os.path.isfile(merged_bam_index_filename)):
            print_to_stderr('Indexed, merged BAM file detected for this worker. Done.')
            return
        # Otherwise, we have to check what we need to quantify
        """
        Function to determine which barcodes this quantification worker might have already quantified.
        This tries to handle interruption during any step of the process.
        The worker is assigned some list of barcodes L. For every barcode:
            - It could have been quantified
                - but have less than min_counts ---> so it got written to `ignored` file.
                - and quantification succeeded, meaning
                    1. there is a line (ending in \n) in the `metrics` file.
                    2. there is a line (ending in \n) in the `quantification` file.
                    3. there (could) be a line (ending in \n) in the `ambiguous quantification` file.
                    4. there (could) be a line (ending in \n) in the `ambiguous quantification partners` file.
                    [If any line doesn't end in \n, then likely the output of that line was interrupted!]
                    5. (If BAM output is desired) There should be a sorted genomic BAM
                    6. (If BAM output is desired) There should be a sorted genomic BAM index
        """
        succesfully_previously_quantified = set()
        previously_ignored = set()
        header_written = False
        if os.path.isfile(counts_output_filename) and os.path.isfile(metrics_output_filename):
            # Load in list of ignored barcodes
            if os.path.isfile(ignored_for_output_filename):
                with open(ignored_for_output_filename, 'r') as f:
                    previously_ignored = set([line.rstrip().split('\t')[0] for line in f])
            # Load the metrics data into memory
            # (It should be fairly small, this is fast and safe)
            # Lines not ending in '\n' were interrupted mid-write and are dropped.
            existing_metrics_data = {}
            with open(metrics_output_filename, 'r') as f:
                existing_metrics_data = dict((line.partition('\t')[0], line) for line in f if line[-1]=='\n')
            # Quantification data could be large, read it line by line and output it back for barcodes that have a matching metrics line.
            with open(counts_output_filename, 'r') as in_counts, \
                 open(counts_output_filename+'.tmp', 'w') as tmp_counts, \
                 open(metrics_output_filename+'.tmp', 'w') as tmp_metrics:
                for line in in_counts:
                    # The first worker is responsible for writing the header.
                    # Make sure we carry that over
                    if (not header_written) and (worker_index==0):
                        tmp_counts.write(line)
                        tmp_metrics.write(existing_metrics_data['Barcode'])
                        header_written = True
                        continue
                    # This line has incomplete output, skip it.
                    # (This can only happen with the last line)
                    if line[-1] != '\n':
                        continue
                    barcode = line.partition('\t')[0]
                    # Skip barcode if we don't have existing metrics data
                    if barcode not in existing_metrics_data:
                        continue
                    # Check if the required BAM files exist
                    barcode_genomic_bam_filename = get_barcode_genomic_bam_filename(barcode)
                    bam_files_required_and_present = no_bam or (os.path.isfile(barcode_genomic_bam_filename) and os.path.isfile(barcode_genomic_bam_filename+'.bai'))
                    if not bam_files_required_and_present:
                        continue
                    # This passed all the required checks, write the line to the temporary output files
                    tmp_counts.write(line)
                    tmp_metrics.write(existing_metrics_data[barcode])
                    succesfully_previously_quantified.add(barcode)
            # Atomically replace the old outputs with the validated subset.
            shutil.move(counts_output_filename+'.tmp', counts_output_filename)
            shutil.move(metrics_output_filename+'.tmp', metrics_output_filename)
            # For any 'already quantified' barcode, make sure we also copy over the ambiguity data
            with open(ambig_counts_output_filename, 'r') as in_f, \
                 open(ambig_counts_output_filename+'.tmp', 'w') as tmp_f:
                # Worker 0 owns the header line of the ambiguity table too.
                f_first_line = (worker_index == 0)
                for line in in_f:
                    if f_first_line:
                        tmp_f.write(line)
                        f_first_line = False
                        continue
                    if (line.partition('\t')[0] in succesfully_previously_quantified) and (line[-1]=='\n'):
                        tmp_f.write(line)
            shutil.move(ambig_counts_output_filename+'.tmp', ambig_counts_output_filename)
            with open(ambig_partners_output_filename, 'r') as in_f, \
                 open(ambig_partners_output_filename+'.tmp', 'w') as tmp_f:
                for line in in_f:
                    if (line.partition('\t')[0] in succesfully_previously_quantified) and (line[-1]=='\n'):
                        tmp_f.write(line)
            shutil.move(ambig_partners_output_filename+'.tmp', ambig_partners_output_filename)
        # Whatever remains after subtracting completed/ignored barcodes.
        barcodes_to_quantify = [bc for bc in barcodes_for_this_worker if (bc not in succesfully_previously_quantified and bc not in previously_ignored)]
        print_to_stderr("""[%s] This worker assigned %d out of %d total barcodes.""" % (self.name, len(barcodes_for_this_worker), len(sorted_barcode_names)))
        if len(barcodes_for_this_worker)-len(barcodes_to_quantify) > 0:
            print_to_stderr("""    %d previously quantified, %d previously ignored, %d left for this run.""" % (len(succesfully_previously_quantified), len(previously_ignored), len(barcodes_to_quantify)))
        print_to_stderr(('{0:<14.12}'.format('Prefix') if analysis_prefix else '') + '{0:<14.12}{1:<9}'.format("Library", "Barcode"), False)
        print_to_stderr("{0:<8s}{1:<8s}{2:<10s}".format("Reads", "Counts", "Ambigs"))
        for barcode in barcodes_to_quantify:
            self.quantify_expression_for_barcode(barcode,
                counts_output_filename, metrics_output_filename,
                ambig_counts_output_filename, ambig_partners_output_filename,
                no_bam=no_bam, write_header=(not header_written) and (worker_index==0), analysis_prefix=analysis_prefix,
                min_counts = min_counts, run_filter=run_filter)
            # After the first barcode, the header (if any) has been emitted.
            header_written = True
        print_to_stderr("Per barcode quantification completed.")
        if no_bam:
            return
        #Gather list of barcodes with output from the metrics file
        genomic_bams = []
        with open(metrics_output_filename, 'r') as f:
            for line in f:
                bc = line.partition('\t')[0]
                if bc == 'Barcode': #This is the line in the header
                    continue
                genomic_bams.append(get_barcode_genomic_bam_filename(bc))
        print_to_stderr("Merging BAM output.")
        # NOTE(review): `except X, err` is Python 2-only syntax (py3: `as err`).
        try:
            subprocess.check_output([self.project.paths.samtools, 'merge', '-f', merged_bam_filename]+genomic_bams, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, err:
            print_to_stderr("   CMD: %s" % str(err.cmd)[:400])
            print_to_stderr("   stdout/stderr:")
            print_to_stderr(err.output)
            raise Exception(" === Error in samtools merge === ")
        print_to_stderr("Indexing merged BAM output.")
        try:
            subprocess.check_output([self.project.paths.samtools, 'index', merged_bam_filename], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, err:
            print_to_stderr("   CMD: %s" % str(err.cmd)[:400])
            print_to_stderr("   stdout/stderr:")
            print_to_stderr(err.output)
            raise Exception(" === Error in samtools index === ")
        # NOTE(review): looks like leftover debug output to stdout.
        print(genomic_bams)
        # Per-barcode BAMs are folded into the merged BAM; remove them.
        for filename in genomic_bams:
            os.remove(filename)
            os.remove(filename + '.bai')
    def quantify_expression_for_barcode(self, barcode, counts_output_filename, metrics_output_filename,
            ambig_counts_output_filename, ambig_partners_output_filename,
            min_counts=0, analysis_prefix='', no_bam=False, write_header=False, run_filter=[]):
        """
        Quantify a single barcode: stream its reads into bowtie, pipe the
        alignments into the UMI quantification script (which appends to the
        shared output files), then optionally convert the transcript BAM to
        a sorted, indexed genomic BAM.

        Returns False when no BAM output was requested, True otherwise.
        Raises on any failure of bowtie, the quantification script, RSEM or
        samtools.
        """
        print_to_stderr(('{0:<14.12}'.format(analysis_prefix) if analysis_prefix else '') + '{0:<14.12}{1:<9}'.format(self.name, barcode), False)
        unaligned_reads_output = os.path.join(self.paths.quant_dir, '%s%s.unaligned.fastq' % (analysis_prefix,barcode))
        aligned_bam = os.path.join(self.paths.quant_dir, '%s%s.aligned.bam' % (analysis_prefix,barcode))
        # Bowtie command
        # NOTE(review): '-shmem' (single dash) looks suspicious -- bowtie's
        # documented flag is '--shmem'; confirm bowtie accepts this spelling.
        bowtie_cmd = [self.project.paths.bowtie, self.project.paths.bowtie_index, '-q', '-',
            '-p', '1', '-a', '--best', '--strata', '--chunkmbs', '1000', '--norc', '--sam',
            '-shmem', #should sometimes reduce memory usage...?
            '-m', str(self.project.parameters['bowtie_arguments']['m']),
            '-n', str(self.project.parameters['bowtie_arguments']['n']),
            '-l', str(self.project.parameters['bowtie_arguments']['l']),
            '-e', str(self.project.parameters['bowtie_arguments']['e']),
            ]
        if self.project.parameters['output_arguments']['output_unaligned_reads_to_other_fastq']:
            bowtie_cmd += ['--un', unaligned_reads_output]
        # Quantification command
        script_dir = os.path.dirname(os.path.realpath(__file__))
        quant_cmd = [self.project.paths.python, self.project.paths.quantify_umifm_from_alignments_py,
            '-m', str(self.project.parameters['umi_quantification_arguments']['m']),
            '-u', str(self.project.parameters['umi_quantification_arguments']['u']),
            '-d', str(self.project.parameters['umi_quantification_arguments']['d']),
            '--min_non_polyA', str(self.project.parameters['umi_quantification_arguments']['min_non_polyA']),
            '--library', str(self.name),
            '--barcode', str(barcode),
            '--counts', counts_output_filename,
            '--metrics', metrics_output_filename,
            '--ambigs', ambig_counts_output_filename,
            '--ambig-partners', ambig_partners_output_filename,
            '--min-counts', str(min_counts),
            ]
        if not no_bam:
            quant_cmd += ['--bam', aligned_bam]
        if write_header:
            quant_cmd += ['--write-header']
        if self.project.parameters['umi_quantification_arguments']['split-ambigs']:
            quant_cmd.append('--split-ambig')
        if self.project.parameters['output_arguments']['filter_alignments_to_softmasked_regions']:
            quant_cmd += ['--soft-masked-regions', self.project.paths.bowtie_index + '.soft_masked_regions.pickle']
        # Spawn processes: reads -> bowtie -> quantification script.
        p1 = subprocess.Popen(bowtie_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p2 = subprocess.Popen(quant_cmd, stdin=p1.stdout, stderr=subprocess.PIPE)
        # Feed this barcode's reads to bowtie, then close to signal EOF.
        for line in self.get_reads_for_barcode(barcode, run_filter=run_filter):
            p1.stdin.write(line)
        p1.stdin.close()
        if p1.wait() != 0:
            print_to_stderr('\n')
            print_to_stderr(p1.stderr.read())
            raise Exception('\n === Error on bowtie ===')
        if p2.wait() != 0:
            print_to_stderr(p2.stderr.read())
            raise Exception('\n === Error on Quantification Script ===')
        # The quantification script reports its per-barcode stats on stderr.
        print_to_stderr(p2.stderr.read(), False)
        if no_bam:
            # We are done here
            return False
        if not os.path.isfile(aligned_bam):
            raise Exception("\n === No aligned bam was output for barcode %s ===" % barcode)
        genomic_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.bam' % (analysis_prefix,barcode))
        sorted_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix,barcode))
        # NOTE(review): `except X, err` is Python 2-only syntax (py3: `as err`).
        try:
            subprocess.check_output([self.project.paths.rsem_tbam2gbam, self.project.paths.bowtie_index, aligned_bam, genomic_bam], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, err:
            print_to_stderr("   CMD: %s" % str(err.cmd)[:100])
            print_to_stderr("   stdout/stderr:")
            print_to_stderr(err.output)
            raise Exception(" === Error in rsem-tbam2gbam === ")
        try:
            subprocess.check_output([self.project.paths.samtools, 'sort', '-o', sorted_bam, genomic_bam], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, err:
            print_to_stderr("   CMD: %s" % str(err.cmd)[:100])
            print_to_stderr("   stdout/stderr:")
            print_to_stderr(err.output)
            raise Exception(" === Error in samtools sort === ")
        try:
            subprocess.check_output([self.project.paths.samtools, 'index', sorted_bam], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError, err:
            print_to_stderr("   CMD: %s" % str(err.cmd)[:100])
            print_to_stderr("   stdout/stderr:")
            print_to_stderr(err.output)
            raise Exception(" === Error in samtools index === ")
        # Intermediate transcript-space and unsorted genomic BAMs are no
        # longer needed once the sorted, indexed BAM exists.
        os.remove(aligned_bam)
        os.remove(genomic_bam)
        return True
def aggregate_counts(self, analysis_prefix='', process_ambiguity_data=False):
    """Concatenate per-worker quantification output into library-level files.

    Looks in `quant_dir` for files named '<prefix>worker<i>_<total>.*',
    verifies that output from every worker index 0..total-1 is present,
    concatenates counts / metrics / ignored-barcode (and, optionally,
    ambiguity) files, gzips the results, and finally merges and indexes any
    per-worker BAM files with samtools.

    Args:
        analysis_prefix: optional prefix distinguishing parallel analyses;
            a '.' separator is appended when non-empty.
        process_ambiguity_data: also aggregate the ambig counts/partners files.

    Raises:
        Exception: if workers disagree on total_workers, if any worker's
            output is missing, or indirectly via samtools failures (those are
            only logged, not raised).
    """
    if analysis_prefix:
        analysis_prefix += '.'
    # Strip the prefix and extension, leaving the 'worker<i>_<total>' stem.
    quant_output_files = [fn[len(analysis_prefix):].split('.')[0] for fn in os.listdir(self.paths.quant_dir) if ('worker' in fn and fn[:len(analysis_prefix)]==analysis_prefix)]
    # Drop the literal 'worker' prefix, leaving '<i>_<total>'.
    worker_names = [w[6:] for w in quant_output_files]
    worker_indices = set(int(w.split('_')[0]) for w in worker_names)
    total_workers = set(int(w.split('_')[1]) for w in worker_names)
    # Every file must agree on the total number of workers, otherwise the
    # outputs come from incompatible runs.
    if len(total_workers) > 1:
        raise Exception("""Quantification for library %s, prefix '%s' was run with different numbers of total_workers.""" % (self.name, analysis_prefix))
    total_workers = list(total_workers)[0]
    # Refuse to aggregate a partial result set.
    missing_workers = []
    for i in range(total_workers):
        if i not in worker_indices:
            missing_workers.append(i)
    if missing_workers:
        missing_workers = ','.join([str(i) for i in sorted(missing_workers)])
        raise Exception("""Output from workers %s (total %d) is missing. """ % (missing_workers, total_workers))
    # Destination paths for the aggregated, library-level outputs.
    aggregated_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.counts.tsv')
    aggregated_quant_metrics_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.quant_metrics.tsv')
    aggregated_ignored_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ignored_barcodes.txt')
    aggregated_bam_output = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.bam')
    aggregated_ambig_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_counts.tsv')
    aggregated_ambig_partners_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_partners.tsv')
    agg_counts = open(aggregated_counts_filename, mode='w')
    agg_metrics = open(aggregated_quant_metrics_filename, mode='w')
    agg_ignored = open(aggregated_ignored_filename, mode='w')
    if process_ambiguity_data:
        agg_ambigs = open(aggregated_ambig_counts_filename, mode='w')
        agg_ambig_partners = open(aggregated_ambig_partners_filename, mode='w')
    # NOTE(review): these three accumulators are never read or updated below —
    # they look like leftovers from an earlier header-skipping implementation.
    end_of_counts_header = 0
    end_of_metrics_header = 0
    end_of_ambigs_header = 0
    print_to_stderr(' Concatenating output from all workers.')
    for worker_index in range(total_workers):
        counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
        ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
        ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
        metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
        ignored_for_output_filename = counts_output_filename+'.ignored'
        # Counts
        with open(counts_output_filename, 'r') as f:
            shutil.copyfileobj(f, agg_counts)
        # Metrics
        with open(metrics_output_filename, 'r') as f:
            shutil.copyfileobj(f, agg_metrics)
        # Ignored (optional per worker: only exists when reads were ignored)
        if os.path.isfile(counts_output_filename+'.ignored'):
            with open(counts_output_filename+'.ignored', 'r') as f:
                shutil.copyfileobj(f, agg_ignored)
        if process_ambiguity_data:
            with open(ambig_counts_output_filename, 'r') as f:
                shutil.copyfileobj(f, agg_ambigs)
            with open(ambig_partners_output_filename, 'r') as f:
                shutil.copyfileobj(f, agg_ambig_partners)
    print_to_stderr(' GZIPping concatenated output.')
    # Close before gzipping so buffered data is flushed to disk first.
    agg_counts.close()
    subprocess.Popen(['gzip', '-f', aggregated_counts_filename]).wait()
    agg_metrics.close()
    subprocess.Popen(['gzip', '-f', aggregated_quant_metrics_filename]).wait()
    print_to_stderr('Aggregation completed in %s.gz' % aggregated_counts_filename)
    if process_ambiguity_data:
        agg_ambigs.close()
        subprocess.Popen(['gzip', '-f', aggregated_ambig_counts_filename]).wait()
        agg_ambig_partners.close()
        subprocess.Popen(['gzip', '-f', aggregated_ambig_partners_filename]).wait()
    # Merge whatever per-worker BAMs exist (quantify may have been run with
    # --no-bam, in which case there is nothing to merge).
    target_bams = [os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers)) for worker_index in range(total_workers)]
    target_bams = [t for t in target_bams if os.path.isfile(t)]
    if target_bams:
        print_to_stderr(' Merging BAM files.')
        p1 = subprocess.Popen([self.project.paths.samtools, 'merge', '-f', aggregated_bam_output]+target_bams, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        if p1.wait() == 0:
            print_to_stderr(' Indexing merged BAM file.')
            p2 = subprocess.Popen([self.project.paths.samtools, 'index', aggregated_bam_output], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            if p2.wait() == 0:
                # Only delete worker BAMs once the merged BAM is indexed.
                for filename in target_bams:
                    os.remove(filename)
                    os.remove(filename + '.bai')
            else:
                print_to_stderr(" === Error in samtools index ===")
                print_to_stderr(p2.stderr.read())
        else:
            print_to_stderr(" === Error in samtools merge ===")
            print_to_stderr(p1.stderr.read())
    # Per-worker intermediate cleanup is intentionally disabled for now.
    # print_to_stderr('Deleting per-worker counts files.')
    # for worker_index in range(total_workers):
    #     counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
    #     os.remove(counts_output_filename)
    #     ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
    #     os.remove(ambig_counts_output_filename)
    #     ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
    #     os.remove(ambig_partners_output_filename)
    #     metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
    #     os.remove(metrics_output_filename)
    #     ignored_for_output_filename = counts_output_filename+'.ignored'
    #     os.remove(ignored_for_output_filename)
class LibrarySequencingPart():
    """One (run, part) slice of a sequencing library.

    Owns the filtered FastQ for this slice plus its derived artifacts:
    a pickle of per-barcode read counts, a filtering-metrics YAML, and a
    barcode-sorted gzipped FastQ with a byte-offset index that allows the
    reads of a single barcode to be streamed without scanning the file.
    """

    def __init__(self, filtered_fastq_filename=None, project=None, run_name='', library_name='', part_name=''):
        self.project = project
        self.run_name = run_name
        self.part_name = part_name
        self.library_name = library_name
        self.filtered_fastq_filename = filtered_fastq_filename
        # Derived file names live alongside the filtered FastQ.
        self.barcode_counts_pickle_filename = filtered_fastq_filename + '.counts.pickle'
        # NOTE(review): this joins without a '.' before 'metrics.yaml',
        # producing e.g. 'foo.fastqmetrics.yaml' — presumably intentional,
        # but worth confirming against downstream readers.
        self.filtering_metrics_filename = '.'.join(filtered_fastq_filename.split('.')[:-1]) + 'metrics.yaml'
        self.sorted_gzipped_fastq_filename = filtered_fastq_filename + '.sorted.fastq.gz'
        self.sorted_gzipped_fastq_index_filename = filtered_fastq_filename + '.sorted.fastq.gz.index.pickle'

    @property
    def is_filtered(self):
        # Filtering is complete when both the FastQ and its counts pickle
        # exist. Cached on first access.
        if not hasattr(self, '_is_filtered'):
            self._is_filtered = os.path.exists(self.filtered_fastq_filename) and os.path.exists(self.barcode_counts_pickle_filename)
        return self._is_filtered

    @property
    def is_sorted(self):
        # Sorting is complete when both the sorted FastQ and its index exist.
        if not hasattr(self, '_is_sorted'):
            self._is_sorted = os.path.exists(self.sorted_gzipped_fastq_filename) and os.path.exists(self.sorted_gzipped_fastq_index_filename)
        return self._is_sorted

    @property
    def part_barcode_counts(self):
        # Lazily unpickle the {barcode: read_count} mapping for this part.
        if not hasattr(self, '_part_barcode_counts'):
            with open(self.barcode_counts_pickle_filename, 'r') as f:
                self._part_barcode_counts = pickle.load(f)
        return self._part_barcode_counts

    @property
    def sorted_index(self):
        # Lazily unpickle {barcode_name: (orig_bc, start, end, length, reads)}.
        if not hasattr(self, '_sorted_index'):
            with open(self.sorted_gzipped_fastq_index_filename, 'r') as f:
                self._sorted_index = pickle.load(f)
        return self._sorted_index

    def contains_library_in_query(self, query_libraries):
        return self.library_name in query_libraries

    def sort_reads_by_barcode(self, abundant_barcodes={}):
        """Rewrite the filtered FastQ grouped by barcode, with a byte index.

        Each abundant barcode's reads are gzipped into an in-memory buffer;
        the buffers are then written back-to-back into one file, recording
        (start, end) byte offsets per barcode so get_reads_for_barcode() can
        seek directly. Reads from non-abundant barcodes are pooled under
        'ignored'.
        """
        # Barcodes sorted by descending abundance (abundant_barcodes maps
        # original barcode -> (new_name, count)).
        sorted_barcodes = [j for j,v in sorted(abundant_barcodes.items(), key=lambda i:-i[1][1])]
        sorted_barcodes = [j for j in sorted_barcodes if j in self.part_barcode_counts]
        # One independent gzip stream per barcode, buffered in memory.
        barcode_buffers = {}
        barcode_gzippers = {}
        for bc in sorted_barcodes + ['ignored']:
            barcode_buffers[bc] = BytesIO()
            barcode_gzippers[bc] = gzip.GzipFile(fileobj=barcode_buffers[bc], mode='wb')
        total_processed_reads = 0
        total_ignored_reads = 0
        bcs_with_data = set()
        bcs_with_tmp_data = set()  # NOTE(review): never populated below.
        barcode_tmp_filename = lambda bc: '%s.%s.tmp.gz' % (self.sorted_gzipped_fastq_filename, bc)
        total_reads = sum(self.part_barcode_counts.values())
        print_to_stderr('Sorting %d reads from %d barcodes above absolute minimum threshold.' % (total_reads, len(abundant_barcodes)))
        with open(self.filtered_fastq_filename, 'r') as input_fastq:
            for name, seq, qual in from_fastq(input_fastq):
                total_processed_reads += 1
                # Filtered read names carry the barcode before the first ':'.
                bc = name.split(':')[0]
                if total_processed_reads%1000000 == 0:
                    print_to_stderr('Read in %.02f percent of all reads (%d)' % (100.*total_processed_reads/total_reads, total_processed_reads))
                if bc in abundant_barcodes:
                    barcode_gzippers[bc].write(to_fastq(name, seq, qual))
                    bcs_with_data.add(bc)
                else:
                    total_ignored_reads += 1
                    barcode_gzippers['ignored'].write(to_fastq(name, seq, qual))
                    bcs_with_data.add('ignored')
        # Flush each barcode's gzip stream contiguously into the output file,
        # recording byte offsets keyed by the barcode's new (renamed) name.
        sorted_output_index = {}
        with open(self.sorted_gzipped_fastq_filename, 'wb') as sorted_output:
            for original_bc in sorted_barcodes + ['ignored']:
                if original_bc != 'ignored':
                    new_bc_name = abundant_barcodes[original_bc][0]
                    barcode_reads_count = self.part_barcode_counts[original_bc]
                else:
                    new_bc_name = 'ignored'
                    barcode_reads_count = total_ignored_reads
                start_pos = sorted_output.tell()
                # Closing finalizes the gzip stream (writes the trailer).
                barcode_gzippers[original_bc].close()
                if original_bc in bcs_with_data:
                    barcode_buffers[original_bc].seek(0)
                    shutil.copyfileobj(barcode_buffers[original_bc], sorted_output)
                    barcode_buffers[original_bc].close()
                end_pos = sorted_output.tell()
                if end_pos > start_pos:
                    sorted_output_index[new_bc_name] = (original_bc, start_pos, end_pos, end_pos-start_pos, barcode_reads_count)
        with open(self.sorted_gzipped_fastq_index_filename, 'w') as f:
            pickle.dump(sorted_output_index, f)

    def get_reads_for_barcode(self, barcode):
        """Yield raw FastQ lines for one barcode from the sorted file.

        Seeks to the barcode's byte range, reads it into memory, and streams
        the decompressed lines.
        """
        if barcode not in self.sorted_index:
            # NOTE(review): explicit `raise StopIteration` inside a generator
            # is Python-2 era; under PEP 479 (Python 3.7+) this becomes a
            # RuntimeError — `return` is the safe spelling.
            raise StopIteration
        original_barcode, start_byte_offset, end_byte_offset, byte_length, barcode_reads = self.sorted_index[barcode]
        with open(self.sorted_gzipped_fastq_filename, 'rb') as sorted_output:
            sorted_output.seek(start_byte_offset)
            byte_buffer = BytesIO(sorted_output.read(byte_length))
        ungzipper = gzip.GzipFile(fileobj=byte_buffer, mode='rb')
        # Relies on next() raising StopIteration at EOF to end the generator
        # (pre-PEP-479 behavior; see note above).
        while True:
            yield next(ungzipper)

    @contextmanager
    def trimmomatic_and_low_complexity_filter_process(self):
        """
        We start 3 processes that are connected with Unix pipes.

        Process 1 - Trimmomatic. Doesn't support stdin/stdout, so we instead use named pipes (FIFOs). It reads from FIFO1, and writes to FIFO2. 
        Process 2 - In line complexity filter, a python script. It reads from FIFO2 (Trimmomatic output) and writes to the ouput file. 
        Process 3 - Indexer that counts the number of reads for every barcode. This reads from stdin, writes the reads to stdout and writes the index as a pickle to stderr.

        When these are done, we start another process to count the results on the FastQ file.

        Yields a writable file handle (FIFO1) into which the caller streams
        raw FastQ; on exit, Trimmomatic/complexity-filter metrics are parsed
        and dumped to the filtering-metrics YAML.
        """
        filtered_dir = os.path.dirname(self.filtered_fastq_filename) #We will use the same directory for creating temporary FIFOs, assuming we have write access.
        self.filtering_statistics_counter = defaultdict(int)
        with FIFO(dir=filtered_dir) as fifo2, open(self.filtered_fastq_filename, 'w') as filtered_fastq_file, open(self.filtered_fastq_filename+'.counts.pickle', 'w') as filtered_index_file:
            low_complexity_filter_cmd = [self.project.paths.python, self.project.paths.trim_polyA_and_filter_low_complexity_reads_py,
                '-input', fifo2.filename,
                '--min-post-trim-length', self.project.parameters['trimmomatic_arguments']['MINLEN'],
                '--max-low-complexity-fraction', str(self.project.parameters['low_complexity_filter_arguments']['max_low_complexity_fraction']),
                ]
            # Python 2 print statement (debug output of the exact command).
            print low_complexity_filter_cmd
            counter_cmd = [self.project.paths.python, self.project.paths.count_barcode_distribution_py]
            # p2 (complexity filter) -> p3 (barcode counter) via pipe; p3
            # writes reads to the filtered FastQ and the index to the pickle.
            p2 = subprocess.Popen(low_complexity_filter_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p3 = subprocess.Popen(counter_cmd, stdin=p2.stdout, stdout=filtered_fastq_file, stderr=filtered_index_file)
            with FIFO(dir=filtered_dir) as fifo1:
                trimmomatic_cmd = [self.project.paths.java, '-Xmx500m', '-jar', self.project.paths.trimmomatic_jar,
                        'SE', '-threads', "1", '-phred33', fifo1.filename, fifo2.filename]
                for arg, val in self.project.parameters['trimmomatic_arguments'].items():
                    trimmomatic_cmd.append('%s:%s' % (arg, val))
                p1 = subprocess.Popen(trimmomatic_cmd, stderr=subprocess.PIPE)
                print trimmomatic_cmd
                # Hand the writable end of FIFO1 to the caller; closing it
                # signals EOF to Trimmomatic.
                fifo1_filehandle = open(fifo1.filename, 'w')
                yield fifo1_filehandle
                fifo1_filehandle.close()
                trimmomatic_stderr = p1.stderr.read().splitlines()
                if trimmomatic_stderr[2] != 'TrimmomaticSE: Completed successfully':
                    # NOTE(review): 'filtered_filename' is undefined here —
                    # this error path would raise NameError instead of the
                    # intended Exception; likely meant
                    # self.filtered_fastq_filename.
                    raise Exception('Trimmomatic did not complete succesfully on %s' % filtered_filename)
                trimmomatic_metrics = trimmomatic_stderr[1].split()
                # ['Input', 'Reads:', #READS, 'Surviving:', #SURVIVING, (%SURVIVING), 'Dropped:', #DROPPED, (%DROPPED)]
                trimmomatic_metrics = {'input' : trimmomatic_metrics[2], 'output': trimmomatic_metrics[4], 'dropped': trimmomatic_metrics[7]}
                p1.wait()
            # The complexity filter emits its metrics as a pickle on stderr.
            complexity_filter_metrics = pickle.load(p2.stderr)
            p2.wait()
            p3.wait()
        print self.filtering_statistics_counter
        filtering_metrics = {
            'read_structure' : dict(self.filtering_statistics_counter),
            'trimmomatic' : trimmomatic_metrics,
            'complexity_filter': complexity_filter_metrics,
        }
        with open(self.filtering_metrics_filename, 'w') as f:
            yaml.dump(dict(filtering_metrics), f, default_flow_style=False)
class V1V2Filtering(LibrarySequencingPart):
    """Read filtering for inDrops version 1/2 libraries.

    V1/V2 runs produce a metadata read (R1: barcodes/W1/UMI/polyT) and a
    biological read (R2). This class weaves the two FastQs, validates the
    barcode structure of R1, and pipes surviving biological reads through
    the Trimmomatic + low-complexity filter pipeline inherited from
    LibrarySequencingPart.
    """

    def __init__(self, bioread_filename=None, metaread_filename=None, *args, **kwargs):
        self.bioread_filename = bioread_filename
        self.metaread_filename = metaread_filename
        LibrarySequencingPart.__init__(self, *args, **kwargs)

    def filter_and_count_reads(self):
        """
        Input the two raw FastQ files
        Output:
            - A single fastQ file that uses the read name to store the barcoding information
            - A pickle of the number of reads originating from each barcode 
        """
        # Relevant paths
        r1_filename, r2_filename = self.metaread_filename, self.bioread_filename

        #Get barcode neighborhoods
        bc1s = self.project.gel_barcode1_revcomp_list_neighborhood
        bc2s = self.project.gel_barcode2_revcomp_list_neighborhood

        # This starts a Trimmomatic process, a low complexity filter process, and will 
        # upon closing, start the barcode distribution counting process.
        last_ping = time.time()
        ping_every_n_reads = 1000000
        # Fixed-width header/row templates for periodic progress logging.
        ping_header = "{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10}{7:>10}{8:>10}{9:>10}"
        ping_header = ping_header.format("Total Reads", "", "Valid Reads", "W1 in R2", "Empty", "No W1", "No polyT", "No BC1", "No BC2", "No UMI")
        ping_template = "{total:12d}    {rate:5.1f} sec/M {Valid:12.1%}{W1_in_R2:10.1%}{empty_read:10.1%}{No_W1:10.1%}{No_polyT:10.1%}{BC1:10.1%}{BC2:10.1%}{Umi_error:10.1%}"
        def print_ping_to_log(last_ping):
            # Seconds per million reads since the previous ping (0 for the
            # final summary call, which passes last_ping=False).
            sec_per_mil = (time.time()-last_ping)/(ping_every_n_reads/10**6) if last_ping else 0.0
            total = self.filtering_statistics_counter['Total']
            if total > 0:
                # NOTE(review): the template reads 'Umi_error' but
                # _process_reads() records failures under 'UMI_error', so
                # that column always shows 0 — key-case mismatch.
                ping_format_data = {k: float(self.filtering_statistics_counter[k])/total for k in ['Valid', 'W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'Umi_error']}
                print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))

        with self.trimmomatic_and_low_complexity_filter_process() as trim_process:
            #Iterate over the weaved reads
            for r_name, r1_seq, r1_qual, r2_seq, r2_qual in self._weave_fastqs(r1_filename, r2_filename):
                # Check if they should be kept
                keep, result = self._process_reads(r1_seq, r2_seq, valid_bc1s=bc1s, valid_bc2s=bc2s)

                # Write the the reads worth keeping
                if keep:
                    bc, umi = result
                    trim_process.write(to_fastq_lines(bc, umi, r2_seq, r2_qual, r_name))
                    self.filtering_statistics_counter['Valid'] += 1
                else:
                    self.filtering_statistics_counter[result] += 1

                # Track speed per M reads
                self.filtering_statistics_counter['Total'] += 1
                if self.filtering_statistics_counter['Total']%(10*ping_every_n_reads) == 1:
                    print_to_stderr(ping_header)
            
                if self.filtering_statistics_counter['Total']%ping_every_n_reads == 0:
                    print_ping_to_log(last_ping)
                    last_ping = time.time()

            print_ping_to_log(False)

        print_to_stderr(self.filtering_statistics_counter)

    def _weave_fastqs(self, r1_fastq, r2_fastq):
        """
        Merge 2 FastQ files by returning paired reads for each.
        Returns only R1_seq, R2_seq and R2_qual.

        Supports plain, gzip- and bzip2-compressed inputs (both files must
        use the same compression).
        """
        is_gz_compressed = False
        is_bz_compressed = False
        if r1_fastq.split('.')[-1] == 'gz' and r2_fastq.split('.')[-1] == 'gz':
            is_gz_compressed = True
        #Added bz2 support VS
        if r1_fastq.split('.')[-1] == 'bz2' and r2_fastq.split('.')[-1] == 'bz2':
            is_bz_compressed = True
        # Decompress Gzips using subprocesses because python gzip is incredibly slow.
        if is_gz_compressed:    
            r1_gunzip = subprocess.Popen("gzip --stdout -d %s" % (r1_fastq), shell=True, stdout=subprocess.PIPE)
            r1_stream = r1_gunzip.stdout
            r2_gunzip = subprocess.Popen("gzip --stdout -d %s" % (r2_fastq), shell=True, stdout=subprocess.PIPE)
            r2_stream = r2_gunzip.stdout
        elif is_bz_compressed:
            r1_bunzip = subprocess.Popen("bzcat %s" % (r1_fastq), shell=True, stdout=subprocess.PIPE)
            r1_stream = r1_bunzip.stdout
            r2_bunzip = subprocess.Popen("bzcat %s" % (r2_fastq), shell=True, stdout=subprocess.PIPE)
            r2_stream = r2_bunzip.stdout
        else:
            r1_stream = open(r1_fastq, 'r')
            r2_stream = open(r2_fastq, 'r')

        while True:
            #Read 4 lines from each FastQ
            name = next(r1_stream).rstrip()[1:].split()[0] #Read name
            r1_seq = next(r1_stream).rstrip() #Read seq
            next(r1_stream) #+ line
            r1_qual = next(r1_stream).rstrip() #Read qual
            
            next(r2_stream) #Read name
            r2_seq = next(r2_stream).rstrip() #Read seq
            next(r2_stream) #+ line
            r2_qual = next(r2_stream).rstrip() #Read qual
            
            # changed to allow for empty reads (caused by adapter trimming)
            if name:
                yield name, r1_seq, r1_qual, r2_seq, r2_qual
            else:
            # if not r1_seq or not r2_seq:
                break

        r1_stream.close()
        r2_stream.close()

    def _process_reads(self, name, read, valid_bc1s={}, valid_bc2s={}):
        """
        Returns either:
            True, (barcode, umi)
                (if read passes filter)
            False, name of filter that failed
                (for stats collection)

        NOTE(review): despite the parameter names, the caller passes the R1
        sequence as `name` and the R2 sequence as `read` (see
        filter_and_count_reads). The mutable-default dicts are read-only here
        so they are safe, but are a known Python pitfall.

        R1 anatomy: BBBBBBBB[BBB]WWWWWWWWWWWWWWWWWWWWWWCCCCCCCCUUUUUUTTTTTTTTTT______________
            B = Barcode1, can be 8, 9, 10 or 11 bases long.
            W = 'W1' sequence, specified below
            C = Barcode2, always 8 bases
            U = UMI, always 6 bases
            T = Beginning of polyT tail.
            _ = Either sequencing survives across the polyT tail, or signal starts dropping off
                (and start being anything, likely with poor quality)
        """

        minimal_polyT_len_on_R1 = 7
        hamming_threshold_for_W1_matching = 3

        w1 = "GAGTGATTGCTTGTGACGCCTT"
        rev_w1 = "AAGGCGTCACAAGCAATCACTC" #Hard-code so we don't recompute on every one of millions of calls
        # If R2 contains rev_W1, this is almost certainly empty library
        if rev_w1 in read:
            return False, 'W1_in_R2'

        # # With reads sufficiently long, we will often see a PolyA sequence in R2. 
        # if polyA in read:
        #     return False, 'PolyA_in_R2'

        # Check for polyT signal at 3' end.
        # 44 is the length of BC1+W1+BC2+UMI, given the longest PolyT
        #BC1: 8-11 bases
        #W1 : 22 bases
        #BC2: 8 bases
        #UMI: 6 bases

        # check for empty reads (due to adapter trimming)
        if not read:
            return False, 'empty_read'

        #Check for W1 adapter
        #Allow for up to hamming_threshold errors
        if w1 in name:
            w1_pos = name.find(w1)
            if not 7 < w1_pos < 12:
                return False, 'No_W1'
        else:
            #Try to find W1 adapter at start positions 8-11
            #by checking hamming distance to W1.
            for w1_pos in range(8, 12):
                if string_hamming_distance(w1, name[w1_pos:w1_pos+22]) <= hamming_threshold_for_W1_matching:
                    break
            else:
                return False, 'No_W1'

        bc2_pos=w1_pos+22
        umi_pos=bc2_pos+8
        polyTpos=umi_pos+6
        # Require a run of Ts (with at most 3 mismatches) right after the UMI.
        expected_poly_t = name[polyTpos:polyTpos+minimal_polyT_len_on_R1]
        if string_hamming_distance(expected_poly_t, 'T'*minimal_polyT_len_on_R1) > 3:
            return False, 'No_polyT'
        
        bc1 = str(name[:w1_pos])
        bc2 = str(name[bc2_pos:umi_pos])
        umi = str(name[umi_pos:umi_pos+6])
        
        #Validate barcode (and try to correct when there is no ambiguity)
        if valid_bc1s and valid_bc2s:
            # Check if BC1 and BC2 can be mapped to expected barcodes
            if bc1 in valid_bc1s:
                # BC1 might be a neighboring BC, rather than a valid BC itself. 
                bc1 = valid_bc1s[bc1]
            else:
                return False, 'BC1'
            if bc2 in valid_bc2s:
                bc2 = valid_bc2s[bc2]
            else:
                return False, 'BC2'
            if 'N' in umi:
                return False, 'UMI_error'
        bc = '%s-%s'%(bc1, bc2)
        return True, (bc, umi)
class V3Demultiplexer():
    """Demultiplexer for inDrops version 3 runs.

    V3 runs produce four FastQs per lane (R1=biological read, R2=BC1,
    R3=library index, R4=BC2+UMI). This class weaves the four files,
    error-corrects indices and barcodes, and routes each valid read into the
    per-library Trimmomatic/complexity-filter pipeline of the matching
    LibrarySequencingPart.
    """

    def __init__(self, library_indices, project=None, part_filename="", input_filename="", run_name="", part_name=""):
        self.input_filename = input_filename
        self.project = project
        self.run_name = run_name
        self.part_name = part_name
        # One LibrarySequencingPart per expected library index.
        self.libraries = {}
        for lib in library_indices:
            lib_index = lib['library_index']
            lib_name = lib['library_name']
            library_part_filename = part_filename.format(library_name=lib_name, library_index=lib_index)
            self.libraries[lib_index] = LibrarySequencingPart(filtered_fastq_filename=library_part_filename, project=project, run_name=run_name, library_name=lib_name, part_name=part_name)

    def _weave_fastqs(self, fastqs):
        """Yield (read_name, [seqs], [quals]) tuples across N parallel FastQs.

        All files must share the same compression (.gz, .bz2, or plain
        .fastq); read names are asserted to match across files.
        """
        last_extension = [fn.split('.')[-1] for fn in fastqs]
        if all(ext == 'gz' for ext in last_extension):
            processes = [subprocess.Popen("gzip --stdout -d %s" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]
            streams = [r.stdout for r in processes]
        elif all(ext == 'bz2' for ext in last_extension):
            processes = [subprocess.Popen("bzcat %s" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]
            streams = [r.stdout for r in processes]
        elif all(ext == 'fastq' for ext in last_extension):
            streams = [open(fn, 'r') for fn in fastqs]
        else:
            # NOTE(review): `raise` with a plain string is invalid — this
            # path would itself error (TypeError) rather than raise the
            # intended message.
            raise("ERROR: Different files are compressed differently. Check input.")

        while True:
            # Read the 4 FastQ lines in lockstep from every stream; ends when
            # next() raises StopIteration at EOF (pre-PEP-479 behavior).
            names = [next(s)[:-1].split()[0] for s in streams]
            seqs = [next(s)[:-1] for s in streams]
            blanks = [next(s)[:-1]  for s in streams]
            quals = [next(s)[:-1] for s in streams]

            assert all(name==names[0] for name in names)
            yield names[0], seqs, quals

        for s in streams:
            s.close()

    def _process_reads(self, name, seqs, quals, valid_bc1s={}, valid_bc2s={}, valid_libs={}):
        """
        Returns either:
            True, lib_index, (barcode, umi)
                (if read passes filter)
            False, lib_index, name of filter that failed
                (for stats collection)

        seqs is (R1 bio read, R2 BC1, R3 library index, R4 BC2+UMI[+polyA]).
        """
        r1, r2, r3, r4 = seqs
        # Error-corrected library index lookup.
        if r3 in valid_libs:
            lib_index = valid_libs[r3]
        else:
            return False, r3, 'Invalid_library_index'
        # Error-corrected BC1 lookup.
        if r2 in valid_bc1s:
            bc1 = valid_bc1s[r2]
        else:
            return False, lib_index, 'Invalid_BC1'
        # R4 layout: 8bp BC2, 6bp UMI, remainder polyA.
        orig_bc2 = r4[:8]
        umi = r4[8:8+6]
        polyA = r4[8+6:]
        if orig_bc2 in valid_bc2s:
            bc2 = valid_bc2s[orig_bc2]
        else:
            return False, lib_index, 'Invalid_BC2'
        if 'N' in umi:
            return False, lib_index, 'UMI_contains_N'
        final_bc = '%s-%s' % (bc1, bc2)
        return True, lib_index, (final_bc, umi)

    def filter_and_count_reads(self):
        """Demultiplex the 4 input FastQs into per-library filtered FastQs.

        Opens the per-library filter pipelines (context managers) in a fixed
        order, streams every read through _process_reads, and tears the
        pipelines down in reverse order.
        """
        # Prepare error corrected index sets
        self.sequence_to_index_mapping = {}
        libs = self.libraries.keys()
        self.sequence_to_index_mapping = dict(zip(libs, libs))
        index_neighborhoods = [set(seq_neighborhood(lib, 1)) for lib in libs]
        for lib, clibs in zip(libs, index_neighborhoods):
            # Quick check that error-correction maps to a single index
            for clib in clibs:
                if sum(clib in hood for hood in index_neighborhoods)==1:
                    self.sequence_to_index_mapping[clib] = lib

        # Prepare error corrected barcode sets
        error_corrected_barcodes = self.project.gel_barcode2_list_neighborhood
        error_corrected_rev_compl_barcodes = self.project.gel_barcode2_revcomp_list_neighborhood

        # Open up our context managers
        manager_order = [] #It's imperative to exit managers the opposite order than we open them!
        trim_processes = {}
        trim_processes_managers = {}
        for lib in self.libraries.keys():
            manager_order.append(lib)
            trim_processes_managers[lib] = self.libraries[lib].trimmomatic_and_low_complexity_filter_process()
            trim_processes[lib] = trim_processes_managers[lib].__enter__()

        overall_filtering_statistics = defaultdict(int)

        # Paths for the 4 expected FastQs
        input_fastqs = []
        for r in ['R1', 'R2', 'R3', 'R4']:
            input_fastqs.append(self.input_filename.format(read=r))

        # Progress-logging templates: one fixed column set, plus one dynamic
        # per-library column (the template string is assembled so each
        # library key becomes a format field).
        last_ping = time.time()
        ping_every_n_reads = 1000000
        ping_header = "{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10} |" + ''.join("{%d:>12.10}"%i for i in range(7,7+len(manager_order)))
        ping_header = ping_header.format("Total Reads", "", "Valid Reads", "No index", "No BC1", "No BC2", "No UMI", *[self.libraries[k].library_name for k in manager_order])
        ping_template = "{total:12d}    {rate:5.1f} sec/M {Valid:12.1%}{Invalid_library_index:10.1%}{Invalid_BC1:10.1%}{Invalid_BC2:10.1%}{UMI_contains_N:10.1%} |{"+":>12.1%}{".join(manager_order)+":>12.1%}"
        def print_ping_to_log(last_ping):
            sec_per_mil = (time.time() - last_ping)/(float(ping_every_n_reads)/10**6) if last_ping else 0
            total = overall_filtering_statistics['Total']
            ping_format_data = {k: float(overall_filtering_statistics[k])/total for k in ['Valid', 'Invalid_library_index', 'Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']}
            if overall_filtering_statistics['Valid'] > 0:
                ping_format_data.update({k: float(self.libraries[k].filtering_statistics_counter['Valid'])/overall_filtering_statistics['Valid'] for k in manager_order})
            print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))

        common__ = defaultdict(int)
        print_to_stderr('Filtering %s, file %s' % (self.run_name, self.input_filename))
        for r_name, seqs, quals in self._weave_fastqs(input_fastqs):

            # Python 3 compatibility in mind!
            seqs = [s.decode('utf-8') for s in seqs]

            keep, lib_index, result = self._process_reads(r_name, seqs, quals,
                                                            error_corrected_barcodes, error_corrected_rev_compl_barcodes,
                                                            self.sequence_to_index_mapping)

            common__[seqs[1]] += 1
            if keep:
                bc, umi = result
                bio_read = seqs[0]
                bio_qual = quals[0]
                trim_processes[lib_index].write(to_fastq_lines(bc, umi, bio_read, bio_qual, r_name[1:]))
                self.libraries[lib_index].filtering_statistics_counter['Valid'] += 1
                overall_filtering_statistics['Valid'] += 1

            else:
                # Library-level stats only make sense when we know the library.
                if result != 'Invalid_library_index':
                    self.libraries[lib_index].filtering_statistics_counter[result] += 1
                overall_filtering_statistics[result] += 1

            # Track speed per M reads
            overall_filtering_statistics['Total'] += 1
            if overall_filtering_statistics['Total']%(ping_every_n_reads*10)==1:
                print_to_stderr(ping_header)
            
            if overall_filtering_statistics['Total']%ping_every_n_reads == 0:
                print_ping_to_log(last_ping)
                last_ping = time.time()
        print_ping_to_log(False)

        # Close up the context managers
        for lib in manager_order[::-1]:
            trim_processes_managers[lib].__exit__(None, None, None)

    def contains_library_in_query(self, query_libraries):
        for lib in self.libraries.values():
            if lib.contains_library_in_query(query_libraries):
                return True
        return False
if __name__=="__main__":
    # Command-line driver: parse arguments, load the project YAML, resolve
    # the libraries/runs to operate on, then dispatch to the subcommand.
    import sys, argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('project', type=argparse.FileType('r'), help='Project YAML File.')
    parser.add_argument('-l', '--libraries', type=str, help='[all] Library name(s) to work on. If blank, will iterate over all libraries in project.', nargs='?', default='')
    parser.add_argument('-r', '--runs', type=str, help='[all] Run name(s) to work on. If blank, will iterate over all runs in project.', nargs='?', default='')
    parser.add_argument('command', type=str, choices=['info', 'filter', 'identify_abundant_barcodes', 'sort', 'quantify', 'aggregate', 'build_index', 'get_reads'])
    parser.add_argument('--total-workers', type=int, help='[all] Total workers that are working together. This takes precedence over barcodes-per-worker.', default=1)
    parser.add_argument('--worker-index', type=int, help='[all] Index of current worker (the first worker should have index 0).', default=0)
    parser.add_argument('--min-reads', type=int, help='[quantify] Minimun number of reads for barcode to be processed', nargs='?', default=750)
    parser.add_argument('--min-counts', type=int, help='[aggregate] Minimun number of UMIFM counts for barcode to be aggregated', nargs='?', default=0)
    parser.add_argument('--analysis-prefix', type=str, help='[quantify/aggregate/convert_bam/merge_bam] Prefix for analysis files.', nargs='?', default='')
    parser.add_argument('--no-bam', help='[quantify] Do not output alignments to bam file.', action='store_true')
    parser.add_argument('--genome-fasta-gz', help='[build_index] Path to gzipped soft-masked genomic FASTA file.')
    parser.add_argument('--ensembl-gtf-gz', help='[build_index] Path to gzipped ENSEMBL GTF file. ')
    parser.add_argument('--override-yaml', help="[all] Dictionnary to update project YAML with.. [You don't need this.]", nargs='?', default='')
    args = parser.parse_args()

    project = IndropsProject(args.project)
    if args.override_yaml:
        # NOTE(review): eval() on a CLI argument — accepted here because the
        # caller is trusted (the help text says as much), but this would be
        # unsafe in any exposed context; ast.literal_eval would be safer.
        override = eval(args.override_yaml)
        if 'paths' in override:
            project.yaml['paths'].update(override['paths'])
        if 'parameters' in override:
            for k,v in override['parameters'].items():
                project.yaml['parameters'][k].update(v)
        # Drop memoized properties so the overrides take effect.
        if hasattr(project, '_paths'):
            del project._paths
        if hasattr(project, '_parameters'):
            del project._parameters

    # Resolve the target libraries (comma-separated, validated against the
    # project; default = all).
    target_libraries = []
    if args.libraries:
        for lib in args.libraries.split(','):
            assert lib in project.libraries
            if lib not in target_libraries:
                target_libraries.append(lib)
    else:
        target_libraries = project.libraries.keys()
    lib_query = set(target_libraries)

    # Resolve the target runs (default = all).
    target_runs = []
    if args.runs:
        for run in args.runs.split(','):
            assert run in project.runs
            target_runs.append(run)
    else:
        target_runs = project.runs.keys()

    # (library, part_index) pairs whose part belongs to a targeted run.
    target_library_parts = []
    for lib in target_libraries:
        for pi, part in enumerate(project.libraries[lib].parts):
            if part.run_name in target_runs:
                target_library_parts.append((lib, pi))

    if args.command == 'info':
        print_to_stderr('Project Name: ' + project.name)
        target_run_parts = []
        for run in target_runs:
            target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]
        print_to_stderr('Total library parts in search query: ' + str(len(target_run_parts)))

    elif args.command == 'filter':
        # Work is sharded across workers via worker_filter(index, total).
        target_run_parts = []
        for run in target_runs:
            target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]
        for part in worker_filter(target_run_parts, args.worker_index, args.total_workers):
            print_to_stderr('Filtering run "%s", part "%s"' % (part.run_name, part.part_name))
            part.filter_and_count_reads()

    elif args.command == 'identify_abundant_barcodes':
        for library in worker_filter(target_libraries, args.worker_index, args.total_workers):
            project.libraries[library].identify_abundant_barcodes()

    elif args.command == 'sort':
        for library, part_index in worker_filter(target_library_parts, args.worker_index, args.total_workers):
            print_to_stderr('Sorting %s, part "%s"' % (library, project.libraries[library].parts[part_index].filtered_fastq_filename))
            project.libraries[library].sort_reads_by_barcode(index=part_index)

    elif args.command == 'quantify':
        for library in target_libraries:
            project.libraries[library].quantify_expression(worker_index=args.worker_index, total_workers=args.total_workers,
                    min_reads=args.min_reads, min_counts=args.min_counts,
                    analysis_prefix=args.analysis_prefix,
                    no_bam=args.no_bam, run_filter=target_runs)
            # Free the (potentially large) cached sorted indices between libraries.
            for part in project.libraries[library].parts:
                if hasattr(part, '_sorted_index'):
                    del part._sorted_index

    elif args.command == 'aggregate':
        for library in worker_filter(target_libraries, args.worker_index, args.total_workers):
            project.libraries[library].aggregate_counts(analysis_prefix=args.analysis_prefix)

    elif args.command == 'build_index':
        project.build_transcriptome(args.genome_fasta_gz, args.ensembl_gtf_gz)

    elif args.command == 'get_reads':
        # Stream all reads for sufficiently-abundant barcodes to stdout.
        for library in target_libraries:
            sorted_barcode_names = project.libraries[library].sorted_barcode_names(min_reads=args.min_reads)
            for bc in sorted_barcode_names:
                for line in project.libraries[library].get_reads_for_barcode(bc, run_filter=target_runs):
                    sys.stdout.write(line)
            for part in project.libraries[library].parts:
                if hasattr(part, '_sorted_index'):
                    del part._sorted_index
| 48.696581 | 274 | 0.626653 |
a347c9e03fbaac2dae15a8a5a58cd3c4bcfc16f1 | 618 | py | Python | core/migrations/0001_initial.py | Gadeonio/Django_ItCode | 2cf5386365417b605c80e9c7c9ef74666fa66b10 | [
"MIT"
] | null | null | null | core/migrations/0001_initial.py | Gadeonio/Django_ItCode | 2cf5386365417b605c80e9c7c9ef74666fa66b10 | [
"MIT"
] | 6 | 2022-03-07T12:59:38.000Z | 2022-03-25T20:02:46.000Z | core/migrations/0001_initial.py | Gadeonio/Django_ItCode | 2cf5386365417b605c80e9c7c9ef74666fa66b10 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-03-01 05:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Book`` table."""

    # First migration for this app; establishes the initial schema state.
    initial = True

    # No prior migrations (from this app or others) are required.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                # Surrogate primary key added automatically by Django.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Book title (verbose_name is the Russian UI label "Name").
                ('name', models.CharField(max_length=128, verbose_name='Название')),
                # Optional page count ("Number of pages"); nullable in the DB.
                ('pages', models.IntegerField(blank=True, null=True, verbose_name='Количество страниц')),
            ],
        ),
    ]
| 26.869565 | 117 | 0.597087 |
624fa90053d7f68be7d17d358f6aba6eda91832a | 22,721 | py | Python | test/engine/test_logging.py | bowlofeggs/sqlalchemy | 4042792348481e8c00515f8df6af503ca4d0ee73 | [
"MIT"
] | null | null | null | test/engine/test_logging.py | bowlofeggs/sqlalchemy | 4042792348481e8c00515f8df6af503ca4d0ee73 | [
"MIT"
] | null | null | null | test/engine/test_logging.py | bowlofeggs/sqlalchemy | 4042792348481e8c00515f8df6af503ca4d0ee73 | [
"MIT"
] | 1 | 2021-11-23T17:59:42.000Z | 2021-11-23T17:59:42.000Z | import logging.handlers
import sqlalchemy as tsa
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import util
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import eq_regex
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.util import lazy_gc
def exec_sql(engine, sql, *args, **kwargs):
    """Execute a raw SQL string on a short-lived connection.

    A connection is checked out from *engine*, the statement is run
    through the driver-level ``exec_driver_sql`` API (forwarding any
    extra positional/keyword arguments), and the result is returned.
    The connection is released when the ``with`` block exits.
    """
    connection = engine.connect()
    with connection:
        result = connection.exec_driver_sql(sql, *args, **kwargs)
        return result
class LogParamsTest(fixtures.TestBase):
    """Tests for how bound parameters are rendered in engine log output
    and in exception messages: truncation of large values and long
    parameter lists, ``hide_parameters`` behavior, and ``_repr_params``.
    """

    __only_on__ = "sqlite"
    __requires__ = ("ad_hoc_engines",)

    def setup(self):
        # Two engines: a normal echoing engine and one configured to hide
        # parameters from both logs and exception messages.
        self.eng = engines.testing_engine(options={"echo": True})
        self.no_param_engine = engines.testing_engine(
            options={"echo": True, "hide_parameters": True}
        )
        exec_sql(self.eng, "create table if not exists foo (data string)")
        exec_sql(
            self.no_param_engine,
            "create table if not exists foo (data string)",
        )
        # Capture engine log records in memory so tests can assert on
        # individual buffered messages by index.
        self.buf = logging.handlers.BufferingHandler(100)
        for log in [logging.getLogger("sqlalchemy.engine")]:
            log.addHandler(self.buf)

    def teardown(self):
        exec_sql(self.eng, "drop table if exists foo")
        for log in [logging.getLogger("sqlalchemy.engine")]:
            log.removeHandler(self.buf)

    def test_log_large_list_of_dict(self):
        # executemany with 100 parameter sets logs only the first 8 and
        # last 2, with an ellipsis summary in between.
        exec_sql(
            self.eng,
            "INSERT INTO foo (data) values (:data)",
            [{"data": str(i)} for i in range(100)],
        )
        eq_(
            self.buf.buffer[1].message,
            "[raw sql] [{'data': '0'}, {'data': '1'}, {'data': '2'}, "
            "{'data': '3'}, "
            "{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
            " ... displaying 10 of 100 total bound "
            "parameter sets ... {'data': '98'}, {'data': '99'}]",
        )

    def test_repr_params_large_list_of_dict(self):
        eq_(
            repr(
                sql_util._repr_params(
                    [{"data": str(i)} for i in range(100)],
                    batches=10,
                    ismulti=True,
                )
            ),
            "[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
            "{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
            " ... displaying 10 of 100 total bound "
            "parameter sets ... {'data': '98'}, {'data': '99'}]",
        )

    def test_log_no_parameters(self):
        # hide_parameters=True replaces the parameter list entirely.
        exec_sql(
            self.no_param_engine,
            "INSERT INTO foo (data) values (:data)",
            [{"data": str(i)} for i in range(100)],
        )
        eq_(
            self.buf.buffer[1].message,
            "[raw sql] [SQL parameters hidden due to hide_parameters=True]",
        )

    def test_log_large_list_of_tuple(self):
        exec_sql(
            self.eng,
            "INSERT INTO foo (data) values (?)",
            [(str(i),) for i in range(100)],
        )
        eq_(
            self.buf.buffer[1].message,
            "[raw sql] [('0',), ('1',), ('2',), ('3',), ('4',), ('5',), "
            "('6',), ('7',) ... displaying 10 of 100 total "
            "bound parameter sets ... ('98',), ('99',)]",
        )

    def test_log_positional_array(self):
        with self.eng.connect() as conn:
            exc_info = assert_raises(
                tsa.exc.DBAPIError,
                conn.execute,
                tsa.text("SELECT * FROM foo WHERE id IN :foo AND bar=:bar"),
                {"foo": [1, 2, 3], "bar": "hi"},
            )

            assert (
                "[SQL: SELECT * FROM foo WHERE id IN ? AND bar=?]\n"
                "[parameters: ([1, 2, 3], 'hi')]\n" in str(exc_info)
            )

            eq_regex(
                self.buf.buffer[1].message,
                r"\[generated .*\] \(\[1, 2, 3\], 'hi'\)",
            )

    def test_repr_params_positional_array(self):
        eq_(
            repr(
                sql_util._repr_params(
                    [[1, 2, 3], 5], batches=10, ismulti=False
                )
            ),
            "[[1, 2, 3], 5]",
        )

    def test_repr_params_unknown_list(self):
        # not known if given multiparams or not. repr params with
        # straight truncation
        eq_(
            repr(
                sql_util._repr_params(
                    [[i for i in range(300)], 5], batches=10, max_chars=80
                )
            ),
            "[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... "
            "(1315 characters truncated) ... , 293, 294, 295, 296, "
            "297, 298, 299], 5]",
        )

    def test_repr_params_positional_list(self):
        # given non-multi-params in a list. repr params with
        # per-element truncation, mostly does the exact same thing
        eq_(
            repr(
                sql_util._repr_params(
                    [[i for i in range(300)], 5],
                    batches=10,
                    max_chars=80,
                    ismulti=False,
                )
            ),
            "[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
            "(1310 characters truncated) ... "
            "292, 293, 294, 295, 296, 297, 298, 299], 5]",
        )

    def test_repr_params_named_dict(self):
        # given non-multi named params in a dict; small values are not
        # truncated, so the output equals the plain repr of the dict
        params = {"key_%s" % i: i for i in range(10)}
        eq_(
            repr(
                sql_util._repr_params(
                    params, batches=10, max_chars=80, ismulti=False
                )
            ),
            repr(params),
        )

    def test_repr_params_ismulti_named_dict(self):
        # given multi-params as a list of dicts; only the first/last
        # batches are displayed with a summary of the omitted sets
        param = {"key_%s" % i: i for i in range(10)}
        eq_(
            repr(
                sql_util._repr_params(
                    [param for j in range(50)],
                    batches=5,
                    max_chars=80,
                    ismulti=True,
                )
            ),
            "[%(param)r, %(param)r, %(param)r ... "
            "displaying 5 of 50 total bound parameter sets ... "
            "%(param)r, %(param)r]" % {"param": param},
        )

    def test_repr_params_ismulti_list(self):
        # given multi-params in a list. repr params with
        # per-element truncation, mostly does the exact same thing
        eq_(
            repr(
                sql_util._repr_params(
                    [
                        [[i for i in range(300)], 5],
                        [[i for i in range(300)], 5],
                        [[i for i in range(300)], 5],
                    ],
                    batches=10,
                    max_chars=80,
                    ismulti=True,
                )
            ),
            "[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
            "(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
            "298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
            "(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
            "298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
            "(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
            "298, 299], 5]]",
        )

    def test_log_large_parameter_single(self):
        # a single scalar parameter longer than the display limit is
        # truncated to its first/last 149 characters in the log
        import random

        largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))

        exec_sql(self.eng, "INSERT INTO foo (data) values (?)", (largeparam,))

        eq_(
            self.buf.buffer[1].message,
            "[raw sql] ('%s ... (4702 characters truncated) ... %s',)"
            % (largeparam[0:149], largeparam[-149:]),
        )

    def test_log_large_multi_parameter(self):
        # only the oversized member of a parameter tuple is truncated
        import random

        lp1 = "".join(chr(random.randint(52, 85)) for i in range(5))
        lp2 = "".join(chr(random.randint(52, 85)) for i in range(8))
        lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))

        exec_sql(self.eng, "SELECT ?, ?, ?", (lp1, lp2, lp3))

        eq_(
            self.buf.buffer[1].message,
            "[raw sql] ('%s', '%s', '%s ... (372 characters truncated) "
            "... %s')" % (lp1, lp2, lp3[0:149], lp3[-149:]),
        )

    def test_log_large_parameter_multiple(self):
        # executemany: each oversized parameter set member is truncated
        # individually
        import random

        lp1 = "".join(chr(random.randint(52, 85)) for i in range(5000))
        lp2 = "".join(chr(random.randint(52, 85)) for i in range(200))
        lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))

        exec_sql(
            self.eng,
            "INSERT INTO foo (data) values (?)",
            [(lp1,), (lp2,), (lp3,)],
        )

        eq_(
            self.buf.buffer[1].message,
            "[raw sql] [('%s ... (4702 characters truncated) ... %s',), "
            "('%s',), "
            "('%s ... (372 characters truncated) ... %s',)]"
            % (lp1[0:149], lp1[-149:], lp2, lp3[0:149], lp3[-149:]),
        )

    def test_exception_format_dict_param(self):
        exception = tsa.exc.IntegrityError("foo", {"x": "y"}, None)
        eq_regex(
            str(exception),
            r"\(.*.NoneType\) None\n\[SQL: foo\]\n\[parameters: {'x': 'y'}\]",
        )

    def test_exception_format_hide_parameters(self):
        exception = tsa.exc.IntegrityError(
            "foo", {"x": "y"}, None, hide_parameters=True
        )
        eq_regex(
            str(exception),
            r"\(.*.NoneType\) None\n\[SQL: foo\]\n"
            r"\[SQL parameters hidden due to hide_parameters=True\]",
        )

    def test_exception_format_hide_parameters_dbapi_round_trip(self):
        # hide_parameters applies to errors raised from the DBAPI itself
        assert_raises_message(
            tsa.exc.DBAPIError,
            r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
            r"\[SQL parameters hidden due to hide_parameters=True\]",
            lambda: exec_sql(
                self.no_param_engine,
                "INSERT INTO nonexistent (data) values (:data)",
                [{"data": str(i)} for i in range(10)],
            ),
        )

    def test_exception_format_hide_parameters_nondbapi_round_trip(self):
        # ...and also to errors raised before the statement reaches the
        # DBAPI (here: a missing bind parameter value)
        foo = Table("foo", MetaData(), Column("data", String))

        with self.no_param_engine.connect() as conn:
            assert_raises_message(
                tsa.exc.StatementError,
                r"\(sqlalchemy.exc.InvalidRequestError\) A value is required "
                r"for bind parameter 'the_data_2'\n"
                r"\[SQL: SELECT foo.data \nFROM foo \nWHERE "
                r"foo.data = \? OR foo.data = \?\]\n"
                r"\[SQL parameters hidden due to hide_parameters=True\]",
                conn.execute,
                select([foo]).where(
                    or_(
                        foo.c.data == bindparam("the_data_1"),
                        foo.c.data == bindparam("the_data_2"),
                    )
                ),
                {"the_data_1": "some data"},
            )

    def test_exception_format_unexpected_parameter(self):
        # test that if the parameters aren't any known type, we just
        # run through repr()
        exception = tsa.exc.IntegrityError("foo", "bar", "bat")
        eq_regex(
            str(exception),
            r"\(.*.str\) bat\n\[SQL: foo\]\n\[parameters: 'bar'\]",
        )

    def test_exception_format_unexpected_member_parameter(self):
        # test that if the parameters aren't any known type, we just
        # run through repr()
        exception = tsa.exc.IntegrityError("foo", ["bar", "bat"], "hoho")
        eq_regex(
            str(exception),
            r"\(.*.str\) hoho\n\[SQL: foo\]\n\[parameters: \['bar', 'bat'\]\]",
        )

    def test_result_large_param(self):
        # with echo="debug", the fetched Row is also logged and its repr
        # truncates large values (py2 reprs carry a u'' prefix, shifting
        # the truncation count by one)
        import random

        largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))

        self.eng.echo = "debug"
        result = exec_sql(self.eng, "SELECT ?", (largeparam,))

        row = result.first()

        eq_(
            self.buf.buffer[1].message,
            "[raw sql] ('%s ... (4702 characters truncated) ... %s',)"
            % (largeparam[0:149], largeparam[-149:]),
        )

        if util.py3k:
            eq_(
                self.buf.buffer[3].message,
                "Row ('%s ... (4702 characters truncated) ... %s',)"
                % (largeparam[0:149], largeparam[-149:]),
            )
        else:
            eq_(
                self.buf.buffer[3].message,
                "Row (u'%s ... (4703 characters truncated) ... %s',)"
                % (largeparam[0:148], largeparam[-149:]),
            )

        if util.py3k:
            eq_(
                repr(row),
                "('%s ... (4702 characters truncated) ... %s',)"
                % (largeparam[0:149], largeparam[-149:]),
            )
        else:
            eq_(
                repr(row),
                "(u'%s ... (4703 characters truncated) ... %s',)"
                % (largeparam[0:148], largeparam[-149:]),
            )

    def test_error_large_dict(self):
        # exception text uses the same first-8/last-2 truncation for large
        # executemany parameter lists
        assert_raises_message(
            tsa.exc.DBAPIError,
            r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
            r"\[parameters: "
            r"\[{'data': '0'}, {'data': '1'}, {'data': '2'}, "
            r"{'data': '3'}, {'data': '4'}, {'data': '5'}, "
            r"{'data': '6'}, {'data': '7'} ... displaying 10 of "
            r"100 total bound parameter sets ... {'data': '98'}, "
            r"{'data': '99'}\]",
            lambda: exec_sql(
                self.eng,
                "INSERT INTO nonexistent (data) values (:data)",
                [{"data": str(i)} for i in range(100)],
            ),
        )

    def test_error_large_list(self):
        assert_raises_message(
            tsa.exc.DBAPIError,
            r".*INSERT INTO nonexistent \(data\) values "
            r"\(\?\)\]\n\[parameters: \[\('0',\), \('1',\), \('2',\), "
            r"\('3',\), \('4',\), \('5',\), \('6',\), \('7',\) "
            r"... displaying "
            r"10 of 100 total bound parameter sets ... "
            r"\('98',\), \('99',\)\]",
            lambda: exec_sql(
                self.eng,
                "INSERT INTO nonexistent (data) values (?)",
                [(str(i),) for i in range(100)],
            ),
        )
class PoolLoggingTest(fixtures.TestBase):
    """Tests that connection-pool lifecycle events (create, checkout,
    checkin, rollback-on-return, close, dispose) are logged, whether
    enabled via the pool ``echo`` flag or via direct logger level.
    """

    def setup(self):
        # Remember the pre-test level so teardown can restore it.
        self.existing_level = logging.getLogger("sqlalchemy.pool").level

        # Capture pool log records in memory for assertion.
        self.buf = logging.handlers.BufferingHandler(100)
        for log in [logging.getLogger("sqlalchemy.pool")]:
            log.addHandler(self.buf)

    def teardown(self):
        for log in [logging.getLogger("sqlalchemy.pool")]:
            log.removeHandler(self.buf)
        logging.getLogger("sqlalchemy.pool").setLevel(self.existing_level)

    def _queuepool_echo_fixture(self):
        # Logging enabled through the pool's own echo flag.
        return tsa.pool.QueuePool(creator=mock.Mock(), echo="debug")

    def _queuepool_logging_fixture(self):
        # Logging enabled by configuring the logger level directly.
        logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
        return tsa.pool.QueuePool(creator=mock.Mock())

    def _stpool_echo_fixture(self):
        return tsa.pool.SingletonThreadPool(creator=mock.Mock(), echo="debug")

    def _stpool_logging_fixture(self):
        logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
        return tsa.pool.SingletonThreadPool(creator=mock.Mock())

    def _test_queuepool(self, q, dispose=True):
        # Check a connection out/in three times (the third via garbage
        # collection rather than close()), then dispose, asserting the
        # exact ordered sequence of pool log message templates.
        conn = q.connect()
        conn.close()
        conn = None

        conn = q.connect()
        conn.close()
        conn = None

        conn = q.connect()
        conn = None
        del conn
        lazy_gc()
        q.dispose()

        eq_(
            [buf.msg for buf in self.buf.buffer],
            [
                "Created new connection %r",
                "Connection %r checked out from pool",
                "Connection %r being returned to pool",
                "Connection %s rollback-on-return%s",
                "Connection %r checked out from pool",
                "Connection %r being returned to pool",
                "Connection %s rollback-on-return%s",
                "Connection %r checked out from pool",
                "Connection %r being returned to pool",
                "Connection %s rollback-on-return%s",
                "Closing connection %r",
            ]
            + (["Pool disposed. %s"] if dispose else []),
        )

    def test_stpool_echo(self):
        q = self._stpool_echo_fixture()
        self._test_queuepool(q, False)

    def test_stpool_logging(self):
        q = self._stpool_logging_fixture()
        self._test_queuepool(q, False)

    def test_queuepool_echo(self):
        q = self._queuepool_echo_fixture()
        self._test_queuepool(q)

    def test_queuepool_logging(self):
        q = self._queuepool_logging_fixture()
        self._test_queuepool(q)
class LoggingNameTest(fixtures.TestBase):
    """Tests for the ``logging_name`` / ``pool_logging_name`` engine
    options, which append a custom suffix to the logger names used by an
    engine and its connection pool.
    """

    __requires__ = ("ad_hoc_engines",)

    def _assert_names_in_execute(self, eng, eng_name, pool_name):
        # Every record emitted by an execute must come from one of the
        # two suffixed logger names.
        eng.execute(select([1]))
        assert self.buf.buffer
        for name in [b.name for b in self.buf.buffer]:
            assert name in (
                "sqlalchemy.engine.Engine.%s" % eng_name,
                "sqlalchemy.pool.impl.%s.%s"
                % (eng.pool.__class__.__name__, pool_name),
            )

    def _assert_no_name_in_execute(self, eng):
        # Without the options, the plain (unsuffixed) logger names are used.
        eng.execute(select([1]))
        assert self.buf.buffer
        for name in [b.name for b in self.buf.buffer]:
            assert name in (
                "sqlalchemy.engine.Engine",
                "sqlalchemy.pool.impl.%s" % eng.pool.__class__.__name__,
            )

    def _named_engine(self, **kw):
        options = {
            "logging_name": "myenginename",
            "pool_logging_name": "mypoolname",
            "echo": True,
        }
        options.update(kw)
        return engines.testing_engine(options=options)

    def _unnamed_engine(self, **kw):
        kw.update({"echo": True})
        return engines.testing_engine(options=kw)

    def setup(self):
        # Capture both engine and pool records in a single buffer.
        self.buf = logging.handlers.BufferingHandler(100)
        for log in [
            logging.getLogger("sqlalchemy.engine"),
            logging.getLogger("sqlalchemy.pool"),
        ]:
            log.addHandler(self.buf)

    def teardown(self):
        for log in [
            logging.getLogger("sqlalchemy.engine"),
            logging.getLogger("sqlalchemy.pool"),
        ]:
            log.removeHandler(self.buf)

    def test_named_logger_names(self):
        eng = self._named_engine()
        eq_(eng.logging_name, "myenginename")
        eq_(eng.pool.logging_name, "mypoolname")

    def test_named_logger_names_after_dispose(self):
        # Names must survive a dispose() (which rebuilds the pool).
        eng = self._named_engine()
        eng.execute(select([1]))
        eng.dispose()
        eq_(eng.logging_name, "myenginename")
        eq_(eng.pool.logging_name, "mypoolname")

    def test_unnamed_logger_names(self):
        eng = self._unnamed_engine()
        eq_(eng.logging_name, None)
        eq_(eng.pool.logging_name, None)

    def test_named_logger_execute(self):
        eng = self._named_engine()
        self._assert_names_in_execute(eng, "myenginename", "mypoolname")

    def test_named_logger_echoflags_execute(self):
        eng = self._named_engine(echo="debug", echo_pool="debug")
        self._assert_names_in_execute(eng, "myenginename", "mypoolname")

    def test_named_logger_execute_after_dispose(self):
        eng = self._named_engine()
        eng.execute(select([1]))
        eng.dispose()
        self._assert_names_in_execute(eng, "myenginename", "mypoolname")

    def test_unnamed_logger_execute(self):
        eng = self._unnamed_engine()
        self._assert_no_name_in_execute(eng)

    def test_unnamed_logger_echoflags_execute(self):
        eng = self._unnamed_engine(echo="debug", echo_pool="debug")
        self._assert_no_name_in_execute(eng)
class EchoTest(fixtures.TestBase):
    """Tests for the engine ``echo`` flag: its mapping onto logger
    levels (True -> INFO, "debug" -> DEBUG, False -> inherited WARN)
    and its independence between separate engine instances.
    """

    __requires__ = ("ad_hoc_engines",)

    def setup(self):
        # Pin the engine logger to WARN so echo changes are observable,
        # remembering the previous level for teardown.
        self.level = logging.getLogger("sqlalchemy.engine").level
        logging.getLogger("sqlalchemy.engine").setLevel(logging.WARN)
        self.buf = logging.handlers.BufferingHandler(100)
        logging.getLogger("sqlalchemy.engine").addHandler(self.buf)

    def teardown(self):
        logging.getLogger("sqlalchemy.engine").removeHandler(self.buf)
        logging.getLogger("sqlalchemy.engine").setLevel(self.level)

    def _testing_engine(self):
        e = engines.testing_engine()

        # do an initial execute to clear out 'first connect'
        # messages
        e.execute(select([10])).close()

        self.buf.flush()

        return e

    def test_levels(self):
        e1 = engines.testing_engine()

        # default: neither info nor debug enabled
        eq_(e1._should_log_info(), False)
        eq_(e1._should_log_debug(), False)
        eq_(e1.logger.isEnabledFor(logging.INFO), False)
        eq_(e1.logger.getEffectiveLevel(), logging.WARN)

        # echo=True enables INFO only
        e1.echo = True
        eq_(e1._should_log_info(), True)
        eq_(e1._should_log_debug(), False)
        eq_(e1.logger.isEnabledFor(logging.INFO), True)
        eq_(e1.logger.getEffectiveLevel(), logging.INFO)

        # echo="debug" enables DEBUG as well
        e1.echo = "debug"
        eq_(e1._should_log_info(), True)
        eq_(e1._should_log_debug(), True)
        eq_(e1.logger.isEnabledFor(logging.DEBUG), True)
        eq_(e1.logger.getEffectiveLevel(), logging.DEBUG)

        # echo=False restores the inherited WARN level
        e1.echo = False
        eq_(e1._should_log_info(), False)
        eq_(e1._should_log_debug(), False)
        eq_(e1.logger.isEnabledFor(logging.INFO), False)
        eq_(e1.logger.getEffectiveLevel(), logging.WARN)

    def test_echo_flag_independence(self):
        """test the echo flag's independence to a specific engine."""

        e1 = self._testing_engine()
        e2 = self._testing_engine()

        # only SELECT 1 (e1 echoing) and SELECT 6 (e2 echoing) should be
        # logged; each execute that echoes emits two records
        e1.echo = True
        e1.execute(select([1])).close()
        e2.execute(select([2])).close()

        e1.echo = False
        e1.execute(select([3])).close()
        e2.execute(select([4])).close()

        e2.echo = True
        e1.execute(select([5])).close()
        e2.execute(select([6])).close()

        assert self.buf.buffer[0].getMessage().startswith("SELECT 1")
        assert self.buf.buffer[2].getMessage().startswith("SELECT 6")
        assert len(self.buf.buffer) == 4
| 34.90169 | 79 | 0.529598 |
e121856bb571c277cb208936e115bf3e1e70fb94 | 660 | py | Python | ledfx/api/find_devices.py | Mattallmighty/LedFx-OLD | 9734e7ebf20e9a4b67aca32f932572003badca4d | [
"MIT"
] | null | null | null | ledfx/api/find_devices.py | Mattallmighty/LedFx-OLD | 9734e7ebf20e9a4b67aca32f932572003badca4d | [
"MIT"
] | 1 | 2022-02-19T06:33:19.000Z | 2022-02-19T06:33:19.000Z | ledfx/api/find_devices.py | Mattallmighty/LedFx-OLD | 9734e7ebf20e9a4b67aca32f932572003badca4d | [
"MIT"
] | null | null | null | from ledfx.config import save_config
from ledfx.api import RestEndpoint
from ledfx.utils import generate_id, async_fire_and_forget
from aiohttp import web
import logging
import json
_LOGGER = logging.getLogger(__name__)
class FindDevicesEndpoint(RestEndpoint):
"""REST end-point for detecting and adding wled devices"""
ENDPOINT_PATH = "/api/find_devices"
async def post(self) -> web.Response:
""" Find and add all WLED devices on the LAN """
async_fire_and_forget(self._ledfx.devices.find_wled_devices(), self._ledfx.loop)
response = { 'status' : 'success' }
return web.json_response(data=response, status=200) | 33 | 88 | 0.739394 |
efc2512fd7be7f1848a97e2e7ac1d02be4b85119 | 289 | py | Python | Modulo_1/semana4/practica_jueves/utileria/consola/fondo_colores.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana4/practica_jueves/utileria/consola/fondo_colores.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana4/practica_jueves/utileria/consola/fondo_colores.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | def fondo_amarillo(texto):
return '\x1b[0;39;43m' + texto
def fondo_rojo(texto):
    """Return *texto* prefixed with the ANSI escape for a red background."""
    prefijo = '\033[41m'
    return prefijo + texto
def fondo_verde(texto):
    """Return *texto* prefixed with the ANSI escape for a green background."""
    prefijo = '\033[42m'
    return prefijo + texto
def fondo_azul(texto):
    """Return *texto* prefixed with the ANSI escape for a blue background."""
    prefijo = '\033[44m'
    return prefijo + texto
def fondo_restablecer(texto):
    """Return *texto* prefixed with the ANSI reset escape (clears styling)."""
    reinicio = '\033[0m'
    return reinicio + texto
| 15.210526 | 34 | 0.653979 |
b4f79bda2551704bf1b7c87c3e502c9a1eeb69a4 | 293,469 | py | Python | pandas/core/frame.py | mike-cramblett/pandas | 597f9f31639eeb5724e49bec602e15b9bf8be092 | [
"BSD-3-Clause"
] | 1 | 2019-01-20T13:14:24.000Z | 2019-01-20T13:14:24.000Z | pandas/core/frame.py | mike-cramblett/pandas | 597f9f31639eeb5724e49bec602e15b9bf8be092 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/frame.py | mike-cramblett/pandas | 597f9f31639eeb5724e49bec602e15b9bf8be092 | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=E1101
# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
import collections
import functools
import itertools
import sys
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas._libs import lib, algos as libalgos
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature,
deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas import compat
from pandas.compat import (range, map, zip, lmap, lzip, StringIO, u,
OrderedDict, PY36, raise_with_traceback,
string_and_binary_types)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_dict_like,
is_datetime64tz_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetime64_any_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
infer_dtype_from_object,
ensure_float64,
ensure_int64,
ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCMultiIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms
from pandas.core import common as com
from pandas.core import nanops
from pandas.core import ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin as DatetimeLikeArray
)
from pandas.core.config import get_option
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
masked_rec_array_to_mgr, get_names_from_index, to_arrays,
reorder_arrays, init_ndarray, init_dict,
arrays_to_mgr, sanitize_index)
from pandas.core.series import Series
from pandas.io.formats import console
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
# ---------------------------------------------------------------------
# Docstring templates
# Keyword substitutions shared by many DataFrame method docstrings;
# interpolated via the Substitution/Appender decorators.
_shared_doc_kwargs = dict(
    axes='index, columns', klass='DataFrame',
    axes_single_arg="{0 or 'index', 1 or 'columns'}",
    axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
    optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
    versionadded_to_excel='',
    optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
    optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)

# Reusable parameter description for the ``numeric_only`` keyword of
# reduction methods.
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""

# Full docstring for DataFrame.merge / pandas.merge; the ``%s`` slot is
# filled with merge()-specific leading parameters at decoration time.
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
DataFrame.from_items : From sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
# ----------------------------------------------------------------------
# Constructors
    def __init__(self, data=None, index=None, columns=None, dtype=None,
                 copy=False):
        """
        Normalize any accepted ``data`` input (BlockManager, DataFrame,
        dict, masked array, ndarray/Series/Index, generic iterable, or
        scalar) into a BlockManager and delegate to ``NDFrame.__init__``.
        """
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)

        if isinstance(data, DataFrame):
            # Re-wrap the other frame's internal block manager.
            data = data._data

        if isinstance(data, BlockManager):
            mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
                                 dtype=dtype, copy=copy)
        elif isinstance(data, dict):
            mgr = init_dict(data, index, columns, dtype=dtype)
        elif isinstance(data, ma.MaskedArray):
            import numpy.ma.mrecords as mrecords
            # masked recarray
            if isinstance(data, mrecords.MaskedRecords):
                mgr = masked_rec_array_to_mgr(data, index, columns, dtype,
                                              copy)

            # a masked array
            else:
                mask = ma.getmaskarray(data)
                if mask.any():
                    # Replace masked entries with the (possibly upcast)
                    # fill value so the plain ndarray is safe to use.
                    data, fill_value = maybe_upcast(data, copy=True)
                    data.soften_mask()  # set hardmask False if it was True
                    data[mask] = fill_value
                else:
                    data = data.copy()
                mgr = init_ndarray(data, index, columns, dtype=dtype,
                                   copy=copy)

        elif isinstance(data, (np.ndarray, Series, Index)):
            if data.dtype.names:
                # Structured dtype: each named field becomes a column.
                data_columns = list(data.dtype.names)
                data = {k: data[k] for k in data_columns}
                if columns is None:
                    columns = data_columns
                mgr = init_dict(data, index, columns, dtype=dtype)
            elif getattr(data, 'name', None) is not None:
                # Named Series/Index becomes a single-column frame.
                mgr = init_dict({data.name: data}, index, columns,
                                dtype=dtype)
            else:
                mgr = init_ndarray(data, index, columns, dtype=dtype,
                                   copy=copy)

        # For data is list-like, or Iterable (will consume into list)
        elif (isinstance(data, compat.Iterable)
              and not isinstance(data, string_and_binary_types)):
            if not isinstance(data, compat.Sequence):
                data = list(data)
            if len(data) > 0:
                if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
                    if is_named_tuple(data[0]) and columns is None:
                        columns = data[0]._fields
                    arrays, columns = to_arrays(data, columns, dtype=dtype)
                    columns = ensure_index(columns)

                    # set the index
                    if index is None:
                        if isinstance(data[0], Series):
                            index = get_names_from_index(data)
                        elif isinstance(data[0], Categorical):
                            index = ibase.default_index(len(data[0]))
                        else:
                            index = ibase.default_index(len(data))

                    mgr = arrays_to_mgr(arrays, columns, index, columns,
                                        dtype=dtype)
                else:
                    mgr = init_ndarray(data, index, columns, dtype=dtype,
                                       copy=copy)
            else:
                mgr = init_dict({}, index, columns, dtype=dtype)
        else:
            try:
                arr = np.array(data, dtype=dtype, copy=copy)
            except (ValueError, TypeError) as e:
                exc = TypeError('DataFrame constructor called with '
                                'incompatible data and dtype: {e}'.format(e=e))
                raise_with_traceback(exc)

            if arr.ndim == 0 and index is not None and columns is not None:
                # Scalar input: broadcast to the full (index, columns) shape.
                values = cast_scalar_to_array((len(index), len(columns)),
                                              data, dtype=dtype)
                mgr = init_ndarray(values, index, columns,
                                   dtype=values.dtype, copy=False)
            else:
                raise ValueError('DataFrame constructor not properly called!')

        NDFrame.__init__(self, mgr, fastpath=True)
# ----------------------------------------------------------------------
    @property
    def axes(self):
        """
        Return a list representing the axes of the DataFrame.

        It has the row axis labels and column axis labels as the only members.
        They are returned in that order.

        Returns
        -------
        list
            ``[index, columns]``.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.axes
        [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
        dtype='object')]
        """
        return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
    def _repr_fits_horizontal_(self, ignore_width=False):
        """
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns.

        In case of a non-interactive session, no boundaries apply.

        `ignore_width` is here so ipynb+HTML output can behave the way
        users expect. display.max_columns remains in effect.
        GH3541, GH3573
        """
        width, height = console.get_console_size()
        max_columns = get_option("display.max_columns")
        nb_columns = len(self.columns)

        # exceed max columns
        if ((max_columns and nb_columns > max_columns) or
                ((not ignore_width) and width and nb_columns > (width // 2))):
            return False

        # used by repr_html under IPython notebook or scripts ignore terminal
        # dims
        if ignore_width or not console.in_interactive_session():
            return True

        if (get_option('display.width') is not None or
                console.in_ipython_frontend()):
            # check at least the column row for excessive width
            max_rows = 1
        else:
            max_rows = get_option("display.max_rows")

        # when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
        # the width of the rendered repr
        buf = StringIO()

        # only care about the stuff we'll actually print out
        # and to_string on entire frame may be expensive
        d = self

        if not (max_rows is None):  # max_rows is set: truncate before render
            # min of two, where one may be None
            d = d.iloc[:min(max_rows, len(d))]
        else:
            # unlimited rows: skip the expensive rendering check
            return True

        d.to_string(buf=buf)
        value = buf.getvalue()
        repr_width = max(len(l) for l in value.split('\n'))

        return repr_width < width
def _info_repr(self):
"""
True if the repr should show the info view.
"""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if console.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
    @Substitution(header='Write out the column names. If a list of strings '
                         'is given, it is assumed to be aliases for the '
                         'column names')
    @Substitution(shared_params=fmt.common_docstring,
                  returns=fmt.return_docstring)
    def to_string(self, buf=None, columns=None, col_space=None, header=True,
                  index=True, na_rep='NaN', formatters=None, float_format=None,
                  sparsify=None, index_names=True, justify=None,
                  max_rows=None, max_cols=None, show_dimensions=False,
                  decimal='.', line_width=None):
        """
        Render a DataFrame to a console-friendly tabular output.
        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.
        %(returns)s
        See Also
        --------
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
        >>> df = pd.DataFrame(d)
        >>> print(df.to_string())
           col1  col2
        0     1     4
        1     2     5
        2     3     6
        """
        # Delegate all rendering to DataFrameFormatter; the kwargs are
        # forwarded one-to-one.
        formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                           col_space=col_space, na_rep=na_rep,
                                           formatters=formatters,
                                           float_format=float_format,
                                           sparsify=sparsify, justify=justify,
                                           index_names=index_names,
                                           header=header, index=index,
                                           max_rows=max_rows,
                                           max_cols=max_cols,
                                           show_dimensions=show_dimensions,
                                           decimal=decimal,
                                           line_width=line_width)
        formatter.to_string()

        # When no buffer was supplied, return the rendered text directly.
        if buf is None:
            result = formatter.buf.getvalue()
            return result
# ----------------------------------------------------------------------
    @property
    def style(self):
        """
        Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.

        See Also
        --------
        pandas.io.formats.style.Styler
        """
        from pandas.io.formats.style import Styler
        return Styler(self)
def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
    def dot(self, other):
        """
        Compute the matrix multiplication between the DataFrame and other.

        This method computes the matrix product between the DataFrame and the
        values of another Series, DataFrame or a numpy array.

        It can also be called using ``self @ other`` in Python >= 3.5.

        Parameters
        ----------
        other : Series, DataFrame or array-like
            The other object to compute the matrix product with.

        Returns
        -------
        Series or DataFrame
            If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array,
            return the matrix product of self and other in a DataFrame or a
            np.array.

        See Also
        --------
        Series.dot: Similar method for Series.

        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order to
        compute the matrix multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        Here we multiply a DataFrame with a Series.

        >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> s = pd.Series([1, 1, 2, 1])
        >>> df.dot(s)
        0    -4
        1     5
        dtype: int64

        Here we multiply a DataFrame with another DataFrame.

        >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(other)
           0  1
        0  1  4
        1  2  2

        Note that the dot method gives the same result as @

        >>> df @ other
           0  1
        0  1  4
        1  2  2

        The dot method works also if other is an np.array.

        >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(arr)
           0  1
        0  1  4
        1  2  2
        """
        if isinstance(other, (Series, DataFrame)):
            # Align self's columns with other's index before multiplying.
            common = self.columns.union(other.index)
            if (len(common) > len(self.columns) or
                    len(common) > len(other.index)):
                raise ValueError('matrices are not aligned')

            left = self.reindex(columns=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            left = self
            lvals = self.values
            rvals = np.asarray(other)
            if lvals.shape[1] != rvals.shape[0]:
                raise ValueError('Dot product shape mismatch, '
                                 '{s} vs {r}'.format(s=lvals.shape,
                                                     r=rvals.shape))

        if isinstance(other, DataFrame):
            return self._constructor(np.dot(lvals, rvals), index=left.index,
                                     columns=other.columns)
        elif isinstance(other, Series):
            return Series(np.dot(lvals, rvals), index=left.index)
        elif isinstance(rvals, (np.ndarray, Index)):
            result = np.dot(lvals, rvals)
            if result.ndim == 2:
                return self._constructor(result, index=left.index)
            else:
                return Series(result, index=left.index)
        else:  # pragma: no cover
            raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.T.dot(np.transpose(other)).T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
    @classmethod
    def from_dict(cls, data, orient='columns', dtype=None, columns=None):
        """
        Construct DataFrame from dict of array-like or dicts.

        Creates DataFrame object from dictionary by columns or by index
        allowing dtype specification.

        Parameters
        ----------
        data : dict
            Of the form {field : array-like} or {field : dict}.
        orient : {'columns', 'index'}, default 'columns'
            The "orientation" of the data. If the keys of the passed dict
            should be the columns of the resulting DataFrame, pass 'columns'
            (default). Otherwise if the keys should be rows, pass 'index'.
        dtype : dtype, default None
            Data type to force, otherwise infer.
        columns : list, default None
            Column labels to use when ``orient='index'``. Raises a ValueError
            if used with ``orient='columns'``.

            .. versionadded:: 0.23.0

        Returns
        -------
        pandas.DataFrame

        See Also
        --------
        DataFrame.from_records : DataFrame from ndarray (structured
            dtype), list of tuples, dict, or DataFrame.
        DataFrame : DataFrame object creation using constructor.

        Examples
        --------
        By default the keys of the dict become the DataFrame columns:

        >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        >>> pd.DataFrame.from_dict(data)
           col_1 col_2
        0      3     a
        1      2     b
        2      1     c
        3      0     d

        Specify ``orient='index'`` to create the DataFrame using dictionary
        keys as rows:

        >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
        >>> pd.DataFrame.from_dict(data, orient='index')
               0  1  2  3
        row_1  3  2  1  0
        row_2  a  b  c  d

        When using the 'index' orientation, the column names can be
        specified manually:

        >>> pd.DataFrame.from_dict(data, orient='index',
        ...                        columns=['A', 'B', 'C', 'D'])
               A  B  C  D
        row_1  3  2  1  0
        row_2  a  b  c  d
        """
        index = None
        orient = orient.lower()
        if orient == 'index':
            if len(data) > 0:
                # TODO speed up Series case
                if isinstance(list(data.values())[0], (Series, dict)):
                    # Nested mapping: invert it so inner keys become columns.
                    data = _from_nested_dict(data)
                else:
                    data, index = list(data.values()), list(data.keys())
        elif orient == 'columns':
            if columns is not None:
                raise ValueError("cannot use columns parameter with "
                                 "orient='columns'")
        else:  # pragma: no cover
            raise ValueError('only recognize index or columns for orient')

        return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy=False):
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
Returns
-------
array : numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogenous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
    def to_dict(self, orient='dict', into=dict):
        """
        Convert the DataFrame to a dictionary.

        The type of the key-value pairs can be customized with the parameters
        (see below).

        Parameters
        ----------
        orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
            Determines the type of the values of the dictionary.

            - 'dict' (default) : dict like {column -> {index -> value}}
            - 'list' : dict like {column -> [values]}
            - 'series' : dict like {column -> Series(values)}
            - 'split' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
            - 'records' : list like
              [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}

            Abbreviations are allowed. `s` indicates `series` and `sp`
            indicates `split`.
        into : class, default dict
            The collections.Mapping subclass used for all Mappings
            in the return value. Can be the actual class or an empty
            instance of the mapping type you want. If you want a
            collections.defaultdict, you must pass it initialized.

            .. versionadded:: 0.21.0

        Returns
        -------
        dict, list or collections.Mapping
            Return a collections.Mapping object representing the DataFrame.
            The resulting transformation depends on the `orient` parameter.

        See Also
        --------
        DataFrame.from_dict: Create a DataFrame from a dictionary.
        DataFrame.to_json: Convert a DataFrame to JSON format.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2],
        ...                    'col2': [0.5, 0.75]},
        ...                   index=['row1', 'row2'])
        >>> df.to_dict()
        {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}

        You can specify the return orientation.

        >>> df.to_dict('split')
        {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
         'data': [[1, 0.5], [2, 0.75]]}

        >>> df.to_dict('records')
        [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]

        >>> df.to_dict('index')
        {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}

        You can also specify the mapping type.

        >>> from collections import OrderedDict, defaultdict
        >>> df.to_dict(into=OrderedDict)
        OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
                     ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])

        If you want a `defaultdict`, you need to initialize it:

        >>> dd = defaultdict(list)
        >>> df.to_dict('records', into=dd)
        [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
         defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
        """
        if not self.columns.is_unique:
            warnings.warn("DataFrame columns are not unique, some "
                          "columns will be omitted.", UserWarning,
                          stacklevel=2)
        # GH16122
        into_c = com.standardize_mapping(into)
        # Dispatch on the orient prefix; abbreviations are accepted.
        if orient.lower().startswith('d'):
            # 'dict': {column -> {index -> value}}
            return into_c(
                (k, v.to_dict(into)) for k, v in compat.iteritems(self))
        elif orient.lower().startswith('l'):
            # 'list': {column -> [values]}
            return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
        elif orient.lower().startswith('sp'):
            # 'split': separate index / columns / data entries
            return into_c((('index', self.index.tolist()),
                           ('columns', self.columns.tolist()),
                           ('data', [
                               list(map(com.maybe_box_datetimelike, t))
                               for t in self.itertuples(index=False)]
                           )))
        elif orient.lower().startswith('s'):
            # 'series': {column -> Series}
            return into_c((k, com.maybe_box_datetimelike(v))
                          for k, v in compat.iteritems(self))
        elif orient.lower().startswith('r'):
            # 'records': one mapping per row
            return [
                into_c((k, com.maybe_box_datetimelike(v))
                       for k, v in compat.iteritems(row._asdict()))
                for row in self.itertuples(index=False)]
        elif orient.lower().startswith('i'):
            # 'index': {index -> {column -> value}}
            if not self.index.is_unique:
                raise ValueError(
                    "DataFrame index must be unique for orient='index'."
                )
            return into_c((t[0], dict(zip(self.columns, t[1:])))
                          for t in self.itertuples())
        else:
            raise ValueError("orient '{o}' not understood".format(o=orient))
    def to_gbq(self, destination_table, project_id=None, chunksize=None,
               reauth=False, if_exists='fail', auth_local_webserver=False,
               table_schema=None, location=None, progress_bar=True,
               credentials=None, verbose=None, private_key=None):
        """
        Write a DataFrame to a Google BigQuery table.

        This function requires the `pandas-gbq package
        <https://pandas-gbq.readthedocs.io>`__.

        See the `How to authenticate with Google BigQuery
        <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
        guide for authentication instructions.

        Parameters
        ----------
        destination_table : str
            Name of table to be written, in the form ``dataset.tablename``.
        project_id : str, optional
            Google BigQuery Account project ID. Optional when available from
            the environment.
        chunksize : int, optional
            Number of rows to be inserted in each chunk from the dataframe.
            Set to ``None`` to load the whole dataframe at once.
        reauth : bool, default False
            Force Google BigQuery to re-authenticate the user. This is useful
            if multiple accounts are used.
        if_exists : str, default 'fail'
            Behavior when the destination table exists. Value can be one of:

            ``'fail'``
                If table exists, do nothing.
            ``'replace'``
                If table exists, drop it, recreate it, and insert data.
            ``'append'``
                If table exists, insert data. Create if does not exist.
        auth_local_webserver : bool, default False
            Use the `local webserver flow`_ instead of the `console flow`_
            when getting user credentials.

            .. _local webserver flow:
                http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
            .. _console flow:
                http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

            *New in version 0.2.0 of pandas-gbq*.
        table_schema : list of dicts, optional
            List of BigQuery table fields to which according DataFrame
            columns conform to, e.g. ``[{'name': 'col1', 'type':
            'STRING'},...]``. If schema is not provided, it will be
            generated according to dtypes of DataFrame columns. See
            BigQuery API documentation on available names of a field.

            *New in version 0.3.1 of pandas-gbq*.
        location : str, optional
            Location where the load job should run. See the `BigQuery locations
            documentation
            <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
            list of available locations. The location must match that of the
            target dataset.

            *New in version 0.5.0 of pandas-gbq*.
        progress_bar : bool, default True
            Use the library `tqdm` to show the progress bar for the upload,
            chunk by chunk.

            *New in version 0.5.0 of pandas-gbq*.
        credentials : google.auth.credentials.Credentials, optional
            Credentials for accessing Google APIs. Use this parameter to
            override default credentials, such as to use Compute Engine
            :class:`google.auth.compute_engine.Credentials` or Service
            Account :class:`google.oauth2.service_account.Credentials`
            directly.

            *New in version 0.8.0 of pandas-gbq*.

            .. versionadded:: 0.24.0
        verbose : bool, deprecated
            Deprecated in pandas-gbq version 0.4.0. Use the `logging module
            to adjust verbosity instead
            <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
        private_key : str, deprecated
            Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
            parameter and
            :func:`google.oauth2.service_account.Credentials.from_service_account_info`
            or
            :func:`google.oauth2.service_account.Credentials.from_service_account_file`
            instead.

            Service account private key in JSON format. Can be file path
            or string contents. This is useful for remote server
            authentication (eg. Jupyter/IPython notebook on remote host).

        See Also
        --------
        pandas_gbq.to_gbq : This function in the pandas-gbq library.
        pandas.read_gbq : Read a DataFrame from Google BigQuery.
        """
        # Thin wrapper: all work is delegated to the pandas-gbq package,
        # imported lazily so it stays an optional dependency.
        from pandas.io import gbq
        return gbq.to_gbq(
            self, destination_table, project_id=project_id,
            chunksize=chunksize, reauth=reauth, if_exists=if_exists,
            auth_local_webserver=auth_local_webserver,
            table_schema=table_schema, location=location,
            progress_bar=progress_bar, credentials=credentials,
            verbose=verbose, private_key=private_key)
    @classmethod
    def from_records(cls, data, index=None, exclude=None, columns=None,
                     coerce_float=False, nrows=None):
        """
        Convert structured or record ndarray to DataFrame.

        Parameters
        ----------
        data : ndarray (structured dtype), list of tuples, dict, or DataFrame
        index : string, list of fields, array-like
            Field of array to use as the index, alternately a specific set of
            input labels to use
        exclude : sequence, default None
            Columns or fields to exclude
        columns : sequence, default None
            Column names to use. If the passed data do not have names
            associated with them, this argument provides names for the
            columns. Otherwise this argument indicates the order of the columns
            in the result (any names not found in the data will become all-NA
            columns)
        coerce_float : boolean, default False
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets
        nrows : int, default None
            Number of rows to read if data is an iterator

        Returns
        -------
        df : DataFrame
        """
        # Make a copy of the input columns so we can modify it
        if columns is not None:
            columns = ensure_index(columns)

        if is_iterator(data):
            if nrows == 0:
                return cls()

            try:
                first_row = next(data)
            except StopIteration:
                return cls(index=index, columns=columns)

            dtype = None
            if hasattr(first_row, 'dtype') and first_row.dtype.names:
                dtype = first_row.dtype

            values = [first_row]

            # Materialize the iterator (up to nrows rows, if given).
            if nrows is None:
                values += data
            else:
                values.extend(itertools.islice(data, nrows - 1))

            if dtype is not None:
                data = np.array(values, dtype=dtype)
            else:
                data = values

        if isinstance(data, dict):
            if columns is None:
                columns = arr_columns = ensure_index(sorted(data))
                arrays = [data[k] for k in columns]
            else:
                arrays = []
                arr_columns = []
                for k, v in compat.iteritems(data):
                    if k in columns:
                        arr_columns.append(k)
                        arrays.append(v)

                arrays, arr_columns = reorder_arrays(arrays, arr_columns,
                                                     columns)

        elif isinstance(data, (np.ndarray, DataFrame)):
            arrays, columns = to_arrays(data, columns)
            if columns is not None:
                columns = ensure_index(columns)
            arr_columns = columns
        else:
            arrays, arr_columns = to_arrays(data, columns,
                                            coerce_float=coerce_float)

            arr_columns = ensure_index(arr_columns)
            if columns is not None:
                columns = ensure_index(columns)
            else:
                columns = arr_columns

        if exclude is None:
            exclude = set()
        else:
            exclude = set(exclude)

        result_index = None
        if index is not None:
            if (isinstance(index, compat.string_types) or
                    not hasattr(index, "__iter__")):
                # Single field name: pull that column out as the index.
                i = columns.get_loc(index)
                exclude.add(index)
                if len(arrays) > 0:
                    result_index = Index(arrays[i], name=index)
                else:
                    result_index = Index([], name=index)
            else:
                try:
                    # Several field names: build a (Multi)Index from them.
                    to_remove = [arr_columns.get_loc(field) for field in index]

                    index_data = [arrays[i] for i in to_remove]
                    result_index = ensure_index_from_sequences(index_data,
                                                               names=index)

                    exclude.update(index)
                except Exception:
                    # Fall back to treating ``index`` as explicit labels.
                    result_index = index

        if any(exclude):
            arr_exclude = [x for x in exclude if x in arr_columns]
            to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
            arrays = [v for i, v in enumerate(arrays) if i not in to_remove]

            arr_columns = arr_columns.drop(arr_exclude)
            columns = columns.drop(exclude)

        mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)

        return cls(mgr)
    def to_records(self, index=True, convert_datetime64=None,
                   column_dtypes=None, index_dtypes=None):
        """
        Convert DataFrame to a NumPy record array.
        Index will be included as the first field of the record array if
        requested.
        Parameters
        ----------
        index : bool, default True
            Include index in resulting record array, stored in 'index'
            field or using the index label, if set.
        convert_datetime64 : bool, default None
            .. deprecated:: 0.23.0
                Whether to convert the index to datetime.datetime if it is a
                DatetimeIndex.
        column_dtypes : str, type, dict, default None
            .. versionadded:: 0.24.0
            If a string or type, the data type to store all columns. If
            a dictionary, a mapping of column names and indices (zero-indexed)
            to specific data types.
        index_dtypes : str, type, dict, default None
            .. versionadded:: 0.24.0
            If a string or type, the data type to store all index levels. If
            a dictionary, a mapping of index level names and indices
            (zero-indexed) to specific data types.
            This mapping is applied only if `index=True`.
        Returns
        -------
        numpy.recarray
            NumPy ndarray with the DataFrame labels as fields and each row
            of the DataFrame as entries.
        See Also
        --------
        DataFrame.from_records: Convert structured or record ndarray
            to DataFrame.
        numpy.recarray: An ndarray that allows field access using
            attributes, analogous to typed columns in a
            spreadsheet.
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
        ...                   index=['a', 'b'])
        >>> df
           A     B
        a  1  0.50
        b  2  0.75
        >>> df.to_records()
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
        If the DataFrame index has no label then the recarray field name
        is set to 'index'. If the index has a label then this is used as the
        field name:
        >>> df.index = df.index.rename("I")
        >>> df.to_records()
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
        The index can be excluded from the record array:
        >>> df.to_records(index=False)
        rec.array([(1, 0.5 ), (2, 0.75)],
                  dtype=[('A', '<i8'), ('B', '<f8')])
        Data types can be specified for the columns:
        >>> df.to_records(column_dtypes={"A": "int32"})
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
        As well as for the index:
        >>> df.to_records(index_dtypes="<S2")
        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
                  dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
        >>> index_dtypes = "<S{}".format(df.index.str.len().max())
        >>> df.to_records(index_dtypes=index_dtypes)
        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
                  dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
        """
        # Deprecated path: only warn when the caller explicitly passed a value.
        if convert_datetime64 is not None:
            warnings.warn("The 'convert_datetime64' parameter is "
                          "deprecated and will be removed in a future "
                          "version",
                          FutureWarning, stacklevel=2)
        if index:
            if is_datetime64_any_dtype(self.index) and convert_datetime64:
                ix_vals = [self.index.to_pydatetime()]
            else:
                if isinstance(self.index, MultiIndex):
                    # array of tuples to numpy cols. copy copy copy
                    ix_vals = lmap(np.array, zip(*self.index.values))
                else:
                    ix_vals = [self.index.values]
            # Index-level arrays come first, then one array per column.
            arrays = ix_vals + [self[c].get_values() for c in self.columns]
            count = 0
            index_names = list(self.index.names)
            # Fill in placeholder field names for unnamed index levels:
            # 'level_<n>' for MultiIndex levels, 'index' for a flat index.
            if isinstance(self.index, MultiIndex):
                for i, n in enumerate(index_names):
                    if n is None:
                        index_names[i] = 'level_%d' % count
                        count += 1
            elif index_names[0] is None:
                index_names = ['index']
            # lmap/compat.text_type are py2/py3 shims; field names must be str.
            names = (lmap(compat.text_type, index_names) +
                     lmap(compat.text_type, self.columns))
        else:
            arrays = [self[c].get_values() for c in self.columns]
            names = lmap(compat.text_type, self.columns)
            index_names = []
        index_len = len(index_names)
        formats = []
        for i, v in enumerate(arrays):
            index = i
            # When the names and arrays are collected, we
            # first collect those in the DataFrame's index,
            # followed by those in its columns.
            #
            # Thus, the total length of the array is:
            # len(index_names) + len(DataFrame.columns).
            #
            # This check allows us to see whether we are
            # handling a name / array in the index or column.
            if index < index_len:
                dtype_mapping = index_dtypes
                name = index_names[index]
            else:
                index -= index_len
                dtype_mapping = column_dtypes
                name = self.columns[index]
            # We have a dictionary, so we get the data type
            # associated with the index or column (which can
            # be denoted by its name in the DataFrame or its
            # position in DataFrame's array of indices or
            # columns, whichever is applicable.
            if is_dict_like(dtype_mapping):
                if name in dtype_mapping:
                    dtype_mapping = dtype_mapping[name]
                elif index in dtype_mapping:
                    dtype_mapping = dtype_mapping[index]
                else:
                    dtype_mapping = None
            # If no mapping can be found, use the array's
            # dtype attribute for formatting.
            #
            # A valid dtype must either be a type or
            # string naming a type.
            if dtype_mapping is None:
                formats.append(v.dtype)
            elif isinstance(dtype_mapping, (type, compat.string_types)):
                formats.append(dtype_mapping)
            else:
                element = "row" if i < index_len else "column"
                msg = ("Invalid dtype {dtype} specified for "
                       "{element} {name}").format(dtype=dtype_mapping,
                                                  element=element, name=name)
                raise ValueError(msg)
        return np.rec.fromarrays(
            arrays,
            dtype={'names': names, 'formats': formats}
        )
    @classmethod
    def from_items(cls, items, columns=None, orient='columns'):
        """
        Construct a DataFrame from a list of tuples.
        .. deprecated:: 0.23.0
          `from_items` is deprecated and will be removed in a future version.
          Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
          instead.
          :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
          may be used to preserve the key order.
        Convert (key, value) pairs to DataFrame. The keys will be the axis
        index (usually the columns, but depends on the specified
        orientation). The values should be arrays or Series.
        Parameters
        ----------
        items : sequence of (key, value) pairs
            Values should be arrays or Series.
        columns : sequence of column labels, optional
            Must be passed if orient='index'.
        orient : {'columns', 'index'}, default 'columns'
            The "orientation" of the data. If the keys of the
            input correspond to column labels, pass 'columns'
            (default). Otherwise if the keys correspond to the index,
            pass 'index'.
        Returns
        -------
        frame : DataFrame
        """
        warnings.warn("from_items is deprecated. Please use "
                      "DataFrame.from_dict(dict(items), ...) instead. "
                      "DataFrame.from_dict(OrderedDict(items)) may be used to "
                      "preserve the key order.",
                      FutureWarning, stacklevel=2)
        keys, values = lzip(*items)
        if orient == 'columns':
            if columns is not None:
                columns = ensure_index(columns)
                # dict() collapses duplicate keys; a shorter dict means
                # `items` contained repeated names, in which case the passed
                # columns must match the keys verbatim and values are used
                # positionally as-is.
                idict = dict(items)
                if len(idict) < len(items):
                    if not columns.equals(ensure_index(keys)):
                        raise ValueError('With non-unique item names, passed '
                                         'columns must be identical')
                    arrays = values
                else:
                    # Reorder (and subset) the arrays to the requested columns.
                    arrays = [idict[k] for k in columns if k in idict]
            else:
                columns = ensure_index(keys)
                arrays = values
            # GH 17312
            # Provide more informative error msg when scalar values passed
            try:
                return cls._from_arrays(arrays, columns, None)
            except ValueError:
                if not is_nested_list_like(values):
                    raise ValueError('The value in each (key, value) pair '
                                     'must be an array, Series, or dict')
        elif orient == 'index':
            if columns is None:
                raise TypeError("Must pass columns with orient='index'")
            keys = ensure_index(keys)
            # GH 17312
            # Provide more informative error msg when scalar values passed
            try:
                # Transpose the row-wise values into column arrays and let
                # maybe_convert_objects infer better dtypes per column.
                arr = np.array(values, dtype=object).T
                data = [lib.maybe_convert_objects(v) for v in arr]
                return cls._from_arrays(data, columns, keys)
            except TypeError:
                if not is_nested_list_like(values):
                    raise ValueError('The value in each (key, value) pair '
                                     'must be an array, Series, or dict')
        else:  # pragma: no cover
            raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file.
.. deprecated:: 0.21.0
Use :func:`pandas.read_csv` instead.
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or new (expanded format) if False)
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
y : DataFrame
See Also
--------
pandas.read_csv
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
Implement the sparse version of the DataFrame meaning that any data
matching a specific value it's omitted in the representation.
The sparse DataFrame allows for a more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to the its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
    def to_panel(self):
        """
        Transform long (stacked) format (DataFrame) into wide (3D, Panel)
        format.
        .. deprecated:: 0.20.0
        Currently the index of the DataFrame must be a 2-level MultiIndex. This
        may be generalized later
        Returns
        -------
        panel : Panel
        """
        # only support this kind for now
        if (not isinstance(self.index, MultiIndex) or  # pragma: no cover
                len(self.index.levels) != 2):
            raise NotImplementedError('Only 2-level MultiIndex are supported.')
        if not self.index.is_unique:
            raise ValueError("Can't convert non-uniquely indexed "
                             "DataFrame to Panel")
        self._consolidate_inplace()
        # minor axis must be sorted
        if self.index.lexsort_depth < 2:
            selfsorted = self.sort_index(level=0)
        else:
            selfsorted = self
        # Level 0 becomes the Panel's major axis, level 1 its minor axis.
        major_axis, minor_axis = selfsorted.index.levels
        major_codes, minor_codes = selfsorted.index.codes
        shape = len(major_axis), len(minor_axis)
        # preserve names, if any
        major_axis = major_axis.copy()
        major_axis.name = self.index.names[0]
        minor_axis = minor_axis.copy()
        minor_axis.name = self.index.names[1]
        # create new axes
        new_axes = [selfsorted.columns, major_axis, minor_axis]
        # create new manager
        # reshape_nd scatters the 2D blocks into the 3D layout using the
        # per-row level codes as coordinates.
        new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
                                              labels=[major_codes,
                                                      minor_codes],
                                              shape=shape,
                                              ref_items=selfsorted.columns)
        return self._constructor_expanddim(new_mgr)
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be used
read by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, partition_cols=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset
Columns are partitioned in the order they are given
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index,
partition_cols=partition_cols, **kwargs)
    @Substitution(header='Whether to print column labels, default True')
    @Substitution(shared_params=fmt.common_docstring,
                  returns=fmt.return_docstring)
    def to_html(self, buf=None, columns=None, col_space=None, header=True,
                index=True, na_rep='NaN', formatters=None, float_format=None,
                sparsify=None, index_names=True, justify=None, max_rows=None,
                max_cols=None, show_dimensions=False, decimal='.',
                bold_rows=True, classes=None, escape=True, notebook=False,
                border=None, table_id=None, render_links=False):
        """
        Render a DataFrame as an HTML table.
        %(shared_params)s
        bold_rows : bool, default True
            Make the row labels bold in the output.
        classes : str or list or tuple, default None
            CSS class(es) to apply to the resulting html table.
        escape : bool, default True
            Convert the characters <, >, and & to HTML-safe sequences.
        notebook : {True, False}, default False
            Whether the generated HTML is for IPython Notebook.
        border : int
            A ``border=border`` attribute is included in the opening
            `<table>` tag. Default ``pd.options.html.border``.
            .. versionadded:: 0.19.0
        table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
            .. versionadded:: 0.23.0
        render_links : bool, default False
            Convert URLs to HTML links.
            .. versionadded:: 0.24.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if (justify is not None and
                justify not in fmt._VALID_JUSTIFY_PARAMETERS):
            raise ValueError("Invalid value for justify parameter")
        # All shared rendering options are handled by DataFrameFormatter;
        # only the HTML-specific ones are passed to to_html below.
        formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                           col_space=col_space, na_rep=na_rep,
                                           formatters=formatters,
                                           float_format=float_format,
                                           sparsify=sparsify, justify=justify,
                                           index_names=index_names,
                                           header=header, index=index,
                                           bold_rows=bold_rows, escape=escape,
                                           max_rows=max_rows,
                                           max_cols=max_cols,
                                           show_dimensions=show_dimensions,
                                           decimal=decimal, table_id=table_id,
                                           render_links=render_links)
        # TODO: a generic formatter wld b in DataFrameFormatter
        formatter.to_html(classes=classes, notebook=notebook, border=border)
        # Mirror to_string: return the rendered markup only when no buffer
        # was supplied; otherwise the output went to `buf`.
        if buf is None:
            return formatter.buf.getvalue()
# ----------------------------------------------------------------------
    def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
             null_counts=None):
        """
        Print a concise summary of a DataFrame.
        This method prints information about a DataFrame including
        the index dtype and column dtypes, non-null values and memory usage.
        Parameters
        ----------
        verbose : bool, optional
            Whether to print the full summary. By default, the setting in
            ``pandas.options.display.max_info_columns`` is followed.
        buf : writable buffer, defaults to sys.stdout
            Where to send the output. By default, the output is printed to
            sys.stdout. Pass a writable buffer if you need to further process
            the output.
        max_cols : int, optional
            When to switch from the verbose to the truncated output. If the
            DataFrame has more than `max_cols` columns, the truncated output
            is used. By default, the setting in
            ``pandas.options.display.max_info_columns`` is used.
        memory_usage : bool, str, optional
            Specifies whether total memory usage of the DataFrame
            elements (including the index) should be displayed. By default,
            this follows the ``pandas.options.display.memory_usage`` setting.
            True always show memory usage. False never shows memory usage.
            A value of 'deep' is equivalent to "True with deep introspection".
            Memory usage is shown in human-readable units (base-2
            representation). Without deep introspection a memory estimation is
            made based in column dtype and number of rows assuming values
            consume the same memory amount for corresponding dtypes. With deep
            memory introspection, a real memory usage calculation is performed
            at the cost of computational resources.
        null_counts : bool, optional
            Whether to show the non-null counts. By default, this is shown
            only if the frame is smaller than
            ``pandas.options.display.max_info_rows`` and
            ``pandas.options.display.max_info_columns``. A value of True always
            shows the counts, and False never shows the counts.
        Returns
        -------
        None
            This method prints a summary of a DataFrame and returns None.
        See Also
        --------
        DataFrame.describe: Generate descriptive statistics of DataFrame
            columns.
        DataFrame.memory_usage: Memory usage of DataFrame columns.
        Examples
        --------
        >>> int_values = [1, 2, 3, 4, 5]
        >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
        >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
        >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
        ...                   "float_col": float_values})
        >>> df
           int_col text_col  float_col
        0        1    alpha       0.00
        1        2     beta       0.25
        2        3    gamma       0.50
        3        4    delta       0.75
        4        5  epsilon       1.00
        Prints information of all columns:
        >>> df.info(verbose=True)
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 5 entries, 0 to 4
        Data columns (total 3 columns):
        int_col      5 non-null int64
        text_col     5 non-null object
        float_col    5 non-null float64
        dtypes: float64(1), int64(1), object(1)
        memory usage: 200.0+ bytes
        Prints a summary of columns count and its dtypes but not per column
        information:
        >>> df.info(verbose=False)
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 5 entries, 0 to 4
        Columns: 3 entries, int_col to float_col
        dtypes: float64(1), int64(1), object(1)
        memory usage: 200.0+ bytes
        Pipe output of DataFrame.info to buffer instead of sys.stdout, get
        buffer content and writes to a text file:
        >>> import io
        >>> buffer = io.StringIO()
        >>> df.info(buf=buffer)
        >>> s = buffer.getvalue()
        >>> with open("df_info.txt", "w",
        ...           encoding="utf-8") as f:  # doctest: +SKIP
        ...     f.write(s)
        260
        The `memory_usage` parameter allows deep introspection mode, specially
        useful for big DataFrames and fine-tune memory optimization:
        >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
        >>> df = pd.DataFrame({
        ...     'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
        ...     'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
        ...     'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
        ... })
        >>> df.info()
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 1000000 entries, 0 to 999999
        Data columns (total 3 columns):
        column_1    1000000 non-null object
        column_2    1000000 non-null object
        column_3    1000000 non-null object
        dtypes: object(3)
        memory usage: 22.9+ MB
        >>> df.info(memory_usage='deep')
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 1000000 entries, 0 to 999999
        Data columns (total 3 columns):
        column_1    1000000 non-null object
        column_2    1000000 non-null object
        column_3    1000000 non-null object
        dtypes: object(3)
        memory usage: 188.8 MB
        """
        if buf is None:  # pragma: no cover
            buf = sys.stdout
        # Accumulate output lines; flushed to `buf` in one go at the end.
        lines = []
        lines.append(str(type(self)))
        lines.append(self.index._summary())
        if len(self.columns) == 0:
            lines.append('Empty {name}'.format(name=type(self).__name__))
            fmt.buffer_put_lines(buf, lines)
            return
        cols = self.columns
        # hack
        if max_cols is None:
            max_cols = get_option('display.max_info_columns',
                                  len(self.columns) + 1)
        max_rows = get_option('display.max_info_rows', len(self) + 1)
        if null_counts is None:
            # Default: only count non-nulls when the frame is small enough.
            show_counts = ((len(self.columns) <= max_cols) and
                           (len(self) < max_rows))
        else:
            show_counts = null_counts
        exceeds_info_cols = len(self.columns) > max_cols
        # One line per column: name, optional non-null count, and dtype.
        def _verbose_repr():
            lines.append('Data columns (total %d columns):' %
                         len(self.columns))
            space = max(len(pprint_thing(k)) for k in self.columns) + 4
            counts = None
            tmpl = "{count}{dtype}"
            if show_counts:
                counts = self.count()
                if len(cols) != len(counts):  # pragma: no cover
                    raise AssertionError(
                        'Columns must equal counts '
                        '({cols:d} != {counts:d})'.format(
                            cols=len(cols), counts=len(counts)))
                tmpl = "{count} non-null {dtype}"
            dtypes = self.dtypes
            for i, col in enumerate(self.columns):
                dtype = dtypes.iloc[i]
                col = pprint_thing(col)
                count = ""
                if show_counts:
                    count = counts.iloc[i]
                lines.append(_put_str(col, space) + tmpl.format(count=count,
                                                                dtype=dtype))
        # Truncated alternative: one summary line for all columns.
        def _non_verbose_repr():
            lines.append(self.columns._summary(name='Columns'))
        def _sizeof_fmt(num, size_qualifier):
            # returns size in human readable format
            for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
                if num < 1024.0:
                    return ("{num:3.1f}{size_q} "
                            "{x}".format(num=num, size_q=size_qualifier, x=x))
                num /= 1024.0
            return "{num:3.1f}{size_q} {pb}".format(num=num,
                                                    size_q=size_qualifier,
                                                    pb='PB')
        if verbose:
            _verbose_repr()
        elif verbose is False:  # specifically set to False, not nesc None
            _non_verbose_repr()
        else:
            # verbose unset: choose based on the column-count threshold.
            if exceeds_info_cols:
                _non_verbose_repr()
            else:
                _verbose_repr()
        counts = self.get_dtype_counts()
        dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
                  in sorted(compat.iteritems(counts))]
        lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
        if memory_usage is None:
            memory_usage = get_option('display.memory_usage')
        if memory_usage:
            # append memory usage of df to display
            size_qualifier = ''
            if memory_usage == 'deep':
                deep = True
            else:
                # size_qualifier is just a best effort; not guaranteed to catch
                # all cases (e.g., it misses categorical data even with object
                # categories)
                deep = False
                if ('object' in counts or
                        self.index._is_memory_usage_qualified()):
                    size_qualifier = '+'
            mem_usage = self.memory_usage(index=True, deep=deep).sum()
            lines.append("memory usage: {mem}\n".format(
                mem=_sizeof_fmt(mem_usage, size_qualifier)))
        fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True`` the memory usage of the
index the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
sizes : Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
pandas.Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 (1+0j) 1 True
1 1 1.0 (1+0j) 1 True
2 1 1.0 (1+0j) 1 True
3 1 1.0 (1+0j) 1 True
4 1 1.0 (1+0j) 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
Parameters
----------
i : int, slice, or sequence of integers
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._convert_to_indexer(key, axis=1,
raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
See Also
--------
pandas.eval
DataFrame.eval
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
pandas.eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrames index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrames index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged :: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {item: Series(self._data.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)}
def lookup(self, row_labels, col_labels):
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Examples
--------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index, columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
dropped : pandas.DataFrame
Raises
------
KeyError
If none of the labels are found in the selected axis
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3,0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super(DataFrame, self).drop(labels=labels, axis=axis,
index=index, columns=columns,
level=level, inplace=inplace,
errors=errors)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis, fill_value=fill_value)
    def set_index(self, keys, drop=True, append=False, inplace=False,
                  verify_integrity=False):
        """
        Set the DataFrame index using existing columns.

        Set the DataFrame index (row labels) using one or more existing
        columns or arrays (of the correct length). The index can replace the
        existing index or expand on it.

        Parameters
        ----------
        keys : label or array-like or list of labels/arrays
            This parameter can be either a single column key, a single array of
            the same length as the calling DataFrame, or a list containing an
            arbitrary combination of column keys and arrays. Here, "array"
            encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
        drop : bool, default True
            Delete columns to be used as the new index.
        append : bool, default False
            Whether to append columns to existing index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        verify_integrity : bool, default False
            Check the new index for duplicates. Otherwise defer the check until
            necessary. Setting to False will improve the performance of this
            method.

        Returns
        -------
        DataFrame
            Changed row labels.

        Raises
        ------
        ValueError
            If `keys` is not a valid column key / one-dimensional array (or
            list thereof), or if `verify_integrity` is True and the new
            index has duplicates.
        KeyError
            If any scalar/tuple entry of `keys` is not a column of the frame.

        See Also
        --------
        DataFrame.reset_index : Opposite of set_index.
        DataFrame.reindex : Change to new indices or expand indices.
        DataFrame.reindex_like : Change to same indices as other DataFrame.

        Examples
        --------
        >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
        ...                    'year': [2012, 2014, 2013, 2014],
        ...                    'sale': [55, 40, 84, 31]})
        >>> df
           month  year  sale
        0      1  2012    55
        1      4  2014    40
        2      7  2013    84
        3     10  2014    31

        Set the index to become the 'month' column:

        >>> df.set_index('month')
               year  sale
        month
        1      2012    55
        4      2014    40
        7      2013    84
        10     2014    31

        Create a MultiIndex using columns 'year' and 'month':

        >>> df.set_index(['year', 'month'])
                    sale
        year  month
        2012  1     55
        2014  4     40
        2013  7     84
        2014  10    31

        Create a MultiIndex using an Index and a column:

        >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
                 month sale
           year
        1  2012  1     55
        2  2014  4     40
        3  2013  7     84
        4  2014  10    31

        Create a MultiIndex using two Series:

        >>> s = pd.Series([1, 2, 3, 4])
        >>> df.set_index([s, s**2])
              month  year  sale
        1 1       1  2012    55
        2 4       4  2014    40
        3 9       7  2013    84
        4 16     10  2014    31
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        err_msg = ('The parameter "keys" may be a column key, one-dimensional '
                   'array, or a list containing only valid column keys and '
                   'one-dimensional arrays.')
        # Normalize `keys` to a list so it can be iterated uniformly below.
        if (is_scalar(keys) or isinstance(keys, tuple)
                or isinstance(keys, (ABCIndexClass, ABCSeries, np.ndarray))):
            # make sure we have a container of keys/arrays we can iterate over
            # tuples can appear as valid column keys!
            keys = [keys]
        elif not isinstance(keys, list):
            raise ValueError(err_msg)
        # Validate every entry up front so we fail before mutating anything.
        missing = []
        for col in keys:
            if (is_scalar(col) or isinstance(col, tuple)):
                # if col is a valid column key, everything is fine
                # tuples are always considered keys, never as list-likes
                if col not in self:
                    missing.append(col)
            elif (not isinstance(col, (ABCIndexClass, ABCSeries,
                                       np.ndarray, list))
                  or getattr(col, 'ndim', 1) > 1):
                raise ValueError(err_msg)
        if missing:
            raise KeyError('{}'.format(missing))
        # Only mutate `self` when inplace was requested.
        if inplace:
            frame = self
        else:
            frame = self.copy()
        # `arrays`/`names` accumulate the level values and level names of
        # the index being built.
        arrays = []
        names = []
        if append:
            # Keep the existing index levels in front of the new ones.
            names = [x for x in self.index.names]
            if isinstance(self.index, ABCMultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index._get_level_values(i))
            else:
                arrays.append(self.index)
        to_remove = []
        for col in keys:
            if isinstance(col, ABCMultiIndex):
                # Expand a MultiIndex entry into one level per array.
                for n in range(col.nlevels):
                    arrays.append(col._get_level_values(n))
                names.extend(col.names)
            elif isinstance(col, (ABCIndexClass, ABCSeries)):
                # if Index then not MultiIndex (treated above)
                arrays.append(col)
                names.append(col.name)
            elif isinstance(col, (list, np.ndarray)):
                arrays.append(col)
                names.append(None)
            # from here, col can only be a column label
            else:
                arrays.append(frame[col]._values)
                names.append(col)
                if drop:
                    to_remove.append(col)
        index = ensure_index_from_sequences(arrays, names)
        if verify_integrity and not index.is_unique:
            duplicates = index[index.duplicated()].unique()
            raise ValueError('Index has duplicate keys: {dup}'.format(
                dup=duplicates))
        # use set to handle duplicate column names gracefully in case of drop
        for c in set(to_remove):
            del frame[c]
        # clear up memory usage
        index._cleanup()
        frame.index = index
        if not inplace:
            return frame
    def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
                    col_fill=''):
        """
        Reset the index, or a level of it.

        Reset the index of the DataFrame, and use the default one instead.
        If the DataFrame has a MultiIndex, this method can remove one or more
        levels.

        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default.
        drop : bool, default False
            Do not try to insert index into dataframe columns. This resets
            the index to the default integer index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the
            labels are inserted into. By default it is inserted into the first
            level.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other
            levels are named. If None then the index name is repeated.

        Returns
        -------
        DataFrame
            DataFrame with the new index.

        See Also
        --------
        DataFrame.set_index : Opposite of reset_index.
        DataFrame.reindex : Change to new indices or expand indices.
        DataFrame.reindex_like : Change to same indices as other DataFrame.

        Examples
        --------
        >>> df = pd.DataFrame([('bird', 389.0),
        ...                    ('bird', 24.0),
        ...                    ('mammal', 80.5),
        ...                    ('mammal', np.nan)],
        ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
        ...                   columns=('class', 'max_speed'))
        >>> df
                 class  max_speed
        falcon    bird      389.0
        parrot    bird       24.0
        lion    mammal       80.5
        monkey  mammal        NaN

        When we reset the index, the old index is added as a column, and a
        new sequential index is used:

        >>> df.reset_index()
            index   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN

        We can use the `drop` parameter to avoid the old index being added as
        a column:

        >>> df.reset_index(drop=True)
            class  max_speed
        0    bird      389.0
        1    bird       24.0
        2  mammal       80.5
        3  mammal        NaN

        You can also use `reset_index` with `MultiIndex`.

        >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
        ...                                    ('bird', 'parrot'),
        ...                                    ('mammal', 'lion'),
        ...                                    ('mammal', 'monkey')],
        ...                                   names=['class', 'name'])
        >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
        ...                                      ('species', 'type')])
        >>> df = pd.DataFrame([(389.0, 'fly'),
        ...                    ( 24.0, 'fly'),
        ...                    ( 80.5, 'run'),
        ...                    (np.nan, 'jump')],
        ...                   index=index,
        ...                   columns=columns)
        >>> df
                       speed species
                         max    type
        class  name
        bird   falcon  389.0     fly
               parrot   24.0     fly
        mammal lion     80.5     run
               monkey    NaN    jump

        If the index has multiple levels, we can reset a subset of them:

        >>> df.reset_index(level='class')
                 class  speed species
                          max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump

        If we are not dropping the index, by default, it is placed in the top
        level. We can place it in another level:

        >>> df.reset_index(level='class', col_level=1)
                        speed species
                 class    max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump

        When the index is inserted under another level, we can specify under
        which one with the parameter `col_fill`:

        >>> df.reset_index(level='class', col_level=1, col_fill='species')
                      species  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump

        If we specify a nonexistent level for `col_fill`, it is created:

        >>> df.reset_index(level='class', col_level=1, col_fill='genus')
                        genus  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # Mutate self only when inplace was requested.
        if inplace:
            new_obj = self
        else:
            new_obj = self.copy()

        # Convert one index level into an array suitable for use as a
        # column, taking `labels` (level codes) into account: -1 codes are
        # filled with NaN, which may require an upcast.
        def _maybe_casted_values(index, labels=None):
            values = index._values
            if not isinstance(index, (PeriodIndex, DatetimeIndex)):
                if values.dtype == np.object_:
                    values = lib.maybe_convert_objects(values)
            # if we have the labels, extract the values with a mask
            if labels is not None:
                mask = labels == -1
                # we can have situations where the whole mask is -1,
                # meaning there is nothing found in labels, so make all nan's
                if mask.all():
                    values = np.empty(len(mask))
                    values.fill(np.nan)
                else:
                    values = values.take(labels)
                    # TODO(https://github.com/pandas-dev/pandas/issues/24206)
                    # Push this into maybe_upcast_putmask?
                    # We can't pass EAs there right now. Looks a bit
                    # complicated.
                    # So we unbox the ndarray_values, op, re-box.
                    values_type = type(values)
                    values_dtype = values.dtype
                    if issubclass(values_type, DatetimeLikeArray):
                        values = values._data
                    if mask.any():
                        values, changed = maybe_upcast_putmask(
                            values, mask, np.nan)
                    if issubclass(values_type, DatetimeLikeArray):
                        values = values_type(values, dtype=values_dtype)
            return values

        new_index = ibase.default_index(len(new_obj))
        if level is not None:
            # Normalize `level` to a list of level numbers.
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            if len(level) < self.index.nlevels:
                # Only some levels are reset: the remainder stays as index.
                new_index = self.index.droplevel(level)
        if not drop:
            # Build (level, codes) pairs to insert as columns, with
            # fallback names for unnamed levels.
            if isinstance(self.index, MultiIndex):
                names = [n if n is not None else ('level_%d' % i)
                         for (i, n) in enumerate(self.index.names)]
                to_insert = lzip(self.index.levels, self.index.codes)
            else:
                default = 'index' if 'index' not in self else 'level_0'
                names = ([default] if self.index.name is None
                         else [self.index.name])
                to_insert = ((self.index, None),)
            multi_col = isinstance(self.columns, MultiIndex)
            # Insert in reverse so that level 0 ends up leftmost.
            for i, (lev, lab) in reversed(list(enumerate(to_insert))):
                if not (level is None or i in level):
                    continue
                name = names[i]
                if multi_col:
                    # Pad the column name tuple to the full number of
                    # column levels, filling with `col_fill`.
                    col_name = (list(name) if isinstance(name, tuple)
                                else [name])
                    if col_fill is None:
                        if len(col_name) not in (1, self.columns.nlevels):
                            raise ValueError("col_fill=None is incompatible "
                                             "with incomplete column name "
                                             "{}".format(name))
                        col_fill = col_name[0]
                    lev_num = self.columns._get_level_number(col_level)
                    name_lst = [col_fill] * lev_num + col_name
                    missing = self.columns.nlevels - len(name_lst)
                    name_lst += [col_fill] * missing
                    name = tuple(name_lst)
                # to ndarray and maybe infer different dtype
                level_values = _maybe_casted_values(lev, lab)
                new_obj.insert(0, name, level_values)
        new_obj.index = new_index
        if not inplace:
            return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
    def dropna(self, axis=0, how='any', thresh=None, subset=None,
               inplace=False):
        """
        Remove missing values.

        See the :ref:`User Guide <missing_data>` for more on which values are
        considered missing, and how to work with missing data.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine if rows or columns which contain missing values are
            removed.

            * 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing value.

            .. deprecated:: 0.23.0
               Pass tuple or list to drop on multiple axes.
               Only a single axis is allowed.
        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame, when we have
            at least one NA or all NA.

            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.
        thresh : int, optional
            Require that many non-NA values.
        subset : array-like, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        DataFrame
            DataFrame with NA entries dropped from it.

        See Also
        --------
        DataFrame.isna: Indicate missing values.
        DataFrame.notna : Indicate existing (non-missing) values.
        DataFrame.fillna : Replace missing values.
        Series.dropna : Drop missing values.
        Index.dropna : Drop missing indices.

        Examples
        --------
        >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
        ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
        ...                             pd.NaT]})
        >>> df
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Drop the rows where at least one element is missing.

        >>> df.dropna()
             name        toy       born
        1  Batman  Batmobile 1940-04-25

        Drop the columns where at least one element is missing.

        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman

        Drop the rows where all elements are missing.

        >>> df.dropna(how='all')
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Keep only the rows with at least 2 non-NA values.

        >>> df.dropna(thresh=2)
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Define in which columns to look for missing values.

        >>> df.dropna(subset=['name', 'born'])
             name        toy       born
        1  Batman  Batmobile 1940-04-25

        Keep the DataFrame with valid entries in the same variable.

        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if isinstance(axis, (tuple, list)):
            # GH20987
            msg = ("supplying multiple axes to axis is deprecated and "
                   "will be removed in a future version.")
            warnings.warn(msg, FutureWarning, stacklevel=2)
            # Deprecated multi-axis path: apply dropna once per axis.
            result = self
            for ax in axis:
                result = result.dropna(how=how, thresh=thresh, subset=subset,
                                       axis=ax)
        else:
            axis = self._get_axis_number(axis)
            # NA values are counted along the opposite axis.
            agg_axis = 1 - axis
            agg_obj = self
            if subset is not None:
                # Restrict the count to the requested labels on the
                # opposite axis; unknown labels raise KeyError.
                ax = self._get_axis(agg_axis)
                indices = ax.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
                agg_obj = self.take(indices, axis=agg_axis)
            count = agg_obj.count(axis=agg_axis)
            if thresh is not None:
                mask = count >= thresh
            elif how == 'any':
                # keep only fully populated rows/columns
                mask = count == len(agg_obj._get_axis(agg_axis))
            elif how == 'all':
                # keep rows/columns that have at least one non-NA value
                mask = count > 0
            else:
                if how is not None:
                    raise ValueError('invalid how option: {h}'.format(h=how))
                else:
                    raise TypeError('must specify how or thresh')
            result = self.loc(axis=axis)[mask]
        if inplace:
            self._update_inplace(result)
        else:
            return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
    def duplicated(self, subset=None, keep='first'):
        """
        Return boolean Series denoting duplicate rows, optionally only
        considering certain columns.

        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns
        keep : {'first', 'last', False}, default 'first'
            - ``first`` : Mark duplicates as ``True`` except for the
              first occurrence.
            - ``last`` : Mark duplicates as ``True`` except for the
              last occurrence.
            - False : Mark all duplicates as ``True``.

        Returns
        -------
        duplicated : Series
        """
        from pandas.core.sorting import get_group_index
        from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
        if self.empty:
            return Series()

        # Factorize one column's values into integer labels; also report
        # the number of distinct values observed in that column.
        def f(vals):
            labels, shape = algorithms.factorize(
                vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
            return labels.astype('i8', copy=False), len(shape)

        if subset is None:
            subset = self.columns
        elif (not np.iterable(subset) or
              isinstance(subset, compat.string_types) or
              isinstance(subset, tuple) and subset in self.columns):
            # a single label (including a tuple that is itself a column key)
            subset = subset,
        # Verify all columns in subset exist in the queried dataframe
        # Otherwise, raise a KeyError, same as if you try to __getitem__ with a
        # key that doesn't exist.
        diff = Index(subset).difference(self.columns)
        if not diff.empty:
            raise KeyError(diff)
        vals = (col.values for name, col in self.iteritems()
                if name in subset)
        labels, shape = map(list, zip(*map(f, vals)))
        # Combine the per-column labels into a single group id per row;
        # rows that share a group id are duplicates of each other.
        ids = get_group_index(labels, shape, sort=False, xnull=False)
        return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
    @Substitution(**_shared_doc_kwargs)
    @Appender(NDFrame.sort_values.__doc__)
    def sort_values(self, by, axis=0, ascending=True, inplace=False,
                    kind='quicksort', na_position='last'):
        inplace = validate_bool_kwarg(inplace, 'inplace')
        axis = self._get_axis_number(axis)
        # Normalize `by` to a list of column labels / index levels.
        if not isinstance(by, list):
            by = [by]
        if is_sequence(ascending) and len(by) != len(ascending):
            raise ValueError('Length of ascending (%d) != length of by (%d)' %
                             (len(ascending), len(by)))
        if len(by) > 1:
            # Multi-key sort: build one lexicographic indexer over all keys.
            from pandas.core.sorting import lexsort_indexer
            keys = [self._get_label_or_level_values(x, axis=axis)
                    for x in by]
            indexer = lexsort_indexer(keys, orders=ascending,
                                      na_position=na_position)
            indexer = ensure_platform_int(indexer)
        else:
            # Single-key sort; a list-like `ascending` collapses to its
            # first element.
            from pandas.core.sorting import nargsort
            by = by[0]
            k = self._get_label_or_level_values(by, axis=axis)
            if isinstance(ascending, (tuple, list)):
                ascending = ascending[0]
            indexer = nargsort(k, kind=kind, ascending=ascending,
                               na_position=na_position)
        # Reorder the underlying blocks with the computed indexer.
        new_data = self._data.take(indexer,
                                   axis=self._get_block_manager_axis(axis),
                                   verify=False)
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
    @Substitution(**_shared_doc_kwargs)
    @Appender(NDFrame.sort_index.__doc__)
    def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
                   kind='quicksort', na_position='last', sort_remaining=True,
                   by=None):
        # TODO: this can be combined with Series.sort_index impl as
        # almost identical
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # 10726
        if by is not None:
            # Deprecated alias for sort_values(by=...).
            warnings.warn("by argument to sort_index is deprecated, "
                          "please use .sort_values(by=...)",
                          FutureWarning, stacklevel=2)
            if level is not None:
                raise ValueError("unable to simultaneously sort by and level")
            return self.sort_values(by, axis=axis, ascending=ascending,
                                    inplace=inplace)
        axis = self._get_axis_number(axis)
        labels = self._get_axis(axis)
        # make sure that the axis is lexsorted to start
        # if not we need to reconstruct to get the correct indexer
        labels = labels._sort_levels_monotonic()
        if level is not None:
            # Sort by one (or more) specific MultiIndex level(s).
            new_axis, indexer = labels.sortlevel(level, ascending=ascending,
                                                 sort_remaining=sort_remaining)
        elif isinstance(labels, MultiIndex):
            # Full lexicographic sort across all levels.
            from pandas.core.sorting import lexsort_indexer
            indexer = lexsort_indexer(labels._get_codes_for_sorting(),
                                      orders=ascending,
                                      na_position=na_position)
        else:
            from pandas.core.sorting import nargsort
            # Check monotonic-ness before sort an index
            # GH11080
            if ((ascending and labels.is_monotonic_increasing) or
                    (not ascending and labels.is_monotonic_decreasing)):
                # Already sorted: return early without taking.
                if inplace:
                    return
                else:
                    return self.copy()
            indexer = nargsort(labels, kind=kind, ascending=ascending,
                               na_position=na_position)
        baxis = self._get_block_manager_axis(axis)
        new_data = self._data.take(indexer,
                                   axis=baxis,
                                   verify=False)
        # reconstruct axis if needed
        new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "a".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
    """
    Rearrange index levels using input order. May not drop or
    duplicate levels.

    Parameters
    ----------
    order : list of int or list of str
        List representing new level order. Reference level by number
        (position) or by key (label).
    axis : int
        Where to reorder levels.

    Returns
    -------
    type of caller (new object)

    Raises
    ------
    TypeError
        If the target axis does not carry a MultiIndex.
    """
    axis_num = self._get_axis_number(axis)
    if not isinstance(self._get_axis(axis_num),
                      MultiIndex):  # pragma: no cover
        raise TypeError('Can only reorder levels on a hierarchical axis.')
    # Copy first; the reordered labels are assigned onto the copy only.
    reordered = self.copy()
    if axis_num == 0:
        reordered.index = reordered.index.reorder_levels(order)
    else:
        reordered.columns = reordered.columns.reorder_levels(order)
    return reordered
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
    """
    Combine ``self`` with another DataFrame via a binary ``func``.

    Both operands are first outer-aligned; ``fill_value`` (if given)
    replaces NaNs on either side before ``func`` is applied.

    Parameters
    ----------
    other : DataFrame
        Right-hand operand.
    func : callable
        Binary function applied to the aligned values.
    fill_value : scalar, optional
        Value substituted for missing entries prior to combining.
    level : int or level name, optional
        Broadcast across a level, matching on that MultiIndex level.

    Returns
    -------
    DataFrame
    """
    aligned_self, aligned_other = self.align(other, join='outer',
                                             level=level, copy=False)

    def masked_op(left, right):
        # For the mixed-type (column-wise) case this is equivalent to
        # left._binop(right, func, fill_value=fill_value).
        left, right = ops.fill_binop(left, right, fill_value)
        return func(left, right)

    if ops.should_series_dispatch(aligned_self, aligned_other, func):
        # Mixed dtypes: iterate column by column.
        return ops.dispatch_to_series(aligned_self, aligned_other,
                                      masked_op)

    # Homogeneous fastpath: operate on the raw ndarrays in one shot.
    values = masked_op(aligned_self.values, aligned_other.values)
    return self._constructor(values,
                             index=aligned_self.index,
                             columns=aligned_self.columns,
                             copy=False)
def _combine_match_index(self, other, func, level=None):
    """
    Combine with a Series aligned along the index (axis=0).

    Parameters
    ----------
    other : Series
        Row-aligned operand.
    func : callable
        Binary function applied to the aligned values.
    level : int or level name, optional
        Broadcast across a level on a MultiIndex.

    Returns
    -------
    DataFrame
    """
    left, right = self.align(other, join='outer', axis=0, level=level,
                             copy=False)
    assert left.index.equals(right.index)

    if left._is_mixed_type or right._is_mixed_type:
        # Column-wise dispatch avoids a costly object-dtype ``.values``.
        return ops.dispatch_to_series(left, right, func)

    # Homogeneous fastpath: broadcast the Series over the transposed
    # values, then transpose the result back.
    with np.errstate(all="ignore"):
        combined = func(left.values.T, right.values).T
    return self._constructor(combined,
                             index=left.index, columns=self.columns,
                             copy=False)
def _combine_match_columns(self, other, func, level=None):
    """
    Combine with a Series aligned along the columns (axis=1).

    ``other`` must already be a Series; its index is matched against
    this frame's columns and the operation is dispatched column-wise.
    """
    assert isinstance(other, Series)
    frame, series = self.align(other, join='outer', axis=1, level=level,
                               copy=False)
    assert frame.columns.equals(series.index)
    return ops.dispatch_to_series(frame, series, func, axis="columns")
def _combine_const(self, other, func):
    """
    Combine with a scalar (or 0-d array) operand via ``func``.

    No alignment is required; the scalar is broadcast column-wise.
    """
    scalar_like = lib.is_scalar(other) or np.ndim(other) == 0
    assert scalar_like
    return ops.dispatch_to_series(self, other, func)
def combine(self, other, func, fill_value=None, overwrite=True):
    """
    Perform column-wise combine with another DataFrame based on a
    passed function.

    Combines a DataFrame with `other` DataFrame using `func`
    to element-wise combine columns. The row and column indexes of the
    resulting DataFrame will be the union of the two.

    Parameters
    ----------
    other : DataFrame
        The DataFrame to merge column-wise.
    func : function
        Function that takes two series as inputs and return a Series or a
        scalar. Used to merge the two dataframes column by columns.
    fill_value : scalar value, default None
        The value to fill NaNs with prior to passing any column to the
        merge func.
    overwrite : boolean, default True
        If True, columns in `self` that do not exist in `other` will be
        overwritten with NaNs.

    Returns
    -------
    result : DataFrame

    See Also
    --------
    DataFrame.combine_first : Combine two DataFrame objects and default to
        non-null values in frame calling the method.

    Examples
    --------
    Combine using a simple function that chooses the smaller column.

    >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
    >>> df1.combine(df2, take_smaller)
       A  B
    0  0  3
    1  0  3

    Example using a true element-wise combine function.

    >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> df1.combine(df2, np.minimum)
       A  B
    0  1  2
    1  0  3

    Using `fill_value` fills Nones prior to passing the column to the
    merge function.

    >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> df1.combine(df2, take_smaller, fill_value=-5)
       A    B
    0  0 -5.0
    1  0  4.0

    However, if the same element in both dataframes is None, that None
    is preserved

    >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
    >>> df1.combine(df2, take_smaller, fill_value=-5)
       A    B
    0  0  NaN
    1  0  3.0

    Example that demonstrates the use of `overwrite` and behavior when
    the axis differ between the dataframes.

    >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
    >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1],}, index=[1, 2])
    >>> df1.combine(df2, take_smaller)
         A    B     C
    0  NaN  NaN   NaN
    1  NaN  3.0 -10.0
    2  NaN  3.0   1.0

    >>> df1.combine(df2, take_smaller, overwrite=False)
         A    B     C
    0  0.0  NaN   NaN
    1  0.0  3.0 -10.0
    2  NaN  3.0   1.0

    Demonstrating the preference of the passed in dataframe.

    >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1],}, index=[1, 2])
    >>> df2.combine(df1, take_smaller)
         A    B   C
    0  0.0  NaN NaN
    1  0.0  3.0 NaN
    2  NaN  3.0 NaN

    >>> df2.combine(df1, take_smaller, overwrite=False)
         A    B   C
    0  0.0  NaN NaN
    1  0.0  3.0 1.0
    2  NaN  3.0 1.0
    """
    other_idxlen = len(other.index)  # save for compare
    this, other = self.align(other, copy=False)
    new_index = this.index
    # Early exits: when one side is empty and alignment left the other
    # side's index unchanged, return a plain copy of the non-empty frame.
    if other.empty and len(new_index) == len(self.index):
        return self.copy()
    if self.empty and len(other) == other_idxlen:
        return other.copy()
    # sorts if possible
    new_columns = this.columns.union(other.columns)
    do_fill = fill_value is not None
    result = {}
    for col in new_columns:
        series = this[col]
        otherSeries = other[col]
        this_dtype = series.dtype
        other_dtype = otherSeries.dtype
        this_mask = isna(series)
        other_mask = isna(otherSeries)
        # don't overwrite columns unecessarily
        # DO propagate if this column is not in the intersection
        if not overwrite and other_mask.all():
            result[col] = this[col].copy()
            continue
        if do_fill:
            # Copy before filling so the aligned inputs are not mutated.
            series = series.copy()
            otherSeries = otherSeries.copy()
            series[this_mask] = fill_value
            otherSeries[other_mask] = fill_value
        if col not in self.columns:
            # If self DataFrame does not have col in other DataFrame,
            # try to promote series, which is all NaN, as other_dtype.
            new_dtype = other_dtype
            try:
                series = series.astype(new_dtype, copy=False)
            except ValueError:
                # e.g. new_dtype is integer types
                pass
        else:
            # if we have different dtypes, possibly promote
            new_dtype = find_common_type([this_dtype, other_dtype])
            if not is_dtype_equal(this_dtype, new_dtype):
                series = series.astype(new_dtype)
            if not is_dtype_equal(other_dtype, new_dtype):
                otherSeries = otherSeries.astype(new_dtype)
        arr = func(series, otherSeries)
        # Try to restore the column's original dtype after combining.
        arr = maybe_downcast_to_dtype(arr, this_dtype)
        result[col] = arr
    # convert_objects just in case
    return self._constructor(result, index=new_index,
                             columns=new_columns)
def combine_first(self, other):
    """
    Update null elements with value in the same location in `other`.

    Combine two DataFrame objects by filling null values in one DataFrame
    with non-null values from other DataFrame. The row and column indexes
    of the resulting DataFrame will be the union of the two.

    Parameters
    ----------
    other : DataFrame
        Provided DataFrame to use to fill null values.

    Returns
    -------
    combined : DataFrame

    See Also
    --------
    DataFrame.combine : Perform series-wise operation on two DataFrames
        using a given function.

    Examples
    --------
    >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
    >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    >>> df1.combine_first(df2)
         A    B
    0  1.0  3.0
    1  0.0  4.0

    Null values still persist if the location of that null value
    does not exist in `other`

    >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
    >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
    >>> df1.combine_first(df2)
         A    B    C
    0  NaN  4.0  NaN
    1  0.0  3.0  1.0
    2  NaN  3.0  1.0
    """
    import pandas.core.computation.expressions as expressions

    def extract_values(arr):
        # Does two things:
        # 1. maybe gets the values from the Series / Index
        # 2. convert datelike to i8
        if isinstance(arr, (ABCIndexClass, ABCSeries)):
            arr = arr._values
        if needs_i8_conversion(arr):
            # Datetime-like values are viewed as int64 so that
            # ``expressions.where`` can select between the two sides.
            if is_extension_array_dtype(arr.dtype):
                arr = arr.asi8
            else:
                arr = arr.view('i8')
        return arr

    def combiner(x, y):
        # Prefer x's (self's) values; fall back to y where x is NA.
        mask = isna(x)
        if isinstance(mask, (ABCIndexClass, ABCSeries)):
            mask = mask._values
        x_values = extract_values(x)
        y_values = extract_values(y)
        # If the column y in other DataFrame is not in first DataFrame,
        # just return y_values.
        if y.name not in self.columns:
            return y_values
        return expressions.where(mask, y_values, x_values)

    # overwrite=False ensures only NA positions in self are filled.
    return self.combine(other, combiner, overwrite=False)
@deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors',
                 mapping={False: 'ignore', True: 'raise'})
def update(self, other, join='left', overwrite=True, filter_func=None,
           errors='ignore'):
    """
    Modify in place using non-NA values from another DataFrame.

    Aligns on indices. There is no return value.

    Parameters
    ----------
    other : DataFrame, or object coercible into a DataFrame
        Should have at least one matching index/column label
        with the original DataFrame. If a Series is passed,
        its name attribute must be set, and that will be
        used as the column name to align with the original DataFrame.
    join : {'left'}, default 'left'
        Only left join is implemented, keeping the index and columns of the
        original object.
    overwrite : bool, default True
        How to handle non-NA values for overlapping keys:

        * True: overwrite original DataFrame's values
          with values from `other`.
        * False: only update values that are NA in
          the original DataFrame.

    filter_func : callable(1d-array) -> bool 1d-array, optional
        Can choose to replace values other than NA. Return True for values
        that should be updated.
    errors : {'raise', 'ignore'}, default 'ignore'
        If 'raise', will raise a ValueError if the DataFrame and `other`
        both contain non-NA data in the same place.

        .. versionchanged :: 0.24.0
           Changed from `raise_conflict=False|True`
           to `errors='ignore'|'raise'`.

    Returns
    -------
    None : method directly changes calling object

    Raises
    ------
    ValueError
        * When `errors='raise'` and there's overlapping non-NA data.
        * When `errors` is not either `'ignore'` or `'raise'`
    NotImplementedError
        * If `join != 'left'`

    See Also
    --------
    dict.update : Similar method for dictionaries.
    DataFrame.merge : For column(s)-on-columns(s) operations.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3],
    ...                    'B': [400, 500, 600]})
    >>> new_df = pd.DataFrame({'B': [4, 5, 6],
    ...                        'C': [7, 8, 9]})
    >>> df.update(new_df)
    >>> df
       A  B
    0  1  4
    1  2  5
    2  3  6

    The DataFrame's length does not increase as a result of the update,
    only values at matching index/column labels are updated.

    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
    >>> df.update(new_df)
    >>> df
       A  B
    0  a  d
    1  b  e
    2  c  f

    For Series, it's name attribute must be set.

    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
    >>> df.update(new_column)
    >>> df
       A  B
    0  a  d
    1  b  y
    2  c  e
    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
    >>> df.update(new_df)
    >>> df
       A  B
    0  a  x
    1  b  d
    2  c  e

    If `other` contains NaNs the corresponding values are not updated
    in the original dataframe.

    >>> df = pd.DataFrame({'A': [1, 2, 3],
    ...                    'B': [400, 500, 600]})
    >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
    >>> df.update(new_df)
    >>> df
       A      B
    0  1    4.0
    1  2  500.0
    2  3    6.0
    """
    import pandas.core.computation.expressions as expressions
    # TODO: Support other joins
    if join != 'left':  # pragma: no cover
        raise NotImplementedError("Only left join is supported")
    if errors not in ['ignore', 'raise']:
        raise ValueError("The parameter errors must be either "
                         "'ignore' or 'raise'")
    if not isinstance(other, DataFrame):
        other = DataFrame(other)
    # Left join: conform ``other`` to self's index and columns.
    other = other.reindex_like(self)
    for col in self.columns:
        this = self[col].values
        that = other[col].values
        if filter_func is not None:
            with np.errstate(all='ignore'):
                # Keep original where filter_func is False or the
                # incoming value is NA.
                mask = ~filter_func(this) | isna(that)
        else:
            if errors == 'raise':
                mask_this = notna(that)
                mask_that = notna(this)
                if any(mask_this & mask_that):
                    raise ValueError("Data overlaps.")
            if overwrite:
                mask = isna(that)
            else:
                # Only fill positions that are NA in the original.
                mask = notna(this)
        # don't overwrite columns unecessarily
        if mask.all():
            continue
        # Where ``mask`` is True the original value is kept; elsewhere
        # the value from ``other`` replaces it (in place).
        self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
_shared_docs['pivot'] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged :: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution('')
@Appender(_shared_docs['pivot'])
def pivot(self, index=None, columns=None, values=None):
    # Deferred import keeps the reshape machinery off the module
    # import path; the aliased name avoids shadowing this method.
    from pandas.core.reshape.pivot import pivot as pivot_frame
    return pivot_frame(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is being used as the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is being used as the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Returns
-------
table : DataFrame
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two NaN 6
We can also fill missing values using the `fill_value` parameter.
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
mean mean
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9 7.500000 6
small 5.500000 9 8.500000 8
foo large 2.000000 5 4.500000 4
small 2.333333 6 4.333333 2
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
                aggfunc='mean', fill_value=None, margins=False,
                dropna=True, margins_name='All'):
    # Deferred import keeps the reshape machinery off the module
    # import path; the aliased name avoids shadowing this method.
    from pandas.core.reshape.pivot import pivot_table as pivot_table_frame
    return pivot_table_frame(self, values=values, index=index,
                             columns=columns, aggfunc=aggfunc,
                             fill_value=fill_value, margins=margins,
                             dropna=dropna, margins_name=margins_name)
def stack(self, level=-1, dropna=True):
    """
    Stack the prescribed level(s) from columns to index.

    Return a reshaped DataFrame or Series having a multi-level
    index with one or more new inner-most levels compared to the current
    DataFrame. The new inner-most levels are created by pivoting the
    columns of the current dataframe:

      - if the columns have a single level, the output is a Series;
      - if the columns have multiple levels, the new index
        level(s) is (are) taken from the prescribed level(s) and
        the output is a DataFrame.

    The new index levels are sorted.

    Parameters
    ----------
    level : int, str, list, default -1
        Level(s) to stack from the column axis onto the index
        axis, defined as one index or label, or a list of indices
        or labels.
    dropna : bool, default True
        Whether to drop rows in the resulting Frame/Series with
        missing values. Stacking a column level onto the index
        axis can create combinations of index and column values
        that are missing from the original dataframe. See Examples
        section.

    Returns
    -------
    DataFrame or Series
        Stacked dataframe or series.

    See Also
    --------
    DataFrame.unstack : Unstack prescribed level(s) from index axis
        onto column axis.
    DataFrame.pivot : Reshape dataframe from long format to wide
        format.
    DataFrame.pivot_table : Create a spreadsheet-style pivot table
        as a DataFrame.

    Notes
    -----
    The function is named by analogy with a collection of books
    being re-organised from being side by side on a horizontal
    position (the columns of the dataframe) to being stacked
    vertically on top of of each other (in the index of the
    dataframe).

    Examples
    --------
    **Single level columns**

    >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=['weight', 'height'])

    Stacking a dataframe with a single level column axis returns a Series:

    >>> df_single_level_cols
         weight height
    cat       0      1
    dog       2      3
    >>> df_single_level_cols.stack()
    cat  weight    0
         height    1
    dog  weight    2
         height    3
    dtype: int64

    **Multi level columns: simple case**

    >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('weight', 'pounds')])
    >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol1)

    Stacking a dataframe with a multi-level column axis:

    >>> df_multi_level_cols1
         weight
             kg    pounds
    cat       1        2
    dog       2        4
    >>> df_multi_level_cols1.stack()
                weight
    cat kg           1
        pounds       2
    dog kg           2
        pounds       4

    **Missing values**

    >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('height', 'm')])
    >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol2)

    It is common to have missing values when stacking a dataframe
    with multi-level columns, as the stacked dataframe typically
    has more values than the original dataframe. Missing values
    are filled with NaNs:

    >>> df_multi_level_cols2
        weight height
            kg      m
    cat    1.0    2.0
    dog    3.0    4.0
    >>> df_multi_level_cols2.stack()
            height  weight
    cat kg     NaN     1.0
        m      2.0     NaN
    dog kg     NaN     3.0
        m      4.0     NaN

    **Prescribing the level(s) to be stacked**

    The first parameter controls which level or levels are stacked:

    >>> df_multi_level_cols2.stack(0)
                 kg    m
    cat height  NaN  2.0
        weight  1.0  NaN
    dog height  NaN  4.0
        weight  3.0  NaN
    >>> df_multi_level_cols2.stack([0, 1])
    cat  height  m     2.0
         weight  kg    1.0
    dog  height  m     4.0
         weight  kg    3.0
    dtype: float64

    **Dropping missing values**

    >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol2)

    Note that rows where all values are missing are dropped by
    default but this behaviour can be controlled via the dropna
    keyword parameter:

    >>> df_multi_level_cols3
        weight height
            kg      m
    cat    NaN    1.0
    dog    2.0    3.0
    >>> df_multi_level_cols3.stack(dropna=False)
            height  weight
    cat kg     NaN     NaN
        m      1.0     NaN
    dog kg     NaN     2.0
        m      3.0     NaN
    >>> df_multi_level_cols3.stack(dropna=True)
            height  weight
    cat m      1.0     NaN
    dog kg     NaN     2.0
        m      3.0     NaN
    """
    from pandas.core.reshape.reshape import stack, stack_multiple
    # A list/tuple of levels stacks several column levels in one call;
    # a single level is delegated to the simple implementation.
    if isinstance(level, (tuple, list)):
        return stack_multiple(self, level, dropna=dropna)
    else:
        return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
    """
    Pivot a level of the (necessarily hierarchical) index labels, returning
    a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels.

    If the index is not a MultiIndex, the output will be a Series
    (the analogue of stack when the columns are not a MultiIndex).

    The level involved will automatically get sorted.

    Parameters
    ----------
    level : int, string, or list of these, default -1 (last level)
        Level(s) of index to unstack, can pass level name
    fill_value : replace NaN with this value if the unstack produces
        missing values

        .. versionadded:: 0.18.0

    Returns
    -------
    unstacked : DataFrame or Series

    See Also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation
        from `unstack`).

    Examples
    --------
    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
    ...                                    ('two', 'a'), ('two', 'b')])
    >>> s = pd.Series(np.arange(1.0, 5.0), index=index)
    >>> s
    one  a   1.0
         b   2.0
    two  a   3.0
         b   4.0
    dtype: float64
    >>> s.unstack(level=-1)
         a   b
    one  1.0  2.0
    two  3.0  4.0
    >>> s.unstack(level=0)
       one  two
    a  1.0   3.0
    b  2.0   4.0
    >>> df = s.unstack(level=0)
    >>> df.unstack()
    one  a  1.0
         b  2.0
    two  a  3.0
         b  4.0
    dtype: float64
    """
    # All of the pivoting work is delegated to the reshape module.
    from pandas.core.reshape.reshape import unstack
    return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
Unpivots a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
          dict(caller='df.melt(',
               versionadded='.. versionadded:: 0.20.0\n',
               other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
         value_name='value', col_level=None):
    # Deferred import keeps the reshape machinery off the module
    # import path; the aliased name avoids shadowing this method.
    from pandas.core.reshape.melt import melt as melt_frame
    return melt_frame(self, id_vars=id_vars, value_vars=value_vars,
                      var_name=var_name, value_name=value_name,
                      col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
    """
    First discrete difference of element.

    Calculates the difference of a DataFrame element compared with another
    element in the DataFrame (default is the element in the same column
    of the previous row).

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for calculating difference, accepts negative
        values.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Take difference over rows (0) or columns (1).

        .. versionadded:: 0.16.1.

    Returns
    -------
    diffed : DataFrame

    See Also
    --------
    Series.diff: First discrete difference for a Series.
    DataFrame.pct_change: Percent change over given number of periods.
    DataFrame.shift: Shift index by desired number of periods with an
        optional time freq.

    Examples
    --------
    Difference with previous row

    >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
    ...                    'b': [1, 1, 2, 3, 5, 8],
    ...                    'c': [1, 4, 9, 16, 25, 36]})
    >>> df
       a  b   c
    0  1  1   1
    1  2  1   4
    2  3  2   9
    3  4  3  16
    4  5  5  25
    5  6  8  36
    >>> df.diff()
         a    b     c
    0  NaN  NaN   NaN
    1  1.0  0.0   3.0
    2  1.0  1.0   5.0
    3  1.0  1.0   7.0
    4  1.0  2.0   9.0
    5  1.0  3.0  11.0

    Difference with previous column

    >>> df.diff(axis=1)
        a    b     c
    0 NaN  0.0   0.0
    1 NaN -1.0   3.0
    2 NaN -1.0   7.0
    3 NaN -1.0  13.0
    4 NaN  0.0  20.0
    5 NaN  2.0  28.0

    Difference with 3rd previous row

    >>> df.diff(periods=3)
         a    b     c
    0  NaN  NaN   NaN
    1  NaN  NaN   NaN
    2  NaN  NaN   NaN
    3  3.0  2.0  15.0
    4  3.0  4.0  21.0
    5  3.0  6.0  27.0

    Difference with following row

    >>> df.diff(periods=-1)
         a    b     c
    0 -1.0  0.0  -3.0
    1 -1.0 -1.0  -5.0
    2 -1.0 -1.0  -7.0
    3 -1.0 -2.0  -9.0
    4 -1.0 -3.0 -11.0
    5  NaN  NaN   NaN
    """
    # Translate the user-facing axis to the block-manager axis, then
    # delegate the shifted subtraction to the internal blocks.
    bm_axis = self._get_block_manager_axis(axis)
    new_data = self._data.diff(n=periods, axis=bm_axis)
    return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self,
key, # type: Union[str, List[str]]
ndim, # type: int
subset=None # type: Union[Series, DataFrame, None]
):
# type: (...) -> Union[Series, DataFrame]
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")
_agg_examples_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
""")
@Substitution(see_also=_agg_summary_and_see_also_doc,
              examples=_agg_examples_doc,
              versionadded='.. versionadded:: 0.20.0',
              **_shared_doc_kwargs)
@Appender(_shared_docs['aggregate'])
def aggregate(self, func, axis=0, *args, **kwargs):
    axis = self._get_axis_number(axis)
    try:
        result, _ = self._aggregate(func, axis=axis, *args, **kwargs)
    except TypeError:
        # ``func`` was something _aggregate cannot interpret; fall back
        # to apply() below.
        result = None
    if result is not None:
        return result
    # Fallback: treat ``func`` as a row/column-wise applied function.
    return self.apply(func, axis=axis, args=args, **kwargs)
def _aggregate(self, arg, axis=0, *args, **kwargs):
    """
    Internal dispatcher backing :meth:`aggregate`.

    For ``axis=1`` the aggregation is performed on the transpose and
    the result transposed back, because the base-class implementation
    only understands the index axis.
    """
    if axis == 1:
        # NDFrame.aggregate returns a tuple; transpose only the result.
        result, how = (super(DataFrame, self.T)
                       ._aggregate(arg, *args, **kwargs))
        if result is not None:
            result = result.T
        return result, how
    return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
# Short alias: ``agg`` is the documented shorthand for ``aggregate``.
agg = aggregate
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
    axis = self._get_axis_number(axis)
    if axis != 1:
        return super(DataFrame, self).transform(func, *args, **kwargs)
    # Column-wise: transform the transpose, then flip the result back.
    transposed = super(DataFrame, self.T).transform(func, *args, **kwargs)
    return transposed.T
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Retuning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Notes
-----
In the current implementation applymap calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False,
verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
appended : DataFrame
See Also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it.
lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
    @Substitution('')
    @Appender(_merge_doc, indents=2)
    def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
              left_index=False, right_index=False, sort=False,
              suffixes=('_x', '_y'), copy=True, indicator=False,
              validate=None):
        # Thin wrapper: the user-facing documentation is attached from
        # _merge_doc by the Appender decorator; all logic lives in
        # pandas.core.reshape.merge.merge.
        from pandas.core.reshape.merge import merge
        return merge(self, right, how=how, on=on, left_on=left_on,
                     right_on=right_on, left_index=left_index,
                     right_index=right_index, sort=sort, suffixes=suffixes,
                     copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Returns
-------
DataFrame
See Also
--------
numpy.around
Series.round
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> histogram_intersection = lambda a, b: np.minimum(a, b
... ).sum().round(decimals=1)
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', or 'kendall', '{method}' "
"was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
pandas.Series.cov : Compute covariance with another Series.
pandas.core.window.EWM.cov: Exponential weighted sample covariance.
pandas.core.window.Expanding.cov : Expanding sample covariance.
pandas.core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method='pearson'):
"""
Compute pairwise correlation between rows or columns of DataFrame
with rows or columns of Series or DataFrame. DataFrames are first
aligned along both axes before computing the correlations.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
Returns
-------
correls : Series
See Also
-------
DataFrame.corr
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method),
axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
if axis == 1:
left = left.T
right = right.T
if method == 'pearson':
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ['kendall', 'spearman'] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(map(c,
zip(left.values.T, right.values.T)),
index=left.columns)
else:
raise ValueError("Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable".
format(method=method))
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = (this._get_axis(raxis).
union(other._get_axis(raxis)))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff),
index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : boolean, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
    def _count_level(self, level, axis=0, numeric_only=False):
        # Count non-NA cells along ``axis``, collapsed onto one level of
        # that (hierarchical) axis; backend for count(level=...).
        if numeric_only:
            frame = self._get_numeric_data()
        else:
            frame = self

        count_axis = frame._get_axis(axis)
        agg_axis = frame._get_agg_axis(axis)

        if not isinstance(count_axis, MultiIndex):
            raise TypeError("Can only count levels on hierarchical "
                            "{ax}.".format(ax=self._get_axis_name(axis)))

        if frame._is_mixed_type:
            # Since we have mixed types, calling notna(frame.values) might
            # upcast everything to object
            mask = notna(frame).values
        else:
            # But use the speedup when we have homogeneous dtypes
            mask = notna(frame.values)

        if axis == 1:
            # We're transposing the mask rather than frame to avoid potential
            # upcasts to object, which induces a ~20x slowdown
            mask = mask.T

        if isinstance(level, compat.string_types):
            # Resolve a level name to its positional number.
            level = count_axis._get_level_number(level)

        level_index = count_axis.levels[level]
        level_codes = ensure_int64(count_axis.codes[level])
        # C-level group count of True cells per level code.
        counts = lib.count_level_2d(mask, level_codes, len(level_index),
                                    axis=0)

        result = DataFrame(counts, index=level_index, columns=agg_axis)

        if axis == 1:
            # Undo our earlier transpose
            return result.T
        else:
            return result
    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
                filter_type=None, **kwds):
        # Shared backend for the ndarray-style reductions (sum, mean,
        # any/all, ...): applies ``op`` over ``axis`` and wraps the
        # result in a Series labelled by the aggregation axis.
        if axis is None and filter_type == 'bool':
            # e.g. all/any with axis=None reduces to a scalar, so there
            # are no labels and no Series constructor to apply.
            labels = None
            constructor = None
        else:
            # TODO: Make other agg func handle axis=None properly
            axis = self._get_axis_number(axis)
            labels = self._get_agg_axis(axis)
            constructor = self._constructor

        def f(x):
            # Apply the reduction to a 2-D ndarray of values.
            return op(x, axis=axis, skipna=skipna, **kwds)

        # exclude timedelta/datetime unless we are uniform types
        if (axis == 1 and self._is_datelike_mixed_type
                and (not self._is_homogeneous_type
                     and not is_datetime64tz_dtype(self.dtypes[0]))):
            numeric_only = True

        if numeric_only is None:
            # Optimistic path: reduce the full block of values; fall back
            # below if mixed dtypes make that impossible.
            try:
                values = self.values
                result = f(values)

                if (filter_type == 'bool' and is_object_dtype(values) and
                        axis is None):
                    # work around https://github.com/numpy/numpy/issues/10489
                    # TODO: combine with hasattr(result, 'dtype') further down
                    # hard since we don't have `values` down there.
                    result = np.bool_(result)
            except Exception as e:
                # try by-column first
                if filter_type is None and axis == 0:
                    try:
                        # this can end up with a non-reduction
                        # but not always. if the types are mixed
                        # with datelike then need to make sure a series
                        # we only end up here if we have not specified
                        # numeric_only and yet we have tried a
                        # column-by-column reduction, where we have mixed type.
                        # So let's just do what we can
                        from pandas.core.apply import frame_apply
                        opa = frame_apply(self,
                                          func=f,
                                          result_type='expand',
                                          ignore_failures=True)
                        result = opa.get_result()
                        if result.ndim == self.ndim:
                            result = result.iloc[0]
                        return result
                    except Exception:
                        pass

                # Last resort: restrict to the dtypes ``op`` can handle.
                if filter_type is None or filter_type == 'numeric':
                    data = self._get_numeric_data()
                elif filter_type == 'bool':
                    data = self._get_bool_data()
                else:  # pragma: no cover
                    e = NotImplementedError(
                        "Handling exception with filter_type {f} not"
                        "implemented.".format(f=filter_type))
                    raise_with_traceback(e)
                with np.errstate(all='ignore'):
                    result = f(data.values)
                labels = data._get_agg_axis(axis)
        else:
            # Caller decided explicitly whether to restrict the dtypes.
            if numeric_only:
                if filter_type is None or filter_type == 'numeric':
                    data = self._get_numeric_data()
                elif filter_type == 'bool':
                    data = self
                else:  # pragma: no cover
                    msg = ("Generating numeric_only data with filter_type {f}"
                           "not supported.".format(f=filter_type))
                    raise NotImplementedError(msg)
                values = data.values
                labels = data._get_agg_axis(axis)
            else:
                values = self.values
            result = f(values)

        if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
            # Coerce an object-dtype result to a more specific dtype
            # when that is safe.
            try:
                if filter_type is None or filter_type == 'numeric':
                    result = result.astype(np.float64)
                elif filter_type == 'bool' and notna(result).all():
                    result = result.astype(np.bool_)
            except (ValueError, TypeError):
                # try to coerce to the original dtypes item by item if we can
                if axis == 0:
                    result = coerce_to_dtypes(result, self.dtypes)

        if constructor is not None:
            result = Series(result, index=labels)
        return result
def nunique(self, axis=0, dropna=True):
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
idxmin : Series
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
idxmax : Series
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the mode of wings
are both 0 and 2. The second row of species and legs contains ``NaN``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False`` ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
    def quantile(self, q=0.5, axis=0, numeric_only=True,
                 interpolation='linear'):
        """
        Return values at the given quantile over requested axis.
        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            Value between 0 <= q <= 1, the quantile(s) to compute.
        axis : {0, 1, 'index', 'columns'} (default 0)
            Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
        numeric_only : bool, default True
            If False, the quantile of datetime and timedelta data will be
            computed as well.
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
            This optional parameter specifies the interpolation method to use,
            when the desired quantile lies between two data points `i` and `j`:
            * linear: `i + (j - i) * fraction`, where `fraction` is the
              fractional part of the index surrounded by `i` and `j`.
            * lower: `i`.
            * higher: `j`.
            * nearest: `i` or `j` whichever is nearest.
            * midpoint: (`i` + `j`) / 2.
            .. versionadded:: 0.18.0
        Returns
        -------
        quantiles : Series or DataFrame
            - If ``q`` is an array, a DataFrame will be returned where the
              index is ``q``, the columns are the columns of self, and the
              values are the quantiles.
            - If ``q`` is a float, a Series will be returned where the
              index is the columns of self and the values are the quantiles.
        See Also
        --------
        core.window.Rolling.quantile: Rolling quantile.
        numpy.percentile: Numpy function to compute the percentile.
        Examples
        --------
        >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
        ...                   columns=['a', 'b'])
        >>> df.quantile(.1)
        a    1.3
        b    3.7
        Name: 0.1, dtype: float64
        >>> df.quantile([.1, .5])
               a     b
        0.1  1.3   3.7
        0.5  2.5  55.0
        Specifying `numeric_only=False` will also compute the quantile of
        datetime and timedelta data.
        >>> df = pd.DataFrame({'A': [1, 2],
        ...                    'B': [pd.Timestamp('2010'),
        ...                          pd.Timestamp('2011')],
        ...                    'C': [pd.Timedelta('1 days'),
        ...                          pd.Timedelta('2 days')]})
        >>> df.quantile(0.5, numeric_only=False)
        A                    1.5
        B    2010-07-02 12:00:00
        C        1 days 12:00:00
        Name: 0.5, dtype: object
        """
        # Validates 0 <= q <= 1 (raises ValueError otherwise).
        self._check_percentile(q)
        data = self._get_numeric_data() if numeric_only else self
        axis = self._get_axis_number(axis)
        is_transposed = axis == 1
        # Row-wise quantiles are computed on the transpose and flipped back
        # below; the internal manager always reduces along its axis 1.
        if is_transposed:
            data = data.T
        result = data._data.quantile(qs=q,
                                     axis=1,
                                     interpolation=interpolation,
                                     transposed=is_transposed)
        # list-like q -> 2-D result -> DataFrame; scalar q -> Series named q.
        if result.ndim == 2:
            result = self._constructor(result)
        else:
            result = self._constructor_sliced(result, name=q)
        if is_transposed:
            result = result.T
        return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
    def isin(self, values):
        """
        Whether each element in the DataFrame is contained in values.
        Parameters
        ----------
        values : iterable, Series, DataFrame or dict
            The result will only be true at a location if all the
            labels match. If `values` is a Series, that's the index. If
            `values` is a dict, the keys must be the column names,
            which must match. If `values` is a DataFrame,
            then both the index and column labels must match.
        Returns
        -------
        DataFrame
            DataFrame of booleans showing whether each element in the DataFrame
            is contained in values.
        See Also
        --------
        DataFrame.eq: Equality test for DataFrame.
        Series.isin: Equivalent method on Series.
        Series.str.contains: Test if pattern or regex is contained within a
            string of a Series or Index.
        Examples
        --------
        >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
        ...                   index=['falcon', 'dog'])
        >>> df
                num_legs  num_wings
        falcon         2          2
        dog            4          0
        When ``values`` is a list check whether every value in the DataFrame
        is present in the list (which animals have 0 or 2 legs or wings)
        >>> df.isin([0, 2])
                num_legs  num_wings
        falcon      True       True
        dog        False       True
        When ``values`` is a dict, we can pass values to check for each
        column separately:
        >>> df.isin({'num_wings': [0, 3]})
                num_legs  num_wings
        falcon     False      False
        dog        False       True
        When ``values`` is a Series or DataFrame the index and column must
        match. Note that 'falcon' does not match based on the number of legs
        in df2.
        >>> other = pd.DataFrame({'num_legs': [8, 2],'num_wings': [0, 2]},
        ...                      index=['spider', 'falcon'])
        >>> df.isin(other)
                num_legs  num_wings
        falcon      True       True
        dog        False      False
        """
        if isinstance(values, dict):
            from pandas.core.reshape.concat import concat
            # Each column is compared only against the values listed under
            # its own name; columns absent from the dict default to an empty
            # list, i.e. all-False.
            values = collections.defaultdict(list, values)
            return concat((self.iloc[:, [i]].isin(values[col])
                           for i, col in enumerate(self.columns)), axis=1)
        elif isinstance(values, Series):
            if not values.index.is_unique:
                raise ValueError("cannot compute isin with "
                                 "a duplicate axis.")
            # Align the Series on the row labels, then compare element-wise.
            return self.eq(values.reindex_like(self), axis='index')
        elif isinstance(values, DataFrame):
            if not (values.columns.is_unique and values.index.is_unique):
                raise ValueError("cannot compute isin with "
                                 "a duplicate axis.")
            # Align on both axes, then compare element-wise.
            return self.eq(values.reindex_like(self))
        else:
            if not is_list_like(values):
                raise TypeError("only list-like or dict-like objects are "
                                "allowed to be passed to DataFrame.isin(), "
                                "you passed a "
                                "{0!r}".format(type(values).__name__))
            # Flat membership test over the raveled values, reshaped back to
            # the frame's shape.
            return DataFrame(
                algorithms.isin(self.values.ravel(),
                                values).reshape(self.shape), self.index,
                self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0},
docs={
'index': 'The index (row labels) of the DataFrame.',
'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
    # TODO: this should be seriously cythonized
    # Transpose a dict-of-dicts {index: {col: v}} into {col: {index: v}},
    # preserving first-seen column order.
    transposed = OrderedDict()
    for row_label, row in compat.iteritems(data):
        for col_label, value in compat.iteritems(row):
            transposed.setdefault(col_label, OrderedDict())[row_label] = value
    return transposed
def _put_str(s, space):
return u'{s}'.format(s=s)[:space].ljust(space)
| 36.752536 | 169 | 0.528598 |
fbae21cfc708328d1c58acdcdd05f5d05c1db1f3 | 1,491 | py | Python | tests/test_exceptions.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 4 | 2020-05-16T22:30:32.000Z | 2021-11-09T22:26:38.000Z | tests/test_exceptions.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 183 | 2019-06-06T09:10:24.000Z | 2022-03-30T12:05:15.000Z | tests/test_exceptions.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 10 | 2019-10-29T21:55:03.000Z | 2021-01-18T14:20:34.000Z | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals
import pytest
import logging
from osbs.conf import Configuration
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from tempfile import NamedTemporaryFile
logger = logging.getLogger("osbs.tests")
def test_missing_config():
    # A nonexistent config file must be tolerated: Configuration falls back
    # to built-in defaults instead of raising.
    Configuration(conf_file="/nonexistent/path", conf_section="default")
def test_no_config():
    # With no config file at all, the explicitly passed openshift_url must
    # drive the derived OAuth authorize endpoint.
    conf = Configuration(conf_file=None, openshift_url='https://example:8443')
    assert conf.get_openshift_oauth_api_uri() == 'https://example:8443/oauth/authorize'
def test_missing_section():
    # An existing but empty config file lacking the requested section must
    # also be tolerated (no exception).
    with NamedTemporaryFile() as f:
        Configuration(conf_file=f.name, conf_section="missing")
def test_no_branch():
    # A config whose [default] section lacks the build/source settings a
    # build needs must surface as OsbsException when a pipeline run is
    # created against it.
    with NamedTemporaryFile(mode='w+') as f:
        f.write("""
[general]
build_json_dir=/nonexistent/path/
[default]
openshift_url=https://172.0.0.1:8443/
registry_uri=127.0.0.1:5000
""")
        f.flush()
        f.seek(0)
        with pytest.raises(OsbsException):
            os_conf = Configuration(conf_file=f.name,
                                    conf_section="default")
            osbs = OSBS(os_conf)
            osbs.create_binary_container_pipeline_run(git_uri="https://example.com/example.git",
                                                      git_ref="master")
| 28.132075 | 96 | 0.688799 |
2b58922fb8e220ead0391b9751b9355d704aff8d | 373 | py | Python | setup.py | emmmile/simpleimage | 6b079592ce64d67903bf9c085a132ddd9712fba8 | [
"MIT"
] | null | null | null | setup.py | emmmile/simpleimage | 6b079592ce64d67903bf9c085a132ddd9712fba8 | [
"MIT"
] | null | null | null | setup.py | emmmile/simpleimage | 6b079592ce64d67903bf9c085a132ddd9712fba8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Single source of truth for the package version.
VERSION = '0.0.1'
setup(
    name = "simpleimage",
    version = VERSION,
    author = '@emmmile',
    install_requires = [
        'opencv-python'
    ],
    license = 'MIT',
    test_suite = 'tests',
    python_requires = '>=3',
    # Ship every package except the test tree.
    packages = find_packages(exclude=["tests*"])
)
| 17.761905 | 48 | 0.589812 |
e181fa5b69012a5dfefd71707e825764158b5b60 | 1,124 | py | Python | websetup.py | milos85vasic/Apache-Factory-Toolkit | c68727f899c8d1ac6debf6d5d18d2b0814d403b7 | [
"Apache-2.0"
] | 2 | 2019-06-04T19:12:57.000Z | 2019-12-13T08:51:39.000Z | websetup.py | milos85vasic/Apache-Factory-Toolkit | c68727f899c8d1ac6debf6d5d18d2b0814d403b7 | [
"Apache-2.0"
] | null | null | null | websetup.py | milos85vasic/Apache-Factory-Toolkit | c68727f899c8d1ac6debf6d5d18d2b0814d403b7 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import subprocess
import time
toolkit_directory = "Toolkit"
toolkit_repo = "https://github.com/milos85vasic/Apache-Factory-Toolkit.git"
if __name__ == '__main__':
    # Clone the toolkit on first use; remember whether it pre-existed so we
    # can clean up our own clone (and this bootstrap script) afterwards.
    exists = True
    steps = []
    if not os.path.exists(toolkit_directory):
        exists = False
        steps.extend(
            [
                "mkdir " + toolkit_directory,
                "git clone --recurse-submodules " + toolkit_repo + " ./" + toolkit_directory,
            ]
        )
    for cmd in steps:
        os.system(cmd)
    # Usage: websetup.py <what> [<branch>]   (branch defaults to master)
    branch = "master"
    what = sys.argv[1]
    if len(sys.argv) >= 3:
        branch = sys.argv[2]
    # Imported only after the clone above has made the package available.
    from Toolkit.commands import get_python_cmd
    python_cmd = get_python_cmd()
    # NOTE: argv values are interpolated into a shell command for
    # os.system(); do not expose this script to untrusted input
    # (shell-injection risk).
    setup = python_cmd + " ./" + toolkit_directory + "/websetup_run.py " + what
    # Bug fix: was ``if branch is not "master"`` -- an identity comparison
    # against a string literal, which is unreliable (and a SyntaxWarning on
    # CPython >= 3.8).  Value inequality is what was intended.
    if branch != "master":
        setup += " " + branch
    steps = [
        setup
    ]
    if not exists:
        # We created the clone ourselves: remove it and this bootstrap
        # script once setup has run.
        steps.extend(
            [
                "rm -rf ./" + toolkit_directory,
                "rm -f " + os.path.basename(__file__)
            ]
        )
    for cmd in steps:
        os.system(cmd)
71e9a4805bfe7baa8faf8f88d6bb0e880b80251a | 1,733 | py | Python | yt_dlp/extractor/photobucket.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/photobucket.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/photobucket.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | import json
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class PhotobucketIE(InfoExtractor):
_VALID_URL = r'https?://(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
_TEST = {
'url': 'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0',
'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99',
'info_dict': {
'id': 'zpsc0c3b9fa',
'ext': 'mp4',
'timestamp': 1367669341,
'upload_date': '20130504',
'uploader': 'rachaneronas',
'title': 'Tired of Link Building? Try BacklinkMyDomain.com!',
}
}
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
video_extension = mobj.group('ext')
webpage = self._download_webpage(url, video_id)
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
webpage, 'info json')
info = json.loads(info_json)
url = compat_urllib_parse_unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
return {
'id': video_id,
'url': url,
'uploader': info['username'],
'timestamp': info['creationDate'],
'title': info['title'],
'ext': video_extension,
'thumbnail': info['thumbUrl'],
}
| 39.386364 | 215 | 0.582804 |
1b71b9b37917358de721437cb199318ff69a7e75 | 6,243 | py | Python | scripts/study_case/ID_4/torch_geometric/datasets/shapenet.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 8 | 2021-06-30T06:55:14.000Z | 2022-03-18T01:57:14.000Z | scripts/study_case/ID_4/torch_geometric/datasets/shapenet.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 1 | 2021-06-30T03:08:15.000Z | 2021-06-30T03:08:15.000Z | scripts/study_case/ID_4/torch_geometric/datasets/shapenet.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 2 | 2021-11-17T11:19:48.000Z | 2021-11-18T03:05:58.000Z | import os
import os.path as osp
import glob
import torch
from scripts.study_case.ID_4.torch_geometric.data import (Data, InMemoryDataset, download_url,
extract_zip)
from scripts.study_case.ID_4.torch_geometric.read import read_txt_array
class ShapeNet(InMemoryDataset):
r""" The ShapeNet part level segmentation dataset from the `"A Scalable
Active Framework for Region Annotation in 3D Shape Collections"
<http://web.stanford.edu/~ericyi/papers/part_annotation_16_small.pdf>`_
paper, containing about 17,000 3D shape point clouds from 16 shape
categories.
Each category is annotated with 2 to 6 parts.
Args:
root (string): Root directory where the dataset should be saved.
categories (string or [string], optional): The category of the CAD
models (one or a combination of :obj:`"Airplane"`, :obj:`"Bag"`,
:obj:`"Cap"`, :obj:`"Car"`, :obj:`"Chair"`, :obj:`"Earphone"`,
:obj:`"Guitar"`, :obj:`"Knife"`, :obj:`"Lamp"`, :obj:`"Laptop"`,
:obj:`"Motorbike"`, :obj:`"Mug"`, :obj:`"Pistol"`, :obj:`"Rocket"`,
:obj:`"Skateboard"`, :obj:`"Table"`).
Can be explicitely set to :obj:`None` to load all categories.
(default: :obj:`None`)
train (bool, optional): If :obj:`True`, loads the training dataset,
otherwise the test dataset. (default: :obj:`True`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
"""
url = 'https://shapenet.cs.stanford.edu/iccv17/partseg'
category_ids = {
'Airplane': '02691156',
'Bag': '02773838',
'Cap': '02954340',
'Car': '02958343',
'Chair': '03001627',
'Earphone': '03261776',
'Guitar': '03467517',
'Knife': '03624134',
'Lamp': '03636649',
'Laptop': '03642806',
'Motorbike': '03790512',
'Mug': '03797390',
'Pistol': '03948459',
'Rocket': '04099429',
'Skateboard': '04225987',
'Table': '04379243',
}
def __init__(self,
root,
categories=None,
train=True,
transform=None,
pre_transform=None,
pre_filter=None):
if categories is None:
categories = list(self.category_ids.keys())
if isinstance(categories, str):
categories = [categories]
assert all(category in self.category_ids for category in categories)
self.categories = categories
super(ShapeNet, self).__init__(root, transform, pre_transform,
pre_filter)
path = self.processed_paths[0] if train else self.processed_paths[1]
self.data, self.slices = torch.load(path)
self.y_mask = torch.load(self.processed_paths[2])
@property
def raw_file_names(self):
return [
'train_data', 'train_label', 'val_data', 'val_label', 'test_data',
'test_label'
]
@property
def processed_file_names(self):
cats = '_'.join([cat[:3].lower() for cat in self.categories])
return [
'{}_{}.pt'.format(cats, s) for s in ['training', 'test', 'y_mask']
]
def download(self):
for name in self.raw_file_names:
url = '{}/{}.zip'.format(self.url, name)
path = download_url(url, self.raw_dir)
extract_zip(path, self.raw_dir)
os.unlink(path)
def process_raw_path(self, data_path, label_path):
y_offset = 0
data_list = []
cat_ys = []
for cat_idx, cat in enumerate(self.categories):
idx = self.category_ids[cat]
point_paths = sorted(glob.glob(osp.join(data_path, idx, '*.pts')))
y_paths = sorted(glob.glob(osp.join(label_path, idx, '*.seg')))
points = [read_txt_array(path) for path in point_paths]
ys = [read_txt_array(path, dtype=torch.long) for path in y_paths]
lens = [y.size(0) for y in ys]
y = torch.cat(ys).unique(return_inverse=True)[1] + y_offset
cat_ys.append(y.unique())
y_offset = y.max().item() + 1
ys = y.split(lens)
for (pos, y) in zip(points, ys):
data = Data(y=y, pos=pos, category=cat_idx)
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
data_list.append(data)
y_mask = torch.zeros((len(self.categories), y_offset),
dtype=torch.uint8)
for i in range(len(cat_ys)):
y_mask[i, cat_ys[i]] = 1
return data_list, y_mask
def process(self):
train_data_list, y_mask = self.process_raw_path(*self.raw_paths[0:2])
val_data_list, _ = self.process_raw_path(*self.raw_paths[2:4])
test_data_list, _ = self.process_raw_path(*self.raw_paths[4:6])
data = self.collate(train_data_list + val_data_list)
torch.save(data, self.processed_paths[0])
torch.save(self.collate(test_data_list), self.processed_paths[1])
torch.save(y_mask, self.processed_paths[2])
def __repr__(self):
return '{}({}, categories={})'.format(self.__class__.__name__,
len(self), self.categories)
| 41.072368 | 94 | 0.584975 |
5a20a279b57973c0de6477818a24424e1a85780c | 8,848 | py | Python | salt/daemons/flo/worker.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | 1 | 2021-04-05T19:46:35.000Z | 2021-04-05T19:46:35.000Z | salt/daemons/flo/worker.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | salt/daemons/flo/worker.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
The core behaviors used by minion and master
'''
# pylint: disable=W0232
# pylint: disable=3rd-party-module-not-gated
from __future__ import absolute_import
# Import python libs
import time
import os
import multiprocessing
import logging
from salt.ext.six.moves import range
# Import salt libs
import salt.daemons.flo
import salt.daemons.masterapi
from raet import raeting
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
from salt.utils import kinds
# Import ioflo libs
import ioflo.base.deeding
log = logging.getLogger(__name__)
# convert to set once list is larger than about 3 because set hashes
INHIBIT_RETURN = [] # ['_return'] # cmd for which we should not send return
@ioflo.base.deeding.deedify(
    'SaltRaetWorkerFork',
    ioinits={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr',
        'worker_verify': '.salt.var.worker_verify',
        'access_keys': '.salt.access_keys',
        'mkey': '.salt.var.zmq.master_key',
        'aes': '.salt.var.zmq.aes'})
def worker_fork(self):
    '''
    Fork off the worker procs
    FloScript:
    do salt raet worker fork at enter
    '''
    # Spawn ``worker_threads`` Worker processes (1-based worker indices);
    # the short sleep staggers the forks slightly.
    for index in range(int(self.opts.value['worker_threads'])):
        time.sleep(0.01)
        self.proc_mgr.value.add_process(
            Worker,
            args=(
                self.opts.value,
                index + 1,
                self.worker_verify.value,
                self.access_keys.value,
                self.mkey.value,
                self.aes.value
            )
        )
class Worker(multiprocessing.Process):
    '''
    Create an ioflo worker in a separate process
    '''
    def __init__(self, opts, windex, worker_verify, access_keys, mkey, aes):
        # windex: 1-based worker index; mkey/aes: master crypto material
        # shared with the parent; worker_verify: token proving returns come
        # from a legitimate worker.
        super(Worker, self).__init__()
        self.opts = opts
        self.windex = windex
        self.worker_verify = worker_verify
        self.access_keys = access_keys
        self.mkey = mkey
        self.aes = aes
    def run(self):
        '''
        Spin up a worker, do this in multiprocess
        windex is worker index
        '''
        # Mark the config so downstream behaviors know they run in a worker.
        self.opts['__worker'] = True
        behaviors = ['salt.daemons.flo']
        # Preload the ioflo data store with the config and crypto shares the
        # worker floscript behaviors expect to find.
        preloads = [('.salt.opts', dict(value=self.opts)),
                    ('.salt.var.worker_verify', dict(value=self.worker_verify))]
        preloads.append(('.salt.var.fork.worker.windex', dict(value=self.windex)))
        preloads.append(('.salt.var.zmq.master_key', dict(value=self.mkey)))
        preloads.append(('.salt.var.zmq.aes', dict(value=self.aes)))
        preloads.append(
            ('.salt.access_keys', dict(value=self.access_keys)))
        preloads.extend(salt.daemons.flo.explode_opts(self.opts))
        console_logdir = self.opts.get('ioflo_console_logdir', '')
        if console_logdir:
            consolepath = os.path.join(console_logdir, "worker_{0}.log".format(self.windex))
        else:  # empty means log to std out
            consolepath = ''
        # Run the ioflo skedder for this worker; blocks for the lifetime of
        # the worker process.
        ioflo.app.run.start(
            name='worker{0}'.format(self.windex),
            period=float(self.opts['ioflo_period']),
            stamp=0.0,
            real=self.opts['ioflo_realtime'],
            filepath=self.opts['worker_floscript'],
            behaviors=behaviors,
            username='',
            password='',
            mode=None,
            houses=None,
            metas=None,
            preloads=preloads,
            verbose=int(self.opts['ioflo_verbose']),
            consolepath=consolepath,
        )
class SaltRaetWorkerSetup(ioflo.base.deeding.Deed):
    '''
    Set up the worker's UXD lane stack and master API loaders.
    FloScript:
    do salt raet worker setup at enter
    '''
    Ioinits = {
        'opts': '.salt.opts',
        'windex': '.salt.var.fork.worker.windex',
        'access_keys': '.salt.access_keys',
        'remote_loader': '.salt.loader.remote',
        'local_loader': '.salt.loader.local',
        'inode': '.salt.lane.manor.',
        'stack': 'stack',
        'local': {'ipath': 'local',
                  'ival': {'lanename': 'master'}}
    }

    def action(self):
        '''
        Set up the uxd stack and behaviors
        '''
        name = "worker{0}".format(self.windex.value)
        # master application kind; workers only exist for master/syndic roles
        kind = self.opts.value['__role']
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}' for Master Worker.".format(kind))
            log.error(emsg + "\n")
            raise ValueError(emsg)
        if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
                    kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
            lanename = 'master'
        else:  # workers currently are only supported for masters
            emsg = ("Invalid application kind '{0}' for Master Worker.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        sockdirpath = self.opts.value['sock_dir']
        # The LaneStack is the worker's UXD communication endpoint.
        self.stack.value = LaneStack(
            name=name,
            lanename=lanename,
            sockdirpath=sockdirpath)
        self.stack.value.Pk = raeting.PackKind.pack.value
        # Register the manor (main master process) yard so we can reach it.
        manor_yard = RemoteYard(
            stack=self.stack.value,
            name='manor',
            lanename=lanename,
            dirpath=sockdirpath)
        self.stack.value.addRemote(manor_yard)
        # Function loaders servicing remote (minion-facing) and local
        # (client-facing) master API commands.
        self.remote_loader.value = salt.daemons.masterapi.RemoteFuncs(
            self.opts.value)
        self.local_loader.value = salt.daemons.masterapi.LocalFuncs(
            self.opts.value,
            self.access_keys.value)
        # Announce ourselves to the manor so it can start routing work here.
        init = {}
        init['route'] = {
            'src': (None, self.stack.value.local.name, None),
            'dst': (None, manor_yard.name, 'worker_req')
        }
        self.stack.value.transmit(init, self.stack.value.fetchUidByName(manor_yard.name))
        self.stack.value.serviceAll()

    def __del__(self):
        # Bug fix: ``stack`` is an ioflo share -- the LaneStack is held in
        # ``self.stack.value`` (see action()), so the original
        # ``self.stack.server.close()`` always raised AttributeError during
        # teardown.  Guard in case action() never ran.
        stack = getattr(self.stack, 'value', None)
        if stack is not None:
            stack.server.close()
class SaltRaetWorkerRouter(ioflo.base.deeding.Deed):
    '''
    FloScript:
    do salt raet worker router
    '''
    Ioinits = {
        'lane_stack': '.salt.lane.manor.stack',
        'road_stack': '.salt.road.manor.stack',
        'opts': '.salt.opts',
        'worker_verify': '.salt.var.worker_verify',
        'remote_loader': '.salt.loader.remote',
        'local_loader': '.salt.loader.local',
    }
    def action(self):
        '''
        Read in a command and execute it, send the return back up to the
        main master process
        '''
        self.lane_stack.value.serviceAll()
        while self.lane_stack.value.rxMsgs:
            msg, sender = self.lane_stack.value.rxMsgs.popleft()
            try:
                s_estate, s_yard, s_share = msg['route']['src']
                d_estate, d_yard, d_share = msg['route']['dst']
            except (ValueError, IndexError):
                log.error('Received invalid message: {0}'.format(msg))
                return
            log.debug("**** Worker Router rxMsg\n msg= {0}\n".format(msg))
            if 'load' in msg:
                cmd = msg['load'].get('cmd')
                # Ignore empty commands and dunder-prefixed (private) ones.
                if not cmd:
                    continue
                elif cmd.startswith('__'):
                    continue
                ret = {}
                # Dispatch on the destination share: remote_cmd -> RemoteFuncs
                # (minion-facing API), local_cmd -> LocalFuncs (client-facing);
                # unknown commands silently yield an empty return.
                if d_share == 'remote_cmd':
                    if hasattr(self.remote_loader.value, cmd):
                        ret['return'] = getattr(self.remote_loader.value, cmd)(msg['load'])
                elif d_share == 'local_cmd':
                    if hasattr(self.local_loader.value, cmd):
                        ret['return'] = getattr(self.local_loader.value, cmd)(msg['load'])
                else:
                    ret = {'error': 'Invalid request'}
                # Publish returns are routed to the dedicated pub_ret share
                # and stamped with the worker-verify token so the manor can
                # authenticate them.
                if cmd == 'publish' and 'pub' in ret.get('return', {}):
                    r_share = 'pub_ret'
                    ret['__worker_verify'] = self.worker_verify.value
                else:
                    r_share = s_share
                if cmd not in INHIBIT_RETURN:
                    ret['route'] = {
                        'src': (None, self.lane_stack.value.local.name, None),
                        'dst': (s_estate, s_yard, r_share)
                    }
                    self.lane_stack.value.transmit(ret,
                                                   self.lane_stack.value.fetchUidByName('manor'))
                    self.lane_stack.value.serviceAll()
| 35.96748 | 92 | 0.527916 |
fe5710f10f34ef28c1f83358c15ae7c7aa251bf1 | 903 | py | Python | source/core/python/modules/data_provider.py | Heimdall-Framework/heimdall-framework | d35766e793888a8f512a3d6586adef97cb468c6b | [
"MIT"
] | 13 | 2020-06-11T11:01:20.000Z | 2021-11-01T14:18:10.000Z | source/core/python/modules/data_provider.py | Heimdall-Framework/heimdall-framework | d35766e793888a8f512a3d6586adef97cb468c6b | [
"MIT"
] | null | null | null | source/core/python/modules/data_provider.py | Heimdall-Framework/heimdall-framework | d35766e793888a8f512a3d6586adef97cb468c6b | [
"MIT"
] | null | null | null | import string
import random
from .logger import Logger
class DataProvider():
    """Provides pseudorandom payload data for device tests."""

    def generate_random_data_file(self):
        """
        Generates a file ('dump.me' in the working directory) containing
        128 characters of pseudorandom data.

        Returns True on success, False if the file could not be written.
        """
        try:
            file_content = self.__generate_pseudorandom_string(128)
            # ``with`` guarantees the handle is closed even if the write
            # fails (the original leaked the open file handle).
            with open('dump.me', 'w') as dump_file:
                dump_file.write(file_content)
            return True
        # ``except Exception`` instead of a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        except Exception:
            Logger().log('> File creation exception.')
            return False

    def __generate_pseudorandom_string(self, length=32):
        """
        Generates a pseudorandom alphanumeric string.

        :param length: The length of the string. The default length is set to 32

        NOTE: uses ``random``, which is not cryptographically secure --
        acceptable here because the data is only a test payload.
        """
        characters_set = string.ascii_letters + string.digits
        return ''.join(random.choice(characters_set) for _ in range(length))
f93a1a7aee00e1a10d5f9d5e89c0344164fe48fa | 23 | py | Python | progeny/__init__.py | grst/progeny-py | d8aa468e87667e5c8769eefe2a067dc51df86a0d | [
"MIT"
] | 12 | 2021-03-11T08:59:11.000Z | 2021-11-08T15:51:56.000Z | progeny/__init__.py | grst/progeny-py | d8aa468e87667e5c8769eefe2a067dc51df86a0d | [
"MIT"
] | 4 | 2021-03-23T16:15:43.000Z | 2021-09-29T10:49:05.000Z | progeny/__init__.py | grst/progeny-py | d8aa468e87667e5c8769eefe2a067dc51df86a0d | [
"MIT"
] | 2 | 2021-03-23T15:18:47.000Z | 2021-04-02T12:30:31.000Z | from .progeny import *
| 11.5 | 22 | 0.73913 |
4879fdd686d08e525b63e67f9e6a5fee4d470e0f | 7,921 | py | Python | examples/school/categories.py | abimurali1993/dev | 295182a75b9b6031656b5c6c10866d517a1992cf | [
"Apache-2.0"
] | 1 | 2018-05-13T09:32:50.000Z | 2018-05-13T09:32:50.000Z | examples/school/categories.py | abimurali1993/dev | 295182a75b9b6031656b5c6c10866d517a1992cf | [
"Apache-2.0"
] | null | null | null | examples/school/categories.py | abimurali1993/dev | 295182a75b9b6031656b5c6c10866d517a1992cf | [
"Apache-2.0"
] | null | null | null | from __future__ import print_statement
from words import *
from nltk.wordnet import *
from operator import itemgetter
import nltk
import re
from string import join
def build_word_associations():
cfd = nltk.ConditionalFreqDist()
# get a list of all English stop words
stopwords_list = nltk.corpus.stopwords.words('english')
# count words that occur within a window of size 5 ahead of other words
for sentence in nltk.corpus.brown.tagged_sents():
sentence = [(token.lower(), tag) for (token, tag) in sentence if token.lower() not in stopwords_list]
for (index, (token, tag)) in enumerate(sentence):
if token not in stopwords_list:
window = sentence[index+1:index+5]
for (window_token, window_tag) in window:
if window_token not in stopwords_list and window_tag[0] is tag[0]:
cfd[token].inc(window_token)
return cfd
def associate():
while True:
word = raw_input("Enter a word: ")
for i in range(100):
next = cfd[word].max()
if next:
print("->", next,)
word = next
else:
break
print()
def build_word_contexts(words):
contexts_to_words = {}
words = [w.lower() for w in words]
for i in range(1,len(words)-1):
context = words[i-1]+"_"+words[i+1]
if context not in contexts_to_words:
contexts_to_words[context] = []
contexts_to_words[context].append(words[i])
# inverted structure, tracking frequency
words_to_contexts = {}
for context in contexts_to_words:
for word in contexts_to_words[context]:
if word not in words_to_contexts:
words_to_contexts[word] = []
words_to_contexts[word].append(context)
return words_to_contexts, contexts_to_words
def search_contexts(words):
words_to_contexts, contexts_to_words = build_word_contexts(words)
while True:
hits = []
word = raw_input("word> ")
if word not in words_to_contexts:
print("Word not found")
continue
contexts = words_to_contexts[word]
for w in words_to_contexts: # all words
for context in words_to_contexts[w]:
if context in contexts:
hits.append(w)
hit_freqs = count_words(hits).items()
sorted_hits = sorted(hit_freqs, key=itemgetter(1), reverse=True)
words = [word for (word, count) in sorted_hits[1:] if count > 1]
print(join(words))
def lookup(word):
for category in [N, V, ADJ, ADV]:
if word in category:
for synset in category[word]:
print(category[word], ":", synset.gloss)
############################################
# Simple Tagger
############################################
# map brown pos tags
# http://khnt.hit.uib.no/icame/manuals/brown/INDEX.HTM
def map1(tag):
tag = re.sub(r'fw-', '', tag) # foreign words
tag = re.sub(r'-[th]l', '', tag) # headlines, titles
tag = re.sub(r'-nc', '', tag) # cited
tag = re.sub(r'ber?', 'vb', tag) # verb "to be"
tag = re.sub(r'hv', 'vb', tag) # verb "to have"
tag = re.sub(r'do', 'vb', tag) # verb "to do"
tag = re.sub(r'nc', 'nn', tag) # cited word
tag = re.sub(r'z', '', tag) # third-person singular
return tag
def map2(tag):
tag = re.sub(r'\bj[^-+]*', 'J', tag) # adjectives
tag = re.sub(r'\bp[^-+]*', 'P', tag) # pronouns
tag = re.sub(r'\bm[^-+]*', 'M', tag) # modals
tag = re.sub(r'\bq[^-+]*', 'Q', tag) # qualifiers
tag = re.sub(r'\babl', 'Q', tag) # qualifiers
tag = re.sub(r'\bab[nx]', 'D', tag) # determiners
tag = re.sub(r'\bap', 'D', tag) # determiners
tag = re.sub(r'\bd[^-+]*', 'D', tag) # determiners
tag = re.sub(r'\bat', 'D', tag) # determiners
tag = re.sub(r'\bw[^-+]*', 'W', tag) # wh words
tag = re.sub(r'\br[^-+]*', 'R', tag) # adverbs
tag = re.sub(r'\bto', 'T', tag) # "to"
tag = re.sub(r'\bc[cs]', 'C', tag) # conjunctions
tag = re.sub(r's', '', tag) # plurals
tag = re.sub(r'\bin', 'I', tag) # prepositions
tag = re.sub(r'\buh', 'U', tag) # interjections (uh)
tag = re.sub(r'\bex', 'E', tag) # existential "there"
tag = re.sub(r'\bvbn', 'VN', tag) # past participle
tag = re.sub(r'\bvbd', 'VD', tag) # past tense
tag = re.sub(r'\bvbg', 'VG', tag) # gerund
tag = re.sub(r'\bvb', 'V', tag) # verb
tag = re.sub(r'\bnn', 'N', tag) # noun
tag = re.sub(r'\bnp', 'NP', tag) # proper noun
tag = re.sub(r'\bnr', 'NR', tag) # adverbial noun
tag = re.sub(r'\bex', 'E', tag) # existential "there"
tag = re.sub(r'\bod', 'OD', tag) # ordinal
tag = re.sub(r'\bcd', 'CD', tag) # cardinal
tag = re.sub(r'-t', '', tag) # misc
tag = re.sub(r'[a-z\*]', '', tag) # misc
return tag
def map(tag):
return map2(map1(tag.lower()))
# print(sorted(set(map2(map1(tag)) for s in brown.tagged() for w,tag in s)))
def load_brown_corpus(sections):
global map
corpus = nltk.corpus.brown.tagged_sents(tuple(sections))
return [[(w.lower(), map(t)) for w, t in sent] for sent in corpus]
def train_tagger(corpus):
t0 = nltk.tag.Default('N')
t1 = nltk.tag.Unigram(cutoff=0, backoff=t0)
t2 = nltk.tag.Bigram(cutoff=0, backoff=t1)
t3 = nltk.tag.Trigram(cutoff=1, backoff=t2)
t1.train(corpus, verbose=True)
t2.train(corpus, verbose=True)
t3.train(corpus, verbose=True)
return t3
def tag(corpus):
print("Training tagger...")
tagger = train_tagger(corpus)
while True:
text = raw_input("sentence> ")
words = text.split()
print(join(word+"/"+tag for word, tag in tagger.tag(words)))
WORD_OR_TAG = '[^/ ]+'
BOUNDARY = r'\b'
def process(pattern):
new = []
for term in pattern.split():
if re.match('[A-Z]+$', term):
new.append(BOUNDARY + WORD_OR_TAG + '/' + term + BOUNDARY)
elif '/' in term:
new.append(BOUNDARY + term + BOUNDARY)
else:
new.append(BOUNDARY + term + '/' + WORD_OR_TAG + BOUNDARY)
return join(new)
def search(corpus, num=25):
print("Loading corpus...")
strings = [join(w+'/'+t for (w,t) in sent) for sent in corpus]
while True:
pattern = ""
while not pattern:
pattern = raw_input("search> ")
pattern = process(pattern)
i = 0
for sent in strings:
m = re.search(pattern, sent)
if m:
sent = ' '*35 + sent + ' '*45
print(sent[m.start():m.start()+80])
i += 1
if i > num:
break
############################################
# Wordnet Browser
# now incorporated into NLTK as wordnet.browse
############################################
############################################
# Mad Libs
############################################
madlib = """Britney Spears will meet up with her %(NP)s label for
crisis talks about the future of her %(N)s this week reports Digital Spy.
%(NP)s Records plan to tell Spears to stop %(VG)s and take more
care of her %(J)s image if she wants to avoid being %(VD)s by the noun.
The news %(V)s shortly after Britney posted a message on her
website promising a new album and tour. The last couple of years
have been quite a ride for me, the media has criticized %(P)s every
noun %(C)s printed a skewed perception of who I really am as a human
being, she wrote in a letter posted %(NR)s."""
# mapping = {}
# mapping['NP'] =
# mapping['N'] =
# mapping['VG'] =
# mapping['J'] =
# mapping['VD'] =
# mapping['V'] =
# mapping['P'] =
# mapping['C'] =
# mapping['NR'] =
# print(madlib % mapping)
| 35.520179 | 109 | 0.549552 |
ada2f83335426f6fe8067780b492557ff19bd54e | 517 | py | Python | sample.py | mpcarolin/pokedex-flex-api | 2ed38792aa53848d4445d66630663b4d32b30815 | [
"Apache-2.0"
] | 1 | 2020-01-14T02:14:05.000Z | 2020-01-14T02:14:05.000Z | sample.py | mpcarolin/pokedex-flex-api | 2ed38792aa53848d4445d66630663b4d32b30815 | [
"Apache-2.0"
] | 2 | 2018-06-02T18:40:59.000Z | 2020-03-10T00:03:50.000Z | sample.py | mpcarolin/pokedex-flex-api | 2ed38792aa53848d4445d66630663b4d32b30815 | [
"Apache-2.0"
] | null | null | null | from mapping import ResponseMapper
from constants import APIS
BASE_URI = APIS["pokeapi"]["base_uri"]
ENDPOINTS = APIS["pokeapi"]["endpoints"]
def uri(key): return BASE_URI + ENDPOINTS[key]
mapper = ResponseMapper()
@mapper.maps(uri('test'))
def test_mapping(self, exchange):
json = exchange.response.json()
json['new key'] = 'new value'
return json
@mapper.maps(uri('pokemon-by-id'))
def pokemon(self, exchange):
json = exchange.response.json()
json['new_key'] = 'new value'
return json
| 22.478261 | 46 | 0.696325 |
44724d5f8b76987e8f294a0c001061874dafd5d0 | 691 | py | Python | hydroDL/master/cmd/WRTDS.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | hydroDL/master/cmd/WRTDS.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | hydroDL/master/cmd/WRTDS.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z | import argparse
import os
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
from hydroDL.data import usgs
from hydroDL import kPath
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-D', dest='dataName', type=str)
parser.add_argument('-T', dest='trainSet', type=str)
args = parser.parse_args()
testSet = 'all'
yW = WRTDS.testWRTDS(args.dataName, args.trainSet, testSet, usgs.newC)
dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')
fileName = '{}-{}-{}'.format(args.dataName, args.trainSet, testSet)
np.savez_compressed(os.path.join(dirRoot, fileName), yW)
| 36.368421 | 74 | 0.720695 |
d7bf27bff2fe2dbd7ab9c481ce141c31786d3beb | 450 | py | Python | tests/webutil.py | matsub/slashcommands | 28e96a9c43b740868d08fafb176f3c97f8db8816 | [
"MIT"
] | 4 | 2017-04-05T13:30:39.000Z | 2018-12-04T13:42:56.000Z | tests/webutil.py | matsub/slashcommands | 28e96a9c43b740868d08fafb176f3c97f8db8816 | [
"MIT"
] | null | null | null | tests/webutil.py | matsub/slashcommands | 28e96a9c43b740868d08fafb176f3c97f8db8816 | [
"MIT"
] | 2 | 2020-11-15T10:52:27.000Z | 2020-11-16T00:46:28.000Z | # coding: utf-8
import urllib.request
import urllib.parse
class Client:
def __init__(self, baseurl):
self.baseurl = baseurl
def get(self, url):
url = urllib.parse.urljoin(self.baseurl, url)
return urllib.request.urlopen(url)
def post(self, url, data):
url = urllib.parse.urljoin(self.baseurl, url)
data = urllib.parse.urlencode(data).encode()
return urllib.request.urlopen(url, data)
| 22.5 | 53 | 0.653333 |
3ccb5e0f1143775019b267f636b20a53811d90f5 | 223 | py | Python | spacy_pattern_builder/exceptions.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 32 | 2019-11-05T00:19:20.000Z | 2021-04-28T09:08:53.000Z | spacy_pattern_builder/exceptions.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 1 | 2020-01-28T09:06:14.000Z | 2020-09-19T21:28:06.000Z | spacy_pattern_builder/exceptions.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 6 | 2020-01-27T10:21:40.000Z | 2022-02-21T18:44:31.000Z | class TokensNotFullyConnectedError(Exception):
pass
class DuplicateTokensError(Exception):
pass
class TokenNotInMatchTokensError(Exception):
pass
class FeaturesMissingFromPatternError(Exception):
pass
| 14.866667 | 49 | 0.793722 |
6eab03e8f65a79b94da735ca3986924b2acf77c9 | 26,119 | py | Python | hubspot/cms/performance/models/performance_view.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 1 | 2020-11-12T08:46:32.000Z | 2020-11-12T08:46:32.000Z | hubspot/cms/performance/models/performance_view.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/cms/performance/models/performance_view.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
CMS Performance API
Use these endpoints to get a time series view of your website's performance. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.cms.performance.configuration import Configuration
class PerformanceView(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"_403": "int",
"_404": "int",
"_500": "int",
"_504": "int",
"start_timestamp": "int",
"end_timestamp": "int",
"start_datetime": "str",
"end_datetime": "str",
"total_requests": "int",
"cache_hits": "int",
"cache_hit_rate": "float",
"total_request_time": "int",
"avg_origin_response_time": "int",
"response_time_ms": "int",
"_100_x": "int",
"_20_x": "int",
"_30_x": "int",
"_40_x": "int",
"_50_x": "int",
"_50th": "int",
"_95th": "int",
"_99th": "int",
}
attribute_map = {
"_403": "403",
"_404": "404",
"_500": "500",
"_504": "504",
"start_timestamp": "startTimestamp",
"end_timestamp": "endTimestamp",
"start_datetime": "startDatetime",
"end_datetime": "endDatetime",
"total_requests": "totalRequests",
"cache_hits": "cacheHits",
"cache_hit_rate": "cacheHitRate",
"total_request_time": "totalRequestTime",
"avg_origin_response_time": "avgOriginResponseTime",
"response_time_ms": "responseTimeMs",
"_100_x": "100X",
"_20_x": "20X",
"_30_x": "30X",
"_40_x": "40X",
"_50_x": "50X",
"_50th": "50th",
"_95th": "95th",
"_99th": "99th",
}
def __init__(
self,
_403=None,
_404=None,
_500=None,
_504=None,
start_timestamp=None,
end_timestamp=None,
start_datetime=None,
end_datetime=None,
total_requests=None,
cache_hits=None,
cache_hit_rate=None,
total_request_time=None,
avg_origin_response_time=None,
response_time_ms=None,
_100_x=None,
_20_x=None,
_30_x=None,
_40_x=None,
_50_x=None,
_50th=None,
_95th=None,
_99th=None,
local_vars_configuration=None,
): # noqa: E501
"""PerformanceView - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.__403 = None
self.__404 = None
self.__500 = None
self.__504 = None
self._start_timestamp = None
self._end_timestamp = None
self._start_datetime = None
self._end_datetime = None
self._total_requests = None
self._cache_hits = None
self._cache_hit_rate = None
self._total_request_time = None
self._avg_origin_response_time = None
self._response_time_ms = None
self.__100_x = None
self.__20_x = None
self.__30_x = None
self.__40_x = None
self.__50_x = None
self.__50th = None
self.__95th = None
self.__99th = None
self.discriminator = None
self._403 = _403
self._404 = _404
self._500 = _500
self._504 = _504
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.start_datetime = start_datetime
self.end_datetime = end_datetime
self.total_requests = total_requests
self.cache_hits = cache_hits
self.cache_hit_rate = cache_hit_rate
self.total_request_time = total_request_time
self.avg_origin_response_time = avg_origin_response_time
self.response_time_ms = response_time_ms
self._100_x = _100_x
self._20_x = _20_x
self._30_x = _30_x
self._40_x = _40_x
self._50_x = _50_x
self._50th = _50th
self._95th = _95th
self._99th = _99th
@property
def _403(self):
"""Gets the _403 of this PerformanceView. # noqa: E501
The number of responses that had an http status code of 403. # noqa: E501
:return: The _403 of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__403
@_403.setter
def _403(self, _403):
"""Sets the _403 of this PerformanceView.
The number of responses that had an http status code of 403. # noqa: E501
:param _403: The _403 of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _403 is None
): # noqa: E501
raise ValueError(
"Invalid value for `_403`, must not be `None`"
) # noqa: E501
self.__403 = _403
@property
def _404(self):
"""Gets the _404 of this PerformanceView. # noqa: E501
The number of responses that had an http status code of 404. # noqa: E501
:return: The _404 of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__404
@_404.setter
def _404(self, _404):
"""Sets the _404 of this PerformanceView.
The number of responses that had an http status code of 404. # noqa: E501
:param _404: The _404 of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _404 is None
): # noqa: E501
raise ValueError(
"Invalid value for `_404`, must not be `None`"
) # noqa: E501
self.__404 = _404
@property
def _500(self):
"""Gets the _500 of this PerformanceView. # noqa: E501
The number of responses that had an http status code of 500. # noqa: E501
:return: The _500 of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__500
@_500.setter
def _500(self, _500):
"""Sets the _500 of this PerformanceView.
The number of responses that had an http status code of 500. # noqa: E501
:param _500: The _500 of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _500 is None
): # noqa: E501
raise ValueError(
"Invalid value for `_500`, must not be `None`"
) # noqa: E501
self.__500 = _500
@property
def _504(self):
"""Gets the _504 of this PerformanceView. # noqa: E501
The number of responses that had an http status code of 504. # noqa: E501
:return: The _504 of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__504
@_504.setter
def _504(self, _504):
"""Sets the _504 of this PerformanceView.
The number of responses that had an http status code of 504. # noqa: E501
:param _504: The _504 of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _504 is None
): # noqa: E501
raise ValueError(
"Invalid value for `_504`, must not be `None`"
) # noqa: E501
self.__504 = _504
@property
def start_timestamp(self):
"""Gets the start_timestamp of this PerformanceView. # noqa: E501
The timestamp in milliseconds of the start of this interval. # noqa: E501
:return: The start_timestamp of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._start_timestamp
@start_timestamp.setter
def start_timestamp(self, start_timestamp):
"""Sets the start_timestamp of this PerformanceView.
The timestamp in milliseconds of the start of this interval. # noqa: E501
:param start_timestamp: The start_timestamp of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and start_timestamp is None
): # noqa: E501
raise ValueError(
"Invalid value for `start_timestamp`, must not be `None`"
) # noqa: E501
self._start_timestamp = start_timestamp
@property
def end_timestamp(self):
"""Gets the end_timestamp of this PerformanceView. # noqa: E501
The timestamp in milliseconds of the end of this interval. # noqa: E501
:return: The end_timestamp of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._end_timestamp
@end_timestamp.setter
def end_timestamp(self, end_timestamp):
"""Sets the end_timestamp of this PerformanceView.
The timestamp in milliseconds of the end of this interval. # noqa: E501
:param end_timestamp: The end_timestamp of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and end_timestamp is None
): # noqa: E501
raise ValueError(
"Invalid value for `end_timestamp`, must not be `None`"
) # noqa: E501
self._end_timestamp = end_timestamp
@property
def start_datetime(self):
"""Gets the start_datetime of this PerformanceView. # noqa: E501
:return: The start_datetime of this PerformanceView. # noqa: E501
:rtype: str
"""
return self._start_datetime
@start_datetime.setter
def start_datetime(self, start_datetime):
"""Sets the start_datetime of this PerformanceView.
:param start_datetime: The start_datetime of this PerformanceView. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and start_datetime is None
): # noqa: E501
raise ValueError(
"Invalid value for `start_datetime`, must not be `None`"
) # noqa: E501
self._start_datetime = start_datetime
@property
def end_datetime(self):
"""Gets the end_datetime of this PerformanceView. # noqa: E501
:return: The end_datetime of this PerformanceView. # noqa: E501
:rtype: str
"""
return self._end_datetime
@end_datetime.setter
def end_datetime(self, end_datetime):
"""Sets the end_datetime of this PerformanceView.
:param end_datetime: The end_datetime of this PerformanceView. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and end_datetime is None
): # noqa: E501
raise ValueError(
"Invalid value for `end_datetime`, must not be `None`"
) # noqa: E501
self._end_datetime = end_datetime
@property
def total_requests(self):
"""Gets the total_requests of this PerformanceView. # noqa: E501
The total number of requests received in this period. # noqa: E501
:return: The total_requests of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._total_requests
@total_requests.setter
def total_requests(self, total_requests):
"""Sets the total_requests of this PerformanceView.
The total number of requests received in this period. # noqa: E501
:param total_requests: The total_requests of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and total_requests is None
): # noqa: E501
raise ValueError(
"Invalid value for `total_requests`, must not be `None`"
) # noqa: E501
self._total_requests = total_requests
@property
def cache_hits(self):
"""Gets the cache_hits of this PerformanceView. # noqa: E501
The total number of requests that were served cached responses. # noqa: E501
:return: The cache_hits of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._cache_hits
@cache_hits.setter
def cache_hits(self, cache_hits):
"""Sets the cache_hits of this PerformanceView.
The total number of requests that were served cached responses. # noqa: E501
:param cache_hits: The cache_hits of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and cache_hits is None
): # noqa: E501
raise ValueError(
"Invalid value for `cache_hits`, must not be `None`"
) # noqa: E501
self._cache_hits = cache_hits
@property
def cache_hit_rate(self):
"""Gets the cache_hit_rate of this PerformanceView. # noqa: E501
The percentage of requests that were served cached responses. # noqa: E501
:return: The cache_hit_rate of this PerformanceView. # noqa: E501
:rtype: float
"""
return self._cache_hit_rate
@cache_hit_rate.setter
def cache_hit_rate(self, cache_hit_rate):
"""Sets the cache_hit_rate of this PerformanceView.
The percentage of requests that were served cached responses. # noqa: E501
:param cache_hit_rate: The cache_hit_rate of this PerformanceView. # noqa: E501
:type: float
"""
if (
self.local_vars_configuration.client_side_validation
and cache_hit_rate is None
): # noqa: E501
raise ValueError(
"Invalid value for `cache_hit_rate`, must not be `None`"
) # noqa: E501
self._cache_hit_rate = cache_hit_rate
@property
def total_request_time(self):
"""Gets the total_request_time of this PerformanceView. # noqa: E501
:return: The total_request_time of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._total_request_time
@total_request_time.setter
def total_request_time(self, total_request_time):
"""Sets the total_request_time of this PerformanceView.
:param total_request_time: The total_request_time of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and total_request_time is None
): # noqa: E501
raise ValueError(
"Invalid value for `total_request_time`, must not be `None`"
) # noqa: E501
self._total_request_time = total_request_time
@property
def avg_origin_response_time(self):
"""Gets the avg_origin_response_time of this PerformanceView. # noqa: E501
The average response time in milliseconds from the origin to the edge. # noqa: E501
:return: The avg_origin_response_time of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._avg_origin_response_time
@avg_origin_response_time.setter
def avg_origin_response_time(self, avg_origin_response_time):
"""Sets the avg_origin_response_time of this PerformanceView.
The average response time in milliseconds from the origin to the edge. # noqa: E501
:param avg_origin_response_time: The avg_origin_response_time of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and avg_origin_response_time is None
): # noqa: E501
raise ValueError(
"Invalid value for `avg_origin_response_time`, must not be `None`"
) # noqa: E501
self._avg_origin_response_time = avg_origin_response_time
@property
def response_time_ms(self):
"""Gets the response_time_ms of this PerformanceView. # noqa: E501
The average response time in milliseconds. # noqa: E501
:return: The response_time_ms of this PerformanceView. # noqa: E501
:rtype: int
"""
return self._response_time_ms
@response_time_ms.setter
def response_time_ms(self, response_time_ms):
"""Sets the response_time_ms of this PerformanceView.
The average response time in milliseconds. # noqa: E501
:param response_time_ms: The response_time_ms of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation
and response_time_ms is None
): # noqa: E501
raise ValueError(
"Invalid value for `response_time_ms`, must not be `None`"
) # noqa: E501
self._response_time_ms = response_time_ms
@property
def _100_x(self):
"""Gets the _100_x of this PerformanceView. # noqa: E501
The number of responses that had an http status code between 1000-1999. # noqa: E501
:return: The _100_x of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__100_x
@_100_x.setter
def _100_x(self, _100_x):
"""Sets the _100_x of this PerformanceView.
The number of responses that had an http status code between 1000-1999. # noqa: E501
:param _100_x: The _100_x of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _100_x is None
): # noqa: E501
raise ValueError(
"Invalid value for `_100_x`, must not be `None`"
) # noqa: E501
self.__100_x = _100_x
@property
def _20_x(self):
"""Gets the _20_x of this PerformanceView. # noqa: E501
The number of responses that had an http status code between 200-299. # noqa: E501
:return: The _20_x of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__20_x
@_20_x.setter
def _20_x(self, _20_x):
"""Sets the _20_x of this PerformanceView.
The number of responses that had an http status code between 200-299. # noqa: E501
:param _20_x: The _20_x of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _20_x is None
): # noqa: E501
raise ValueError(
"Invalid value for `_20_x`, must not be `None`"
) # noqa: E501
self.__20_x = _20_x
@property
def _30_x(self):
"""Gets the _30_x of this PerformanceView. # noqa: E501
The number of responses that had an http status code between 300-399. # noqa: E501
:return: The _30_x of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__30_x
@_30_x.setter
def _30_x(self, _30_x):
"""Sets the _30_x of this PerformanceView.
The number of responses that had an http status code between 300-399. # noqa: E501
:param _30_x: The _30_x of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _30_x is None
): # noqa: E501
raise ValueError(
"Invalid value for `_30_x`, must not be `None`"
) # noqa: E501
self.__30_x = _30_x
@property
def _40_x(self):
"""Gets the _40_x of this PerformanceView. # noqa: E501
The number of responses that had an http status code between 400-499. # noqa: E501
:return: The _40_x of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__40_x
@_40_x.setter
def _40_x(self, _40_x):
"""Sets the _40_x of this PerformanceView.
The number of responses that had an http status code between 400-499. # noqa: E501
:param _40_x: The _40_x of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _40_x is None
): # noqa: E501
raise ValueError(
"Invalid value for `_40_x`, must not be `None`"
) # noqa: E501
self.__40_x = _40_x
@property
def _50_x(self):
"""Gets the _50_x of this PerformanceView. # noqa: E501
The number of responses that had an http status code between 500-599. # noqa: E501
:return: The _50_x of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__50_x
@_50_x.setter
def _50_x(self, _50_x):
"""Sets the _50_x of this PerformanceView.
The number of responses that had an http status code between 500-599. # noqa: E501
:param _50_x: The _50_x of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _50_x is None
): # noqa: E501
raise ValueError(
"Invalid value for `_50_x`, must not be `None`"
) # noqa: E501
self.__50_x = _50_x
@property
def _50th(self):
"""Gets the _50th of this PerformanceView. # noqa: E501
The 50th percentile response time. # noqa: E501
:return: The _50th of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__50th
@_50th.setter
def _50th(self, _50th):
"""Sets the _50th of this PerformanceView.
The 50th percentile response time. # noqa: E501
:param _50th: The _50th of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _50th is None
): # noqa: E501
raise ValueError(
"Invalid value for `_50th`, must not be `None`"
) # noqa: E501
self.__50th = _50th
@property
def _95th(self):
"""Gets the _95th of this PerformanceView. # noqa: E501
The 95th percentile response time. # noqa: E501
:return: The _95th of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__95th
@_95th.setter
def _95th(self, _95th):
"""Sets the _95th of this PerformanceView.
The 95th percentile response time. # noqa: E501
:param _95th: The _95th of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _95th is None
): # noqa: E501
raise ValueError(
"Invalid value for `_95th`, must not be `None`"
) # noqa: E501
self.__95th = _95th
@property
def _99th(self):
"""Gets the _99th of this PerformanceView. # noqa: E501
The 99th percentile response time. # noqa: E501
:return: The _99th of this PerformanceView. # noqa: E501
:rtype: int
"""
return self.__99th
@_99th.setter
def _99th(self, _99th):
"""Sets the _99th of this PerformanceView.
The 99th percentile response time. # noqa: E501
:param _99th: The _99th of this PerformanceView. # noqa: E501
:type: int
"""
if (
self.local_vars_configuration.client_side_validation and _99th is None
): # noqa: E501
raise ValueError(
"Invalid value for `_99th`, must not be `None`"
) # noqa: E501
self.__99th = _99th
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PerformanceView):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PerformanceView):
return True
return self.to_dict() != other.to_dict()
| 30.692127 | 108 | 0.590107 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.