text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
Support for Ambient Weather Station Service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ambient_station/
"""
import logging
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_NAME, ATTR_LOCATION, CONF_API_KEY, CONF_MONITORED_CONDITIONS,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from .config_flow import configured_instances
from .const import (
ATTR_LAST_DATA, CONF_APP_KEY, DATA_CLIENT, DOMAIN, TOPIC_UPDATE,
TYPE_BINARY_SENSOR, TYPE_SENSOR)
REQUIREMENTS = ['aioambient==0.1.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_SOCKET_MIN_RETRY = 15
TYPE_24HOURRAININ = '24hourrainin'
TYPE_BAROMABSIN = 'baromabsin'
TYPE_BAROMRELIN = 'baromrelin'
TYPE_BATTOUT = 'battout'
TYPE_CO2 = 'co2'
TYPE_DAILYRAININ = 'dailyrainin'
TYPE_DEWPOINT = 'dewPoint'
TYPE_EVENTRAININ = 'eventrainin'
TYPE_FEELSLIKE = 'feelsLike'
TYPE_HOURLYRAININ = 'hourlyrainin'
TYPE_HUMIDITY = 'humidity'
TYPE_HUMIDITYIN = 'humidityin'
TYPE_LASTRAIN = 'lastRain'
TYPE_MAXDAILYGUST = 'maxdailygust'
TYPE_MONTHLYRAININ = 'monthlyrainin'
TYPE_SOLARRADIATION = 'solarradiation'
TYPE_TEMPF = 'tempf'
TYPE_TEMPINF = 'tempinf'
TYPE_TOTALRAININ = 'totalrainin'
TYPE_UV = 'uv'
TYPE_WEEKLYRAININ = 'weeklyrainin'
TYPE_WINDDIR = 'winddir'
TYPE_WINDDIR_AVG10M = 'winddir_avg10m'
TYPE_WINDDIR_AVG2M = 'winddir_avg2m'
TYPE_WINDGUSTDIR = 'windgustdir'
TYPE_WINDGUSTMPH = 'windgustmph'
TYPE_WINDSPDMPH_AVG10M = 'windspdmph_avg10m'
TYPE_WINDSPDMPH_AVG2M = 'windspdmph_avg2m'
TYPE_WINDSPEEDMPH = 'windspeedmph'
TYPE_YEARLYRAININ = 'yearlyrainin'
SENSOR_TYPES = {
TYPE_24HOURRAININ: ('24 Hr Rain', 'in', TYPE_SENSOR, None),
TYPE_BAROMABSIN: ('Abs Pressure', 'inHg', TYPE_SENSOR, None),
TYPE_BAROMRELIN: ('Rel Pressure', 'inHg', TYPE_SENSOR, None),
TYPE_BATTOUT: ('Battery', None, TYPE_BINARY_SENSOR, 'battery'),
TYPE_CO2: ('co2', 'ppm', TYPE_SENSOR, None),
TYPE_DAILYRAININ: ('Daily Rain', 'in', TYPE_SENSOR, None),
TYPE_DEWPOINT: ('Dew Point', '°F', TYPE_SENSOR, None),
TYPE_EVENTRAININ: ('Event Rain', 'in', TYPE_SENSOR, None),
TYPE_FEELSLIKE: ('Feels Like', '°F', TYPE_SENSOR, None),
TYPE_HOURLYRAININ: ('Hourly Rain Rate', 'in/hr', TYPE_SENSOR, None),
TYPE_HUMIDITY: ('Humidity', '%', TYPE_SENSOR, None),
TYPE_HUMIDITYIN: ('Humidity In', '%', TYPE_SENSOR, None),
TYPE_LASTRAIN: ('Last Rain', None, TYPE_SENSOR, None),
TYPE_MAXDAILYGUST: ('Max Gust', 'mph', TYPE_SENSOR, None),
TYPE_MONTHLYRAININ: ('Monthly Rain', 'in', TYPE_SENSOR, None),
TYPE_SOLARRADIATION: ('Solar Rad', 'W/m^2', TYPE_SENSOR, None),
TYPE_TEMPF: ('Temp', '°F', TYPE_SENSOR, None),
TYPE_TEMPINF: ('Inside Temp', '°F', TYPE_SENSOR, None),
TYPE_TOTALRAININ: ('Lifetime Rain', 'in', TYPE_SENSOR, None),
TYPE_UV: ('uv', 'Index', TYPE_SENSOR, None),
TYPE_WEEKLYRAININ: ('Weekly Rain', 'in', TYPE_SENSOR, None),
TYPE_WINDDIR: ('Wind Dir', '°', TYPE_SENSOR, None),
TYPE_WINDDIR_AVG10M: ('Wind Dir Avg 10m', '°', TYPE_SENSOR, None),
TYPE_WINDDIR_AVG2M: ('Wind Dir Avg 2m', 'mph', TYPE_SENSOR, None),
TYPE_WINDGUSTDIR: ('Gust Dir', '°', TYPE_SENSOR, None),
TYPE_WINDGUSTMPH: ('Wind Gust', 'mph', TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG10M: ('Wind Avg 10m', 'mph', TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG2M: ('Wind Avg 2m', 'mph', TYPE_SENSOR, None),
TYPE_WINDSPEEDMPH: ('Wind Speed', 'mph', TYPE_SENSOR, None),
TYPE_YEARLYRAININ: ('Yearly Rain', 'in', TYPE_SENSOR, None),
}
CONFIG_SCHEMA = vol.Schema({
DOMAIN:
vol.Schema({
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the Ambient PWS component from YAML configuration.

    Always returns True; when YAML config is present and not already
    imported, kicks off a config-entry import flow.
    """
    hass.data[DOMAIN] = {DATA_CLIENT: {}}
    if DOMAIN not in config:
        return True
    domain_config = config[DOMAIN]
    # Skip the import flow when this app key already has a config entry.
    if domain_config[CONF_APP_KEY] not in configured_instances(hass):
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={'source': SOURCE_IMPORT},
                data=domain_config))
    return True
async def async_setup_entry(hass, config_entry):
    """Set up the Ambient PWS as config entry.

    Creates the AmbientStation wrapper, starts the websocket connection
    task, and registers a shutdown hook. Raises ConfigEntryNotReady when
    the websocket client cannot be constructed/connected.
    """
    from aioambient import Client
    from aioambient.errors import WebsocketConnectionError

    session = aiohttp_client.async_get_clientsession(hass)

    try:
        ambient = AmbientStation(
            hass, config_entry,
            Client(
                config_entry.data[CONF_API_KEY],
                config_entry.data[CONF_APP_KEY], session),
            config_entry.data.get(CONF_MONITORED_CONDITIONS, []))
        hass.loop.create_task(ambient.ws_connect())
        hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = ambient
    except WebsocketConnectionError as err:
        _LOGGER.error('Config entry failed: %s', err)
        raise ConfigEntryNotReady

    async def _async_disconnect_websocket(event):
        """Disconnect the websocket when Home Assistant stops."""
        await ambient.client.websocket.disconnect()

    # BUG FIX: the original passed ambient.client.websocket.disconnect()
    # (the *result* of calling disconnect, a coroutine created right away)
    # as the listener, which disconnects immediately instead of at
    # shutdown. Register a callable so disconnect runs when the stop
    # event actually fires.
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, _async_disconnect_websocket)

    return True
async def async_unload_entry(hass, config_entry):
    """Unload an Ambient PWS config entry.

    Drops the station from hass.data, schedules the websocket disconnect,
    and unloads both entity platforms. Always returns True.
    """
    station = hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
    hass.async_create_task(station.ws_disconnect())
    for platform in ('binary_sensor', 'sensor'):
        await hass.config_entries.async_forward_entry_unload(
            config_entry, platform)
    return True
class AmbientStation:
    """Define a class to handle the Ambient websocket.

    Owns the aioambient client, the reconnect backoff state, and a
    per-MAC-address cache of each subscribed station's latest data.
    """

    def __init__(self, hass, config_entry, client, monitored_conditions):
        """Initialize."""
        self._config_entry = config_entry
        self._hass = hass
        # Seconds to wait before the next reconnect attempt; doubled on
        # each websocket failure (capped at 480) and reset on subscribe.
        self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
        self.client = client
        # Sensor keys to expose; when empty, filled from the station's own
        # reported data in on_subscribed below.
        self.monitored_conditions = monitored_conditions
        # Maps station MAC address -> {last data, location, name}.
        self.stations = {}

    async def ws_connect(self):
        """Register handlers and connect to the websocket."""
        from aioambient.errors import WebsocketError

        def on_connect():
            """Define a handler to fire when the websocket is connected."""
            _LOGGER.info('Connected to websocket')

        def on_data(data):
            """Define a handler to fire when the data is received."""
            mac_address = data['macAddress']
            # Only cache and fan out an update when the payload changed.
            if data != self.stations[mac_address][ATTR_LAST_DATA]:
                _LOGGER.debug('New data received: %s', data)
                self.stations[mac_address][ATTR_LAST_DATA] = data
                async_dispatcher_send(self._hass, TOPIC_UPDATE)

        def on_disconnect():
            """Define a handler to fire when the websocket is disconnected."""
            _LOGGER.info('Disconnected from websocket')

        def on_subscribed(data):
            """Define a handler to fire when the subscription is set."""
            for station in data['devices']:
                # Skip stations we have already registered (the handler can
                # fire again after a reconnect).
                if station['macAddress'] in self.stations:
                    continue
                _LOGGER.debug('New station subscription: %s', data)
                # If the user hasn't specified monitored conditions, use only
                # those that their station supports (and which are defined
                # here):
                if not self.monitored_conditions:
                    self.monitored_conditions = [
                        k for k in station['lastData'].keys()
                        if k in SENSOR_TYPES
                    ]
                self.stations[station['macAddress']] = {
                    ATTR_LAST_DATA: station['lastData'],
                    ATTR_LOCATION: station['info']['location'],
                    ATTR_NAME: station['info']['name'],
                }
                # Forward entry setup so this station's entities get
                # created on both platforms.
                for component in ('binary_sensor', 'sensor'):
                    self._hass.async_create_task(
                        self._hass.config_entries.async_forward_entry_setup(
                            self._config_entry, component))
            # Successfully subscribed: reset the reconnect backoff.
            self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY

        self.client.websocket.on_connect(on_connect)
        self.client.websocket.on_data(on_data)
        self.client.websocket.on_disconnect(on_disconnect)
        self.client.websocket.on_subscribed(on_subscribed)

        try:
            await self.client.websocket.connect()
        except WebsocketError as err:
            _LOGGER.error("Error with the websocket connection: %s", err)
            # Exponential backoff, capped at 480 seconds, then retry.
            self._ws_reconnect_delay = min(2 * self._ws_reconnect_delay, 480)
            async_call_later(
                self._hass, self._ws_reconnect_delay, self.ws_connect)

    async def ws_disconnect(self):
        """Disconnect from the websocket."""
        await self.client.websocket.disconnect()
class AmbientWeatherEntity(Entity):
    """Define a base Ambient PWS entity.

    Shared by the sensor and binary_sensor platforms; pushes state via the
    dispatcher instead of polling.
    """

    def __init__(
            self, ambient, mac_address, station_name, sensor_type,
            sensor_name):
        """Initialize the sensor."""
        self._ambient = ambient
        # Set in async_added_to_hass; used to detach the dispatcher
        # listener when the entity is removed.
        self._async_unsub_dispatcher_connect = None
        self._mac_address = mac_address
        self._sensor_name = sensor_name
        self._sensor_type = sensor_type
        self._state = None
        self._station_name = station_name

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return {
            'identifiers': {
                (DOMAIN, self._mac_address)
            },
            'name': self._station_name,
            'manufacturer': 'Ambient Weather',
        }

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{0}_{1}'.format(self._station_name, self._sensor_name)

    @property
    def should_poll(self):
        """Disable polling; state is pushed via the dispatcher."""
        return False

    @property
    def unique_id(self):
        """Return a unique, unchanging string that represents this sensor."""
        return '{0}_{1}'.format(self._mac_address, self._sensor_name)

    async def async_added_to_hass(self):
        """Register callbacks."""
        @callback
        def update():
            """Update the state."""
            self.async_schedule_update_ha_state(True)

        # Re-render this entity whenever new station data is dispatched.
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, TOPIC_UPDATE, update)

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()
|
{
"content_hash": "e1184a8544fa8a5714d9c230715330c4",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 78,
"avg_line_length": 36.73421926910299,
"alnum_prop": 0.6324500316541557,
"repo_name": "PetePriority/home-assistant",
"id": "4aa19dbc69ea65db21372848929c08d3c098b210",
"size": "11064",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ambient_station/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
import Rule
from CommonDataClass.FdfClass import RuleComplexFileClassObject
## complex rule
#
#
class RuleComplexFile(RuleComplexFileClassObject):
    ## FDF rule object for 'complex file' rules.
    #
    #  All state and behavior live in the CommonDataClass base class; this
    #  subclass only identifies the rule type.
    #
    def __init__(self):
        ## Delegate initialization to the base class.
        RuleComplexFileClassObject.__init__(self)
|
{
"content_hash": "955fce8ea63ba142698a36466b95e1ea",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 23.53846153846154,
"alnum_prop": 0.6862745098039216,
"repo_name": "egraba/vbox_openbsd",
"id": "7ecc32560868778a0ef337d0fa67b76e7b1b7bc6",
"size": "876",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/RuleComplexFile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
}
|
import os
import base64
import boto.ec2
# EU-WEST-1: RHEL7 HVM
AMI_ID = 'ami-25158352'
region = 'eu-west-1'
def generate_userdata():
    '''Build the EC2 user-data bootstrap script.

    Reads the deployer's SSH public key and a deploy-script path from the
    environment (DEPLOY_SSH_KEY, DEPLOY_SCRIPT_LOCATION), then renders a
    bash script that authorizes the key and installs the code-deploy
    tooling. Raises KeyError if either variable is unset.

    Returns:
        The rendered user-data script as a string.
    '''
    ssh_key = os.environ["DEPLOY_SSH_KEY"]
    deploy_script_location = os.environ["DEPLOY_SCRIPT_LOCATION"]
    # FIX: the explicit close() inside the with-block was redundant; the
    # context manager already closes the handle.
    with open(deploy_script_location, 'r') as deploy_handle:
        deploy_script = deploy_handle.readlines()
    # NOTE: only {0} (the SSH key) is interpolated below; deploy_script is
    # read but currently unused by the template.
    user_data = '''#!/bin/bash
printf '{0}' >> /home/ec2-user/.ssh/authorized_keys;
mkdir -p /opt/code-deploy
curl https://raw.githubusercontent.com/Financial-Times/paasport/master/code-deploy/app/src/main/resources/deploy.sh > /opt/code-deploy/deploy.sh
chmod a+x /opt/code-deploy/deploy.sh
chown -R ec2-user:ec2-user /opt/code-deploy
yum install -y java
echo "all done"
'''.format(ssh_key, deploy_script)
    # FIX: parenthesized print works on both Python 2 and 3.
    print(user_data)
    return user_data
def create_many(definitions, cluster_id):
    """Create one instance per definition and return the formatted records."""
    created = map(lambda data: create_new(data, cluster_id), definitions)
    return map(format_instance, created)
def create_new(data, cluster_id):
    """Provision a machine for *cluster_id* via the nursery pool.

    Launches a fresh instance into the nursery, then claims a running
    nursery instance for the cluster. Raises Exception when no instance
    could be claimed.
    """
    # Always keep the nursery full
    name = data['name'] if 'name' in data else 'unnamed instance'
    # The return value is unused: we claim whichever nursery machine the
    # transfer picks — presumably not necessarily the one just created.
    nursery_instance = create_new_in_nursery(data)
    instances = transfer_machine_from_nursery_to_cluster(cluster_id, new_name=name)
    if len(instances) == 1:
        return instances[0]
    raise Exception("No available instances")
def create_new_in_nursery(data):
    """Launch a new m3.medium into the '__nursery__' pool and return it.

    Boots the RHEL7 AMI with the generated user-data script and tags the
    instance cluster=__nursery__ so it can later be claimed. The *data*
    argument is currently unused here.
    """
    # Hard-coded security group and key pair — environment specific;
    # confirm before reusing elsewhere.
    security_groups = [ 'sg-8a2574ef' ]
    connection = boto.ec2.connect_to_region(region)
    instance = connection.run_instances(AMI_ID, instance_type='m3.medium',
        user_data=generate_userdata(), key_name="LukeBlaney",
        security_group_ids=security_groups).instances[0]
    connection.create_tags([instance.id], { 'cluster': '__nursery__' })
    return instance
def transfer_machine_from_nursery_to_cluster(cluster_id, new_name="clustered_machine"):
    """Claim one running nursery instance and re-tag it into *cluster_id*.

    Returns a single-element list containing the claimed boto instance.
    Raises IndexError if the nursery is empty.
    """
    connection = boto.ec2.connect_to_region(region)
    # TODO: RACE CONDITIONS COULD OCCUR HERE! Need a DLM?
    # Two concurrent callers can pick the same nursery instance between
    # the lookup below and the re-tagging.
    instance = connection.get_only_instances(filters={ 'instance-state-name':
        'running', 'tag-key': 'cluster', 'tag-value':
        '__nursery__'})[0]
    connection.create_tags([instance.id],
        tags={ 'cluster': str(cluster_id), 'Name': new_name })
    # END OF RACE CONDITION TERRITORY
    return [instance]
def get_instances_in_cluster(cluster_id):
    """Return formatted records for the running instances tagged with *cluster_id*."""
    connection = boto.ec2.connect_to_region(region)
    # NOTE(review): this module is Python 2 (print statement above), so
    # map() returns a list here; on Python 3 it would be a lazy iterator.
    return map(format_instance, connection.get_only_instances(filters={ 'tag-key': 'cluster',
        'tag-value': cluster_id, 'instance-state-name': 'running' }))
def format_instance(instance):
    """Flatten a boto EC2 instance into the plain dict the API exposes."""
    record = {
        'id': instance.id,
        'name': instance.tags['Name'],
        # public address; the private_ip_address variant was abandoned
        'hostname': instance.ip_address,
        # hardcoded m3.medium
        'cpu': 1,
        'memory': '4026531840',
        'disk': '4',
        'region': region,
        'metadata': '{}',
        'state': instance.state
    }
    return record
def get_instance_info(instance_ids):
    """Return the boto instance objects for *instance_ids*.

    BUG FIX: the original returned the undefined name ``instances``,
    raising NameError on every call; the lookup result is bound to
    ``instance_states``.
    """
    connection = boto.ec2.connect_to_region(region)
    instance_states = connection.get_only_instances(instance_ids=instance_ids)
    return instance_states
def delete_instance(instance_id, region):
    """Terminate *instance_id* in *region*; always returns True.

    Note: the *region* parameter shadows the module-level default region.
    """
    conn = boto.ec2.connect_to_region(region)
    conn.terminate_instances(instance_ids=[instance_id])
    return True
|
{
"content_hash": "0ee2f6eac85ae46b681749ff9a055a4c",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 145,
"avg_line_length": 31.901960784313726,
"alnum_prop": 0.7234173325138291,
"repo_name": "Financial-Times/paasport",
"id": "1ef476da5f5460b46ac1b625211430aad364cc1c",
"size": "3254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provisioner/models/machine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "26882"
},
{
"name": "Python",
"bytes": "16486"
},
{
"name": "Shell",
"bytes": "3125"
}
],
"symlink_target": ""
}
|
import mxnet as mx
import numpy as np
import unittest
from mxnet.test_utils import rand_ndarray, assert_almost_equal
from common import setup_module, with_seed, assertRaises, teardown
from mxnet.base import py_str, MXNetError
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv(stype='default'):
    """Create a kvstore pre-initialized with zeros under the int test keys."""
    store = mx.kv.create()
    # a single int key
    store.init(3, mx.nd.zeros(shape=shape, stype=stype))
    # the list of int keys (one shared zero ndarray, as the tests expect)
    store.init(keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
    return store
def init_kv_with_str(stype='default'):
    """Create a kvstore pre-initialized with zeros under the str test keys."""
    store = mx.kv.create()
    # a single str key
    store.init('a', mx.nd.zeros(shape, stype=stype))
    # the list of str keys (one shared zero ndarray, as the tests expect)
    store.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
    return store
def check_diff_to_scalar(A, x):
    """Assert that every element of ndarray *A* equals the scalar *x*."""
    abs_diff = np.abs((A - x).asnumpy())
    assert np.sum(abs_diff) == 0
@with_seed()
def test_single_kv_pair():
    """single key-value pair push & pull

    Pushes ones under one key and verifies the pulled value equals 1,
    for both default and row_sparse pushed storage types, with both an
    int key and a str key.
    """
    def check_single_kv_pair(kv, key, stype):
        kv.push(key, mx.nd.ones(shape).tostype(stype))
        val = mx.nd.empty(shape)
        kv.pull(key, out=val)
        check_diff_to_scalar(val, 1)

    stypes = ['default', 'row_sparse']
    for stype in stypes:
        # NOTE(review): the kvstore itself is initialized with default
        # storage; only the pushed value carries *stype* — confirm intended.
        check_single_kv_pair(init_kv(), 3, stype)
        check_single_kv_pair(init_kv_with_str(), 'a', stype)
@with_seed()
def test_row_sparse_pull():
    """Pull selected rows of a row_sparse value and verify retention.

    Rows listed in row_ids must come back as ones (the stored value);
    all other rows must remain zero.
    """
    kv = init_kv_with_str('row_sparse')
    kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))

    def check_row_sparse_pull(kv, count):
        num_rows = shape[0]
        vals = []
        row_ids = []
        all_row_ids = np.arange(num_rows)
        # Build *count* zeroed outputs, each with its own random row-id set.
        for i in range(count):
            vals.append(mx.nd.zeros(shape).tostype('row_sparse'))
            row_id = np.random.randint(num_rows, size=num_rows)
            row_ids.append(mx.nd.array(row_id).reshape((2, num_rows//2)))
        # Single-element lists are passed unwrapped to exercise both APIs.
        row_ids_to_pull = row_ids[0] if len(row_ids) == 1 else row_ids
        vals_to_pull = vals[0] if len(vals) == 1 else vals

        kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
        for val, row_id in zip(vals, row_ids):
            retained = val.asnumpy()
            excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
            for row in range(num_rows):
                # Requested rows must equal 1, unrequested rows must stay 0.
                expected_val = np.zeros_like(retained[row])
                expected_val += 0 if row in excluded_row_ids else 1
                assert_almost_equal(retained[row], expected_val)

    check_row_sparse_pull(kv, 1)
    check_row_sparse_pull(kv, 4)
@with_seed()
def test_init():
    """Initializing a key must make its value pullable immediately."""
    def verify_init(store, key):
        store.init(key, mx.nd.ones(shape)*4)
        pulled = mx.nd.zeros(shape)
        store.pull(key, out=pulled)
        check_diff_to_scalar(pulled, 4)

    verify_init(mx.kv.create(), 3)
    verify_init(mx.kv.create(), 'a')
@with_seed()
def test_list_kv_pair():
    """list key-value pair push & pull

    Pushes 4s under a list of keys and checks every pulled value equals
    4, for default and row_sparse pushed storage, with int and str keys.
    """
    def check_list_kv_pair(kv, key, stype):
        kv.push(key, [mx.nd.ones(shape).tostype(stype)*4] * len(key))
        val = [mx.nd.empty(shape)] * len(key)
        kv.pull(key, out=val)
        for v in val:
            check_diff_to_scalar(v, 4)

    stypes = ['default', 'row_sparse']
    for stype in stypes:
        check_list_kv_pair(init_kv(), keys, stype)
        check_list_kv_pair(init_kv_with_str(), str_keys, stype)
@with_seed()
def test_aggregator():
    """Aggregate values pushed from multiple devices.

    Pushing ones from num_devs contexts must pull back num_devs (the
    sum across devices); the list form pushes 2.0s and expects
    num_devs * 2.0 per key.
    """
    def check_aggregator(kv, key, key_list, stype):
        # devices
        num_devs = 4
        devs = [mx.Context('cpu', i) for i in range(num_devs)]

        # single key: one ndarray per device, summed on push
        vals = [mx.nd.ones(shape, d).tostype(stype) for d in devs]
        outs = [mx.nd.empty(shape, d) for d in devs]

        kv.push(key, vals)
        kv.pull(key, out=outs)

        for out in outs:
            check_diff_to_scalar(out, num_devs)

        # list of keys: the outer lists deliberately alias one inner list
        vals = [[mx.nd.ones(shape, d).tostype(stype)*2.0 for d in devs]] * len(key_list)
        outs = [[mx.nd.empty(shape, d) for d in devs]] * len(key_list)
        kv.push(key_list, vals)
        kv.pull(key_list, out=outs)

        for out in outs:
            for o in out:
                check_diff_to_scalar(o, num_devs * 2.0)

    stypes = ['default', 'row_sparse']
    for stype in stypes:
        check_aggregator(init_kv(), 3, keys, stype)
        check_aggregator(init_kv_with_str(), 'a', str_keys, stype)
@with_seed()
def test_sparse_aggregator():
    """Aggregate row_sparse ndarrays pushed from multiple devices.

    The pulled sum must equal num_devs times the sum of one device's
    random values (every device pushes a copy of the same random data).
    """
    stype = 'row_sparse'
    kv = init_kv_with_str(stype)

    # devices
    num_devs = 4
    devs = [mx.Context('cpu', i) for i in range(num_devs)]

    # single key
    vals = [rand_ndarray(shape, stype).copyto(devs[i]) for i in range(num_devs)]
    expected_sum = np.zeros(shape)
    for v in vals:
        expected_sum += v.asnumpy()

    # prepare row_ids covering every row so the full value is pulled back
    all_rows = mx.nd.array(np.arange(shape[0]))
    kv.push('a', vals)
    # Pull in place over the pushed values; each out slot gets the sum.
    kv.row_sparse_pull('a', out=vals, row_ids=[all_rows] * len(vals))
    result_sum = np.zeros(shape)
    for v in vals:
        result_sum += v.asnumpy()
    assert_almost_equal(result_sum, expected_sum * num_devs)

    # list of keys: outer list deliberately aliases one inner list
    vals = [[rand_ndarray(shape, stype).copyto(devs[i]) for i in range(num_devs)]] * len(keys)
    expected_sum = np.zeros(shape)
    for v in vals[0]:
        expected_sum += v.asnumpy()
    kv.push(str_keys, vals)
    kv.row_sparse_pull(str_keys, out=vals, row_ids=[[all_rows] * num_devs] * len(vals))
    for vv in vals:
        result_sum = np.zeros(shape)
        for v in vv:
            result_sum += v.asnumpy()
        assert_almost_equal(result_sum, expected_sum * num_devs)
def updater(key, recv, local):
    """Accumulating updater for int-keyed stores: merge recv into local."""
    # Keys must be ints in this variant; see str_updater for string keys.
    assert isinstance(key, int)
    local += recv
def str_updater(key, recv, local):
    """Accumulating updater for str-keyed stores: merge recv into local."""
    # bytes keys are decoded to str before the type check.
    key = py_str(key) if isinstance(key, bytes) else key
    assert isinstance(key, str)
    local += recv
@with_seed()
def test_updater(dev='cpu'):
    """Custom updater: repeated pushes accumulate via += .

    With the accumulating updater installed, one push of ones from
    num_devs devices pulls back num_devs, and num_push pushes of the
    list form pull back num_devs * num_push.
    """
    def check_updater(kv, key, key_list, stype):
        # devices
        num_devs = 4
        devs = [mx.Context(dev, i) for i in range(num_devs)]

        # single key
        vals = [mx.nd.ones(shape, d).tostype(stype) for d in devs]
        outs = [mx.nd.empty(shape, d) for d in devs]

        kv.push(key, vals)
        kv.pull(key, out=outs)

        for out in outs:
            check_diff_to_scalar(out, num_devs)

        # list of keys: outer lists deliberately alias one inner list
        vals = [[mx.nd.ones(shape, d).tostype(stype) for d in devs]] * len(key_list)
        outs = [[mx.nd.empty(shape, d) for d in devs]] * len(key_list)

        # Each push adds num_devs to the accumulated value.
        num_push = 4
        for i in range(num_push):
            kv.push(key_list, vals)

        kv.pull(key_list, out=outs)

        for out in outs:
            for o in out:
                check_diff_to_scalar(o, num_devs * num_push)

    stypes = ['default', 'row_sparse']
    for stype in stypes:
        kv = init_kv()
        kv._set_updater(updater)
        check_updater(kv, 3, keys, stype)

        str_kv = init_kv_with_str()
        str_kv._set_updater(str_updater)
        check_updater(str_kv, 'a', str_keys, stype)
@with_seed()
def test_get_type():
    """Creating a kvstore echoes back the requested type string."""
    requested = 'local_allreduce_cpu'
    store = mx.kv.create(requested)
    assert store.type == requested
@with_seed()
def test_invalid_pull():
    """Invalid pull/key combinations: ignored or rejected as expected.

    - pull into row_sparse outputs is ignored (values untouched),
    - row_sparse_pull into dense outputs raises MXNetError,
    - mixing int and str key types raises MXNetError.
    """
    def check_ignored_pull_single(kv, key):
        dns_val = (mx.nd.ones(shape) * 2)
        rsp_val = dns_val.tostype('row_sparse')
        kv.pull(key, out=rsp_val)
        # Value must still be 2: the pull was silently ignored.
        check_diff_to_scalar(rsp_val, 2)

    def check_ignored_pull_list(kv, key):
        dns_val = [mx.nd.ones(shape) * 2] * len(key)
        rsp_val = [val.tostype('row_sparse') for val in dns_val]
        kv.pull(key, out=rsp_val)
        for v in rsp_val:
            check_diff_to_scalar(v, 2)

    def check_invalid_rsp_pull_single(kv, key):
        dns_val = mx.nd.ones(shape) * 2
        assertRaises(MXNetError, kv.row_sparse_pull,
                     key, out=dns_val, row_ids=mx.nd.array([1]))

    def check_invalid_rsp_pull_list(kv, key):
        dns_val = [mx.nd.ones(shape) * 2] * len(key)
        assertRaises(MXNetError, kv.row_sparse_pull, key, out=dns_val,
                     row_ids=[mx.nd.array([1])] * len(key))

    def check_invalid_key_types_single(kv, key):
        dns_val = mx.nd.ones(shape) * 2
        rsp_val = dns_val.tostype('row_sparse')
        assertRaises(MXNetError, kv.init, key, dns_val)
        assertRaises(MXNetError, kv.push, key, dns_val)
        assertRaises(MXNetError, kv.pull, key, dns_val)
        assertRaises(MXNetError, kv.row_sparse_pull, key, rsp_val,
                     row_ids=mx.nd.array([1]))

    def check_invalid_key_types_list(kv, key):
        dns_val = [mx.nd.ones(shape) * 2] * len(key)
        rsp_val = [val.tostype('row_sparse') for val in dns_val]
        assertRaises(MXNetError, kv.init, key, dns_val)
        assertRaises(MXNetError, kv.push, key, dns_val)
        assertRaises(MXNetError, kv.pull, key, dns_val)
        assertRaises(MXNetError, kv.row_sparse_pull, key, rsp_val,
                     row_ids=[mx.nd.array([1])] * len(key))

    int_kv = init_kv()
    str_kv = init_kv_with_str()

    kvs = [int_kv, str_kv]
    single_keys = [3, 'a']
    list_keys = [keys, str_keys]
    # i selects the matching store/key pair; 1 - i selects the mismatched
    # key type for the invalid-key checks.
    for i in range(2):
        # pull with rsp outputs should be ignored with no values updated
        check_ignored_pull_single(kvs[i], single_keys[i])
        check_ignored_pull_list(kvs[i], list_keys[i])
        # row_sparse_pull should be aborted when vals.stype != row_sparse
        check_invalid_rsp_pull_single(kvs[i], single_keys[i])
        check_invalid_rsp_pull_list(kvs[i], list_keys[i])
        # kvstore should be restricted to only accept either int or str keys
        check_invalid_key_types_single(kvs[i], single_keys[1 - i])
        check_invalid_key_types_list(kvs[i], list_keys[1 - i])
if __name__ == '__main__':
    # Run this test module directly under nose.
    import nose
    nose.runmodule()
|
{
"content_hash": "1b583b20dcb8d0db5ff7881937133df1",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 94,
"avg_line_length": 32.445901639344264,
"alnum_prop": 0.5789207760711399,
"repo_name": "jamesliu/mxnet",
"id": "0ab61bb27483492525f8fbd75ba9a4cd6e419010",
"size": "10702",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/python/unittest/test_kvstore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "123214"
},
{
"name": "C++",
"bytes": "5600157"
},
{
"name": "CMake",
"bytes": "84037"
},
{
"name": "Clojure",
"bytes": "375066"
},
{
"name": "Cuda",
"bytes": "948875"
},
{
"name": "Groovy",
"bytes": "16047"
},
{
"name": "Java",
"bytes": "122297"
},
{
"name": "Jupyter Notebook",
"bytes": "1275293"
},
{
"name": "Makefile",
"bytes": "67550"
},
{
"name": "Matlab",
"bytes": "34903"
},
{
"name": "Perl",
"bytes": "1367889"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "Python",
"bytes": "5829940"
},
{
"name": "R",
"bytes": "311579"
},
{
"name": "Scala",
"bytes": "1039625"
},
{
"name": "Shell",
"bytes": "292731"
},
{
"name": "Smalltalk",
"bytes": "43774"
}
],
"symlink_target": ""
}
|
""" extension to test the extension_manager """
from ped_core.editor_common import Editor
from ped_dialog.message_dialog import message
# register shift-F1 to comment the highlighted block
def ped_ext_info():
    """Registration record consumed by the extension_manager."""
    return (
        "CMD_COMMENT",
        "EDITOR",
        "KEYTAB_F13",
        "KEYTAB_NOKEY",
        "comment_extension",
    )
def ped_ext_invoke( cmd_id, target, ch ):
    """ Comment out the marked block in *target* (an Editor).

    Prefixes every line in the marked range with '#'. Does nothing when
    no mark is active; always returns False.
    """
    if target.isMark():
        # Clear all mark modes before editing.
        target.line_mark = False
        target.span_mark = False
        target.rect_mark = False
        # NOTE(review): mark_line_start is read *after* the mark flags are
        # cleared — presumably the attribute survives clearing; confirm.
        mark_line_start = target.mark_line_start
        mark_line_end = target.getLine()
        # Normalize so start <= end (mark may have been made upward).
        if mark_line_start > mark_line_end:
            mark = mark_line_end
            mark_line_end = mark_line_start
            mark_line_start = mark
        line = mark_line_start
        while line <= mark_line_end:
            target.goto(line,0)
            target.insert("#")
            line += 1
    # False — presumably signals the manager that no further handling is
    # needed; confirm against the extension_manager contract.
    return False
|
{
"content_hash": "1614619da71bb2bc59bae92b28a80518",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 89,
"avg_line_length": 35.642857142857146,
"alnum_prop": 0.6172344689378757,
"repo_name": "jpfxgood/ped",
"id": "66a9ce7062f2ff8fd360657d7062afdf79bd34ca",
"size": "1054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ped_extensions/comment_extension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "375789"
}
],
"symlink_target": ""
}
|
"""Constants and utilities related to Omaha versions."""
_ONECLICK_PLUGIN_NAME = 'npHouseOfLifeOneClick'
_UPDATE_PLUGIN_NAME = 'npHouseOfLifeUpdate'
_BHO_NAME = 'HolupdateBho'
_CRASH_HANDLER_NAME = 'HouseOfLifeCrashHandler'
# List of languages that are fully supported in the current build.
_OMAHA_LANGUAGES = [
'am',
'ar',
'bg',
'bn',
'ca',
'cs',
'da',
'de',
'el',
'en',
'en-GB',
'es',
'es-419',
'et',
'fa',
'fi',
'fil',
'fr',
'gu',
'hi',
'hr',
'hu',
'id',
'is',
'it',
'iw',
'ja',
'kn',
'ko',
'lt',
'lv',
'ml',
'mr',
'ms',
'nl',
'no',
'pl',
'pt-BR',
'pt-PT',
'ro',
'ru',
'sk',
'sl',
'sr',
'sv',
'sw',
'ta',
'te',
'th',
'tr',
'uk',
'ur',
'vi',
'zh-CN',
'zh-TW',
]
# The shell and goopdate.dll contain additional languages.
# 'userdefault' addresses apps that don't look up the resource for the OS
# language. See http://b/1328652.
_ADDITIONAL_SHELL_LANGUAGES = [
'or',
'userdefault',
'zh-HK',
]
def _IsSupportedOmaha2Version(omaha_version):
"""Returns true if omaha_version is an Omaha 2 version and is supported."""
return (omaha_version[0] == 1 and
omaha_version[1] == 2 and
omaha_version[2] >= 183)
# All languages supported by this script currently have the same set of
# languages, so the omaha_version_info parameter is unused.
def _GetMetainstallerPayloadFilenames(prefix,
                                      update_plugin_filename,
                                      bho_filename,
                                      languages,
                                      omaha_version):
  """Returns list of metainstaller payload files for specified Omaha version.

  Args:
    prefix: Filename prefix applied to the per-build DLL names.
    update_plugin_filename: Base name of the Omaha 3 update plug-in DLL.
    bho_filename: Base name of the BHO DLL.
    languages: Language codes; one resource DLL is appended per language.
    omaha_version: Four-element version list.

  Raises:
    Exception: omaha_version is neither Omaha 3 (1.3.13+) nor a supported
        Omaha 2 version.
  """
  plugin_dll_name = '%s%s' % (prefix, update_plugin_filename)
  bho_dll_name = '%s%s' % (prefix, bho_filename)

  # The list of files below needs to be kept in sync with the list in
  # SetupFiles::BuildFileLists().
  # TODO(omaha): Move the other filename defines in main.scons into this file
  # and allow all filenames to be customized. At the moment, while the plugin
  # names are generated in one place due to version numbers, most of the other
  # files (googleupdate.exe, goopdateres_*.dll, etc.) are hardcoded all over
  # the place, and require a ton of point fixes to customize.
  payload_files = [
      'BitpopUpdate.exe',
      '%s.exe' % _CRASH_HANDLER_NAME,
      '%sholupdate.dll' % (prefix),
      plugin_dll_name,
      bho_dll_name,
      'HouseOfLifeUpdateHelper.msi',
      'HouseOfLifeUpdateBroker.exe',
      'HouseOfLifeUpdateOnDemand.exe',
      '%spsmachine.dll' % (prefix),
      '%spsuser.dll' % (prefix),
      ]

  if (omaha_version[0] == 1 and
      omaha_version[1] == 3 and
      omaha_version[2] >= 13):
    # The BHO is not built yet.
    payload_files.remove(bho_dll_name)
  elif _IsSupportedOmaha2Version(omaha_version):
    # These files are not produced for Omaha 2 builds.
    payload_files.remove(plugin_dll_name)
    payload_files.remove('HouseOfLifeUpdateBroker.exe')
    payload_files.remove('HouseOfLifeUpdateOnDemand.exe')
    # NOTE(review): the list holds '%spsmachine.dll' % prefix, but the
    # removals below use the un-prefixed literals; with a non-empty prefix
    # .remove() would raise ValueError — confirm whether Omaha 2 builds
    # always use an empty prefix.
    payload_files.remove('psmachine.dll')
    payload_files.remove('psuser.dll')
  else:
    raise Exception('Unsupported version: ' +
                    ConvertVersionToString(omaha_version))

  # One localized resource DLL per supported language.
  for language in languages:
    payload_files += ['%sholupdateres_%s.dll' % (prefix, language)]

  return payload_files
def ConvertVersionToString(version):
  """Converts a four-element version list to a dotted version string."""
  major, minor, build, patch = version[0], version[1], version[2], version[3]
  return '%d.%d.%d.%d' % (major, minor, build, patch)
# Accessors exposing the module-private name constants to external callers
# that cannot reference the underscored globals directly. The UPPER_CASE
# function names mirror the corresponding C++ defines, hence the pylint
# naming-convention suppressions.
def GetONECLICK_PLUGIN_NAME(): # pylint: disable-msg=C6409
  """Returns the value of the ONECLICK_PLUGIN_NAME define for the C++ code."""
  return _ONECLICK_PLUGIN_NAME
def GetUPDATE_PLUGIN_NAME(): # pylint: disable-msg=C6409
  """Returns the value of the UPDATE_PLUGIN_NAME define for the C++ code."""
  return _UPDATE_PLUGIN_NAME
def GetBHO_NAME(): # pylint: disable-msg=C6409
  """Returns the value of the BHO_NAME define for the C++ code."""
  return _BHO_NAME
def GetCRASH_HANDLER_NAME(): # pylint: disable-msg=C6409
  """Returns the value of the CRASH_HANDLER_NAME define for the C++ code."""
  return _CRASH_HANDLER_NAME
def GetLanguagesForVersion(omaha_version):
  """Returns a list of languages supported by omaha_version.

  Args:
    omaha_version: Four-element version list, e.g. [1, 3, 21, 0].

  Returns:
    A new list of language codes; _OMAHA_LANGUAGES itself is not mutated.

  Raises:
    Exception: The version is neither Omaha 3 (1.3.21+) nor a supported
        Omaha 2 version.
  """
  # Make a copy in case the list is modified below.
  supported_languages = list(_OMAHA_LANGUAGES)

  # When languages are added, add a version check for older versions without the
  # new languages and remove the new languages from supported_languages.
  if (omaha_version[0] == 1 and
      omaha_version[1] == 3 and
      omaha_version[2] >= 21):
    # All languages are supported.
    pass
  elif _IsSupportedOmaha2Version(omaha_version):
    # All current languages are supported. 'or' was also supported.
    supported_languages += ['or']
    # 'am' and 'sw' were added after Omaha 2.
    supported_languages.remove('am')
    supported_languages.remove('sw')
  else:
    raise Exception('Unsupported version: ' +
                    ConvertVersionToString(omaha_version))

  return supported_languages
def GetShellLanguagesForVersion(omaha_version):
  """Returns a list of languages supported by the omaha_version shell.

  Returns a new list combining the common languages with the extra
  shell-only entries ('or', 'userdefault', 'zh-HK').
  """
  # Silence PyLint. All languages supported by this script currently have the
  # same set of languages, so this variable is unused.
  omaha_version = omaha_version
  return _OMAHA_LANGUAGES + _ADDITIONAL_SHELL_LANGUAGES
class OmahaVersionInfo(object):
  """Contains information about a specific version of Omaha.

  Attributes:
    filename_prefix: Prefix to use for all output files.
    version_major: Major version.
    version_minor: Minor version.
    version_build: Build version.
    version_patch: Patch version.
    oneclick_plugin_version: Version of the OneClick plug-in.
    oneclick_plugin_filename: Name of the signed OneClick DLL.
    update_plugin_version: Version of the Omaha 3 plug-in.
    update_plugin_filename: Name of the signed Omaha 3 plug-in DLL.
    bho_filename: Name of the signed BHO DLL.
    crash_handler_filename: Name of the Crash Handler EXE.
    oneclick_signed_file_info: SignedFileInfo object for the OneClick DLL.
    plugin_signed_file_info: SignedFileInfo object for the Omaha 3 plug-in DLL.
    bho_signed_file_info: SignedFileInfo object for the BHO DLL.
  """

  def __init__(self, version_file):
    """Initializes the class based on data from a VERSION file."""
    self._ReadFile(version_file)

    self.filename_prefix = ''

    # Objects containing more properties used to build the file.
    self.oneclick_signed_file_info = SignedFileInfo(
        _ONECLICK_PLUGIN_NAME,
        'dll',
        self.oneclick_plugin_version)
    self.plugin_signed_file_info = SignedFileInfo(
        _UPDATE_PLUGIN_NAME,
        'dll',
        self.update_plugin_version)
    self.bho_signed_file_info = SignedFileInfo(_BHO_NAME, 'dll')

    # Simple properties for callers that only need the final filename. Not
    # affected by internal build changes.
    self.oneclick_plugin_filename = self.oneclick_signed_file_info.filename
    self.update_plugin_filename = self.plugin_signed_file_info.filename
    self.bho_filename = self.bho_signed_file_info.filename
    self.crash_handler_filename = _CRASH_HANDLER_NAME

  def _ReadFile(self, version_file):
    """Reads and stores data from a VERSION file.

    Raises:
      Exception: If the incrementing version component is even (see below).
    """
    # NOTE: execfile (Python 2 only) executes version_file in this module's
    # global namespace; the version_* names referenced below are defined by
    # that file, not by this one.
    execfile(version_file, globals())

    # Silence Pylint. Values from version_file are not defined in this file.
    # E0602: Undefined variable.
    # pylint: disable-msg=E0602

    # The fastest-moving component is patch when non-zero, otherwise build;
    # by convention (enforced here) that component must be odd.
    if version_patch > 0:
      incrementing_value = version_patch
      incrementing_value_name = 'patch'
    else:
      incrementing_value = version_build
      incrementing_value_name = 'build'
    if 0 == incrementing_value % 2:
      raise Exception('ERROR: By convention, the %s number in VERSION '
                      '(currently %d) should be odd.' %
                      (incrementing_value_name, incrementing_value))

    self.version_major = version_major
    self.version_minor = version_minor
    self.version_build = version_build
    self.version_patch = version_patch

    self.oneclick_plugin_version = oneclick_plugin_version

    # update_plugin_version does not exist in Omaha 2 VERSION file. Handle this.
    try:
      self.update_plugin_version = update_plugin_version
    except NameError:
      # -1 is the sentinel for "no Omaha 3 plug-in" on supported Omaha 2
      # versions; anything else really is a broken VERSION file.
      if _IsSupportedOmaha2Version(self.GetVersion()):
        self.update_plugin_version = -1
      else:
        raise
    # pylint: enable-msg=E0602

  def MakeTestVersion(self, delta=1):
    """Changes this object to be for a TEST version of Omaha.

    Args:
      delta: Positive amount added to the incrementing version component.

    Raises:
      Exception: If delta is not positive.
    """
    if delta <= 0:
      raise Exception('Delta must be greater than 0.')

    # If we're doing a patch, increment patch; else, increment build.
    if self.version_patch > 0:
      self.version_patch += delta
    else:
      self.version_build += delta

    # All files generated for a TEST version are clearly marked as such.
    self.filename_prefix = 'TEST_'

  def GetVersion(self):
    """Returns the version elements as a list."""
    return [self.version_major,
            self.version_minor,
            self.version_build,
            self.version_patch
           ]

  def GetVersionString(self):
    """Returns the version as a string."""
    return ConvertVersionToString(self.GetVersion())

  def GetSupportedLanguages(self):
    """Returns a list of languages supported by this version."""
    return GetLanguagesForVersion(self.GetVersion())

  def GetMetainstallerPayloadFilenames(self):
    """Returns list of metainstaller payload files for this version of Omaha."""
    return _GetMetainstallerPayloadFilenames(self.filename_prefix,
                                             self.update_plugin_filename,
                                             self.bho_filename,
                                             self.GetSupportedLanguages(),
                                             self.GetVersion())
class SignedFileInfo(object):
  """Holds the base, unsigned and final filenames for a signed deliverable."""

  def __init__(self, unversioned_name, extension, file_version=None):
    """Derives all filename variants from name, extension and version.

    Args:
      unversioned_name: Base file name without version or extension.
      extension: File extension without the leading dot.
      file_version: Optional version number appended to the base name.
    """
    base = ('%s%d' % (unversioned_name, file_version) if file_version
            else unversioned_name)
    self.filename_base = base
    self.filename = '%s.%s' % (base, extension)
    self.unsigned_filename_base = base + '_unsigned'
    self.unsigned_filename = '%s.%s' % (self.unsigned_filename_base, extension)
|
{
"content_hash": "5ca73d4b76f1ac6a6448558e893c9f93",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 80,
"avg_line_length": 31.599397590361445,
"alnum_prop": 0.6463635497092746,
"repo_name": "Crystalnix/bitpop-omaha",
"id": "c4f5d85f468c6300ec9186368dc6c674baf48bce",
"size": "11166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omaha_version_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3914"
},
{
"name": "C",
"bytes": "2176252"
},
{
"name": "C#",
"bytes": "123756"
},
{
"name": "C++",
"bytes": "11921521"
},
{
"name": "Java",
"bytes": "92279"
},
{
"name": "Objective-C",
"bytes": "37794"
},
{
"name": "Python",
"bytes": "237192"
}
],
"symlink_target": ""
}
|
""" FileTailer: File 'tail/cat' interface with optional:
simple filters, a.k.a. 'grep -P'
complex filters, a.k.a. break each line into "columns", with each column being a separate 'filter'
"""
import logging
import os.path
import re
from termcolor import colored
from .color_chooser import colorize
###############################################################################
# EXCEPTIONS
###############################################################################
class FileTailException(Exception):
    """Raised for FileTailer-specific failures."""
###############################################################################
# CONSTANTS
###############################################################################
###############################################################################
# LOGGING
###############################################################################
logger = logging.getLogger(__name__)
#logger.addHandler(logging.NullHandler()) # Disabling logging by default
class FileTailer(object):
    """ File "tail" interface

        Follows a single log file: reads newly appended lines, parses each
        line with a named-group regex, filters the parsed fields against
        user-supplied patterns and prints matching lines behind a colorized
        label.
    """

    def __init__(self, file_name, color, full_color, format, label):
        """ CONSTRUCTOR

            file_name:  File name to tail
            color:      ('green', 'red', 'red_on_white', ...) Color output from this file
            full_color: (True/False) Whether to color 'the entire output' or just the label
            format:     (regex, i.e. [(?P<id>[^\]]+)\]: (?P<msg>.*))
                        Line structure, see: http://www.regular-expressions.info/named.html
            label:      File label (usually, file name) to be prepended/colorized
        """
        self._file_name = file_name
        self._color = color
        self._full_color = full_color
        self._format = re.compile(format)
        self._label = colorize("[%s]" % label, self._color)
        self._file_handle = None

        logger.debug("FileTailer() successfully initialized for file: %s" % file_name)

    def __del__(self):
        """ DESTRUCTOR

            Cleanup: Close file handle if opened
        """
        logger.debug("DESTRUCTOR closing open file handle: %s" % self._file_name)
        self._close()

    def _close(self):
        """ Close file (safe to call more than once)
        """
        if self._file_handle:
            logger.info("Closing log file: %s" % self._file_name)
            self._file_handle.close()
            # BUGFIX: forget the dead handle so a later open()/tail() can
            # legitimately re-open the file; previously close() followed by
            # open() was a silent no-op because the stale handle still looked
            # "open".
            self._file_handle = None

    def _open_at(self, file_name, open_at_top):
        """ Open file name either "at the top" or "at_the_end"

            returns False if the file cannot be open
        """
        if not os.path.isfile(file_name):
            # BUGFIX: logger.warn() is a deprecated alias; warning() is the
            # documented API.
            logger.warning("Unable to locate file: %s" % file_name)
            return False

        logger.debug("Opening file: %s" % file_name)
        self._file_handle = open(file_name)
        if not open_at_top:
            self._file_handle.seek(0, 2)  # Set position to the end of the file

        return True

    def _format_line(self, line, line_format):
        """ If the line matches "format", return matched dictionary
            otherwise, return None

            format must include "named" patterns, i.e.:
                [(?P<id>[^\]]+)\]: (?P<msg>.*)
            see: http://www.regular-expressions.info/named.html
        """
        ret = None

        logger.debug("Matching line: %s with format: %s" % (line, line_format.pattern))
        matches = line_format.match(line)
        if matches:
            logger.debug("Line: %s matches format: %s" % (line, line_format.pattern))
            ret = matches.groupdict()
        else:
            logger.debug("Line: %s DOES NOT match format: %s" % (line, line_format.pattern))

        return ret

    def _filter_match(self, parsed_items, filters):
        """ Check if (parsed line) items match user supplied "filters"
            Return True if so, False otherwise
        """
        if not filters:
            logger.debug("Filters not supplied. Passing all through")
            return True

        common_keys = list(set(parsed_items.keys()) & set(filters.keys()))
        if not common_keys:
            msg = "No common keys between 'parsed items': %s and 'filters': %s" % \
                (parsed_items, filters)
            msg += ". Skipping by default"
            logger.debug(msg)
            return False

        logger.debug("Matching filters: %s with 'parsed items': %s on common keys: %s" % \
            (filters, parsed_items, common_keys))
        # Every filter whose key was actually parsed out of the line must match.
        matches = all(filters[_].search(parsed_items[_]) for _ in common_keys)
        if matches:
            logger.debug('MATCHED on common keys: %s' % common_keys)
        else:
            logger.debug('NOT MATCHED on common keys: %s' % common_keys)

        return matches

    def _highlight_line(self, line, hi_pattern):
        """ Highlight supplied line with (predefined) color and attributes
            Keep the rest of the line colorized based on the actual log
        """
        def colorize_item(item, pattern):
            """ If item matches "pattern" -> "highlight" it
                Otherwise, keep exactly as it is
            """
            if pattern.match(item):
                return colored(item, 'red', attrs=['bold', 'reverse'])
            else:
                return self._color_line(item)

        # NOTE(review): the fragments matched by hi_pattern only survive the
        # split (and thus get re-colorized below) if the pattern contains
        # capturing groups -- confirm callers compile it that way.
        line_items = hi_pattern.split(line)
        return "".join([colorize_item(_, hi_pattern) for _ in line_items])

    def _color_line(self, line):
        """ Colorize "line" by "color"
        """
        return colorize(line, self._color) if self._full_color else line

    def _process_lines(self, lines, filters, highlight):
        """ Process lines, a.k.a.: filter, highlight and emit them
            based on what the user requested
        """
        logger.debug("Found: %d new lines in file: %s" % (len(lines), self._file_handle.name))

        current_line = ""
        for line in lines:
            line = line.strip()
            logger.debug("Processing line: %s" % line)

            matched_items = self._format_line(line, self._format)
            # If line is not formatted, we treat it as a continuation of previous line
            if not matched_items:
                msg = "Line: %s does not match format: %s" % (line, self._format.pattern)
                # BUGFIX: separate the two concatenated sentences in the
                # debug message (they previously ran together).
                msg += ". Assuming, it's a continuation of previous line"
                logger.debug(msg)
                current_line += "\n%s" % line
                continue
            else:
                current_line = line

            # Match parsed line items to user supplied "filters"
            filter_match = self._filter_match(matched_items, filters)
            if filter_match:
                if highlight:
                    current_line = self._highlight_line(current_line, highlight)
                else:
                    current_line = self._color_line(current_line)
            else:
                logger.debug("Line: %s does not match filters: %s. Skipping" % \
                    (current_line, filters))
                current_line = ""
                continue

            # MAIN output of FileTailer
            # BUGFIX: single-argument print() behaves identically under
            # Python 2 and 3; the bare 'print' statement was Python 2 only.
            print("%s %s" % (self._label, current_line))
            current_line = ""

    ###########################################################################
    # PROPERTIES
    ###########################################################################

    @property
    def name(self):
        # Name of the file being followed.
        return self._file_name

    ###########################################################################
    # PUBLIC ROUTINES
    ###########################################################################

    def open(self, open_at_top):
        """ Open file

            return False if the file cannot be opened for some reason
            NOTE(review): returns None (falsy) when the file is already open,
            so callers must not treat the return value as "is open"
        """
        if not self._file_handle:
            print("[+ LOG] %s %s" % (self._label, self._color_line("Following log file: %s" % self._file_name)))
            # logger.info("Opening log file: %s" % self._file_name)
            return self._open_at(self._file_name, open_at_top)

    def close(self):
        """ Close file
        """
        print("[- LOG] %s %s" % (self._label, self._color_line("Unfollowing log file: %s" % self._file_name)))
        self._close()

    def tail(self, filters, highlight):
        """ File "tailer"

            1. Read new lines
            2. Match lines by filters and only print 'matched' lines
            3. If 'highlight' is requested, highlight lines if pattern is detected
        """
        # If for whatever reason the file was not open (open() not called) -> force open
        # And go to the end of the file
        if not self._file_handle:
            self._open_at(self._file_name, open_at_top=False)

        logger.debug("Tailing: %s file" % self._file_handle.name)
        # Remember the position so a partial read can be retried next cycle.
        where = self._file_handle.tell()
        lines = self._file_handle.readlines()
        if lines:
            self._process_lines(lines, filters, highlight)
        else:
            logger.debug("No new lines in file: %s" % self._file_handle.name)
            self._file_handle.seek(where)
|
{
"content_hash": "15aa2384e647944dcba569ae83c83577",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 111,
"avg_line_length": 34.08646616541353,
"alnum_prop": 0.5070034189919488,
"repo_name": "gluent/gluent-eng",
"id": "9299bd8fac1651bccb2311598a73f200f798da58",
"size": "9090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gluent_eng/file_tailer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "79829"
}
],
"symlink_target": ""
}
|
import re
from typing import Dict, Sequence, Tuple
from unittest import TestCase, mock
import pytest
from google.api_core.retry import Retry
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.os_login import OSLoginHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
TEST_GCP_CONN_ID: str = "test-gcp-conn-id"
TEST_DELEGATE_TO: str = "test-delegate-to"
TEST_PROJECT_ID: str = "test-project-id"
TEST_PROJECT_ID_2: str = "test-project-id-2"
TEST_USER: str = "test-user"
TEST_CREDENTIALS = mock.MagicMock()
TEST_BODY: Dict = mock.MagicMock()
TEST_RETRY: Retry = mock.MagicMock()
TEST_TIMEOUT: float = 4
TEST_METADATA: Sequence[Tuple[str, str]] = ()
TEST_PARENT: str = "users/test-user"
class TestOSLoginHook(TestCase):
    """import_ssh_public_key must forward an explicitly supplied project id."""

    def setUp(self) -> None:
        # Replace the hook's __init__ so no real GCP connection is resolved.
        init_patch = mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        )
        with init_patch:
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=TEST_PROJECT_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        expected_request = {
            "parent": TEST_PARENT,
            "ssh_public_key": TEST_BODY,
            "project_id": TEST_PROJECT_ID,
        }
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=expected_request,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestOSLoginHookWithDefaultProjectIdHook(TestCase):
    """With project_id=None the connection's default project id is used."""

    def setUp(self) -> None:
        # Replace the hook's __init__ so no real GCP connection is resolved.
        init_patch = mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        )
        with init_patch:
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=None,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        # The default project id from the credentials must be filled in.
        expected_request = {
            "parent": TEST_PARENT,
            "ssh_public_key": TEST_BODY,
            "project_id": TEST_PROJECT_ID_2,
        }
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=expected_request,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestOSLoginHookWithoutDefaultProjectIdHook(TestCase):
    """Without a connection default, an explicit project id is still honored."""

    def setUp(self) -> None:
        # Replace the hook's __init__ so no real GCP connection is resolved.
        init_patch = mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        )
        with init_patch:
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=TEST_PROJECT_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        # The explicit project id wins over the credentials-derived one.
        expected_request = {
            "parent": TEST_PARENT,
            "ssh_public_key": TEST_BODY,
            "project_id": TEST_PROJECT_ID,
        }
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=expected_request,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
# Regex-escaped error text raised when no project id can be resolved; used
# as the `match` argument of pytest.raises below.
TEST_MESSAGE = re.escape(
    "The project id must be passed either as keyword project_id parameter or as project_id extra in "
    "Google Cloud connection definition. Both are not set!"
)
class TestOSLoginHookMissingProjectIdHook(TestCase):
    """With no resolvable project id the call must raise AirflowException."""

    def setUp(self) -> None:
        # Replace the hook's __init__ so no real GCP connection is resolved.
        init_patch = mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        )
        with init_patch:
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.import_ssh_public_key(
                user=TEST_USER,
                ssh_public_key=TEST_BODY,
                project_id=None,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
|
{
"content_hash": "656f5d9f684daec4c79d7a41c1fd970b",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 107,
"avg_line_length": 36.20245398773006,
"alnum_prop": 0.6246398915438062,
"repo_name": "apache/incubator-airflow",
"id": "d2b88e4c6c895d41c48be44ae0839ff8167dc4b7",
"size": "6686",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/hooks/test_os_login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
from django import forms
from gollahalli_cms.editor.models import ContentModel, MetaContentModel, EducationModel, ProjectsModel, TutorialsModel, ExperienceModel
class ContentModelForm(forms.ModelForm):
    """
    Main content form (ModelForm over ContentModel).
    """
    # ref_id and created are optional so new records can omit them; bio is
    # edited in a multi-line textarea.
    ref_id = forms.CharField(required=False)
    bio = forms.CharField(widget=forms.Textarea)
    created = forms.DateTimeField(required=False)

    class Meta:
        model = ContentModel
        fields = "__all__"
class EducationContentModelForm(forms.ModelForm):
    """
    Education content form (ModelForm over EducationModel).
    """
    # id and ref_id are optional so new records can omit them.
    id = forms.CharField(required=False)
    ref_id = forms.CharField(required=False)

    class Meta:
        model = EducationModel
        fields = "__all__"
class ProjectContentModelForm(forms.ModelForm):
    """
    Project content form (ModelForm over ProjectsModel).
    """
    # id and ref_id are optional so new records can omit them.
    id = forms.CharField(required=False)
    ref_id = forms.CharField(required=False)

    class Meta:
        model = ProjectsModel
        fields = "__all__"
class TutorialContentModelForm(forms.ModelForm):
    """
    Tutorial content form (ModelForm over TutorialsModel).
    """
    # id and ref_id are optional so new records can omit them.
    id = forms.CharField(required=False)
    ref_id = forms.CharField(required=False)

    class Meta:
        model = TutorialsModel
        fields = "__all__"
class ExperienceContentModelForm(forms.ModelForm):
    """
    Experience content form (ModelForm over ExperienceModel).
    """
    # id and ref_id are optional so new records can omit them.
    id = forms.CharField(required=False)
    ref_id = forms.CharField(required=False)

    class Meta:
        model = ExperienceModel
        fields = "__all__"
class MetaContentModelForm(forms.ModelForm):
    """
    Meta content form (ModelForm over MetaContentModel).
    """
    # All overridden fields are optional so partial meta records validate.
    ref_id = forms.CharField(required=False)
    footer = forms.CharField(required=False)
    header = forms.CharField(required=False)
    meta = forms.CharField(required=False)

    class Meta:
        model = MetaContentModel
        fields = "__all__"
|
{
"content_hash": "0b86bb5212631afb85e4768b165a5c0e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 135,
"avg_line_length": 24.305555555555557,
"alnum_prop": 0.668,
"repo_name": "akshaybabloo/gollahalli-com",
"id": "b2c3381236137ff928ceb608394b643964163b0a",
"size": "1750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gollahalli_cms/editor/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "575955"
},
{
"name": "HTML",
"bytes": "174680"
},
{
"name": "JavaScript",
"bytes": "626303"
},
{
"name": "Python",
"bytes": "222676"
},
{
"name": "TeX",
"bytes": "244"
},
{
"name": "XSLT",
"bytes": "6993"
}
],
"symlink_target": ""
}
|
import argparse
import os
import queue
import subprocess
import sys
import threading
PASS_COLOR = '\033[92m'
FAIL_COLOR = '\033[31m'
ENDC_COLOR = '\033[0m'
class Task:
    """One test command together with its captured result."""

    def __init__(self, name, cmd):
        self.name = name        # display name of the test
        self.cmd = cmd          # shell command actually executed
        self.success = False    # flipped to True once the command exits 0
        self.output = ""        # combined stdout/stderr captured from the run
def run_test_program(task_queue):
    """Worker loop: drain task_queue, running each task's command in a shell.

    Records success/output on each task and prints a PASS/FAIL line per task.
    Returns when the queue is empty.
    """
    while True:
        try:
            task = task_queue.get_nowait()
        except queue.Empty:
            return
        try:
            task.output = subprocess.check_output(
                task.cmd, stderr=subprocess.STDOUT,
                shell=True, universal_newlines=True)
            task.success = True
            tag = "%sPASS:%s" % (PASS_COLOR, ENDC_COLOR)
        except subprocess.CalledProcessError as err:
            # Non-zero exit: keep the (combined) output for the report.
            task.output = err.output
            task.success = False
            tag = "%sFAIL:%s" % (FAIL_COLOR, ENDC_COLOR)
        print("%s %s" % (tag, task.name))
        task_queue.task_done()
def writeOutput(task, f):
    """Append the task's command line and captured output to stream f."""
    f.write("# Running %s\n%s\n" % (task.cmd, task.output))
def print_log(log_file, tasks):
    """Write every task's output to log_file and print failures plus a summary.

    Args:
        log_file: Path of the log file to (over)write.
        tasks: Sequence of finished Task objects.
    """
    success_count = 0
    fail_count = 0
    fail_header = False
    # BUGFIX: use a context manager so the log file is closed even if a
    # write raises (the old code leaked the handle on error).
    with open(log_file, mode="w+") as f:
        for task in tasks:
            writeOutput(task, f)
            if task.success:
                success_count += 1
            else:
                if not fail_header:
                    # Print the banner once, before the first failure.
                    print("""\
============================================================================
Failed tests output
============================================================================""")
                    fail_header = True
                writeOutput(task, sys.stdout)
                fail_count += 1
        summary = """\
============================================================================
Testsuite summary
============================================================================
# TOTAL: %d
# SUCCESS: %d
# FAIL: %d
============================================================================
See %s for full output.
============================================================================"""
        summary = summary % (len(tasks), success_count, fail_count, log_file)
        f.write(summary)
    print(summary)
# Memcheck flags: fail the run (exit code 1) on any detected error and keep
# deep call stacks so leak origins are identifiable.
VALGRIND_OPTS = "--tool=memcheck --leak-check=full --error-exitcode=1 --num-callers=30"
def run_tests(args):
    """Build the task list, fan it out to one worker thread per CPU, report.

    Exits the process with status 1 if any test failed.
    """
    log_file = "test-suite.log"
    prefix = ""
    if args.valgrind:
        # Wrap every test command with valgrind/memcheck.
        prefix = " ".join([args.valgrind, args.valgrind_supp, VALGRIND_OPTS])
        log_file = "test-suite-memcheck.log"

    tasks = []
    task_queue = queue.Queue()
    for test in args.tests.split():
        cmd = "%s %s" % (prefix, test) if prefix else test
        task = Task(test, cmd)
        tasks.append(task)
        task_queue.put(task)

    for _ in range(os.cpu_count()):
        threading.Thread(target=run_test_program, args=(task_queue,)).start()
    # Block until every queued task has been marked done by a worker.
    task_queue.join()

    print_log(log_file, tasks)
    if any(not task.success for task in tasks):
        sys.exit(1)
if __name__ == "__main__":
    # Command-line entry point: parse options, normalize them, run the suite.
    parser = argparse.ArgumentParser()
    parser.add_argument("--tests", help="List of tests to run", type=str)
    parser.add_argument("--valgrind", help="Path to valgrind, if provided " \
                        "the tests are run with it", type=str)
    parser.add_argument("--valgrind-supp", help="Path to valgrind's suppression file", type=str)
    parser.add_argument("--color", metavar="WHEN", type=str,
                        help="Use colors. WHEN can be always, auto and never.")
    parser.set_defaults(color="auto")
    args = parser.parse_args()
    if args.valgrind_supp:
        # Pre-format the suppression path as valgrind's command-line flag.
        args.valgrind_supp = "--suppressions=%s" % args.valgrind_supp
    if args.color == "auto":
        # Auto mode: colorize only when stdout is an interactive terminal.
        if sys.stdout.isatty():
            args.color = "always"
        else:
            args.color = "never"
    if args.color == "never":
        # Blank the module-level ANSI sequences so all output is plain text.
        PASS_COLOR = ''
        FAIL_COLOR = ''
        ENDC_COLOR = ''
    run_tests(args)
|
{
"content_hash": "e082bc828a4f629f8ab7535abc1ea579",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 96,
"avg_line_length": 29.610294117647058,
"alnum_prop": 0.4954060094363049,
"repo_name": "bdilly/soletta",
"id": "37d1b1bd946687ce71e14d8842b91a51bb5bda94",
"size": "5657",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "data/scripts/suite.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4879046"
},
{
"name": "C++",
"bytes": "57080"
},
{
"name": "JavaScript",
"bytes": "28032"
},
{
"name": "Makefile",
"bytes": "51722"
},
{
"name": "NSIS",
"bytes": "2338"
},
{
"name": "Python",
"bytes": "224668"
},
{
"name": "Shell",
"bytes": "14349"
},
{
"name": "Smarty",
"bytes": "1145"
},
{
"name": "VimL",
"bytes": "748"
}
],
"symlink_target": ""
}
|
import setuptools
# Use the README as the long description so PyPI shows the same text as the
# repository front page.
with open('README.rst') as f:
    long_description = f.read()

setuptools.setup(
    name='happymongo',
    version='0.1.1',
    description=('Python module for making it easy and consistent to '
                 'connect to MongoDB via PyMongo either in Flask or in'
                 ' a non-flask application'),
    long_description=long_description,
    author='Matt Martz',
    author_email='matt@sivel.net',
    url='https://github.com/sivel/happymongo',
    license='Apache License, Version 2.0',
    # Ship every package except the test tree.
    packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
    install_requires=['pymongo']
)
|
{
"content_hash": "dfaac1802d47824fb65304e9af2dc91f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 32.94736842105263,
"alnum_prop": 0.6533546325878594,
"repo_name": "sivel/happymongo",
"id": "62ba87ad561ec68dd05e0357e848915806bc4a70",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6089"
}
],
"symlink_target": ""
}
|
"""
unknown.py
Created by Thomas Mangin on 2014-06-30.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.open.capability.capability import Capability
# ============================================================ UnknownCapability
#
@Capability.unknown
class UnknownCapability(Capability):
    """Catch-all for capability codes without a dedicated Capability class."""

    def set(self, capability, data=''):
        """Record the raw capability code and payload; returns self for chaining."""
        self.capability = capability
        self.data = data
        return self

    def _iana_status(self):
        """Classify the code against the registry tables on Capability.CODE."""
        if self.capability in Capability.CODE.reserved:
            return 'reserved'
        if self.capability in Capability.CODE.unassigned:
            return 'unassigned'
        return 'unknown'

    def __str__(self):
        return '%s %s' % (self._iana_status().capitalize(), str(self.capability))

    def json(self):
        return '{ "name": "unknown", "iana": "%s", "value": %d, "raw": "%s" }' % (
            self._iana_status(), self.capability, self.data)

    def extract(self):
        # An unknown capability contributes no encoded body parts.
        return []

    @staticmethod
    def unpack_capability(instance, data, capability):
        return instance.set(capability, data)
|
{
"content_hash": "35b8546999681f9141c92fc53a36cac1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 107,
"avg_line_length": 27.261904761904763,
"alnum_prop": 0.6716157205240174,
"repo_name": "blablacar/exabgp",
"id": "4b3ddede2f96c8e0c2be0d2ab6495fd9a0442612",
"size": "1163",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/open/capability/unknown.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1191461"
},
{
"name": "Shell",
"bytes": "17891"
}
],
"symlink_target": ""
}
|
"""Extract parse_example op configuration to a proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def extract_example_parser_configuration(parse_example_op, sess):
  """Returns an ExampleParserConfig proto.

  Args:
    parse_example_op: A ParseExample `Operation`
    sess: A tf.Session needed to obtain some configuration values.
  Returns:
    A ExampleParserConfig proto.

  Raises:
    ValueError: If attributes are inconsistent.
  """
  config = example_parser_configuration_pb2.ExampleParserConfiguration()

  # Per-kind feature counts come straight from the op's attrs.
  num_sparse = parse_example_op.get_attr("Nsparse")
  num_dense = parse_example_op.get_attr("Ndense")
  total_features = num_dense + num_sparse

  sparse_types = parse_example_op.get_attr("sparse_types")
  dense_types = parse_example_op.get_attr("Tdense")
  dense_shapes = parse_example_op.get_attr("dense_shapes")

  # Cross-check the per-feature attr lists against the declared counts
  # before indexing into them below.
  if len(sparse_types) != num_sparse:
    raise ValueError("len(sparse_types) attribute does not match "
                     "Nsparse attribute (%d vs %d)" %
                     (len(sparse_types), num_sparse))

  if len(dense_types) != num_dense:
    raise ValueError("len(dense_types) attribute does not match "
                     "Ndense attribute (%d vs %d)" %
                     (len(dense_types), num_dense))

  if len(dense_shapes) != num_dense:
    raise ValueError("len(dense_shapes) attribute does not match "
                     "Ndense attribute (%d vs %d)" %
                     (len(dense_shapes), num_dense))

  # Skip over the serialized input, and the names input.
  fetch_list = parse_example_op.inputs[2:]

  # Fetch total_features key names and num_dense default values.
  if len(fetch_list) != (total_features + num_dense):
    raise ValueError("len(fetch_list) does not match total features + "
                     "num_dense (%d vs %d)" %
                     (len(fetch_list), (total_features + num_dense)))

  fetched = sess.run(fetch_list)

  if len(fetched) != len(fetch_list):
    raise ValueError("len(fetched) does not match len(fetch_list) "
                     "(%d vs %d)" % (len(fetched), len(fetch_list)))

  # Fetch indices.
  # `fetched` layout: [sparse keys..., dense keys..., dense defaults...]
  # (mirrors the op input order established by fetch_list above).
  sparse_keys_start = 0
  dense_keys_start = sparse_keys_start + num_sparse
  dense_def_start = dense_keys_start + num_dense

  # Output tensor indices.
  # Outputs layout: [sparse indices..., sparse values..., sparse shapes...,
  # dense values...].
  sparse_indices_start = 0
  sparse_values_start = num_sparse
  sparse_shapes_start = sparse_values_start + num_sparse
  dense_values_start = sparse_shapes_start + num_sparse

  # Dense features.
  for i in range(num_dense):
    key = fetched[dense_keys_start + i]
    feature_config = config.feature_map[key]
    # Convert the default value numpy array fetched from the session run
    # into a TensorProto.
    fixed_config = feature_config.fixed_len_feature

    fixed_config.default_value.CopyFrom(
        tensor_util.make_tensor_proto(fetched[dense_def_start + i]))
    # Convert the shape from the attributes
    # into a TensorShapeProto.
    fixed_config.shape.CopyFrom(
        tensor_shape.TensorShape(dense_shapes[i]).as_proto())

    fixed_config.dtype = dense_types[i].as_datatype_enum
    # Get the output tensor name.
    fixed_config.values_output_tensor_name = parse_example_op.outputs[
        dense_values_start + i].name

  # Sparse features.
  for i in range(num_sparse):
    key = fetched[sparse_keys_start + i]
    feature_config = config.feature_map[key]
    var_len_feature = feature_config.var_len_feature
    var_len_feature.dtype = sparse_types[i].as_datatype_enum
    # Each sparse feature owns three output tensors (indices/values/shape).
    var_len_feature.indices_output_tensor_name = parse_example_op.outputs[
        sparse_indices_start + i].name
    var_len_feature.values_output_tensor_name = parse_example_op.outputs[
        sparse_values_start + i].name
    var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[
        sparse_shapes_start + i].name

  return config
|
{
"content_hash": "949a41591c1b6bd6d773108082fdad51",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 74,
"avg_line_length": 37.5,
"alnum_prop": 0.677037037037037,
"repo_name": "jbedorf/tensorflow",
"id": "dc8937a31995c1752ea49638ff23ff805a39753f",
"size": "4739",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/util/example_parser_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
"""Parse prices of a device from geizhals."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_NAME
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this platform.
CONF_DESCRIPTION = 'description'
CONF_PRODUCT_ID = 'product_id'
CONF_LOCALE = 'locale'

# Icon shown in the frontend for the price sensor.
ICON = 'mdi:coin'

# Throttle interval for polling geizhals (applied to Geizwatch.update).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)

# Platform schema: a name and numeric product id are required; the
# description defaults to 'Price' and the locale to the German site.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_PRODUCT_ID): cv.positive_int,
    vol.Optional(CONF_DESCRIPTION, default='Price'): cv.string,
    vol.Optional(CONF_LOCALE, default='DE'): vol.In(
        ['AT',
         'EU',
         'DE',
         'UK',
         'PL']),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Geizwatch sensor from a validated config entry."""
    sensor = Geizwatch(
        config.get(CONF_NAME),
        config.get(CONF_DESCRIPTION),
        config.get(CONF_PRODUCT_ID),
        config.get(CONF_LOCALE),
    )
    # Second argument requests an immediate first update.
    add_entities([sensor], True)
class Geizwatch(Entity):
    """Implementation of Geizwatch.

    Tracks the best (lowest) price of one geizhals product and exposes
    the top four prices as state attributes.
    """

    def __init__(self, name, description, product_id, domain):
        """Initialize the sensor.

        name: entity name shown in the frontend.
        description: free-text label exposed as a state attribute.
        product_id: geizhals product id to watch.
        domain: geizhals locale (AT/EU/DE/UK/PL).
        """
        # Third-party import kept local so the dependency is only needed
        # once a sensor is actually created.
        from geizhals import Device, Geizhals

        # internal
        self._name = name
        self._geizhals = Geizhals(product_id, domain)
        self._device = Device()

        # external
        self.description = description
        self.product_id = product_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the frontend."""
        return ICON

    @property
    def state(self):
        """Return the best price of the selected product."""
        if not self._device.prices:
            return None
        return self._device.prices[0]

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # BUG FIX: the original appended the string 'None' directly to
        # self._device.prices inside this property getter.  That side
        # effect corrupted the `state` property (it could report 'None'
        # instead of None for a device with no prices).  Pad a local
        # copy instead; the attribute values themselves are unchanged.
        prices = list(self._device.prices)
        while len(prices) < 4:
            prices.append('None')
        return {'device_name': self._device.name,
                'description': self.description,
                'unit_of_measurement': self._device.price_currency,
                'product_id': self.product_id,
                'price1': prices[0],
                'price2': prices[1],
                'price3': prices[2],
                'price4': prices[3]}

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest price from geizhals and updates the state."""
        self._device = self._geizhals.parse()
|
{
"content_hash": "248320915bfe07121517dae34f00c0bd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 71,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.6222984562607204,
"repo_name": "molobrakos/home-assistant",
"id": "03c263f54ab1d5be5ba4a6434965f049468fe33d",
"size": "2915",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/geizhals/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
from jsonpath_rw import jsonpath, parse
import json
__author__ = 'Masataka'
class JobInfoList:
    """Holds an ordered list of jobs and tracks which one is current."""

    def __init__(self):
        """Start with an empty job list; -1 means 'last job' / none selected."""
        self.current_job_id = -1
        self.jobList = []

    def get_current_job(self):
        """Return the currently selected job, or None when the list is empty."""
        if not self.jobList:
            return None
        return self.jobList[self.current_job_id]

    def get_job_list(self):
        """Return the underlying (mutable) job list."""
        return self.jobList

    def set_current_job_id(self, job_id):
        """Select *job_id* as current; out-of-range ids fall back to -1."""
        self.current_job_id = job_id if len(self.jobList) > job_id else -1
class JobInfo:
    """Bundles the parameter blocks of a single job and provides
    jsonpath-based access to them."""

    def __init__(self, param=None, dispatcher_list=None, config_info=None):
        """Assemble the four parameter blocks.

        param: file information block.
        dispatcher_list: available dispatchers.
        config_info: configuration block.
        """
        self.test = 1
        override = {"job_setting_override": {
            "dispatcherIndex": 0  # GUI Select
        }}
        self._param = [
            override,
            {"configInfo": config_info},
            {"fileInfo": param},
            {"dispatcherInfo": dispatcher_list},
        ]
        # Position of each block inside self._param.
        self._param_key_list = {"job_setting_override": 0, "configInfo": 1,
                                "fileInfo": 2, "dispatcherInfo": 3}

    def setvalue(self, key, value):
        """Store an override setting under *key*."""
        self._param[0]["job_setting_override"][key] = value

    def getvalue(self, key):
        """Evaluate jsonpath expression *key* against the parameter blocks.

        Returns the list of matched values, or [""] when nothing matched.
        """
        matches = parse(key).find(self._param)
        values = [match.value for match in matches]
        return values if values else [""]

    def get_jobname(self):
        """Expand the configured job-name template with the file name."""
        template = self.getvalue("[*].configInfo.[*].*.jobNameTemplate")[0]
        filename = self.getvalue("[*].*.fileNameWithoutExt")[0]
        return template.replace("<filename>", filename)
|
{
"content_hash": "446ff3c5f4bc8ca9da4376f2ad73beaa",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 111,
"avg_line_length": 31.40625,
"alnum_prop": 0.5805970149253732,
"repo_name": "plinecom/JobManager",
"id": "81c7ec64d330f5e087da05362fa214697844a37e",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job/jobinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59376"
}
],
"symlink_target": ""
}
|
"""Read the contents of a QE output file containing QE dynmical matrix"""
import re
import math
import numpy as np
from PDielec.Constants import amu, angs2bohr, hartree2ev
from PDielec.UnitCell import UnitCell
from PDielec.GenericOutputReader import GenericOutputReader
class QEOutputReader(GenericOutputReader):
    """Read the contents of a QE output file containing QE dynmical matrix.

    Parses both the pw.x/ph.x log files and the dynG dynamical-matrix
    file by pattern-matching lines (see _read_output_files) and
    dispatching to the matching _read_* handler.
    """

    def __init__(self, filenames):
        """Initialise the reader for the given list of output files."""
        GenericOutputReader.__init__(self, filenames)
        self.type = 'QE output'
        # Lattice parameter a in bohr; filled in while parsing.
        self._alat = None
        return

    def _read_output_files(self):
        """Read the QE file names."""
        # Define the search keys to be looked for in the files
        self.manage = {}  # Empty the dictionary matching phrases
        self.manage['header'] = (re.compile('Dynamical matrix file'), self._read_header)
        self.manage['lattice'] = (re.compile('Basis vectors'), self._read_lattice_vectors)
        self.manage['lattice2'] = (re.compile('cubic'), self._read_lattice_vectors)
        self.manage['lattice3'] = (re.compile('CELL_PARAMETERS'), self._read_cell_parameters)
        self.manage['positions'] = (re.compile('ATOMIC_POSITIONS'), self._read_fractional_coordinates)
        self.manage['dynamical'] = (re.compile(' *Dynamical Matrix in c'), self._read_dynamical)
        self.manage['epsilon'] = (re.compile(' *Dielectric Tensor:'), self._read_epsilon)
        self.manage['charges'] = (re.compile(' *Effective Charges E-U:'), self._read_born_charges)
        self.manage['energy_cutoff'] = (re.compile(' *kinetic-energy cutoff'), self._read_energy_cutoff)
        self.manage['kpoints'] = (re.compile(' *number of k points'), self._read_kpoints)
        self.manage['kpoint_grid'] = (re.compile('K_POINTS automatic'), self._read_kpoint_grid)
        self.manage['electrons'] = (re.compile('^ *number of electrons'), self._read_electrons)
        self.manage['energy'] = (re.compile('^ *total energy *='), self._read_energy)
        self.manage['alat'] = (re.compile('^ *lattice parameter'), self._read_alat)
        # BUG FIX: this entry previously reused the key 'alat', silently
        # overwriting the 'lattice parameter' handler above so it could
        # never fire.  Give it its own key, following the
        # 'lattice2'/'lattice3' convention used elsewhere in this table.
        self.manage['alat2'] = (re.compile('^ *A = '), self._read_alat2)
        self.manage['celldm1'] = (re.compile('^ *celldm.1. ='), self._read_celldm1)
        self.manage['pressure'] = (re.compile('^ *total *stress *.Ry'), self._read_pressure)
        self.manage['nions'] = (re.compile('^ *number of atoms/cell'), self._read_nions)
        for f in self._outputfiles:
            self._read_output_file(f)
        return

    def _read_nions(self, line):
        """Read the number of atoms/cell from the log file."""
        self.nions = int(line.split()[4])
        if self.debug:
            print('_read_nions: nions={}'.format(self.nions))
        return

    def _read_pressure(self, line):
        """Read the total stress line from the log file (divided by 10)."""
        self.pressure = float(line.split()[5])/10.0
        if self.debug:
            print('_read_pressure: pressure={}'.format(self.pressure))
        return

    def _read_celldm1(self, line):
        """Read celldm(1) from the log file."""
        string = line.split()[2]
        string = string.replace(',', '')
        t = float(string)
        # Snap to the exact conversion constant when it is within
        # rounding error of 1 Angstrom expressed in bohr.
        if abs(t - angs2bohr) < 1.0e-4:
            t = angs2bohr
        # There are rounding errors when reading from the log file
        # So only read if there is no alternative
        if self._alat is None:
            self._alat = t
        if self.debug:
            print('_read_celldm1: _alat={}'.format(self._alat))
        return

    def _read_alat(self, line):
        """Read the 'lattice parameter' line from the log file."""
        t = float(line.split()[4])
        if abs(t - angs2bohr) < 1.0e-4:
            t = angs2bohr
        # There are rounding errors when reading from the log file
        # So only read if there is no alternative
        if self._alat is None:
            self._alat = t
        if self.debug:
            print('_read_alat: _alat={}'.format(self._alat))
        return

    def _read_alat2(self, line):
        """Read the 'A = ' form of the lattice parameter from the log file."""
        t = float(line.split()[2])
        if abs(t - angs2bohr) < 1.0e-4:
            t = angs2bohr
        # There are rounding errors when reading from the log file
        # So only read if there is no alternative
        if self._alat is None:
            self._alat = t
        if self.debug:
            print('_read_alat2: _alat={}'.format(self._alat))
        return

    def _read_electrons(self, line):
        """Read the number of electrons from the log file."""
        self.electrons = float(line.split()[4])
        if self.debug:
            print('_read_electrons: electrons={}'.format(self.electrons))
        return

    def _read_energy(self, line):
        """Read the total energy from the log file.

        Converted Ry -> eV via hartree2ev / 2 (1 Ry = half a Hartree).
        """
        self.final_energy_without_entropy = float(line.split()[3]) * hartree2ev / 2.0
        self.final_free_energy = float(line.split()[3]) * hartree2ev / 2.0
        if self.debug:
            print('_read_energy: energy={}'.format(self.final_free_energy))
        return

    def _read_energy_cutoff(self, line):
        """Read the kinetic-energy cutoff (Ry -> eV) from the log file."""
        self.energy_cutoff = float(line.split()[3]) * hartree2ev / 2.0
        if self.debug:
            print('_read_energy_cutoff: energy_cutoff={}'.format(self.energy_cutoff))
        return

    def _read_kpoints(self, line):
        """Read the number of k points from the log file."""
        self.kpoints = int(line.split()[4])
        if self.debug:
            print('_read_kpoints: kpoints={}'.format(self.kpoints))
        return

    def _read_kpoint_grid(self, line):
        """Read the automatic k-point grid (next line) from the input."""
        line = self.file_descriptor.readline()
        self.kpoint_grid = [float(f) for f in line.split()[0:3]]
        if self.debug:
            print('_read_kpoints_grid kpoint_grid={}'.format(self.kpoint_grid))
        return

    def _read_header(self, line):
        """Read the dynG header: species count, ion count and alat."""
        line = self.file_descriptor.readline()
        line = self.file_descriptor.readline()
        self.nspecies = int(line.split()[0])
        self.nions = int(line.split()[1])
        t = float(line.split()[3])
        if abs(t - angs2bohr) < 1.0e-4:
            t = angs2bohr
        self._alat = t
        if self.debug:
            print('_read_header alat={}'.format(self._alat))
        return

    def _read_epsilon(self, line):
        """Read the 3x3 zero-frequency optical dielectric tensor from dynG."""
        self.file_descriptor.readline()
        linea = self.file_descriptor.readline().split()
        self.zerof_optical_dielectric = []
        self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])
        linea = self.file_descriptor.readline().split()
        self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])
        linea = self.file_descriptor.readline().split()
        self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])
        if self.debug:
            # fixed typo: was '_read_espilon'
            print('_read_epsilon zerof_optical_dielectric={}'.format(self.zerof_optical_dielectric))
        return

    def _read_masses(self):
        """Read the species names and masses from dynG."""
        self.masses_per_type = []
        self.species = []
        for i in range(self.nspecies):
            linea = self.file_descriptor.readline().replace('\'', '').split()
            self.species.append(linea[1].capitalize())
            # The factor of two is because au in pwscf are half mass of electron
            self.masses_per_type.append(float(linea[2])*2/amu)
        if self.debug:
            print('_read_masses masses={}'.format(self.masses_per_type))
        line = ''
        self._read_dyng_coordinates(line)
        return

    def _read_dynamical(self, line):
        """Read the gamma-point dynamical matrix (mass weighted) from dynG."""
        nmodes = self.nions*3
        hessian = np.zeros((nmodes, nmodes))
        self.file_descriptor.readline()
        linea = self.file_descriptor.readline().split()
        # We only want to read the hessian at gamma
        q = [float(q) for q in linea[3:6]]
        qsum = q[0]*q[0] + q[1]*q[1] + q[2]*q[2]
        if qsum > 0.0001:
            return
        # We read the hessian and store the mass weighted matrix
        linea = self.file_descriptor.readline().split()
        for a in range(self.nions):
            for b in range(self.nions):
                self.file_descriptor.readline()
                for ixyz in range(3):
                    ipos = a*3 + ixyz
                    linea = self.file_descriptor.readline().split()
                    for jxyz in range(3):
                        jpos = b*3 + jxyz
                        # factor of 0.5 'cos of au units in pwscf
                        hessian[ipos, jpos] = 0.5*float(linea[2*jxyz])/(amu*math.sqrt(self.masses[a]*self.masses[b]))
                    # end for jxyz
                # end for ixyz
            # end for b
        # end for a
        self._dynamical_matrix(hessian)
        if self.debug:
            print('_read_dynamical')
        return

    def _read_born_charges(self, line):
        """Read the Born effective charge tensors (3x3 per ion) from dynG."""
        self.born_charges = []
        line = self.file_descriptor.readline()
        for i in range(self.nions):
            b = []
            line = self.file_descriptor.readline()
            line = self.file_descriptor.readline()
            b.append([float(line.split()[0]), float(line.split()[1]), float(line.split()[2])])
            line = self.file_descriptor.readline()
            b.append([float(line.split()[0]), float(line.split()[1]), float(line.split()[2])])
            line = self.file_descriptor.readline()
            b.append([float(line.split()[0]), float(line.split()[1]), float(line.split()[2])])
            self.born_charges.append(b)
        if self.debug:
            print('_read_born_charges')
        return

    def _read_cell_parameters(self, line):
        """Read a CELL_PARAMETERS block (log file) into a new UnitCell."""
        line = line.replace('(', ' ')
        line = line.replace(')', ' ')
        linea = line.split()
        # Overwrite alat here as it is more accurate than anywhere else in the log file
        if len(linea) > 2:
            self._alat = float(linea[2])
        linea = self.file_descriptor.readline().split()
        avector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        linea = self.file_descriptor.readline().split()
        bvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        linea = self.file_descriptor.readline().split()
        cvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        self.unit_cells.append(UnitCell(avector, bvector, cvector))
        self.ncells = len(self.unit_cells)
        self.volume = self.unit_cells[-1].volume
        if self.debug:
            print('_read_cell_parameters: volume={}'.format(self.volume))
        return

    def _read_lattice_vectors(self, line):
        """Read the lattice vectors from dynG, then the masses section."""
        linea = self.file_descriptor.readline().split()
        avector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        linea = self.file_descriptor.readline().split()
        bvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        linea = self.file_descriptor.readline().split()
        cvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]
        self.unit_cells.append(UnitCell(avector, bvector, cvector))
        self.ncells = len(self.unit_cells)
        self.volume = self.unit_cells[-1].volume
        if self.debug:
            print('_read_lattices_vectors: volume={}'.format(self.volume))
        self._read_masses()
        return

    def _read_fractional_coordinates(self, line):
        """Read an ATOMIC_POSITIONS block (log file) into the last cell."""
        if self.nions <= 0:
            return
        species_list = []
        fractional_coordinates = []
        for i in range(self.nions):
            linea = self.file_descriptor.readline().split()
            species_list.append(linea[0])
            fractional_coordinates.append([float(linea[1]), float(linea[2]), float(linea[3])])
        self.unit_cells[-1].set_fractional_coordinates(fractional_coordinates)
        self.unit_cells[-1].set_element_names(species_list)
        self.ncells = len(self.unit_cells)
        self.volume = self.unit_cells[-1].volume
        if self.debug:
            print('_read_fractional_coordinates: volume={}'.format(self.volume))
        return

    def _read_dyng_coordinates(self, line):
        """Read cartesian coordinates and per-ion masses from dynG."""
        self.masses = []
        self.atom_type_list = []
        self.ions_per_type = [0 for i in range(self.nspecies)]
        species_list = []
        xyz_coordinates = []
        # It took a long time to work out that alat is in bohr
        const = self._alat/angs2bohr
        for i in range(self.nions):
            linea = self.file_descriptor.readline().split()
            species_index = int(linea[1])
            xyz_coordinates.append([const*float(linea[2]), const*float(linea[3]), const*float(linea[4])])
            self.masses.append(self.masses_per_type[species_index-1])
            self.atom_type_list.append(species_index-1)
            self.ions_per_type[species_index-1] += 1
            species_list.append(self.species[species_index-1])
        self.unit_cells[-1].set_xyz_coordinates(xyz_coordinates)
        self.unit_cells[-1].set_element_names(species_list)
        self.ncells = len(self.unit_cells)
        self.volume = self.unit_cells[-1].volume
        if self.debug:
            print('_read_dyng_coordinates: volume={}'.format(self.volume))
        return
|
{
"content_hash": "6f00f9ee7c20666432bd2dab9a034d7f",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 117,
"avg_line_length": 39.673469387755105,
"alnum_prop": 0.5627572016460906,
"repo_name": "JohnKendrick/PDielec",
"id": "92992a8e71781db18be157e2769abdf6f34ee63e",
"size": "14124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDielec/QEOutputReader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "Python",
"bytes": "879573"
},
{
"name": "TeX",
"bytes": "70400"
}
],
"symlink_target": ""
}
|
from ..errors import *
from .state import State
from .links import BoundLink
from .variable import Variable
from .assignment import InferVariable
class Identifier(BoundLink):
    """A link resolved by looking its name up in the current scope."""

    name = None

    def __init__(self, name:str, tokens = None):
        """Create an unresolved link for *name*."""
        BoundLink.__init__(self, None, tokens)
        self.name = name

    def _resolveIdentifier(self):
        """Collect scope entries matching self.name.

        Returns the (possibly empty) set of matches; raises
        AmbiguityError when more than one distinct match exists.
        """
        # Use sets to ignore duplicate entries
        #TODO: Fix duplicate entries
        matches = set(State.scope.resolveIdentifier(self.name))
        if State.builtins is not None:
            matches |= set(State.builtins.resolveIdentifier(self.name))
        if len(matches) <= 1:
            return matches
        error = (AmbiguityError(message="Ambiguous reference to")
                 .add(content=self.name, object=self)
                 .addNote(message="Matches:"))
        for candidate in matches:
            error.addNote(object=candidate)
        raise error

    def verify(self):
        """Resolve this link; raise MissingReferenceError when unresolvable."""
        if self.value is not None:
            return
        matches = self._resolveIdentifier()
        if not matches:
            raise MissingReferenceError(
                message="Missing reference to").add(content=self.name,
                                                   object=self)
        self.value = matches.pop()
        BoundLink.verify(self)

    def verifyAssignment(self, value):
        """Resolve for assignment; infer a new Variable when unresolved."""
        if self.value is None:
            matches = self._resolveIdentifier()
            if len(matches) != 1:
                # Inject a new variable into the enclosing hard scope
                inferred = Variable(self.name, value.resolveType())
                # Make variable have the same tokens. Hack for nicer error messages
                inferred.tokens = self.tokens
                inferred.source = self.source
                self.value = inferred
                raise InferVariable(inferred)
            self.value = matches.pop()
        return BoundLink.verifyAssignment(self, value)

    def __repr__(self):
        return "{}".format(self.name)
|
{
"content_hash": "3feac22952eaf77e5c96cd8be2d8473c",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 130,
"avg_line_length": 31.467741935483872,
"alnum_prop": 0.6217324449000513,
"repo_name": "pektin/jam",
"id": "9eaae8c5aee4b4635c314c276e2d424369e906a6",
"size": "1951",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "compiler/lekvar/identifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "546"
},
{
"name": "Python",
"bytes": "207276"
}
],
"symlink_target": ""
}
|
from io import BytesIO
import sys
import pytest
from pandas.io.msgpack import ExtType, OutOfData, Unpacker, packb
class TestUnpack(object):
    """Tests for pandas.io.msgpack.Unpacker behaviour."""

    def test_unpack_array_header_from_file(self):
        # read_array_header reports the array length without consuming the
        # items; each element is then unpacked individually, and a further
        # unpack past the end raises OutOfData.
        f = BytesIO(packb([1, 2, 3, 4]))
        unpacker = Unpacker(f)
        assert unpacker.read_array_header() == 4
        assert unpacker.unpack() == 1
        assert unpacker.unpack() == 2
        assert unpacker.unpack() == 3
        assert unpacker.unpack() == 4
        msg = "No more data to unpack"
        with pytest.raises(OutOfData, match=msg):
            unpacker.unpack()

    def test_unpacker_hook_refcnt(self):
        # Verify the Unpacker takes references to its hooks and releases
        # them when it is destroyed (CPython refcount semantics).
        if not hasattr(sys, 'getrefcount'):
            pytest.skip('no sys.getrefcount()')
        result = []

        def hook(x):
            result.append(x)
            return x

        basecnt = sys.getrefcount(hook)
        up = Unpacker(object_hook=hook, list_hook=hook)
        # The same function is stored as both object_hook and list_hook,
        # so at least two extra references are held.
        assert sys.getrefcount(hook) >= basecnt + 2
        up.feed(packb([{}]))
        up.feed(packb([{}]))
        assert up.unpack() == [{}]
        assert up.unpack() == [{}]
        # Hooks fire inside-out: the dict first, then the enclosing list.
        assert result == [{}, [{}], {}, [{}]]
        del up
        # Destroying the unpacker must drop its references to the hook.
        assert sys.getrefcount(hook) == basecnt

    def test_unpacker_ext_hook(self):
        # ext_hook lets a subclass translate ExtType payloads; codes the
        # hook does not recognise fall through as plain ExtType values.
        class MyUnpacker(Unpacker):

            def __init__(self):
                super(MyUnpacker, self).__init__(ext_hook=self._hook,
                                                 encoding='utf-8')

            def _hook(self, code, data):
                if code == 1:
                    return int(data)
                else:
                    return ExtType(code, data)

        unpacker = MyUnpacker()
        unpacker.feed(packb({'a': 1}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': 1}
        unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': 123}
        unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': ExtType(2, b'321')}
|
{
"content_hash": "25af92090e60652d3258d9bb4b878531",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 30.134328358208954,
"alnum_prop": 0.524517087667162,
"repo_name": "GuessWhoSamFoo/pandas",
"id": "356156296c067f6097ea24f07a01db10409fa74e",
"size": "2019",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/io/msgpack/test_unpack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "14926624"
},
{
"name": "Shell",
"bytes": "29351"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""Basic Model Interface (BMI) for the Diffusion model."""
import numpy as np
from basic_modeling_interface import Bmi
from .diffusion import Diffusion
class BmiDiffusion(Bmi):
    """BMI for the Diffusion model."""

    _name = 'Diffusion model'
    _input_var_names = ('plate_surface__temperature',)
    _output_var_names = ('plate_surface__temperature',)

    def __init__(self):
        """Create a Diffusion model that's ready for initialization."""
        self._model = None
        self._values = {}
        self._var_units = {}
        self._grids = {}
        self._grid_type = {}

    def initialize(self, filename=None):
        """Initialize the Diffusion model.

        Parameters
        ----------
        filename : str, optional
            Path to name of input file.
        """
        self._model = Diffusion(config_file=filename)
        self._values = {
            'plate_surface__temperature': self._model.temperature,
        }
        self._var_units = {
            'plate_surface__temperature': 'K'
        }
        self._grids = {
            0: ['plate_surface__temperature']
        }
        self._grid_type = {
            0: 'uniform_rectilinear_grid'
        }

    def update(self):
        """Advance model by one time step."""
        self._model.advance()

    def update_frac(self, time_frac):
        """Update model by a fraction of a time step.

        Parameters
        ----------
        time_frac : float
            Fraction of a time step.
        """
        time_step = self.get_time_step()
        self._model.dt = time_frac * time_step
        try:
            self.update()
        finally:
            # ROBUSTNESS FIX: restore the full time step even if update()
            # raises, so a failed fractional step cannot leave the model
            # with a shrunken dt.
            self._model.dt = time_step

    def update_until(self, then):
        """Update model until a particular time.

        Parameters
        ----------
        then : float
            Time to run model until.
        """
        n_steps = (then - self.get_current_time()) / self.get_time_step()
        for _ in range(int(n_steps)):
            self.update()
        # Finish with a fractional step when `then` is not a whole
        # number of steps away.
        if (n_steps - int(n_steps)) > 0.0:
            self.update_frac(n_steps - int(n_steps))

    def finalize(self):
        """Finalize model."""
        self._model = None

    def get_var_type(self, var_name):
        """Data type of variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        str
            Data type.
        """
        return str(self.get_value(var_name).dtype)

    def get_var_units(self, var_name):
        """Get units of variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        str
            Variable units.
        """
        return self._var_units[var_name]

    def get_var_nbytes(self, var_name):
        """Get size of variable data in bytes.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        int
            Size of data array in bytes.
        """
        return self.get_value(var_name).nbytes

    def get_var_grid(self, var_name):
        """Grid id for a variable.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        int
            Grid id, or None when the variable is unknown.
        """
        for grid_id, var_name_list in self._grids.items():
            if var_name in var_name_list:
                return grid_id

    def get_grid_rank(self, grid_id):
        """Rank of grid.

        Parameters
        ----------
        grid_id : int
            Identifier of a grid.

        Returns
        -------
        int
            Rank of grid.
        """
        return len(self.get_grid_shape(grid_id))

    def get_grid_size(self, grid_id):
        """Size of grid.

        Parameters
        ----------
        grid_id : int
            Identifier of a grid.

        Returns
        -------
        int
            Size of grid.
        """
        # FIX: np.prod returns a NumPy scalar; cast to the documented
        # plain Python int.
        return int(np.prod(self.get_grid_shape(grid_id)))

    def get_value_ref(self, var_name):
        """Reference to values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        array_like
            Value array (a flat view on the model's data).
        """
        return self._values[var_name].reshape(-1)

    def get_value(self, var_name):
        """Copy of values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.

        Returns
        -------
        array_like
            Copy of values.
        """
        return self.get_value_ref(var_name).copy()

    def set_value(self, var_name, src):
        """Set model values.

        Parameters
        ----------
        var_name : str
            Name of variable as CSDMS Standard Name.
        src : array_like
            Array of new values.
        """
        val = self.get_value_ref(var_name)
        val[:] = src

    def get_component_name(self):
        """Name of the component."""
        return self._name

    def get_input_var_names(self):
        """Get names of input variables."""
        return self._input_var_names

    def get_output_var_names(self):
        """Get names of output variables."""
        return self._output_var_names

    def get_grid_shape(self, grid_id):
        """Number of columns and rows of uniform rectilinear grid."""
        return (self._model.ny, self._model.nx)

    def get_grid_spacing(self, grid_id):
        """Spacing of columns and rows of uniform rectilinear grid."""
        return (self._model.dy, self._model.dx)

    def get_grid_origin(self, grid_id):
        """Origin of uniform rectilinear grid."""
        return (0.0, 0.0)

    def get_grid_type(self, grid_id):
        """Type of grid."""
        return self._grid_type[grid_id]

    def get_start_time(self):
        """Start time of model."""
        return 0.0

    def get_end_time(self):
        """End time of model."""
        return np.finfo('d').max

    def get_current_time(self):
        """Current time of model."""
        return self._model.time

    def get_time_step(self):
        """Time step of model."""
        return self._model.dt

    def get_time_units(self):
        """Time units of model."""
        return '-'
{
"content_hash": "0673adef79d675a37b22abf25c09be9a",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 73,
"avg_line_length": 24.737373737373737,
"alnum_prop": 0.5019735946644889,
"repo_name": "csdms/bmi-live-2017",
"id": "0b64c9791331f1d8f385cfc72c5d78667ca6cd33",
"size": "7347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".totally-hidden-directory/bmi_diffusion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19863"
},
{
"name": "Python",
"bytes": "9416"
}
],
"symlink_target": ""
}
|
import wpf # Loads required .NET Assemblies behind the scenes
from System.Windows import (GridLength, SizeToContent, TextWrapping, Thickness,
Window, WindowStartupLocation)
from System.Windows.Controls import (Button, ColumnDefinition, Grid, Label, ListBox,
PasswordBox, RowDefinition, TextBlock, TextBox, SelectionMode)
class _WpfDialog(Window):
    """Base class for the WPF dialogs: a message label, an optional
    selector widget supplied by subclasses, and left/right buttons on
    the bottom row.  show() blocks and returns the dialog result."""

    _left_button = 'OK'
    _right_button = 'Cancel'

    def __init__(self, message, value=None, **extra):
        # `value` and `extra` are forwarded to the subclass's
        # _create_selector hook via _create_body.
        self._initialize_dialog()
        self._create_body(message, value, **extra)
        self._create_buttons()
        self._bind_esc_to_close_dialog()
        self._result = None

    def _initialize_dialog(self):
        # Window chrome plus a 2-column / 3-row grid: auto-height label
        # row, auto-height selector row, fixed-height button row.
        self.Title = 'Robot Framework'
        self.SizeToContent = SizeToContent.WidthAndHeight
        self.WindowStartupLocation = WindowStartupLocation.CenterScreen
        self.MinWidth = 300
        self.MinHeight = 100
        self.MaxWidth = 640
        grid = Grid()
        left_column = ColumnDefinition()
        right_column = ColumnDefinition()
        grid.ColumnDefinitions.Add(left_column)
        grid.ColumnDefinitions.Add(right_column)
        label_row = RowDefinition()
        label_row.Height = GridLength.Auto
        selection_row = RowDefinition()
        selection_row.Height = GridLength.Auto
        button_row = RowDefinition()
        button_row.Height = GridLength(50)
        grid.RowDefinitions.Add(label_row)
        grid.RowDefinitions.Add(selection_row)
        grid.RowDefinitions.Add(button_row)
        self.Content = grid

    def _create_body(self, message, value, **extra):
        # Wrapping text block spanning both columns on row 0, then the
        # subclass-provided selector (if any) with initial focus.
        _label = Label()
        textblock = TextBlock()
        textblock.Text = message
        textblock.TextWrapping = TextWrapping.Wrap
        _label.Content = textblock
        _label.Margin = Thickness(10)
        _label.SetValue(Grid.ColumnSpanProperty, 2)
        _label.SetValue(Grid.RowProperty, 0)
        self.Content.AddChild(_label)
        selector = self._create_selector(value, **extra)
        if selector:
            self.Content.AddChild(selector)
            selector.Focus()

    def _create_selector(self, value):
        # Hook for subclasses; the base dialog has no selector widget.
        return None

    def _create_buttons(self):
        # Left button is always present and is the default (Enter key);
        # right button may be disabled by setting _right_button = None.
        self.left_button = self._create_button(self._left_button,
                                               self._left_button_clicked)
        self.left_button.SetValue(Grid.ColumnProperty, 0)
        self.left_button.IsDefault = True
        self.right_button = self._create_button(self._right_button,
                                                self._right_button_clicked)
        if self.right_button:
            self.right_button.SetValue(Grid.ColumnProperty, 1)
            self.Content.AddChild(self.right_button)
            # NOTE(review): ColumnProperty was already set above; the
            # repeated call looks redundant but is kept as-is.
            self.left_button.SetValue(Grid.ColumnProperty, 0)
            self.Content.AddChild(self.left_button)
        else:
            # No right button: the left button spans both columns.
            self.left_button.SetValue(Grid.ColumnSpanProperty, 2)
            self.Content.AddChild(self.left_button)

    def _create_button(self, content, callback):
        # Returns None when `content` is falsy (e.g. _right_button = None).
        if content:
            button = Button()
            button.Margin = Thickness(10)
            button.MaxHeight = 50
            button.MaxWidth = 150
            button.SetValue(Grid.RowProperty, 2)
            button.Content = content
            button.Click += callback
            return button

    def _bind_esc_to_close_dialog(self):
        # There doesn't seem to be easy way to bind esc otherwise than having
        # a cancel button that binds it automatically. We don't always have
        # actual cancel button so need to create one and make it invisible.
        # Cannot actually hide it because it won't work after that so we just
        # make it so small it is not seen.
        button = Button()
        button.IsCancel = True
        button.MaxHeight = 1
        button.MaxWidth = 1
        self.Content.AddChild(button)

    def _left_button_clicked(self, sender, event_args):
        # Only close when the subclass accepts the current value.
        if self._validate_value():
            self._result = self._get_value()
            self._close()

    def _validate_value(self):
        # Subclass hook; by default any state is acceptable.
        return True

    def _get_value(self):
        # Subclass hook; result returned when the left button is pressed.
        return None

    def _close(self):
        self.Close()

    def _right_button_clicked(self, sender, event_args):
        self._result = self._get_right_button_value()
        self._close()

    def _get_right_button_value(self):
        # Subclass hook; result returned when the right button is pressed.
        return None

    def show(self):
        # Blocks until the dialog is closed, then returns the result.
        self.ShowDialog()
        return self._result
class MessageDialog(_WpfDialog):
    """Dialog showing a message with a single OK button."""
    # No Cancel button; closing the dialog returns None.
    _right_button = None
class InputDialog(_WpfDialog):
    """Dialog asking the user to type a value, optionally masked."""

    def __init__(self, message, default='', hidden=False):
        """`default` pre-fills the entry; `hidden` uses a PasswordBox."""
        _WpfDialog.__init__(self, message, default, hidden=hidden)

    def _create_selector(self, default, hidden):
        # PasswordBox masks the typed characters; TextBox shows them.
        if hidden:
            self._entry = PasswordBox()
            self._entry.Password = default if default else ''
        else:
            self._entry = TextBox()
            self._entry.Text = default if default else ''
        self._entry.SetValue(Grid.RowProperty, 1)
        self._entry.SetValue(Grid.ColumnSpanProperty, 2)
        # BUG FIX: the original set `self.Margin` (the Window's margin,
        # which has no layout effect on a top-level window) instead of
        # the entry's margin.  Sibling dialogs (SelectionDialog) set the
        # selector widget's margin, so do the same here.
        self._entry.Margin = Thickness(10)
        self._entry.Height = 30
        self._entry.Width = 150
        self._entry.SelectAll()
        return self._entry

    def _get_value(self):
        # TextBox exposes .Text; PasswordBox only has .Password.
        try:
            return self._entry.Text
        except AttributeError:
            return self._entry.Password
class SelectionDialog(_WpfDialog):
    """Dialog for picking exactly one value from a list."""

    def __init__(self, message, values):
        _WpfDialog.__init__(self, message, values)

    def _create_selector(self, values):
        # Single-selection list box on row 1, spanning both columns.
        listbox = ListBox()
        listbox.SetValue(Grid.RowProperty, 1)
        listbox.SetValue(Grid.ColumnSpanProperty, 2)
        listbox.Margin = Thickness(10)
        for value in values:
            listbox.Items.Add(value)
        self._listbox = listbox
        return listbox

    def _validate_value(self):
        # OK is only accepted once something is selected.
        return bool(self._listbox.SelectedItem)

    def _get_value(self):
        return self._listbox.SelectedItem
class MultipleSelectionDialog(_WpfDialog):
    """Dialog for picking any number of values from a list."""

    def __init__(self, message, values):
        _WpfDialog.__init__(self, message, values)

    def _create_selector(self, values):
        # Multi-selection list box on row 1, spanning both columns.
        listbox = ListBox()
        listbox.SelectionMode = SelectionMode.Multiple
        listbox.SetValue(Grid.RowProperty, 1)
        listbox.SetValue(Grid.ColumnSpanProperty, 2)
        listbox.Margin = Thickness(10)
        for value in values:
            listbox.Items.Add(value)
        self._listbox = listbox
        return listbox

    def _get_value(self):
        # Return selections in the original item order, not the order
        # in which the user clicked them.
        ordering = list(self._listbox.Items).index
        return sorted(self._listbox.SelectedItems, key=ordering)
class PassFailDialog(_WpfDialog):
    # Two-button dialog: show() returns True when PASS (left) is clicked,
    # False when FAIL (right) is clicked.
    _left_button = 'PASS'
    _right_button = 'FAIL'
    def _get_value(self):
        # Result for the left (PASS) button.
        return True
    def _get_right_button_value(self):
        # Result for the right (FAIL) button.
        return False
|
{
"content_hash": "304b18b06815ea2cb5f2cb33c7c22710",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 99,
"avg_line_length": 33.470873786407765,
"alnum_prop": 0.6110224800580131,
"repo_name": "robotframework/RIDE",
"id": "39d1b9e2d3f40e5a191a8cdfdae068962b43a6ca",
"size": "7539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/libraries/dialogs_ipy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's C{batchSize}
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
from itertools import izip as zip, imap as map
else:
import pickle
xrange = range
pickle_protocol = pickle.HIGHEST_PROTOCOL
from pyspark import cloudpickle
from pyspark.util import _exception_message
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    # Negative sentinel codes written on the wire in place of a real frame
    # length (see FramedSerializer._read_with_length / UTF8Deserializer.loads).
    END_OF_DATA_SECTION = -1
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5
    START_ARROW_STREAM = -6
class Serializer(object):
    """Abstract base for all serializers: defines the stream API and equality."""

    def dump_stream(self, iterator, stream):
        """
        Serialize an iterator of objects to the output stream.
        """
        raise NotImplementedError

    def load_stream(self, stream):
        """
        Return an iterator of deserialized objects from the input stream.
        """
        raise NotImplementedError

    def _load_stream_without_unbatching(self, stream):
        """
        Return an iterator of deserialized batches (iterable) of objects from the input stream.
        If the serializer does not operate on batches the default implementation returns an
        iterator of single element lists.
        """
        for obj in self.load_stream(stream):
            yield [obj]

    # Note: our notion of "equality" is that output generated by
    # equal serializers can be deserialized using the same serializer.
    # This default implementation handles the simple cases;
    # subclasses should override __eq__ as appropriate.
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __hash__(self):
        return hash(str(self))
class FramedSerializer(Serializer):
    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where C{length} is a 32-bit integer and data is C{length} bytes.
    """

    def __init__(self):
        # On Python 2.6, we can't write bytearrays to streams, so we need to convert them
        # to strings first. Check if the version number is that old.
        self._only_write_strings = sys.version_info[0:2] <= (2, 6)

    def dump_stream(self, iterator, stream):
        """Write each object in *iterator* to *stream* as a framed record."""
        for obj in iterator:
            self._write_with_length(obj, stream)

    def load_stream(self, stream):
        """Yield deserialized objects until the stream is exhausted."""
        while True:
            try:
                yield self._read_with_length(stream)
            except EOFError:
                return

    def _write_with_length(self, obj, stream):
        serialized = self.dumps(obj)
        if serialized is None:
            raise ValueError("serialized value should not be None")
        # Bug fix: the frame length is written with struct format "!i"
        # (signed 32-bit, max 2**31 - 1), so a payload of exactly 1 << 31
        # must be rejected here too; the old `>` check let it through and
        # write_int then failed with an opaque struct.error.
        if len(serialized) >= (1 << 31):
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(serialized), stream)
        if self._only_write_strings:
            stream.write(str(serialized))
        else:
            stream.write(serialized)

    def _read_with_length(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        obj = stream.read(length)
        if len(obj) < length:
            # Truncated stream: fewer payload bytes than the header promised.
            raise EOFError
        return self.loads(obj)

    def dumps(self, obj):
        """
        Serialize an object into a byte array.
        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError

    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class ArrowCollectSerializer(Serializer):
    """
    Deserialize a stream of batches followed by batch order information. Used in
    DataFrame._collectAsArrow() after invoking Dataset.collectAsArrowToPython() in the JVM.
    """
    def __init__(self):
        self.serializer = ArrowStreamSerializer()
    def dump_stream(self, iterator, stream):
        # Writing is delegated wholesale to the Arrow stream serializer.
        return self.serializer.dump_stream(iterator, stream)
    def load_stream(self, stream):
        """
        Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
        a list of indices that can be used to put the RecordBatches in the correct order.
        """
        # First the batches themselves...
        for record_batch in self.serializer.load_stream(stream):
            yield record_batch
        # ...then the ordering information: a count followed by that many indices.
        count = read_int(stream)
        yield [read_int(stream) for _ in xrange(count)]
    def __repr__(self):
        return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
    """
    Serializes Arrow record batches as a stream.
    """
    def dump_stream(self, iterator, stream):
        import pyarrow as pa
        writer = None
        try:
            for batch in iterator:
                if writer is None:
                    # Writer is created lazily so the schema can be taken
                    # from the first batch.
                    writer = pa.RecordBatchStreamWriter(stream, batch.schema)
                writer.write_batch(batch)
        finally:
            # Close the writer even if iteration fails part-way.
            if writer is not None:
                writer.close()
    def load_stream(self, stream):
        import pyarrow as pa
        reader = pa.ipc.open_stream(stream)
        for batch in reader:
            yield batch
    def __repr__(self):
        return "ArrowStreamSerializer"
def _create_batch(series, timezone, safecheck):
    """
    Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
    :param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
    :param timezone: A timezone to respect when handling timestamp values
    :param safecheck: passed to pyarrow's Array.from_pandas(safe=...) on the default path
    :return: Arrow RecordBatch
    """
    import decimal
    from distutils.version import LooseVersion
    import pyarrow as pa
    from pyspark.sql.types import _check_series_convert_timestamps_internal
    # Make input conform to [(series1, type1), (series2, type2), ...]
    # A lone series, or a single (series, type) pair, becomes a one-element list.
    if not isinstance(series, (list, tuple)) or \
            (len(series) == 2 and isinstance(series[1], pa.DataType)):
        series = [series]
    series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
    def create_array(s, t):
        # Nulls are conveyed to Arrow through an explicit mask.
        mask = s.isnull()
        # Ensure timestamp series are in expected form for Spark internal representation
        # TODO: maybe don't need None check anymore as of Arrow 0.9.1
        if t is not None and pa.types.is_timestamp(t):
            s = _check_series_convert_timestamps_internal(s.fillna(0), timezone)
            # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
            return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
        elif t is not None and pa.types.is_string(t) and sys.version < '3':
            # TODO: need decode before converting to Arrow in Python 2
            # TODO: don't need as of Arrow 0.9.1
            return pa.Array.from_pandas(s.apply(
                lambda v: v.decode("utf-8") if isinstance(v, str) else v), mask=mask, type=t)
        elif t is not None and pa.types.is_decimal(t) and \
                LooseVersion("0.9.0") <= LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
            # TODO: see ARROW-2432. Remove when the minimum PyArrow version becomes 0.10.0.
            return pa.Array.from_pandas(s.apply(
                lambda v: decimal.Decimal('NaN') if v is None else v), mask=mask, type=t)
        elif LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
            # TODO: see ARROW-1949. Remove when the minimum PyArrow version becomes 0.11.0.
            return pa.Array.from_pandas(s, mask=mask, type=t)
        # Default path (pyarrow >= 0.11): honor the configured safe-conversion flag.
        try:
            array = pa.Array.from_pandas(s, mask=mask, type=t, safe=safecheck)
        except pa.ArrowException as e:
            error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \
                        "Array (%s). It can be caused by overflows or other unsafe " + \
                        "conversions warned by Arrow. Arrow safe type check can be " + \
                        "disabled by using SQL config " + \
                        "`spark.sql.execution.pandas.arrowSafeTypeConversion`."
            raise RuntimeError(error_msg % (s.dtype, t), e)
        return array
    arrs = [create_array(s, t) for s, t in series]
    # Columns get positional placeholder names "_0", "_1", ...
    return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
    """
    Serializes Pandas.Series as Arrow data with Arrow streaming format.
    :param timezone: timezone applied when localizing timestamp columns on load
    :param safecheck: whether Arrow safe type conversion is used when creating batches
    """
    def __init__(self, timezone, safecheck):
        super(ArrowStreamPandasSerializer, self).__init__()
        self._timezone = timezone
        self._safecheck = safecheck
    def arrow_to_pandas(self, arrow_column):
        # Convert one Arrow column to a pandas.Series and localize timestamps.
        from pyspark.sql.types import from_arrow_type, \
            _arrow_column_to_pandas, _check_series_localize_timestamps
        s = _arrow_column_to_pandas(arrow_column, from_arrow_type(arrow_column.type))
        s = _check_series_localize_timestamps(s, self._timezone)
        return s
    def dump_stream(self, iterator, stream):
        """
        Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
        a list of series accompanied by an optional pyarrow type to coerce the data to.
        """
        import pyarrow as pa
        writer = None
        try:
            for series in iterator:
                batch = _create_batch(series, self._timezone, self._safecheck)
                if writer is None:
                    # Emit the START_ARROW_STREAM marker before the first
                    # batch so the reader can detect where the stream begins.
                    write_int(SpecialLengths.START_ARROW_STREAM, stream)
                    writer = pa.RecordBatchStreamWriter(stream, batch.schema)
                writer.write_batch(batch)
        finally:
            if writer is not None:
                writer.close()
    def load_stream(self, stream):
        """
        Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
        """
        import pyarrow as pa
        reader = pa.ipc.open_stream(stream)
        for batch in reader:
            yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
    def __repr__(self):
        return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
    """
    Serializes a stream of objects in batches by calling its wrapped
    Serializer with streams of objects.
    """
    UNLIMITED_BATCH_SIZE = -1
    UNKNOWN_BATCH_SIZE = 0
    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        self.serializer = serializer
        self.batchSize = batchSize
    def _batched(self, iterator):
        """Yield lists of at most ``batchSize`` items taken from *iterator*."""
        if self.batchSize == self.UNLIMITED_BATCH_SIZE:
            yield list(iterator)
        elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
            # Sliceable sequences (Python 2 only) are chunked directly.
            total = len(iterator)
            for start in xrange(0, total, self.batchSize):
                yield iterator[start: start + self.batchSize]
        else:
            batch = []
            for item in iterator:
                batch.append(item)
                if len(batch) == self.batchSize:
                    yield batch
                    batch = []
            if batch:
                yield batch
    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)
    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
    """
    Serializes a stream of list of pairs, split the list of values
    which contain more than a certain number of objects to make them
    have similar sizes.
    """
    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)
    def _batched(self, iterator):
        # Re-emit each (key, values) pair as several pairs whose value
        # lists hold at most batchSize elements.
        limit = self.batchSize
        for key, values in iterator:
            for start in range(0, len(values), limit):
                yield key, values[start:start + limit]
    def load_stream(self, stream):
        return self.serializer.load_stream(stream)
    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
    """
    Choose the size of batch automatically based on the size of object
    """
    def __init__(self, serializer, bestSize=1 << 16):
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        self.bestSize = bestSize
    def dump_stream(self, iterator, stream):
        # Start with single-element batches and adapt: double while the
        # serialized size stays under the target, halve when it balloons.
        # (Also renames the local that shadowed the builtin `bytes`.)
        batch_size = 1
        target = self.bestSize
        source = iter(iterator)
        while True:
            group = list(itertools.islice(source, batch_size))
            if not group:
                break
            payload = self.serializer.dumps(group)
            write_int(len(payload), stream)
            stream.write(payload)
            size = len(payload)
            if size < target:
                batch_size *= 2
            elif size > target * 10 and batch_size > 1:
                batch_size //= 2
    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
    we additionally need to do the cartesian within each pair of batches.
    """
    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser
    def _load_stream_without_unbatching(self, stream):
        key_batches = self.key_ser._load_stream_without_unbatching(stream)
        val_batches = self.val_ser._load_stream_without_unbatching(stream)
        for key_batch, val_batch in zip(key_batches, val_batches):
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield product(key_batch, val_batch)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip,
    we additionally need to do the zip within each pair of batches.
    """
    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser
    def _load_stream_without_unbatching(self, stream):
        key_batches = self.key_ser._load_stream_without_unbatching(stream)
        val_batches = self.val_ser._load_stream_without_unbatching(stream)
        for key_batch, val_batch in zip(key_batches, val_batches):
            # For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
            # instead of lists. We need to convert them to lists if needed.
            if not hasattr(key_batch, '__len__'):
                key_batch = list(key_batch)
            if not hasattr(val_batch, '__len__'):
                val_batch = list(val_batch)
            if len(key_batch) != len(val_batch):
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield zip(key_batch, val_batch)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    # Pass-through framed serializer: payloads are already byte strings,
    # only the length framing from FramedSerializer is added/consumed.
    def loads(self, obj):
        return obj
    def dumps(self, obj):
        return obj
# Hack namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
    """ Restore an object of namedtuple"""
    key = (name, fields)
    klass = __cls.get(key)
    if klass is None:
        # Cache generated classes so repeated unpickling reuses one type.
        klass = collections.namedtuple(name, fields)
        __cls[key] = klass
    return klass(*value)
def _hack_namedtuple(cls):
    """ Make class generated by namedtuple picklable """
    name = cls.__name__
    fields = cls._fields
    # Close over name/fields so pickling does not need the class object itself;
    # _restore recreates (or reuses) the class on load.
    def __reduce__(self):
        return (_restore, (name, fields, tuple(self)))
    cls.__reduce__ = __reduce__
    # Marker attribute; presumably checked elsewhere to detect patched
    # namedtuple classes — TODO confirm against callers.
    cls._is_namedtuple_ = True
    return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return
    global _old_namedtuple  # or it will put in closure
    global _old_namedtuple_kwdefaults  # or it will put in closure too
    def _copy_func(f):
        # Clone the function object so the original namedtuple survives the patch.
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)
    def _kwdefaults(f):
        # __kwdefaults__ contains the default values of keyword-only arguments which are
        # introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
        # are as below:
        #
        # - Does not exist in Python 2.
        # - Returns None in <= Python 3.5.x.
        # - Returns a dictionary containing the default values to the keys from Python 3.6.x
        #   (See https://bugs.python.org/issue25628).
        kargs = getattr(f, "__kwdefaults__", None)
        if kargs is None:
            return {}
        else:
            return kargs
    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
    def namedtuple(*args, **kwargs):
        # Re-apply the captured keyword-only defaults, delegate to the
        # original implementation, then patch the resulting class.
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)
    # replace namedtuple with the new one
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1
    # hack the cls already generated by namedtuple.
    # Those created in other modules can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o)  # hack inplace
# Patch runs once at import time.
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
    """
    Serializes objects using Python's pickle serializer:
    http://docs.python.org/2/library/pickle.html
    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """
    def dumps(self, obj):
        return pickle.dumps(obj, pickle_protocol)
    # loads is defined per Python major version at class-creation time.
    if sys.version >= '3':
        # encoding="bytes" lets Python 3 load byte strings pickled by Python 2.
        def loads(self, obj, encoding="bytes"):
            return pickle.loads(obj, encoding=encoding)
    else:
        # encoding is accepted but ignored on Python 2 (keeps the API symmetric).
        def loads(self, obj, encoding=None):
            return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
    # Uses cloudpickle, which extends pickle to functions/closures; only
    # dumps differs from PickleSerializer.
    def dumps(self, obj):
        try:
            return cloudpickle.dumps(obj, pickle_protocol)
        except pickle.PickleError:
            # Pickle-specific failures are re-raised unchanged for callers.
            raise
        except Exception as e:
            emsg = _exception_message(e)
            # "'i' format requires" comes from struct packing — presumably a
            # length overflow, i.e. the object serialized to more than 2G.
            if "'i' format requires" in emsg:
                msg = "Object too large to serialize: %s" % emsg
            else:
                msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
            cloudpickle.print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
    """
    Serializes objects using Python's Marshal serializer:
    http://docs.python.org/2/library/marshal.html
    This serializer is faster than PickleSerializer but supports fewer datatypes.
    """
    def dumps(self, obj):
        return marshal.dumps(obj)
    def loads(self, obj):
        return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
    """
    Choose marshal or pickle as serialization protocol automatically.
    Each payload is tagged with a leading b'M' (marshal) or b'P' (pickle);
    after marshal fails once, pickle is used for everything that follows.
    """

    def __init__(self):
        FramedSerializer.__init__(self)
        self._type = None  # None until marshal fails once, then b'P'

    def dumps(self, obj):
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            # marshal only handles core built-in types; fall back to pickle
            # permanently for this serializer instance.
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)

    def loads(self, obj):
        # Bug fix: slice (rather than index) the tag byte. On Python 3,
        # obj[0] is an int, which never compares equal to b'M'/b'P', so every
        # load fell through to ValueError. obj[0:1] is bytes on both 2 and 3.
        _type = obj[0:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
    """
    Compress the serialized data
    """
    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        self.serializer = serializer
    def dumps(self, obj):
        # zlib level 1: fastest compression setting.
        raw = self.serializer.dumps(obj)
        return zlib.compress(raw, 1)
    def loads(self, obj):
        raw = zlib.decompress(obj)
        return self.serializer.loads(raw)
    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
    """
    Deserializes streams written by String.getBytes.
    """
    def __init__(self, use_unicode=True):
        self.use_unicode = use_unicode
    def loads(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        if length == SpecialLengths.NULL:
            return None
        data = stream.read(length)
        if self.use_unicode:
            return data.decode("utf-8")
        return data
    def load_stream(self, stream):
        try:
            while True:
                yield self.loads(stream)
        except (struct.error, EOFError):
            # Stream exhausted (or truncated inside a length header).
            return
    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer from *stream*."""
    raw = stream.read(8)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!q", raw)
    return value
def write_long(value, stream):
    """Write *value* to *stream* as a big-endian signed 64-bit integer."""
    packed = struct.pack("!q", value)
    stream.write(packed)
def pack_long(value):
    # Pack *value* as big-endian signed 64-bit bytes ("!q").
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer from *stream*."""
    raw = stream.read(4)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!i", raw)
    return value
def write_int(value, stream):
    """Write *value* to *stream* as a big-endian signed 32-bit integer."""
    packed = struct.pack("!i", value)
    stream.write(packed)
def read_bool(stream):
    """Read one byte from *stream* and interpret it as a boolean."""
    raw = stream.read(1)
    if not raw:
        raise EOFError
    (flag,) = struct.unpack("!?", raw)
    return flag
def write_with_length(obj, stream):
    # Frame *obj* (bytes-like) with a leading 32-bit length prefix.
    write_int(len(obj), stream)
    stream.write(obj)
class ChunkedStream(object):
    """
    This is a file-like object takes a stream of data, of unknown length, and breaks it into fixed
    length frames. The intended use case is serializing large data and sending it immediately over
    a socket -- we do not want to buffer the entire data before sending it, but the receiving end
    needs to know whether or not there is more data coming.
    It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
    first sends the buffer size, then the data. This repeats as long as there is more data to send.
    When this is closed, it sends the length of whatever data is in the buffer, then that data, and
    finally a "length" of -1 to indicate the stream has completed.
    """
    def __init__(self, wrapped, buffer_size):
        self.buffer_size = buffer_size
        self.buffer = bytearray(buffer_size)
        self.current_pos = 0  # number of valid bytes currently in self.buffer
        self.wrapped = wrapped
    def write(self, bytes):
        # NOTE: parameter name shadows the builtin `bytes` (kept for API parity).
        byte_pos = 0  # read cursor into the incoming data
        byte_remaining = len(bytes)
        while byte_remaining > 0:
            new_pos = byte_remaining + self.current_pos
            if new_pos < self.buffer_size:
                # just put it in our buffer
                self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
                self.current_pos = new_pos
                byte_remaining = 0
            else:
                # fill the buffer, send the length then the contents, and start filling again
                space_left = self.buffer_size - self.current_pos
                new_byte_pos = byte_pos + space_left
                self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
                write_int(self.buffer_size, self.wrapped)
                self.wrapped.write(self.buffer)
                byte_remaining -= space_left
                byte_pos = new_byte_pos
                self.current_pos = 0
    def close(self):
        # if there is anything left in the buffer, write it out first
        if self.current_pos > 0:
            write_int(self.current_pos, self.wrapped)
            self.wrapped.write(self.buffer[:self.current_pos])
        # -1 length indicates to the receiving end that we're done.
        write_int(-1, self.wrapped)
        self.wrapped.close()
    @property
    def closed(self):
        """
        Return True if the `wrapped` object has been closed.
        NOTE: this property is required by pyarrow to be used as a file-like object in
        pyarrow.RecordBatchStreamWriter from ArrowStreamSerializer
        """
        return self.wrapped.closed
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring; non-zero exit on failure.
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)
|
{
"content_hash": "592df562babccc7a34301c9f825c2f40",
"timestamp": "",
"source": "github",
"line_count": 826,
"max_line_length": 100,
"avg_line_length": 33.50726392251816,
"alnum_prop": 0.617516349315316,
"repo_name": "WindCanDie/spark",
"id": "a2c59fedfc8cdec8383c89217dda9e5f1bbb77c5",
"size": "28462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "35161"
},
{
"name": "Batchfile",
"bytes": "30468"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26884"
},
{
"name": "Dockerfile",
"bytes": "8760"
},
{
"name": "HTML",
"bytes": "70197"
},
{
"name": "HiveQL",
"bytes": "1823426"
},
{
"name": "Java",
"bytes": "3428135"
},
{
"name": "JavaScript",
"bytes": "196704"
},
{
"name": "Makefile",
"bytes": "9397"
},
{
"name": "PLpgSQL",
"bytes": "191716"
},
{
"name": "PowerShell",
"bytes": "3856"
},
{
"name": "Python",
"bytes": "2858499"
},
{
"name": "R",
"bytes": "1168957"
},
{
"name": "Roff",
"bytes": "15669"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "28234516"
},
{
"name": "Shell",
"bytes": "202816"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
}
|
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
class KeypoolRestoreTest(PivxTestFramework):
    """Verify that a restored HD wallet backup regenerates its keypool and finds its funds."""
    def set_test_params(self):
        # Node 0 funds the test; node 1 (keypool=100) is the node under test.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-keypool=3'], ['-keypool=100']]
    def run_test(self):
        self.tmpdir = self.options.tmpdir
        # Mature 100 coinbase outputs so node0 has spendable funds.
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.log.info("Generate keys for wallet")
        # Draw 90 keys (the last is inside the backup's initial keypool of 100),
        # then 20 more (the last is beyond it and must be re-derived on restore).
        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()
        self.log.info("Send funds to wallet")
        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.log.info("Restart node with wallet backup")
        self.stop_node(1)
        # NOTE(review): the module docstring says the datadir is cleared before
        # restoring the backup, but only the wallet file is overwritten here —
        # confirm whether clearing is intended.
        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
        self.log.info("Verify keypool is restored and balance is correct")
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        # 10 + 5 coins sent above must both be found after the restore.
        assert_equal(self.nodes[1].getbalance(), 15)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
        # Check that we have marked all keys up to the used keypool key as used
        assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")
if __name__ == '__main__':
    # Entry point when the functional test is run directly.
    KeypoolRestoreTest().main()
|
{
"content_hash": "f1509316492cc5f2834495e719fbc084",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 164,
"avg_line_length": 35.642857142857146,
"alnum_prop": 0.6541082164328658,
"repo_name": "martexcoin/martexcoin",
"id": "e36a46930537c943f14d25e2181d10bde77d17fe",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_keypool_topup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "6549796"
},
{
"name": "C++",
"bytes": "5425220"
},
{
"name": "CMake",
"bytes": "12720"
},
{
"name": "CSS",
"bytes": "184584"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "205547"
},
{
"name": "Makefile",
"bytes": "121633"
},
{
"name": "Objective-C++",
"bytes": "6690"
},
{
"name": "Python",
"bytes": "1023906"
},
{
"name": "QMake",
"bytes": "26119"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "35318"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
    """QGIS plugin factory: return the plugin instance bound to *iface*."""
    # load AccAssess class from file AccAssess
    from accassess import AccAssess
    return AccAssess(iface)
|
{
"content_hash": "f5c57562577c9543e4cf094f824a1e2e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 44.73684210526316,
"alnum_prop": 0.4164705882352941,
"repo_name": "jkibele/acc-assess",
"id": "754673d7dc6470af50c136558937288c34038cb3",
"size": "1348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53978"
},
{
"name": "Shell",
"bytes": "4124"
}
],
"symlink_target": ""
}
|
from common import BaseTest
class DynamodbTest(BaseTest):
    """Policy tests for the dynamodb-table resource (replayed AWS flight data)."""
    def test_resources(self):
        # Replays a recorded AWS session; no live API calls are made.
        session_factory = self.replay_flight_data('test_dynamodb_table')
        p = self.load_policy(
            {'name': 'tables',
             'resource': 'dynamodb-table'},
            session_factory=session_factory)
        resources = p.run()
        # The recording contains exactly one ACTIVE table named 'rolltop'.
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['TableName'], 'rolltop')
        self.assertEqual(resources[0]['TableStatus'], 'ACTIVE')
    def test_invoke_action(self):
        # Same resource query, but with an invoke-lambda action attached.
        session_factory = self.replay_flight_data(
            'test_dynamodb_invoke_action')
        p = self.load_policy(
            {'name': 'tables',
             'resource': 'dynamodb-table',
             'actions': [
                 {'type': 'invoke-lambda',
                  'function': 'process_resources'}
             ]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
|
{
"content_hash": "17706518938b9d5f65c71d3de1ba0a57",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.561,
"repo_name": "stevenmjo/cloud-custodian",
"id": "3200455001792b97008642271ba9b00d74a1535c",
"size": "1585",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_dynamodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1177"
},
{
"name": "Python",
"bytes": "718241"
}
],
"symlink_target": ""
}
|
"""
This script transfers pages from a source wiki to a target wiki.
It also copies edit history to a subpage.
-tolang: The target site code.
-tosite: The target site family.
-prefix: Page prefix on the new site.
-overwrite: Existing pages are skipped by default. Use this option to
overwrite pages.
Internal links are *not* repaired!
Pages to work on can be specified using any of:
¶ms;
Example commands:
# Transfer all pages in category "Query service" from the Toolserver wiki to
# wikitech, adding Nova_Resource:Tools/Tools/ as prefix
transferbot.py -v -family:toolserver -tofamily:wikitech -cat:"Query service" -prefix:Nova_Resource:Tools/Tools/
# Copy the template "Query service" from the Toolserver wiki to wikitech
transferbot.py -v -family:toolserver -tofamily:wikitech -page:"Template:Query service"
"""
#
# (C) Merlijn van Deen, 2014
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
class WikiTransferException(Exception):

    """Base class for exceptions from this script.

    Lets callers catch every expected failure of the transfer script with
    a single except clause.
    """
class TargetSiteMissing(WikiTransferException):

    """Raised when the target site equals the source site.

    Given how both sites are initialized, this usually means the target
    site was simply never specified on the command line.
    """
class TargetPagesMissing(WikiTransferException):

    """Raised when no page range was given for the script to operate on."""
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    @raise TargetSiteMissing: the computed target site equals the source.
    @raise TargetPagesMissing: no page generator was specified.
    """
    local_args = pywikibot.handle_args(args)

    fromsite = pywikibot.Site()
    # Defaults: same language/family as the source; overridden by options.
    tolang = fromsite.code
    tofamily = fromsite.family.name
    prefix = ''
    overwrite = False
    gen_args = []

    genFactory = pagegenerators.GeneratorFactory()
    for arg in local_args:
        # Generator options (-cat:, -page:, ...) are consumed by the factory.
        if genFactory.handleArg(arg):
            gen_args.append(arg)
            continue
        if arg.startswith('-tofamily'):
            tofamily = arg[len('-tofamily:'):]
        elif arg.startswith('-tolang'):
            tolang = arg[len('-tolang:'):]
        elif arg.startswith('-prefix'):
            prefix = arg[len('-prefix:'):]
        elif arg == "-overwrite":
            overwrite = True

    tosite = pywikibot.Site(tolang, tofamily)
    if fromsite == tosite:
        raise TargetSiteMissing('Target site not different from source site')

    gen = genFactory.getCombinedGenerator()
    if not gen:
        raise TargetPagesMissing('Target pages not specified')

    gen_args = ' '.join(gen_args)
    pywikibot.output(u"""
Page transfer configuration
---------------------------
Source: %(fromsite)r
Target: %(tosite)r
Pages to transfer: %(gen_args)s
Prefix for transferred pages: %(prefix)s
""" % locals())

    for page in gen:
        summary = "Moved page from %s" % page.title(asLink=True)
        targetpage = pywikibot.Page(tosite, prefix + page.title())
        edithistpage = pywikibot.Page(
            tosite, prefix + page.title() + '/edithistory')

        if targetpage.exists() and not overwrite:
            pywikibot.output(
                u"Skipped %s (target page %s exists)" % (
                    page.title(asLink=True),
                    targetpage.title(asLink=True)
                )
            )
            continue

        pywikibot.output(u"Moving %s to %s..."
                         % (page.title(asLink=True),
                            targetpage.title(asLink=True)))

        pywikibot.log("Getting page text.")
        text = page.get(get_redirect=True)
        # BUG FIX: "It's" -> possessive "Its" in the notice appended to the
        # transferred page.
        text += ("<noinclude>\n\n<small>This page was moved from %s. Its "
                 "edit history can be viewed at %s</small></noinclude>") % (
            page.title(asLink=True, insite=targetpage.site),
            edithistpage.title(asLink=True, insite=targetpage.site))

        pywikibot.log("Getting edit history.")
        historytable = page.getVersionHistoryTable()

        pywikibot.log("Putting page text.")
        targetpage.put(text, summary=summary)

        pywikibot.log("Putting edit history.")
        edithistpage.put(historytable, summary=summary)
if __name__ == "__main__":
try:
main()
except TargetSiteMissing as e:
pywikibot.error(u'Need to specify a target site and/or language')
pywikibot.error(u'Try running this script with -help for help/usage')
pywikibot.exception()
except TargetPagesMissing as e:
pywikibot.error(u'Need to specify a page range')
pywikibot.error(u'Try running this script with -help for help/usage')
pywikibot.exception()
|
{
"content_hash": "2d1c86baf376571539eecd541ae19d0c",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 130,
"avg_line_length": 28.71590909090909,
"alnum_prop": 0.6379105658884052,
"repo_name": "emijrp/pywikibot-core",
"id": "239a991db4a52b15b91c70162f541c0c8d2beb21",
"size": "5097",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/transferbot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3691866"
}
],
"symlink_target": ""
}
|
"""
This package contains algorithms for extracting document representations from their raw
bag-of-words counts.
"""
# bring model classes directly into package namespace, to save some typing
from .hdpmodel import HdpModel
from .ldamodel import LdaModel
from .lsimodel import LsiModel
from .tfidfmodel import TfidfModel
from .rpmodel import RpModel
from .logentropy_model import LogEntropyModel
from .word2vec import Word2Vec
from .doc2vec import Doc2Vec
from .ldamulticore import LdaMulticore
from .phrases import Phrases
from . import wrappers
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
    """
    Remap feature ids to new values.

    Given a mapping between old ids and new ids (old ids missing from the
    mapping are discarded), wrap a corpus so that iterating over
    `VocabTransform[corpus]` yields the same vectors but with the new ids.

    Old features that have no counterpart in the new ids are discarded. This
    can be used to filter vocabulary of a corpus "online"::

    >>> old2new = dict((oldid, newid) for newid, oldid in enumerate(ids_you_want_to_keep))
    >>> vt = VocabTransform(old2new)
    >>> for vec_with_new_ids in vt[corpus_with_old_ids]:
    >>>     ...

    """

    def __init__(self, old2new, id2token=None):
        self.old2new = old2new
        self.id2token = id2token

    def __getitem__(self, bow):
        """
        Return representation with the ids transformed.
        """
        # A whole corpus was passed in: wrap it so each document is
        # transformed lazily on iteration.
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)

        mapping = self.old2new
        remapped = [(mapping[old_id], weight)
                    for old_id, weight in bow
                    if old_id in mapping]
        return sorted(remapped)
|
{
"content_hash": "e4ada9bc533ba0e38b7942b318399472",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 102,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.7072916666666667,
"repo_name": "TitasNandi/Summer_Project",
"id": "dc028e24fef4e4bcff11b3b370feb6e2f9232fd2",
"size": "1920",
"binary": false,
"copies": "49",
"ref": "refs/heads/master",
"path": "gensim-0.12.4/build/lib.linux-x86_64-2.7/gensim/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "291"
},
{
"name": "Batchfile",
"bytes": "119331"
},
{
"name": "C",
"bytes": "620"
},
{
"name": "CSS",
"bytes": "746530"
},
{
"name": "Groff",
"bytes": "8389332"
},
{
"name": "Groovy",
"bytes": "887"
},
{
"name": "HTML",
"bytes": "84461564"
},
{
"name": "Java",
"bytes": "22523686"
},
{
"name": "JavaScript",
"bytes": "1254038"
},
{
"name": "Jupyter Notebook",
"bytes": "413571"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Perl",
"bytes": "15597"
},
{
"name": "Python",
"bytes": "1888984"
},
{
"name": "Shell",
"bytes": "177570"
},
{
"name": "XQuery",
"bytes": "11244"
},
{
"name": "XSLT",
"bytes": "390648"
}
],
"symlink_target": ""
}
|
import NeuralNetwork as nn
import numpy as np
import PIL.Image
import os
import os.path
import json
import io
import datetime
import math
# matplotlib is an optional dependency: it is only needed for the weight
# visualizations, so degrade gracefully when it is not installed.
try:
    import matplotlib.pyplot as plt
    USE_MATPLOTLIB = True
except ImportError:
    plt = None
    USE_MATPLOTLIB = False

# When True, create_test_data prints each file path as it is processed.
DEBUG_ENABLED = False
def main():
    """Prompt for a mode and dispatch to training or hypothesis."""
    valid_choices = {'T', 'H'}
    choice = ''
    # Keep prompting until the user picks one of the supported modes.
    while choice not in valid_choices:
        choice = input('(H)ypothesis or (T)rain? ')
    if choice == 'T':
        img_res_side = 60
        network = nn.NeuralNetwork([img_res_side ** 2, 900, 400, 25, 1])
        train_data(network, img_res_side, output_path='data/trained_params')
    elif choice == 'H':
        hypothesis_main(True)
def hypothesis_main(show_img=False):
    """Interactively pick a saved parameter file and run the hypothesis.

    :param show_img: when True (and matplotlib is available), also display
        visualizations of the network's learned weights.
    """
    default_params_dir = 'data/trained_params/'
    file_list = get_file_list(default_params_dir, '.json')
    print('Parameters found: ')
    for file_index in range(len(file_list)):
        print(str.format('{0}. {1}', str(file_index + 1), file_list[file_index]))
    usr_response = None
    while not usr_response:
        usr_response = input('Enter a number or another parameter (from the main directory): ')
        if usr_response.isnumeric():
            int_response = int(usr_response)
            if 1 <= int_response <= len(file_list):
                usr_response = os.path.join(default_params_dir, file_list[int_response - 1])
            else:
                # Out-of-range menu index: reset so the loop prompts again.
                usr_response = None
                print('Error. Index out of range.')
    param_obj = load_json_obj(usr_response)
    test_nn = nn.NeuralNetwork(param_obj['layer_size'])
    test_nn.load_param_json(param_obj)
    # The input layer is a flattened square image, so the image side length
    # is the square root of the first layer's size.
    image_res = int(math.sqrt(param_obj['layer_size'][0]))
    hyp_data, *args, img_list = create_test_data('data/test_data/', img_res=image_res)
    hypothesis(test_nn, hyp_data, img_list)
    if USE_MATPLOTLIB and show_img:
        plt.gray()
        test_nn.create_data_vis(9, np.matrix(hyp_data))
        repeat = 'y'
        while repeat:
            # One weight visualization per layer transition.
            for layer in range(len(param_obj['layer_size']) - 1):
                vis_image = test_nn.create_visualized_image(layer)
                plt.imshow(vis_image)
                plt.show()
            repeat = input('Leave blank to quit')
def load_json_obj(file_name):
    """Load and return the JSON object stored in *file_name*.

    BUG FIX: the file is now opened through a context manager so the handle
    is closed even if reading or parsing raises (previously it leaked on
    error), and parsing streams from the file via json.load.
    """
    with io.open(file_name) as json_file:
        return json.load(json_file)
def get_file_list(directory, extension):
    """Return names of regular files in *directory* ending with *extension*.

    *extension* may be a single suffix or a tuple of suffixes (str.endswith
    accepts both). Directory order follows os.listdir.
    """
    matches = []
    for entry in os.listdir(directory):
        is_file = os.path.isfile(os.path.join(directory, entry))
        if is_file and entry.endswith(extension):
            matches.append(entry)
    return matches
def hypothesis(input_neural_network, data, face_file_list=None):
    """Print the network's verdict for every sample in *data*.

    :param input_neural_network: a trained nn.NeuralNetwork.
    :param data: feature vectors to score.
    :param face_file_list: optional file names printed alongside each score.
    """
    assert isinstance(input_neural_network, nn.NeuralNetwork)
    raw_scores = input_neural_network.hypothesis(data).tolist()[0]
    for index, score in enumerate(raw_scores):
        if face_file_list is not None:
            print('Face data: ', face_file_list[index])
        print('Raw Hypothesis: ', str(score))
        # 0.5 is the decision threshold of the single output unit.
        if score >= 0.5:
            print('PASSED: Well, it seems I like this face. :)')
        else:
            print('FAILED: Well, looks like not this one :(')
        print()
        print('-'*20)
        print()
def train_data(input_neural_network, img_res_data, output_path=None, output_name=None):
    """Train *input_neural_network* on the labelled images in data/train_data/.

    :param input_neural_network: the nn.NeuralNetwork to train (mutated in place).
    :param img_res_data: side length in pixels that images are resized to.
    :param output_path: directory for the saved parameters (default: cwd).
    :param output_name: file name for the saved parameters (default: timestamped).
    """
    assert isinstance(input_neural_network, nn.NeuralNetwork)
    path_folder = 'data/train_data/'
    label_path = 'data/train_label.json'
    # BUG FIX: use a context manager so the label file is closed even if
    # reading or JSON parsing raises (previously the handle leaked on error).
    with io.open(label_path) as label_file:
        label_obj = json.load(label_file)
    data, label, *args = create_test_data(path_folder, label_obj, img_res_data)
    input_neural_network.load_data(data, label)
    print(str.format('Input size: {0}', str(len(data))))
    print('Starting training.')
    input_neural_network.train()
    print('Training successful.')
    if output_name is None:
        # ':' is not a legal filename character on Windows, so replace it.
        output_name = str.format(
            'traindata_{0}.json',
            str(datetime.datetime.now()).replace(':', '-').replace(' ', '_'))
    if output_path is None:
        output_path = ''  # empty string
    input_neural_network.save_param(os.path.join(output_path, output_name),
                                    includemetadata=True)
def create_test_data(path, label=None, img_res=25):
    """Build (features, labels, file_names) from the image files in *path*.

    :param path: directory to scan for image files.
    :param label: optional mapping of image file name -> numeric label; when
        None, every image is included and the returned label list is empty.
    :param img_res: side length images are resized to before flattening.
    """
    img_extensions = {'.jpg', '.png', '.tiff', '.tif'}
    # str.endswith accepts a tuple of suffixes, so one listdir pass suffices.
    image_list = get_file_list(path, tuple(img_extensions))
    final_data = []
    final_label = []
    for image_file in image_list:
        file_path = os.path.join(path, image_file)
        # NOTE(review): the '_'-prefix skip only applies when a label dict is
        # given; unlabeled runs include underscore-prefixed files. Confirm
        # this asymmetry is intended.
        if (label is None) or (image_file in label and not image_file.startswith('_')):
            final_data.append(create_data(img_res, file_path))
        if label is not None and image_file in label:
            final_label.append(float(label[image_file]))
        if DEBUG_ENABLED:
            print('Appended', file_path)
    # Every feature vector must have a matching label (unless unlabeled).
    assert len(final_data) == len(final_label) or label is None
    return final_data, final_label, image_list
def create_data(img_res_side, img_file):
    """Return a flattened grayscale pixel list (values in [0, 1]) for *img_file*."""
    image = PIL.Image.open(img_file)
    image = image.resize((img_res_side, img_res_side)).convert('L')
    # x is the outer loop, matching the original column-by-column layout.
    return [image.getpixel((x, y)) / 255
            for x in range(0, img_res_side)
            for y in range(0, img_res_side)]
# Run the interactive menu only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
{
"content_hash": "b03b70939ab27465354854af86185e81",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 99,
"avg_line_length": 33.89375,
"alnum_prop": 0.6186612576064908,
"repo_name": "supakorn-ras/Face-Filter",
"id": "56f65e027e232f0e40d7b73c435e1e28f474141b",
"size": "5447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Selective_Camera/Selective_Camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16662"
}
],
"symlink_target": ""
}
|
from abc import ABC
from typing import List, Union
from py4j.java_gateway import JavaClass, JavaObject
from pyflink.java_gateway import get_gateway
class TypeInformation(ABC):
    """
    TypeInformation is the core class of Flink's type system. Flink requires a type information
    for all types that are used as input or return type of a user function. This type information
    class acts as the tool to generate serializers and comparators, and to perform semantic checks
    such as whether the fields that are used as join/grouping keys actually exist.

    The type information also bridges between the programming languages object model and a logical
    flat schema. It maps fields from the types to columns (fields) in a flat schema. Not all fields
    from a type are mapped to a separate fields in the flat schema and often, entire types are
    mapped to one field. It is important to notice that the schema must hold for all instances of a
    type. For that reason, elements in lists and arrays are not assigned to individual fields, but
    the lists and arrays are considered to be one field in total, to account for different lengths
    in the arrays.

    a) Basic types are indivisible and are considered as a single field.
    b) Arrays and collections are one field.
    c) Tuples represents as many fields as the class has fields.

    To represent this properly, each type has an arity (the number of fields it contains directly),
    and a total number of fields (number of fields in the entire schema of this type, including
    nested types).
    """
class WrapperTypeInfo(TypeInformation):
    """
    A thin wrapper around a Java TypeInformation object.
    """

    def __init__(self, j_typeinfo):
        self._j_typeinfo = j_typeinfo

    def get_java_type_info(self) -> JavaObject:
        return self._j_typeinfo

    def __eq__(self, o) -> bool:
        # Only instances of the exact same class may compare equal; the
        # actual comparison is delegated to the Java object's equals().
        if type(o) is not type(self):
            return False
        return self._j_typeinfo.equals(o._j_typeinfo)

    def __hash__(self) -> int:
        return hash(self._j_typeinfo)

    def __str__(self):
        return self._j_typeinfo.toString()
class BasicTypeInfo(TypeInformation, ABC):
    """
    Type information for primitive types (int, long, double, byte, ...), String, BigInteger,
    and BigDecimal.
    """

    @staticmethod
    def _wrap(constant_name):
        # Look the named constant up on the Java BasicTypeInfo class and wrap it.
        j_basic = get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
        return WrapperTypeInfo(getattr(j_basic, constant_name))

    @staticmethod
    def STRING_TYPE_INFO():
        return BasicTypeInfo._wrap('STRING_TYPE_INFO')

    @staticmethod
    def BOOLEAN_TYPE_INFO():
        return BasicTypeInfo._wrap('BOOLEAN_TYPE_INFO')

    @staticmethod
    def BYTE_TYPE_INFO():
        return BasicTypeInfo._wrap('BYTE_TYPE_INFO')

    @staticmethod
    def SHORT_TYPE_INFO():
        return BasicTypeInfo._wrap('SHORT_TYPE_INFO')

    @staticmethod
    def INT_TYPE_INFO():
        return BasicTypeInfo._wrap('INT_TYPE_INFO')

    @staticmethod
    def LONG_TYPE_INFO():
        return BasicTypeInfo._wrap('LONG_TYPE_INFO')

    @staticmethod
    def FLOAT_TYPE_INFO():
        return BasicTypeInfo._wrap('FLOAT_TYPE_INFO')

    @staticmethod
    def DOUBLE_TYPE_INFO():
        return BasicTypeInfo._wrap('DOUBLE_TYPE_INFO')

    @staticmethod
    def CHAR_TYPE_INFO():
        return BasicTypeInfo._wrap('CHAR_TYPE_INFO')

    @staticmethod
    def BIG_INT_TYPE_INFO():
        return BasicTypeInfo._wrap('BIG_INT_TYPE_INFO')

    @staticmethod
    def BIG_DEC_TYPE_INFO():
        return BasicTypeInfo._wrap('BIG_DEC_TYPE_INFO')
class SqlTimeTypeInfo(TypeInformation, ABC):
    """
    SqlTimeTypeInfo enables users to get Sql Time TypeInfo.
    """

    @staticmethod
    def _wrap(constant_name):
        # Fetch the named constant from the Java SqlTimeTypeInfo class.
        j_sql_time = get_gateway().jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo
        return WrapperTypeInfo(getattr(j_sql_time, constant_name))

    @staticmethod
    def DATE():
        return SqlTimeTypeInfo._wrap('DATE')

    @staticmethod
    def TIME():
        return SqlTimeTypeInfo._wrap('TIME')

    @staticmethod
    def TIMESTAMP():
        return SqlTimeTypeInfo._wrap('TIMESTAMP')
class PrimitiveArrayTypeInfo(TypeInformation, ABC):
    """
    A TypeInformation for arrays of primitive types (int, long, double, ...).
    Supports the creation of dedicated efficient serializers for these types.
    """

    @staticmethod
    def _wrap(constant_name):
        # Fetch the named constant from the Java PrimitiveArrayTypeInfo class.
        j_primitive_array = get_gateway().jvm.org.apache.flink.api.common.typeinfo \
            .PrimitiveArrayTypeInfo
        return WrapperTypeInfo(getattr(j_primitive_array, constant_name))

    @staticmethod
    def BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def BYTE_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('BYTE_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def SHORT_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('SHORT_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def INT_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('INT_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def LONG_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('LONG_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def FLOAT_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('FLOAT_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO')

    @staticmethod
    def CHAR_PRIMITIVE_ARRAY_TYPE_INFO():
        return PrimitiveArrayTypeInfo._wrap('CHAR_PRIMITIVE_ARRAY_TYPE_INFO')
class PickledBytesTypeInfo(TypeInformation, ABC):
    """
    A PickledBytesTypeInfo indicates the data is a primitive byte array generated by pickle
    serializer.
    """

    @staticmethod
    def PICKLED_BYTE_ARRAY_TYPE_INFO():
        jvm = get_gateway().jvm
        j_type_info = jvm.org.apache.flink.datastream.typeinfo.python \
            .PickledByteArrayTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO
        return WrapperTypeInfo(j_type_info)
class RowTypeInfo(WrapperTypeInfo):
    """
    TypeInformation for Row.
    """

    def __init__(self, types: List[TypeInformation], field_names: List[str] = None):
        """
        :param types: type information of each field, in field order.
        :param field_names: optional field names; when omitted, names are
            generated on the Java side.
        """
        self.types = types
        self.field_names = field_names
        self.j_types_array = get_gateway().new_array(
            get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation, len(types))
        for i in range(len(types)):
            self.j_types_array[i] = types[i].get_java_type_info()
        if field_names is None:
            self._j_typeinfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.RowTypeInfo(
                self.j_types_array)
        else:
            j_names_array = get_gateway().new_array(get_gateway().jvm.java.lang.String,
                                                    len(field_names))
            for i in range(len(field_names)):
                j_names_array[i] = field_names[i]
            self._j_typeinfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.RowTypeInfo(
                self.j_types_array, j_names_array)
        super(RowTypeInfo, self).__init__(self._j_typeinfo)

    def get_field_names(self) -> List[str]:
        """Return the field names as a Python list."""
        j_field_names = self._j_typeinfo.getFieldNames()
        return [name for name in j_field_names]

    def get_field_index(self, field_name: str) -> int:
        """Return the index of *field_name* as reported by the Java type info."""
        return self._j_typeinfo.getFieldIndex(field_name)

    def get_field_types(self) -> List[TypeInformation]:
        """Return the Python-side field type information objects."""
        return self.types

    def __eq__(self, other) -> bool:
        # BUG FIX: comparing against an arbitrary object used to raise
        # AttributeError on other._j_typeinfo; non-RowTypeInfo is now unequal.
        if not isinstance(other, RowTypeInfo):
            return False
        return self._j_typeinfo.equals(other._j_typeinfo)

    def __hash__(self) -> int:
        return self._j_typeinfo.hashCode()

    def __str__(self) -> str:
        return "RowTypeInfo(%s)" % ', '.join(
            [field_name + ': ' + field_type.__str__()
             for field_name, field_type in
             zip(self.get_field_names(), self.get_field_types())])
class TupleTypeInfo(WrapperTypeInfo):
    """
    TypeInformation for Tuple.
    """

    def __init__(self, types: List[TypeInformation]):
        """
        :param types: type information of each tuple field, in order.
        """
        self.types = types
        j_types_array = get_gateway().new_array(
            get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation, len(types))
        for i in range(len(types)):
            j_types_array[i] = types[i].get_java_type_info()
        j_typeinfo = get_gateway().jvm \
            .org.apache.flink.api.java.typeutils.TupleTypeInfo(j_types_array)
        super(TupleTypeInfo, self).__init__(j_typeinfo=j_typeinfo)

    def get_field_types(self) -> List[TypeInformation]:
        """Return the Python-side field type information objects."""
        return self.types

    def __eq__(self, other) -> bool:
        # BUG FIX: comparing against an arbitrary object used to raise
        # AttributeError on other._j_typeinfo; non-TupleTypeInfo is now unequal.
        if not isinstance(other, TupleTypeInfo):
            return False
        return self._j_typeinfo.equals(other._j_typeinfo)

    def __hash__(self) -> int:
        return self._j_typeinfo.hashCode()

    def __str__(self) -> str:
        return "TupleTypeInfo(%s)" % ', '.join(
            [field_type.__str__() for field_type in self.types])
class Types(object):
    """
    This class gives access to the type information of the most common types for which Flink has
    built-in serializers and comparators.

    Each constant below is a zero-argument factory, so usage is e.g.
    ``Types.STRING()`` -- note the call parentheses.
    """

    STRING = BasicTypeInfo.STRING_TYPE_INFO
    BYTE = BasicTypeInfo.BYTE_TYPE_INFO
    BOOLEAN = BasicTypeInfo.BOOLEAN_TYPE_INFO
    SHORT = BasicTypeInfo.SHORT_TYPE_INFO
    INT = BasicTypeInfo.INT_TYPE_INFO
    LONG = BasicTypeInfo.LONG_TYPE_INFO
    FLOAT = BasicTypeInfo.FLOAT_TYPE_INFO
    DOUBLE = BasicTypeInfo.DOUBLE_TYPE_INFO
    CHAR = BasicTypeInfo.CHAR_TYPE_INFO
    BIG_INT = BasicTypeInfo.BIG_INT_TYPE_INFO
    BIG_DEC = BasicTypeInfo.BIG_DEC_TYPE_INFO
    SQL_DATE = SqlTimeTypeInfo.DATE
    SQL_TIME = SqlTimeTypeInfo.TIME
    SQL_TIMESTAMP = SqlTimeTypeInfo.TIMESTAMP
    PICKLED_BYTE_ARRAY = PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO

    @staticmethod
    def ROW(types: List[TypeInformation]):
        """
        Returns type information for Row with fields of the given types. A row itself must not be
        null.

        :param types: the types of the row fields, e.g., Types.STRING(), Types.INT()
        """
        return RowTypeInfo(types)

    @staticmethod
    def ROW_NAMED(names: List[str], types: List[TypeInformation]):
        """
        Returns type information for Row with fields of the given types and with given names. A row
        must not be null.

        :param names: array of field names.
        :param types: array of field types.
        """
        return RowTypeInfo(types, names)

    @staticmethod
    def TUPLE(types: List[TypeInformation]):
        """
        Returns type information for Tuple with fields of the given types. A Tuple itself must not
        be null.

        :param types: array of field types.
        """
        return TupleTypeInfo(types)

    @staticmethod
    def PRIMITIVE_ARRAY(element_type: TypeInformation):
        """
        Returns type information for arrays of primitive type (such as byte[]). The array must not
        be null.

        :param element_type: element type of the array (e.g. Types.BOOLEAN(), Types.INT(),
                             Types.DOUBLE())
        :raises TypeError: if *element_type* is not one of the supported primitives.
        """
        if element_type == Types.BOOLEAN():
            return PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.BYTE():
            return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.SHORT():
            return PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.INT():
            return PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.LONG():
            return PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.FLOAT():
            return PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.DOUBLE():
            return PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO()
        elif element_type == Types.CHAR():
            return PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO()
        else:
            raise TypeError("Invalid element type for a primitive array.")
def _from_java_type(j_type_info: JavaObject) -> TypeInformation:
    """Map a Java TypeInformation object back to its Python wrapper.

    Known singleton constants (basic, SQL time, primitive array,
    pickled-bytes) are checked first, then the structural Row and Tuple
    types; anything else raises TypeError.
    """
    gateway = get_gateway()
    JBasicTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
    if _is_instance_of(j_type_info, JBasicTypeInfo.STRING_TYPE_INFO):
        return Types.STRING()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.BOOLEAN_TYPE_INFO):
        return Types.BOOLEAN()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.BYTE_TYPE_INFO):
        return Types.BYTE()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.SHORT_TYPE_INFO):
        return Types.SHORT()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.INT_TYPE_INFO):
        return Types.INT()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.LONG_TYPE_INFO):
        return Types.LONG()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.FLOAT_TYPE_INFO):
        return Types.FLOAT()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.DOUBLE_TYPE_INFO):
        return Types.DOUBLE()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.CHAR_TYPE_INFO):
        return Types.CHAR()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_INT_TYPE_INFO):
        return Types.BIG_INT()
    elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_DEC_TYPE_INFO):
        return Types.BIG_DEC()

    JSqlTimeTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo
    if _is_instance_of(j_type_info, JSqlTimeTypeInfo.DATE):
        return Types.SQL_DATE()
    elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIME):
        return Types.SQL_TIME()
    elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIMESTAMP):
        return Types.SQL_TIMESTAMP()

    JPrimitiveArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo \
        .PrimitiveArrayTypeInfo
    if _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.BOOLEAN())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.BYTE())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.SHORT())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.INT())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.LONG())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.FLOAT())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.DOUBLE())
    elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO):
        return Types.PRIMITIVE_ARRAY(Types.CHAR())

    JPickledBytesTypeInfo = gateway.jvm \
        .org.apache.flink.datastream.typeinfo.python.PickledByteArrayTypeInfo\
        .PICKLED_BYTE_ARRAY_TYPE_INFO
    if _is_instance_of(j_type_info, JPickledBytesTypeInfo):
        return Types.PICKLED_BYTE_ARRAY()

    # Row carries both field names and field types; rebuild both recursively.
    JRowTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo
    if _is_instance_of(j_type_info, JRowTypeInfo):
        j_row_field_names = j_type_info.getFieldNames()
        j_row_field_types = j_type_info.getFieldTypes()
        row_field_types = [_from_java_type(j_row_field_type) for j_row_field_type in
                           j_row_field_types]
        return Types.ROW_NAMED(j_row_field_names, row_field_types)

    # Tuple: recurse into each positional field type.
    JTupleTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.TupleTypeInfo
    if _is_instance_of(j_type_info, JTupleTypeInfo):
        j_field_types = []
        for i in range(j_type_info.getArity()):
            j_field_types.append(j_type_info.getTypeAt(i))
        field_types = [_from_java_type(j_field_type) for j_field_type in j_field_types]
        return TupleTypeInfo(field_types)

    raise TypeError("The java type info: %s is not supported in PyFlink currently." % j_type_info)
def _is_instance_of(java_object: JavaObject, java_type: Union[JavaObject, JavaClass]) -> bool:
    """Return True if *java_object* matches *java_type*.

    A JavaObject is matched by equality (used for the singleton
    TypeInformation constants); a JavaClass is matched by class
    assignability.
    """
    if isinstance(java_type, JavaObject):
        return java_object.equals(java_type)
    elif isinstance(java_type, JavaClass):
        # NOTE(review): this asks whether java_type is assignable *to* the
        # object's runtime class, not the usual instanceof direction
        # (java_type._java_lang_class.isAssignableFrom(obj.getClass())).
        # It works for exact-class matches; confirm the direction is intended.
        return java_object.getClass().isAssignableFrom(java_type._java_lang_class)
    return False
|
{
"content_hash": "29d0ee0acf41a2976c9059ae7c43fe60",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 100,
"avg_line_length": 39.69450549450549,
"alnum_prop": 0.6650794529649521,
"repo_name": "tzulitai/flink",
"id": "8b1da5d2564a641fb935a97deb6e1cb4898b12b7",
"size": "19020",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/common/typeinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5718"
},
{
"name": "CSS",
"bytes": "57936"
},
{
"name": "Clojure",
"bytes": "90539"
},
{
"name": "Dockerfile",
"bytes": "10807"
},
{
"name": "FreeMarker",
"bytes": "11389"
},
{
"name": "HTML",
"bytes": "224454"
},
{
"name": "Java",
"bytes": "46348883"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "731653"
},
{
"name": "Scala",
"bytes": "12432812"
},
{
"name": "Shell",
"bytes": "463267"
},
{
"name": "TypeScript",
"bytes": "243702"
}
],
"symlink_target": ""
}
|
from openid.extensions.draft import pape5 as pape
from openid.message import *
from openid.server import server
import warnings
# Suppress the known warning emitted from this test module when "none" is
# used as a policy URI in the cases below.
warnings.filterwarnings(
    'ignore', module=__name__, message='"none" used as a policy URI')
import unittest
class PapeRequestTestCase(unittest.TestCase):
def setUp(self):
    # A fresh, empty PAPE request for every test method.
    self.req = pape.Request()
def test_construct(self):
    """Default and explicit constructor arguments are stored as given."""
    # Defaults of an empty request.
    self.assertEqual([], self.req.preferred_auth_policies)
    self.assertEqual(None, self.req.max_auth_age)
    self.assertEqual('pape', self.req.ns_alias)
    self.assertFalse(self.req.preferred_auth_level_types)

    # Explicit arguments must round-trip verbatim.
    custom_levels = ['http://janrain.com/our_levels']
    populated = pape.Request([pape.AUTH_MULTI_FACTOR], 1000, custom_levels)
    self.assertEqual([pape.AUTH_MULTI_FACTOR],
                     populated.preferred_auth_policies)
    self.assertEqual(1000, populated.max_auth_age)
    self.assertEqual(custom_levels, populated.preferred_auth_level_types)
def test_addAuthLevel(self):
    """Exercise addAuthLevel: ordering, de-duplication, alias handling."""
    # First level is recorded and its alias mapped.
    self.req.addAuthLevel('http://example.com/', 'example')
    self.assertEqual(['http://example.com/'],
                     self.req.preferred_auth_level_types)
    self.assertEqual('http://example.com/',
                     self.req.auth_level_aliases['example'])
    # A second distinct level appends in order.
    self.req.addAuthLevel('http://example.com/1', 'example1')
    self.assertEqual(['http://example.com/', 'http://example.com/1'],
                     self.req.preferred_auth_level_types)
    # Re-adding a known URI under a new alias does not duplicate the URI.
    self.req.addAuthLevel('http://example.com/', 'exmpl')
    self.assertEqual(['http://example.com/', 'http://example.com/1'],
                     self.req.preferred_auth_level_types)
    # Re-adding the same URI/alias pair is a no-op.
    self.req.addAuthLevel('http://example.com/', 'example')
    self.assertEqual(['http://example.com/', 'http://example.com/1'],
                     self.req.preferred_auth_level_types)
    # Reusing an alias for a different URI must fail.
    self.assertRaises(KeyError, self.req.addAuthLevel,
                      'http://example.com/2', 'example')
    # alias is None; we expect a new one to be generated.
    uri = 'http://another.example.com/'
    self.req.addAuthLevel(uri)
    self.assertTrue(uri in list(self.req.auth_level_aliases.values()))
    # We don't expect a new alias to be generated if one already
    # exists.
    before_aliases = list(self.req.auth_level_aliases.keys())
    self.req.addAuthLevel(uri)
    after_aliases = list(self.req.auth_level_aliases.keys())
    self.assertEqual(before_aliases, after_aliases)
def test_add_policy_uri(self):
    """addPolicyURI keeps insertion order and never duplicates."""
    self.assertEqual([], self.req.preferred_auth_policies)
    self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
    self.assertEqual([pape.AUTH_MULTI_FACTOR],
                     self.req.preferred_auth_policies)
    # Adding the same URI again must not create a duplicate.
    self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
    self.assertEqual([pape.AUTH_MULTI_FACTOR],
                     self.req.preferred_auth_policies)
    both_policies = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
    self.req.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
    self.assertEqual(both_policies, self.req.preferred_auth_policies)
    self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
    self.assertEqual(both_policies, self.req.preferred_auth_policies)
def test_getExtensionArgs(self):
    """Serialization of policies and max_auth_age into extension args."""
    # Empty request: only the (blank) policies key is emitted.
    self.assertEqual({
        'preferred_auth_policies': ''
    }, self.req.getExtensionArgs())
    self.req.addPolicyURI('http://uri')
    self.assertEqual({
        'preferred_auth_policies': 'http://uri'
    }, self.req.getExtensionArgs())
    # Multiple policies are space-separated, in insertion order.
    self.req.addPolicyURI('http://zig')
    self.assertEqual({
        'preferred_auth_policies': 'http://uri http://zig'
    }, self.req.getExtensionArgs())
    # max_auth_age is serialized as a string once set.
    self.req.max_auth_age = 789
    self.assertEqual({
        'preferred_auth_policies': 'http://uri http://zig',
        'max_auth_age': '789'
    }, self.req.getExtensionArgs())
def test_getExtensionArgsWithAuthLevels(self):
    """Custom auth levels serialize as alias namespaces plus a type list."""
    first_uri = 'http://example.com/auth_level'
    first_alias = 'my_level'
    self.req.addAuthLevel(first_uri, first_alias)
    second_uri = 'http://example.com/auth_level_2'
    second_alias = 'my_level_2'
    self.req.addAuthLevel(second_uri, second_alias)
    expected = {
        'auth_level.ns.%s' % first_alias: first_uri,
        'auth_level.ns.%s' % second_alias: second_uri,
        'preferred_auth_level_types': ' '.join([first_alias, second_alias]),
        'preferred_auth_policies': '',
    }
    self.assertEqual(expected, self.req.getExtensionArgs())
def test_parseExtensionArgsWithAuthLevels(self):
uri = 'http://example.com/auth_level'
alias = 'my_level'
uri2 = 'http://example.com/auth_level_2'
alias2 = 'my_level_2'
request_args = {
('auth_level.ns.%s' % alias): uri,
('auth_level.ns.%s' % alias2): uri2,
'preferred_auth_level_types': ' '.join([alias, alias2]),
'preferred_auth_policies': '',
}
# Check request object state
self.req.parseExtensionArgs(
request_args, is_openid1=False, strict=False)
expected_auth_levels = [uri, uri2]
self.assertEqual(expected_auth_levels,
self.req.preferred_auth_level_types)
self.assertEqual(uri, self.req.auth_level_aliases[alias])
self.assertEqual(uri2, self.req.auth_level_aliases[alias2])
def test_parseExtensionArgsWithAuthLevels_openID1(self):
request_args = {
'preferred_auth_level_types': 'nist jisa',
}
expected_auth_levels = [pape.LEVELS_NIST, pape.LEVELS_JISA]
self.req.parseExtensionArgs(request_args, is_openid1=True)
self.assertEqual(expected_auth_levels,
self.req.preferred_auth_level_types)
self.req = pape.Request()
self.req.parseExtensionArgs(request_args, is_openid1=False)
self.assertEqual([], self.req.preferred_auth_level_types)
self.req = pape.Request()
self.assertRaises(
ValueError,
self.req.parseExtensionArgs,
request_args,
is_openid1=False,
strict=True)
def test_parseExtensionArgs_ignoreBadAuthLevels(self):
request_args = {'preferred_auth_level_types': 'monkeys'}
self.req.parseExtensionArgs(request_args, False)
self.assertEqual([], self.req.preferred_auth_level_types)
def test_parseExtensionArgs_strictBadAuthLevels(self):
request_args = {'preferred_auth_level_types': 'monkeys'}
self.assertRaises(
ValueError,
self.req.parseExtensionArgs,
request_args,
is_openid1=False,
strict=True)
def test_parseExtensionArgs(self):
args = {
'preferred_auth_policies': 'http://foo http://bar',
'max_auth_age': '9'
}
self.req.parseExtensionArgs(args, False)
self.assertEqual(9, self.req.max_auth_age)
self.assertEqual(['http://foo', 'http://bar'],
self.req.preferred_auth_policies)
self.assertEqual([], self.req.preferred_auth_level_types)
def test_parseExtensionArgs_strict_bad_auth_age(self):
args = {'max_auth_age': 'not an int'}
self.assertRaises(
ValueError,
self.req.parseExtensionArgs,
args,
is_openid1=False,
strict=True)
def test_parseExtensionArgs_empty(self):
self.req.parseExtensionArgs({}, False)
self.assertEqual(None, self.req.max_auth_age)
self.assertEqual([], self.req.preferred_auth_policies)
self.assertEqual([], self.req.preferred_auth_level_types)
def test_fromOpenIDRequest(self):
policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
openid_req_msg = Message.fromOpenIDArgs({
'mode':
'checkid_setup',
'ns':
OPENID2_NS,
'ns.pape':
pape.ns_uri,
'pape.preferred_auth_policies':
' '.join(policy_uris),
'pape.max_auth_age':
'5476'
})
oid_req = server.OpenIDRequest()
oid_req.message = openid_req_msg
req = pape.Request.fromOpenIDRequest(oid_req)
self.assertEqual(policy_uris, req.preferred_auth_policies)
self.assertEqual(5476, req.max_auth_age)
def test_fromOpenIDRequest_no_pape(self):
message = Message()
openid_req = server.OpenIDRequest()
openid_req.message = message
pape_req = pape.Request.fromOpenIDRequest(openid_req)
assert (pape_req is None)
def test_preferred_types(self):
self.req.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
pt = self.req.preferredTypes(
[pape.AUTH_MULTI_FACTOR, pape.AUTH_MULTI_FACTOR_PHYSICAL])
self.assertEqual([pape.AUTH_MULTI_FACTOR], pt)
class DummySuccessResponse:
    """Minimal stand-in for a consumer SuccessResponse used by the PAPE tests.

    Only the pieces pape.Response.fromSuccessResponse touches are provided:
    the message, the OpenID-version flag, and the signed namespace args.
    """

    def __init__(self, message, signed_stuff):
        self.message = message
        self.signed_stuff = signed_stuff

    def isOpenID1(self):
        # These tests always exercise the OpenID 2 code path.
        return False

    def getSignedNS(self, ns_uri):
        # Return the canned signed arguments regardless of the namespace.
        return self.signed_stuff
class PapeResponseTestCase(unittest.TestCase):
    """Tests for pape.Response: construction, serialization, and parsing."""
    def setUp(self):
        # A fresh, empty PAPE response for every test.
        self.resp = pape.Response()
    def test_construct(self):
        """A default Response is empty; constructor arguments are stored."""
        self.assertEqual([], self.resp.auth_policies)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual('pape', self.resp.ns_alias)
        self.assertEqual(None, self.resp.nist_auth_level)
        req2 = pape.Response([pape.AUTH_MULTI_FACTOR], "2004-12-11T10:30:44Z",
                             {pape.LEVELS_NIST: 3})
        self.assertEqual([pape.AUTH_MULTI_FACTOR], req2.auth_policies)
        self.assertEqual("2004-12-11T10:30:44Z", req2.auth_time)
        self.assertEqual(3, req2.nist_auth_level)
    def test_add_policy_uri(self):
        """addPolicyURI deduplicates; adding AUTH_NONE is an error."""
        self.assertEqual([], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.resp.auth_policies)
        # AUTH_NONE may never be combined with real policies.
        self.assertRaises(RuntimeError, self.resp.addPolicyURI, pape.AUTH_NONE)
    def test_getExtensionArgs(self):
        """An empty response serializes to AUTH_NONE; fields accumulate."""
        self.assertEqual({
            'auth_policies': pape.AUTH_NONE
        }, self.resp.getExtensionArgs())
        self.resp.addPolicyURI('http://uri')
        self.assertEqual({
            'auth_policies': 'http://uri'
        }, self.resp.getExtensionArgs())
        self.resp.addPolicyURI('http://zig')
        self.assertEqual({
            'auth_policies': 'http://uri http://zig'
        }, self.resp.getExtensionArgs())
        self.resp.auth_time = "1776-07-04T14:43:12Z"
        self.assertEqual({
            'auth_policies': 'http://uri http://zig',
            'auth_time': "1776-07-04T14:43:12Z"
        }, self.resp.getExtensionArgs())
        # Auth levels serialize as a value plus a namespace declaration.
        self.resp.setAuthLevel(pape.LEVELS_NIST, '3')
        self.assertEqual({
            'auth_policies': 'http://uri http://zig',
            'auth_time': "1776-07-04T14:43:12Z",
            'auth_level.nist': '3',
            'auth_level.ns.nist': pape.LEVELS_NIST
        }, self.resp.getExtensionArgs())
    def test_getExtensionArgs_error_auth_age(self):
        """A malformed auth_time makes serialization fail."""
        self.resp.auth_time = "long ago"
        self.assertRaises(ValueError, self.resp.getExtensionArgs)
    def test_parseExtensionArgs(self):
        """Well-formed policies and auth_time round-trip through parsing."""
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': '1970-01-01T00:00:00Z'
        }
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual('1970-01-01T00:00:00Z', self.resp.auth_time)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)
    def test_parseExtensionArgs_valid_none(self):
        """The AUTH_NONE sentinel parses to an empty policy list."""
        args = {'auth_policies': pape.AUTH_NONE}
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual([], self.resp.auth_policies)
    def test_parseExtensionArgs_old_none(self):
        """The legacy literal 'none' is tolerated in non-strict mode."""
        args = {'auth_policies': 'none'}
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual([], self.resp.auth_policies)
    def test_parseExtensionArgs_old_none_strict(self):
        """The legacy literal 'none' is rejected in strict mode."""
        args = {'auth_policies': 'none'}
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)
    def test_parseExtensionArgs_empty(self):
        """An empty argument dict is acceptable in non-strict mode."""
        self.resp.parseExtensionArgs({}, is_openid1=False)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual([], self.resp.auth_policies)
    def test_parseExtensionArgs_empty_strict(self):
        """An empty argument dict is rejected in strict mode."""
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs, {},
            is_openid1=False,
            strict=True)
    def test_parseExtensionArgs_ignore_superfluous_none(self):
        """AUTH_NONE mixed with a real policy is dropped in non-strict mode."""
        policies = [pape.AUTH_NONE, pape.AUTH_MULTI_FACTOR_PHYSICAL]
        args = {
            'auth_policies': ' '.join(policies),
        }
        self.resp.parseExtensionArgs(args, is_openid1=False, strict=False)
        self.assertEqual([pape.AUTH_MULTI_FACTOR_PHYSICAL],
                         self.resp.auth_policies)
    def test_parseExtensionArgs_none_strict(self):
        """AUTH_NONE mixed with a real policy is an error in strict mode."""
        policies = [pape.AUTH_NONE, pape.AUTH_MULTI_FACTOR_PHYSICAL]
        args = {
            'auth_policies': ' '.join(policies),
        }
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)
    def test_parseExtensionArgs_strict_bogus1(self):
        """A malformed auth_time is rejected in strict mode."""
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': 'yesterday'
        }
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)
    def test_parseExtensionArgs_openid1_strict(self):
        """OpenID 1 allows an auth level with no namespace declaration."""
        args = {
            'auth_level.nist': '0',
            'auth_policies': pape.AUTH_NONE,
        }
        self.resp.parseExtensionArgs(args, strict=True, is_openid1=True)
        self.assertEqual('0', self.resp.getAuthLevel(pape.LEVELS_NIST))
        self.assertEqual([], self.resp.auth_policies)
    def test_parseExtensionArgs_strict_no_namespace_decl_openid2(self):
        # Test the case where the namespace is not declared for an
        # auth level.
        args = {
            'auth_policies': pape.AUTH_NONE,
            'auth_level.nist': '0',
        }
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)
    def test_parseExtensionArgs_nostrict_no_namespace_decl_openid2(self):
        # Test the case where the namespace is not declared for an
        # auth level.
        args = {
            'auth_policies': pape.AUTH_NONE,
            'auth_level.nist': '0',
        }
        self.resp.parseExtensionArgs(args, is_openid1=False, strict=False)
        # There is no namespace declaration for this auth level.
        self.assertRaises(KeyError, self.resp.getAuthLevel, pape.LEVELS_NIST)
    def test_parseExtensionArgs_strict_good(self):
        """A fully well-formed response parses cleanly even in strict mode."""
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': '1970-01-01T00:00:00Z',
            'auth_level.nist': '0',
            'auth_level.ns.nist': pape.LEVELS_NIST
        }
        self.resp.parseExtensionArgs(args, is_openid1=False, strict=True)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)
        self.assertEqual('1970-01-01T00:00:00Z', self.resp.auth_time)
        self.assertEqual(0, self.resp.nist_auth_level)
    def test_parseExtensionArgs_nostrict_bogus(self):
        """Bad optional fields are discarded (set to None) in non-strict mode."""
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': 'when the cows come home',
            'nist_auth_level': 'some'
        }
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual(None, self.resp.nist_auth_level)
    def test_fromSuccessResponse(self):
        """A PAPE Response is extracted from a signed id_res response."""
        policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
        openid_req_msg = Message.fromOpenIDArgs({
            'mode':
            'id_res',
            'ns':
            OPENID2_NS,
            'ns.pape':
            pape.ns_uri,
            'pape.auth_policies':
            ' '.join(policy_uris),
            'pape.auth_time':
            '1970-01-01T00:00:00Z'
        })
        signed_stuff = {
            'auth_policies': ' '.join(policy_uris),
            'auth_time': '1970-01-01T00:00:00Z'
        }
        oid_req = DummySuccessResponse(openid_req_msg, signed_stuff)
        req = pape.Response.fromSuccessResponse(oid_req)
        self.assertEqual(policy_uris, req.auth_policies)
        self.assertEqual('1970-01-01T00:00:00Z', req.auth_time)
    def test_fromSuccessResponseNoSignedArgs(self):
        """Without signed PAPE args, fromSuccessResponse yields None."""
        policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
        openid_req_msg = Message.fromOpenIDArgs({
            'mode':
            'id_res',
            'ns':
            OPENID2_NS,
            'ns.pape':
            pape.ns_uri,
            'pape.auth_policies':
            ' '.join(policy_uris),
            'pape.auth_time':
            '1970-01-01T00:00:00Z'
        })
        signed_stuff = {}
        class NoSigningDummyResponse(DummySuccessResponse):
            # Simulates a response where nothing was signed.
            def getSignedNS(self, ns_uri):
                return None
        oid_req = NoSigningDummyResponse(openid_req_msg, signed_stuff)
        resp = pape.Response.fromSuccessResponse(oid_req)
        self.assertTrue(resp is None)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "c595a8383d85eadb1f3be7c6ca1a18aa",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 79,
"avg_line_length": 37.36418511066398,
"alnum_prop": 0.5965535810446957,
"repo_name": "necaris/python3-openid",
"id": "0d3905c23eb526cd4fee0226a1138ba09fddd082",
"size": "18570",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openid/test/test_pape_draft5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2892"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Python",
"bytes": "902717"
}
],
"symlink_target": ""
}
|
"""
tests for hep_ml.speedup module
"""
from __future__ import division, print_function, absolute_import
import numpy
import pandas
import time
from hep_ml.speedup import LookupClassifier
from hep_ml.commonutils import generate_sample
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from nose.tools import raises
__author__ = 'Alex Rogozhnikov'
def test_lookup(n_samples=10000, n_features=7, n_bins=8):
    """End-to-end check of LookupClassifier: prediction quality, index
    conversions, and agreement between the lookup table and the wrapped
    estimator's direct predictions."""
    X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=0.6)
    base_estimator = GradientBoostingClassifier()
    clf = LookupClassifier(base_estimator=base_estimator, n_bins=n_bins, keep_trained_estimator=True).fit(X, y)
    p = clf.predict_proba(X)
    assert roc_auc_score(y, p[:, 1]) > 0.8, 'quality of classification is too low'
    assert p.shape == (n_samples, 2)
    assert numpy.allclose(p.sum(axis=1), 1), 'probabilities are not summed up to 1'
    # checking conversions: lookup index -> per-feature bins -> lookup
    # index must be the identity over the whole table.
    lookup_size = n_bins ** n_features
    lookup_indices = numpy.arange(lookup_size, dtype=int)
    bins_indices = clf.convert_lookup_index_to_bins(lookup_indices=lookup_indices)
    lookup_indices2 = clf.convert_bins_to_lookup_index(bins_indices=bins_indices)
    assert numpy.allclose(lookup_indices, lookup_indices2), 'something wrong with conversions'
    assert len(clf._lookup_table) == n_bins ** n_features, 'wrong size of lookup table'
    # checking speed: table lookup should agree with calling the trained
    # estimator directly on the binned data (times are only printed).
    X = pandas.concat([X] * 10)
    start = time.time()
    p1 = clf.trained_estimator.predict_proba(clf.transform(X))
    time_old = time.time() - start
    start = time.time()
    p2 = clf.predict_proba(X)
    time_new = time.time() - start
    print(time_old, ' now takes ', time_new)
    assert numpy.allclose(p1, p2), "pipeline doesn't work as expected"
def test_sizes(n_samples=10000, n_features=4, n_bins=8):
    """Check that LookupClassifier honors uniform and per-feature bin counts."""
    X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=0.6)
    base_estimator = GradientBoostingClassifier(n_estimators=1)
    # Uniform binning: every feature should end up with exactly n_bins bins.
    uniform_clf = LookupClassifier(base_estimator=base_estimator, n_bins=n_bins).fit(X, y)
    uniform_indices = uniform_clf.transform(X)
    assert numpy.allclose(numpy.max(uniform_indices, axis=0) + 1, n_bins)
    # Per-feature binning: bin counts supplied via an ordered mapping.
    maximals = OrderedDict()
    for column in X.columns:
        maximals[column] = numpy.random.randint(low=n_bins // 2, high=n_bins)
    varied_clf = LookupClassifier(base_estimator=base_estimator, n_bins=maximals).fit(X, y)
    varied_indices = varied_clf.transform(X)
    assert numpy.allclose(numpy.max(varied_indices, axis=0) + 1, list(maximals.values()))
    assert numpy.allclose(numpy.min(varied_indices, axis=0), 0)
@raises(ValueError)
def test_raising_exception():
    """Too many features for the requested binning must raise ValueError."""
    X, y = generate_sample(n_samples=100, n_features=10)
    clf = LookupClassifier(GradientBoostingClassifier(), n_bins=16)
    clf.fit(X, y)
def test_classifier_with_dataframe():
    """LookupClassifier should accept a REP wrapper with both ndarray and
    DataFrame inputs; the test is skipped gracefully when rep is absent."""
    try:
        from rep.estimators import SklearnClassifier
        wrapped = SklearnClassifier(GradientBoostingClassifier(n_estimators=1))
        X, y = generate_sample(n_samples=100, n_features=4)
        for data in (X, pandas.DataFrame(X)):
            LookupClassifier(wrapped, n_bins=16).fit(data, y).predict_proba(X)
    except ImportError:
        print('expected fail: yandex/rep not installed')
|
{
"content_hash": "68e3a920dccf8ed84e0d80c22cfa4d82",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 111,
"avg_line_length": 39.023809523809526,
"alnum_prop": 0.7028676021964613,
"repo_name": "iamfullofspam/hep_ml",
"id": "82270d253a4ffce9ba7454d2c14504a7bcf5c707",
"size": "3278",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_speedup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "760999"
},
{
"name": "Python",
"bytes": "230453"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import matplotlib
from matplotlib.axes import Axes
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import NullLocator, Formatter, FixedLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
# This example projection class is rather long, but it is designed to
# illustrate many features, not all of which will be used every time.
# It is also common to factor out a lot of these methods into common
# code used by a number of projections with similar characteristics
# (see geo.py).
class HammerAxes(Axes):
    """
    A custom class for the Aitoff-Hammer projection, an equal-area map
    projection.

    http://en.wikipedia.org/wiki/Hammer_projection
    """
    # The projection must specify a name.  This will be used be the
    # user to select the projection, i.e. ``subplot(111,
    # projection='custom_hammer')``.
    name = 'custom_hammer'

    def __init__(self, *args, **kwargs):
        Axes.__init__(self, *args, **kwargs)
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _init_axis(self):
        self.xaxis = maxis.XAxis(self)
        self.yaxis = maxis.YAxis(self)
        # Do not register xaxis or yaxis with spines -- as done in
        # Axes._init_axis() -- until HammerAxes.xaxis.cla() works.
        # self.spines['hammer'].register_axis(self.yaxis)
        self._update_transScale()

    def cla(self):
        """
        Override to set up some reasonable defaults.
        """
        # Don't forget to call the base class
        Axes.cla(self)

        # Set up a default grid spacing
        self.set_longitude_grid(30)
        self.set_latitude_grid(15)
        self.set_longitude_grid_ends(75)

        # Turn off minor ticking altogether
        self.xaxis.set_minor_locator(NullLocator())
        self.yaxis.set_minor_locator(NullLocator())

        # Do not display ticks -- we only want gridlines and text
        self.xaxis.set_ticks_position('none')
        self.yaxis.set_ticks_position('none')

        # The limits on this projection are fixed -- they are not to
        # be changed by the user.  This makes the math in the
        # transformation itself easier, and since this is a toy
        # example, the easier, the better.
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)

    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        # There are three important coordinate spaces going on here:
        #
        #    1. Data space: The space of the data itself
        #
        #    2. Axes space: The unit rectangle (0, 0) to (1, 1)
        #       covering the entire plot area.
        #
        #    3. Display space: The coordinates of the resulting image,
        #       often in pixels or dpi/inch.

        # This function makes heavy use of the Transform classes in
        # ``lib/matplotlib/transforms.py.`` For more information, see
        # the inline documentation there.

        # The goal of the first two transformations is to get from the
        # data space (in this case longitude and latitude) to axes
        # space.  It is separated into a non-affine and affine part so
        # that the non-affine part does not have to be recomputed when
        # a simple affine change to the figure has been made (such as
        # resizing the window or changing the dpi).

        # 1) The core transformation from data space into
        # rectilinear space defined in the HammerTransform class.
        self.transProjection = self.HammerTransform()

        # 2) The above has an output range that is not in the unit
        # rectangle, so scale and translate it so it fits correctly
        # within the axes.  The peculiar calculations of xscale and
        # yscale are specific to a Aitoff-Hammer projection, so don't
        # worry about them too much.
        xscale = 2.0 * np.sqrt(2.0) * np.sin(0.5 * np.pi)
        yscale = np.sqrt(2.0) * np.sin(0.5 * np.pi)
        self.transAffine = Affine2D() \
            .scale(0.5 / xscale, 0.5 / yscale) \
            .translate(0.5, 0.5)

        # 3) This is the transformation from axes space to display
        # space.
        self.transAxes = BboxTransformTo(self.bbox)

        # Now put these 3 transforms together -- from data all the way
        # to display coordinates.  Using the '+' operator, these
        # transforms will be applied "in order".  The transforms are
        # automatically simplified, if possible, by the underlying
        # transformation framework.
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes

        # The main data transformation is set up.  Now deal with
        # gridlines and tick labels.

        # Longitude gridlines and ticklabels.  The input to these
        # transforms are in display space in x and axes space in y.
        # Therefore, the input values will be in range (-xmin, 0),
        # (xmax, 1).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the equator.
        self._xaxis_pretransform = \
            Affine2D() \
            .scale(1.0, np.pi) \
            .translate(0.0, -np.pi)
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, 4.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)

        # Now set up the transforms for the latitude ticks.  The input to
        # these transforms are in axes space in x and display space in
        # y.  Therefore, the input values will be in range (0, -ymin),
        # (1, ymax).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the edge of the axes ellipse.
        yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
        yaxis_space = Affine2D().scale(1.0, 1.1)
        self._yaxis_transform = \
            yaxis_stretch + \
            self.transData
        yaxis_text_base = \
            yaxis_stretch + \
            self.transProjection + \
            (yaxis_space + \
             self.transAffine + \
             self.transAxes)
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)

    def get_xaxis_transform(self, which='grid'):
        """
        Override this method to provide a transformation for the
        x-axis grid and ticks.
        """
        assert which in ['tick1', 'tick2', 'grid']
        return self._xaxis_transform

    def get_xaxis_text1_transform(self, pixelPad):
        """
        Override this method to provide a transformation for the
        x-axis tick labels.

        Returns a tuple of the form (transform, valign, halign)
        """
        return self._xaxis_text1_transform, 'bottom', 'center'

    def get_xaxis_text2_transform(self, pixelPad):
        """
        Override this method to provide a transformation for the
        secondary x-axis tick labels.

        Returns a tuple of the form (transform, valign, halign)
        """
        return self._xaxis_text2_transform, 'top', 'center'

    def get_yaxis_transform(self, which='grid'):
        """
        Override this method to provide a transformation for the
        y-axis grid and ticks.
        """
        assert which in ['tick1', 'tick2', 'grid']
        return self._yaxis_transform

    def get_yaxis_text1_transform(self, pixelPad):
        """
        Override this method to provide a transformation for the
        y-axis tick labels.

        Returns a tuple of the form (transform, valign, halign)
        """
        return self._yaxis_text1_transform, 'center', 'right'

    def get_yaxis_text2_transform(self, pixelPad):
        """
        Override this method to provide a transformation for the
        secondary y-axis tick labels.

        Returns a tuple of the form (transform, valign, halign)
        """
        return self._yaxis_text2_transform, 'center', 'left'

    def _gen_axes_patch(self):
        """
        Override this method to define the shape that is used for the
        background of the plot.  It should be a subclass of Patch.

        In this case, it is a Circle (that may be warped by the axes
        transform into an ellipse).  Any data and gridlines will be
        clipped to this shape.
        """
        return Circle((0.5, 0.5), 0.5)

    def _gen_axes_spines(self):
        return {'custom_hammer': mspines.Spine.circular_spine(self,
                                                              (0.5, 0.5), 0.5)}

    # Prevent the user from applying scales to one or both of the
    # axes.  In this particular case, scaling the axes wouldn't make
    # sense, so we don't allow it.
    def set_xscale(self, *args, **kwargs):
        if args[0] != 'linear':
            raise NotImplementedError
        Axes.set_xscale(self, *args, **kwargs)

    def set_yscale(self, *args, **kwargs):
        if args[0] != 'linear':
            raise NotImplementedError
        Axes.set_yscale(self, *args, **kwargs)

    # Prevent the user from changing the axes limits.  In our case, we
    # want to display the whole sphere all the time, so we override
    # set_xlim and set_ylim to ignore any input.  This also applies to
    # interactive panning and zooming in the GUI interfaces.
    def set_xlim(self, *args, **kwargs):
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
    set_ylim = set_xlim

    def format_coord(self, lon, lat):
        """
        Override this method to change how the values are displayed in
        the status bar.

        In this case, we want them to be displayed in degrees N/S/E/W.
        """
        lon = lon * (180.0 / np.pi)
        lat = lat * (180.0 / np.pi)
        if lat >= 0.0:
            ns = 'N'
        else:
            ns = 'S'
        if lon >= 0.0:
            ew = 'E'
        else:
            ew = 'W'
        # \u00b0 : degree symbol
        return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)

    class DegreeFormatter(Formatter):
        """
        This is a custom formatter that converts the native unit of
        radians into (truncated) degrees and adds a degree symbol.
        """

        def __init__(self, round_to=1.0):
            self._round_to = round_to

        def __call__(self, x, pos=None):
            degrees = (x / np.pi) * 180.0
            degrees = round(degrees / self._round_to) * self._round_to
            # \u00b0 : degree symbol
            return "%d\u00b0" % degrees

    def set_longitude_grid(self, degrees):
        """
        Set the number of degrees between each longitude grid.

        This is an example method that is specific to this projection
        class -- it provides a more convenient interface to set the
        ticking than set_xticks would.
        """
        # Set up a FixedLocator at each of the points, evenly spaced
        # by degrees.
        #
        # BUG FIX: the original referenced ``plt.FixedLocator``, but ``plt``
        # is only imported inside the ``__main__`` guard, so importing this
        # module and creating the axes raised NameError (this method is
        # called from cla() during __init__).  FixedLocator is already
        # imported at the top of the file.  ``number`` is also cast to int
        # because np.linspace requires an integer sample count.
        number = int(360.0 / degrees) + 1
        self.xaxis.set_major_locator(
            FixedLocator(
                np.linspace(-np.pi, np.pi, number, True)[1:-1]))

        # Set the formatter to display the tick labels in degrees,
        # rather than radians.
        self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))

    def set_latitude_grid(self, degrees):
        """
        Set the number of degrees between each longitude grid.

        This is an example method that is specific to this projection
        class -- it provides a more convenient interface than
        set_yticks would.
        """
        # Set up a FixedLocator at each of the points, evenly spaced
        # by degrees.  As above, np.linspace needs an integer count.
        number = int(180.0 / degrees) + 1
        self.yaxis.set_major_locator(
            FixedLocator(
                np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))

        # Set the formatter to display the tick labels in degrees,
        # rather than radians.
        self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))

    def set_longitude_grid_ends(self, degrees):
        """
        Set the latitude(s) at which to stop drawing the longitude grids.

        Often, in geographic projections, you wouldn't want to draw
        longitude gridlines near the poles.  This allows the user to
        specify the degree at which to stop drawing longitude grids.

        This is an example method that is specific to this projection
        class -- it provides an interface to something that has no
        analogy in the base Axes class.
        """
        longitude_cap = degrees * (np.pi / 180.0)
        # Change the xaxis gridlines transform so that it draws from
        # -degrees to degrees, rather than -pi to pi.
        self._xaxis_pretransform \
            .clear() \
            .scale(1.0, longitude_cap * 2.0) \
            .translate(0.0, -longitude_cap)

    def get_data_ratio(self):
        """
        Return the aspect ratio of the data itself.

        This method should be overridden by any Axes that have a
        fixed data ratio.
        """
        return 1.0

    # Interactive panning and zooming is not supported with this projection,
    # so we override all of the following methods to disable it.
    def can_zoom(self):
        """
        Return True if this axes support the zoom box
        """
        return False

    def start_pan(self, x, y, button):
        pass

    def end_pan(self):
        pass

    def drag_pan(self, button, key, x, y):
        pass

    # Now, the transforms themselves.
    class HammerTransform(Transform):
        """
        The base Hammer transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def transform_non_affine(self, ll):
            """
            Override the transform_non_affine method to implement the custom
            transform.

            The input and output are Nx2 numpy arrays.
            """
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)
            sqrt2 = np.sqrt(2.0)

            alpha = 1.0 + cos_latitude * np.cos(half_long)
            x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
            y = (sqrt2 * np.sin(latitude)) / alpha
            return np.concatenate((x, y), 1)

        # This is where things get interesting.  With this projection,
        # straight lines in data space become curves in display space.
        # This is done by interpolating new values between the input
        # values of the data.  Since ``transform`` must not return a
        # differently-sized array, any transform that requires
        # changing the length of the data array must happen within
        # ``transform_path``.
        def transform_path_non_affine(self, path):
            ipath = path.interpolated(path._interpolation_steps)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        if matplotlib.__version__ < '1.2':
            # Note: For compatibility with matplotlib v1.1 and older, you'll
            # need to explicitly implement a ``transform`` method as well.
            # Otherwise a ``NotImplementedError`` will be raised. This isn't
            # necessary for v1.2 and newer, however.
            transform = transform_non_affine

            # Similarly, we need to explicitly override ``transform_path`` if
            # compatibility with older matplotlib versions is needed. With v1.2
            # and newer, only overriding the ``transform_path_non_affine``
            # method is sufficient.
            transform_path = transform_path_non_affine
            transform_path.__doc__ = Transform.transform_path.__doc__

        def inverted(self):
            return HammerAxes.InvertedHammerTransform()
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedHammerTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def transform_non_affine(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]

            quarter_x = 0.25 * x
            half_y = 0.5 * y
            z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
            longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
            latitude = np.arcsin(y*z)
            return np.concatenate((longitude, latitude), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        # As before, we need to implement the "transform" method for
        # compatibility with matplotlib v1.1 and older.
        if matplotlib.__version__ < '1.2':
            transform = transform_non_affine

        def inverted(self):
            # The inverse of the inverse is the original transform... ;)
            return HammerAxes.HammerTransform()
        inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(HammerAxes)
# Demo: only runs when this file is executed as a script.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Now make a simple example using the custom projection.
    plt.subplot(111, projection="custom_hammer")
    p = plt.plot([-1, 1, 1], [-1, -1, 1], "o-")
    plt.grid(True)
    plt.show()
|
{
"content_hash": "ad02d985ea9906a04b23920d2c1eb831",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 81,
"avg_line_length": 38.09185803757829,
"alnum_prop": 0.5964594979721582,
"repo_name": "RobertABT/heightmap",
"id": "31f55f6b523df162c89c97167ab5401460944d0e",
"size": "18246",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "build/matplotlib/examples/api/custom_projection_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
}
|
import numpy as np
from mnist import MNIST
from classification_base import MNIST_PATH
def prep_binary_classes(y, digit=2):
    """Convert multi-class labels to a binary 0/1 indicator for *digit*.

    Parameters
    ----------
    y : array-like of labels
    digit : the class treated as positive (mapped to 1); all others become 0.

    Returns
    -------
    numpy integer array of 0/1 values, same length as *y*.
    """
    # Vectorized comparison replaces the original per-element Python loop.
    return (np.asarray(y) == digit).astype(int)
def mnist_training(shuffled=True):
    """Load the MNIST training images and labels as numpy arrays.

    When *shuffled* is true (the default) the samples are returned in a
    random order; otherwise in the order stored in the data files.
    """
    loader = MNIST(MNIST_PATH)
    images, labels = loader.load_training()
    X = np.array(images)
    y = np.array(labels)
    if not shuffled:
        return X, y
    return shuffle(X, y)
def mnist_testing(shuffled = True):
    """Load the MNIST test images and labels as numpy arrays.

    When *shuffled* is true (the default) the samples are returned in a
    random order; otherwise in the order stored in the data files.
    """
    loader = MNIST(MNIST_PATH)
    images, labels = loader.load_testing()
    X = np.array(images)
    y = np.array(labels)
    if not shuffled:
        return X, y
    return shuffle(X, y)
def mnist_training_binary(num, shuffled=True):
    """Load MNIST training data with labels binarized against *num*.

    Returns (X, y01) where y01[i] == 1 iff the original label equals *num*.

    Bug fix: the original called mnist_training() with its default
    (shuffled=True) and then shuffled *again* when shuffled=True, so
    passing shuffled=False still returned shuffled data and the True case
    shuffled twice.  Forwarding the flag gives the intended behavior.
    """
    X, y = mnist_training(shuffled=shuffled)
    return X, np.array([int(yi == num) for yi in y])
def mnist_testing_binary(num):
    """Load MNIST test data with labels binarized against *num* (1 iff equal)."""
    X, y = mnist_testing()
    binary_labels = np.array([int(label == num) for label in y])
    return X, binary_labels
def shuffle(X, y):
    """Return (X, y) reordered by one shared random permutation.

    Rows of X stay aligned with the entries of y.  Uses numpy's global
    RNG, so results are reproducible after np.random.seed().
    """
    order = np.arange(len(y))
    np.random.shuffle(order)
    return X[order, :], y[order]
def shuffle_train_and_test(X1, y1, X2, y2):
    """Pool two (X, y) sets, shuffle them together, and re-split at the
    original boundary.  Seeds the global numpy RNG for reproducibility.
    """
    print('Warning: shuffling train and test should not be used for work '
          'that is to be submitted.')
    np.random.seed(12345)
    pooled_X = np.concatenate((X1, X2))
    pooled_y = np.concatenate((y1, y2))
    mixed_X, mixed_y = shuffle(pooled_X, pooled_y)
    split = X1.shape[0]
    return (mixed_X[:split, :], mixed_y[:split],
            mixed_X[split:, :], mixed_y[split:])
def binary_shuffled(num):
    """Binary train/test sets for digit ``num``, shuffled across the split."""
    train_X, train_y = mnist_training_binary(num)
    test_X, test_y = mnist_testing_binary(num)
    return shuffle_train_and_test(train_X, train_y, test_X, test_y)
|
{
"content_hash": "fde18abd5679cecc6d54dc5edcc373e1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 28.546875,
"alnum_prop": 0.6305418719211823,
"repo_name": "JanetMatsen/Machine_Learning_CSE_546",
"id": "37099fc9e3acb7f640fedb4e56957a8334b9dc6f",
"size": "1827",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HW3/code/mnist_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3022206"
},
{
"name": "Python",
"bytes": "240573"
}
],
"symlink_target": ""
}
|
"""An alternate implementation of the persistent dict found in
http://erezsh.wordpress.com/2009/05/24/filedict-a-persistent-dictionary-in-python/
"""
import sqlite3, UserDict, pickle
def key(k):
    """Return the (hash, pickled-bytes) pair used to locate k in the table."""
    hashed = hash(k)
    pickled = pickle.dumps(k)
    return hashed, pickled
class persistentDict(UserDict.DictMixin):
    """Dictionary persisted in an SQLite table (Python 2 code).

    Rows store (hash, pickled key, pickled value).  Lookups filter on the
    indexed ``hash`` column and then compare the pickled key, so hash
    collisions between distinct keys are handled correctly.
    """
    def __init__(self, filetable, d=None, **kwarg):
        # filetable is either a plain filename (table name defaults to
        # 'dict') or a (filename, table_name) tuple.
        if isinstance(filetable, tuple):
            filename, self._table = filetable
        else:
            filename = filetable
            self._table = 'dict'
        self._db = sqlite3.connect(filename)
        # NOTE(review): the table name is interpolated with %, not bound as
        # a parameter — it must come from trusted code, never user input.
        self._db.execute('create table if not exists %s (hash integer, key blob, value blob);' % self._table)
        self._db.execute('create index if not exists %s_index ON %s(hash);' % (self._table, self._table))
        self._db.commit()
        # Seed initial contents, mirroring dict(d, **kwarg).
        self.update(d, **kwarg)
    def __getitem__(self, k):
        # key(k) supplies the (hash, pickled-key) pair for both placeholders.
        v = self._db.execute('select value from %s where hash=? and key=?;' % self._table, key(k)).fetchone()
        if v:
            return pickle.loads(str(v[0]))
        raise KeyError, k
    def _setitem(self, (hkey, pkey), pval):
        # Insert-or-update a row identified by the already-pickled key;
        # commit is left to the caller so bulk insert() can batch.
        if self._contains((hkey, pkey)):
            self._db.execute('update %s set value=? where hash=? and key=?' % self._table, (pval, hkey, pkey))
        else:
            self._db.execute('insert into %s values (?,?,?)' % self._table, (hkey, pkey, pval))
    def __setitem__(self, k, v):
        self._setitem(key(k), pickle.dumps(v))
        self._db.commit()
    def __delitem__(self, k):
        # rowcount <= 0 means nothing matched, i.e. the key was absent.
        if self._db.execute('delete from %s where hash=? and key=?;' % self._table, key(k)).rowcount <=0:
            raise KeyError, k
        self._db.commit()
    def _contains(self, k):
        # k is an already-computed (hash, pickled-key) pair.
        res, = self._db.execute('select count(*) from %s where hash=? and key=?;' % self._table, k)
        return res[0]>0
    def __contains__(self, k):
        return self._contains(key(k))
    def __iter__(self):
        # Yields unpickled keys straight from the table.
        for k, in self._db.execute('select key from '+self._table):
            yield pickle.loads(str(k))
    def keys(self):
        return list(iter(self))
    def insert(self, seq):
        # Bulk insert: one commit at the end instead of one per item.
        for k,v in seq:
            self._setitem(key(k), pickle.dumps(v))
        self._db.commit()
if __name__ == '__main__':
    # Smoke test (Python 2): exercises create-with-kwargs, update, missing-key
    # delete, bulk insert, and missing-key lookup on a throwaway database.
    d = persistentDict((r'c:\bortest\db.dat', 'new'), k1=1)
    d['k2'] = 1
    d['k2'] = 2
    try:
        del d['k3']
    except KeyError:
        print 'OK'
    print d.keys()
    del d['k1']
    d.insert((i, str(i)) for i in range(10000))
    try:
        print d['k4']
    except KeyError:
        print 'OK'
|
{
"content_hash": "919a48855d264e172055aa9c8b7f4bd3",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 110,
"avg_line_length": 36.47826086956522,
"alnum_prop": 0.5613825983313468,
"repo_name": "zepheira/zenpub",
"id": "763228244a1612141076875e15439f31c631b2f1",
"size": "2517",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/persistentdict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "467"
},
{
"name": "Perl",
"bytes": "3117"
},
{
"name": "Python",
"bytes": "376842"
},
{
"name": "Shell",
"bytes": "402"
}
],
"symlink_target": ""
}
|
import unittest
from latex2mathml import aggregator
# Module authorship metadata.
__author__ = "Ronie Martinez"
__copyright__ = "Copyright 2016-2017, Ronie Martinez"
__credits__ = ["Ronie Martinez"]
__license__ = "MIT"
__maintainer__ = "Ronie Martinez"
__email__ = "ronmarti18@gmail.com"
__status__ = "Development"
class AggregatorTest(unittest.TestCase):
    # Each test feeds one LaTeX snippet to aggregator.aggregate and checks
    # the nested-list aggregation it produces.

    def test_single_group(self):
        result = aggregator.aggregate('{a}')
        self.assertListEqual([['a']], result)

    def test_multiple_groups(self):
        result = aggregator.aggregate('{a}{b}')
        self.assertListEqual([['a'], ['b']], result)

    def test_inner_group(self):
        result = aggregator.aggregate('{a+{b}}')
        self.assertListEqual([['a', '+', ['b']]], result)

    def test_subscript(self):
        result = aggregator.aggregate('a_b')
        self.assertListEqual(['_', 'a', 'b'], result)

    def test_superscript(self):
        result = aggregator.aggregate('a^b')
        self.assertListEqual(['^', 'a', 'b'], result)

    def test_subscript_and_superscript(self):
        result = aggregator.aggregate('a_b^c')
        self.assertListEqual(['_^', 'a', 'b', 'c'], result)

    def test_root(self):
        result = aggregator.aggregate(r'\sqrt[3]{2}')
        self.assertListEqual([r'\root', ['2'], ['3']], result)

    def test_matrix(self):
        result = list(aggregator.aggregate(r'\begin{matrix}a & b \\ c & d \end{matrix}'))
        self.assertListEqual([r'\matrix', [['a', 'b'], ['c', 'd']]], result)

    def test_matrix_with_alignment(self):
        result = list(aggregator.aggregate(r'\begin{matrix*}[r]a & b \\ c & d \end{matrix*}'))
        self.assertListEqual([r'\matrix*', 'r', [['a', 'b'], ['c', 'd']]], result)

    def test_matrix_with_negative_sign(self):
        result = list(aggregator.aggregate(r'\begin{matrix}-a & b \\ c & d \end{matrix}'))
        self.assertListEqual([r'\matrix', [[['-', 'a'], 'b'], ['c', 'd']]], result)

    def test_complex_matrix(self):
        result = list(aggregator.aggregate(r'\begin{matrix}a_{1} & b_{2} \\ c_{3} & d_{4} \end{matrix}'))
        self.assertListEqual(['\\matrix', [['_', 'a', ['1'], '_', 'b', ['2']], ['_', 'c', ['3'], '_', 'd', ['4']]]],
                             result)

    def test_simple_array(self):
        result = list(aggregator.aggregate(r'\begin{array}{cc} 1 & 2 \\ 3 & 4 \end{array}'))
        self.assertListEqual([r'\array', 'cc', [['1', '2'], ['3', '4']]], result)
if __name__ == '__main__':
    # verbosity=2 prints one line per test method.
    unittest.main(verbosity=2)
|
{
"content_hash": "a124535f2f501bca4eb99ad55a83458c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 118,
"avg_line_length": 40.75438596491228,
"alnum_prop": 0.5393887214808437,
"repo_name": "TU-Berlin/mathosphere",
"id": "f827c62b35be45bbc683f3a630e5f3862c44bce0",
"size": "2345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pomlp/lib/latex2mathml/tests/aggregator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "392362"
},
{
"name": "Java",
"bytes": "804830"
},
{
"name": "Scala",
"bytes": "4939"
},
{
"name": "TeX",
"bytes": "89"
},
{
"name": "XSLT",
"bytes": "67046"
}
],
"symlink_target": ""
}
|
from random import randint
# Build a 5x5 ocean grid; "O" marks unexplored water (Python 2 script).
board = []
for x in range(0, 5):
    board.append(["O"] * 5)
def print_board(board):
    """Print the grid, one space-separated row per line."""
    for row in board:
        print " ".join(row)
# Show the empty board before the player guesses.
print_board(board)
def random_row(board):
    """Return a random valid row index for ``board``."""
    last_row = len(board) - 1
    return randint(0, last_row)
def random_col(board):
    """Return a random valid column index for ``board``."""
    last_col = len(board[0]) - 1
    return randint(0, last_col)
# Hide the ship at a random cell, then take a single guess from the player
# (Python 2: raw_input / print statements).
ship_row = random_row(board)
ship_col = random_col(board)
guess_row = int(raw_input("Guess Row:"))
guess_col = int(raw_input("Guess Col:"))
# Debug output: reveals the ship's position after the guess is entered.
print ship_row
print ship_col
if guess_row == ship_row and guess_col == ship_col:
    print "Congratulations! You sank my battleship!"
else:
    # Distinguish out-of-bounds, repeated, and plain missed guesses.
    if (guess_row not in range(5) or guess_col not in range(5)):
        print "Oops, that's not even in the ocean."
    elif (board[guess_row][guess_col] == "X"):
        print "You guessed that one already."
    else:
        print "You missed my battleship!"
        #board[guess_row][guess_col] = "X"
    print_board(board)
|
{
"content_hash": "420345b009f213a906069a84714ab513",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 68,
"avg_line_length": 21.104166666666668,
"alnum_prop": 0.5784797630799605,
"repo_name": "abhisekkumar/python",
"id": "517e2176fcd7a9bcc1fb9e203196a758ea0fbb26",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "battleship.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2180"
}
],
"symlink_target": ""
}
|
import typing as t
from marshmallow import EXCLUDE, ValidationError, fields
from marshmallow_enum import EnumField
from marshmallow_oneofschema import OneOfSchema
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.marshmallow import mm
from indico.modules.attachments.models.attachments import AttachmentType
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event, EventType
from indico.modules.events.notes.models.notes import EventNote
from indico.modules.search.base import SearchTarget
from indico.web.flask.util import url_for
class _ResultSchemaBase(mm.Schema):
    """Base schema that silently drops unknown fields from incoming data."""
    class Meta:
        unknown = EXCLUDE
class CategoryPathSchema(_ResultSchemaBase):
    """One entry in a result's category breadcrumb path."""
    #: The category id
    id = fields.Int(required=True)
    #: The category title
    title = fields.String(required=True)
    #: The category display URL (derived, not taken from the search engine)
    url = fields.Method('_get_url')
    #: Constant discriminator for breadcrumb entries
    type = fields.Constant('category')
    def _get_url(self, data):
        return url_for('categories.display', category_id=data['id'])
class PersonSchema(_ResultSchemaBase):
    """A person attached to a search result (e.g. author or speaker)."""
    #: The person's name
    name = fields.String(required=True)
    #: The person's affiliation
    affiliation = fields.String(missing=None)
class HighlightSchema(_ResultSchemaBase):
    """Snippets of matched text the UI can highlight."""
    #: The field's content to highlight
    content = fields.List(fields.String())
    #: The field's description to highlight
    description = fields.List(fields.String())
class ResultSchemaBase(_ResultSchemaBase):
    """Fields shared by every concrete search-result schema."""
    #: The parent category chain
    category_path: CategoryPathSchema = fields.List(fields.Nested(CategoryPathSchema), required=True)
def require_search_target(target):
    """Build a validator that only accepts the given SearchTarget value."""
    def _check(candidate):
        if candidate != target:
            raise ValidationError(f'type must be {target}, got {candidate}')
    return _check
class CategoryResultSchema(ResultSchemaBase):
    """Search result payload for a category."""
    #: The record type (must be SearchTarget.category)
    type = EnumField(SearchTarget, validate=require_search_target(SearchTarget.category))
    #: The category id
    category_id = fields.Int(required=True)
    #: The category title
    title = fields.String(required=True)
    #: The category display URL (derived field)
    url = fields.Method('_get_url')
    def _get_url(self, data):
        return url_for('categories.display', category_id=data['category_id'])
class LocationResultSchema(mm.Schema):
    """Physical location (venue/room/address) of an event or contribution."""
    #: The venue name
    venue_name = fields.String(required=True)
    #: The room name
    room_name = fields.String(required=True)
    #: The address
    address = fields.String(required=True)
class EventResultSchema(ResultSchemaBase):
    """Search result payload for an event."""
    #: The record type
    type: SearchTarget = EnumField(SearchTarget, validate=require_search_target(SearchTarget.event))
    #: The event id
    event_id = fields.Int(required=True)
    #: The event title
    title = fields.String(required=True)
    #: The event description
    description = fields.String(required=True)
    #: The event type
    event_type = EnumField(EventType, required=True)
    #: The event start date time
    start_dt = fields.DateTime(required=True)
    #: The event end date time
    end_dt = fields.DateTime(required=True)
    #: The event associated persons
    persons: PersonSchema = fields.List(fields.Nested(PersonSchema), required=True)
    #: The event location
    location: LocationResultSchema = fields.Nested(LocationResultSchema, required=True)
    #: The event content to highlight
    highlight = fields.Nested(HighlightSchema, missing={})
    # extra fields that are not taken from the data returned by the search engine
    url = fields.Method('_get_url')
    def _get_url(self, data):
        return url_for('events.display', event_id=data['event_id'])
class ContributionResultSchema(ResultSchemaBase):
    """Search result payload for a contribution."""
    #: The record type
    type: SearchTarget = EnumField(SearchTarget, validate=require_search_target(SearchTarget.contribution))
    #: The contribution id
    contribution_id = fields.Int(required=True)
    #: The contribution event id
    event_id = fields.Int(required=True)
    #: The contribution title
    title = fields.String(required=True)
    #: The contribution description
    description = fields.String(required=True)
    #: The contribution start date time
    start_dt = fields.DateTime(missing=None)
    #: The contribution end date time
    end_dt = fields.DateTime(missing=None)
    #: The contribution associated persons
    persons: PersonSchema = fields.List(fields.Nested(PersonSchema), required=True)
    #: The contribution location
    location: LocationResultSchema = fields.Nested(LocationResultSchema, required=True)
    #: The contribution duration
    duration = fields.TimeDelta(precision=fields.TimeDelta.MINUTES)
    #: The contribution content to highlight
    highlight = fields.Nested(HighlightSchema, missing={})
    # extra fields that are not taken from the data returned by the search engine
    url = fields.Method('_get_url')
    event_path = fields.Method('_get_event_path', dump_only=True)
    def _get_url(self, data):
        return url_for('contributions.display_contribution', event_id=data['event_id'],
                       contrib_id=data['contribution_id'])
    def _get_event_path(self, data):
        # Breadcrumb for the UI; empty if the event no longer exists in the DB.
        if not (event := Event.get(data['event_id'])):
            return []
        return [
            {'type': 'event', 'id': event.id, 'title': event.title, 'url': event.url}
        ]
class SubContributionResultSchema(ContributionResultSchema):
    """Search result payload for a sub-contribution.

    Inherits all contribution fields; overrides the URL and breadcrumb to
    point at the sub-contribution.
    """
    #: The record type
    type: SearchTarget = EnumField(SearchTarget, validate=require_search_target(SearchTarget.subcontribution))
    #: The sub-contribution id
    subcontribution_id = fields.Int(required=True)
    def _get_url(self, data):
        return url_for('contributions.display_subcontribution', event_id=data['event_id'],
                       contrib_id=data['contribution_id'], subcontrib_id=data['subcontribution_id'])
    def _get_event_path(self, data):
        # Breadcrumb event -> contribution; empty if the contribution is gone.
        if not (contrib := Contribution.get(data['contribution_id'])):
            return []
        contrib_url = url_for('contributions.display_contribution', contrib)
        return [
            {'type': 'event', 'id': contrib.event.id, 'title': contrib.event.title, 'url': contrib.event.url},
            {'type': 'contribution', 'id': contrib.id, 'title': contrib.title, 'url': contrib_url},
        ]
def _get_event_path(obj):
    """Build the breadcrumb path for an object linked to an event.

    Always starts with the event; appends the contribution (and
    sub-contribution) entries depending on ``obj.link_type``.
    """
    def _entry(kind, item, url):
        return {'type': kind, 'id': item.id, 'title': item.title, 'url': url}

    path = [_entry('event', obj.event, obj.event.url)]
    if obj.link_type == LinkType.contribution:
        contrib = obj.contribution
        path.append(_entry('contribution', contrib,
                           url_for('contributions.display_contribution', contrib)))
    elif obj.link_type == LinkType.subcontribution:
        subcontrib = obj.subcontribution
        contrib = subcontrib.contribution
        path.append(_entry('contribution', contrib,
                           url_for('contributions.display_contribution', contrib)))
        path.append(_entry('subcontribution', subcontrib,
                           url_for('contributions.display_subcontribution', subcontrib)))
    return path
class AttachmentResultSchema(ResultSchemaBase):
    """Search result payload for an attachment."""
    #: The record type
    type: SearchTarget = EnumField(SearchTarget, validate=require_search_target(SearchTarget.attachment))
    #: The attachment id
    attachment_id = fields.Int(required=True)
    #: The attachment folder id
    folder_id = fields.Int(required=True)
    #: The attachment event id
    event_id = fields.Int(required=True)
    #: The attachment contribution id
    contribution_id = fields.Int(missing=None)
    #: The attachment sub-contribution id
    subcontribution_id = fields.Int(missing=None)
    #: The attachment title
    title = fields.String(required=True)
    #: The attachment filename
    filename = fields.String(missing=None)
    #: The attachment author
    user: PersonSchema = fields.Nested(PersonSchema, missing=None)
    #: The attachment type
    attachment_type: AttachmentType = EnumField(AttachmentType, required=True)
    #: The attachment last modified date time
    modified_dt = fields.DateTime(required=True)
    # extra fields that are not taken from the data returned by the search engine
    url = fields.Method('_get_url')
    event_path = fields.Method('_get_event_path', dump_only=True)
    def _get_url(self, data):
        # 'go' is the fallback path segment when no filename is stored
        # (e.g. link-type attachments).
        return url_for('attachments.download', event_id=data['event_id'],
                       contrib_id=data['contribution_id'], subcontrib_id=data['subcontribution_id'],
                       folder_id=data['folder_id'], attachment_id=data['attachment_id'],
                       filename=(data['filename'] or 'go'))
    def _get_event_path(self, data):
        # Breadcrumb derived from the folder's link; empty if folder is gone.
        if not (folder := AttachmentFolder.get(data['folder_id'])):
            return []
        return _get_event_path(folder)
class EventNoteResultSchema(ResultSchemaBase):
    """Search result payload for an event note (minutes)."""
    #: The record type
    type: SearchTarget = EnumField(SearchTarget, validate=require_search_target(SearchTarget.event_note))
    #: The note id
    note_id = fields.Int(required=True)
    #: The note event id
    event_id = fields.Int(required=True)
    #: The note contribution id
    contribution_id = fields.Int(missing=None)
    #: The note sub-contribution id
    subcontribution_id = fields.Int(missing=None)
    #: The note title
    title = fields.String(required=True)
    #: The note author
    user: PersonSchema = fields.Nested(PersonSchema, missing=None)
    #: The note last modification date time
    modified_dt = fields.DateTime(required=True)
    #: The note content
    content = fields.String(required=True)
    #: The note content to highlight
    highlight: HighlightSchema = fields.Nested(HighlightSchema, missing={})
    # extra fields that are not taken from the data returned by the search engine
    url = fields.Method('_get_url')
    event_path = fields.Method('_get_event_path', dump_only=True)
    def _get_url(self, data):
        return url_for('event_notes.goto', event_id=data['event_id'], note_id=data['note_id'])
    def _get_event_path(self, data):
        # Breadcrumb derived from the note's link; empty if the note is gone.
        if not (note := EventNote.get(data['note_id'])):
            return []
        return _get_event_path(note)
class BucketSchema(_ResultSchemaBase):
    """Represents an individual aggregation bucket element."""
    #: The aggregation key.
    key: str = fields.String(required=True)
    #: The number of elements.
    count: int = fields.Int(required=True)
    #: The key that identifies the element's filter.
    filter: str = fields.String(required=True)
class AggregationSchema(_ResultSchemaBase):
    """Represents an aggregation list."""
    #: The name of the aggregation.
    label: str = fields.String(required=True)
    #: A bucket list representing each group.
    buckets: t.List[BucketSchema] = fields.List(fields.Nested(BucketSchema), required=True)
class ResultItemSchema(OneOfSchema):
    """Polymorphic schema dispatching on the result's ``type`` field."""
    type_field = 'type'
    # Keep the discriminator in the dumped output.
    type_field_remove = False
    #: Maps SearchTarget names to the concrete result schema.
    type_schemas = {
        SearchTarget.category.name: CategoryResultSchema,
        SearchTarget.event.name: EventResultSchema,
        SearchTarget.contribution.name: ContributionResultSchema,
        SearchTarget.subcontribution.name: SubContributionResultSchema,
        SearchTarget.attachment.name: AttachmentResultSchema,
        SearchTarget.event_note.name: EventNoteResultSchema,
    }
    class Meta:
        # OneOfSchema passes the own schema's `unknown` value to the target schemas
        unknown = EXCLUDE
    def get_obj_type(self, obj):
        # Objects carry a SearchTarget enum; dispatch on its name.
        return obj['type'].name
    def _dump(self, obj, *, update_fields=True, **kwargs):
        # Workaround: OneOfSchema returns (data, errors) tuples instead of
        # raising on dump errors; convert that into a ValidationError.
        rv = super()._dump(obj, update_fields=update_fields, **kwargs)
        if isinstance(rv, tuple):
            # https://github.com/marshmallow-code/marshmallow-oneofschema/issues/48
            raise ValidationError(rv[1]['_schema'])
        return rv
class PageNavSchema(_ResultSchemaBase):
    """Previous/next page numbers for paginated results (None at the ends)."""
    prev = fields.Int(required=True, allow_none=True)
    next = fields.Int(required=True, allow_none=True)
class ResultSchema(_ResultSchemaBase):
    """Top-level search response: totals, pagination, results, aggregations."""
    #: Total number of matches
    total = fields.Int(required=True)
    #: Total number of pages
    pages = fields.Int(missing=None)
    #: Previous/next page navigation
    pagenav = fields.Nested(PageNavSchema, missing=None)
    #: The polymorphic result list
    results = fields.List(fields.Nested(ResultItemSchema), required=True)
    #: Aggregations keyed by name
    aggregations = fields.Dict(fields.String(), fields.Nested(AggregationSchema), missing={})
|
{
"content_hash": "1e58c759febe55c1ba55cfaf174799e8",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 111,
"avg_line_length": 39.56369426751592,
"alnum_prop": 0.6928278193673025,
"repo_name": "ThiefMaster/indico",
"id": "b964b78342d87ea9d13e320f3488f63fad7d8d89",
"size": "12637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/search/result_schemas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.base import (clone, ClusterMixin, ClassifierMixin)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# Cached (X, y) Boston housing subset, lazily initialised by _boston_subset().
BOSTON = None
# Estimators that require 2-D multi-output y in the checks below.
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
    """Return a standardised, shuffled subset of the Boston dataset.

    The result is computed once and cached in the module-level BOSTON, so
    ``n_samples`` only takes effect on the first call.
    """
    global BOSTON
    if BOSTON is None:
        dataset = load_boston()
        X, y = shuffle(dataset.data, dataset.target, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        BOSTON = StandardScaler().fit_transform(X), y
    return BOSTON
def set_fast_parameters(estimator):
    """Shrink an estimator's iteration/size parameters so checks run fast.

    Mutates ``estimator`` in place; only parameters the estimator actually
    exposes are touched.
    """
    # speed up some estimators
    params = estimator.get_params()
    if "n_iter" in params:
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        # NMF
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)
    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """Wrapper that is not an ndarray but supports ``__array__`` conversion."""

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        # numpy calls this to obtain the underlying data.
        return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_regressors_classifiers_sparse_data(name, Estimator):
    """Fit/predict on sparse CSR input; require graceful failure if unsupported.

    The estimator must either handle sparse input or raise a TypeError whose
    message mentions 'sparse'; any other exception fails the check.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    # zero out most entries so the CSR matrix is genuinely sparse
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    # catch deprecation warnings
    with warnings.catch_warnings():
        estimator = Estimator()
    set_fast_parameters(estimator)
    # fit and predict
    try:
        estimator.fit(X, y)
        estimator.predict(X)
        if hasattr(estimator, 'predict_proba'):
            estimator.predict_proba(X)
    except TypeError as e:
        if not 'sparse' in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: error message state explicitly that "
                  "sparse input is not supported if this is not the case."
                  % name)
            raise
    except Exception:
        print("Estimator %s doesn't seem to fail gracefully on "
              "sparse data: it should raise a TypeError if sparse input "
              "is explicitly not supported." % name)
        raise
def check_transformer(name, Transformer):
    """Run _check_transformer on blob data, both as arrays and plain lists."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # shift so all values are non-negative
    X -= X.min()
    for data, labels in ((X, y), (X.tolist(), y.tolist())):
        _check_transformer(name, Transformer, data, labels)
def check_transformer_data_not_an_array(name, Transformer):
    """Run _check_transformer with inputs wrapped in NotAnArray objects."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    wrapped_X = NotAnArray(X)
    wrapped_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, wrapped_X, wrapped_y)
def _check_transformer(name, Transformer, X, y):
    """Core transformer check: fit/fit_transform/transform consistency.

    Verifies output sample counts, agreement between ``transform`` and
    ``fit_transform``, stability across consecutive ``fit_transform`` calls,
    and that malformed input (transposed X) raises ValueError.
    """
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    if name == "KernelPCA":
        transformer.remove_zero_eig = False
    set_fast_parameters(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition estimators need a 2-column y
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y
    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    # some transformers return a tuple of arrays; check each element
    if isinstance(X_pred, tuple):
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        assert_equal(X_pred.shape[0], n_samples)
    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
def check_transformer_sparse_data(name, Transformer):
    """Fit a transformer on sparse CSR input; require graceful TypeError
    mentioning 'sparse' if sparse input is unsupported.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    # zero out most entries so the CSR matrix is genuinely sparse
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # scalers cannot center sparse data; disable mean subtraction
        if name in ['Scaler', 'StandardScaler']:
            transformer = Transformer(with_mean=False)
        else:
            transformer = Transformer()
    set_fast_parameters(transformer)
    # fit
    try:
        transformer.fit(X, y)
    except TypeError as e:
        if not 'sparse' in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: error message state explicitly that "
                  "sparse input is not supported if this is not the case."
                  % name)
            raise
    except Exception:
        print("Estimator %s doesn't seem to fail gracefully on "
              "sparse data: it should raise a TypeError if sparse input "
              "is explicitly not supported." % name)
        raise
def check_estimators_nan_inf(name, Estimator):
    """Check that fit/predict/transform reject NaN and inf inputs.

    The estimator must raise a ValueError whose message mentions 'inf' or
    'NaN'; any other exception — or no exception at all — fails the check.

    Bug fix: the predict and transform ``except Exception`` branches
    previously printed the error but did not re-raise, so an estimator
    raising the wrong exception type silently passed the check (the fit
    branch already re-raised).
    """
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                if issubclass(Estimator, ClusterMixin):
                    estimator.fit(X_train)
                else:
                    estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, Estimator)
            # actually fit
            if issubclass(Estimator, ClusterMixin):
                # All estimators except clustering algorithm
                # support fitting with (optional) y
                estimator.fit(X_train_finite)
            else:
                estimator.fit(X_train_finite, y)
            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    raise exc  # BUG FIX: was silently swallowed
                else:
                    raise AssertionError(error_string_predict, Estimator)
            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    raise exc  # BUG FIX: was silently swallowed
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
    """Check that a fitted transformer survives a pickle round-trip.

    The unpickled transformer must produce (almost) the same transform
    output as the original.
    """
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    # shift so all values are non-negative
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    if not hasattr(transformer, 'transform'):
        return
    set_random_state(transformer)
    set_fast_parameters(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition estimators need a 2-column y
        random_state = np.random.RandomState(seed=12345)
        y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    transformer.fit(X, y_)
    X_pred = transformer.fit(X, y_).transform(X)
    pickled_transformer = pickle.dumps(transformer)
    unpickled_transformer = pickle.loads(pickled_transformer)
    pickled_X_pred = unpickled_transformer.transform(X)
    assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
    """partial_fit must raise ValueError if n_features changes between calls."""
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Alg()
    set_fast_parameters(estimator)
    if isinstance(estimator, ClassifierMixin):
        estimator.partial_fit(X, y, classes=np.unique(y))
    else:
        estimator.partial_fit(X, y)
    # second call with one feature fewer must be rejected
    assert_raises(ValueError, estimator.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Fit a clustering algorithm on easy blobs and sanity-check its labels.

    Checks label shape, agreement with the true clusters (adjusted Rand
    score > 0.4), list input support, and fit/fit_predict consistency.

    Bug fix: the SpectralClustering guard used ``is`` to compare strings,
    which relies on CPython string interning; replaced with ``==``.
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_fast_parameters(alg)
        if hasattr(alg, "n_clusters"):
            alg.set_params(n_clusters=3)
        set_random_state(alg)
        if name == 'AffinityPropagation':
            alg.set_params(preference=-100)
            alg.set_params(max_iter=100)
        # fit
        alg.fit(X)
    # with lists
    alg.fit(X.tolist())
    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':  # BUG FIX: was `name is '...'`
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
def check_classifiers_one_label(name, Classifier):
    """Check classifier behavior when the training data has a single class:
    it must either fit and predict that class, or raise an informative
    ``ValueError`` mentioning "class".
    """
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # IDIOM FIX: ``x not in y`` instead of ``not x in y``
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                # informative error: acceptable outcome, nothing more to check
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Fit ``Classifier`` on a multi-class and a binary blob problem and check
    the core API contract: malformed-input errors, ``classes_``, prediction
    shape/accuracy, and consistency of ``decision_function``/``predict_proba``
    with ``predict``.
    """
    X_m, y_m = make_blobs(random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name in ['BernoulliNB', 'MultinomialNB']:
                # these require non-negative input
                X -= X.min()
            set_fast_parameters(classifier)
            # raises error on malformed input for fit
            assert_raises(ValueError, classifier.fit, X, y[:-1])
            # fit
            classifier.fit(X, y)
            # with lists
            classifier.fit(X.tolist(), y.tolist())
            assert_true(hasattr(classifier, "classes_"))
            y_pred = classifier.predict(X)
            assert_equal(y_pred.shape, (n_samples,))
            # training set performance
            if name not in ['BernoulliNB', 'MultinomialNB']:
                assert_greater(accuracy_score(y, y_pred), 0.85)
            # raises error on malformed input for predict
            assert_raises(ValueError, classifier.predict, X.T)
            if hasattr(classifier, "decision_function"):
                try:
                    # decision_function agrees with predict:
                    decision = classifier.decision_function(X)
                    # BUG FIX: compare ints with ``==``; ``is`` tests identity
                    # and only works by accident for small cached ints.
                    if n_classes == 2:
                        assert_equal(decision.shape, (n_samples,))
                        # np.int is a deprecated alias of the builtin int
                        dec_pred = (decision.ravel() > 0).astype(int)
                        assert_array_equal(dec_pred, y_pred)
                    if (n_classes == 3
                            and not isinstance(classifier, BaseLibSVM)):
                        # 1on1 of LibSVM works differently
                        assert_equal(decision.shape, (n_samples, n_classes))
                        assert_array_equal(np.argmax(decision, axis=1), y_pred)
                    # raises error on malformed input for decision_function
                    # (previously asserted twice; once is enough)
                    assert_raises(ValueError,
                                  classifier.decision_function, X.T)
                except NotImplementedError:
                    pass
            if hasattr(classifier, "predict_proba"):
                # predict_proba agrees with predict:
                y_prob = classifier.predict_proba(X)
                assert_equal(y_prob.shape, (n_samples, n_classes))
                assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
                # check that probas for all classes sum to one
                assert_array_almost_equal(np.sum(y_prob, axis=1),
                                          np.ones(n_samples))
                # raises error on malformed input for predict_proba
                # (previously asserted twice; once is enough)
                assert_raises(ValueError, classifier.predict_proba, X.T)
def check_classifiers_input_shapes(name, Classifier):
    """Check that fitting with a column-vector y warns and matches 1-D-y predictions."""
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=1)
    X = StandardScaler().fit_transform(X)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # fit
        classifier.fit(X, y)
    # reference predictions from the 1-D-y fit
    y_pred = classifier.predict(X)
    # reseed so the 2-D-y refit below is comparable to the first fit
    set_random_state(classifier)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        classifier.fit(X, y[:, np.newaxis])
    # exactly one warning expected: the DataConversionWarning
    assert_equal(len(w), 1)
    # predictions must be unaffected by the y shape used during fit
    assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
    """Check classifiers handle string labels (both str and object dtype) and
    expose a matching ``classes_`` attribute."""
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    # run once with str dtype and once with object dtype labels
    for y_names in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = y_names
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name == 'BernoulliNB':
                # BernoulliNB binarizes input; center the threshold on the data
                classifier.set_params(binarize=X.mean())
            set_fast_parameters(classifier)
            # fit
            classifier.fit(X, y_)
        y_pred = classifier.predict(X)
        # training set performance: every class must be predicted at least once
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        # report (but do not fail on) a mismatching classes_ attribute
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
    """Check that a fitted classifier survives a pickle round-trip with
    identical predictions."""
    X, y = make_blobs(random_state=0)
    X, y = shuffle(X, y, random_state=7)
    X -= X.min()
    # silence deprecation warnings from old estimators
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # malformed input (truncated y) must raise
        assert_raises(ValueError, classifier.fit, X, y[:-1])
        classifier.fit(X, y)
        reference_pred = classifier.predict(X)
        # round-trip through pickle and compare predictions
        restored = pickle.loads(pickle.dumps(classifier))
        assert_array_almost_equal(restored.predict(X), reference_pred)
def check_regressors_int(name, Regressor):
    """Check that integer and float targets yield (nearly) identical predictions."""
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = multioutput_estimator_convert_y_2d(name, y)
    if name == 'OrthogonalMatchingPursuitCV':
        # FIXME: This test is unstable on Travis, see issue #3190.
        check_skip_travis()
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        regressor_1 = Regressor()
        regressor_2 = Regressor()
        set_fast_parameters(regressor_1)
        set_fast_parameters(regressor_2)
        set_random_state(regressor_1)
        set_random_state(regressor_2)
        if name in CROSS_DECOMPOSITION:
            # cross-decomposition models need a 2-column target
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        # fit on int targets
        regressor_1.fit(X, y_)
        pred1 = regressor_1.predict(X)
        # BUG FIX: ``np.float`` is a deprecated alias of the builtin ``float``
        # (removed in NumPy 1.24); use ``float`` directly -- identical behavior.
        regressor_2.fit(X, y_.astype(float))
        pred2 = regressor_2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
    """Fit ``Regressor`` on a boston subset and check input validation,
    list-input support, and a minimal training-set score."""
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    if name == 'OrthogonalMatchingPursuitCV':
        # FIXME: This test is unstable on Travis, see issue #3190.
        check_skip_travis()
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
        set_fast_parameters(regressor)
        if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            regressor.alpha = 0.01
        # raises error on malformed input for fit
        assert_raises(ValueError, regressor.fit, X, y[:-1])
        # fit
        if name in CROSS_DECOMPOSITION:
            # cross-decomposition models need a 2-column target
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        set_random_state(regressor)
        regressor.fit(X, y_)
        # list inputs must be accepted as well
        regressor.fit(X.tolist(), y_.tolist())
        regressor.predict(X)
        # TODO: find out why PLS and CCA fail. RANSAC is random
        # and furthermore assumes the presence of outliers, hence
        # skipped
        if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
            assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
    """Check that a fitted regressor survives a pickle round-trip with
    identical predictions."""
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    if name == 'OrthogonalMatchingPursuitCV':
        # FIXME: This test is unstable on Travis, see issue #3190.
        check_skip_travis()
    rnd = np.random.RandomState(0)
    # silence deprecation warnings from old estimators
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
        set_fast_parameters(regressor)
        if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            regressor.alpha = 0.01
        if name in CROSS_DECOMPOSITION:
            # cross-decomposition models need a 2-column target
            targets = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]).T
        else:
            targets = y
        regressor.fit(X, targets)
        reference_pred = regressor.predict(X)
        # round-trip through pickle and compare predictions
        restored = pickle.loads(pickle.dumps(regressor))
        assert_array_almost_equal(restored.predict(X), reference_pred)
def check_class_weight_classifiers(name, Classifier):
    """Check that an extreme class_weight makes the classifier predict the
    heavily weighted class almost exclusively on a very noisy dataset."""
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        # n_centers observed in the training split may differ from requested
        n_centers = len(np.unique(y_train))
        if n_centers == 2:
            # class 0 is weighted ~10^7 times heavier than the rest
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
            if hasattr(classifier, "n_iter"):
                # give iterative solvers enough iterations to converge
                classifier.set_params(n_iter=100)
            set_random_state(classifier)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            # the overweighted class must dominate the predictions
            assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
                                        X_test, y_test, weights):
    """Check that ``class_weight='auto'`` improves f1 over an unweighted fit."""
    # silence deprecation warnings from old estimators
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            # give iterative solvers enough iterations to converge
            classifier.set_params(n_iter=100)
        set_random_state(classifier)
        # baseline: plain fit without class weighting
        y_pred_plain = classifier.fit(X_train, y_train).predict(X_test)
        # refit the same estimator with automatic class weights
        classifier.set_params(class_weight='auto')
        y_pred_weighted = classifier.fit(X_train, y_train).predict(X_test)
        assert_greater(f1_score(y_test, y_pred_weighted),
                       f1_score(y_test, y_pred_plain))
def check_class_weight_auto_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    # labels are {1, -1}: non-contiguous on purpose
    y = [1, 1, 1, -1, -1]
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            # This is a very small dataset, default n_iter are likely to prevent
            # convergence
            classifier.set_params(n_iter=1000)
        set_random_state(classifier)
        # Let the model compute the class frequencies
        classifier.set_params(class_weight='auto')
        coef_auto = classifier.fit(X, y).coef_.copy()
        # Count each label occurrence to reweight manually
        # (3 samples of class 1, 2 samples of class -1; normalized by the mean)
        mean_weight = (1. / 3 + 1. / 2) / 2
        class_weight = {
            1: 1. / 3 / mean_weight,
            -1: 1. / 2 / mean_weight,
        }
        classifier.set_params(class_weight=class_weight)
        coef_manual = classifier.fit(X, y).coef_.copy()
        # 'auto' must reproduce the manually computed frequency weights
        assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    """Fitting must not mutate the hyper-parameters reported by ``get_params``."""
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()
        if name in ('MiniBatchDictLearning', 'MiniBatchSparsePCA'):
            # FIXME
            # for MiniBatchDictLearning and MiniBatchSparsePCA
            estimator.batch_size = 1
        set_fast_parameters(estimator)
        set_random_state(estimator)
        before_fit = estimator.get_params()
        estimator.fit(X, y)
        after_fit = estimator.get_params()
        for param_name, original_value in before_fit.items():
            assert_false(np.any(after_fit[param_name] != original_value),
                         "Estimator %s changes its parameter %s"
                         " from %s to %s during fit."
                         % (name, param_name, original_value,
                            after_fit[param_name]))
def check_cluster_overwrite_params(name, Clustering):
    """Fitting a clustering estimator must not mutate its hyper-parameters."""
    X, y = make_blobs(random_state=0, n_samples=9)
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        clustering = Clustering()
        set_fast_parameters(clustering)
        before_fit = clustering.get_params()
        clustering.fit(X)
        after_fit = clustering.get_params()
        for param_name, original_value in before_fit.items():
            assert_false(np.any(after_fit[param_name] != original_value),
                         "Estimator %s changes its parameter %s"
                         " from %s to %s during fit."
                         % (name, param_name, original_value,
                            after_fit[param_name]))
def check_sparsify_multiclass_classifier(name, Classifier):
    """Check predictions survive ``sparsify()`` and pickling on a 3-class problem."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    clf = Classifier()
    clf.fit(X, y)
    expected = clf.predict(X)
    # sparsify with dense inputs must not change predictions
    clf.sparsify()
    assert_true(sparse.issparse(clf.coef_))
    assert_array_equal(clf.predict(X), expected)
    # a pickle round-trip must keep coef_ sparse and predictions stable
    clf = pickle.loads(pickle.dumps(clf))
    assert_true(sparse.issparse(clf.coef_))
    assert_array_equal(clf.predict(X), expected)
def check_sparsify_binary_classifier(name, Estimator):
    """Check predictions survive ``sparsify()`` and pickling on a binary problem."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    y = [1, 1, 1, 2, 2, 2]
    clf = Estimator()
    clf.fit(X, y)
    expected = clf.predict(X)
    # sparsify with dense inputs must not change predictions
    clf.sparsify()
    assert_true(sparse.issparse(clf.coef_))
    assert_array_equal(clf.predict(X), expected)
    # a pickle round-trip must keep coef_ sparse and predictions stable
    clf = pickle.loads(pickle.dumps(clf))
    assert_true(sparse.issparse(clf.coef_))
    assert_array_equal(clf.predict(X), expected)
def check_classifier_data_not_an_array(name, Estimator):
    """Run the NotAnArray check with a small fixed classification dataset."""
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    labels = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, labels)
def check_regressor_data_not_an_array(name, Estimator):
    """Run the NotAnArray check using a 50-sample boston subset."""
    X, y = _boston_subset(n_samples=50)
    targets = multioutput_estimator_convert_y_2d(name, y)
    check_estimators_data_not_an_array(name, Estimator, X, targets)
def check_estimators_data_not_an_array(name, Estimator, X, y):
    """Check that fitting on array-like (NotAnArray) inputs gives the same
    predictions as fitting on the equivalent ndarrays."""
    if name in CROSS_DECOMPOSITION:
        raise SkipTest
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        estimator_1 = Estimator()
        estimator_2 = Estimator()
        set_fast_parameters(estimator_1)
        set_fast_parameters(estimator_2)
        set_random_state(estimator_1)
        set_random_state(estimator_2)
        # NotAnArray wraps an ndarray but is not an ndarray subclass,
        # exercising the __array__ conversion path in input validation
        y_ = NotAnArray(np.asarray(y))
        X_ = NotAnArray(np.asarray(X))
        # fit
        estimator_1.fit(X_, y_)
        pred1 = estimator_1.predict(X_)
        estimator_2.fit(X, y)
        pred2 = estimator_2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
    """Check that the estimator is default-constructible, clonable, reprable,
    and that ``__init__`` only stores its arguments (get_params == defaults)."""
    # any classifier works as the required sub-estimator for meta-estimators
    classifier = LDA()
    # test default-constructibility
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(isinstance(estimator.set_params(), Estimator))
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            args, varargs, kws, defaults = inspect.getargspec(init)
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they need a non-default argument
            args = args[2:]
        else:
            # drop 'self'
            args = args[1:]
        if args:
            # non-empty list
            assert_equal(len(args), len(defaults))
        else:
            return
        for arg, default in zip(args, defaults):
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            if isinstance(params[arg], np.ndarray):
                # ndarray defaults need element-wise comparison
                assert_array_equal(params[arg], default)
            else:
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return ``y`` reshaped to a 2-D column for estimators requiring 2-D targets.

    Parameters
    ----------
    name : str
        Estimator class name; only the MultiTask* estimators need a 2-D y.
    y : array-like of shape (n_samples,)
        Target values.

    Returns
    -------
    ``y`` unchanged, or an ndarray of shape (n_samples, 1) for MultiTask names.
    """
    if name in ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                'MultiTaskLasso', 'MultiTaskElasticNet'):
        # ROBUSTNESS FIX: np.asarray accepts plain lists, which the previous
        # direct ``y[:, np.newaxis]`` indexing rejected; ndarray input is
        # passed through by asarray unchanged.
        return np.asarray(y)[:, np.newaxis]
    return y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    """Verify an iterative estimator exposes ``n_iter_ > 0`` after fitting iris."""
    iris = load_iris()
    features, target = iris.data, iris.target
    if multi_output:
        target = target[:, np.newaxis]
    set_random_state(estimator, 0)
    # AffinityPropagation is unsupervised and takes no target
    if name == 'AffinityPropagation':
        fit_args = (features,)
    else:
        fit_args = (features, target)
    estimator.fit(*fit_args)
    # at least one iteration must have run
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    """Verify an iterative transformer runs more than one iteration (``n_iter_ > 1``)."""
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        # shift to strictly positive values (e.g. for NMF-like transformers)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)
    # These return a n_iter per component.
    if name in CROSS_DECOMPOSITION:
        for iter_ in estimator.n_iter_:
            assert_greater(iter_, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
|
{
"content_hash": "fcb06db75f09ff37e0ba34e2518a777b",
"timestamp": "",
"source": "github",
"line_count": 998,
"max_line_length": 81,
"avg_line_length": 35.763527054108216,
"alnum_prop": 0.6023478650678024,
"repo_name": "abhishekgahlot/scikit-learn",
"id": "1552fe4e8492c18d8382b3f8055f325b0c95f126",
"size": "35692",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/utils/estimator_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18219273"
},
{
"name": "C++",
"bytes": "1808975"
},
{
"name": "JavaScript",
"bytes": "22298"
},
{
"name": "Makefile",
"bytes": "4901"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5549217"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
}
|
"""Support for Somfy hubs."""
from abc import abstractmethod
import asyncio
from datetime import timedelta
import logging
from pymfy.api.devices.category import Category
import voluptuous as vol
from homeassistant.components.somfy import config_flow
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import callback
from homeassistant.helpers import (
config_entry_oauth2_flow,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import api
from .const import API, CONF_OPTIMISTIC, COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)

# Default polling interval; lengthened at setup when every device only
# reports assumed (stateless) data.
SCAN_INTERVAL = timedelta(minutes=1)
SCAN_INTERVAL_ALL_ASSUMED_STATE = timedelta(minutes=60)

# OAuth2 endpoints exposed by Home Assistant for the Somfy auth flow.
SOMFY_AUTH_CALLBACK_PATH = "/auth/somfy/callback"
SOMFY_AUTH_START = "/auth/somfy"

# YAML schema: client id and secret must be provided together ("oauth" group).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Inclusive(CONF_CLIENT_ID, "oauth"): cv.string,
                vol.Inclusive(CONF_CLIENT_SECRET, "oauth"): cv.string,
                vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Entity platforms forwarded for each config entry.
SOMFY_COMPONENTS = ["cover", "switch"]
async def async_setup(hass, config):
    """Set up the Somfy component.

    Stores the optimistic-mode flag and, when YAML credentials are present,
    registers a local OAuth2 implementation for the config flow.
    """
    hass.data[DOMAIN] = {}
    domain_config = config.get(DOMAIN, {})
    hass.data[DOMAIN][CONF_OPTIMISTIC] = domain_config.get(CONF_OPTIMISTIC, False)
    if CONF_CLIENT_ID in domain_config:
        # schema guarantees secret accompanies the id (vol.Inclusive "oauth")
        config_flow.SomfyFlowHandler.async_register_implementation(
            hass,
            config_entry_oauth2_flow.LocalOAuth2Implementation(
                hass,
                DOMAIN,
                config[DOMAIN][CONF_CLIENT_ID],
                config[DOMAIN][CONF_CLIENT_SECRET],
                "https://accounts.somfy.com/oauth/oauth/v2/auth",
                "https://accounts.somfy.com/oauth/oauth/v2/token",
            ),
        )
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Set up Somfy from a config entry."""
    # Backwards compat
    if "auth_implementation" not in entry.data:
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, "auth_implementation": DOMAIN}
        )
    implementation = (
        await config_entry_oauth2_flow.async_get_config_entry_implementation(
            hass, entry
        )
    )
    data = hass.data[DOMAIN]
    data[API] = api.ConfigEntrySomfyApi(hass, entry, implementation)

    async def _update_all_devices():
        """Update all the devices."""
        # get_devices performs blocking I/O; run it in the executor pool
        devices = await hass.async_add_executor_job(data[API].get_devices)
        return {dev.id: dev for dev in devices}

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="somfy device update",
        update_method=_update_all_devices,
        update_interval=SCAN_INTERVAL,
    )
    data[COORDINATOR] = coordinator
    # prime the coordinator so platforms start with device data
    await coordinator.async_refresh()
    # if no device reports real state, frequent polling is pointless: back off
    if all(not bool(device.states) for device in coordinator.data.values()):
        _LOGGER.debug(
            "All devices have assumed state. Update interval has been reduced to: %s",
            SCAN_INTERVAL_ALL_ASSUMED_STATE,
        )
        coordinator.update_interval = SCAN_INTERVAL_ALL_ASSUMED_STATE
    device_registry = await dr.async_get_registry(hass)
    # register hub devices so child entities can link to them via via_hub
    hubs = [
        device
        for device in coordinator.data.values()
        if Category.HUB.value in device.categories
    ]
    for hub in hubs:
        device_registry.async_get_or_create(
            config_entry_id=entry.entry_id,
            identifiers={(DOMAIN, hub.id)},
            manufacturer="Somfy",
            name=hub.name,
            model=hub.type,
        )
    # forward the entry to each supported platform (cover, switch)
    for component in SOMFY_COMPONENTS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Unload a config entry."""
    # drop the API client; platform unloads below stop its consumers
    hass.data[DOMAIN].pop(API, None)
    unload_tasks = (
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in SOMFY_COMPONENTS
    )
    await asyncio.gather(*unload_tasks)
    return True
class SomfyEntity(CoordinatorEntity, Entity):
    """Representation of a generic Somfy device."""

    def __init__(self, coordinator, device_id, somfy_api):
        """Initialize the Somfy device."""
        super().__init__(coordinator)
        # Somfy device id; doubles as the HA unique_id and as the key into
        # the coordinator's {device_id: device} mapping.
        self._id = device_id
        self.api = somfy_api

    @property
    def device(self):
        """Return data for the device id."""
        return self.coordinator.data[self._id]

    @property
    def unique_id(self):
        """Return the unique id base on the id returned by Somfy."""
        return self._id

    @property
    def name(self):
        """Return the name of the device."""
        return self.device.name

    @property
    def device_info(self):
        """Return device specific attributes.

        Implemented by platform classes.
        """
        return {
            "identifiers": {(DOMAIN, self.unique_id)},
            "name": self.name,
            "model": self.device.type,
            "via_hub": (DOMAIN, self.device.parent_id),
            # For the moment, Somfy only returns their own device.
            "manufacturer": "Somfy",
        }

    def has_capability(self, capability):
        """Test if device has a capability."""
        capabilities = self.device.capabilities
        return bool([c for c in capabilities if c.name == capability])

    @property
    def assumed_state(self):
        """Return if the device has an assumed state."""
        # a device with no reported states cannot be confirmed
        return not bool(self.device.states)

    @callback
    def _handle_coordinator_update(self):
        """Process an update from the coordinator."""
        # rebuild the wrapped pymfy device before HA reads entity properties
        self._create_device()
        super()._handle_coordinator_update()

    @abstractmethod
    def _create_device(self):
        """Update the device with the latest data."""
|
{
"content_hash": "85bc726f37c39497e3df25f5d61363a0",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 86,
"avg_line_length": 29.91866028708134,
"alnum_prop": 0.6321765552534784,
"repo_name": "soldag/home-assistant",
"id": "99b0a2ee56420d56075d6fcafb41a71376575c17",
"size": "6253",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/somfy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from xml.etree import ElementTree
import requests
"""
In order to use the patched get :
1 - import this module and set XML_DICT to your own XML dict.
The expected format is { "$entity_uri_1" : "$entity_xml_1",
"$entity_uri_2" : "$entity_xml_2",
...}
2 - Set up a test case and use the Mock's library path function to patch "genologics.lims.Lims.get" with this module's "patched get"
This will replace http calls to your lims by the XML you prepared. You can find an example of this in tests/test_example.py.
"""
XML_DICT = {}
def patched_get(*args, **kwargs):
    """Return the canned XML registered in XML_DICT for the requested URI.

    Drop-in mock for ``genologics.lims.Lims.get``: the URI and query params
    are taken from keyword arguments or scanned out of the positional ones,
    then resolved against XML_DICT instead of performing an HTTP call.
    """
    params = None
    if 'uri' in kwargs:
        uri = kwargs['uri']
    else:
        # fall back to a positional string argument
        for arg in args:
            if isinstance(arg, (str, unicode)):
                uri = arg
    if 'params' in kwargs:
        params = kwargs['params']
    else:
        # fall back to a positional dict argument
        for arg in args:
            if isinstance(arg, dict):
                params = arg
    # build the fully-qualified URL, including the encoded query string
    prepared = requests.Request(method='GET', url=uri, params=params).prepare()
    if not XML_DICT:
        raise Exception("You need to update genologics.test_utils.XML_DICT before using this function")
    try:
        return ElementTree.fromstring(XML_DICT[prepared.url])
    except KeyError:
        raise Exception("Cannot find mocked xml for uri {0}".format(prepared.url))
def dump_source_xml(lims):
    """After using a LIMS object, using this method on it will dump all the cached XML in a serialized dictionnary form,
    to be used with patched_get"""
    final_string = []
    final_string.append('{')
    # NOTE: iteritems() is Python 2 only -- this module predates Python 3.
    for k, v in lims.cache.iteritems():
        final_string.append("'{0}':".format(k))
        # refresh the entity so .xml() returns fully populated content
        v.get()
        # NOTE(review): replace('\n', "\n") is a no-op -- presumably intended
        # to escape newlines (replace('\n', '\\n')); confirm before changing.
        final_string.append('"""{0}""",'.format(v.xml().replace('\n', "\n")))
    final_string.append('}')
    return '\n'.join(final_string)
|
{
"content_hash": "1d0714aaa11c222251e921aaa3feb273",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 132,
"avg_line_length": 33.472727272727276,
"alnum_prop": 0.6056491037479631,
"repo_name": "Galithil/genologics",
"id": "93f79aec052ec747e28a402cf1bbe6295aae04b8",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genologics/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167303"
}
],
"symlink_target": ""
}
|
from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSGlyphGeneratorHelper (NSObject):
    # Records the arguments of the insertGlyphs call so the test below can
    # assert on the values PyObjC marshalled across the Objective-C bridge.
    def insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_(self, glyphs, length, glyphIndex, charIndex):
        self.glyphs = (glyphs, length, glyphIndex, charIndex)

    # Stub; only its argument type signatures are inspected by the test.
    def setIntAttribute_value_forGlyphAtIndex_(self, a, v, g): pass
class TestNSGlyphGenerator (TestCase):
    def testConstants(self):
        # NSGlyphGenerator layout-option bit flags
        self.assertEqual(NSShowControlGlyphs, (1 << 0))
        self.assertEqual(NSShowInvisibleGlyphs, (1 << 1))
        self.assertEqual(NSWantsBidiLevels, (1 << 2))

    def testProtocols(self):
        # argument metadata for the NSGlyphStorage protocol methods
        self.assertArgHasType(TestNSGlyphGeneratorHelper.setIntAttribute_value_forGlyphAtIndex_, 0, objc._C_NSInteger)
        self.assertArgHasType(TestNSGlyphGeneratorHelper.setIntAttribute_value_forGlyphAtIndex_, 1, objc._C_NSInteger)
        self.assertArgHasType(TestNSGlyphGeneratorHelper.setIntAttribute_value_forGlyphAtIndex_, 2, objc._C_NSUInteger)
        # round-trip a call through the bridge and check the recorded values
        o = TestNSGlyphGeneratorHelper.alloc().init()
        o.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_(
            [0, 1, 2, 3, 4], 5, 3, 8)
        self.assertEqual(o.glyphs, ([0, 1, 2, 3, 4], 5, 3, 8))
        # glyph buffer: input array of NSUInteger whose size is in arg 1
        self.assertArgHasType(
            TestNSGlyphGeneratorHelper.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_,
            0, b'n^I')
        self.assertArgSizeInArg(
            TestNSGlyphGeneratorHelper.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_,
            0, 1)
        self.assertArgHasType(
            TestNSGlyphGeneratorHelper.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_,
            1, objc._C_NSUInteger)
        self.assertArgHasType(
            TestNSGlyphGeneratorHelper.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_,
            2, objc._C_NSUInteger)
        self.assertArgHasType(
            TestNSGlyphGeneratorHelper.insertGlyphs_length_forStartingGlyphAtIndex_characterIndex_,
            3, objc._C_NSUInteger)

    def testMethods(self):
        # args 2 and 3 are by-reference out-parameters on the real generator
        self.assertArgIsOut(
            NSGlyphGenerator.generateGlyphsForGlyphStorage_desiredNumberOfCharacters_glyphIndex_characterIndex_, 2)
        self.assertArgIsOut(
            NSGlyphGenerator.generateGlyphsForGlyphStorage_desiredNumberOfCharacters_glyphIndex_characterIndex_, 3)
# Allow running this test file directly; `main` comes from PyObjCTools.TestSupport.
if __name__ == "__main__":
    main()
|
{
"content_hash": "adadabae98364fc454cdc1cb9c856016",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 119,
"avg_line_length": 47.52,
"alnum_prop": 0.7108585858585859,
"repo_name": "albertz/music-player",
"id": "d4e1ff41a05bdfac4a583d58eab17632e8173e29",
"size": "2377",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsglyphgenerator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
# Append an iinfo invocation printing stats for the shared grid texture;
# `command` and `info_command` come from the testsuite's runner preamble
# (out of view here) -- presumably runtest.py; confirm against the harness.
command += info_command ("--stats ../common/textures/grid.tx", info_program="iinfo")
|
{
"content_hash": "738e0aaff51afc3be0a39658c062f2a1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 84,
"avg_line_length": 85,
"alnum_prop": 0.6941176470588235,
"repo_name": "OpenImageIO/oiio",
"id": "32436fea16c78af18a0357af3abee58984428805",
"size": "108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testsuite/iinfo/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131728"
},
{
"name": "C++",
"bytes": "6651069"
},
{
"name": "CMake",
"bytes": "218101"
},
{
"name": "Makefile",
"bytes": "18697"
},
{
"name": "POV-Ray SDL",
"bytes": "5056106"
},
{
"name": "Python",
"bytes": "269004"
},
{
"name": "Shell",
"bytes": "56909"
}
],
"symlink_target": ""
}
|
"""
The flask application package.
"""
from flask import Flask
app = Flask(__name__)
app.debug = False
import lvsys.views
|
{
"content_hash": "0caa608f25ccd679d042e65b28561c30",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 30,
"avg_line_length": 13.777777777777779,
"alnum_prop": 0.7016129032258065,
"repo_name": "sun1991/lvsys",
"id": "400a3c857a804a7885a7636c79631480c950177c",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lvsys/lvsys/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "891"
},
{
"name": "C",
"bytes": "438222"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "7443"
},
{
"name": "HTML",
"bytes": "7979"
},
{
"name": "JavaScript",
"bytes": "219940"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "4812011"
}
],
"symlink_target": ""
}
|
from django.conf import settings
def ssl_media(request):
    """Context processor exposing MEDIA_URL, upgraded to https on secure requests.

    Returns a dict with a single ``MEDIA_URL`` key so templates served over
    SSL never reference mixed-content (http) media.
    """
    media_url = settings.MEDIA_URL
    if request.is_secure() and media_url.startswith('http://'):
        # BUG FIX: swap only the scheme prefix. The previous blanket
        # str.replace() would also rewrite any 'http://' occurring later
        # in the URL (e.g. inside a query string).
        media_url = 'https://' + media_url[len('http://'):]
    return {'MEDIA_URL': media_url}
|
{
"content_hash": "93e3003378eabf364bfd1a12f969edfe",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.6403162055335968,
"repo_name": "bueda/django-comrade",
"id": "39227f6020b1c2f3a6e293b72e0c6f172f8391a2",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comrade/http/context_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83667"
}
],
"symlink_target": ""
}
|
"""
This module uses and extends scholar.py to scrape results
from google scholar to determine the authors, citations,
and title of a paper given a list of possible titles.
"""
#from difflib import get_close_matches
#get_close_matches(word, possibilities[, n][, cutoff])
from scholar import ScholarQuerier
from user_agents import USER_AGENTS
from jellyfish import levenshtein_distance as str_metric
from time import sleep
import random
def find_article(canidates):
    """
    Attempt to find a good Google Scholar match for each candidate paper
    title via scholar.py, printing the best match and its score for each.
    Returns nothing; results are only printed.
    """
    # per-candidate best (lowest) normalized distance; 5000 = "no match yet"
    canidate_scores = [5000 for dummy in canidates]
    canidate_best_match = ['' for dummy in canidates]
    querier = ScholarQuerier()
    delay = 0
    for ii, canidate in enumerate(canidates):
        # throttle between queries to avoid being blocked by Scholar
        sleep(delay)
        # rotate the user agent on every query for the same reason
        querier.UA = random.choice(USER_AGENTS)
        querier.query(canidate)
        for art in querier.articles:
            title = art['title'].encode('ascii', 'ignore')
            # Levenshtein distance normalized by the longer string's length
            score = str_metric(canidate,
                               title)/max(len(title),len(canidate))
            if score < canidate_scores[ii]:
                canidate_scores[ii] = score
                canidate_best_match[ii] = art
        # NOTE: Python 2 print statements -- this module is Python 2 only
        print '----------------------'
        print 'Canidate: '+canidate
        print 'Match: '+ \
            canidate_best_match[ii]['title'].encode('ascii', 'ignore')
        print 'Score '+str(canidate_scores[ii])
        querier.clear_articles()
        # randomized delay (mean 30s, floor 5s) before the next query
        delay = max(random.gauss(30, 30), 5)
def main():
    """Exercise the module with a small set of sample titles."""
    find_article([
        'Polymer 45 (2004) 573579',
        'www.elsevier.com/locate/polymer',
        'Thermal denaturation and folding rates of single domain proteins:',
        'size matters',
    ])
if __name__ == "__main__":
    main()
|
{
"content_hash": "e734b593b3b3ebb91a8865d3de8e6ddc",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 33.228070175438596,
"alnum_prop": 0.6246040126715945,
"repo_name": "ftalex/citation_graph",
"id": "a2a6bc5b8393cb08317da880ae0e78a0448c99c1",
"size": "1917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scholar_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25988"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from .views import (
HomeView,
TournamentListView,
TournamentDetailView,
TournamentCreateView,
TournamentResultsView,
)
urlpatterns = patterns(
    '',
    # urls for Tournament (same routes and order as registered originally)
    url(r'^$', HomeView.as_view(), name='core_home_view'),
    url(r'^tournament/$',
        TournamentListView.as_view(),
        name='core_tournament_list'),
    url(r'^tournament/create/$',
        TournamentCreateView.as_view(),
        name='core_tournament_create'),
    url(r'^tournament/(?P<pk>\d+)/results/$',
        TournamentResultsView.as_view(),
        name='core_tournament_results'),
    url(r'^tournament/(?P<pk>\d+)/$',
        TournamentDetailView.as_view(),
        name='core_tournament_detail'),
)
|
{
"content_hash": "ce7c03bf6559b7d2163284a70eecf9f6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 45,
"avg_line_length": 22.83783783783784,
"alnum_prop": 0.570414201183432,
"repo_name": "Axelrod-Python/DjAxelrod",
"id": "05a80816966fcb553b7e618eee82e324e80e080d",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "5875"
},
{
"name": "JavaScript",
"bytes": "16318"
},
{
"name": "Python",
"bytes": "20613"
},
{
"name": "Shell",
"bytes": "4668"
}
],
"symlink_target": ""
}
|
"""Tests of tests/tools.py"""
import glob
import os
import unittest
from .tools import renumber_svg_ids, canonicalize_svg, SvgTest
class RenumberTest(unittest.TestCase):
    """Test the renumber_svg_ids function."""

    def test_it(self):
        cases = [
            # ids are renumbered in order of first appearance
            ("<svg id='id10' id='id20'>", "<svg id='newid0' id='newid1'>"),
            # '#id' references follow the id they point at
            ("<svg id='id10' also='#id10'>", "<svg id='newid0' also='#newid0'>"),
            # a bare old id outside an id/#ref position is left untouched
            ("<svg id='id10' butnotid='id10'>", "<svg id='newid0' butnotid='id10'>"),
        ]
        for source, expected in cases:
            self.assertEqual(renumber_svg_ids(source), expected)
class CanonicalizeTest(unittest.TestCase):
    """Test the canonicalize_svg function."""

    def test_it(self):
        # Whitespace-only differences must disappear under canonicalization.
        equivalent_pairs = [
            ("<svg></svg>", "<svg>\n</svg>"),
            ("<svg>\n </svg>", "<svg>\n</svg>"),
        ]
        for left, right in equivalent_pairs:
            self.assertEqual(canonicalize_svg(left), canonicalize_svg(right))
class SvgTestTest(SvgTest):
    """Test the custom methods in SvgTest."""
    def test_success(self):
        # assert_same_svg uses renumbering, so equivalent but not identical
        # SVG will pass the assert.
        self.assert_good_svg("<svg id='id10'><x id='id20' y='#id10'/></svg>")
    def test_failure(self):
        # assert_good_svg will get write failures to files for examination.
        # The rewriting includes the new numbering, and also some newlines for
        # making the SVG more readable.
        # There should be no result files stored for this test.
        self.assertEqual(glob.glob(self.result_file_name("*", ".*")), [])
        # Write a bogus "ok" file, so the test will fail.
        # Note the '#id97' reference has no matching id attribute; the
        # expected renumbering below assigns it its own fresh 'newid2'.
        ok_out_filename = self.result_file_name("ok", ".out")
        self.addCleanup(os.remove, ok_out_filename)
        with open(ok_out_filename, "w") as ok_out:
            ok_out.write(
                "<svg id='id99'><x id='id98' y='#id97'/></svg>\n"
            )
        # Now assert_good_svg will raise an AssertionError.
        with self.assertRaises(AssertionError):
            self.assert_good_svg(
                "<svg id='id10'><x id='id20' y='#id10'/></svg>",
            )
        # assert_good_svg has written an out file and two html files.
        # The .out file holds the renumbered actual SVG...
        xx_out_filename = self.result_file_name("xx", ".out")
        self.addCleanup(os.remove, xx_out_filename)
        with open(xx_out_filename) as xx_out:
            self.assertIn(
                "<svg id='newid0'>\n<x id='newid1' y='#newid0'/>\n</svg>",
                xx_out.read()
            )
        # ...and the .html files embed the renumbered expected ("ok") and
        # actual ("xx") versions for side-by-side examination.
        ok_html_filename = self.result_file_name("ok", ".html")
        self.addCleanup(os.remove, ok_html_filename)
        with open(ok_html_filename) as ok_html:
            self.assertIn(
                "<svg id='newid0'>\n<x id='newid1' y='#newid2'/>\n</svg>",
                ok_html.read()
            )
        xx_html_filename = self.result_file_name("xx", ".html")
        self.addCleanup(os.remove, xx_html_filename)
        with open(xx_html_filename) as xx_html:
            self.assertIn(
                "<svg id='newid0'>\n<x id='newid1' y='#newid0'/>\n</svg>",
                xx_html.read()
            )
|
{
"content_hash": "3fd756b838faa9e14b812c6075869b69",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 34.357894736842105,
"alnum_prop": 0.5514705882352942,
"repo_name": "nedbat/cupid",
"id": "941c14d8ebb383ca8e48974f97a4f9d38ef703cb",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_test_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Python",
"bytes": "54899"
}
],
"symlink_target": ""
}
|
"""Module for testing constraints involving the bind server command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestBindServerConstraints(TestBrokerCommand):
    """Constraint tests around bound servers: a host serving a service
    instance cannot be deleted, and a served instance cannot be removed.

    NOTE(review): the verify methods assume the unbind has already run —
    this looks like it relies on unittest's method execution ordering;
    confirm before renaming any test method.
    """
    def testrejectdelunittest02(self):
        # Deleting a host bound as a server must be rejected with a message
        # listing every service instance it still serves.
        command = "del host --hostname unittest00.one-nyp.ms.com"
        out = self.badrequesttest(command.split(" "))
        self.matchoutput(out,
                         "Cannot delete host unittest00.one-nyp.ms.com due "
                         "to the following dependencies:",
                         command)
        self.matchoutput(out,
                         "unittest00.one-nyp.ms.com is bound as a server for "
                         "service utsvc instance utsi1",
                         command)
        self.matchoutput(out,
                         "unittest00.one-nyp.ms.com is bound as a server for "
                         "service utsvc instance utsi2",
                         command)
    # Test that unittest00 comes out of utsi1 but stays in utsi2
    def testunbindutsi1unittest00(self):
        self.noouttest(["unbind", "server",
                        "--hostname", "unittest00.one-nyp.ms.com",
                        "--service", "utsvc", "--instance", "utsi1"])
    def testverifycatutsi1(self):
        # After the unbind, the utsi1 plenary template should list only
        # unittest02 in its "servers" list.
        command = "cat --service utsvc --instance utsi1"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out,
                         "structure template servicedata/utsvc/utsi1/config;",
                         command)
        self.matchoutput(out, 'include { "servicedata/utsvc/config" };',
                         command)
        self.matchoutput(out, '"instance" = "utsi1";', command)
        self.searchoutput(out,
                          r'"servers" = list\(\s*"unittest02.one-nyp.ms.com"\s*\);',
                          command)
    def testverifyunbindutsi1(self):
        # `show service` must agree with the plenary: unittest00 is gone.
        command = "show service --service utsvc --instance utsi1"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out, "Server: unittest02.one-nyp.ms.com", command)
        self.matchclean(out, "Server: unittest00.one-nyp.ms.com", command)
    def testverifycatutsi2(self):
        # utsi2 must still list unittest00 as its server.
        command = "cat --service utsvc --instance utsi2"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out,
                         "structure template servicedata/utsvc/utsi2/config;",
                         command)
        self.matchoutput(out, 'include { "servicedata/utsvc/config" };',
                         command)
        self.matchoutput(out, '"instance" = "utsi2";', command)
        self.searchoutput(out,
                          r'"servers" = list\(\s*"unittest00.one-nyp.ms.com"\s*\);',
                          command)
    def testverifyutsi2(self):
        command = "show service --service utsvc --instance utsi2"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out, "Server: unittest00.one-nyp.ms.com", command)
        self.matchclean(out, "Server: unittest02.one-nyp.ms.com", command)
    def testrejectdelserviceinstance(self):
        # An instance that still has a bound server cannot be deleted.
        command = "del service --service utsvc --instance utsi2"
        self.badrequesttest(command.split(" "))
if __name__ == '__main__':
    # Run this module's constraint tests directly, with verbose output.
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestBindServerConstraints)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "993ed614957a100b29cded88a2512f4d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 84,
"avg_line_length": 42.28395061728395,
"alnum_prop": 0.5783941605839416,
"repo_name": "stdweird/aquilon",
"id": "ac09f86ad042a4d0fdc257b19af038d2bdfb09a1",
"size": "4158",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/broker/test_constraints_bind_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
import os, time
import shutil
import json
from tinydb import TinyDB, Query
# NOTE(review): `cfg` is never imported in this module, so referencing
# cfg.__project_dir__ here raises NameError at import time unless something
# injects `cfg` beforehand — confirm where cfg is supposed to come from.
proj_dir = str(cfg.__project_dir__)
img_dir = "01. Plates\\"
class Transfer():
    """Copies the files belonging to the current shot from the project
    directory into a local working directory.

    Shot membership is read from a JSON project file; frame sequences are
    represented by a compact "stamp" string ('name####.ext_FR[first-last]')
    that can be expanded back into individual frame file names.
    """
    def __init__(self, proj_dir, local_dir):
        # NOTE(review): hard-coded absolute user paths — confirm whether
        # these should come from configuration instead.
        self.shot_file_db = TinyDB('C:\\Users\\IanHartman\\Basket-App\\basket\\assets\\shot_file_db.json')
        self.proj_json = 'C:\\Users\\IanHartman\\Basket-App\\basket\\assets\\data.json'
        self.proj_dir = proj_dir    # source (project) root to copy from
        self.local_dir = local_dir  # destination root for the local copy
        self.shot_files = []        # flat list of file names belonging to the shot
    def splitFilePath(self, path):
        """Split *path* into parts.

        Returns [basedir, filename, filename_noext, ext, init_digits,
        end_digits]; the two digit entries are the text after the last '.'
        of the extension-less name (identical at this point).
        """
        basedir, filename = os.path.split(path)
        filename_noext, ext = os.path.splitext(filename)
        filename_init_digits = filename_noext.rsplit('.')[len(filename_noext.rsplit('.')) - 1]
        filename_end_digits = filename_init_digits
        return [basedir, filename, filename_noext, ext, filename_init_digits, filename_end_digits]
    def encodeSequence(self, filepath):
        """Collapse the frame sequence that *filepath* belongs to into a
        stamp: [basedir, 'name####.ext_FR[first-last]'].

        Returns [] when the file name has no trailing frame digits.
        """
        basedir, filename = os.path.split(filepath)
        filename_noext, ext = os.path.splitext(filename)
        filename_init_digits = filename_noext.rsplit('.')[len(filename_noext.rsplit('.')) - 1]
        filename_end_digits = filename_init_digits
        from string import digits
        if isinstance(filepath, bytes):
            digits = digits.encode()
        filename_nodigits = filename_noext.rstrip(digits)
        # Equal length would be a result of a file that didn't have a trailing ####
        if len(filename_nodigits) == len(filename_noext):
            return []
        # List all files in the directory given for the initial file
        files = os.listdir(basedir)
        seq_files = []
        # Iterate through directory files
        for file in files:
            # Look for files that match the pattern of the initial file
            if file.startswith(filename_nodigits) and file.endswith(ext) and file[len(filename_nodigits):-len(ext) if ext else -1].isdigit():
                # Split the input file into necessary parts
                file_noext, file_ext = os.path.splitext(file)
                file_digits = str(file_noext.rsplit('.')[len(file_noext.rsplit('.')) -1])
                # Iterate through the sequence and count up how many files there are
                # NOTE(review): the else branch resets end_digits back to the
                # initial frame whenever a lower frame number is seen, which
                # discards the maximum found so far — this looks dependent on
                # os.listdir() ordering; confirm intended behavior.
                if int(file_digits) > int(filename_end_digits):
                    filename_end_digits = file_digits
                else:
                    filename_end_digits = filename_init_digits
                # Add the file to the list
                seq_files.append(file)
        # Build a stamp to represent the entire sequence in the DB
        seq_stamp = [basedir, filename_nodigits + ("#" * len(str(filename_end_digits))) + ext + "_FR" + "[{}-{}]".format(str(int(filename_init_digits)), str(filename_end_digits))]
        return seq_stamp
    def resolveSequence(self, stamp):
        """Expand a '_FR[first-last]' stamp back into the full list of
        zero-padded frame file names; prints a notice for plain files
        (and implicitly returns None in that case)."""
        if stamp.find("_FR[") != -1:
            seq_file_base = stamp.split("_FR[")
            all_seq_files = []
            seq_file_range = seq_file_base[1].rstrip(']')
            seq_file_start = seq_file_range.split('-')[0]
            seq_file_end = seq_file_range.split('-')[1]
            seq_file_pad = seq_file_base[0].count('#')
            i = int(seq_file_start)
            while (i <= int(seq_file_end)):
                # Replace the '####' run with the zero-padded frame number.
                seq_file_fullname = str(seq_file_base[0]).replace('#'*seq_file_pad, '0'*(seq_file_pad - len(str(i))) + str(i))
                all_seq_files.append(seq_file_fullname)
                i = i + 1
            return all_seq_files
        else:
            print("Normal File")
    def getShotFiles(self):
        """Populate self.shot_files from the project JSON: all scripts plus
        every frame of every sequence for the current shot.

        NOTE(review): depends on the module-level `cfg` name — see the note
        at the top of this module.
        """
        with open(self.proj_json) as proj_data:
            shot_data = json.load(proj_data)['shots']
        for script in shot_data[str(cfg.__shot_num__)]['scripts']:
            self.shot_files.append(script)
        for seq in shot_data[str(cfg.__shot_num__)]['sequences']:
            seq_files = self.resolveSequence(seq)
            for seq_file in seq_files:
                self.shot_files.append(seq_file)
    def ignoreFiles(self, folder, files):
        """shutil.copytree ignore-callback: skip every non-directory entry
        that is not part of the current shot's file list."""
        ignore_list = []
        for file in files:
            full_path = os.path.join(folder, file)
            if not os.path.isdir(full_path) and file not in self.shot_files:
                print(file)
                ignore_list.append(file)
        return ignore_list
    def createLocalCopy(self):
        """Copy the project tree to the local dir, keeping only shot files.

        Returns None on success, or the first argument of the OSError on
        failure (copytree raises e.g. when the destination already exists).
        """
        try:
            shutil.copytree(self.proj_dir, self.local_dir, ignore=self.ignoreFiles)
        except OSError as err:
            return err.args[0]
# Module-level smoke test: builds a Transfer from cfg paths and immediately
# copies the current shot's files.  NOTE(review): this runs on import and
# depends on the undefined `cfg` name above — confirm intent.
test = Transfer(str(cfg.__project_dir__), str(cfg.__local_dir__))
test.getShotFiles()
test.createLocalCopy()
|
{
"content_hash": "93d5b2f87734d90b0f4b60ec1279c180",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 179,
"avg_line_length": 38.099173553719005,
"alnum_prop": 0.586767895878525,
"repo_name": "Hartman-/Basket",
"id": "0eec4144b3388d26ae06dcc5b51fae1986f25c0d",
"size": "4633",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "basket/utils/filetransfer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2870"
},
{
"name": "Python",
"bytes": "140569"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
}
|
from linkedlist import SinglyLinkedListNode
def reverseList(head):
    """Reverse a singly linked list in place.

    head: first node of the list (or None for an empty list).
    Returns the new head (the former tail); None for an empty list.
    """
    # Drop the unused `tail`/`last` bookkeeping of the old version; a single
    # simultaneous assignment re-links each node onto the reversed prefix.
    reversed_head = None
    node = head
    while node is not None:
        node.next, reversed_head, node = reversed_head, node, node.next
    return reversed_head
def reverseListKNode(head,k):
    """Reverse a singly linked list in successive groups of k nodes.

    A trailing group shorter than k is reversed as well.  k must be >= 1.
    Returns the head of the rewired list (None for an empty list).
    """
    new_head = None
    tail_of_previous = None
    while head is not None:
        # `head` is the first node of this group; after reversal it becomes
        # the group's tail.
        group_head = head
        reversed_group = None
        node = head
        remaining = k
        nxt = None
        while node is not None and remaining > 0:
            nxt = node.next
            node.next = reversed_group
            reversed_group = node
            node = nxt
            remaining -= 1
        if new_head is None:
            new_head = reversed_group
        else:
            tail_of_previous.next = reversed_group
        group_head.next = nxt
        # BUG FIX: the previous version only updated the running tail for the
        # first group, so with three or more groups every later group was
        # spliced onto the first group's tail and intermediate nodes were
        # silently dropped.  The tail must advance after every group.
        tail_of_previous = group_head
        head = nxt
    return new_head
def printLinkedList(head):
    """Print all node values on one line, space separated (Python 2
    print-statement trailing comma), followed by a newline."""
    while head is not None:
        print head.data,
        head=head.next
    print ''
def createList(list):
    """Build a singly linked list from the iterable *list*; returns the
    head node, or None for an empty iterable."""
    head = None
    tail = None
    for value in list:
        node = SinglyLinkedListNode(value)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head
# Demo/smoke test (Python 2: xrange): build the list 1..10, print it,
# reverse it in pairs, print the result.
# NOTE(review): the name `list` shadows the builtin from here on.
a=(i for i in xrange(1,11))
list = createList(a)
printLinkedList(list)
newList=reverseListKNode(list,2)
printLinkedList(newList)
|
{
"content_hash": "0df8079759330364690acd847ea2a986",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 57,
"avg_line_length": 15.989010989010989,
"alnum_prop": 0.5773195876288659,
"repo_name": "pankajanand18/python-tests",
"id": "0ffd900e1ae4970fd82520a76c55f4af9a9dd99d",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linkedlists/reverserlist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42385"
}
],
"symlink_target": ""
}
|
import re
from setuptools import setup
import os.path
# Pull __version__ out of multifeedexporter.py with a regex, so the module
# is not imported at setup time.
version = re.search("__version__ = '([^']+)'", open(
    os.path.join(os.path.dirname(__file__), 'multifeedexporter.py')
).read().strip()).group(1)
here = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description is the README, verbatim.
README = open(os.path.join(here, 'README.rst')).read()
setup(
    name='scrapy-multifeedexporter',
    version=version,
    py_modules=['multifeedexporter'],
    # License metadata is the first line of the LICENSE file.
    license=open(os.path.join(here,'LICENSE')).readline().strip(),
    description='Export scraped items of different types to multiple feeds.',
    long_description=README,
    author='Gabriel Birke',
    author_email='gb@birke-software.de',
    url='http://github.com/gbirke/scrapy-multifeedexporter',
    keywords="scrapy crawl scraping",
    platforms = ['Any'],
    install_requires = ['scrapy>=0.23'],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: No Input/Output (Daemon)',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Scrapy'
    ]
)
|
{
"content_hash": "4fef157f4b053cf44ad949b8f8487ad1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 32.2,
"alnum_prop": 0.6335403726708074,
"repo_name": "gbirke/scrapy-multifeedexporter",
"id": "65ef070ef41c3d331fa96f88d1627771d20c9ac1",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3880"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
import Playground
from toontown.launcher import DownloadForceAcknowledge
from toontown.building import Elevator
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from toontown.racing import RaceGlobals
from direct.fsm import State
from toontown.safezone import PicnicBasket
from toontown.safezone import GolfKart
from direct.task.Task import Task
class OZPlayground(Playground.Playground):
    """Outdoor Zone playground.

    Extends the generic Playground with water handling (camera/toon
    submersion, underwater fog and sounds), picnic-basket boarding, and
    kart-race exit popup dialogs.
    """
    # Z height (render space) below which the camera counts as underwater.
    waterLevel = -0.53
    def __init__(self, loader, parentFSM, doneEvent):
        Playground.Playground.__init__(self, loader, parentFSM, doneEvent)
        self.parentFSM = parentFSM
        self.picnicBasketBlockDoneEvent = 'picnicBasketBlockDone'
        # Submersion state: -1 = unknown, 0 = above water, 1 = submerged.
        self.cameraSubmerged = -1
        self.toonSubmerged = -1
        self.fsm.addState(State.State('picnicBasketBlock', self.enterPicnicBasketBlock, self.exitPicnicBasketBlock, ['walk']))
        state = self.fsm.getStateNamed('walk')
        state.addTransition('picnicBasketBlock')
        self.picnicBasketDoneEvent = 'picnicBasketDone'
    def load(self):
        Playground.Playground.load(self)
    def unload(self):
        Playground.Playground.unload(self)
    def enter(self, requestStatus):
        Playground.Playground.enter(self, requestStatus)
    def exit(self):
        # Stop the underwater polling tasks and clear any underwater fog.
        Playground.Playground.exit(self)
        taskMgr.remove('oz-check-toon-underwater')
        taskMgr.remove('oz-check-cam-underwater')
        self.loader.hood.setNoFog()
    def doRequestLeave(self, requestStatus):
        # Route leave requests through the trialer force-acknowledge state.
        self.fsm.request('trialerFA', [requestStatus])
    def enterDFA(self, requestStatus):
        # Download-force-acknowledge: make sure the destination's assets
        # (the estate phase, or phase 5) are downloaded before leaving.
        doneEvent = 'dfaDoneEvent'
        self.accept(doneEvent, self.enterDFACallback, [requestStatus])
        self.dfa = DownloadForceAcknowledge.DownloadForceAcknowledge(doneEvent)
        if requestStatus['hoodId'] == ToontownGlobals.MyEstate:
            self.dfa.enter(base.cr.hoodMgr.getPhaseFromHood(ToontownGlobals.MyEstate))
        else:
            self.dfa.enter(5)
    def enterStart(self):
        # Begin polling camera and toon height each frame.
        self.cameraSubmerged = 0
        self.toonSubmerged = 0
        taskMgr.add(self.__checkToonUnderwater, 'oz-check-toon-underwater')
        taskMgr.add(self.__checkCameraUnderwater, 'oz-check-cam-underwater')
    def __checkCameraUnderwater(self, task):
        # Continuous task: toggle underwater camera effects at waterLevel.
        if base.camera.getZ(render) < self.waterLevel:
            self.__submergeCamera()
        else:
            self.__emergeCamera()
        return Task.cont
    def __checkToonUnderwater(self, task):
        # Continuous task; the toon threshold (-4.0) is deeper than the
        # camera's waterLevel.
        if base.localAvatar.getZ() < -4.0:
            self.__submergeToon()
        else:
            self.__emergeToon()
        return Task.cont
    def __submergeCamera(self):
        # Idempotent: only reacts on the above-to-below transition.
        if self.cameraSubmerged == 1:
            return
        self.loader.hood.setUnderwaterFog()
        base.playSfx(self.loader.underwaterSound, looping=1, volume=0.8)
        self.cameraSubmerged = 1
        self.walkStateData.setSwimSoundAudible(1)
    def __emergeCamera(self):
        # Idempotent: only reacts on the below-to-above transition.
        if self.cameraSubmerged == 0:
            return
        self.loader.hood.setNoFog()
        self.loader.underwaterSound.stop()
        self.cameraSubmerged = 0
        self.walkStateData.setSwimSoundAudible(0)
    def __submergeToon(self):
        if self.toonSubmerged == 1:
            return
        base.playSfx(self.loader.submergeSound)
        if base.config.GetBool('disable-flying-glitch') == 0:
            self.fsm.request('walk')
        self.walkStateData.fsm.request('swimming', [self.loader.swimSound])
        pos = base.localAvatar.getPos(render)
        base.localAvatar.d_playSplashEffect(pos[0], pos[1], self.waterLevel)
        self.toonSubmerged = 1
    def __emergeToon(self):
        if self.toonSubmerged == 0:
            return
        self.walkStateData.fsm.request('walking')
        self.toonSubmerged = 0
    def enterTeleportIn(self, requestStatus):
        # If the toon was ejected from a kart race, queue the matching
        # explanation dialog before handing control back to the player.
        reason = requestStatus.get('reason')
        if reason == RaceGlobals.Exit_Barrier:
            requestStatus['nextState'] = 'popup'
            self.dialog = TTDialog.TTDialog(text=TTLocalizer.KartRace_RaceTimeout, command=self.__cleanupDialog, style=TTDialog.Acknowledge)
        elif reason == RaceGlobals.Exit_Slow:
            requestStatus['nextState'] = 'popup'
            self.dialog = TTDialog.TTDialog(text=TTLocalizer.KartRace_RacerTooSlow, command=self.__cleanupDialog, style=TTDialog.Acknowledge)
        elif reason == RaceGlobals.Exit_BarrierNoRefund:
            requestStatus['nextState'] = 'popup'
            self.dialog = TTDialog.TTDialog(text=TTLocalizer.KartRace_RaceTimeoutNoRefund, command=self.__cleanupDialog, style=TTDialog.Acknowledge)
        # Suspend underwater checks while teleporting in.
        self.toonSubmerged = -1
        taskMgr.remove('oz-check-toon-underwater')
        Playground.Playground.enterTeleportIn(self, requestStatus)
    def teleportInDone(self):
        # Resume underwater polling once the teleport finishes.
        self.toonSubmerged = -1
        taskMgr.add(self.__checkToonUnderwater, 'oz-check-toon-underwater')
        Playground.Playground.teleportInDone(self)
    def __cleanupDialog(self, value):
        # Acknowledge-callback for the kart-race dialogs above.
        if self.dialog:
            self.dialog.cleanup()
            self.dialog = None
        if hasattr(self, 'fsm'):
            self.fsm.request('walk', [1])
        return
    def enterPicnicBasketBlock(self, picnicBasket):
        # Lock the avatar while seated at a picnic basket.
        base.localAvatar.laffMeter.start()
        base.localAvatar.b_setAnimState('off', 1)
        base.localAvatar.cantLeaveGame = 1
        self.accept(self.picnicBasketDoneEvent, self.handlePicnicBasketDone)
        self.trolley = PicnicBasket.PicnicBasket(self, self.fsm, self.picnicBasketDoneEvent, picnicBasket.getDoId(), picnicBasket.seatNumber)
        self.trolley.load()
        self.trolley.enter()
    def exitPicnicBasketBlock(self):
        # NOTE(review): this ignores self.trolleyDoneEvent, but the accept in
        # enterPicnicBasketBlock used self.picnicBasketDoneEvent and
        # trolleyDoneEvent is never assigned in this class — confirm which
        # event name is intended.
        base.localAvatar.laffMeter.stop()
        base.localAvatar.cantLeaveGame = 0
        self.ignore(self.trolleyDoneEvent)
        self.trolley.unload()
        self.trolley.exit()
        del self.trolley
    def detectedPicnicTableSphereCollision(self, picnicBasket):
        self.fsm.request('picnicBasketBlock', [picnicBasket])
    def handleStartingBlockDone(self, doneStatus):
        # Kart starting block finished: either return to walking or leave
        # for the racetrack.
        self.notify.debug('handling StartingBlock done event')
        where = doneStatus['where']
        if where == 'reject':
            self.fsm.request('walk')
        elif where == 'exit':
            self.fsm.request('walk')
        elif where == 'racetrack':
            self.doneStatus = doneStatus
            messenger.send(self.doneEvent)
        else:
            self.notify.error('Unknown mode: ' + where + ' in handleStartingBlockDone')
    def handlePicnicBasketDone(self, doneStatus):
        self.notify.debug('handling picnic basket done event')
        mode = doneStatus['mode']
        if mode == 'reject':
            self.fsm.request('walk')
        elif mode == 'exit':
            self.fsm.request('walk')
        else:
            self.notify.error('Unknown mode: ' + mode + ' in handlePicnicBasketDone')
    def showPaths(self):
        # Debug helper: visualize the classic-character walk paths.
        from toontown.classicchars import CCharPaths
        from toontown.toonbase import TTLocalizer
        self.showPathPoints(CCharPaths.getPaths(TTLocalizer.Chip))
|
{
"content_hash": "0e67cec132e8c4fadfc6786921868619",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 148,
"avg_line_length": 39.357142857142854,
"alnum_prop": 0.6664805249197264,
"repo_name": "linktlh/Toontown-journey",
"id": "10e4be14873d6be79354d3b9f4f4f2ea1cad2e9b",
"size": "7163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/safezone/OZPlayground.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Matrix square root for general matrices and for upper triangular matrices.
This module exists to avoid cyclic imports.
"""
__all__ = ['sqrtm']
import numpy as np
from scipy._lib._util import _asarray_validated
# Local imports
from .misc import norm
from .lapack import ztrsyl, dtrsyl
from .decomp_schur import schur, rsf2csf
class SqrtmError(np.linalg.LinAlgError):
    """Raised when the matrix square root computation fails."""
from ._matfuncs_sqrtm_triu import within_block_loop
def _sqrtm_triu(T, blocksize=64):
    """
    Matrix square root of an upper triangular matrix.
    This is a helper function for `sqrtm` and `logm`.
    Parameters
    ----------
    T : (N, N) array_like upper triangular
        Matrix whose square root to evaluate
    blocksize : int, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)
    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `T`
    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.
    """
    T_diag = np.diag(T)
    # Stay in real arithmetic only when T is real with a non-negative diagonal.
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    # Cast to complex as necessary + ensure double precision
    if not keep_it_real:
        T = np.asarray(T, dtype=np.complex128, order="C")
        T_diag = np.asarray(T_diag, dtype=np.complex128)
    else:
        T = np.asarray(T, dtype=np.float64, order="C")
        T_diag = np.asarray(T_diag, dtype=np.float64)
    # R starts as the elementwise sqrt of the diagonal; the off-diagonal
    # entries are filled in block by block below.
    R = np.diag(np.sqrt(T_diag))
    # Compute the number of blocks to use; use at least one block.
    n, n = T.shape
    nblocks = max(n // blocksize, 1)
    # Compute the smaller of the two sizes of blocks that
    # we will actually use, and compute the number of large blocks.
    bsmall, nlarge = divmod(n, nblocks)
    blarge = bsmall + 1
    nsmall = nblocks - nlarge
    # Sanity check: the partition must cover exactly n rows.
    if nsmall * bsmall + nlarge * blarge != n:
        raise Exception('internal inconsistency')
    # Define the index range covered by each block.
    start_stop_pairs = []
    start = 0
    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
        for i in range(count):
            start_stop_pairs.append((start, start + size))
            start += size
    # Within-block interactions (Cythonized)
    within_block_loop(R, T, start_stop_pairs, nblocks)
    # Between-block interactions (Cython would give no significant speedup)
    for j in range(nblocks):
        jstart, jstop = start_stop_pairs[j]
        for i in range(j-1, -1, -1):
            istart, istop = start_stop_pairs[i]
            S = T[istart:istop, jstart:jstop]
            # Subtract contributions of the blocks strictly between i and j.
            if j - i > 1:
                S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart,
                                                            jstart:jstop])
            # Invoke LAPACK.
            # For more details, see the solve_sylvester implementation
            # and the fortran dtrsyl and ztrsyl docs.
            Rii = R[istart:istop, istart:istop]
            Rjj = R[jstart:jstop, jstart:jstop]
            if keep_it_real:
                x, scale, info = dtrsyl(Rii, Rjj, S)
            else:
                x, scale, info = ztrsyl(Rii, Rjj, S)
            R[istart:istop, jstart:jstop] = x * scale
    # Return the matrix square root.
    return R
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.
    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)
    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A`
    errest : float
        (if disp == False)
        Frobenius norm of the estimated error, ||err||_F / ||A||_F
    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.
    Examples
    --------
    >>> from scipy.linalg import sqrtm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> r = sqrtm(a)
    >>> r
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> r.dot(r)
    array([[ 1.,  3.],
           [ 1.,  4.]])
    """
    A = _asarray_validated(A, check_finite=True, as_inexact=True)
    if len(A.shape) != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")
    keep_it_real = np.isrealobj(A)
    if keep_it_real:
        T, Z = schur(A)
        # Fall back to a complex Schur form when T is not already triangular
        # (i.e. the real Schur form has 2x2 blocks from complex eigenvalues).
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')
    failflag = False
    try:
        # sqrt of the triangular factor, then transform back: X = Z R Z^H.
        R = _sqrtm_triu(T, blocksize=blocksize)
        ZH = np.conjugate(Z).T
        X = Z.dot(R).dot(ZH)
    except SqrtmError:
        failflag = True
        X = np.empty_like(A)
        X.fill(np.nan)
    if disp:
        if failflag:
            print("Failed to find a square root.")
        return X
    else:
        try:
            # NOTE(review): the docstring describes errest as
            # ||err||_F / ||A||_F, but this squares the residual norm —
            # confirm which form is intended.
            arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
        except ValueError:
            # NaNs in matrix
            arg2 = np.inf
        return X, arg2
|
{
"content_hash": "766d9f628ceaee116881e58feeab2771",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 76,
"avg_line_length": 29.643979057591622,
"alnum_prop": 0.578064288237372,
"repo_name": "nmayorov/scipy",
"id": "d14f9bfbd8b0b40eff617ccdb256bae4423e3376",
"size": "5662",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "scipy/linalg/_matfuncs_sqrtm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4418291"
},
{
"name": "C++",
"bytes": "672553"
},
{
"name": "Dockerfile",
"bytes": "1328"
},
{
"name": "Fortran",
"bytes": "5300184"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "13498627"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import ckeditor.fields
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import mptt.fields
class Migration(migrations.Migration):
    """Initial migration for the content app.

    Creates Category, Menu, MenuItem (an MPTT tree tied to Menu), Meta
    (page meta/SEO data) and Slide (carousel slides tied to Category).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Simple named grouping used by Slide below.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250, verbose_name='Category')),
            ],
            options={
                'verbose_name_plural': 'Categories',
                'verbose_name': 'Category',
            },
        ),
        # NOTE(review): verbose_name_plural here is Russian ('Меню') while
        # every other model uses English — confirm this is intended.
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Name Menu')),
            ],
            options={
                'verbose_name_plural': 'Меню',
            },
        ),
        # MPTT tree node: lft/rght/tree_id/level are maintained by django-mptt.
        migrations.CreateModel(
            name='MenuItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Name')),
                ('slug', models.CharField(blank=True, max_length=250, verbose_name='URL')),
                ('published', models.BooleanField(verbose_name='Published')),
                ('ordering', models.IntegerField(blank=True, default=0, null=True, verbose_name='Ordering')),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('menu', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='content.Menu', verbose_name='Name menu')),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='content.MenuItem', verbose_name='Parent menu item')),
            ],
            options={
                'verbose_name_plural': 'Menu items',
            },
            managers=[
                ('_default_manager', django.db.models.manager.Manager()),
            ],
        ),
        # Per-page meta/SEO information.
        migrations.CreateModel(
            name='Meta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meta_description', ckeditor.fields.RichTextField(blank=True, verbose_name='Meta description')),
                ('meta_keywords', models.CharField(blank=True, max_length=250, verbose_name='Meta keywords')),
                ('meta_title', models.CharField(blank=True, max_length=250, verbose_name='Meta Title')),
                ('meta_author', models.CharField(blank=True, max_length=250, verbose_name='Meta Author')),
                ('favicon_slug', models.CharField(blank=True, max_length=250, verbose_name='URL favicon')),
                ('published', models.BooleanField(default=0, verbose_name='Published')),
            ],
            options={
                'verbose_name_plural': 'Meta descriptions',
                'verbose_name': 'Meta description',
            },
        ),
        # Carousel slide; optionally grouped under a Category.
        migrations.CreateModel(
            name='Slide',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250, verbose_name='Name')),
                ('slug', models.CharField(blank=True, max_length=250, verbose_name='Url pic')),
                ('text1', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Text1')),
                ('text2', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Text2')),
                ('published', models.BooleanField(verbose_name='Published')),
                ('published_main', models.BooleanField(default='', verbose_name='Published on main')),
                ('ordering', models.IntegerField(blank=True, default=0, null=True, verbose_name='Ordering')),
                ('category', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='slides', to='content.Category', verbose_name='Category')),
            ],
            options={
                'verbose_name_plural': 'Slides',
                'verbose_name': 'Slide',
            },
        ),
    ]
|
{
"content_hash": "0013571b3ce744432d3fcf0e675dd6af",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 204,
"avg_line_length": 51.041666666666664,
"alnum_prop": 0.5783673469387756,
"repo_name": "skylifewww/pangolin-fog",
"id": "4f4d5e97c4db9ebf12a1ce9528fa53777ee5d838",
"size": "4976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126434"
},
{
"name": "HTML",
"bytes": "154546"
},
{
"name": "JavaScript",
"bytes": "174324"
},
{
"name": "Makefile",
"bytes": "1483"
},
{
"name": "Nginx",
"bytes": "641"
},
{
"name": "Python",
"bytes": "177394"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text ``notes`` field to ClinicalAnnotationStep.
    dependencies = [
        ('reviews_manager', '0004_auto_20161209_1323'),
    ]
    operations = [
        migrations.AddField(
            model_name='clinicalannotationstep',
            name='notes',
            # null=True with default=None: existing rows simply get NULL.
            field=models.TextField(default=None, null=True, blank=True),
        ),
    ]
|
{
"content_hash": "c8415aea0cb37c1f22dd7787afb2cb1e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6175771971496437,
"repo_name": "crs4/ProMort",
"id": "39729af65c4744ccbbd91f0e58db3ba9e4849c5e",
"size": "1544",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "promort/reviews_manager/migrations/0005_clinicalannotationstep_notes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23712"
},
{
"name": "HTML",
"bytes": "222137"
},
{
"name": "JavaScript",
"bytes": "486643"
},
{
"name": "Python",
"bytes": "728035"
}
],
"symlink_target": ""
}
|
from collections import Counter
from redis import StrictRedis
from streamparse import Bolt
class WordCountBolt(Bolt):
    """Count words in-process with a Counter and emit the running count."""

    outputs = ['word', 'count']

    def initialize(self, conf, ctx):
        self.total = 0
        self.counter = Counter()

    def _increment(self, word, amount):
        # Keep the per-word tally and the overall total in step.
        self.counter[word] += amount
        self.total += amount

    def process(self, tup):
        word = tup.values[0]
        weight = 10 if word == "dog" else 1
        self._increment(word, weight)
        if self.total % 1000 == 0:
            self.logger.info("counted %i words", self.total)
        self.emit([word, self.counter[word]])
class RedisWordCountBolt(Bolt):
    """Count words in Redis (sorted set "words") and emit the running count."""

    def initialize(self, conf, ctx):
        self.total = 0
        self.redis = StrictRedis()

    def _increment(self, word, amount):
        self.total += amount
        # NOTE(review): redis-py >= 3.0 changed the signature to
        # zincrby(name, amount, value); this call uses the 2.x order —
        # confirm the pinned redis-py version.
        return self.redis.zincrby("words", word, amount)

    def process(self, tup):
        word = tup.values[0]
        weight = 10 if word == "dog" else 1
        new_count = self._increment(word, weight)
        if self.total % 1000 == 0:
            self.logger.info("counted %i words", self.total)
        self.emit([word, new_count])
|
{
"content_hash": "5a126cfd4157a3527cf8e6d1c02f8d43",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 27.365853658536587,
"alnum_prop": 0.5926916221033868,
"repo_name": "codywilbourn/streamparse",
"id": "90f968d2536ed000cf073b5656673a30d7bd414b",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/redis/src/bolts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "517"
},
{
"name": "Python",
"bytes": "199577"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
"""Tests for kernel connection utilities"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import nose.tools as nt
from traitlets.config import Config
from jupyter_core.application import JupyterApp
from ipython_genutils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from ipython_genutils.py3compat import str_to_bytes
from jupyter_client import connect, KernelClient
from jupyter_client.consoleapp import JupyterConsoleApp
from jupyter_client.session import Session
class DummyConsoleApp(JupyterApp, JupyterConsoleApp):
    """Minimal app combining JupyterApp and JupyterConsoleApp for tests."""

    def initialize(self, argv=None):
        # A mutable default ([]) is an anti-pattern; map None to [] so the
        # original default behavior (parse an empty argv) is preserved.
        if argv is None:
            argv = []
        JupyterApp.initialize(self, argv=argv)
        self.init_connection_file()
# Canonical connection-info fixture shared by the tests below.
sample_info = dict(ip='1.2.3.4', transport='ipc',
        shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
        key=b'abc123', signature_scheme='hmac-md5', kernel_name='python'
)
# Same fixture with a different kernel_name, for kernel-name specific tests.
sample_info_kn = dict(ip='1.2.3.4', transport='ipc',
        shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
        key=b'abc123', signature_scheme='hmac-md5', kernel_name='test'
)
def test_write_connection_file():
    """write_connection_file round-trips every field through JSON on disk."""
    with TemporaryDirectory() as tmp:
        conn_file = os.path.join(tmp, 'kernel.json')
        connect.write_connection_file(conn_file, **sample_info)
        nt.assert_true(os.path.exists(conn_file))
        with open(conn_file, 'r') as fp:
            loaded = json.load(fp)
    # JSON stores the key as text; convert back before comparing.
    loaded['key'] = str_to_bytes(loaded['key'])
    nt.assert_equal(loaded, sample_info)
def test_load_connection_file_session():
    """load_connection_file() should configure the app's Session.

    Writes a connection file, loads it, and checks that key and
    signature_scheme are applied to the session.
    """
    # (The original created an unused throwaway Session local here.)
    app = DummyConsoleApp(session=Session())
    app.initialize(argv=[])
    session = app.session
    with TemporaryDirectory() as d:
        cf = os.path.join(d, 'kernel.json')
        connect.write_connection_file(cf, **sample_info)
        app.connection_file = cf
        app.load_connection_file()
        nt.assert_equal(session.key, sample_info['key'])
        nt.assert_equal(session.signature_scheme, sample_info['signature_scheme'])
def test_load_connection_file_session_with_kn():
    """load_connection_file() should configure the Session for a named kernel.

    Same as test_load_connection_file_session, but using the fixture with a
    non-default kernel_name.
    """
    # (The original created an unused throwaway Session local here.)
    app = DummyConsoleApp(session=Session())
    app.initialize(argv=[])
    session = app.session
    with TemporaryDirectory() as d:
        cf = os.path.join(d, 'kernel.json')
        connect.write_connection_file(cf, **sample_info_kn)
        app.connection_file = cf
        app.load_connection_file()
        nt.assert_equal(session.key, sample_info_kn['key'])
        nt.assert_equal(session.signature_scheme, sample_info_kn['signature_scheme'])
def test_app_load_connection_file():
    """`ipython console --existing` loads a connection file"""
    with TemporaryDirectory() as tmp:
        conn_file = os.path.join(tmp, 'kernel.json')
        connect.write_connection_file(conn_file, **sample_info)
        app = DummyConsoleApp(connection_file=conn_file)
        app.initialize(argv=[])
        for attr, expected in sample_info.items():
            # key/signature_scheme live on the session, not the app.
            if attr in ('key', 'signature_scheme'):
                continue
            actual = getattr(app, attr)
            nt.assert_equal(actual, expected,
                            "app.%s = %s != %s" % (attr, actual, expected))
def test_load_connection_info():
    """load_connection_info applies a plain dict to a KernelClient."""
    info = {
        'control_port': 53702,
        'hb_port': 53705,
        'iopub_port': 53703,
        'ip': '0.0.0.0',
        'key': 'secret',
        'shell_port': 53700,
        'signature_scheme': 'hmac-sha256',
        'stdin_port': 53701,
        'transport': 'tcp',
    }
    client = KernelClient()
    client.load_connection_info(info)
    assert client.control_port == info['control_port']
    assert client.session.key.decode('ascii') == info['key']
    assert client.ip == info['ip']
def test_find_connection_file():
    """find_connection_file resolves exact names and glob patterns."""
    cfg = Config()
    with TemporaryDirectory() as d:
        # Point the profile at a temp dir so the real runtime dir is untouched.
        cfg.ProfileDir.location = d
        cf = 'kernel.json'
        app = DummyConsoleApp(config=cfg, connection_file=cf)
        app.initialize()
        security_dir = app.runtime_dir
        profile_cf = os.path.join(security_dir, cf)
        with open(profile_cf, 'w') as f:
            f.write("{}")
        # Exact name and several glob patterns must all resolve to the same file.
        for query in (
            'kernel.json',
            'kern*',
            '*ernel*',
            'k*',
        ):
            nt.assert_equal(connect.find_connection_file(query, path=security_dir), profile_cf)
    # JupyterApp is a singleton; reset it so later tests construct fresh apps.
    JupyterApp._instance = None
|
{
"content_hash": "5e82394f1ca9dcfb7a2a56886d7f620d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 95,
"avg_line_length": 32.08029197080292,
"alnum_prop": 0.6348122866894198,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "c84ee660ba0c850d3646a86c8748cde86930f4e2",
"size": "4395",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/jupyter_client/tests/test_connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from testfixtures import compare
from service.ws_re.register.author import Author
class TestAuthor(TestCase):
    """Unit tests for the register Author record."""

    def test_author(self):
        """Each mapping key is exposed as the matching attribute."""
        author = Author("Test Name", {"death": 1999})
        compare("Test Name", author.name)
        compare(1999, author.death)
        compare(None, author.birth)
        author = Author("Test Name", {"birth": 1999})
        compare(None, author.death)
        compare(1999, author.birth)
        author = Author("Test Name", {"first_name": "Test"})
        compare("Test", author.first_name)
        author = Author("Test Name", {"last_name": "Name"})
        compare("Name", author.last_name)
        author = Author("Test Name", {"redirect": "Tada"})
        compare(None, author.death)
        compare("Tada", author.redirect)
        author = Author("Test Name", {"ws_lemma": "Tada_lemma"})
        compare(None, author.death)
        compare("Tada_lemma", author.ws_lemma)
        author = Author("Test Name", {"wp_lemma": "Tada_lemma"})
        compare(None, author.death)
        compare("Tada_lemma", author.wp_lemma)

    def testPublicDomain(self):
        """year_public_domain derives from death year, else birth, else a far default."""
        for mapping, expected in (
            ({}, 2100),
            ({"death": 1900}, 1971),
            ({"birth": 1900}, 2051),
            ({"death": 1950, "birth": 1900}, 2021),
        ):
            compare(expected, Author("Test Name", mapping).year_public_domain)
|
{
"content_hash": "5b48023a4fa01485939987d9bdee5ed4",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 36.229166666666664,
"alnum_prop": 0.6365727429557216,
"repo_name": "the-it/WS_THEbotIT",
"id": "5d6f2d05b1550698fea05235017fba0dddce331b",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "service/ws_re/register/test_author.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "3121"
},
{
"name": "Makefile",
"bytes": "3017"
},
{
"name": "Python",
"bytes": "785189"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
}
|
"""`Factory` provider - passing injections to the underlying providers example."""
from dependency_injector import containers, providers
class Regularizer:
    """Value object holding a regularization strength ``alpha``."""
    def __init__(self, alpha: float) -> None:
        self.alpha = alpha
class Loss:
    """Value object holding the ``Regularizer`` it is composed with."""
    def __init__(self, regularizer: Regularizer) -> None:
        self.regularizer = regularizer
class ClassificationTask:
    """Value object holding the ``Loss`` it is composed with."""
    def __init__(self, loss: Loss) -> None:
        self.loss = loss
class Algorithm:
    """Top of the example object graph; holds a ``ClassificationTask``."""
    def __init__(self, task: ClassificationTask) -> None:
        self.task = task
class Container(containers.DeclarativeContainer):
    """DI container wiring Algorithm -> ClassificationTask -> Loss -> Regularizer.

    Regularizer's ``alpha`` is deliberately not set here so callers can inject
    it at call time with the double-underscore syntax (see the __main__ block).
    """
    algorithm_factory = providers.Factory(
        Algorithm,
        task=providers.Factory(
            ClassificationTask,
            loss=providers.Factory(
                Loss,
                regularizer=providers.Factory(
                    Regularizer,
                ),
            ),
        ),
    )
if __name__ == "__main__":
    container = Container()
    # ``task__loss__regularizer__alpha`` traverses the nested factories,
    # overriding Regularizer's alpha for this single call.
    algorithm_1 = container.algorithm_factory(
        task__loss__regularizer__alpha=0.5,
    )
    assert algorithm_1.task.loss.regularizer.alpha == 0.5
    algorithm_2 = container.algorithm_factory(
        task__loss__regularizer__alpha=0.7,
    )
    assert algorithm_2.task.loss.regularizer.alpha == 0.7
|
{
"content_hash": "cbebd2ba0b0c277113c6f02bc84e3f5b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 82,
"avg_line_length": 24.132075471698112,
"alnum_prop": 0.6075058639562158,
"repo_name": "rmk135/dependency_injector",
"id": "de1130f3f8091bed43dc923456df729ca192c4d0",
"size": "1279",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/providers/factory_init_injections_underlying.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171241"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefine Invite.accepted as a plain BooleanField defaulting to False.
    dependencies = [
        ('groups', '0012_auto_20160318_0818'),
    ]
    operations = [
        migrations.AlterField(
            model_name='invite',
            name='accepted',
            field=models.BooleanField(default=False),
        ),
    ]
|
{
"content_hash": "ec4ccd9c25eb4c70278b1345889729fa",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.5968586387434555,
"repo_name": "fxa90id/mozillians",
"id": "fe866292f5d9cc32e7676165d8a2970e319e9a8c",
"size": "406",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mozillians/groups/migrations/0013_auto_20160323_0228.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "181742"
},
{
"name": "HTML",
"bytes": "165063"
},
{
"name": "JavaScript",
"bytes": "141584"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "887164"
},
{
"name": "Shell",
"bytes": "1332"
}
],
"symlink_target": ""
}
|
"""
Given a list, rotate the list to the right by k places, where k is non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Rotate a singly linked list to the right by k places."""

    # @param head, a ListNode
    # @param k, an integer
    # @return a ListNode
    def rotateRight(self, head, k):
        """Return the head of the list rotated right by k (k >= 0)."""
        if head is None:
            return None
        # Walk to the tail, measuring the length as we go.
        length, tail = 1, head
        while tail.next is not None:
            tail = tail.next
            length += 1
        k %= length
        if k == 0:
            # Rotation by a multiple of the length is a no-op.
            return head
        # Close the list into a ring, then break it just before the new head.
        tail.next = head
        node = head
        for _ in range(length - k - 1):
            node = node.next
        new_head = node.next
        node.next = None
        return new_head
|
{
"content_hash": "a41835e2379f1fd90ded3a8d9cad88c0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 26.289473684210527,
"alnum_prop": 0.48148148148148145,
"repo_name": "yuzhangcmu/Python-Study",
"id": "727e6c5bc5bc4d5560c819eaac4a67bff5b08498",
"size": "999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Leetcode/Rotate_List.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """Dict that remembers insertion order (Python 2 backport).

    Order is tracked in a circular doubly linked list of ``[key, prev, next]``
    cells: ``self.__map`` maps each key to its cell for O(1) relinking and
    ``self.__end`` is the sentinel node of the ring.
    """
    def __init__(self, *args, **kwds):
        # Mirror dict(): at most one positional mapping/iterable argument.
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialized (e.g. re-__init__ on an existing instance)?
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # New key: splice a cell in just before the sentinel (list end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the ring.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the ring forward from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the ring backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair: last inserted by default."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle as (class, items[, instance dict]); temporarily drop the
        # linked-list attributes so they are not captured in inst_dict.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derive the remaining mapping API from DictMixin (built on __iter__ etc.).
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: every key in *iterable* maps to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # plain dict comparison (order-insensitive) otherwise.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
|
{
"content_hash": "f8cdbe947b52b952467176f1432898b7",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 28.60747663551402,
"alnum_prop": 0.48056190787324404,
"repo_name": "citrix-openstack-build/glance",
"id": "524a5c96f34d8b9d9f235c673e6ae81a1e2dd37d",
"size": "4222",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "glance/common/ordereddict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2464002"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
}
|
"""The text random column class of the dataframe creator, create random text snippets"""
import random
import warnings
__author__ = "Peter J Usherwood"
__python_version__ = "3.6"
class TextRandom:
    """Produce a column of random sentences (one per row) generated by
    second-order Markov chains trained on a given corpus.
    """

    def __init__(self, name='Test Random Text', n_records=50, args=None):
        """
        :param name: String column root name
        :param n_records: Int number of rows per column
        :param args: Optional dict of extra options:
            - corpus: a word-tokenized corpus to train the Markov chains on
              (defaults to the first 200k words of the NLTK Brown corpus)
            - remove_brackets_and_quotes: Bool, drop bracket/quote tokens
              that can otherwise appear irregular (default True)
        """
        # Copy so the caller's dict is never mutated, and tolerate the
        # documented default of args=None (the original crashed on it).
        args = dict(args) if args else {}
        corpus = args.pop('corpus', None)
        if corpus is None:
            # Lazy import: nltk is only required when no corpus is supplied.
            from nltk.corpus import brown
            corpus = brown.words()[:200000]
        self.corpus = corpus
        self.name = name
        self.n_records = n_records
        # pop (not get) so this option is not reported as "unused" below —
        # the original warned about its own documented option.
        self.remove_brackets_and_quotes = args.pop('remove_brackets_and_quotes', True)
        for key in args:
            warnings.warn('Unused key:' + str(key))
        self.cache = {}
        if self.remove_brackets_and_quotes:
            self.corpus = [w for w in self.corpus
                           if w not in ['"', '`', '(', ')', '``', "'", "''"]]
        self.corpus_size = len(self.corpus)
        self.database()
        self.col = self.create_array()

    def triples(self):
        """Yield consecutive word triples from the corpus:
        (w1, w2, w3), then (w2, w3, w4), etc.
        """
        if len(self.corpus) < 3:
            return
        for i in range(len(self.corpus) - 2):
            yield (self.corpus[i], self.corpus[i + 1], self.corpus[i + 2])

    def database(self):
        """Build the Markov cache: (w1, w2) -> list of observed followers."""
        for w1, w2, w3 in self.triples():
            key = (w1, w2)
            if key in self.cache:
                self.cache[key].append(w3)
            else:
                self.cache[key] = [w3]

    def generate_sentence(self,
                          non_forward_space_punctuation='.?,:;',
                          clean_punctuation=True):
        """Generate one sentence by walking the Markov chain.

        Seeds from a key starting with '.' (a sentence boundary), so the
        corpus must contain at least one '.' token followed by two words.

        :param non_forward_space_punctuation: String, punctuation marks to
            pull flush against the preceding word
        :param clean_punctuation: Bool, whether to remove the whitespace
            before the selected punctuation marks
        :return: String, the sentence generated
        """
        sentence_ending_keys = [key for key in self.cache if key[0] == '.']
        seed_key = random.choice(sentence_ending_keys)
        seed_word = seed_key[1]
        next_word = random.choice(self.cache[seed_key])
        w1, w2 = seed_word, next_word
        gen_words = []
        while w2 not in ['.', '?', '!']:
            gen_words.append(w1)
            try:
                w1, w2 = w2, random.choice(self.cache[(w1, w2)])
            except KeyError:
                # Dead-end pair (only occurs as the corpus' last two words):
                # drop it and back off one word.
                # NOTE(review): w1/w2 are unchanged here, so if this branch is
                # hit the loop re-enters it forever — preserved from the
                # original; confirm intended handling.
                print((w1, w2))
                gen_words = gen_words[:-1]
                self.cache.pop((w1, w2), None)
        gen_words.append(w2)
        text = ' '.join(gen_words)
        if clean_punctuation:
            for punc in non_forward_space_punctuation:
                text = text.replace(' ' + punc, punc)
        return text

    def create_array(self,
                     non_forward_space_punctuation='.?,:;',
                     clean_punctuation=True):
        """Return the column: a list of ``n_records`` generated sentences.

        Parameters mirror :meth:`generate_sentence`.
        """
        return [self.generate_sentence(
                    non_forward_space_punctuation=non_forward_space_punctuation,
                    clean_punctuation=clean_punctuation)
                for _ in range(self.n_records)]
|
{
"content_hash": "88f51b17498be49cf6b040cf24ca0d74",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 114,
"avg_line_length": 33.693548387096776,
"alnum_prop": 0.5598372426998564,
"repo_name": "Usherwood/usherwood_ds",
"id": "2adfbc0824111eaccaca440773fa11c1fdaefe36",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usherwood_ds/df_creator/columns/text_random.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76416"
},
{
"name": "Python",
"bytes": "245786"
}
],
"symlink_target": ""
}
|
import unittest
import pynuodb
from .nuodb_base import NuoBase
class NuoDBGlobalsTest(NuoBase):
    """Check the DB-API 2.0 (PEP 249) module-level globals of pynuodb."""

    def test_module_globals(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(pynuodb.apilevel, '2.0')
        self.assertEqual(pynuodb.threadsafety, 1)
        self.assertEqual(pynuodb.paramstyle, 'qmark')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "0ffacee93e8cc1755266ee3bb59aa87c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6882352941176471,
"repo_name": "tvincentNuoDB/nuodb-python",
"id": "d8e1848c3376f50adf0527f1d746b3f52c56c757",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/nuodb_globals_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16023"
},
{
"name": "Makefile",
"bytes": "1955"
},
{
"name": "Python",
"bytes": "284584"
}
],
"symlink_target": ""
}
|
import os
from dashboard import app as application, run
if __name__ == '__main__':
    # OpenShift injects the bind address/port via environment variables;
    # fall back to local defaults for development.
    ip = os.environ.get('OPENSHIFT_PYTHON_IP', '0.0.0.0')
    port = int(os.environ.get('OPENSHIFT_PYTHON_PORT', 8051))
    # Let handler exceptions propagate to the server instead of Flask's
    # generic error page.
    application.config['PROPAGATE_EXCEPTIONS'] = True
    run(ip, port)
|
{
"content_hash": "10d2e9fc4026dc5fd5b4bc1e31b80667",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 61,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6594202898550725,
"repo_name": "ButecoOpenSource/dashboard-old",
"id": "caf8ff258da40e4e54b0918a4255cfebe288ba03",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111718"
},
{
"name": "HTML",
"bytes": "19590"
},
{
"name": "JavaScript",
"bytes": "505105"
},
{
"name": "Makefile",
"bytes": "283"
},
{
"name": "Python",
"bytes": "8682"
}
],
"symlink_target": ""
}
|
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
import json
from twisted.internet.defer import Deferred, \
inlineCallbacks, \
returnValue
def sleep(delay):
    """Return a Deferred that fires (with None) after *delay* seconds."""
    # Import here: at module level `reactor` is only imported inside the
    # __main__ block below, so this function would raise NameError if the
    # module were imported rather than run as a script.
    from twisted.internet import reactor
    d = Deferred()
    reactor.callLater(delay, d.callback, None)
    return d
class SlowSquareServerProtocol(WebSocketServerProtocol):
    """WebSocket server that squares JSON-encoded numbers after a 1s delay."""

    @inlineCallbacks
    def slowsquare(self, x):
        # Reject inputs above 5; otherwise wait a second and return x squared.
        if x > 5:
            raise Exception("number too large")
        else:
            yield sleep(1)
            returnValue(x * x)

    @inlineCallbacks
    def onMessage(self, payload, isBinary):
        # Only text frames are handled; the payload is a JSON-encoded number.
        if not isBinary:
            x = json.loads(payload.decode('utf8'))
            try:
                res = yield self.slowsquare(x)
            except Exception as e:
                # Report the failure in the close reason (code 1000 = normal).
                self.sendClose(1000, "Exception raised: {0}".format(e))
            else:
                self.sendMessage(json.dumps(res).encode('utf8'))
if __name__ == '__main__':
    import sys
    from twisted.python import log
    from twisted.internet import reactor
    # Log to stdout so the example is easy to follow interactively.
    log.startLogging(sys.stdout)
    factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
    factory.protocol = SlowSquareServerProtocol
    # Listen on port 9000 and hand control to the Twisted event loop.
    reactor.listenTCP(9000, factory)
    reactor.run()
|
{
"content_hash": "2ba34803a2ed352b41233974770fa846",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 25.28846153846154,
"alnum_prop": 0.6106463878326996,
"repo_name": "mrrrgn/AutobahnPython",
"id": "89346d7e872f8154b2e574d1b12f1a2fb7f18bcf",
"size": "2085",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/twisted/websocket/slowsquare/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from rockstar import RockStar
# One-line R "Hello world" program to commit repeatedly.
r_code = "cat ('Hello world!')"
# Fabricate 400 days of commit history for hello.r containing r_code.
rock_it_bro = RockStar(days=400, file_name='hello.r', code=r_code)
rock_it_bro.make_me_a_rockstar()
|
{
"content_hash": "86d3e0c4d95c84b2c259365dcd13549c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7012195121951219,
"repo_name": "Hitman666/rockstar",
"id": "d316eda0dcae9278c355d2ec9775d1e21b406ae8",
"size": "164",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "examples/r_rockstar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14272"
}
],
"symlink_target": ""
}
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """Preorder (root, left, right) traversal of a binary tree."""

    # @param root: The root of binary tree.
    # @return: Preorder in ArrayList which contains node values.
    def preorderTraversal(self, root):
        """Return the node values of *root*'s tree in preorder."""
        values = []
        self.pre_order_traversal(root, values)
        return values

    def pre_order_traversal(self, root, result):
        """Append *root*'s subtree values to *result* in preorder."""
        if root is not None:
            result.append(root.val)
            self.pre_order_traversal(root.left, result)
            self.pre_order_traversal(root.right, result)
|
{
"content_hash": "d108055c5de255c6eaefb948eaa9945b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 62,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.6223241590214067,
"repo_name": "joshua-jin/algorithm-campus",
"id": "124db7060e69d2c6b4849e3dcb9472734b7a9de3",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lulu/binary_tree_preorder_traversal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "62223"
},
{
"name": "Java",
"bytes": "192445"
},
{
"name": "Python",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Creates two proxy models over logistica.NfEntrada (no new tables):
    # company-specific admin views for Agator and Tussor.
    dependencies = [
        ('logistica', '0037_nfentrada_empresa'),
    ]
    operations = [
        migrations.CreateModel(
            name='NfEntradaAgator',
            fields=[
            ],
            options={
                'verbose_name': 'Nota fiscal de entrada Agator',
                'verbose_name_plural': 'Notas fiscais de entrada Agator',
                'proxy': True,
                'indexes': [],
            },
            bases=('logistica.nfentrada',),
        ),
        migrations.CreateModel(
            name='NfEntradaTussor',
            fields=[
            ],
            options={
                'verbose_name': 'Nota fiscal de entrada Tussor',
                'verbose_name_plural': 'Notas fiscais de entrada Tussor',
                'proxy': True,
                'indexes': [],
            },
            bases=('logistica.nfentrada',),
        ),
    ]
|
{
"content_hash": "72ef89beab49f06ccb6e1b26b3d0a08e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 27.35135135135135,
"alnum_prop": 0.48122529644268774,
"repo_name": "anselmobd/fo2",
"id": "c79c6a4619fbf4cf9ba2dc9a5e0b88f79731a1c1",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/logistica/migrations/0038_nfentradaagator_nfentradatussor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
from subprocess \
import \
call
# Code taken from python 2.6.5
class CalledProcessError(Exception):
    """Raised by check_call() when the child process exits with a
    non-zero status; the status is available as ``returncode`` and the
    command as ``cmd``."""

    def __init__(self, returncode, cmd):
        self.returncode, self.cmd = returncode, cmd

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def check_call(*popenargs, **kwargs):
    """Run a command and wait for it; raise CalledProcessError on a
    non-zero exit status, otherwise return the (zero) return code.

    Arguments are the same as for the Popen constructor. Example:
        check_call(["ls", "-l"])
    """
    returncode = call(*popenargs, **kwargs)
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    if returncode:
        raise CalledProcessError(returncode, command)
    return returncode
|
{
"content_hash": "384a7820e2123ffe98bf72ea29aea743",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 92,
"avg_line_length": 32.088235294117645,
"alnum_prop": 0.6617781851512374,
"repo_name": "abadger/Bento",
"id": "39ea610e70cce34191a0fac62f47a8a30cb7e436",
"size": "1091",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bento/compat/_subprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8367"
},
{
"name": "C++",
"bytes": "165"
},
{
"name": "FORTRAN",
"bytes": "97"
},
{
"name": "Python",
"bytes": "1018735"
},
{
"name": "Shell",
"bytes": "5067"
}
],
"symlink_target": ""
}
|
import copy
import unittest
import warnings
import mock
import numpy as np
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import serializer
from chainer import testing
from chainer.testing import attr
import chainerx
class TestHyperparameter(unittest.TestCase):
    """Tests for optimizer.Hyperparameter parent/child attribute fallback."""

    def setUp(self):
        # child shadows y, adds z; x is only defined on the parent.
        self.parent = optimizer.Hyperparameter()
        self.parent.x = 1
        self.parent.y = 2
        self.child = optimizer.Hyperparameter(self.parent)
        self.child.y = 3
        self.child.z = 4

    def test_getattr(self):
        # Parent exposes exactly its own attributes.
        self.assertTrue(hasattr(self.parent, 'x'))
        self.assertEqual(self.parent.x, 1)
        self.assertTrue(hasattr(self.parent, 'y'))
        self.assertEqual(self.parent.y, 2)
        self.assertFalse(hasattr(self.parent, 'z'))
        # Child inherits x, shadows y, and adds z.
        self.assertTrue(hasattr(self.child, 'x'))
        self.assertEqual(self.child.x, 1)
        self.assertTrue(hasattr(self.child, 'y'))
        self.assertEqual(self.child.y, 3)
        self.assertTrue(hasattr(self.child, 'z'))
        self.assertEqual(self.child.z, 4)

    def test_get_dict(self):
        # get_dict flattens the chain with child values winning.
        self.assertEqual(self.parent.get_dict(), {'x': 1, 'y': 2})
        self.assertEqual(self.child.get_dict(), {'x': 1, 'y': 3, 'z': 4})

    def test_repr(self):
        self.assertEqual(repr(self.parent), 'Hyperparameter(x=1, y=2)')
        self.assertEqual(repr(self.child), 'Hyperparameter(x=1, y=3, z=4)')

    def test_deep_copy(self):
        # Deep-copying parent and child together must preserve the parent
        # link as an identity, not a second copy.
        parent_copy, child_copy = copy.deepcopy([self.parent, self.child])
        self.assertEqual(self.child.get_dict(), child_copy.get_dict())
        self.assertEqual(self.parent.get_dict(), parent_copy.get_dict())
        self.assertIs(child_copy.parent, parent_copy)
class DummyDeserializer(serializer.Deserializer):
    """Deserializer stub that serves values from an in-memory target mapping."""

    def __init__(self, target):
        super(DummyDeserializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        # Sub-deserializers are not supported by this stub.
        raise NotImplementedError

    def __call__(self, key, value):
        stored = self.target[key]
        if value is None:
            # No destination given: hand back the stored value directly.
            return stored
        if isinstance(value, np.ndarray):
            # Fill the caller's array in place.
            np.copyto(value, stored)
            return value
        # Scalar-like destination: coerce the stored value to its type.
        return type(value)(np.asarray(stored))
class TestUpdateRule(unittest.TestCase):
    """Tests for ``optimizer.UpdateRule``: device dispatch, hooks, state
    transfer between devices, and (de)serialization.
    """
    class SimpleUpdateRule(optimizer.UpdateRule):
        # No-op CPU/GPU implementations; the tests only count dispatches.
        def update_core_cpu(self, param):
            pass
        def update_core_gpu(self, param):
            pass
    def setUp(self):
        self.data = np.ones((2, 3), np.float32)
        self.grad = np.ones_like(self.data)
        self.var = chainer.Variable(self.data, grad=self.grad)
        update_rule = self.SimpleUpdateRule()
        # Wrap each update_core_* so call counts can be asserted while the
        # original behavior is preserved.
        update_rule.update_core_cpu = mock.MagicMock(
            wraps=update_rule.update_core_cpu)
        update_rule.update_core_gpu = mock.MagicMock(
            wraps=update_rule.update_core_gpu)
        update_rule.update_core_chainerx = mock.MagicMock(
            wraps=update_rule.update_core_chainerx)
        self.update_rule = update_rule
    def test_update_cpu(self):
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core_cpu.call_count, 1)
        self.assertEqual(self.update_rule.update_core_gpu.call_count, 0)
        self.assertEqual(self.update_rule.update_core_chainerx.call_count, 0)
    @attr.gpu
    def test_update_gpu(self):
        self.var.to_gpu()
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
        self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
        self.assertEqual(self.update_rule.update_core_chainerx.call_count, 0)
    @attr.chainerx
    def test_update_chainerx(self):
        # NOTE: a ChainerX array backed by native memory also reaches the
        # CPU path once (cpu count is 1 here), per the asserts below.
        self.var.to_chainerx()
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core_cpu.call_count, 1)
        self.assertEqual(self.update_rule.update_core_gpu.call_count, 0)
        self.assertEqual(self.update_rule.update_core_chainerx.call_count, 1)
    @attr.chainerx
    @attr.gpu
    def test_update_chainerx_gpu(self):
        self.var.to_gpu()
        self.var.to_chainerx()
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
        self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
        self.assertEqual(self.update_rule.update_core_chainerx.call_count, 1)
    def check_add_hook(self, hook):
        # A registered hook is called once per update() with
        # (update_rule, parameter) as positional arguments.
        self.update_rule.update(self.var)
        self.assertEqual(hook.call_count, 1)
        args = hook.call_args_list[0][0]
        self.assertEqual(len(args), 2)
        self.assertIs(args[0], self.update_rule)
        self.assertIs(args[1], self.var)
    def test_add_hook(self):
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook)
        self.check_add_hook(hook)
    def test_add_hook_with_name(self):
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook, name='hook')
        self.check_add_hook(hook)
    def test_remove_hook(self):
        hook = mock.MagicMock()
        self.update_rule.add_hook(hook, name='hook')
        self.update_rule.remove_hook('hook')
        self.update_rule.update(self.var)
        self.assertEqual(hook.call_count, 0)
    def test_add_hook_with_function_name(self):
        # When no explicit name is given, the function's __name__ ('foo')
        # is used as the registration key.
        hook_body = mock.MagicMock()
        def foo(update_rule, data, grad):
            hook_body(update_rule, data, grad)
        self.update_rule.add_hook(foo)
        self.update_rule.remove_hook('foo')
        self.update_rule.update(self.var)
        self.assertEqual(hook_body.call_count, 0)
    def test_add_hook_no_name(self):
        # A callable with neither a name argument nor a usable identifier
        # cannot be registered.
        class CallableWithoutName(object):
            def __call__(self, update_rule, param):
                pass
        with self.assertRaises(ValueError):
            self.update_rule.add_hook(CallableWithoutName())
    def test_add_hook_duplicated_name(self):
        self.update_rule.add_hook(mock.MagicMock(), name='foo')
        with self.assertRaises(ValueError):
            self.update_rule.add_hook(mock.MagicMock(), name='foo')
    def test_remove_hook_not_exist(self):
        with self.assertRaises(KeyError):
            self.update_rule.remove_hook('foo')
    def test_disabled_update_rule(self):
        # update() must be a no-op while enabled is False.
        self.update_rule.update_core = mock.MagicMock()
        self.update_rule.enabled = False
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core.call_count, 0)
        self.update_rule.enabled = True
        self.update_rule.update(self.var)
        self.assertEqual(self.update_rule.update_core.call_count, 1)
    def setup_state(self):
        # Install an init_state that seeds one scalar and one array entry.
        def init_state(data):
            state = self.update_rule.state
            state['a'] = 0
            state['b'] = np.array([1, 2, 3], dtype=np.float32)
        self.update_rule.init_state = init_state
    @attr.gpu
    def test_state_copy_to_gpu(self):
        self.setup_state()
        def update_core(param):
            # Scalars stay host-side; arrays follow the parameter device.
            self.assertIsInstance(self.update_rule.state['a'], int)
            self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
        self.update_rule.update_core = update_core
        self.var.to_gpu()
        self.update_rule.update(self.var)
    @attr.multi_gpu(2)
    def test_state_copy_to_another_gpu(self):
        self.setup_state()
        def update_core(param):
            self.assertIsInstance(self.update_rule.state['b'], cuda.ndarray)
            self.assertEqual(self.update_rule.state['b'].device.id, 1)
        # call update with arrays on GPU 0 (tested by another method)
        self.update_rule.update_core = lambda param: None
        self.update_rule.update(chainer.Variable(
            cuda.to_gpu(self.data, 0), grad=cuda.to_gpu(self.grad, 0)))
        # check if it copies the states correctly when arrays on another GPU
        # are passed
        self.update_rule.update_core = update_core
        self.update_rule.update(chainer.Variable(
            cuda.to_gpu(self.data, 1), grad=cuda.to_gpu(self.grad, 1)))
    def get_target(self):
        # Serialized snapshot: iteration counter 't' plus the state keys
        # matching setup_state().
        target = {}
        target['t'] = 100
        target['a'] = 1
        target['b'] = np.array([2, 3, 4], dtype=np.float32)
        return target
    @attr.gpu
    def test_state_copy_to_cpu(self):
        self.setup_state()
        def update_core(param):
            self.assertIsInstance(self.update_rule.state['a'], int)
            self.assertIsInstance(self.update_rule.state['b'], np.ndarray)
        self.var.to_gpu()
        self.update_rule.update(self.var)
        self.var.to_cpu()
        self.update_rule.update_core = update_core
        self.update_rule.update(self.var)
    @attr.chainerx
    def test_state_copy_to_chainerx(self):
        self.setup_state()
        def update_core(param):
            self.assertIsInstance(self.update_rule.state['a'], int)
            self.assertIsInstance(
                self.update_rule.state['b'], chainerx.ndarray)
        self.var.to_cpu()
        self.update_rule.update(self.var)
        self.var.to_chainerx()
        self.update_rule.update_core = update_core
        self.update_rule.update(self.var)
    def test_deserialize(self):
        self.setup_state()
        target = self.get_target()
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNotNone(self.update_rule.state)
        self.assertEqual(self.update_rule.state['a'], target['a'])
        np.testing.assert_array_equal(self.update_rule.state['b'], target['b'])
    def test_deserialize_by_strict_deserializer(self):
        # A missing key makes DummyDeserializer raise KeyError, emulating a
        # strict deserializer.
        self.setup_state()
        target = self.get_target()
        del target['a']
        with self.assertRaises(KeyError):
            self.update_rule.serialize(DummyDeserializer(target))
    def test_deserialize_by_nonstrict_deserializer(self):
        # A None value emulates a non-strict deserializer skipping a key;
        # per the asserts, the rule then leaves its state unset.
        self.setup_state()
        target = self.get_target()
        target['a'] = None
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNone(self.update_rule.state)
    def test_deserialize_disabled_update_rule_by_strict_deserializer(self):
        # A disabled rule must not touch its state keys, so the missing
        # 'a' key does not raise.
        self.setup_state()
        self.update_rule.enabled = False
        target = self.get_target()
        del target['a']
        self.update_rule.serialize(DummyDeserializer(target))
        self.assertEqual(self.update_rule.t, target['t'])
        self.assertIsNone(self.update_rule.state)
class TestOptimizer(unittest.TestCase):
    """Epoch-counting behavior of the base ``Optimizer``."""
    def setUp(self):
        self.optimizer = optimizer.Optimizer()
    def test_new_epoch(self):
        self.optimizer.new_epoch()
        self.assertEqual(self.optimizer.epoch, 1)
    def test_invalid_new_epoch(self):
        # Manual new_epoch() is forbidden while auto mode is on.
        self.optimizer.use_auto_new_epoch = True
        with self.assertRaises(RuntimeError):
            self.optimizer.new_epoch()
    def test_auto_new_epoch(self):
        self.optimizer.use_auto_new_epoch = True
        self.optimizer.new_epoch(auto=True)
        self.assertEqual(self.optimizer.epoch, 1)
    def test_invalid_auto_new_epoch(self):
        # new_epoch(auto=True) is forbidden unless auto mode is enabled.
        with self.assertRaises(RuntimeError):
            self.optimizer.new_epoch(auto=True)
@attr.chainerx
class TestOptimizerWithChainerxImplementation(unittest.TestCase):
    # This test ensures an optimizer can update a ChainerX array by
    # overriding update_core_chainerx().
    def test_update(self):
        # Fix: method was named 'test_upate' (typo); renamed so the intent
        # is clear and tooling that greps for the test finds it.
        """A custom update_core_chainerx() must be invoked and applied."""
        initial_p = np.array([1., 2., 3.], np.float32)
        x = chainerx.array([2., 4., 6.], np.float32)
        # forward() computes y = 3 * x * p, so dy/dp = 3 * x. The rule below
        # adds (3 * p - 2 * grad), hence p <- 4 * p - 6 * x.
        expected_p = 4. * initial_p - 6. * backend.CpuDevice().send(x)
        class ChainerxUpdateRule(optimizer.UpdateRule):
            call_count = 0
            def update_core_chainerx(self, param):
                # p += 3 * p - 2 * (dy/dp)
                array = param.array
                t1 = param.array.as_grad_stopped() * 3.
                t2 = param.grad.as_grad_stopped() * 2.
                delta = t1 - t2
                array += delta
                self.call_count += 1
        class ChainerxOptimizer(optimizer.GradientMethod):
            def create_update_rule(self):
                return ChainerxUpdateRule(self.hyperparam)
        class Link(chainer.Link):
            def __init__(self):
                super(Link, self).__init__()
                with self.init_scope():
                    self.p = chainer.Parameter(initial_p)
            def forward(self, x):
                return 3. * x * self.p
        link = Link()
        link.to_device('native:0')
        y = link(x)
        y.backward()
        optimizer_ = ChainerxOptimizer()
        optimizer_.setup(link)
        optimizer_.update()
        assert link.p.update_rule.call_count == 1
        np.testing.assert_array_equal(
            backend.CpuDevice().send(link.p.array), expected_p)
class TestOptimizerHook(unittest.TestCase):
    """Registration and invocation of optimizer-level hooks."""
    def setUp(self):
        self.optimizer = optimizer.Optimizer()
        self.target = SimpleLink(
            np.arange(6, dtype=np.float32).reshape(2, 3),
            np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
    def test_add_hook(self):
        hook = mock.MagicMock(timing='pre')
        hook.call_for_each_param = False
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(hook, 'h1')
        self.optimizer.call_hooks()
        # A whole-optimizer hook receives the optimizer object itself.
        hook.assert_called_with(self.optimizer)
    def test_add_hook_call_for_each_param(self):
        hook = mock.MagicMock(timing='pre')
        hook.call_for_each_param = True
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(hook, 'h1')
        self.optimizer.call_hooks()
        # A per-parameter hook receives (update_rule, parameter) instead.
        hook.assert_called_with(
            self.target.param.update_rule, self.target.param)
    def test_remove_hook(self):
        hook = mock.MagicMock(timing='pre')
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(hook, 'h1')
        self.optimizer.remove_hook('h1')
        self.optimizer.call_hooks()
        self.assertFalse(hook.called)
    def test_duplicated_hook(self):
        self.optimizer.setup(self.target)
        self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
        # A second registration under the same name must be rejected.
        with self.assertRaises(KeyError):
            self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
    def test_invalid_hook(self):
        # Non-callables are not valid hooks.
        with self.assertRaises(TypeError):
            self.optimizer.add_hook(1)
    def test_add_hook_before_setup(self):
        with self.assertRaises(RuntimeError):
            self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
    """Minimal link with one parameter ``param`` and a pre-set gradient.

    Having the gradient pre-populated lets optimizer tests call update()
    without running a backward pass first.
    """
    def __init__(self, w, g):
        super(SimpleLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
        # Grad must be assigned after the parameter is registered.
        self.param.grad = g
class TestGradientMethod(unittest.TestCase):
    """``GradientMethod`` must create one update rule per parameter and
    invoke each rule exactly once per update(), on every backend.
    """
    def setUp(self):
        self.optimizer = chainer.GradientMethod()
        self.target = chainer.ChainList(
            SimpleLink(np.arange(3).astype(np.float32),
                       np.arange(3).astype(np.float32)),
            SimpleLink(np.arange(3).astype(np.float32),
                       np.arange(3).astype(np.float32)))
        # Each parameter gets a fresh MagicMock as its update rule, so the
        # update calls can be asserted without running real math.
        self.optimizer.create_update_rule = mock.MagicMock
    def setup_cpu(self):
        self.optimizer.setup(self.target)
    def setup_gpu(self, device=None):
        self.target.to_gpu(device)
        self.optimizer.setup(self.target)
    def setup_chainerx(self, orig_xp):
        # orig_xp selects the ChainerX backend: cupy -> 'cuda:0',
        # numpy -> 'native:0'.
        if orig_xp is cuda.cupy:
            self.target.to_device('cuda:0')
        else:
            assert orig_xp is np
            self.target.to_device('native:0')
        self.optimizer.setup(self.target)
    def test_setup(self):
        # setup() must create one update rule per parameter (two links,
        # one parameter each), called with no arguments.
        create_update_rule = mock.MagicMock()
        self.optimizer.create_update_rule = create_update_rule
        self.optimizer.setup(self.target)
        self.assertEqual(create_update_rule.call_count, 2)
        self.assertEqual(create_update_rule.call_args_list[0], [(), {}])
        self.assertEqual(create_update_rule.call_args_list[1], [(), {}])
    def check_update(self):
        # update() increments the iteration counter and calls each
        # parameter's rule exactly once with that parameter.
        self.assertEqual(self.optimizer.t, 0)
        self.optimizer.update()
        self.assertEqual(self.optimizer.t, 1)
        self.target[0].param.update_rule.update.assert_called_once_with(
            self.target[0].param)
        self.target[1].param.update_rule.update.assert_called_once_with(
            self.target[1].param)
    def test_update_cpu(self):
        self.setup_cpu()
        self.check_update()
    @attr.gpu
    def test_update_gpu(self):
        self.setup_gpu()
        self.check_update()
    @attr.chainerx
    def test_update_chainerx_cpu(self):
        self.setup_chainerx(np)
        self.check_update()
    @attr.chainerx
    @attr.gpu
    def test_update_chainerx_gpu(self):
        self.setup_chainerx(cuda.cupy)
        self.check_update()
@testing.parameterize(*testing.product({
    'shape': [(4, 3, 2)],
    'dtype': [np.float16, np.float32, np.float64],
    'loss_scale': [None, 1, 10],
}))
class TestGradientMethodLossScale(unittest.TestCase):
    """SGD must divide gradients by the parameter's loss scale.

    Each parameter's grad is a copy of its data and lr is set equal to the
    loss scale, so one update yields data - lr * (grad / loss_scale) = 0;
    every test asserts the parameters become (approximately) zero.
    """
    def setUp(self):
        param0_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        param0_grad = np.copy(param0_data)
        param1_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        param1_grad = np.copy(param1_data)
        self.target = chainer.ChainList(
            SimpleLink(param0_data, param0_grad),
            SimpleLink(param1_data, param1_grad))
        lr = 1.0
        if self.loss_scale is not None:
            lr = self.loss_scale
            for i in range(2):
                self.target[i].param._loss_scale = self.loss_scale
        self.optimizer = chainer.optimizers.SGD(lr)
    def setup_cpu(self):
        self.optimizer.setup(self.target)
    def setup_gpu(self, device=None):
        self.target.to_gpu(device)
        self.optimizer.setup(self.target)
    def setup_chainerx(self, orig_xp):
        # orig_xp selects the ChainerX backend: cupy -> 'cuda:0',
        # numpy -> 'native:0'.
        if orig_xp is cuda.cupy:
            self.target.to_device('cuda:0')
        else:
            assert orig_xp is np
            self.target.to_device('native:0')
        self.optimizer.setup(self.target)
    def check_update(self):
        self.optimizer.update()
        xp = backend.get_array_module(self.target[0].param)
        expected_data = xp.zeros(self.shape, dtype=self.dtype)
        rtol, atol = 1e-4, 1e-5
        if self.dtype is np.float16:
            # float16 needs far looser tolerances.
            rtol, atol = 1e-1, 1e-2
        for i in range(2):
            testing.assert_allclose(self.target[i].param.data, expected_data,
                                    rtol=rtol, atol=atol)
    def test_update_cpu(self):
        self.setup_cpu()
        self.check_update()
    @attr.gpu
    def test_update_gpu(self):
        self.setup_gpu()
        self.check_update()
    @attr.chainerx
    def test_update_chainerx_cpu(self):
        if self.dtype == np.float16:
            raise unittest.SkipTest('ChainerX does not support float16')
        self.setup_chainerx(np)
        self.check_update()
    @attr.chainerx
    @attr.gpu
    def test_update_chainerx_gpu(self):
        if self.dtype == np.float16:
            raise unittest.SkipTest('ChainerX does not support float16')
        # Fix: a redundant setup_gpu() call was removed here.
        # setup_chainerx(cuda.cupy) already moves the target to 'cuda:0'
        # and runs optimizer.setup(), matching TestGradientMethod above;
        # the extra call only re-ran setup() a second time.
        self.setup_chainerx(cuda.cupy)
        self.check_update()
class TestCleargradHook(unittest.TestCase):
    """update() must tolerate a hook that clears gradients mid-update."""
    def setUp(self):
        weights = np.arange(6, dtype=np.float32).reshape(2, 3)
        grads = np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3)
        self.target = SimpleLink(weights, grads)
    def check_cleargrad(self):
        sgd = optimizers.SGD(lr=1)
        sgd.setup(self.target)
        # Hook order matters: CleargradHook drops all gradients first and
        # DummyHook then asserts they are not None again.
        sgd.add_hook(CleargradHook(self))
        sgd.add_hook(DummyHook(self))
        sgd.update()
    def test_cleargrad_cpu(self):
        self.check_cleargrad()
    @attr.gpu
    def test_cleargrad_gpu(self):
        self.target.to_gpu()
        self.check_cleargrad()
class DummyOptimizer(chainer.GradientMethod):
    """Gradient method whose per-parameter update rules are plain mocks."""
    def __init__(self, test):
        super(DummyOptimizer, self).__init__()
        # Reference back to the TestCase; not read within this class itself.
        self.test = test
    def create_update_rule(self):
        # Each parameter gets an independent MagicMock as its rule.
        return mock.MagicMock()
class DummyHook(object):
    """Pre-update optimizer hook asserting every parameter has a grad."""
    name = 'Dummy'
    timing = 'pre'
    def __init__(self, test):
        # TestCase used for its assert methods.
        self.test = test
    def __call__(self, opt):
        for param in opt.target.params():
            # Confirm all grads are not None
            self.test.assertIsNotNone(param.grad)
class CleargradHook(object):
    """Pre-update optimizer hook that clears every parameter's gradient."""
    name = 'Cleargrad'
    timing = 'pre'
    def __init__(self, _):
        # The TestCase argument is accepted for symmetry with DummyHook
        # but unused.
        pass
    def __call__(self, opt):
        for param in opt.target.params():
            # Clear all grads
            param.cleargrad()
class TestGradientMethodClearGrads(unittest.TestCase):
    """update() must still work after all gradients were cleared."""
    def setUp(self):
        self.target = SimpleLink(
            np.arange(3).astype(np.float32),
            np.arange(3).astype(np.float32))
        self.optimizer = DummyOptimizer(self)
        self.optimizer.setup(self.target)
        # DummyHook asserts (pre-update) that no grad is None.
        self.optimizer.add_hook(DummyHook(self))
    def test_update(self):
        # Drop every gradient, then update; the hook checks that grads are
        # present again by the time hooks run.
        self.target.cleargrads()
        self.optimizer.update()
class TestDeprecatedOptimizerHooksEmitsWarning(unittest.TestCase):
    """Hook classes exposed under ``chainer.optimizer`` are deprecated and
    must emit exactly one ``DeprecationWarning`` on construction.
    """
    def setUp(self):
        self.context = warnings.catch_warnings(record=True)
        self.warnings = self.context.__enter__()
        warnings.filterwarnings(action='always', category=DeprecationWarning)
    def tearDown(self):
        self.context.__exit__()
    def _check_deprecation_warned(self):
        # Exactly one warning so far, and it is a DeprecationWarning.
        self.assertEqual(len(self.warnings), 1)
        self.assertIs(self.warnings[-1].category, DeprecationWarning)
    def test_gradient_clipping(self):
        chainer.optimizer.GradientClipping(1.)
        self._check_deprecation_warned()
    def test_gradient_hard_clipping(self):
        chainer.optimizer.GradientHardClipping(1., 2.)
        self._check_deprecation_warned()
    def test_gradient_noise(self):
        chainer.optimizer.GradientNoise(1.)
        self._check_deprecation_warned()
    def test_lasso(self):
        chainer.optimizer.Lasso(1.)
        self._check_deprecation_warned()
    def test_weight_decay(self):
        chainer.optimizer.WeightDecay(1.)
        self._check_deprecation_warned()
# Discover and run all tests in this module when executed directly.
testing.run_module(__name__, __file__)
|
{
"content_hash": "0ef89419578874af96c91698e63da26f",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 79,
"avg_line_length": 32.63570391872279,
"alnum_prop": 0.6182068842835542,
"repo_name": "ktnyt/chainer",
"id": "1185707d8c33c8eb8e5a2be9d545c564fc87a2ea",
"size": "22486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/test_optimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1440363"
},
{
"name": "CMake",
"bytes": "42822"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5128330"
},
{
"name": "Shell",
"bytes": "19475"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
import logging
from django import template
from django.utils import six
from django.utils.html import mark_safe
from reviewboard.avatars import avatar_services
register = template.Library()
@register.simple_tag(takes_context=True)
def avatar(context, user, size, service_id=None):
    """Render the user's avatar as HTML.

    If ``service_id`` is omitted, or does not name a registered and enabled
    avatar service, the user's configured avatar service is used instead.

    Args:
        context (django.template.Context):
            The template rendering context.

        user (django.contrib.auth.models.User):
            The user whose avatar is to be rendered.

        size (int):
            The height and width of the avatar, in pixels.

        service_id (unicode, optional):
            The unique identifier of the avatar service to use.

    Returns:
        django.utils.safestring.SafeText:
        The rendered avatar HTML, or an empty string if no avatar service
        could be found.
    """
    service = avatar_services.for_user(user, service_id)
    if service is not None:
        return service.render(request=context['request'], user=user,
                              size=size)
    logging.error('Could not get a suitable avatar service for user %s.',
                  user)
    return mark_safe('')
@register.simple_tag(takes_context=True)
def avatar_url(context, user, size, resolution='1x', service_id=None):
    """Return the URL of the user's avatar at the requested resolution.

    If ``service_id`` is omitted, or does not name a registered and enabled
    avatar service, the user's configured avatar service is used instead.

    Args:
        context (django.template.Context):
            The template rendering context.

        user (django.contrib.auth.models.User):
            The user whose avatar is to be rendered.

        size (int):
            The height and width of the avatar, in pixels.

        resolution (unicode, optional):
            Either ``'1x'`` (normal DPI, the default) or ``'2x'``
            (high DPI).

        service_id (unicode, optional):
            The unique identifier of the avatar service to use.

    Returns:
        django.utils.safestring.SafeText:
        The URL of the requested avatar, or an empty string if no avatar
        service could be found.

    Raises:
        ValueError:
            ``resolution`` was neither ``'1x'`` nor ``'2x'``.
    """
    if resolution not in ('1x', '2x'):
        raise ValueError('resolution should be "1x" or "2x", not %r.'
                         % resolution)
    service = avatar_services.for_user(user, service_id)
    if service is None:
        logging.error('Could not get a suitable avatar service for user %s.',
                      user)
        return mark_safe('')
    return service.get_avatar_urls(request=context['request'],
                                   user=user,
                                   size=size)[resolution]
@register.simple_tag(takes_context=True)
def avatar_urls(context, user, size, service_id=None):
    """Serialize the user's avatar URLs into a JavaScript object.

    If ``service_id`` is omitted, or does not name a registered and enabled
    avatar service, the user's configured avatar service is used instead.

    Args:
        context (django.template.Context):
            The template rendering context.

        user (django.contrib.auth.models.User):
            The user whose avatar URLs are to be serialized.

        size (int):
            The height and width of the avatar, in pixels.

        service_id (unicode, optional):
            The unique identifier of the avatar service to use.

    Returns:
        django.utils.safestring.SafeText:
        The rendered JavaScript object (``{}`` when no avatar service
        could be found).
    """
    urls = {}
    service = avatar_services.for_user(user, service_id)
    if service is None:
        logging.error('Could not get a suitable avatar service for user %s.',
                      user)
    else:
        resolution_map = service.get_avatar_urls(request=context['request'],
                                                 user=user,
                                                 size=size)
        for resolution, url in six.iteritems(resolution_map):
            urls[resolution] = url
    return mark_safe(json.dumps(urls))
|
{
"content_hash": "99c223ee46265e699cfb21da3fe750bf",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 32.768115942028984,
"alnum_prop": 0.6134453781512605,
"repo_name": "davidt/reviewboard",
"id": "a6543ef668bb698f99684fda7c0d47cc375ec003",
"size": "4522",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "reviewboard/avatars/templatetags/avatars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "206392"
},
{
"name": "HTML",
"bytes": "182334"
},
{
"name": "JavaScript",
"bytes": "1770499"
},
{
"name": "Python",
"bytes": "3842787"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of ``funnel.insidetextfont``."""
    def __init__(
        self, plotly_name="colorsrc", parent_name="funnel.insidetextfont", **kwargs
    ):
        # Callers may override edit_type; otherwise default to "none".
        edit_type = kwargs.pop("edit_type", "none")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
{
"content_hash": "3298ae20960bb9996ce684f26ce46a3e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 32.69230769230769,
"alnum_prop": 0.611764705882353,
"repo_name": "plotly/plotly.py",
"id": "55ebdda24f6f4c7879fafc365b792abc459be894",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/insidetextfont/_colorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Tools for working in non-realtime.
"""
from .AudioInputBusGroup import AudioInputBusGroup # noqa
from .AudioOutputBusGroup import AudioOutputBusGroup # noqa
from .Buffer import Buffer # noqa
from .BufferGroup import BufferGroup # noqa
from .Bus import Bus # noqa
from .BusGroup import BusGroup # noqa
from .DoNotPropagate import DoNotPropagate # noqa
from .Group import Group # noqa
from .Moment import Moment # noqa
from .Node import Node # noqa
from .NodeTransition import NodeTransition # noqa
from .RootNode import RootNode # noqa
from .Session import Session # noqa
from .SessionFactory import SessionFactory # noqa
from .SessionObject import SessionObject # noqa
from .SessionRenderer import SessionRenderer # noqa
from .State import State # noqa
from .Synth import Synth # noqa
|
{
"content_hash": "3135b0c705867ff1ece946bc4f16e6c2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 60,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7806691449814126,
"repo_name": "Pulgama/supriya",
"id": "ce59dd2488c13da9dcfa4e10dfbdfc2b06a94a2b",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/nonrealtime/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
}
|
import sys
import threading
class ProgressBar(object):
    """
    Print a progress bar (a stream of dots) from a separate thread.

    Typical use is as a context manager::

        with ProgressBar():
            do_long_running_work()
    """
    def __init__(self, interval=1, fp=sys.stdout):
        """
        Start the thread for the progress bar
        :param interval: interval seconds to write dots
        :param fp: file pointer to write
        """
        self.interval = interval
        self.fp = fp
        # create event for handling termination
        self.__stop_event = threading.Event()
        # create and start new thread
        self.thread = threading.Thread(target=self.__target)
        # Fix: run as a daemon so a forgotten stop() cannot block
        # interpreter shutdown while the worker waits forever.
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """
        Terminate the progress bar thread and write a trailing newline.

        Calling stop() more than once is safe; only the first call has
        any effect (the original wrote an extra newline per extra call).
        """
        if self.__stop_event.is_set():
            return
        self.__stop_event.set()
        self.thread.join()
        self.fp.write('\n')
        self.fp.flush()
    def __target(self):
        """
        Inner method for writing dots in the separated thread
        """
        event = self.__stop_event
        while not event.is_set():
            self.fp.write('.')
            self.fp.flush()
            # wait() doubles as an interruptible sleep: it returns early
            # the moment stop() sets the event.
            event.wait(self.interval)
    def __enter__(self):
        return self
    def __exit__(self, *args, **kwargs):
        # Always stop the worker, even if the body raised.
        self.stop()
|
{
"content_hash": "a73fa9cbd896597b110f22175607438a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 61,
"avg_line_length": 23.74,
"alnum_prop": 0.5450716090985678,
"repo_name": "mogproject/artifact-cli",
"id": "11822ddb4f0e2af268c9a0111ef7383ce925b10a",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/artifactcli/util/progressbar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "656"
},
{
"name": "Python",
"bytes": "162807"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Sum
from django.template import Context
from django.template.loader import get_template
from timepiece import utils
from timepiece.entries.models import Entry
class ProjectContract(models.Model):
    """A contract covering work on one or more projects over a date range.

    Contracted hours live in the related ``ContractHour`` rows; worked
    hours are derived from ``Entry`` records on the contract's projects.
    """
    STATUS_UPCOMING = 'upcoming'
    STATUS_CURRENT = 'current'
    STATUS_COMPLETE = 'complete'
    CONTRACT_STATUS = {
        STATUS_UPCOMING: 'Upcoming',
        STATUS_CURRENT: 'Current',
        STATUS_COMPLETE: 'Complete',
    }
    PROJECT_UNSET = 0  # Have to set existing contracts to something...
    PROJECT_FIXED = 1
    PROJECT_PRE_PAID_HOURLY = 2
    PROJECT_POST_PAID_HOURLY = 3
    PROJECT_TYPE = {  # UNSET is not an option
        PROJECT_FIXED: 'Fixed',
        PROJECT_PRE_PAID_HOURLY: 'Pre-paid Hourly',
        PROJECT_POST_PAID_HOURLY: 'Post-paid Hourly',
    }
    name = models.CharField(max_length=255)
    projects = models.ManyToManyField('crm.Project', related_name='contracts')
    start_date = models.DateField()
    end_date = models.DateField()
    status = models.CharField(choices=CONTRACT_STATUS.items(),
                              default=STATUS_UPCOMING, max_length=32)
    type = models.IntegerField(choices=PROJECT_TYPE.items())
    class Meta:
        ordering = ('-end_date',)
        verbose_name = 'contract'
        db_table = 'timepiece_projectcontract'  # Using legacy table name.
    def __unicode__(self):
        return unicode(self.name)
    def get_admin_url(self):
        return reverse('admin:contracts_projectcontract_change', args=[self.pk])
    def get_absolute_url(self):
        return reverse('view_contract', args=[self.pk])
    @property
    def entries(self):
        """
        All Entries worked on projects in this contract during the contract
        period.
        """
        # end_date is inclusive: entries up to (but excluding) the day
        # after end_date are counted.
        return Entry.objects.filter(project__in=self.projects.all(),
                                    start_time__gte=self.start_date,
                                    end_time__lt=self.end_date + relativedelta(days=1))
    def contracted_hours(self, approved_only=True):
        """Compute the hours contracted for this contract.
        (This replaces the old `num_hours` field.)
        :param boolean approved_only: If true, only include approved
            contract hours; if false, include pending ones too.
        :returns: The sum of the contracted hours, subject to the
            `approved_only` parameter.
        :rtype: Decimal
        """
        qset = self.contract_hours
        if approved_only:
            qset = qset.filter(status=ContractHour.APPROVED_STATUS)
        result = qset.aggregate(sum=Sum('hours'))['sum']
        # aggregate() returns None for an empty queryset; normalize to 0.
        return result or 0
    def pending_hours(self):
        """Compute the contract hours still in pending status"""
        qset = self.contract_hours.filter(status=ContractHour.PENDING_STATUS)
        result = qset.aggregate(sum=Sum('hours'))['sum']
        return result or 0
    @property
    def hours_assigned(self):
        """Total assigned hours for this contract."""
        # Cached per-instance after the first access.
        if not hasattr(self, '_assigned'):
            # TODO put this in a .extra w/a subselect
            assignments = self.assignments.aggregate(s=Sum('num_hours'))
            self._assigned = assignments['s'] or 0
        return self._assigned or 0
    @property
    def hours_remaining(self):
        # Approved contracted hours minus billable hours worked.
        return self.contracted_hours() - self.hours_worked
    @property
    def hours_worked(self):
        """Number of billable hours worked on the contract."""
        if not hasattr(self, '_worked'):
            # TODO put this in a .extra w/a subselect
            entries = self.entries.filter(activity__billable=True)
            self._worked = entries.aggregate(s=Sum('hours'))['s'] or 0
        return self._worked or 0
    @property
    def nonbillable_hours_worked(self):
        """Number of non-billable hours worked on the contract."""
        if not hasattr(self, '_nb_worked'):
            # TODO put this in a .extra w/a subselect
            entries = self.entries.filter(activity__billable=False)
            self._nb_worked = entries.aggregate(s=Sum('hours'))['s'] or 0
        return self._nb_worked or 0
    @property
    def fraction_hours(self):
        """Fraction of contracted hours that have been worked. E.g.
        if 50 hours have been worked of 100 contracted, value is 0.5.
        """
        if self.contracted_hours():
            return float(self.hours_worked) / float(self.contracted_hours())
        return 0.0
    @property
    def fraction_schedule(self):
        """If contract status is current, return the current date as a
        fraction of the scheduled period - e.g. if the contract period is
        June 1 to July 31, and today is July 1, then the value is
        about 0.5.
        If the contract status is not current, or either the start or end
        date is not set, returns 0.0
        """
        if self.status != ProjectContract.STATUS_CURRENT or \
                not self.start_date or \
                not self.end_date:
            return 0.0
        contract_period = (self.end_date - self.start_date).days
        if contract_period <= 0.0:
            return 0.0
        days_elapsed = (datetime.date.today() - self.start_date).days
        if days_elapsed <= 0.0:
            return 0.0
        return float(days_elapsed) / contract_period
class ContractHour(models.Model):
    """A block of hours requested (and possibly approved) on a contract.

    Save and delete notify the accounting addresses (if configured) about
    any change that involves a pending record.
    """
    PENDING_STATUS = 1
    APPROVED_STATUS = 2
    CONTRACT_HOUR_STATUS = (
        (PENDING_STATUS, 'Pending'),  # default
        (APPROVED_STATUS, 'Approved')
    )
    hours = models.DecimalField(max_digits=8, decimal_places=2,
                                default=0)
    contract = models.ForeignKey(ProjectContract,
                                 related_name='contract_hours')
    date_requested = models.DateField()
    date_approved = models.DateField(blank=True, null=True)
    status = models.IntegerField(choices=CONTRACT_HOUR_STATUS,
                                 default=PENDING_STATUS)
    notes = models.TextField(blank=True)
    class Meta(object):
        verbose_name = 'contracted hours'
        verbose_name_plural = verbose_name
        db_table = 'timepiece_contracthour'  # Using legacy table name.
    def __init__(self, *args, **kwargs):
        super(ContractHour, self).__init__(*args, **kwargs)
        # Save the current values so we can report changes later
        self._original = {
            'hours': self.hours,
            'notes': self.notes,
            'status': self.status,
            'get_status_display': self.get_status_display(),
            'date_requested': self.date_requested,
            'date_approved': self.date_approved,
            # Guard on contract_id to avoid a DB lookup (or error) when no
            # contract has been assigned yet.
            'contract': self.contract if self.contract_id else None,
        }
    def get_absolute_url(self):
        return reverse('admin:contracts_contracthour_change', args=[self.pk])
    def clean(self):
        # Note: this is called when editing in the admin, but not otherwise
        if self.status == self.PENDING_STATUS and self.date_approved:
            raise ValidationError(
                "Pending contracthours should not have an approved date, did "
                "you mean to change status to approved?"
            )
    def _send_mail(self, subject, ctx):
        """Render the hours-email template with ``ctx`` and send it to the
        configured accounting addresses (no-op when none are configured).
        """
        # Don't go to the work unless we have a place to send it
        emails = utils.get_setting('TIMEPIECE_ACCOUNTING_EMAILS')
        if not emails:
            return
        from_email = utils.get_setting('DEFAULT_FROM_EMAIL')
        template = get_template('timepiece/contract/hours_email.txt')
        context = Context(ctx)
        msg = template.render(context)
        send_mail(
            subject=subject,
            message=msg,
            from_email=from_email,
            recipient_list=emails
        )
    def save(self, *args, **kwargs):
        """Save the record, defaulting ``date_approved`` on approval and
        emailing accounting when a pending record is created or changed.
        """
        # Let the date_approved default to today if it's been set approved
        # and doesn't have one
        if self.status == self.APPROVED_STATUS and not self.date_approved:
            self.date_approved = datetime.date.today()
        # If we have an email address to send to, and this record was
        # or is in pending status, we'll send an email about the change.
        # NOTE(review): is_new is only bound inside this guard, but the
        # identical condition below re-evaluates to the same value (save()
        # does not change status or _original), so it is defined when used.
        if ContractHour.PENDING_STATUS in (self.status, self._original['status']):
            is_new = self.pk is None
        super(ContractHour, self).save(*args, **kwargs)
        if ContractHour.PENDING_STATUS in (self.status, self._original['status']):
            domain = Site.objects.get_current().domain
            method = 'https' if utils.get_setting('TIMEPIECE_EMAILS_USE_HTTPS')\
                else 'http'
            url = self.contract.get_absolute_url()
            ctx = {
                'new': is_new,
                'changed': not is_new,
                'deleted': False,
                'current': self,
                'previous': self._original,
                'link': '%s://%s%s' % (method, domain, url)
            }
            prefix = "New" if is_new else "Changed"
            name = self._meta.verbose_name
            subject = "%s pending %s for %s" % (prefix, name, self.contract)
            self._send_mail(subject, ctx)
    def delete(self, *args, **kwargs):
        """Delete the record and email accounting if it was pending."""
        # Note: this gets called when you delete a single item using the red
        # Delete button at the bottom while editing it in the admin - but not
        # when you delete one or more from the change list using the admin
        # action.
        super(ContractHour, self).delete(*args, **kwargs)
        # If we have an email address to send to, and this record was in
        # pending status, we'll send an email about the change.
        if ContractHour.PENDING_STATUS in (self.status, self._original['status']):
            domain = Site.objects.get_current().domain
            method = 'https' if utils.get_setting('TIMEPIECE_EMAILS_USE_HTTPS')\
                else 'http'
            # The row is gone, so link to and name the snapshot taken in
            # __init__ rather than the (now-deleted) current state.
            url = self.contract.get_absolute_url()
            ctx = {
                'deleted': True,
                'new': False,
                'changed': False,
                'previous': self._original,
                'link': '%s://%s%s' % (method, domain, url)
            }
            contract = self._original['contract']
            name = self._meta.verbose_name
            subject = "Deleted pending %s for %s" % (name, contract)
            self._send_mail(subject, ctx)
class ContractAssignment(models.Model):
    """Assignment of one user to a contract over a given date range."""
    contract = models.ForeignKey(ProjectContract, related_name='assignments')
    user = models.ForeignKey(User, related_name='assignments')
    start_date = models.DateField()
    end_date = models.DateField()
    num_hours = models.DecimalField(max_digits=8, decimal_places=2, default=0)
    min_hours_per_week = models.IntegerField(default=0)

    class Meta:
        unique_together = (('contract', 'user'),)
        db_table = 'timepiece_contractassignment'  # Using legacy table name.

    def __unicode__(self):
        return u'{0} / {1}'.format(self.user, self.contract)

    @property
    def entries(self):
        """Entries by this user on the contract's projects, falling inside
        the assignment window (end date inclusive)."""
        window_end = self.end_date + relativedelta(days=1)
        return Entry.objects.filter(
            project__in=self.contract.projects.all(),
            user=self.user,
            start_time__gte=self.start_date,
            end_time__lt=window_end)

    @property
    def hours_remaining(self):
        """Hours still unworked against the assigned total."""
        return self.num_hours - self.hours_worked

    @property
    def hours_worked(self):
        """Sum of hours from matching entries, memoized per instance."""
        if not hasattr(self, '_worked'):
            total = self.entries.aggregate(s=Sum('hours'))['s']
            self._worked = total or 0
        return self._worked or 0
class HourGroupManager(models.Manager):
    """Manager that rolls entry hours up into billing bundles."""

    def summaries(self, entries):
        """Summarize ``entries`` as ``(bundle name, (hours, activity rows))``.

        Returns named bundles in alphabetical order, then an 'Other'
        bucket for unbundled activities (when any exist), then a grand
        'Total' row.
        """
        # Hour sums per bundle: (bundle name, bundle pk, summed hours).
        by_bundle = entries.values(
            'activity__activity_bundle',
            'activity__activity_bundle__name',
        ).annotate(Sum('hours')).order_by(
            'activity__activity_bundle__order',
            'activity__activity_bundle__name',
        )
        bundle_rows = list(by_bundle.values_list(
            'activity__activity_bundle__name',
            'activity__activity_bundle',
            'hours__sum',
        ))
        # Hour sums per activity: (activity name, bundle pk, summed hours).
        by_activity = entries.values(
            'activity', 'activity__name', 'activity__activity_bundle',
        ).annotate(Sum('hours')).order_by('activity')
        activity_rows = list(by_activity.values_list(
            'activity__name', 'activity__activity_bundle', 'hours__sum',
        ))
        named = {}
        unbundled = ()
        for bundle_name, bundle_pk, bundle_hours in bundle_rows:
            acts = [(act_name, act_hours)
                    for act_name, act_pk, act_hours in activity_rows
                    if act_pk == bundle_pk]
            if bundle_name is None:
                # Activities with no bundle collect under 'Other'.
                unbundled = (bundle_hours, acts)
            else:
                named[bundle_name] = (bundle_hours, acts)
        summary = sorted(named.items())
        if unbundled:
            summary.append(('Other', unbundled))
        grand_total = sum(row[2] for row in bundle_rows)
        summary.append(('Total', (grand_total, [])))
        return summary
class HourGroup(models.Model):
    """Activities that are bundled together for billing"""
    # Display name of the bundle (unique across bundles).
    name = models.CharField(max_length=255, unique=True)
    # Activities rolled up under this bundle; reverse accessor on
    # Activity is ``activity_bundle``.
    activities = models.ManyToManyField('entries.Activity',
                                     related_name='activity_bundle')
    # Sort position used when ordering bundles; optional but unique.
    order = models.PositiveIntegerField(unique=True, blank=True, null=True)
    # Custom manager providing the summaries() rollup.
    objects = HourGroupManager()
    class Meta:
        db_table = 'timepiece_hourgroup'  # Using legacy table name.
    def __unicode__(self):
        return self.name
class EntryGroup(models.Model):
    """A batch of entries grouped together for invoicing."""
    INVOICED = Entry.INVOICED
    NOT_INVOICED = Entry.NOT_INVOICED
    STATUSES = {
        INVOICED: 'Invoiced',
        NOT_INVOICED: 'Not Invoiced',
    }
    user = models.ForeignKey(User, related_name='entry_group')
    project = models.ForeignKey('crm.Project', related_name='entry_group')
    status = models.CharField(max_length=24, choices=STATUSES.items(),
                              default=INVOICED)
    number = models.CharField("Reference #", max_length=50, blank=True,
                              null=True)
    comments = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    start = models.DateField(blank=True, null=True)
    end = models.DateField()

    class Meta:
        db_table = 'timepiece_entrygroup'  # Using legacy table name.

    def delete(self):
        # Release the grouped entries back to approved status before
        # removing the invoice group itself.
        self.entries.update(status=Entry.APPROVED)
        super(EntryGroup, self).delete()

    def __unicode__(self):
        details = {
            'number': self.number,
            'status': self.status,
            'project': self.project,
            'end': self.end.strftime('%b %Y'),
        }
        return (u'Entry Group '
                u'%(number)s: %(status)s - %(project)s - %(end)s' % details)
|
{
"content_hash": "cef381cbaf84640b8a45c7174267277e",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 82,
"avg_line_length": 39.209476309226936,
"alnum_prop": 0.5953062392673154,
"repo_name": "gaga3966/django-timepiece",
"id": "a9d2bc078b18ce376180b966bef7b4aaf05f316f",
"size": "15723",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "timepiece/contracts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44702"
},
{
"name": "HTML",
"bytes": "196700"
},
{
"name": "JavaScript",
"bytes": "199553"
},
{
"name": "Python",
"bytes": "551854"
}
],
"symlink_target": ""
}
|
import sys
import imp
import json
import sublime
import sublime_plugin
# Make sure all dependencies are reloaded on upgrade
# NOTE(review): Sublime Text caches plugin modules across package upgrades,
# so the helper reloader module is itself reloaded before being used --
# confirm this matches the plugin's upgrade flow.
reloader_path = 'json2apex.helpers.reloader'
if reloader_path in sys.modules:
    imp.reload(sys.modules[reloader_path])
from .helpers import reloader
reloader.reload()  # re-imports the plugin's helper modules
from .helpers import JSON2ApexLib
from .helpers import PatternClass
# from . import logger
# log = logger.get(__name__)
class SchemaToApexCommand(sublime_plugin.TextCommand):
    """Generate Apex classes from the schema text in the current view."""
    apexClassView = {}
    classList = []

    def run(self, edit):
        source = self.getContent()
        if source is not None:
            self.generateCode(edit, source)

    def getContent(self):
        """Return the full text of the active view, or None on error."""
        try:
            return self.view.substr(sublime.Region(0, self.view.size()))
        except ValueError:
            sublime.error_message('Invalid JSON')
            return None

    def generateCode(self, edit, api_object):
        """Build Apex code from the schema and open it in a new tab."""
        pattern = PatternClass.Pattern.fromString('PatternCls', api_object)
        generated = pattern.generateCode()
        del pattern
        self.classList = ["PatternCls"]
        self.apexClassView = sublime.active_window().new_file()
        self.apexClassView.set_syntax_file(
            'Packages/MavensMate/sublime/lang/Apex.sublime-syntax')
        self.apexClassView.insert(edit, 0, generated)
        self.renameClass()

    def renameClass(self):
        # Kick off the interactive renaming chain for the generated classes.
        self.apexClassView.run_command(
            'launch_class_renaming', {'classList': self.classList})
class YamlSchemaToApexCommand(sublime_plugin.TextCommand):
    """Generate Apex classes from the YAML schema in the current view."""
    apexClassView = {}
    classList = []

    def run(self, edit):
        source = self.getContent()
        if source is not None:
            self.generateCode(edit, source)

    def getContent(self):
        """Return the full text of the active view, or None on error."""
        try:
            return self.view.substr(sublime.Region(0, self.view.size()))
        except ValueError:
            sublime.error_message('Invalid JSON')
            return None

    def generateCode(self, edit, api_object):
        """Build Apex code from the YAML schema and open it in a new tab."""
        pattern = PatternClass.Pattern.fromYaml('PatternCls', api_object)
        generated = pattern.generateCode()
        del pattern
        self.classList = ["PatternCls"]
        self.apexClassView = sublime.active_window().new_file()
        self.apexClassView.set_syntax_file('Packages/MavensMate/sublime/lang/Apex.sublime-syntax')
        self.apexClassView.insert(edit, 0, generated)
        self.renameClass()

    def renameClass(self):
        # Kick off the interactive renaming chain for the generated classes.
        self.apexClassView.run_command(
            'launch_class_renaming', {'classList': self.classList})
class JsonToApexCommand(sublime_plugin.TextCommand):
    """Generate Apex classes from a JSON sample in the current view."""
    apexClassView = {}
    classList = []

    def run(self, edit):
        parsed = self.getContent()
        if parsed is not None:
            self.generateCode(edit, parsed)

    def getContent(self):
        """Parse the active view as JSON; report and return None if invalid."""
        try:
            raw = self.view.substr(sublime.Region(0, self.view.size()))
            return json.loads(raw)
        except ValueError:
            sublime.error_message('Invalid JSON')
            return None

    def generateCode(self, edit, api_object):
        """Convert the parsed sample to Apex and show it in a new tab."""
        converter = JSON2ApexLib.SampleConverter()
        generated = converter.generateFromSample(api_object)
        self.classList = ["API", "Root_object"]
        self.classList += list(converter.formedClasses.values())
        self.apexClassView = sublime.active_window().new_file()
        self.apexClassView.set_syntax_file(
            'Packages/MavensMate/sublime/lang/Apex.sublime-syntax')
        self.apexClassView.insert(edit, 0, generated)
        self.renameClass()
        del converter

    def renameClass(self):
        # Kick off the interactive renaming chain for the generated classes.
        self.apexClassView.run_command(
            'launch_class_renaming', {'classList': self.classList})
class LaunchClassRenamingCommand(sublime_plugin.TextCommand):
    """Prompt the user to rename each generated class, one at a time."""
    apexView = {}
    classList = []
    oldClassName = ''

    def run(self, edit, classList):
        self.apexView = self.view
        self.classList = classList
        window = self.apexView.window()
        # Take the next class name off the queue and highlight every
        # occurrence so the user can see what will be renamed.
        self.oldClassName = self.classList.pop(0)
        occurrences = self.apexView.find_all(self.oldClassName)
        selection = self.apexView.sel()
        selection.clear()
        selection.add_all(occurrences)
        panel = window.show_input_panel(
            'Rename ' + self.oldClassName, self.oldClassName,
            self.rename, None, None)
        # Pre-select the suggested name inside the input panel.
        panel.sel().add(panel.visible_region())

    def rename(self, newName):
        # Hand the chosen name (plus the remaining queue) to the rename
        # command, which chains back here until the queue is empty.
        self.apexView.run_command('rename_apex_class', {
            'oldClassName': self.oldClassName,
            'newClassName': newName,
            'classList': self.classList,
        })
class RenameApexClassCommand(sublime_plugin.TextCommand):
    # Replaces every occurrence of oldClassName with newClassName in the
    # view, then chains back to launch_class_renaming for the rest of
    # classList (or selects the whole buffer when the queue is empty).
    def run(self, edit, oldClassName, newClassName, classList):
        matches = self.view.find_all(oldClassName)
        reg_end = 0
        for m in matches:
            # Re-find from reg_end on each pass: every replace() can shift
            # later text, so the pre-computed regions in ``matches`` may no
            # longer line up with the buffer contents.
            # NOTE(review): reg_end stores pre-replacement offsets; verify
            # this stays correct when the new name's length differs.
            cur_m = self.view.find(oldClassName, reg_end)
            reg_end = m.end()
            self.view.replace(edit, cur_m, newClassName)
        if(0 < len(classList)):
            args = {
                'classList': classList
            }
            self.view.run_command('launch_class_renaming', args)
        else:
            # Done renaming: select the whole buffer as a completion cue.
            self.view.sel().add(sublime.Region(0, self.view.size()))
|
{
"content_hash": "6cbc1edf2c8495e981f218e67334d489",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 92,
"avg_line_length": 26.804469273743017,
"alnum_prop": 0.7219674864526886,
"repo_name": "nchursin/json2apex",
"id": "b79588340b983668d800ce5f516f5c2bea185ece",
"size": "4798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json2apex_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "234273"
}
],
"symlink_target": ""
}
|
import sqlite3 as sql
from error import error
class database:
    """Handles everything that touches the average-speed-check database.

    All queries now bind values with ``?`` placeholders instead of
    concatenating them into the SQL text, so plate strings and the like
    cannot inject SQL.  Values are still passed through ``str()`` where
    the original did, preserving stored types.
    """
    def __init__(self):
        """Open the sqlite database and keep a shared cursor."""
        try:
            self.rdb = None
            self.rdb = sql.connect('average_check.db')
            self.ecx = self.rdb.cursor()
        except Exception:  # was a bare except; narrowed to Exception
            error(1, 'sql connect error', True)
    def record_time(self, p_id, cam_id, s_id, uuid, time, speed):
        """Record the time a plate passed a camera, with its speed."""
        try:
            self.ecx.execute(
                "INSERT INTO data (p_id, cam_id, s_id, uuid, time, speed) "
                "VALUES (?, ?, ?, ?, ?, ?);",
                (str(p_id), str(cam_id), str(s_id), str(uuid), str(time),
                 str(speed)))
            self.rdb.commit()  # commit is needed to actually persist the row
        except Exception as e:
            error(2, str(e) + ' sql record_time() error', True)
    def find_site(self, site_id):
        """Return (s_id, speed limit) for the given site identifier string."""
        try:
            self.ecx.execute(
                "select s_id, s_limit from sites where site_id = ? limit 1;",
                (str(site_id),))
            result = self.ecx.fetchone()
            if result is None:  # no such site
                error(3, 'sql find site() error (none type) - ', True)
            return result[0], result[1]
        except Exception as e:
            error(3, str(e) + 'sql find site() error', True)
    def add_plate(self, plate, foreign=False):
        """Return the p_id for ``plate``, inserting a new row if needed."""
        try:
            foreign = str(foreign).upper()  # store the bool as 'TRUE'/'FALSE'
            self.ecx.execute(
                "select (1) from plates where plate = ? limit 1;", (plate,))
            if self.ecx.fetchone() is None:  # plate not seen before
                self.ecx.execute(
                    "INSERT INTO plates (plate, p_foreign) VALUES (?, ?);",
                    (plate, foreign))
                self.rdb.commit()  # commit insert
            self.ecx.execute(
                "select p_id from plates where plate = ?;", (plate,))
            return self.ecx.fetchone()[0]
        except Exception as e:
            error(4, str(e) + 'sql add_plate() error', True)
    def get_cam_m(self, cam_id, s_id):
        """Return how many meters along the road the given camera sits."""
        result = None  # defined up front so the error handler can report it
        try:
            self.ecx.execute(
                "select cam_m from cams where s_id = ? and cam_id = ? "
                "limit 1;", (str(s_id), str(cam_id)))
            result = self.ecx.fetchone()
            if result is None:
                error(5, 'sql get_cam_m() error (no cam) - ' + str(s_id) +
                      ' - ' + str(cam_id), True)
            return float(result[0])
        except Exception as e:
            error(5, str(e) + 'sql get_cam_m() error - ' + str(result), True)
    def last_cam(self, p_id, s_id):
        """Return (time, cam_id) of the car's most recent pass, or False."""
        self.ecx.execute(
            "SELECT time, cam_id FROM data where p_id = ? and s_id = ? "
            "order by d_index DESC limit 1;", (str(p_id), str(s_id)))
        result = self.ecx.fetchone()
        if result is None:
            error(8, 'get last_cam error - ' + str(p_id) + ' - ' +
                  str(s_id) + ' - ' + str(result), True)
            return False
        return result[0], result[1]
    def cam_first(self, curr_cam_m, time, p_id, s_id):
        """Return True when this pass starts a new trip along the road."""
        try:
            if curr_cam_m == 0:  # the first camera on the road starts a trip
                return True
            self.ecx.execute(
                "SELECT time FROM data where p_id = ? and s_id = ? "
                "order by d_index DESC limit 1;", (str(p_id), str(s_id)))
            result = self.ecx.fetchone()
            if result is None:
                # No earlier record for this plate/site: treat as first.
                return True
            return (result[0] < time - 3600)  # older than 1 hour => new trip
        except Exception as e:
            error(10, str(e) + ' sql cam_first() error', True)
    def record_first(self, p_id, cam_id, s_id, uuid, time):
        """Record a first-camera pass; no speed can be computed yet."""
        try:
            self.ecx.execute(
                "INSERT INTO data (p_id, cam_id, s_id, uuid, time) "
                "VALUES (?, ?, ?, ?, ?)",
                (str(p_id), str(cam_id), str(s_id), str(uuid), str(time)))
            self.rdb.commit()  # commit insert
        except Exception as e:
            error(11, str(e) + ' sql record_first_time() error ' + str(p_id) +
                  ' - ' + str(cam_id) + ' - ' + str(s_id) + ' - ' +
                  str(uuid) + ' - ' + str(time), True)
    def return_speeders(self):
        """Return all data rows of cars over the speed limit."""
        try:
            self.ecx.execute(
                "SELECT * FROM data where speed > 0 order by d_index DESC;")
            return self.ecx.fetchall()
        except Exception as e:
            error(12, str(e) + ' sql record_speeders() error', True)
    def return_foreign_speeders(self):
        """Return all data rows of foreign-plated cars over the limit."""
        try:
            self.ecx.execute(
                "select d_index, p.p_id, uuid, s_id, time, cam_id, speed "
                "from data as d, plates as p where d.speed > 0 "
                "and p_foreign = 'TRUE' and p.p_id=d.p_id;")
            return self.ecx.fetchall()
        except Exception as e:
            error(13, str(e) + ' sql record_speeders() error', True)
    def get_cam_id(self, site_cam_id, s_id):
        """Map a per-site camera number to the program-wide cam_id."""
        try:
            self.ecx.execute(
                "SELECT cam_id FROM cams where site_cam_id = ? "
                "and s_id = ? LIMIT 1;", (str(site_cam_id), str(s_id)))
            result = self.ecx.fetchone()
            if result is None:
                error(14, ' sql get_cam_id() error ' + str(site_cam_id) +
                      ' ' + str(s_id), True)
                return None
            return result[0]
        except Exception as e:
            error(14, str(e) + ' sql get_cam_id() error', True)
    def get_plate(self, p_id):
        """Return the plate string for a p_id, or None."""
        try:
            self.ecx.execute(
                "SELECT plate FROM plates where p_id = ? LIMIT 1;",
                (str(p_id),))
            result = self.ecx.fetchone()
            if result is None:
                error(15, ' sql get_plate() error ' + str(p_id), False)
                return None
            return result[0]
        except Exception as e:
            error(16, str(e) + ' sql get_plate() error', True)
    def get_site(self, s_id):
        """Return (site_id, speed limit) for an s_id, or None."""
        try:
            self.ecx.execute(
                "SELECT site_id, s_limit FROM sites where s_id = ? LIMIT 1;",
                (str(s_id),))
            result = self.ecx.fetchone()
            if result is None:
                error(17, ' sql get_site() error ' + str(s_id), False)
                return None
            return result[0], result[1]
        except Exception as e:
            error(17, str(e) + ' sql get_site() error', True)
    def get_owner(self, p_id):
        """Return the owner info columns for a p_id, or None."""
        try:
            self.ecx.execute(
                "select * from owners where p_id= ? LIMIT 1;", (str(p_id),))
            result = self.ecx.fetchone()
            if result is None:
                return None
            return result[1], result[2]
        except Exception as e:
            error(18, str(e) + ' sql get_owner() error', True)
|
{
"content_hash": "2b2a1a85879fda0bb170300c58214218",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 193,
"avg_line_length": 48.416058394160586,
"alnum_prop": 0.615257048092869,
"repo_name": "msemple1111/average_alpr",
"id": "a128b877d913509e6424b29026776d59808b4ceb",
"size": "6633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30023"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
}
|
from .map_layer_error import MapLayerError
class UnmappedDataSourceError(MapLayerError):
    """MapLayerError subclass raised for a data source that is not mapped."""
|
{
"content_hash": "466d66c91daa388c65545aaf58c37e15",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 30,
"alnum_prop": 0.7916666666666666,
"repo_name": "adamkerz/arcpyext",
"id": "e6373375175092a0aedb22faf7244427edd2bd31",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/arcpyext/exceptions/unmapped_data_source_error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "90"
},
{
"name": "Python",
"bytes": "59745"
}
],
"symlink_target": ""
}
|
"""Tests for Convolution node name match via the XLA JIT.
The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import ops
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class ConvolutionNodeNameTest(xla_test.XLATestCase):
  """Verify convolution node name match.
  Verify convolution node names on TPU and CPU match with dilation > 1.
  """
  def _verifyNodeNameMatch(self, layer, input_sizes, filter_sizes, strides,
                           dilations):
    # Builds the same Keras conv layer twice -- once under the XLA test
    # scope and once on the plain CPU device -- and asserts the resulting
    # graph node names are identical.
    # NOTE(review): ``strides`` is accepted but never used below; the conv
    # layers are built with their default stride -- confirm intent.
    def _GetNodeNames(use_xla):
      with self.session():
        input_tensor = array_ops.placeholder(np.float32, shape=input_sizes)
        if use_xla:
          with self.test_scope():
            # pylint: disable=protected-access
            graph = ops.get_default_graph()
            graph._set_control_flow_context(
                control_flow_ops.XLAControlFlowContext())
            # pylint: enable=protected-access
            conv2d_op = layer(
                filters=64,
                kernel_size=filter_sizes,
                dilation_rate=dilations,
                padding="same")
            _ = conv2d_op(input_tensor)
            return [n.name for n in ops.get_default_graph().as_graph_def().node]
        else:
          with ops.device("CPU"):
            conv2d_op = layer(
                filters=64,
                kernel_size=filter_sizes,
                dilation_rate=dilations,
                padding="same")
            _ = conv2d_op(input_tensor)
            names = [
                n.name for n in ops.get_default_graph().as_graph_def().node
            ]
            # filter out space to depth ops.
            return [
                name for name in names
                if "space" not in name and "Space" not in name
            ]
    xla_names = _GetNodeNames(use_xla=True)
    no_xla_names = _GetNodeNames(use_xla=False)
    # CPU path creates some additional nodes to handle dilations.
    # TODO(b/138804006): Remove this when CPU & GPU support dilations.
    filtered_no_xla_names = []
    for name in no_xla_names:
      if ("dilation_rate" in name or "filter_shape" in name or "stack" in name):
        continue
      else:
        filtered_no_xla_names.append(name)
    self.assertListEqual(xla_names, filtered_no_xla_names)
  def testConv1DNodeNameMatch(self):
    # 1-D convolution with dilation 2.
    input_sizes = [8, 16, 3]
    filter_sizes = [7]
    strides = 1
    dilations = [2]
    layer = layers.Conv1D
    self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
                              dilations)
  def testConv2DNodeNameMatch(self):
    # 2-D convolution with dilation 2 on both spatial axes.
    input_sizes = [8, 16, 16, 3]
    filter_sizes = [7, 7]
    strides = 1
    dilations = [2, 2]
    layer = layers.Conv2D
    self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
                              dilations)
  def testConv3DNodeNameMatch(self):
    # 3-D convolution with dilation 2 on all spatial axes.
    input_sizes = [8, 16, 16, 16, 3]
    filter_sizes = [7, 7, 7]
    strides = 1
    dilations = [2, 2, 2]
    layer = layers.Conv3D
    self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
                              dilations)
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "2bbbf98fb8bb93f7fc4fcd7b931460fb",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 32.879629629629626,
"alnum_prop": 0.6088425795550549,
"repo_name": "frreiss/tensorflow-fred",
"id": "e346d9f17cdd9710505ae57c865b2be463130577",
"size": "4240",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/conv_node_name_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
# Copyright 2016-2017 University of Pittsburgh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http:www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, uuid, datetime
# query concept id and concept code for specific vocabulary
def getConceptCodeByVocabId(conn, vocabId):
    """Return all (concept_id, concept_code, concept_name) rows for a vocabulary.

    ``vocabId`` is now bound as a DB-API parameter (psycopg2 ``%s``
    paramstyle).  The original interpolated it *unquoted* into the SQL
    text, which produced invalid SQL for string vocabulary ids and
    allowed SQL injection.
    """
    cur = conn.cursor()
    qry = """
	select concept_id, concept_code, concept_name from public.concept where vocabulary_id = %s
    """
    cur.execute(qry, (vocabId,))
    return cur.fetchall()
# query concept id by concept code and vocabulary id
def getConceptIdByConceptCode(conn, conceptCode, vocabId):
    """Return the concept_id for a (concept_code, vocabulary_id) pair, or None.

    Values are bound as DB-API parameters (psycopg2 ``%s`` paramstyle)
    rather than interpolated, closing the SQL-injection hole in the
    original string-built query.
    """
    cur = conn.cursor()
    qry = """
	select * from public.concept where concept_code = %s and vocabulary_id = %s;
    """
    cur.execute(qry, (conceptCode, vocabId))
    for row in cur.fetchall():
        return row[0]
    return None  # explicit: no matching concept
|
{
"content_hash": "3d89fea7ce64925f2c30f1ca7d9f9bad",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 91,
"avg_line_length": 33.28947368421053,
"alnum_prop": 0.7201581027667984,
"repo_name": "dbmi-pitt/dbmi-annotator",
"id": "3529607fc61b7fa1cdd8a87d9ee89a539b5278d2",
"size": "1265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "translation/mp-evidence-base-ETL/postgres/omopConceptQry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1303624"
},
{
"name": "Dockerfile",
"bytes": "1013"
},
{
"name": "HTML",
"bytes": "6289636"
},
{
"name": "Java",
"bytes": "841717"
},
{
"name": "JavaScript",
"bytes": "3520455"
},
{
"name": "PLpgSQL",
"bytes": "15994"
},
{
"name": "Python",
"bytes": "268290"
},
{
"name": "Shell",
"bytes": "2088"
},
{
"name": "TSQL",
"bytes": "324332"
}
],
"symlink_target": ""
}
|
import shutil
from subprocess import Popen
from tempfile import TemporaryFile
from urllib.parse import urlparse
from pathlib import PurePath
from urllib.parse import unquote
GIT_BIN=shutil.which("git")
# Determine the default directory name used by git for the uri
def get_default_repo_dirname(uri, bare=False):
    """Return the directory name ``git clone`` would choose for *uri*.

    A ``.git`` suffix is stripped for normal clones and appended for bare
    ones, mirroring git's own defaults.  The suffix test uses the raw
    (percent-encoded) path, while the returned name is URL-decoded.
    """
    parsed = PurePath(urlparse(uri)[2])
    decoded = unquote(parsed.parts[-1])
    has_git_suffix = parsed.suffix == ".git"
    if bare:
        return decoded if has_git_suffix else "{}.git".format(decoded)
    return decoded[:-4] if has_git_suffix else decoded
class GitStatus:
    """Outcome of a git subprocess: exit code plus captured output."""

    def __init__(self, exit_status, stdout, stderr):
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr

    def has_error(self):
        """True when git exited with a non-zero status."""
        return bool(self.exit_status)
def clone(repository, directory=None, cwd=None):
    """Run ``git clone -- <repository> [directory]`` and capture its output.

    ``cwd`` selects the subprocess working directory.  Returns a GitStatus
    with the exit code and decoded stdout/stderr.  (The original had an
    unreachable ``return None`` after the ``with`` block; removed.)
    """
    args = [GIT_BIN, "clone", "--", repository]
    if directory:
        args.append(directory)
    # Temp files (rather than pipes) hold the output so a chatty clone
    # cannot deadlock on a full pipe buffer.
    with TemporaryFile() as outf, TemporaryFile() as errf:
        popen_args = dict(stdout=outf, stderr=errf)
        if cwd:
            popen_args["cwd"] = cwd
        status = Popen(args, **popen_args).wait()
        outf.seek(0)
        errf.seek(0)
        return GitStatus(status, outf.read().decode('utf-8'),
                         errf.read().decode('utf-8'))
def reset(cwd, hard=False, commit=None):
    """Run ``git reset`` in *cwd*, optionally ``--hard`` and to *commit*.

    Returns a GitStatus with the exit code and decoded stdout/stderr.
    (The original had an unreachable ``return None`` after the ``with``
    block; removed.)
    """
    args = [GIT_BIN, "reset"]
    if hard:
        args.append("--hard")
    if commit:
        args.append(commit)
    with TemporaryFile() as outf, TemporaryFile() as errf:
        status = Popen(args, cwd=cwd, stdout=outf, stderr=errf).wait()
        outf.seek(0)
        errf.seek(0)
        return GitStatus(status, outf.read().decode('utf-8'),
                         errf.read().decode('utf-8'))
def pull(cwd):
    """Run ``git pull`` in *cwd* and capture its output.

    Returns a GitStatus with the exit code and decoded stdout/stderr.
    (The original had an unreachable ``return None`` after the ``with``
    block; removed.)
    """
    args = [GIT_BIN, "pull"]
    with TemporaryFile() as outf, TemporaryFile() as errf:
        status = Popen(args, cwd=cwd, stdout=outf, stderr=errf).wait()
        outf.seek(0)
        errf.seek(0)
        return GitStatus(status, outf.read().decode('utf-8'),
                         errf.read().decode('utf-8'))
|
{
"content_hash": "dbc0b76069cc8401217d3685d41dd31c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 90,
"avg_line_length": 27.653333333333332,
"alnum_prop": 0.6142719382835101,
"repo_name": "datamachine/telex",
"id": "c2dd2c6c060ba439d111273f694d0addc2184292",
"size": "2074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "telex/git/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156218"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
}
|
""" ymir.mixins.packages
"""
import os
from fabric import api
from ymir import data as ydata
class PackageMixin(object):
    """ To be linux-base agnostic, services should NOT call apt or yum directly.
        Actually.. services should not be installing packages directly because
        one of the CAP languages is used, but, to build puppet on the remote
        side we do need to add system packages first.
        Pacapt is used as a universal front-end for the backend
        package manager.  See: https://github.com/icy/pacapt
    """
    # Process-wide flag so pacapt is only checked/installed once per run.
    _require_pacapt_already_run = False
    def _require_pacapt(self):
        """ installs pacapt (a universal front-end for apt/yum/dpkg)
            on the remote server if it does not already exist there
        """
        if self._require_pacapt_already_run:
            return  # optimization hack: let's only run once per process
        self.report("checking remote side for pacapt "
                    "(an OS-agnostic package manager)")
        with api.quiet():
            remote_missing_pacapt = api.run('ls /usr/bin/pacapt').failed
            if remote_missing_pacapt:
                self.report(
                    ydata.FAIL + " pacapt does not exist, installing it now")
                # Ship the copy of pacapt bundled inside ymir's data package.
                local_pacapt_path = os.path.join(
                    os.path.dirname(ydata.__file__), 'pacapt')
                self.put(local_pacapt_path, '/usr/bin', use_sudo=True)
            else:
                self.report(ydata.SUCCESS + " pacapt is already present")
            api.sudo('chmod o+x /usr/bin/pacapt')
        self._require_pacapt_already_run = True
    def _update_system_packages(self, quiet=True):
        """ does not use ansible, hopefully this makes caching easier so
            updates are faster.  still, this should probably be refactored
            or deprecated because it's the only reason pacapt is still needed.

            A touch-file canary records when the last update ran; the
            update is skipped while the canary is younger than ``max_age``
            minutes (360 = 6 hours).
        """
        self._require_pacapt()
        quiet = '> /dev/null' if quiet else ''
        self.report("updating system packages, this might take a while.")
        canary = '/tmp/.ymir_package_update'
        max_age = 360
        age_test = "[[ `date +%s -r {0}` -gt `date +%s --date='{1} min ago'` ]]"
        with api.quiet():
            need_update = api.sudo(age_test.format(canary, max_age)).failed
            if not need_update:
                msg = "packages were updated less than {0} minutes ago"
                self.report(ydata.SUCCESS + msg.format(max_age))
                return True
        with api.shell_env(DEBIAN_FRONTEND='noninteractive'):
            with api.settings(warn_only=True):
                # return code is "100" for centos
                result = api.sudo(
                    '/usr/bin/pacapt --noconfirm -Sy {0}'.format(quiet)).succeeded
            api.sudo('touch {0}'.format(canary))
        return result
    _update_sys_packages = _update_system_packages
    def _install_system_package(self, pkg_name, quiet=False):
        """Install a system package, trying apt first and then yum.

        NOTE(review): ``quiet`` is accepted for interface compatibility
        but is currently unused.
        """
        with api.settings(warn_only=True):
            success = self._provision_apt(pkg_name)
            if not success:
                success = self._provision_yum(pkg_name)
        return success
    def _pkg_provisioner(self, pkg_name, ansible_module_name, state='present'):
        """Drive a single ansible package module for one package."""
        cmd = '--become --module-name {0} -a "name={1} state={2}"'
        cmd = cmd.format(
            ansible_module_name, pkg_name, state)
        with api.settings(warn_only=True):
            return self._provision_ansible(cmd)
    def _pkgs_provision(self, pkg_names, ansible_module_name, state='present'):
        """Apply ``state`` to a comma-separated list of package names;
        True only if every package succeeded."""
        pkg_names = pkg_names.split(',')
        results = []
        for pkg in pkg_names:
            results.append(
                self._pkg_provisioner(pkg, ansible_module_name, state=state))
        return all(results)
    def _provision_yum(self, pkg_names):
        """Install package(s) through ansible's yum module."""
        return self._pkg_provisioner(pkg_names, 'yum')
    def _provision_apt(self, pkg_names):
        """Install package(s) through ansible's apt module."""
        return self._pkg_provisioner(pkg_names, 'apt')
    def _remove_system_package(self, pkg_name):
        """Remove a system package, trying apt first and then yum.

        Fix: now returns the success flag (mirroring
        ``_install_system_package``); the original discarded the yum
        fallback result and always returned None.
        """
        success = self._pkg_provisioner(pkg_name, "apt", state="absent")
        if not success:
            success = self._pkg_provisioner(pkg_name, "yum", state="absent")
        return success
|
{
"content_hash": "7ae284e9cee25aef519d5a783c8af307",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 82,
"avg_line_length": 40.333333333333336,
"alnum_prop": 0.5903187721369539,
"repo_name": "mattvonrocketstein/ymir",
"id": "98756ad5bcc8721c53a926850bded2d470e3409b",
"size": "4259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ymir/mixins/packages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "330"
},
{
"name": "Puppet",
"bytes": "3363"
},
{
"name": "Python",
"bytes": "177403"
},
{
"name": "Ruby",
"bytes": "9827"
},
{
"name": "Shell",
"bytes": "31589"
}
],
"symlink_target": ""
}
|
"""
Types to use in messaging stuff.
"""
from typing import TypeVar, Dict, List
# Payload shapes accepted by the messaging layer (constrained TypeVar).
Transferrable = TypeVar("Transferrable", Dict, List, str)
# Wire formats a payload may be serialized into.
Serialized = TypeVar("Serialized", str, bytes)
|
{
"content_hash": "fbd79c34d69af0b3dc4d4abd585e8c47",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 18.9,
"alnum_prop": 0.7195767195767195,
"repo_name": "anti1869/sunhead",
"id": "4a3117dcef23ac7db11954c36d900c94ed4634dc",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sunhead/events/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62290"
}
],
"symlink_target": ""
}
|
from tempest.api.network import base
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class SubnetPoolsTestJSON(base.BaseNetworkTest):
    """Tests the following operations in the subnetpools API:

        Create a subnet pool.
        Update a subnet pool.
        Delete a subnet pool.
        Lists subnet pool.
        Show subnet pool details.

    v2.0 of the Neutron API is assumed.  It is also assumed that the
    ``subnet_allocation`` option in the [network-feature-enabled] section
    and the ``default_network`` option in the [network] section of
    etc/tempest.conf are configured.
    """
    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the deployment under test does not
        # enable Neutron's subnet_allocation extension.
        super(SubnetPoolsTestJSON, cls).skip_checks()
        if not utils.is_extension_enabled('subnet_allocation', 'network'):
            msg = "subnet_allocation extension not enabled."
            raise cls.skipException(msg)
    @decorators.attr(type='smoke')
    @decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e9811')
    def test_create_list_show_update_delete_subnetpools(self):
        subnetpool_name = data_utils.rand_name('subnetpools')
        # create subnet pool
        prefix = CONF.network.default_network
        body = self.subnetpools_client.create_subnetpool(name=subnetpool_name,
                                                         prefixes=prefix)
        subnetpool_id = body["subnetpool"]["id"]
        # Ensure the pool is cleaned up even if a later assertion fails.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.subnetpools_client.delete_subnetpool,
                        subnetpool_id)
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # get detail about subnet pool
        body = self.subnetpools_client.show_subnetpool(subnetpool_id)
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # update the subnet pool
        subnetpool_name = data_utils.rand_name('subnetpools_update')
        body = self.subnetpools_client.update_subnetpool(subnetpool_id,
                                                         name=subnetpool_name)
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # delete subnet pool
        body = self.subnetpools_client.delete_subnetpool(subnetpool_id)
        # Showing the pool after deletion must 404, proving it is gone.
        self.assertRaises(lib_exc.NotFound,
                          self.subnetpools_client.show_subnetpool,
                          subnetpool_id)
|
{
"content_hash": "1cc8541aeac3363668876ba427d45e26",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 42.983333333333334,
"alnum_prop": 0.6521907716169058,
"repo_name": "Juniper/tempest",
"id": "bfc26098ea1dd4969cef3d9a5adfbe58b05f2ee7",
"size": "3174",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/api/network/test_subnetpools_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
}
|
'''config.py: util functions for config, mainly for heron-cli'''
import argparse
import contextlib
import getpass
import os
import sys
import subprocess
import tarfile
import tempfile
import yaml
from heron.common.src.python.utils.log import Log
# pylint: disable=logging-not-lazy
# default environ tag, if not provided
ENVIRON = "default"
# directories for heron distribution
BIN_DIR = "bin"
CONF_DIR = "conf"
ETC_DIR = "etc"
LIB_DIR = "lib"
# per-user directory holding the heron CLI installation
CLI_DIR = ".heron"
# release metadata file shipped at the root of a heron distribution
RELEASE_YAML = "release.yaml"
# release metadata location inside a PEX built with `zip_safe = False`
ZIPPED_RELEASE_YAML = "scripts/packages/release.yaml"
# file generated from command-line overrides (see parse_override_config)
OVERRIDE_YAML = "override.yaml"
# directories for heron sandbox
SANDBOX_CONF_DIR = "./heron-conf"
# config file for heron cli
CLIENT_YAML = "client.yaml"
# cli configs for role and env
IS_ROLE_REQUIRED = "heron.config.is.role.required"
IS_ENV_REQUIRED = "heron.config.is.env.required"
def create_tar(tar_filename, files, config_dir, config_files):
  '''
  Create a gzipped tar file at `tar_filename` containing `files` (at the
  archive root), `config_dir` (as the sandbox conf directory) and
  `config_files` (inside the sandbox conf directory).

  :raises Exception: if any input path does not exist
  '''
  with contextlib.closing(tarfile.open(tar_filename, 'w:gz', dereference=True)) as tar:
    for path in files:
      if not os.path.isfile(path):
        raise Exception("%s is not an existing file" % path)
      tar.add(path, arcname=os.path.basename(path))
    if not os.path.isdir(config_dir):
      raise Exception("%s is not an existing directory" % config_dir)
    tar.add(config_dir, arcname=get_heron_sandbox_conf_dir())
    for path in config_files:
      if not os.path.isfile(path):
        raise Exception("%s is not an existing file" % path)
      arcfile = os.path.join(get_heron_sandbox_conf_dir(), os.path.basename(path))
      tar.add(path, arcname=arcfile)
def get_subparser(parser, command):
  '''
  Retrieve the subparser registered under `command`, or None if absent.
  '''
  # pylint: disable=protected-access
  for action in parser._actions:
    if not isinstance(action, argparse._SubParsersAction):
      continue
    # there will probably only be one subparser action, but check them all
    for name, subparser in action.choices.items():
      if name == command:
        return subparser
  return None
def cygpath(x):
  '''
  normalized class path on cygwin

  Shells out to the `cygpath` utility and returns the first line of output.
  '''
  proc = subprocess.Popen(['cygpath', '-wp', x], stdout=subprocess.PIPE)
  output = proc.communicate()[0]
  return output.split("\n")[0]
def identity(x):
  '''
  identity function

  No-op counterpart of cygpath() used on non-cygwin platforms.

  :param x: any value
  :return: `x` unchanged
  '''
  return x
def normalized_class_path(x):
  '''
  Normalize a path: convert via cygpath on cygwin, pass through elsewhere.
  '''
  return cygpath(x) if sys.platform == 'cygwin' else identity(x)
def get_classpath(jars):
  '''
  Join the normalized paths of all `jars` into a ':'-separated classpath.
  '''
  return ':'.join(normalized_class_path(jar) for jar in jars)
def get_heron_dir():
  """
  This will extract heron directory from .pex file.
  For example,
  when __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and
  its real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',
  the internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory
  This means the variable `go_above_dirs` below is 9.
  :return: root location of the .pex file
  """
  go_above_dirs = 9
  segments = os.path.realpath(__file__).split('/')
  heron_dir = '/'.join(segments[:-go_above_dirs])
  return normalized_class_path(heron_dir)
def get_zipped_heron_dir():
  """
  This will extract heron directory from .pex file,
  with `zip_safe = False' Bazel flag added when building this .pex file
  For example,
  when __file__'s real path is
  '/Users/heron-user/.pex/code/xxxyyy/heron/tools/common/src/python/utils/config.pyc', and
  the internal variable ``path`` would be '/Users/heron-user/.pex/code/xxxyyy/',
  which is the root PEX directory
  This means the variable `go_above_dirs` below is 7.
  :return: root location of the .pex file.
  """
  go_above_dirs = 7
  segments = os.path.realpath(__file__).split('/')
  pex_root = '/'.join(segments[:-go_above_dirs])
  return normalized_class_path(pex_root)
################################################################################
# Get the root of heron dir and various sub directories depending on platform
################################################################################
def get_heron_bin_dir():
  """
  This will provide heron bin directory from .pex file.
  :return: absolute path of heron bin directory
  """
  return os.path.join(get_heron_dir(), BIN_DIR)
def get_heron_conf_dir():
  """
  This will provide heron conf directory from .pex file.
  :return: absolute path of heron conf directory
  """
  return os.path.join(get_heron_dir(), CONF_DIR)
def get_heron_lib_dir():
  """
  This will provide heron lib directory from .pex file.
  :return: absolute path of heron lib directory
  """
  return os.path.join(get_heron_dir(), LIB_DIR)
def get_heron_release_file():
  """
  This will provide the path to heron release.yaml file
  :return: absolute path of heron release.yaml file
  """
  heron_dir = get_heron_dir()
  return os.path.join(heron_dir, RELEASE_YAML)
def get_zipped_heron_release_file():
  """
  This will provide the path to heron release.yaml file.
  To be used for .pex file built with `zip_safe = False` flag.
  For example, `heron-ui'.
  :return: absolute path of heron release.yaml file
  """
  pex_root = get_zipped_heron_dir()
  return os.path.join(pex_root, ZIPPED_RELEASE_YAML)
def get_heron_cluster_conf_dir(cluster, default_config_path):
  """
  This will provide heron cluster config directory, if config path is default
  :return: absolute path of heron cluster conf directory
  """
  cluster_conf_dir = os.path.join(default_config_path, cluster)
  return cluster_conf_dir
def get_heron_sandbox_conf_dir():
  """
  This will provide heron conf directory in the sandbox
  :return: relative path of heron sandbox conf directory
  """
  # fixed relative location; also used as the tar arcname in create_tar()
  return SANDBOX_CONF_DIR
def get_heron_libs(local_jars):
  """Get all the heron lib jars with the absolute paths"""
  lib_dir = get_heron_lib_dir()
  return [os.path.join(lib_dir, jar) for jar in local_jars]
def get_heron_cluster(cluster_role_env):
  """Get the cluster to which topology is submitted"""
  # the cluster is everything before the first '/'
  return cluster_role_env.split('/', 1)[0]
# pylint: disable=too-many-branches
def parse_cluster_role_env(cluster_role_env, config_path):
  """Parse cluster/[role]/[environ], supply default, if not provided, not required

  :param cluster_role_env: string of the form "cluster[/role[/env]]"
  :param config_path: directory holding the cluster configuration
  :return: (cluster, role, environ) tuple
  :raises Exception: if config_path is invalid or a required part is missing
  """
  parts = cluster_role_env.split('/')[:3]
  Log.info("Using config file under %s" % config_path)
  if not os.path.isdir(config_path):
    Log.error("Config path cluster directory does not exist: %s" % config_path)
    raise Exception("Invalid config path")
  # if cluster/role/env is not completely provided, check further
  if len(parts) < 3:
    cli_conf_file = os.path.join(config_path, CLIENT_YAML)
    # if client conf doesn't exist, use default value
    if not os.path.isfile(cli_conf_file):
      if len(parts) == 1:
        parts.append(getpass.getuser())
      if len(parts) == 2:
        parts.append(ENVIRON)
    else:
      cli_confs = {}
      with open(cli_conf_file, 'r') as conf_file:
        # NOTE(review): yaml.load without an explicit Loader trusts the file;
        # consider yaml.safe_load if the client config is not fully trusted.
        tmp_confs = yaml.load(conf_file)
        # the return value of yaml.load can be None if conf_file is an empty file
        if tmp_confs is not None:
          cli_confs = tmp_confs
        else:
          # single-argument print() behaves identically on Python 2 and 3
          # (the Python-2-only `print x` statement broke importing under 3)
          print("Failed to read: %s due to it is empty" % (CLIENT_YAML))
      # if role is required but not provided, raise exception
      if len(parts) == 1:
        if (IS_ROLE_REQUIRED in cli_confs) and (cli_confs[IS_ROLE_REQUIRED] is True):
          raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                          % (cluster_role_env, IS_ROLE_REQUIRED, CLIENT_YAML))
        else:
          parts.append(getpass.getuser())
      # if environ is required but not provided, raise exception
      if len(parts) == 2:
        if (IS_ENV_REQUIRED in cli_confs) and (cli_confs[IS_ENV_REQUIRED] is True):
          raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                          % (cluster_role_env, IS_ENV_REQUIRED, CLIENT_YAML))
        else:
          parts.append(ENVIRON)
  # if cluster or role or environ is empty, print
  if len(parts[0]) == 0 or len(parts[1]) == 0 or len(parts[2]) == 0:
    print("Failed to parse")
    sys.exit(1)
  return (parts[0], parts[1], parts[2])
################################################################################
# Parse the command line for overriding the defaults
################################################################################
def parse_override_config(namespace):
  """Parse the command line for overriding the defaults

  Writes each "key=value" override as a "key: value" YAML line into a
  temporary override file and returns its path.

  :param namespace: iterable of "key=value" strings
  :return: path of the generated override config file
  :raises Exception: if the override file cannot be written
  """
  try:
    tmp_dir = tempfile.mkdtemp()
    override_config_file = os.path.join(tmp_dir, OVERRIDE_YAML)
    with open(override_config_file, 'w') as f:
      for config in namespace:
        # only split on the first '=' so values may themselves contain '='
        # (the previous replace() mangled overrides such as "key=a=b")
        f.write("%s\n" % config.replace('=', ': ', 1))
    return override_config_file
  except Exception as e:
    raise Exception("Failed to parse override config: %s" % str(e))
def get_java_path():
  """Get the path of java executable"""
  # JAVA_HOME must be set; check_java_home_set() reports a missing value
  return os.path.join(os.environ.get("JAVA_HOME"), BIN_DIR, "java")
def check_java_home_set():
  """Check if the java home set"""
  # check if environ variable is set
  if "JAVA_HOME" not in os.environ:
    Log.error("JAVA_HOME not set")
    return False
  # check if the value set points at a real executable
  java_path = get_java_path()
  if not (os.path.isfile(java_path) and os.access(java_path, os.X_OK)):
    Log.error("JAVA_HOME/bin/java either does not exist or not an executable")
    return False
  return True
def check_release_file_exists():
  """Check if the release.yaml file exists"""
  release_file = get_heron_release_file()
  if os.path.isfile(release_file):
    return True
  Log.error("Required file not found: %s" % release_file)
  return False
def print_build_info(zipped_pex=False):
  """Print build_info from release.yaml
  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  """
  if zipped_pex:
    release_file = get_zipped_heron_release_file()
  else:
    release_file = get_heron_release_file()
  with open(release_file) as release_info:
    for line in release_info:
      # write the line verbatim; replaces the Python-2-only `print line,`
      # statement with an equivalent that parses on Python 3 as well
      sys.stdout.write(line)
def get_version_number(zipped_pex=False):
  """Return the heron.build.version value from release.yaml
  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  """
  release_file = get_zipped_heron_release_file() if zipped_pex \
      else get_heron_release_file()
  with open(release_file) as release_info:
    for line in release_info:
      # strip the trailing newline, then split into space-separated tokens
      tokens = line[:-1].split(' ')
      if tokens[0] == 'heron.build.version':
        return tokens[-1].replace("'", "")
  return 'unknown'
def insert_bool(param, command_args):
  '''
  If `param` occurs in `command_args` before any '--' separator, insert the
  literal string 'True' right after it (in place). Returns the list.

  :param param: flag name to look for
  :param command_args: argument list, mutated in place
  :return: the (possibly modified) `command_args`
  '''
  match_at = -1
  for pos, token in enumerate(command_args):
    if token == '--':
      # everything after '--' is passed through untouched
      break
    if token == param:
      match_at = pos
      break
  if match_at >= 0:
    command_args.insert(match_at + 1, 'True')
  return command_args
def insert_bool_values(command_line_args):
  '''
  Insert 'True' after each known boolean flag in `command_line_args`.

  :param command_line_args: argument list, mutated in place
  :return: the modified argument list
  '''
  patched = insert_bool('--verbose', command_line_args)
  return insert_bool('--deploy-deactivated', patched)
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
  """Help formatter that hides the "{cmd1,cmd2,...}" metavar line that
  argparse prints above the list of subcommands."""
  def _format_action(self, action):
    # pylint: disable=bad-super-call
    # Deliberately skip RawDescriptionHelpFormatter in the MRO so the base
    # HelpFormatter renders the action.
    rendered = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
    if action.nargs == argparse.PARSER:
      # drop the first line, which is the subparser choices metavar
      rendered = "\n".join(rendered.split("\n")[1:])
    return rendered
|
{
"content_hash": "cb02ba671c109ee0cc5e700b6f9527e2",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 100,
"avg_line_length": 29.60847880299252,
"alnum_prop": 0.6540048850332688,
"repo_name": "objmagic/heron",
"id": "900b2422a8af8bd6a4c7f6ad0a5f7737294f9b2c",
"size": "12465",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heron/tools/common/src/python/utils/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5723"
},
{
"name": "C++",
"bytes": "1055110"
},
{
"name": "CSS",
"bytes": "106404"
},
{
"name": "HTML",
"bytes": "153790"
},
{
"name": "Java",
"bytes": "3130416"
},
{
"name": "JavaScript",
"bytes": "165751"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "498"
},
{
"name": "Objective-C",
"bytes": "1929"
},
{
"name": "Perl",
"bytes": "9085"
},
{
"name": "Protocol Buffer",
"bytes": "21678"
},
{
"name": "Python",
"bytes": "1203088"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "4640"
},
{
"name": "Shell",
"bytes": "131874"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
}
|
from django.db import models
from dockit.schema.loading import get_base_document
import datetime
from decimal import Decimal
from dockit.backends.djangodocument.managers import BaseIndexManager, DocumentManager, RegisteredIndexManager
class DocumentStore(models.Model):
    """Primary storage table: one row per document, serialized into `data`."""
    # name of the dockit collection the document belongs to
    collection = models.CharField(max_length=128)
    # serialized document payload
    data = models.TextField()
    objects = DocumentManager()
    def clear_indexes(self):
        """Remove this document's rows from every registered index model."""
        # index_models maps an index type name to a dict holding its 'model'
        for index in type(self).objects.index_models.itervalues():
            index['model'].objects.clear_db_index(self)
class RegisteredIndex(models.Model):
    """Bookkeeping row for a registered query index on a collection."""
    name = models.CharField(max_length=128, db_index=True)
    collection = models.CharField(max_length=128, db_index=True)
    # hash of the query definition; presumably used to detect when an index
    # definition changed and must be rebuilt -- verify against the manager
    query_hash = models.CharField(max_length=128)
    objects = RegisteredIndexManager()
    def get_document(self):
        """Return the document class registered for this collection."""
        return get_base_document(self.collection)
    class Meta:
        unique_together = [('name', 'collection')]
class RegisteredIndexDocument(models.Model):
    """Associates one document (by id) with one registered index."""
    index = models.ForeignKey(RegisteredIndex, related_name='documents')
    # id of the indexed document in its collection
    doc_id = models.CharField(max_length=128, db_index=True)
    data = models.TextField(blank=True) #optionally store a copy of the document for retrieval
    # last time this index entry was written
    timestamp = models.DateTimeField(auto_now=True)
class BaseIndex(models.Model):
    """Abstract base for the typed index value tables below; concrete
    subclasses add a `value` column of the appropriate field type."""
    document = models.ForeignKey(RegisteredIndexDocument)
    # name of the indexed parameter this value belongs to
    param_name = models.CharField(max_length=128, db_index=True)
    objects = BaseIndexManager()
    class Meta:
        abstract = True
# Concrete index tables, one per Python value type. Each register_index_model
# call maps a type key (e.g. 'int') to the model plus the Python type(s) whose
# values it stores. `long` and `basestring` below mark this module as
# Python-2-only.
class IntegerIndex(BaseIndex):
    value = models.IntegerField(null=True)
RegisteredIndex.objects.register_index_model('int', IntegerIndex, int)
class LongIndex(BaseIndex):
    value = models.BigIntegerField(null=True)
RegisteredIndex.objects.register_index_model('long', LongIndex, long)
class BooleanIndex(BaseIndex):
    value = models.NullBooleanField()
RegisteredIndex.objects.register_index_model('bool', BooleanIndex, bool)
# 'char' also accepts None so null values remain indexable
class StringIndex(BaseIndex):
    value = models.CharField(max_length=512, null=True)
RegisteredIndex.objects.register_index_model('char', StringIndex, (basestring, type(None)))
class TextIndex(BaseIndex):
    value = models.TextField(null=True)
RegisteredIndex.objects.register_index_model('text', TextIndex, basestring)
class DateTimeIndex(BaseIndex):
    value = models.DateTimeField(null=True)
RegisteredIndex.objects.register_index_model('datetime', DateTimeIndex, datetime.datetime)
class DateIndex(BaseIndex):
    value = models.DateField(null=True)
RegisteredIndex.objects.register_index_model('date', DateIndex, datetime.date)
class FloatIndex(BaseIndex):
    value = models.FloatField(null=True)
RegisteredIndex.objects.register_index_model('float', FloatIndex, float)
class TimeIndex(BaseIndex):
    value = models.TimeField(null=True)
RegisteredIndex.objects.register_index_model('time', TimeIndex, datetime.time)
class DecimalIndex(BaseIndex):
    value = models.DecimalField(max_digits=19, decimal_places=10, null=True)
RegisteredIndex.objects.register_index_model('decimal', DecimalIndex, Decimal)
|
{
"content_hash": "cd0c90f6154668b1ffc66da1bed8f331",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 109,
"avg_line_length": 35.39080459770115,
"alnum_prop": 0.7544657356284508,
"repo_name": "zbyte64/django-dockit",
"id": "cf3482f69f0f542bdb746f420bf23674a6bc03ba",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dockit/backends/djangodocument/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2494"
},
{
"name": "Python",
"bytes": "428384"
}
],
"symlink_target": ""
}
|
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import weakref
import six
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
def _devices_match(d1, d2):
  """True iff `d1` and `d2` canonicalize to the same device string."""
  canon = device_util.canonicalize
  return canon(d1) == canon(d2)
class DeviceMap(object):
  """A mapping of replicas & logical device ids to devices."""
  @property
  def all_devices(self):
    """Tuple of every device string covered by this map."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def devices_by_replica(self):
    """Tuple `t` such that `t[replica]` is that replica's devices."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def num_logical_devices(self):
    """How many logical devices each replica may span."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def num_replicas_in_graph(self):
    """How many replicas this graph defines."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def logical_device_from_values(self, values):
    """Index of the logical device the given `values` live on."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def logical_to_actual_devices(self, logical_device_id):
    """Sequence of `num_replicas_in_graph` devices for a logical id."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def select_for_current_replica(self, values, replica_context):
    """Pick the element of `values` belonging to the current replica."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def replica_for_device(self, device):
    """Replica id that owns `device`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def select_for_device(self, values, device):
    """Pick the element of `values` to use from `device`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def is_device_in_replica(self, device, replica_id):
    """Whether `device` belongs to replica `replica_id`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
class SingleDeviceMap(DeviceMap):
  """A device map for 1 non-computation device.

  Use `SingleDeviceMap` when the device does not correspond to some replica of
  the computation. For computation devices, use `ReplicaDeviceMap` below (even
  if there is only a single device in the map).
  """

  def __init__(self, device):
    """Initialize a `SingleDeviceMap`.

    Args:
      device: A string device.
    """
    assert isinstance(device, six.string_types)
    canonical = device_util.canonicalize(device)
    self._device = canonical
    self._devices = (canonical,)

  @property
  def all_devices(self):
    return self._devices

  @property
  def devices_by_replica(self):
    raise ValueError("SingleDeviceMap not indexed by replicas")

  @property
  def num_logical_devices(self):
    return 1

  @property
  def num_replicas_in_graph(self):
    return 1

  def logical_device_from_values(self, values):
    del values  # unused: everything lives on logical device 0
    return 0

  def logical_to_actual_devices(self, logical_device_id):
    assert logical_device_id == 0
    return self._devices

  def select_for_current_replica(self, values, replica_context):
    assert len(values) == 1
    del replica_context  # unused: there is only one candidate value
    return values[0]

  def replica_for_device(self, device):
    raise ValueError("SingleDeviceMap not indexed by replicas")

  def select_for_device(self, values, device):
    assert len(values) == 1
    if device != self._device:
      raise ValueError("Device %s not found in %s (current device %s)" %
                       (device, self._devices, device_util.current()))
    return values[0]

  def is_device_in_replica(self, device, replica_id):
    raise ValueError("SingleDeviceMap not indexed by replicas")

  def __repr__(self):
    return "%s(%r)" % (self.__class__.__name__, self._device)
class ReplicaDeviceMap(DeviceMap):
  """A device map for 1 device per replica."""

  def __init__(self, devices):
    """Initialize a `ReplicaDeviceMap`.

    Args:
      devices: `devices[i]` is the string device for replica `i`.
    """
    self._devices = tuple(device_util.canonicalize(d) for d in devices)
    if len(set(self._devices)) != len(self._devices):
      raise ValueError("Duplicate devices in %s, after canonicalization: %s" %
                       (devices, self._devices))
    # Reverse lookup: canonical device string -> replica index.
    self._device_to_replica = {
        dev: idx for idx, dev in enumerate(self._devices)}

  @property
  def all_devices(self):
    return self._devices

  @property
  def devices_by_replica(self):
    # One 1-tuple per replica.
    return ((d,) for d in self._devices)

  @property
  def num_logical_devices(self):
    return 1

  @property
  def num_replicas_in_graph(self):
    return len(self._devices)

  def logical_device_from_values(self, values):
    del values  # unused: single logical device
    return 0

  def logical_to_actual_devices(self, logical_device_id):
    assert logical_device_id == 0
    return self._devices

  def select_for_current_replica(self, values, replica_context):
    assert len(values) == len(self._devices)
    replica_id = replica_context.replica_id_in_sync_group
    if not isinstance(replica_id, int):
      # In-graph id; resolve it to a concrete Python int.
      replica_id = tensor_util.constant_value(replica_id)
    return values[replica_id]

  def replica_for_device(self, device):
    return self._device_to_replica.get(device)

  def select_for_device(self, values, device):
    assert len(values) == len(self._devices)
    replica_id = self._device_to_replica.get(device)
    if replica_id is None:
      raise ValueError("Device %s not found in %s (current device %s)" %
                       (device, self._devices, device_util.current()))
    return values[replica_id]

  def is_device_in_replica(self, device, replica_id):
    return _devices_match(device, self._devices[replica_id])

  def __str__(self):
    return "[%s]" % (", ".join(self._devices))

  def __repr__(self):
    return "%s([%s])" % (self.__class__.__name__,
                         ", ".join(repr(d) for d in self._devices))
# Pairs a DeviceMap with the logical device index a value lives on.
LogicalDeviceSpec = collections.namedtuple(
    "LogicalDeviceSpec", ["device_map", "logical_device"])
class DistributedValues(object):
  """Holds a map from device to values. Either PerReplica or Mirrored."""

  def __init__(self, device_map, values, logical_device=None):
    assert isinstance(device_map, DeviceMap)
    self._device_map = device_map
    self._values = tuple(values)
    # Infer the logical device from the values when the caller omitted it.
    if logical_device is None:
      logical_device = device_map.logical_device_from_values(self._values)
    self._logical_device = logical_device

  # TODO(josh11b): Split this into two functions, one with device, one without.
  def get(self, device=None):
    """Returns the value for the current device or raises a ValueError."""
    if device is None:
      replica_context = distribution_strategy_context.get_replica_context()
      if replica_context:
        return self._device_map.select_for_current_replica(
            self._values, replica_context)
      # Cross-replica: fall back to the update device, if any.
      device = distribute_lib.get_update_device()
      if device is None:
        return self._get_cross_replica()
    device = device_util.canonicalize(device)
    return self._device_map.select_for_device(self._values, device)

  @property
  def primary(self):
    """Returns a representative component."""
    return self._values[0]

  @property
  def devices(self):
    return self._device_map.logical_to_actual_devices(self._logical_device)

  @property
  def logical_device(self):
    return self._logical_device

  @property
  def device_map(self):
    return self._device_map

  # TODO(josh11b): Replace unwrap with this?
  @property
  def values(self):
    return self._values

  @property
  def is_tensor_like(self):
    return all(tensor_util.is_tensor(v) for v in self._values)

  def _components_string(self, fmt):
    # One line per component: "  <idx> <device>: <value>".
    devices = self.devices
    assert len(self._values) == len(devices)
    body = ",\n".join(fmt % (i, devices[i], self._values[i])
                      for i in range(len(devices)))
    return "%s:{\n%s\n}" % (self.__class__.__name__, body)

  def __str__(self):
    return self._components_string("  %d %s: %s")

  def __repr__(self):
    return self._components_string("  %d %s: %r")
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
class DistributedDelegate(DistributedValues):
  """A map from device to values; acts as the same type as the values."""

  def __getattr__(self, name):
    # TODO(priyag): This needs to be made robust against pitfalls from mix use
    # __getattr__ and @property. See b/120402273.
    return getattr(self.get(), name)

  # Arithmetic, comparison and bitwise operators all delegate to the value
  # selected for the current device via `self.get()`.
  def __add__(self, o):
    return self.get() + o

  def __radd__(self, o):
    return o + self.get()

  def __sub__(self, o):
    return self.get() - o

  def __rsub__(self, o):
    return o - self.get()

  def __mul__(self, o):
    return self.get() * o

  def __rmul__(self, o):
    return o * self.get()

  def __truediv__(self, o):
    return self.get() / o

  def __rtruediv__(self, o):
    return o / self.get()

  def __floordiv__(self, o):
    return self.get() // o

  def __rfloordiv__(self, o):
    return o // self.get()

  def __mod__(self, o):
    return self.get() % o

  def __rmod__(self, o):
    return o % self.get()

  def __lt__(self, o):
    return self.get() < o

  def __le__(self, o):
    return self.get() <= o

  def __gt__(self, o):
    return self.get() > o

  def __ge__(self, o):
    return self.get() >= o

  def __and__(self, o):
    return self.get() & o

  def __rand__(self, o):
    return o & self.get()

  def __or__(self, o):
    return self.get() | o

  def __ror__(self, o):
    return o | self.get()

  def __xor__(self, o):
    return self.get() ^ o

  def __rxor__(self, o):
    return o ^ self.get()

  def __getitem__(self, o):
    return self.get()[o]

  def __pow__(self, o, modulo=None):
    return pow(self.get(), o, modulo)

  def __rpow__(self, o):
    return pow(o, self.get())

  def __invert__(self):
    return ~self.get()

  def __neg__(self):
    return -self.get()

  def __abs__(self):
    return abs(self.get())

  def __div__(self, o):
    try:
      return self.get().__div__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __rdiv__(self, o):
    try:
      return self.get().__rdiv__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __matmul__(self, o):
    try:
      return self.get().__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __rmatmul__(self, o):
    try:
      return self.get().__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

# TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues):
  """Holds a map from device to unsynchronized values."""
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
class Mirrored(DistributedDelegate):
  """Holds a map from device to values which are kept in sync."""

  def _get_cross_replica(self):
    # Prefer the component living on the current device; otherwise any
    # component will do since they are kept in sync, so use the primary.
    current = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(current)
    if replica_id is None:
      return self.primary
    return self._values[replica_id]

  def _as_graph_element(self):
    obj = self.get()
    conv_fn = getattr(obj, "_as_graph_element", None)
    if conv_fn and callable(conv_fn):
      return conv_fn()
    return obj
def _assign_on_device(device, variable, tensor):
  """Assigns a copy of `tensor` to `variable`, placed on `device`."""
  with ops.device(device):
    value = array_ops.identity(tensor)
    return variable.assign(value)
def _assert_strategy(strategy):
  """Raises RuntimeError unless `strategy` is the strategy currently in scope."""
  if not distribution_strategy_context.has_strategy():
    raise RuntimeError(
        'Need to be inside "with strategy.scope()" for %s' % (strategy,))
  current_strategy = distribution_strategy_context.get_strategy()
  if current_strategy is not strategy:
    raise RuntimeError(
        "Mixing different tf.distribute.Strategy objects: %s is not %s" %
        (current_strategy, strategy))
# Lightweight stand-in for a variable's op when accessed cross-replica.
DistributedVarOp = collections.namedtuple(
    "DistributedVarOp", ("name", "graph", "type"))
class DistributedVariable(DistributedDelegate):
"""Holds a map from device to variables."""
# TODO(josh11b): Support changing the set of variables if e.g. if new
# devices are joining or a device is to leave.
def __init__(self, strategy, device_map, values, logical_device=None):
self._distribute_strategy = strategy
super(DistributedVariable, self).__init__(
device_map, values, logical_device=logical_device)
self._common_name = self.primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
"""
result = self.primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(result, self._values[-1].is_initialized(),
name=name)
return result
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
# return grouped ops of all the var initializations of component values of
# the mirrored variable
init_op = control_flow_ops.group(tuple(
v.initializer for v in self._values))
return init_op
  def _get_closest(self):
    """Return member in the same replica if possible, else the primary."""
    replica_context = distribution_strategy_context.get_replica_context()
    if replica_context:
      # In replica context: select the component for the current replica.
      return self._device_map.select_for_current_replica(
          self._values, replica_context)
    # Cross-replica: prefer the explicit update device, else the current one.
    device = distribute_lib.get_update_device()
    if device is None:
      device = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(device)
    if replica_id is None:
      # The current device holds no component; fall back to the primary.
      return self.primary
    return self._values[replica_id]
  def initialized_value(self):
    """Delegates to the component closest to the current device."""
    return self._get_closest().initialized_value()
  @property
  def initial_value(self):
    """Initial value of the component closest to the current device."""
    return self._get_closest().initial_value
  @property
  def graph(self):
    """The `Graph` of the primary component."""
    return self.primary.graph
  @property
  def _shared_name(self):
    """The components' shared name (primary name minus the ":N" suffix)."""
    return self._common_name
  @property
  def _unique_id(self):
    """Unique id of the primary component, used to identify this variable."""
    return self.primary._unique_id  # pylint: disable=protected-access
  @property
  def _graph_key(self):
    """Lets Optimizers know which graph this variable is from.

    Delegates to the primary component.
    """
    return self.primary._graph_key  # pylint: disable=protected-access
  @property
  def name(self):
    """The name of the primary component."""
    return self.primary.name
  @property
  def dtype(self):
    """The dtype of the primary component (all components agree)."""
    return self.primary.dtype
  @property
  def shape(self):
    """The shape of the primary component."""
    return self.primary.shape
  @property
  def distribute_strategy(self):
    """The `tf.distribute.Strategy` that owns this variable."""
    return self._distribute_strategy
  def get_shape(self):
    """Alias of `shape`; delegates to the primary component."""
    return self.primary.get_shape()
  def to_proto(self, export_scope=None):
    """Serializes the primary component to a `VariableDef` proto."""
    return self.primary.to_proto(export_scope=export_scope)
  @property
  def op(self):
    """The `Operation` backing this variable (context-dependent)."""
    # We want cross-replica code that does some var.op.X calls
    # to work (even if the current device isn't in self.devices), but
    # other uses of var.op in a cross-replica context to fail.
    if distribution_strategy_context.in_cross_replica_context():
      # A lightweight stand-in exposing only name/graph/type of the primary op.
      return DistributedVarOp(self.primary.op.name,
                              self.primary.op.graph,
                              self.primary.op.type)
    return self.get().op
  @property
  def _in_graph_mode(self):
    """Whether the primary component was created in graph (non-eager) mode."""
    return self.primary._in_graph_mode  # pylint: disable=protected-access
  def read_value(self):
    """Reads the value through the strategy's `read_var` hook."""
    return self._distribute_strategy.extended.read_var(self)
  def value(self):
    """Value of the component closest to the current device."""
    return self._get_closest().value()
  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    # Marker method only; the mere presence of this attribute is the signal.
    pass
# Register DistributedVariable as a dense tensor-like type with the ops
# library so it is accepted where dense tensor-like values are expected.
ops.register_dense_tensor_like_type(DistributedVariable)
def _validate_colocate_extended(v, extended):
variable_strategy = v._distribute_strategy # pylint: disable=protected-access
if variable_strategy.extended is not extended:
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not %s created in scope: %s" %
(v, variable_strategy))
def validate_colocate_distributed_variable(v, extended):
  """Checks `v` is a `DistributedVariable` created under `extended`."""
  if isinstance(v, DistributedVariable):
    _validate_colocate_extended(v, extended)
    return
  raise ValueError(
      "`colocate_vars_with` must only be passed a variable created in this "
      "tf.distribute.Strategy.scope(), not: %r" % (v,))
def validate_colocate_tpu_variable(v, extended):
  """Checks `v` is a `TPUMirroredVariable` created under `extended`."""
  if isinstance(v, TPUMirroredVariable):
    _validate_colocate_extended(v, extended)
    return
  raise ValueError(
      "`colocate_vars_with` must only be passed a variable created in this "
      "tf.distribute.Strategy.scope(), not: %r" % (v,))
def validate_colocate(v, extended):
  """Checks that `v` carries a strategy and that it matches `extended`."""
  if hasattr(v, "_distribute_strategy"):
    _validate_colocate_extended(v, extended)
    return
  raise ValueError(
      "`colocate_vars_with` must only be passed a variable created in this "
      "tf.distribute.Strategy.scope(), not: %r" % (v,))
def _apply_aggregation(strategy, value, aggregation, destinations):
  """Aggregates a per-replica `value` and places it on `destinations`.

  Args:
    strategy: the `tf.distribute.Strategy` coordinating the aggregation.
    value: the per-replica value to aggregate.
    aggregation: a `tf.VariableAggregation` value choosing how to combine.
    destinations: where the aggregated value should be placed.

  Returns:
    The aggregated value, broadcast or reduced to `destinations`.
  """
  if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
    # Only the first replica's value is used; just broadcast it.
    return strategy.extended.broadcast_to(strategy.unwrap(value)[0],
                                          destinations=destinations)
  reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
  return strategy.extended.reduce_to(reduce_op, value, destinations)
class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
  """Class for defining how to restore a MirroredVariable."""
  def __init__(self, mirrored_variable, primary_variable, name):
    """Saves via `primary_variable`; keeps the container for restore."""
    self._mirrored_variable = mirrored_variable
    super(_MirroredSaveable, self).__init__(primary_variable, "", name)
  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    # Write the single restored tensor to every component, each on its own
    # device.
    return control_flow_ops.group(tuple(
        _assign_on_device(v.device, v, tensor)
        for v in self._mirrored_variable.values))
class MirroredVariable(DistributedVariable, Mirrored,
                       trackable.Trackable):
  """Holds a map from device to variables whose values are kept in sync."""
  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    """Initializes the container.

    Args:
      strategy: the `tf.distribute.Strategy` that owns the components.
      device_map: a `DeviceMap` mapping devices to `values`.
      values: per-device component variables kept in sync.
      aggregation: a `tf.VariableAggregation` describing how replica-context
        updates are combined before being applied to every component.
      logical_device: optional logical device index, forwarded to the base
        class.
    """
    super(MirroredVariable, self).__init__(
        strategy, device_map, values, logical_device=logical_device)
    self._aggregation = aggregation
  # The arguments to update() are automatically unwrapped so the update()
  # function would normally see regular variables, not MirroredVariables.
  # However, the update function can still operate on wrapped MirroredVariables
  # through object members, captured arguments, etc. This is more likely in an
  # update_non_slot() function (like OptimizerV2._finish), which can
  # update several non-slot variables in one call.
  def _assign_func(self, *args, **kwargs):
    """Routes the assign-style function `kwargs["f"]` by execution context."""
    _assert_strategy(self._distribute_strategy)
    f = kwargs.pop("f")
    if distribution_strategy_context.in_cross_replica_context():
      update_device = distribute_lib.get_update_device()
      if update_device is not None:
        # We are calling an assign function on the mirrored variable in an
        # update context.
        v = self.get(device=update_device)
        return f(v, *args, **kwargs)
      # We are calling assign on the mirrored variable in cross replica context,
      # use `strategy.extended.update()` to update the variable.
      return self._distribute_strategy.extended.update(
          self, f, args=args, kwargs=kwargs)
    else:
      _assert_replica_context(self._distribute_strategy)
      # We are calling an assign function on the mirrored variable in replica
      # context.
      # We reduce the value we want to assign/add/sub. More details about how we
      # handle the different use cases can be found in the _reduce method.
      # We call the function on each of the mirrored variables with the reduced
      # value.
      if self._aggregation == vs.VariableAggregation.NONE:
        raise ValueError("You must specify an aggregation method to update a "
                         "MirroredVariable in Replica Context.")
      def merge_fn(strategy, value, *other_args, **other_kwargs):
        # Aggregate across replicas, then apply `f` to every component.
        v = _apply_aggregation(strategy, value, self._aggregation, self)
        return strategy.extended.update(
            self, f, args=(v,) + other_args, kwargs=other_kwargs)
      return distribution_strategy_context.get_replica_context().merge_call(
          merge_fn, args=args, kwargs=kwargs)
  def assign_sub(self, *args, **kwargs):
    """Delegates to `_assign_func` with the component-wise `assign_sub`."""
    assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)
  def assign_add(self, *args, **kwargs):
    """Delegates to `_assign_func` with the component-wise `assign_add`."""
    assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)
  def assign(self, *args, **kwargs):
    """Delegates to `_assign_func` with the component-wise `assign`."""
    assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
    return self._assign_func(f=assign_fn, *args, **kwargs)
  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used for replica-context updates."""
    return self._aggregation
  def _get_cross_replica(self):
    """Returns a read of the component on (or nearest to) this device."""
    device = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(device)
    if replica_id is None:
      # The current device holds no component; read the primary instead.
      return array_ops.identity(self.primary)
    return array_ops.identity(self._values[replica_id])
  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self.primary._as_graph_element()
    return self.get()._as_graph_element()
  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    MirroredVariables.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _MirroredSaveable(self, self.primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
  """Converts a `MirroredVariable` to a tensor by reading its local value."""
  # Try to avoid assignments to and other mutations of MirroredVariable
  # state except through a DistributionStrategy.extended.update() call.
  assert not as_ref
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)
def _enclosing_tpu_context():
  """Returns the enclosing `XLAControlFlowContext`, or None if not in one."""
  # pylint: disable=protected-access
  tpu_context = ops.get_default_graph()._get_control_flow_context()
  # pylint: enable=protected-access
  # Walk outward through nested contexts until an XLA context is found or
  # the chain of contexts is exhausted.
  while tpu_context is not None and not isinstance(
      tpu_context, control_flow_ops.XLAControlFlowContext):
    tpu_context = tpu_context.outer_context
  return tpu_context
# TODO(jhseu): Deduplicate code. We copy code because we don't want to
# inherit from DistributedDelegate. DistributedDelegate will not work in a
# tpu.replicate() because it assumes that you're in a device context where you
# can operate on a single version of the variable, but a tpu.replicate()
# operates on all variables and is replicated during a rewrite pass.
class TPUMirroredVariable(trackable.Trackable):
  """Holds a map from device to TPU variables whose values are kept in sync."""
  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    """Initializes the container.

    Args:
      strategy: the `tf.distribute.Strategy` that owns the components.
      device_map: a `DeviceMap` mapping devices to `values`.
      values: per-device component variables kept in sync.
      aggregation: a `tf.VariableAggregation` describing how replica-context
        updates are combined.
      logical_device: optional logical device index; derived from `values`
        when not given.
    """
    assert isinstance(device_map, DeviceMap)
    self._distribute_strategy = strategy
    self._device_map = device_map
    self._values = tuple(values)
    if logical_device is None:
      logical_device = device_map.logical_device_from_values(self._values)
    self._logical_device = logical_device
    # Use a weakref to make it easy to map from the contained values
    # to the container without introducing a reference cycle.
    for v in self._values:
      v._mirrored_container = weakref.ref(self)  # pylint: disable=protected-access
    self._common_name = self.primary.name.split(":")[0]
    self._aggregation = aggregation
    # Needed for GradientTape
    self._trainable = self.primary.trainable
    # Typically like `DistributedVariable`, a `TPUMirroredVariable`'s
    # initializer is composed of the initializers of the components variables.
    # However, in some cases, such as when restoring from a checkpoint, we may
    # set the _initializer_op property on the entire `TPUMirroredVariable`.
    self._initializer_op = None
  def _get(self, device=None):
    """Returns the value for the current device or raises a ValueError."""
    if device is None:
      replica_context = distribution_strategy_context.get_replica_context()
      if replica_context:
        return self._device_map.select_for_current_replica(
            self._values, replica_context)
      else:
        device = distribute_lib.get_update_device()
        if device is None:
          return self._get_cross_replica()
    device = device_util.canonicalize(device)
    return self._device_map.select_for_device(self._values, device)
  @property
  def primary(self):
    """Returns a representative component."""
    return self._values[0]
  @property
  def devices(self):
    """The actual devices holding the components."""
    return self._device_map.logical_to_actual_devices(self._logical_device)
  @property
  def logical_device(self):
    """The logical device index the components are assigned to."""
    return self._logical_device
  @property
  def device_map(self):
    """The `DeviceMap` describing where the components live."""
    return self._device_map
  # TODO(josh11b): Replace unwrap with this?
  @property
  def values(self):
    """The tuple of per-device component variables."""
    return self._values
  @property
  def distribute_strategy(self):
    """The `tf.distribute.Strategy` that owns this variable."""
    return self._distribute_strategy
  # Arithmetic/comparison/indexing operators all delegate to `read_value()`.
  # pylint: disable=multiple-statements
  def __add__(self, o): return self.read_value() + o
  def __radd__(self, o): return o + self.read_value()
  def __sub__(self, o): return self.read_value() - o
  def __rsub__(self, o): return o - self.read_value()
  def __mul__(self, o): return self.read_value() * o
  def __rmul__(self, o): return o * self.read_value()
  def __truediv__(self, o): return self.read_value() / o
  def __rtruediv__(self, o): return o / self.read_value()
  def __floordiv__(self, o): return self.read_value() // o
  def __rfloordiv__(self, o): return o // self.read_value()
  def __mod__(self, o): return self.read_value() % o
  def __rmod__(self, o): return o % self.read_value()
  def __lt__(self, o): return self.read_value() < o
  def __le__(self, o): return self.read_value() <= o
  def __gt__(self, o): return self.read_value() > o
  def __ge__(self, o): return self.read_value() >= o
  def __and__(self, o): return self.read_value() & o
  def __rand__(self, o): return o & self.read_value()
  def __or__(self, o): return self.read_value() | o
  def __ror__(self, o): return o | self.read_value()
  def __xor__(self, o): return self.read_value() ^ o
  def __rxor__(self, o): return o ^ self.read_value()
  def __getitem__(self, o): return self.read_value()[o]
  def __pow__(self, o, modulo=None): return pow(self.read_value(), o, modulo)
  def __rpow__(self, o): return pow(o, self.read_value())
  def __invert__(self): return ~self.read_value()
  def __neg__(self): return -self.read_value()
  def __abs__(self): return abs(self.read_value())
  def __div__(self, o):
    try:
      return self.read_value().__div__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rdiv__(self, o):
    try:
      return self.read_value().__rdiv__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __matmul__(self, o):
    try:
      return self.read_value().__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rmatmul__(self, o):
    try:
      return self.read_value().__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __str__(self):
    """Lists every component with its index and device."""
    devices = self.devices
    debug_str = ",\n".join("  %d %s: %s" % (i, devices[i], self._values[i])
                           for i in range(len(devices)))
    return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
  def __repr__(self):
    """Like `__str__` but uses each component's `repr`."""
    devices = self.devices
    debug_repr = ",\n".join("  %d %s: %r" % (i, devices[i], self._values[i])
                            for i in range(len(devices)))
    return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
  @property
  def handle(self):
    """The resource handle appropriate for the current context."""
    # If we're in a tpu.rewrite(), return the replicated handle.
    tpu_context = _enclosing_tpu_context()
    if tpu_context is not None:
      return tpu_context.get_replicated_var_handle(
          self._common_name, self._values)
    device = distribute_lib.get_update_device()
    if device is None:
      return self.primary.handle
    return self._get(device=device).handle
  @property
  def device(self):
    """Device of the component selected for the current context."""
    return self._get().device
  def eval(self, session=None):
    """Evaluates the primary component in `session`."""
    return self.primary.eval(session)
  # The arguments to update() are automatically unwrapped so the update()
  # function would normally see regular variables, not MirroredVariables.
  # However, the update function can still operate on wrapped MirroredVariables
  # through object members, captured arguments, etc. This is more likely in an
  # update_non_slot() function (like OptimizerV2._finish), which can
  # update several non-slot variables in one call.
  def _assign_func(self, *args, **kwargs):
    """Routes the assign-style function `kwargs["f"]` by execution context."""
    _assert_strategy(self._distribute_strategy)
    f = kwargs.pop("f")
    if distribution_strategy_context.in_cross_replica_context():
      # Inside a tpu.rewrite(), defer entirely to the strategy's update().
      if _enclosing_tpu_context() is not None:
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      update_device = distribute_lib.get_update_device()
      # We are calling update on the mirrored variable in cross replica context.
      if update_device is not None:
        # We are calling an assign function on the mirrored variable in cross
        # replica context.
        v = self._get(device=update_device)
        return f(v, *args, **kwargs)
      return self._distribute_strategy.extended.update(
          self, f, args=args, kwargs=kwargs)
    else:
      _assert_replica_context(self._distribute_strategy)
      # We are calling an assign function on the mirrored variable in replica
      # context.
      # We reduce the value we want to assign/add/sub. More details about how we
      # handle the different use cases can be found in the _reduce method.
      # We call the function on each of the mirrored variables with the reduced
      # value.
      if self._aggregation == vs.VariableAggregation.NONE:
        raise ValueError("You must specify an aggregation method to update a "
                         "TPUMirroredVariable in Replica Context.")
      def merge_fn(strategy, value, *other_args, **other_kwargs):
        # Aggregate across replicas, then apply `f` to every component.
        v = _apply_aggregation(strategy, value, self._aggregation, self)
        return strategy.extended.update(
            self, f, args=(v,) + other_args, kwargs=other_kwargs)
      return distribution_strategy_context.get_replica_context().merge_call(
          merge_fn, args=args, kwargs=kwargs)
  @contextlib.contextmanager
  def _handle_graph(self, handle):
    """Enters the handle's graph when one is needed for op placement."""
    # Note: might have an eager tensor but not be executing eagerly when
    # building functions.
    if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor)
        or ops.has_default_graph()):
      yield
    else:
      with handle.graph.as_default():
        yield
  @property
  def trainable(self):
    """Whether the components are trainable (taken from the primary)."""
    return self._trainable
  def _read_variable_op(self, parent_op=None):
    """Reads the variable's value, optionally sequenced after `parent_op`."""
    if self.trainable:
      # Notify the gradient tape so reads are differentiated.
      tape.variable_accessed(self)
    if parent_op is not None:
      with ops.control_dependencies([parent_op]):
        return gen_resource_variable_ops.read_variable_op(
            self.handle, self.dtype)
    return gen_resource_variable_ops.read_variable_op(
        self.handle, self.dtype)
  def read_value(self):
    """Returns the current value of the variable."""
    return self._read_variable_op()
  def assign_sub(self, *args, **kwargs):
    """Subtracts `delta` from the variable; routed via `_assign_func`."""
    def assign_sub_fn(var, delta, *ar, **kw):
      del ar
      name = kw.pop("name", None)
      read_value = kw.pop("read_value", True)
      with self._handle_graph(var.handle):
        op = gen_resource_variable_ops.assign_sub_variable_op(
            var.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
            name=name)
      if read_value:
        return self._read_variable_op(parent_op=op)
      return op
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)
  def assign_add(self, *args, **kwargs):
    """Adds `delta` to the variable; routed via `_assign_func`."""
    def assign_add_fn(var, delta, *ar, **kw):
      del ar
      name = kw.pop("name", None)
      read_value = kw.pop("read_value", True)
      with self._handle_graph(var.handle):
        op = gen_resource_variable_ops.assign_add_variable_op(
            var.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
            name=name)
      if read_value:
        return self._read_variable_op(parent_op=op)
      return op
    return self._assign_func(f=assign_add_fn, *args, **kwargs)
  def assign(self, *args, **kwargs):
    """Assigns `value` to the variable; routed via `_assign_func`."""
    def assign_fn(var, value, *ar, **kw):
      del ar
      name = kw.pop("name", None)
      read_value = kw.pop("read_value", True)
      with self._handle_graph(var.handle):
        op = gen_resource_variable_ops.assign_variable_op(
            var.handle, ops.convert_to_tensor(value, dtype=self.dtype),
            name=name)
      if read_value:
        return self._read_variable_op(parent_op=op)
      return op
    return self._assign_func(f=assign_fn, *args, **kwargs)
  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used for replica-context updates."""
    return self._aggregation
  @property
  def constraint(self):
    # Always None: no constraint is stored for the container.
    return None
  @property
  def initializer(self):
    """The initializer op: per-checkpoint override or grouped components."""
    if self._initializer_op:
      init_op = self._initializer_op
    else:
      init_op = control_flow_ops.group(tuple(
          v.initializer for v in self._values))
    return init_op
  @property
  def graph(self):
    """The `Graph` of the primary component."""
    return self.primary.graph
  @property
  def _shared_name(self):
    """The components' shared name (primary name minus the ":N" suffix)."""
    return self._common_name
  @property
  def _unique_id(self):
    return self.primary._unique_id  # pylint: disable=protected-access
  @property
  def name(self):
    """The name of the primary component."""
    return self.primary.name
  @property
  def dtype(self):
    """The dtype of the primary component."""
    return self.primary.dtype
  @property
  def shape(self):
    """The shape of the primary component."""
    return self.primary.shape
  def get_shape(self):
    """Alias of `shape`; delegates to the primary component."""
    return self.primary.get_shape()
  def to_proto(self, export_scope=None):
    """Serializes the primary component to a `VariableDef` proto."""
    return self.primary.to_proto(export_scope=export_scope)
  def _get_cross_replica(self):
    """Selects the component for this device, or the primary as fallback."""
    device = device_util.canonicalize(device_util.current())
    replica = self._device_map.replica_for_device(device)
    if replica is None:
      return self.primary
    return self._values[replica]
  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self.primary._as_graph_element()
    return self._read_variable_op()
  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    MirroredVariables.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _MirroredSaveable(self, self.primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass
  # Needed to pass ResourceVariable checks.
  @property
  def op(self):
    """The op of the primary component."""
    return self.primary.op
  # pylint: disable=protected-access
  @property
  def _save_slice_info(self):
    return self.primary._save_slice_info
  def _get_save_slice_info(self):
    return self.primary._get_save_slice_info()
  def _set_save_slice_info(self, save_slice_info):
    return self.primary._set_save_slice_info(save_slice_info)
  # pylint: enable=protected-access
  @property
  def _in_graph_mode(self):
    return self.primary._in_graph_mode  # pylint: disable=protected-access
  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # pylint: disable=protected-access
    if _enclosing_tpu_context() is None:
      # Outside a tpu.rewrite(): delegate to the selected component.
      return self._get()._dense_var_to_tensor(dtype, name, as_ref)
    # pylint: enable=protected-access
    if dtype is not None and dtype != self.dtype:
      return math_ops.cast(self.read_value(), dtype)
    if as_ref:
      return self.handle
    else:
      return self.read_value()
  def is_initialized(self, name=None):
    """Identifies if all the component variables are initialized.

    Args:
      name: Name of the final `logical_and` op.

    Returns:
      The op that evaluates to True or False depending on if all the
      component variables are initialized.
    """
    # TODO(jhseu): Do we need TPU context implementation?
    result = self.primary.is_initialized()
    # We iterate through the list of values except the last one to allow us to
    # name the final `logical_and` op the same name that is passed by the user
    # to the `is_initialized` op. For distributed variables, the
    # `is_initialized` op is a `logical_and` op.
    for v in self._values[1:-1]:
      result = math_ops.logical_and(result, v.is_initialized())
    result = math_ops.logical_and(result, self._values[-1].is_initialized(),
                                  name=name)
    return result
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_tpu_mirrored(var, dtype=None, name=None, as_ref=False):
  """Converts a `TPUMirroredVariable` to a tensor (TPU-context aware)."""
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access
ops.register_tensor_conversion_function(TPUMirroredVariable,
                                        _tensor_conversion_tpu_mirrored)
# Also register TPUMirroredVariable as a dense tensor-like type.
ops.register_dense_tensor_like_type(TPUMirroredVariable)
class _SyncOnReadSaveable(saver.BaseSaverBuilder.SaveableObject):
  """Class for defining how to restore a SyncOnReadVariable."""
  def __init__(self, sync_on_read_variable, name):
    """Builds a save spec that reads the variable through its strategy."""
    self._sync_on_read_variable = sync_on_read_variable
    # We use a callable so that we don't have to evaluate this expression
    # in the case where we are trying to restore instead of save.
    def tensor():
      # Saving reads via the strategy so the variable's configured
      # aggregation is applied to the per-replica components.
      strategy = sync_on_read_variable._distribute_strategy  # pylint: disable=protected-access
      return strategy.extended.read_var(sync_on_read_variable)
    spec = saver.BaseSaverBuilder.SaveSpec(
        tensor=tensor,
        slice_spec="",
        name=name,
        dtype=sync_on_read_variable.dtype)
    super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)
  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    # `assign` in cross-replica context spreads the restored value over all
    # components (and, for SUM aggregation, re-divides it — see
    # SyncOnReadVariable.assign).
    return self._sync_on_read_variable.assign(tensor)
def _assert_replica_context(strategy):
  """Raises unless currently in a replica context belonging to `strategy`."""
  replica_context = distribution_strategy_context.get_replica_context()
  # Either there is no replica context at all, or the active one belongs to a
  # different strategy; both are errors (and report the same message, so the
  # two original checks are folded into one condition).
  if not replica_context or replica_context.strategy is not strategy:
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context.")
class SyncOnReadVariable(DistributedVariable, PerReplica, trackable.Trackable):
  """Holds a map from device to variables whose values are reduced on save."""
  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    """Initializes the container; `aggregation` governs cross-replica reads."""
    self._aggregation = aggregation
    super(SyncOnReadVariable, self).__init__(
        strategy, device_map, values, logical_device=logical_device)
  def assign_sub(self, *args, **kwargs):
    """Subtracts from the local component (replica context only)."""
    _assert_replica_context(self._distribute_strategy)
    return self.get().assign_sub(*args, **kwargs)
  def assign_add(self, *args, **kwargs):
    """Adds to the local component (replica context only)."""
    _assert_replica_context(self._distribute_strategy)
    return self.get().assign_add(*args, **kwargs)
  def assign(self, *args, **kwargs):
    """Assigns: all components in cross-replica, local one in replica."""
    if distribution_strategy_context.in_cross_replica_context():
      # To preserve the sum across save and restore, we have to divide the
      # total across all devices when restoring a variable that was summed
      # when saving.
      tensor = args[0]
      if self._aggregation == vs.VariableAggregation.SUM:
        tensor *= 1. / len(self.devices)
      return control_flow_ops.group(tuple(
          _assign_on_device(v.device, v, tensor) for v in self._values))
    else:
      _assert_replica_context(self._distribute_strategy)
      return self.get().assign(*args, **kwargs)
  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used for cross-replica reads/saves."""
    return self._aggregation
  def _get_cross_replica(self):
    if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
      # Only the first replica's value matters; no reduction required.
      return self.primary
    # Reduce the per-replica components per the configured aggregation.
    return self._distribute_strategy.reduce(
        reduce_util.ReduceOp.from_variable_aggregation(self.aggregation), self)
  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self._get_cross_replica()
    return self.get()._as_graph_element()
  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    `SyncOnReadVariable`s.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _SyncOnReadSaveable(self, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
  """Converts a `SyncOnReadVariable` to a tensor via its local component."""
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(SyncOnReadVariable,
                                        _tensor_conversion_sync_on_read)
def regroup(device_map, values, wrap_class=PerReplica):
  """Makes a nest per-replica into a nest of PerReplica/Mirrored values.

  Args:
    device_map: a `DeviceMap`; `values` must hold one entry per replica in
      the graph.
    values: a sequence with one (possibly nested) value per replica.
    wrap_class: class used to wrap leaves that differ across replicas.

  Returns:
    A structure shaped like one element of `values` where each leaf is
    either the shared value itself, the containing distributed variable, or
    a `wrap_class` instance holding the per-replica leaves.
  """
  assert isinstance(device_map, DeviceMap)
  assert len(values) == device_map.num_replicas_in_graph
  v0 = values[0]
  if isinstance(v0, list):
    for v in values[1:]:
      assert isinstance(v, list)
      assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
                                 (len(v), len(v0), v, v0))
    # Regroup element-wise, preserving the list structure.
    return [regroup(device_map, tuple(v[i] for v in values), wrap_class)
            for i in range(len(v0))]
  if isinstance(v0, tuple):
    for v in values[1:]:
      assert isinstance(v, tuple)
      assert len(v) == len(v0)
    regrouped_tuple = tuple(
        regroup(device_map, tuple(v[i] for v in values), wrap_class)
        for i in range(len(v0)))
    if hasattr(v0, "_fields"):
      # This tuple is in fact a namedtuple! Create a new namedtuple instance
      # and initialize it with the regrouped values:
      assert hasattr(type(v0), "_make")
      return type(v0)._make(regrouped_tuple)
    else:
      return regrouped_tuple
  if isinstance(v0, dict):
    v0keys = set(v0.keys())
    for v in values[1:]:
      assert isinstance(v, dict), ("v[0]: %r  v[i]: %r" % (v0, v))
      assert set(v.keys()) == v0keys, ("v[0].keys: %s  v[i].keys: %s" %
                                       (v0keys, set(v.keys())))
    # Regroup per key, preserving the dict structure.
    return {key: regroup(device_map, tuple(v[key] for v in values), wrap_class)
            for key in v0keys}
  # If exactly the same object across all devices, return it unwrapped.
  same_id = True
  for v in values[1:]:
    if v is not v0:
      same_id = False
      break
  # Consider three cases where same_id is true:
  # * If v0 is a DistributedVariable (a MirroredVariable or
  #   SyncOnReadVariable, and same_id means it is the same across all
  #   devices), we want to return it. We check DistributedVariable
  #   specifically since it can look like it has a
  #   _distributed_container member since its members do.
  # * If v0 is a member of a distributed variable, in which case
  #   hasattr(v0, "_distributed_container") is true, we want to
  #   return the DistributedVariable that contains it using the
  #   _distributed_container logic below. This case can trigger
  #   same_id when there is only one device.
  # * In any other situation, same_id means we return v0.
  if same_id and (isinstance(v0, DistributedVariable) or
                  not hasattr(v0, "_distributed_container")):
    return v0
  # Detect the case where each device has a parallel component of the
  # same MirroredVariable (or SyncOnReadVariable). In this case we
  # want to return the containing MirroredVariable, after a bunch of
  # sanity checking. In particular, each component should have the
  # same container, and the devices of the variables should match the
  # keys of the per-replica dictionary.
  if hasattr(v0, "_distributed_container"):
    # pylint: disable=protected-access
    assert not isinstance(v0, MirroredVariable), (
        "ids = %s, values = %s" % ([id(v) for v in values], values))
    assert device_map.is_device_in_replica(v0.device, 0), (
        "v0.device = %s, device_map = %s" % (v0.device, device_map))
    # _distributed_container is a weakref; calling it yields the container.
    distributed_container = v0._distributed_container()
    assert distributed_container is not None
    for r, v in enumerate(values[1:]):
      assert device_map.is_device_in_replica(v.device, r + 1), (
          "v.device = %s, r = %d, device_map = %s" %
          (v.device, r + 1, device_map))
      assert distributed_container is v._distributed_container()
    return distributed_container
  # pylint: enable=protected-access
  return wrap_class(device_map, values)
def select_replica(replica_id, structured):
  """Specialize a nest of regular & per-replica values for one replica."""
  def _get(x):
    # Unwrap distributed values to the given replica; pass others through.
    return x.values[replica_id] if isinstance(x, DistributedValues) else x
  return nest.map_structure(_get, structured)
def select_device_mirrored(device, structured):
  """Specialize a nest of regular & mirrored values for one device."""
  def _pick(value):
    # Plain (non-distributed) values pass through untouched.
    if not isinstance(value, DistributedValues):
      return value
    # Distributed values must be mirrored before we can select per-device.
    if not isinstance(value, Mirrored):
      raise TypeError(
          "Expected value to be mirrored across replicas: %s in %s." %
          (value, structured))
    return value.get(device)
  return nest.map_structure(_pick, structured)
def update_regroup(extended, device_map, updates, group):
  """Regroup for an update, with dependencies to ensure all updates execute."""
  # TODO(josh11b): Replace "Mirrored" here with a function that does the following
  # so we can avoid all these nest operations.
  regrouped = regroup(device_map, updates, Mirrored)
  if not group:
    # Caller wants the per-replica values rather than grouped ops.
    return nest.map_structure(extended._unwrap, regrouped)  # pylint: disable=protected-access
  grouped_flat = []
  for u in nest.flatten(regrouped):
    if isinstance(u, DistributedValues):
      g = extended._group(u)  # pylint: disable=protected-access
      if u.is_tensor_like:
        # Make sure we run all updates. Without this, something like
        # session.run(extended.update(...)) may only update one replica.
        values = []
        for d in u.devices:
          with ops.device(d), ops.control_dependencies([g]):
            values.append(array_ops.identity(u.get(d)))
        g = Mirrored(u.device_map, values)
    else:
      # Non-distributed entries are passed through unchanged.
      g = u
    grouped_flat.append(g)
  return nest.pack_sequence_as(regrouped, grouped_flat)
def value_container(val):
  """Returns the container that this per-replica `value` belongs to.

  Args:
    val: A value returned by `call_for_each_replica()` or a variable
      created in `scope()`.

  Returns:
    A container that `value` belongs to.
    If value does not belong to any container (including the case of
    container having been destroyed), returns the value itself.
  """
  if (hasattr(val, "_distributed_container") and
      # DistributedVariable has _distributed_container defined
      # but we don't want to return it.
      not isinstance(val, DistributedVariable)):
    # _distributed_container is a weakref; it yields None if the container
    # has been garbage collected, in which case we fall through to `val`.
    container = val._distributed_container()  # pylint: disable=protected-access
    if container is not None:
      return container
  return val
# TODO(josh11b): Descend from Variable.
class AggregatingVariable(trackable.Trackable):
  """A wrapper around a variable that aggregates updates across replicas.

  Assign-style updates made in replica context are reduced across replicas
  using the configured `aggregation` before being applied to the wrapped
  variable `v`; all other attribute access is delegated to `v`.
  """
  def __init__(self, strategy, v, aggregation):
    self._distribute_strategy = strategy
    self._v = v
    # NOTE: We don't use "_distributed_container" here because we don't want
    # to trigger that code path in regroup().
    v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access
    self._aggregation = aggregation
  def get(self):
    """Returns the wrapped variable."""
    return self._v
  @property
  def distribute_strategy(self):
    # The distribution strategy this variable was created under.
    return self._distribute_strategy
  def __getattr__(self, name):
    # Delegate everything not defined on the wrapper to the wrapped variable.
    return getattr(self._v, name)
  def _assign_func(self, *args, **kwargs):
    """Applies assign function `f` (passed via kwargs) in the right context.

    In cross-replica context the call is wrapped in `strategy.update`; in
    replica context the value is first aggregated via a merge_call.
    """
    _assert_strategy(self._distribute_strategy)
    f = kwargs.pop("f")
    if distribution_strategy_context.in_cross_replica_context():
      update_device = distribute_lib.get_update_device()
      if update_device is not None:
        # We are calling an assign function in an update context.
        return f(self._v, *args, **kwargs)
      # We are calling an assign function in cross replica context, wrap it in
      # an update call.
      return self._distribute_strategy.extended.update(
          self, f, args=args, kwargs=kwargs)
    else:
      replica_context = distribution_strategy_context.get_replica_context()
      assert replica_context
      # We are calling an assign function in replica context.
      # We reduce the value we want to assign/add/sub. More details about how we
      # handle the different use cases can be found in the _reduce method.
      # We call the function with the reduced value.
      if self._aggregation == vs.VariableAggregation.NONE:
        # Fix: error message previously read "to update a a variable".
        raise ValueError("You must specify an aggregation method to update "
                         "a variable in replica context.")
      def merge_fn(strategy, value, *other_args, **other_kwargs):
        v = _apply_aggregation(strategy, value, self._aggregation, self)
        return strategy.extended.update(
            self, f, args=(v,) + other_args, kwargs=other_kwargs)
      return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)
  def assign_sub(self, *args, **kwargs):
    assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)
  def assign_add(self, *args, **kwargs):
    assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)
  def assign(self, *args, **kwargs):
    assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
    return self._assign_func(f=assign_fn, *args, **kwargs)
  @property
  def aggregation(self):
    return self._aggregation
  @property
  def name(self):
    return self._v.name
  @property
  def dtype(self):
    return self._v.dtype
  # TODO(josh11b): Test saving & restoring.
  def _gather_saveables_for_checkpoint(self):
    # Checkpoint the wrapped variable, not the wrapper.
    return {trackable.VARIABLE_VALUE_KEY: self._v}
  # Forward arithmetic/comparison operators to the wrapped variable so the
  # wrapper can be used in expressions like a plain variable.
  # pylint: disable=multiple-statements
  def __add__(self, o): return self._v + o
  def __radd__(self, o): return o + self._v
  def __sub__(self, o): return self._v - o
  def __rsub__(self, o): return o - self._v
  def __mul__(self, o): return self._v * o
  def __rmul__(self, o): return o * self._v
  def __truediv__(self, o): return self._v / o
  def __rtruediv__(self, o): return o / self._v
  def __floordiv__(self, o): return self._v // o
  def __rfloordiv__(self, o): return o // self._v
  def __mod__(self, o): return self._v % o
  def __rmod__(self, o): return o % self._v
  def __lt__(self, o): return self._v < o
  def __le__(self, o): return self._v <= o
  def __gt__(self, o): return self._v > o
  def __ge__(self, o): return self._v >= o
  def __and__(self, o): return self._v & o
  def __rand__(self, o): return o & self._v
  def __or__(self, o): return self._v | o
  def __ror__(self, o): return o | self._v
  def __xor__(self, o): return self._v ^ o
  def __rxor__(self, o): return o ^ self._v
  def __getitem__(self, o): return self._v[o]
  def __pow__(self, o, modulo=None): return pow(self._v, o, modulo)
  def __rpow__(self, o): return pow(o, self._v)
  def __invert__(self): return ~self._v
  def __neg__(self): return -self._v
  def __abs__(self): return abs(self._v)
  def __div__(self, o):
    try:
      return self._v.__div__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rdiv__(self, o):
    try:
      return self._v.__rdiv__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __matmul__(self, o):
    try:
      return self._v.__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rmatmul__(self, o):
    try:
      return self._v.__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __str__(self):
    return str(self._v)
  def __repr__(self):
    return repr(self._v)
  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
  """Converts an AggregatingVariable to a tensor by reading its wrapped value."""
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(
    AggregatingVariable, _tensor_conversion_aggregate)
# Lets dense-tensor-like type checks accept AggregatingVariable instances.
ops.register_dense_tensor_like_type(AggregatingVariable)
|
{
"content_hash": "e330d3f9f8703e968f229332749a72e5",
"timestamp": "",
"source": "github",
"line_count": 1582,
"max_line_length": 108,
"avg_line_length": 36.53350189633375,
"alnum_prop": 0.6729877500173023,
"repo_name": "ageron/tensorflow",
"id": "374810e1284837407fba9b4a0bd8c8c0c746f3e4",
"size": "58485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import logging
import multiprocessing
import traceback
from abc import abstractmethod
from collections import OrderedDict
from Queue import Queue
from concurrent.futures import ThreadPoolExecutor
from twitter.common.collections import maybe_list
from pants.base.exceptions import TaskError
from pants.engine.nodes import Noop, Return, State, Throw
from pants.engine.objects import SerializationError
from pants.engine.processing import StatefulPool
from pants.engine.storage import Cache, Storage
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
try:
import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger(__name__)
class InFlightException(Exception):
  """Raised when the in-flight bookkeeping of submitted steps is inconsistent."""
class StepBatchException(Exception):
  """Raised when the scheduler yields an impossible (empty) step batch."""
class ExecutionError(Exception):
  """Raised when an engine run produces unexpected Throw or Noop states."""
class Engine(AbstractClass):
  """An engine for running a pants command line."""
  class Result(datatype('Result', ['error', 'root_products'])):
    """Represents the result of a single engine run."""
    @classmethod
    def finished(cls, root_products):
      """Create a success or partial success result from a finished run.
      Runs can either finish with no errors, satisfying all promises, or they can partially finish
      if run in fail-slow mode producing as many products as possible.
      :param root_products: Mapping of root SelectNodes to their State values.
      :rtype: `Engine.Result`
      """
      return cls(error=None, root_products=root_products)
    @classmethod
    def failure(cls, error):
      """Create a failure result.
      A failure result represent a run with a fatal error. It presents the error but no
      products.
      :param error: The execution error encountered.
      :type error: :class:`pants.base.exceptions.TaskError`
      :rtype: `Engine.Result`
      """
      return cls(error=error, root_products=None)
  def __init__(self, scheduler, storage=None, cache=None, use_cache=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param use_cache: True to enable usage of the cache. The cache incurs a large amount of
      overhead for small tasks, and needs TODO: further improvement.
    :type use_cache: bool
    """
    self._scheduler = scheduler
    self._storage = storage or Storage.create()
    # NOTE(review): this passes the raw `storage` argument (which may be None)
    # rather than self._storage -- confirm Cache.create(None) intentionally
    # builds its own backing storage.
    self._cache = cache or Cache.create(storage)
    self._use_cache = use_cache
  def execute(self, execution_request):
    """Executes the requested build.
    :param execution_request: The description of the goals to achieve.
    :type execution_request: :class:`ExecutionRequest`
    :returns: The result of the run.
    :rtype: :class:`Engine.Result`
    """
    try:
      self.reduce(execution_request)
      return self.Result.finished(self._scheduler.root_entries(execution_request))
    except TaskError as e:
      return self.Result.failure(e)
  def product_request(self, product, subjects):
    """Executes a request for a singular product type from the scheduler for one or more subjects
    and yields the products.
    :param class product: A product type for the request.
    :param list subjects: A list of subjects for the request.
    :yields: The requested products.
    :raises ExecutionError: On unexpected Throw or Noop states for any root.
    """
    request = self._scheduler.execution_request([product], subjects)
    result = self.execute(request)
    if result.error:
      raise result.error
    result_items = self._scheduler.root_entries(request).items()
    # State validation.
    unknown_state_types = tuple(
      type(state) for _, state in result_items if type(state) not in (Throw, Return, Noop)
    )
    if unknown_state_types:
      State.raise_unrecognized(unknown_state_types)
    # Throw handling.
    # Collect one combined trace across all failed roots before failing loudly.
    throw_roots = tuple(root for root, state in result_items if type(state) is Throw)
    if throw_roots:
      cumulative_trace = '\n'.join(
        '\n'.join(self._scheduler.product_graph.trace(root)) for root in throw_roots
      )
      stringified_throw_roots = ', '.join(str(x) for x in throw_roots)
      raise ExecutionError('received unexpected Throw state(s) for root(s): {}\n{}'
                           .format(stringified_throw_roots, cumulative_trace))
    # Noop handling.
    noop_roots = tuple(root for root, state in result_items if type(state) is Noop)
    if noop_roots:
      raise ExecutionError('received unexpected Noop state(s) for the following root(s): {}'
                           .format(noop_roots))
    # Return handling.
    returns = tuple(state.value for _, state in result_items if type(state) is Return)
    for return_value in returns:
      for computed_product in maybe_list(return_value, expected_type=product):
        yield computed_product
  def close(self):
    """Shutdown this engine instance, releasing resources it was using."""
    self._storage.close()
    self._cache.close()
  def cache_stats(self):
    """Returns cache stats for the engine."""
    return self._cache.get_stats()
  def _maybe_cache_get(self, node_entry, runnable):
    """If caching is enabled for the given Entry, create a key and perform a lookup.
    The sole purpose of a keyed request is to get a stable cache key, so we can sort
    keyed_request.dependencies by keys as opposed to requiring dep nodes to support compare.
    :returns: A tuple of a key and result, either of which may be None.
    """
    if not self._use_cache or not node_entry.node.is_cacheable:
      return None, None
    return self._cache.get(runnable)
  def _maybe_cache_put(self, key, result):
    # A None key means the result was not cacheable (see _maybe_cache_get).
    if key is not None:
      self._cache.put(key, result)
  @abstractmethod
  def reduce(self, execution_request):
    """Reduce the given execution graph returning its root products.
    :param execution_request: The description of the goals to achieve.
    :type execution_request: :class:`ExecutionRequest`
    :returns: The root products promised by the execution graph.
    :rtype: dict of (:class:`Promise`, product)
    """
class LocalSerialEngine(Engine):
  """An engine that runs tasks locally and serially in-process."""
  def _run_one(self, entry, runnable):
    """Execute a single runnable (cache-first) and return (entry, state)."""
    cache_key, state = self._maybe_cache_get(entry, runnable)
    if state is None:
      try:
        state = Return(runnable.func(*runnable.args))
        self._maybe_cache_put(cache_key, state)
      except Exception as e:
        state = Throw(e)
    return entry, state
  def reduce(self, execution_request):
    """Run every batch the scheduler yields, reporting completions back to it."""
    generator = self._scheduler.schedule(execution_request)
    for batch in generator:
      generator.send([self._run_one(entry, runnable) for entry, runnable in batch])
def _try_pickle(obj):
try:
pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
except Exception as e:
# Unfortunately, pickle can raise things other than PickleError instances. For example it
# will raise ValueError when handed a lambda; so we handle the otherwise overly-broad
# `Exception` type here.
raise SerializationError('Failed to pickle {}: {}'.format(obj, e))
class ConcurrentEngine(Engine):
  """Shared reduction loop for engines that execute steps concurrently.
  Subclasses provide _submit_until/_await_one to define where work runs.
  """
  def reduce(self, execution_request):
    """The main reduction loop."""
    # 1. Whenever we don't have enough work to saturate the pool, request more.
    # 2. Whenever the pool is not saturated, submit currently pending work.
    # Step instances which have not been submitted yet.
    pending_submission = OrderedDict()
    in_flight = dict() # Dict from step id to a Promise for Steps that have been submitted.
    def submit_until(completed, n):
      # Delegates to the subclass; immediately-completed entries are appended
      # to `completed`, and the count of newly-submitted entries is returned.
      submitted, local_completed = self._submit_until(pending_submission, in_flight, n)
      completed.extend(local_completed)
      return submitted
    def await_one(completed):
      # Block until one in-flight step finishes, recording its result.
      completed.append(self._await_one(in_flight))
    generator = self._scheduler.schedule(execution_request)
    for step_batch in generator:
      completed = []
      if not step_batch:
        # A batch should only be empty if all dependency work is currently blocked/running.
        if not in_flight and not pending_submission:
          raise StepBatchException(
            'Scheduler provided an empty batch while no work is in progress!')
      else:
        # Submit and wait for work for as long as we're able to keep the pool saturated.
        pending_submission.update(step_batch)
        while submit_until(completed, self._pool_size) > 0:
          await_one(completed)
      # Await at least one entry per scheduling loop.
      submit_until(completed, 0)
      if in_flight:
        await_one(completed)
      # Indicate which items have completed.
      generator.send(completed)
    if pending_submission or in_flight:
      raise AssertionError('Engine loop completed with items: {}, {}'.format(pending_submission, in_flight))
  @abstractmethod
  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items in pending_submission.
    Returns a tuple of entries running in the background, and entries that completed immediately.
    """
  @abstractmethod
  def _await_one(self, in_flight):
    """Await one completed step, remove it from in_flight, and return it."""
class ThreadHybridEngine(ConcurrentEngine):
  """An engine that runs locally but allows nodes to be optionally run concurrently.
  The decision to run concurrently or in serial is determined by _is_async_node.
  For IO bound nodes we will run concurrently using threads.
  """
  def __init__(self, scheduler, storage, cache=None, threaded_node_types=tuple(),
               pool_size=None, debug=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param tuple threaded_node_types: Node types that will be processed using the thread pool.
    :param int pool_size: The number of worker processes to use; by default 2 processes per core will
                          be used.
    :param bool debug: `True` to turn on pickling error debug mode (slower); True by default.
    """
    super(ThreadHybridEngine, self).__init__(scheduler, storage, cache)
    self._pool_size = pool_size if pool_size and pool_size > 0 else 2 * multiprocessing.cpu_count()
    self._pending = set()  # Keep track of futures so we can cleanup at the end.
    self._processed_queue = Queue()
    self._async_nodes = threaded_node_types
    self._node_builder = scheduler.node_builder
    self._state = (self._node_builder, storage)
    self._pool = ThreadPoolExecutor(max_workers=self._pool_size)
    self._debug = debug
  def _is_async_node(self, node):
    """Override default behavior and handle specific nodes asynchronously."""
    return isinstance(node, self._async_nodes)
  def _maybe_cache_step(self, step_request):
    # NOTE(review): not referenced elsewhere in this class -- confirm callers
    # exist before removing.
    if step_request.node.is_cacheable:
      return step_request.step_id, self._cache.get(step_request)
    else:
      return step_request.step_id, None
  def _execute_step(self, step_entry, runnable):
    """A function to help support local step execution.
    :param step_entry: Entry that the step is for.
    :param runnable: Runnable to execute.
    :returns: A tuple of (step_entry, result State).
    """
    key, result = self._maybe_cache_get(step_entry, runnable)
    if result is None:
      try:
        result = Return(runnable.func(*runnable.args))
        self._maybe_cache_put(key, result)
      except Exception as e:
        result = Throw(e)
    return step_entry, result
  def _processed_node_callback(self, finished_future):
    # Invoked by the executor when a future finishes or is cancelled; hands
    # the future to the consumer queue and drops it from pending bookkeeping.
    self._processed_queue.put(finished_future)
    self._pending.remove(finished_future)
  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items in pending_submission."""
    to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
    submitted = 0
    completed = []
    for _ in range(to_submit):
      step, runnable = pending_submission.popitem(last=False)
      if self._is_async_node(step.node):
        # Run in a future.
        if step in in_flight:
          raise InFlightException('{} is already in_flight!'.format(step))
        future = self._pool.submit(functools.partial(self._execute_step, step, runnable))
        in_flight[step] = future
        self._pending.add(future)
        future.add_done_callback(self._processed_node_callback)
        submitted += 1
      else:
        # Run inline.
        completed.append(self._execute_step(step, runnable))
    return submitted, completed
  def _await_one(self, in_flight):
    """Await one completed step, and remove it from in_flight."""
    if not in_flight:
      raise InFlightException('Awaited an empty pool!')
    entry, result = self._processed_queue.get().result()
    if isinstance(result, Exception):
      raise result
    in_flight.pop(entry)
    return entry, result
  def close(self):
    """Cleanup thread pool.
    Fix: iterate over a snapshot of ``self._pending`` -- a successful
    ``cancel()`` fires ``_processed_node_callback`` synchronously, which
    removes the future from the set and would otherwise raise
    "set changed size during iteration".
    """
    for f in list(self._pending):
      f.cancel()
    self._pool.shutdown()  # Wait for pool to cleanup before we cleanup storage.
    super(ThreadHybridEngine, self).close()
def _execute_step(debug, process_state, step):
  """A picklable top-level function to help support local multiprocessing uses.
  Executes the Step for the given node builder and storage, and returns a tuple of step id and
  result or exception. Since step execution is only on cache misses, this also saves result
  to the cache.
  """
  storage, cache = process_state
  step_id, runnable_key, is_cacheable = step
  def _run():
    # Returns a storage key for either the Return value or a Throw state.
    try:
      runnable = storage.get_state(runnable_key)
      result = Return(runnable.func(*runnable.args))
      if debug:
        _try_pickle(result)
      result_key = storage.put_state(result)
      if is_cacheable:
        cache.put(runnable_key, result)
      return result_key
    except Exception as e:
      return storage.put_state(Throw(e))
  try:
    return step_id, _run()
  except Exception as e:
    # Trap any exception raised by the execution node that bubbles up, and
    # pass this back to our main thread for handling.
    logger.warn(traceback.format_exc())
    return step_id, e
def _process_initializer(storage):
  """Another picklable top-level function that provides multi-processes' initial states.
  States are returned as a tuple. States are `Closable` so they can be cleaned up once
  processes are done.
  """
  cloned_storage = Storage.clone(storage)
  return (cloned_storage, Cache.create(storage=cloned_storage))
class LocalMultiprocessEngine(ConcurrentEngine):
  """An engine that runs tasks locally and in parallel when possible using a process pool.
  This implementation stores all process inputs in Storage and executes cache lookups before
  submitting a task to another process. This use of Storage means that only a Key for the
  Runnable is sent (directly) across process boundaries, and avoids sending the same data across
  process boundaries repeatedly.
  """
  def __init__(self, scheduler, storage=None, cache=None, pool_size=None, debug=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param int pool_size: The number of worker processes to use; by default 2 processes per core will
                          be used.
    :param bool debug: `True` to turn on pickling error debug mode (slower); True by default.
    """
    # This is the only place where non in-memory storage is needed, create one if not specified.
    storage = storage or Storage.create(in_memory=False)
    super(LocalMultiprocessEngine, self).__init__(scheduler, storage, cache)
    self._pool_size = pool_size if pool_size and pool_size > 0 else 2 * multiprocessing.cpu_count()
    execute_step = functools.partial(_execute_step, debug)
    self._processed_queue = Queue()
    self.node_builder = scheduler.node_builder
    process_initializer = functools.partial(_process_initializer, self._storage)
    self._pool = StatefulPool(self._pool_size, process_initializer, execute_step)
    self._debug = debug
    self._pool.start()
  def _submit(self, step_id, runnable_key, is_cacheable):
    """Send one (step_id, runnable_key, is_cacheable) entry to the worker pool."""
    entry = (step_id, runnable_key, is_cacheable)
    if self._debug:
      # In debug mode, verify the entry pickles before crossing the process boundary.
      _try_pickle(entry)
    self._pool.submit(entry)
  def close(self):
    # NOTE(review): unlike ThreadHybridEngine.close, this does not call
    # super().close(), leaving storage/cache open -- confirm whether that is
    # deliberate (e.g. shared with worker processes).
    self._pool.close()
  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items pending_submission."""
    to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
    submitted = 0
    completed = []
    for _ in range(to_submit):
      step, runnable = pending_submission.popitem(last=False)
      if step in in_flight:
        raise InFlightException('{} is already in_flight!'.format(step))
      # We eagerly compute a key for the Runnable, because it allows us to avoid sending the same
      # data across process boundaries repeatedly.
      runnable_key = self._storage.put_state(runnable)
      is_cacheable = self._use_cache and step.node.is_cacheable
      result = self._cache.get_for_key(runnable_key) if is_cacheable else None
      if result is not None:
        # Skip in_flight on cache hit.
        completed.append((step, result))
      else:
        # Steps cross the process boundary keyed by id(); the mapping back to
        # the step object is kept in in_flight.
        step_id = id(step)
        in_flight[step_id] = step
        self._submit(step_id, runnable_key, is_cacheable)
        submitted += 1
    return submitted, completed
  def _await_one(self, in_flight):
    """Await one completed step, and remove it from in_flight."""
    if not in_flight:
      raise InFlightException('Awaited an empty pool!')
    step_id, result_key = self._pool.await_one_result()
    if isinstance(result_key, Exception):
      raise result_key
    if step_id not in in_flight:
      raise InFlightException(
        'Received unexpected work from the Executor: {} vs {}'.format(step_id, in_flight.keys()))
    return in_flight.pop(step_id), self._storage.get_state(result_key)
|
{
"content_hash": "98834dac144ae4593c7b71ec024a4e3c",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 108,
"avg_line_length": 38.520161290322584,
"alnum_prop": 0.692504972260023,
"repo_name": "kwlzn/pants",
"id": "1fe82b42bea93fba324b126e0838dd24747bdb56",
"size": "19253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/engine/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "450840"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5255934"
},
{
"name": "Scala",
"bytes": "85210"
},
{
"name": "Shell",
"bytes": "58882"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import FileModel
@admin.register(FileModel)
class HelperAdmin(admin.ModelAdmin):
    """Admin for FileModel uploads: stamps the uploading user as author."""

    fields = ('subject', 'file')
    list_display = ('file', 'subject', 'author', 'uploaded_at')

    def save_model(self, request, instance, form, change):
        """Persist the instance, assigning the current user as author on creation.

        Fix: the original tested ``instance.created_by`` -- a field this admin
        never assigns -- while setting and displaying ``author``, so authorship
        was re-stamped on every save. Testing ``author_id`` keeps the original
        author stable and avoids a related-object fetch.
        """
        instance = form.save(commit=False)
        # Only stamp on first save, or when no author has been recorded yet.
        if not change or not instance.author_id:
            instance.author = request.user
        instance.save()
        form.save_m2m()
        return instance
|
{
"content_hash": "9ef054b36558dd77c9b4ed3582bc84b3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 30.235294117647058,
"alnum_prop": 0.6459143968871596,
"repo_name": "k0t3n/fspo_helper",
"id": "ae85dce96b95642674430b626ee0f34ff040f102",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helper/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7251"
},
{
"name": "Python",
"bytes": "7075"
}
],
"symlink_target": ""
}
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def readfile(filename):
    """Return the entire contents of ``filename`` as a string.

    Fix: the original opened/closed the handle manually with a no-op
    ``except: raise`` clause; a ``with`` block guarantees the file is
    closed on any exit path with less code.
    """
    with open(filename) as fp:
        return fp.read()
# Distribution metadata. long_description and license text are read from
# repository files at build time, so README.rst and LICENSE must be present
# alongside setup.py when packaging.
setup(
    name='BaseConv',
    version='0.1a',
    description='A generic base-conversion library for Python.',
    long_description=readfile('README.rst'),
    license=readfile('LICENSE'),
    author='Zachary Voase',
    author_email='zack@biga.mp',
    url='http://github.com/zvoase/baseconv/tree/master',
    packages=find_packages(exclude='tests'),
    include_package_data=True,
    exclude_package_data={'': ['README.textile', 'README.rst', 'LICENSE']}
)
|
{
"content_hash": "72692ec82776d268b2a19cd43abd7a62",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 26.51851851851852,
"alnum_prop": 0.6578212290502793,
"repo_name": "zvoase/baseconv",
"id": "260af7640609a4ae536650b127c84f7481b78545",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20748"
}
],
"symlink_target": ""
}
|
from common_fixtures import * # NOQA
# Target image: an HTTP app that reports back its CONTAINER_NAME, letting the
# tests assert round-robin distribution by hostname.
LB_IMAGE_UUID = "docker:sangeetha/lbtest:latest"
CONTAINER_HOST_NAMES = ["container1", "container2", "container3"]
# Session-shared target containers, populated by the lb_targets fixture.
containers_in_host = []
logger = logging.getLogger(__name__)
# Standalone-LB tests only run when EXECUTE_STANDALONE_LB=true is exported.
if_lb_containers = pytest.mark.skipif(
    not os.environ.get('EXECUTE_STANDALONE_LB') or
    os.environ.get('EXECUTE_STANDALONE_LB').lower() != "true",
    reason='LB support for containers is terminated')
@pytest.fixture(scope='session', autouse=True)
def lb_targets(request, client):
    """Session fixture: create one shared target container on each of the
    first two active hosts, recorded in `containers_in_host`."""
    hosts = client.list_host(kind='docker', removed_null=True, state="active")
    assert len(hosts) > 1, "Need at least 2 hosts for executing Lb test cases"
    for index in range(2):
        container = client.create_container(
            name=random_str(),
            networkMode=MANAGED_NETWORK,
            imageUuid=LB_IMAGE_UUID,
            environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[index]},
            requestedHostId=hosts[index].id)
        containers_in_host.append(client.wait_success(container, timeout=180))
def create_lb_for_container_lifecycle(client, host, port):
    """Create an LB with the two shared targets plus a third container, and
    verify round-robin access across all three hostnames."""
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host, port)
    extra_con = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2]},
        requestedHostId=host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    # Attach the third container as an additional backend on port 80.
    lb = lb.addtarget(
        loadBalancerTarget={"instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb)
    check_round_robin_access(CONTAINER_HOST_NAMES, host, port)
    return lb, lb_config, listener, extra_con
def create_lb_with_one_listener_one_host_two_targets(client,
                                                     host, port,
                                                     lb_config=None,
                                                     lb_config_params=None,
                                                     listener_algorithm=None,
                                                     containers=None):
    """
    This method creates a LB rule.
    Adds the host that is passed to LB.
    Adds targets to the host. These targets are the containers parameter
    if passed, else shared containers are used.
    If lb_config is not passed , a new LB configuration is created which has a
    Lb listener with listener_algorithm if it is provided.If listener_algorithm
    is not provided , then it gets defaulted to round_robin.
    If lb_config is passed , this LB configuration is used for the LB rule.
    If listener_algorithm parameter is passed , listener gets created with
    default listener algorithm(round robin).
    :returns: (lb, lb_config, listener) -- listener is None when an existing
        lb_config was supplied.
    """
    # http listener mapping the requested source port to backend port 80.
    listener_config = {"name": random_str(),
                       "sourcePort": port,
                       "targetPort": '80',
                       "sourceProtocol": 'http',
                       "targetProtocol": 'http'
                       }
    listener = None
    if listener_algorithm is not None:
        listener_config["algorithm"] = listener_algorithm
    if lb_config is None:
        # Create Listener
        listener = client.create_loadBalancerListener(**listener_config)
        listener = client.wait_success(listener)
        # Create LB Config
        if lb_config_params is not None:
            lb_config = client.create_loadBalancerConfig(name=random_str(),
                                                         **lb_config_params)
        else:
            lb_config = client.create_loadBalancerConfig(name=random_str())
        lb_config = client.wait_success(lb_config)
        lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id)
        validate_add_listener(client, listener, lb_config)
    # Create LB
    lb = client.create_loadBalancer(name=random_str(),
                                    loadBalancerConfigId=lb_config.id)
    lb = client.wait_success(lb)
    assert lb.state == "active"
    # Add host to LB
    lb = lb.addhost(hostId=host.id)
    validate_add_host(client, host, lb)
    # Add container to LB
    if containers is None:
        # Add default containers to LB
        for n in range(0, 2):
            target = {"instanceId": containers_in_host[n].id, "ports": ["80"]}
            lb = lb.addtarget(loadBalancerTarget=target)
            validate_add_target(client, containers_in_host[n], lb)
    else:
        for container in containers:
            target = {"instanceId": container.id, "ports": ["80"]}
            lb = lb.addtarget(loadBalancerTarget=target)
            validate_add_target(client, container, lb)
    # Only the fully-default configuration is verified for round robin here;
    # custom configs/algorithms are verified by their respective callers.
    if lb_config_params is None and listener_algorithm is None \
            and containers is None:
        con_hostname = CONTAINER_HOST_NAMES[0:2]
        check_round_robin_access(con_hostname, host, port)
    return lb, lb_config, listener
@if_lb_containers
def test_lb_with_targets(client):
    """LB with one listener on one host, balancing the two shared targets.

    Fix: the original logged ``port`` but then passed the duplicated literal
    ``"8081"`` to the helper; pass the variable so the two cannot drift.
    """
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 0
    host = hosts[0]
    port = "8081"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client, host, port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_add_host_target_in_parallel(client):
    """Add host, targets and listener to an LB concurrently, then validate.

    Fix: the original called ``client.wait_success(listener)`` twice and never
    waited on ``lb_config`` -- an apparent copy/paste slip; the second wait now
    applies to ``lb_config``, matching the serial helper's behavior.
    """
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 0
    host = hosts[0]
    port = "9081"
    listener_config = {"name": random_str(),
                       "sourcePort": port,
                       "targetPort": '80',
                       "sourceProtocol": 'http',
                       "targetProtocol": 'http'
                       }
    listener = client.create_loadBalancerListener(**listener_config)
    listener = client.wait_success(listener)
    lb_config = client.create_loadBalancerConfig(name=random_str())
    lb_config = client.wait_success(lb_config)
    lb = client.create_loadBalancer(name=random_str(),
                                    loadBalancerConfigId=lb_config.id)
    lb = client.wait_success(lb)
    assert lb.state == "active"
    # Add host to LB , container to LB and listener to Lb config associated
    # with LB in parallel
    lb = lb.addhost(hostId=host.id)
    for n in range(0, 2):
        target = {"instanceId": containers_in_host[n].id, "ports": ["80"]}
        lb = lb.addtarget(loadBalancerTarget=target)
    lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id)
    validate_add_listener(client, listener, lb_config)
    validate_add_host(client, host, lb)
    for n in range(0, 2):
        validate_add_target(client, containers_in_host[n], lb)
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host, port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_add_target(client):
    """Add a third target to an existing LB and verify round-robin
    access over all three containers."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8082"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host, port)
    extra_con = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2]},
        requestedHostId=docker_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    # Attach the new container as a target of the existing LB.
    lb = lb.addtarget(
        loadBalancerTarget={"instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb)
    logger.info("Check LB access after adding target with container name: "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES, docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_remove_target(client):
    """Add a third target to an LB, then remove it again and verify
    traffic is balanced only across the remaining two targets."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8083"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host, port)
    extra_con = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2]},
        requestedHostId=docker_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    # Attach the third container as an LB target.
    target = {"instanceId": extra_con.id, "ports": ["80"]}
    lb = lb.addtarget(loadBalancerTarget=target)
    validate_add_target(client, extra_con, lb)
    logger.info("Check LB access after adding target with container name: "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES, docker_host, port)
    # Detach the third container again.
    lb = lb.removetarget(loadBalancerTarget=target)
    validate_remove_target(client, extra_con, lb)
    logger.info("Check LB access after removing target with container name: "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_add_listener(client):
    """Attach a second listener to an existing LB config and verify the
    LB serves round robin on both source ports."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    first_port, second_port = "8084", "8085"
    logger.info("Create LB for 2 targets on port - " + first_port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client, docker_host,
                                                         first_port)
    # Create a second listener on another source port.
    listener = client.create_loadBalancerListener(name=random_str(),
                                                  sourcePort=second_port,
                                                  targetPort='80',
                                                  sourceProtocol='http',
                                                  targetProtocol='http')
    listener = client.wait_success(listener)
    # Attach it to the LB config that backs the LB.
    lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id)
    validate_add_listener(client, listener, lb_config)
    logger.info("Check LB access after adding listener for port: "
                + second_port)
    expected = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(expected, docker_host, first_port)
    check_round_robin_access(expected, docker_host, second_port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_remove_listener(client):
    """Attach a second listener, verify both ports serve, remove it and
    verify the second port stops serving."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    first_port, second_port = "8086", "8087"
    logger.info("Create LB for 2 targets on port - " + first_port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host,
                                                         first_port)
    # Create a second listener on another source port.
    listener = client.create_loadBalancerListener(name=random_str(),
                                                  sourcePort=second_port,
                                                  targetPort='80',
                                                  sourceProtocol='http',
                                                  targetProtocol='http')
    listener = client.wait_success(listener)
    # Attach it to the LB config backing the LB.
    lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id)
    validate_add_listener(client, listener, lb_config)
    logger.info("Check LB access after adding listener for port: "
                + second_port)
    expected = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(expected, docker_host, first_port)
    check_round_robin_access(expected, docker_host, second_port)
    # Detach the second listener again.
    lb_config = lb_config.removelistener(loadBalancerListenerId=listener.id)
    validate_remove_listener(client, listener, lb_config)
    logger.info("Check LB access after removing listener for port: "
                + second_port)
    check_round_robin_access(expected, docker_host, first_port)
    check_no_access(docker_host, second_port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_add_host(client):
    """Add a second host to an existing LB and verify round-robin access
    through both hosts."""
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 1
    host = hosts[0]
    host2 = hosts[1]
    port = "8088"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host, port)
    # Add the second host to the existing LB.
    lb = lb.addhost(hostId=host2.id)
    validate_add_host(client, host2, lb)
    # Bug fix: log the host that was actually added (host2, not host).
    logger.info("Check LB access after adding host: " + str(host2.id))
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host, port)
    check_round_robin_access(con_hostname, host2, port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_remove_host(client):
    """Add a second host to an LB, remove it, verify traffic stops on it,
    then add it back and verify traffic resumes."""
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 1
    host = hosts[0]
    host2 = hosts[1]
    port = "8089"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host, port)
    # Add host to LB
    lb = lb.addhost(hostId=host2.id)
    validate_add_host(client, host2, lb)
    # Bug fix: all log lines below now report the host actually being
    # added/removed (host2) instead of the original host.
    logger.info("Check LB access after adding host: " + str(host2.id))
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host, port)
    check_round_robin_access(con_hostname, host2, port)
    # Remove host from LB
    lb = lb.removehost(hostId=host2.id)
    validate_remove_host(client, host2, lb)
    logger.info("Check LB access after removing host: " + str(host2.id))
    check_round_robin_access(con_hostname, host, port)
    # Check no access on host2 - TBD
    check_no_access(host2, port)
    # Add host back to LB
    lb = lb.addhost(hostId=host2.id)
    validate_add_host(client, host2, lb)
    logger.info("Check LB access after adding host: " + str(host2.id))
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host, port)
    check_round_robin_access(con_hostname, host2, port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_container_lifecycle_stop_start(client):
    """Stop one of three LB targets, verify it drops out of rotation,
    then start it again and verify it rejoins."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "9090"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener, extra_con = \
        create_lb_for_container_lifecycle(client, docker_host, port)
    # Stop the third container; only the first two should answer.
    extra_con = client.wait_success(extra_con.stop())
    assert extra_con.state == 'stopped'
    logger.info("Check LB access after stopping container "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    # Start it again; all three should answer.
    extra_con = client.wait_success(extra_con.start())
    assert extra_con.state == 'running'
    logger.info("Check LB access after starting container "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES, docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_container_lifecycle_restart(client):
    """Restart one of three LB targets and verify it stays in rotation."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "9091"
    logger.info("Create LB for 3 targets on port - " + port)
    lb, lb_config, listener, extra_con = \
        create_lb_for_container_lifecycle(client, docker_host, port)
    # Restart the third container.
    extra_con = client.wait_success(extra_con.restart())
    assert extra_con.state == 'running'
    logger.info(
        "Check LB access after restarting container " +
        CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES, docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
@pytest.mark.skipif(True, reason='not implemented yet')
def test_lb_container_lifecycle_delete_restore(client):
    """Delete then restore/start an LB target container and verify it
    leaves and rejoins the round-robin rotation."""
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 0
    host = hosts[0]
    port = "9092"
    logger.info("Create LB for 3 targets on port - " + port)
    # Bug fix: create_lb_for_container_lifecycle returns four values
    # (lb, lb_config, listener, container), as every other lifecycle test
    # here shows; the original two-value unpack would raise, and
    # ``lb_config``/``listener`` used in cleanup_lb below were undefined.
    lb, lb_config, listener, con1 = \
        create_lb_for_container_lifecycle(client, host, port)
    # Delete Container
    con1 = client.wait_success(client.delete(con1))
    assert con1.state == 'removed'
    logger.info("Check LB access after deleting container "
                + CONTAINER_HOST_NAMES[2])
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host, port)
    # Restore Container
    con1 = client.wait_success(con1.restore())
    assert con1.state == 'stopped'
    con1 = client.wait_success(con1.start())
    assert con1.state == 'running'
    logger.info("Check LB access after restoring container "
                + CONTAINER_HOST_NAMES[2])
    con_hostname = CONTAINER_HOST_NAMES
    check_round_robin_access(con_hostname, host, port)
    delete_all(client, [con1])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_container_lifecycle_delete_purge(client):
    """Delete and purge a target container; the LB must stop routing to
    it and its target map must transition to 'removed'."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "9093"
    logger.info("Create LB for 3 targets on port - " + port)
    lb, lb_config, listener, extra_con = \
        create_lb_for_container_lifecycle(client, docker_host, port)
    # Delete the container, then purge it.
    extra_con = client.wait_success(client.delete(extra_con))
    assert extra_con.state == 'removed'
    extra_con = client.wait_success(extra_con.purge())
    logger.info("Check LB access after purging container "
                + CONTAINER_HOST_NAMES[2])
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    # The target map for the purged container must reach 'removed'.
    target_maps = client.list_loadBalancerTarget(loadBalancerId=lb.id,
                                                 instanceId=extra_con.id)
    assert len(target_maps) == 1
    target_map = wait_for_condition(client, target_maps[0],
                                    lambda x: x.state == 'removed',
                                    lambda x: 'State is: ' + x.state)
    assert target_map.state == "removed"
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_add_target_in_different_host(client):
    """Add a target running on a second host and verify the LB balances
    across all three targets."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 1
    first_host = docker_hosts[0]
    second_host = docker_hosts[1]
    port = "8091"
    logger.info("Create LB for 2 targets on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         first_host, port)
    # The extra target lives on the second host.
    extra_con = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2]},
        requestedHostId=second_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    lb = lb.addtarget(
        loadBalancerTarget={"instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb)
    check_round_robin_access(CONTAINER_HOST_NAMES, first_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_config_shared_by_2_lb_instances(client):
    """Create two LB instances sharing one LB config, then clean up both
    instances and all created resources."""
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 1
    host = hosts[0]
    host2 = hosts[1]
    port = "8092"
    logger.info("Create LB for 2 targets on port - " + port)
    lb1, lb_config1, listener1 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host, port)
    # Create another LB reusing the same LB configuration.
    lb2, lb_config2, listener2 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host2, port,
                                                         lb_config1)
    # Bug fix: listener1 was never deleted in the original; clean it up
    # together with lb1 (the shared config is removed via the lb2
    # cleanup below).
    cleanup_lb(client, lb1, None, listener1)
    cleanup_lb(client, lb2, lb_config2, listener2)
@if_lb_containers
def test_modify_lb_config_shared_by_2_lb_instances(client):
    """Add and remove a listener on an LB config shared by two LB
    instances and verify both instances pick up the change."""
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 1
    host1 = hosts[0]
    host2 = hosts[1]
    port1 = "8093"
    port2 = "8094"
    logger.info("Create LB for 2 targets on port - " + port1 +
                "- host-" + str(host1.id))
    # Create LB - LB1
    lb1, lb_config, listener1 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host1, port1)
    logger.info("Create LB for 2 targets on port - " + port1 +
                "- host-" + str(host2.id))
    # Create another LB - LB2 using the same the Lb configuration
    lb2, lb_config, listener2 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         host2, port1,
                                                         lb_config)
    # Bug fix: use a distinct name (listener3) for the newly created
    # listener; the original rebound ``listener2``, leaking LB2's
    # original listener at cleanup.
    listener3 = client.create_loadBalancerListener(name=random_str(),
                                                   sourcePort=port2,
                                                   targetPort='80',
                                                   sourceProtocol='http',
                                                   targetProtocol='http')
    listener3 = client.wait_success(listener3)
    # Add listener to lB config
    lb_config = lb_config.addlistener(loadBalancerListenerId=listener3.id)
    validate_add_listener(client, listener3, lb_config)
    # Check the new listener is associated with LB1
    con_hostname = CONTAINER_HOST_NAMES[0:2]
    check_round_robin_access(con_hostname, host1, port1)
    check_round_robin_access(con_hostname, host1, port2)
    # Check the new listener is associated with LB2
    check_round_robin_access(con_hostname, host2, port1)
    check_round_robin_access(con_hostname, host2, port2)
    # Remove listener from lB config
    lb_config = lb_config.removelistener(loadBalancerListenerId=listener3.id)
    validate_remove_listener(client, listener3, lb_config)
    # Check the removed listener is no longer associated with LB1
    check_round_robin_access(con_hostname, host1, port1)
    check_no_access(host1, port2)
    # Check the removed listener is no longer associated with LB2
    check_round_robin_access(con_hostname, host2, port1)
    check_no_access(host2, port2)
    cleanup_lb(client, lb1, None, listener1)
    cleanup_lb(client, lb2, lb_config, listener2)
    delete_all(client, [listener3])
@if_lb_containers
def test_reuse_port_after_lb_deletion(client):
    """Delete an LB, recreate one on the same port, add an extra target
    and verify round-robin access across all three targets."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "9000"
    logger.info("Create LB for 2 targets on port - " + port)
    lb_1, lb_config_1, listener_1 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host, port)
    lb_1 = client.wait_success(client.delete(lb_1))
    assert lb_1.state == 'removed'
    # Recreate an LB on the very same source port.
    lb_2, lb_config_2, listener_2 = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host, port)
    extra_con = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2]},
        requestedHostId=docker_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    lb_2 = lb_2.addtarget(loadBalancerTarget={
        "instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb_2)
    check_round_robin_access(CONTAINER_HOST_NAMES, docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb_1, lb_config_1, listener_1)
    cleanup_lb(client, lb_2, lb_config_2, listener_2)
@if_lb_containers
def test_lb_for_container_with_port_mapping(client):
    """Containers that publish their own host ports must stay reachable
    both through the LB and through their mapped ports."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    mapped_port_1 = "9002"
    con1 = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[0]},
        ports=[mapped_port_1 + ":80/tcp"],
        requestedHostId=docker_host.id)
    con1 = client.wait_success(con1, timeout=180)
    mapped_port_2 = "9003"
    con2 = client.create_container(
        name=random_str(),
        networkMode=MANAGED_NETWORK,
        imageUuid=LB_IMAGE_UUID,
        environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[1]},
        ports=[mapped_port_2 + ":80/tcp"],
        requestedHostId=docker_host.id)
    con2 = client.wait_success(con2, timeout=180)
    lb_port = "9001"
    logger.info("Create LB for 2 targets which have port "
                "mappings on port - " + lb_port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(client,
                                                         docker_host, lb_port)
    # The LB rule balances across both backend names.
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, lb_port)
    # The containers' own published ports still answer directly.
    check_access(docker_host, mapped_port_1, CONTAINER_HOST_NAMES[0])
    check_access(docker_host, mapped_port_2, CONTAINER_HOST_NAMES[1])
    delete_all(client, [con1, con2])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_with_lb_cookie(client):
    """Create an LB whose config carries an lbCookieStickinessPolicy and
    verify cookie-based stickiness."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8095"
    logger.info("Create LB for 2 targets with lbCookieStickinessPolicy " +
                "on port - " + port)
    policy = {
        "lbCookieStickinessPolicy": {
            "mode": "insert",
            "cookie": "cookie-1",
            "indirect": True,
            "nocache": True,
            "postonly": False,
        },
    }
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(
            client, docker_host, port, lb_config_params=policy)
    check_for_lbcookie_policy(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_with_app_cookie(client):
    """Create an LB with an appCookieStickinessPolicy and verify
    application-cookie based stickiness."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8096"
    cookie_name = "appcookie1"
    logger.info("Create LB for 2 targets with appCookieStickinessPolicy " +
                "on port - " + port)
    policy = {
        "appCookieStickinessPolicy": {
            "mode": "query_string",
            "requestLearn": True,
            "timeout": 3600000,
            "cookie": cookie_name,
            "maxLength": 40,
            "prefix": False,
        },
    }
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(
            client, docker_host, port, lb_config_params=policy)
    check_for_appcookie_policy(CONTAINER_HOST_NAMES[0:2], docker_host, port,
                               cookie_name)
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_with_health_check_with_uri(client):
    """Create an LB with a health check (explicit check URI) and verify
    round-robin access across the web-serving targets."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8097"
    logger.info("Create LB for 2 targets with health check enabled " +
                "on port - " + port)
    health_check = {
        "healthCheck": {
            "interval": 2000,
            "responseTimeout": 2000,
            "healthyThreshold": 2,
            "unhealthyThreshold": 3,
            "uri": "GET /name.html",
        },
    }
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(
            client, docker_host, port, lb_config_params=health_check)
    # Extra target built from TEST_IMAGE_UUID (not the LB web image);
    # the rotation check below only expects the two LB-image containers,
    # presumably because this one fails the health check - TODO confirm.
    extra_con = client.create_container(name=random_str(),
                                        networkMode=MANAGED_NETWORK,
                                        imageUuid=TEST_IMAGE_UUID,
                                        requestedHostId=docker_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    lb = lb.addtarget(
        loadBalancerTarget={"instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb)
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_with_health_check_without_uri(client):
    """Create an LB with a health check (no explicit URI) and verify
    round-robin access across the web-serving targets."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8098"
    logger.info("Create LB for 2 targets with health check enabled " +
                "on port - " + port)
    health_check = {
        "healthCheck": {
            "interval": 2000,
            "responseTimeout": 2000,
            "healthyThreshold": 2,
            "unhealthyThreshold": 3,
        },
    }
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(
            client, docker_host, port, lb_config_params=health_check)
    # Extra target built from TEST_IMAGE_UUID (not the LB web image);
    # the rotation check below only expects the two LB-image containers.
    extra_con = client.create_container(name=random_str(),
                                        networkMode=MANAGED_NETWORK,
                                        imageUuid=TEST_IMAGE_UUID,
                                        requestedHostId=docker_host.id)
    extra_con = client.wait_success(extra_con, timeout=180)
    lb = lb.addtarget(
        loadBalancerTarget={"instanceId": extra_con.id, "ports": ["80"]})
    validate_add_target(client, extra_con, lb)
    check_round_robin_access(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    delete_all(client, [extra_con])
    cleanup_lb(client, lb, lb_config, listener)
@if_lb_containers
def test_lb_with_source(client):
    """Create an LB whose listener uses the 'source' algorithm and verify
    requests stick to a single backend."""
    docker_hosts = client.list_host(kind='docker', removed_null=True)
    assert len(docker_hosts) > 0
    docker_host = docker_hosts[0]
    port = "8101"
    logger.info("Create LB for 2 targets with source algorithm " +
                "on port - " + port)
    lb, lb_config, listener = \
        create_lb_with_one_listener_one_host_two_targets(
            client, docker_host, port, listener_algorithm="source")
    check_for_stickiness(CONTAINER_HOST_NAMES[0:2], docker_host, port)
    cleanup_lb(client, lb, lb_config, listener)
def check_round_robin_access(container_names, host, port):
    """Assert the LB on host:port serves each named container exactly
    once per cycle, in a stable round-robin order."""
    wait_until_lb_is_active(host, port)
    remaining = container_names[:]
    ordered = []
    url = "http://" + host.ipAddresses()[0].address +\
          ":" + port + "/name.html"
    logger.info(url)
    # First pass: every backend must answer exactly once.
    while remaining:
        reply = requests.get(url)
        name = reply.text.strip("\n")
        logger.info(name)
        reply.close()
        assert name in remaining
        remaining.remove(name)
        ordered.append(name)
    logger.info(ordered)
    # Second pass: the rotation must repeat in the observed order.
    for attempt in range(10):
        reply = requests.get(url)
        name = reply.text.strip("\n")
        reply.close()
        logger.info(name)
        assert name == ordered[attempt % len(ordered)]
@if_lb_containers
def check_no_access(host, port):
    """Assert that nothing answers HTTP on host:port."""
    url = "http://" + host.ipAddresses()[0].address + ":" +\
          port + "/name.html"
    try:
        requests.get(url)
    except requests.ConnectionError:
        logger.info("Connection Error - " + url)
    else:
        # A successful GET means the port is still serving - fail.
        assert False
@if_lb_containers
def check_access(host, port, expected_response):
    """Assert that host:port serves exactly ``expected_response``."""
    url = "http://" + host.ipAddresses()[0].address + ":" +\
          port + "/name.html"
    reply = requests.get(url)
    body = reply.text.strip("\n")
    logger.info(body)
    reply.close()
    assert body == expected_response
@if_lb_containers
def check_for_appcookie_policy(container_names, host, port, cookie_name):
    """Send requests carrying the application cookie and assert every
    reply comes from the same backend container."""
    wait_until_lb_is_active(host, port)
    url = "http://" + host.ipAddresses()[0].address + \
          ":" + port + "/name.html"
    headers = {"Cookie": cookie_name + "=test123"}
    first = requests.get(url, headers=headers)
    pinned = first.text.strip("\n")
    logger.info(pinned)
    first.close()
    assert pinned in container_names
    for _ in range(10):
        reply = requests.get(url, headers=headers)
        body = reply.text.strip("\n")
        reply.close()
        logger.info(body)
        assert body == pinned
@if_lb_containers
def check_for_lbcookie_policy(container_names, host, port):
    """Use a single HTTP session (so the LB-inserted cookie persists)
    and assert all replies come from the same backend container."""
    wait_until_lb_is_active(host, port)
    url = "http://" + host.ipAddresses()[0].address + \
          ":" + port + "/name.html"
    session = requests.Session()
    first = session.get(url)
    pinned = first.text.strip("\n")
    logger.info(pinned)
    first.close()
    assert pinned in container_names
    for _ in range(10):
        reply = session.get(url)
        body = reply.text.strip("\n")
        reply.close()
        logger.info(body)
        assert body == pinned
@if_lb_containers
def check_for_stickiness(container_names, host, port):
    """Assert repeated requests from this client are all served by the
    same backend (source-address stickiness)."""
    wait_until_lb_is_active(host, port)
    url = "http://" + host.ipAddresses()[0].address + \
          ":" + port + "/name.html"
    first = requests.get(url)
    pinned = first.text.strip("\n")
    logger.info(pinned)
    first.close()
    assert pinned in container_names
    for _ in range(10):
        reply = requests.get(url)
        body = reply.text.strip("\n")
        reply.close()
        logger.info(body)
        assert body == pinned
def validate_add_target(client, container, lb):
    """Wait for the target map linking ``container`` to ``lb`` to turn
    active; exactly one such map must exist."""
    maps = client.list_loadBalancerTarget(loadBalancerId=lb.id,
                                          instanceId=container.id)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "active",
        lambda m: 'State is: ' + m.state)
def validate_remove_target(client, container, lb):
    """Wait for the target map linking ``container`` to ``lb`` to turn
    removed; exactly one such map must exist."""
    maps = client.list_loadBalancerTarget(loadBalancerId=lb.id,
                                          instanceId=container.id)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "removed",
        lambda m: 'State is: ' + m.state)
def validate_add_listener(client, listener, lb_config):
    """Wait for the config/listener map to turn active; exactly one such
    map must exist."""
    maps = client.list_loadBalancerConfigListenerMap(
        loadBalancerListenerId=listener.id,
        loadBalancerConfigId=lb_config.id)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "active",
        lambda m: 'State is: ' + m.state)
def validate_remove_listener(client, listener, lb_config):
    """Wait for the config/listener map to turn removed; exactly one such
    map must exist."""
    maps = client.list_loadBalancerConfigListenerMap(
        loadBalancerListenerId=listener.id,
        loadBalancerConfigId=lb_config.id)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "removed",
        lambda m: 'State is: ' + m.state)
def validate_add_host(client, host, lb):
    """Wait for the (not-yet-removed) host map linking ``host`` to ``lb``
    to turn active; exactly one such map must exist."""
    maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id,
                                           hostId=host.id,
                                           removed_null=True)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "active",
        lambda m: 'State is: ' + m.state)
def validate_remove_host(client, host, lb):
    """Wait for the host map linking ``host`` to ``lb`` to turn removed;
    exactly one such map must exist."""
    maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id,
                                           hostId=host.id)
    assert len(maps) == 1
    wait_for_condition(
        client, maps[0],
        lambda m: m.state == "removed",
        lambda m: 'State is: ' + m.state)
def wait_until_lb_is_active(host, port, timeout=45):
    """Poll host:port until the LB answers, raising after ``timeout``
    seconds without success."""
    start = time.time()
    while check_for_no_access(host, port):
        time.sleep(.5)
        # Bug/consistency fix: use the module logger instead of a bare
        # Python 2 ``print`` statement (invalid syntax under Python 3
        # and inconsistent with the rest of this file).
        logger.info("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return
def check_for_no_access(host, port):
    """Return True when host:port refuses connections, False when it is
    reachable over HTTP."""
    url = "http://" + host.ipAddresses()[0].address + ":" +\
          port + "/name.html"
    try:
        requests.get(url)
    except requests.ConnectionError:
        logger.info("Connection Error - " + url)
        return True
    return False
def cleanup_lb(client, lb, lb_config, listener):
    """Delete the LB and, when provided (not None), its config and
    listener."""
    delete_all(client, [lb])
    for resource in (lb_config, listener):
        if resource is not None:
            delete_all(client, [resource])
|
{
"content_hash": "e8247cd2559fadb9215cd713942c3fc3",
"timestamp": "",
"source": "github",
"line_count": 1230,
"max_line_length": 79,
"avg_line_length": 33.34634146341463,
"alnum_prop": 0.5646333138287497,
"repo_name": "hibooboo2/validation-tests",
"id": "06802eeca63c45f197dcc83901cf6eb7ca43e3bf",
"size": "41016",
"binary": false,
"copies": "5",
"ref": "refs/heads/pr/128",
"path": "tests/validation_v2/cattlevalidationtest/core/test_lb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1140880"
},
{
"name": "Shell",
"bytes": "3920"
}
],
"symlink_target": ""
}
|
"""
If dbus is available, this module implements a
org.freedesktop.Notifications service.
"""
from .log_utils import logger
try:
import dbus
from dbus import service
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
has_dbus = True
except ImportError:
has_dbus = False
BUS_NAME = 'org.freedesktop.Notifications'
SERVICE_PATH = '/org/freedesktop/Notifications'
if has_dbus:
    class NotificationService(service.Object):
        """D-Bus object exported at /org/freedesktop/Notifications that
        forwards incoming notifications to ``manager``."""

        def __init__(self, manager):
            bus = dbus.SessionBus()
            bus.request_name(BUS_NAME)
            bus_name = service.BusName(BUS_NAME, bus=bus)
            service.Object.__init__(self, bus_name, SERVICE_PATH)
            self.manager = manager

        @service.method(BUS_NAME, in_signature='', out_signature='as')
        def GetCapabilities(self):  # noqa: N802
            # Bug fix: out_signature is 'as' (array of strings), but
            # ('body') is just the parenthesized string 'body', not a
            # one-element tuple; return a real tuple.
            return ('body',)

        @service.method(BUS_NAME, in_signature='susssasa{sv}i',
                        out_signature='u')
        def Notify(self, app_name, replaces_id, app_icon, summary,  # noqa: N802
                   body, actions, hints, timeout):
            # Wrap the D-Bus arguments we care about and hand the
            # notification to the manager; the returned id is the
            # notification id required by the spec.
            notif = Notification(summary, body, timeout, hints)
            return self.manager.add(notif)

        @service.method(BUS_NAME, in_signature='u', out_signature='')
        def CloseNotification(self, id):  # noqa: N802
            # Closing notifications on request is not implemented.
            pass

        @service.signal(BUS_NAME, signature='uu')
        def NotificationClosed(self, id_in, reason_in):  # noqa: N802
            pass

        @service.method(BUS_NAME, in_signature='', out_signature='ssss')
        def GetServerInformation(self):  # noqa: N802
            return ("qtile-notify-daemon", "qtile", "1.0", "1")
class Notification:
    """One notification: summary and body text plus timeout and hints."""

    def __init__(self, summary, body='', timeout=-1, hints=None):
        self.summary = summary
        self.body = body
        self.timeout = timeout
        # Falsy hints (None or empty mapping) become a fresh dict.
        self.hints = hints if hints else {}
class NotificationManager:
    """Collects notifications and fans them out to registered callbacks."""

    def __init__(self):
        self.notifications = []
        self.callbacks = []
        self._service = None

    @property
    def service(self):
        # Lazily connect to D-Bus on first access; a failed attempt is
        # recorded as None so it will be retried next time.
        if has_dbus and self._service is None:
            try:
                self._service = NotificationService(self)
            except Exception:
                logger.exception('Dbus connection failed')
                self._service = None
        return self._service

    def register(self, callback):
        if not self.service:
            logger.warning(
                'Registering %s without any dbus connection existing',
                callback.__name__,
            )
        self.callbacks.append(callback)

    def add(self, notif):
        self.notifications.append(notif)
        # Notification ids are 1-based positions in the list.
        notif.id = len(self.notifications)
        for cb in self.callbacks:
            cb(notif)
        return len(self.notifications)

    def show(self, *args, **kwargs):
        notif = Notification(*args, **kwargs)
        return (notif, self.add(notif))
# Module-level singleton: consumers of this module share one manager.
notifier = NotificationManager()
|
{
"content_hash": "cdfd8347e33ddc1048e80838079e099b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 82,
"avg_line_length": 31.526315789473685,
"alnum_prop": 0.5996661101836394,
"repo_name": "flacjacket/qtile",
"id": "4005d58ef9b223536e734a0be8fb520b98f828d6",
"size": "4223",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libqtile/notify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "1173072"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "6235"
}
],
"symlink_target": ""
}
|
"""
Flask-Mandrill
==============
A Flask Extension to remove some of the boiler plate encountered when
sending emails with `Mandrill <http://www.mandrill.com/>`_
Installation
````````````
.. code:: bash
$ pip install flask-mandrill
Usage
`````
.. code:: python
from flask import Flask
from flask.ext.mandrill import Mandrill
app = Flask(__name__)
app.config['MANDRILL_API_KEY'] = 'your api key'
mandrill = Mandrill(app)
mandrill.send_email(
from_email='someone@yourdomain.com',
to=[{'email': 'someoneelse@someotherdomain.com'}],
text='Hello World'
)
"""
from setuptools import setup

# Packaging metadata for the Flask-Mandrill extension. The PyPI long
# description combines the module docstring above with HISTORY.rst.
setup(
    name='Flask-Mandrill',
    version='0.3',
    url='http://github.com/volker48/flask-mandrill',
    license='MIT',
    author='Marcus McCurdy',
    author_email='marcus.mccurdy@gmail.com',
    description='Adds Mandrill support to Flask applications',
    # NOTE(review): open() without an explicit encoding uses the platform
    # default and the handle is never closed - confirm HISTORY.rst is
    # ASCII/UTF-8 before packaging on non-UTF-8 systems.
    long_description=__doc__ + '\n\n' +
                     open('HISTORY.rst').read(),
    py_modules=['flask_mandrill'],
    zip_safe=False,
    platforms='any',
    # Runtime dependencies; requests is used to call the Mandrill API.
    install_requires=[
        'Flask',
        'requests'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
{
"content_hash": "eb138fe07740efd681983c8d338d8c05",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 25.234375,
"alnum_prop": 0.5857585139318885,
"repo_name": "volker48/flask-mandrill",
"id": "3f01210e72b31cf3eafe26eca0723a21876bb1e2",
"size": "1615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7061"
}
],
"symlink_target": ""
}
|
from ...fluid.initializer import Initializer
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.core import VarDesc
from ...fluid import framework
from ...fluid.framework import _current_expected_place
from paddle import in_dynamic_mode
from paddle.utils import unique_name
from paddle import _C_ops
from ... import fluid
__all__ = []
class Dirac(Initializer):
    r"""Initialize the 3D/4D/5D Tensor with Dirac delta function.
    It can reserve the feature of convolution layer input, which means that
    as many channels are reserved as possible.
    In this initialize method, elements in the middle of convolution kernels will
    be set to 1 . The formula can be described as follow.
    .. math::
        X[d, d, shape[2]//2, shape[3]//2, ...]=1, \ d=0,1...N
    where, ``N`` is the minimum value of ``in_channels`` and ``out_channels``
    Args:
        groups(int, optional): 0-dimension of the Tensor will be divided by groups,
            each group has the same value. Default: 1.
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Dirac initializer instance objects.
    Examples:
        .. code-block:: python
            import paddle
            #1. For kernel_size is uneven number:
            attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
            conv = paddle.nn.Conv1D(3, 2, 3, weight_attr=attr)
            conv.weight
            # Tensor(shape=[2, 3, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
            # [[[0., 1., 0.],
            # [0., 0., 0.],
            # [0., 0., 0.]],
            #
            # [[0., 0., 0.],
            # [0., 1., 0.],
            # [0., 0., 0.]]])
            input = paddle.rand([8, 3, 10])
            output = conv(input)
            output == input[:, 0:2, 1:9]
            # output.shape is [8, 2, 8], It means output is almost the same with input, 2 channels are reserved
            #2. For kernel_size is even number:
            attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
            conv = paddle.nn.Conv1D(3, 2, 4, weight_attr=attr)
            conv.weight
            # Tensor(shape=[2, 3, 4], dtype=float32, place=CPUPlace, stop_gradient=False,
            # [[[0., 0., 1., 0.],
            # [0., 0., 0., 0.],
            # [0., 0., 0., 0.]],
            #
            # [[0., 0., 0., 0.],
            # [0., 0., 1., 0.],
            # [0., 0., 0., 0.]]])
    """
    def __init__(self, groups=1, name=None):
        # Validate eagerly: dim 0 of the target tensor is split into
        # ``groups`` equal slices in __call__, so groups must be a
        # positive int.
        assert groups > 0 and isinstance(
            groups, int
        ), " 'groups' must be a positive integer. "
        super().__init__()
        self._groups = groups
    def __call__(self, var, block=None):
        """Initialize the input tensor with dirac initializer.
        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                should be added. Used in static graph only, default None.
        Returns:
            The most critical OP(scatter) in this initializer, which contains 7~8 ops in total.
        """
        block = self._check_block(block)
        assert isinstance(var, framework.Parameter)
        assert isinstance(block, framework.Block)
        # Only 3/4/5-D conv-style weights are supported, and dim 0 must be
        # evenly divisible into the configured number of groups.
        check_variable_and_dtype(
            var, "Out", ['float16', 'bfloat16', 'float32', 'float64'], 'Dirac'
        )
        assert len(var.shape) in [
            3,
            4,
            5,
        ], "Only Tensor with 3/4/5 dimensions can be initialized by Dirac"
        assert (
            var.shape[0] % self._groups
        ) == 0, "Tensor 0-dimension must be divisible by groups"
        # The scatter pipeline below runs in FP32; non-FP32 parameters are
        # staged through a temporary FP32 tensor and cast back at the end.
        if var.dtype != VarDesc.VarType.FP32:
            out_var = block.create_var(
                name=unique_name.generate(".".join(['dirac', var.name, 'tmp'])),
                shape=var.shape,
                dtype=VarDesc.VarType.FP32,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False,
            )
        else:
            out_var = var
        # Step 1: zero-fill out_var (in place in dynamic graph; via a
        # fill_constant op in static graph).
        op = None
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                place = _current_expected_place()
                _C_ops.full_(
                    out_var, out_var.shape, str(float(0)), out_var.dtype, place
                )
        else:
            block.append_op(
                type='fill_constant',
                inputs={},
                outputs={'Out': out_var},
                attrs={
                    'value': float(0),
                    'dtype': out_var.dtype,
                    'shape': out_var.shape,
                },
                stop_gradient=True,
            )
        # Step 2: compute the flat (row-major) offsets of the elements that
        # receive a 1.0 — for each group i and channel j, the "center" of
        # the kernel: (j + i*num_per_group)*stride0 + j*stride1 +
        # sum(shape[k]//2 * stride[k]) over the spatial dims k >= 2.
        origin_shape = var.shape
        num_per_group = origin_shape[0] // self._groups
        min_shape = min(num_per_group, origin_shape[1])
        idx_list = []
        value_list = []
        strides = []
        prod = 1
        # Row-major strides of origin_shape.
        for dim in reversed(origin_shape):
            strides.insert(0, prod)
            prod *= dim
        for i in range(self._groups):
            for j in range(min_shape):
                value_list.append(1.0)
                offset = 0
                for (k, stride) in enumerate(strides):
                    if k == 0:
                        offset += (j + i * num_per_group) * stride
                    elif k == 1:
                        offset += j * stride
                    else:
                        offset += origin_shape[k] // 2 * stride
                idx_list.append(offset)
        # Step 3: flatten out_var to 1-D so scatter can consume the flat
        # offsets computed above.
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_out = _C_ops.reshape(out_var, [-1])
                tmp_out._share_underline_tensor_to(out_var)
        else:
            x_shape = block.create_var(
                name=unique_name.generate(".".join([out_var.name, "XShape"])),
                dtype=out_var.dtype,
                shape=out_var.shape,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=True,
            )
            block.append_op(
                type="reshape2",
                inputs={"X": out_var},
                attrs={'shape': [-1]},
                outputs={"Out": out_var, "XShape": x_shape},
                stop_gradient=True,
            )
        # Step 4: materialize the offsets as an int64 index tensor.
        index_tensor = block.create_var(
            name=unique_name.generate('scatter_index'),
            persistable=False,
            stop_gradient=True,
        )
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_tensor = framework._varbase_creator()
                _C_ops.assign_value_(
                    tmp_tensor,
                    [len(idx_list)],
                    VarDesc.VarType.INT64,
                    idx_list,
                    _current_expected_place(),
                )
                tmp_tensor._share_underline_tensor_to(index_tensor)
        else:
            block.append_op(
                type='assign_value',
                outputs={'Out': index_tensor},
                attrs={
                    'dtype': VarDesc.VarType.INT64,
                    'shape': [len(idx_list)],
                    'int64_values': idx_list,
                },
                stop_gradient=True,
            )
        # Step 5: materialize the matching 1.0 updates as an FP32 tensor.
        value_tensor = block.create_var(
            name=unique_name.generate('scatter_value'),
            persistable=False,
            stop_gradient=True,
        )
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_tensor = framework._varbase_creator()
                _C_ops.assign_value_(
                    tmp_tensor,
                    [len(value_list)],
                    VarDesc.VarType.FP32,
                    value_list,
                    _current_expected_place(),
                )
                tmp_tensor._share_underline_tensor_to(value_tensor)
        else:
            block.append_op(
                type='assign_value',
                outputs={'Out': value_tensor},
                attrs={
                    'dtype': VarDesc.VarType.FP32,
                    'shape': [len(value_list)],
                    'fp32_values': value_list,
                },
                stop_gradient=True,
            )
        # Step 6: scatter the ones into the flat tensor, restore the
        # original shape, and (if staged) cast the FP32 result back into
        # ``var``.
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_out = _C_ops.scatter(
                    out_var, index_tensor, value_tensor, True
                )
                tmp_out._share_underline_tensor_to(out_var)
                tmp_reshape_out = _C_ops.reshape(out_var, origin_shape)
                tmp_reshape_out._share_underline_tensor_to(out_var)
                if var.dtype != VarDesc.VarType.FP32:
                    tmp_cast_out = _C_ops.cast(out_var, var.dtype)
                    tmp_cast_out._share_underline_tensor_to(var)
        else:
            op = block.append_op(
                type="scatter",
                inputs={
                    "X": out_var,
                    "Ids": index_tensor,
                    "Updates": value_tensor,
                },
                attrs={'overwrite': True},
                outputs={"Out": out_var},
                stop_gradient=True,
            )
            x_shape = block.create_var(
                name=unique_name.generate(".".join([out_var.name, "XShape"])),
                dtype=out_var.dtype,
                shape=out_var.shape,
                type=VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=True,
            )
            block.append_op(
                type="reshape2",
                inputs={"X": out_var},
                attrs={'shape': origin_shape},
                outputs={"Out": out_var, "XShape": x_shape},
                stop_gradient=True,
            )
            if var.dtype != VarDesc.VarType.FP32:
                block.append_op(
                    type="cast",
                    inputs={"X": out_var},
                    outputs={"Out": var},
                    attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype},
                    stop_gradient=True,
                )
        # Static graph records the critical op on the parameter and returns
        # it; dynamic graph returns None (op stays None there).
        if not in_dynamic_mode():
            var.op = op
        return op
|
{
"content_hash": "7c09984dc44d54a0c30f182126c162d8",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 111,
"avg_line_length": 35.918088737201366,
"alnum_prop": 0.4713987077156975,
"repo_name": "PaddlePaddle/Paddle",
"id": "489033291a7fc3ffe66db291a7933c6ac644ef4a",
"size": "11137",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/nn/initializer/dirac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read_file(name):
    """Return the contents of file *name* located next to this script.

    Returns None (after printing a warning) if the file cannot be read,
    so callers such as ``LONG_DESC = read_file('README.md')`` degrade
    gracefully instead of crashing setup.

    Args:
        name: file name relative to this script's directory.

    Returns:
        The file's text, or None on IOError.
    """
    filepath = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        name
    )
    try:
        # The original opened the file outside the try (so a missing file
        # escaped the handler) and returned before close() could run,
        # leaking the handle.  ``with`` fixes both.
        with open(filepath) as data:
            return data.read()
    except IOError:
        print("could not read %r" % name)
PROJECT = 'ec2stack'
VERSION = '0.8.1'
URL = 'https://git-wip-us.apache.org/repos/asf?p=cloudstack-ec2stack.git'
AUTHOR = 'Apache Software Foundation'
AUTHOR_EMAIL = 'dev@cloudstack.apache.org'
DESC = "EC2 compatibility interface for Apache Cloudstack"
LONG_DESC = read_file('README.md')
REQUIRES = [
'Flask-SQLAlchemy', 'flask', 'requests', 'alembic'
]
setup(
name=PROJECT,
version=VERSION,
description=DESC,
long_description=LONG_DESC,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license='Apache License (2.0)',
package_data={'ec2stack': ['templates/*.xml'],
'migrations': ['*.mako', 'versions/*']},
packages=['ec2stack',
'ec2stack.controllers',
'ec2stack.providers',
'ec2stack.models',
'ec2stack.models.users',
'ec2stack.providers.cloudstack',
'migrations'],
include_package_data=True,
zip_safe=False,
install_requires=REQUIRES,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
'Programming Language :: Python :: 2.7',
],
entry_points="""
[console_scripts]
ec2stack = ec2stack.__main__:main
ec2stack-configure = ec2stack.configure:main
ec2stack-register = ec2stack.secretkey_manager:register
ec2stack-remove = ec2stack.secretkey_manager:remove
"""
)
|
{
"content_hash": "273bbd9c3a4a4615b2d536e1290a2584",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 28.955882352941178,
"alnum_prop": 0.6084306754697816,
"repo_name": "apache/cloudstack-ec2stack",
"id": "21bea7229a13729d74e6136692ac6899493dfbae",
"size": "2813",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "239301"
},
{
"name": "Shell",
"bytes": "5226"
}
],
"symlink_target": ""
}
|
""" Record a few seconds of audio and save to a WAVE file. """
import logging
import socket
import os
import sys
import wave
import random
from itertools import izip
from math import sqrt
import pyaudio
import numpy as np
from globalvars import (SOCKET_ADDR, SAMPLES_PER_SECOND)
#PyAudio setup
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 2  # stereo: L/R channels are interleaved in the stream
RATE = 96000  # sample rate in Hz
# Frames per analysis chunk.
# NOTE(review): this is integer division under Python 2; under Python 3 it
# would be a float and pyaudio's frames_per_buffer expects an int — confirm
# the target interpreter.
chunk_size = RATE / SAMPLES_PER_SECOND
seconds_per_sample = 1.0 / SAMPLES_PER_SECOND
#default frequency
BASE_FREQUENCY = 1010.0  # Hz; expected tone at normal playback speed
def format_audio(chunk):
    """
    Create NumPy arrays of L/R channels from chunk.

    The raw PyAudio buffer holds interleaved 16-bit samples
    (L0, R0, L1, R1, ...); split it into the two channels.

    Args:
        chunk: raw bytes from the stereo audio stream.

    Returns:
        (ldata, rdata): NumPy short arrays for the left/right channels.
    """
    # np.fromstring on binary data is deprecated (removed in recent NumPy);
    # np.frombuffer is the supported equivalent with identical output.
    data = np.frombuffer(chunk, dtype=np.short)
    ldata = data[0::2]
    rdata = data[1::2]
    return (ldata, rdata)
def argmax(array):
    """
    Return the index of the maximum element of ``array``.

    Stolen from http://www.daniel-lemire.com/blog/archives/2008/12/17/fast-argmax-in-python/
    The faster option he gives doesn't work with NumPy arrays.

    On ties the LAST maximal index wins (tuple comparison falls through to
    the index), matching the original izip-based implementation.
    """
    # izip/xrange are Python-2-only; zip/range are the direct, semantically
    # identical Python 3 replacements.
    return max(zip(array, range(len(array))))[1]
def get_freq(signal):
    """
    Given a PyAudio input chunk, determine the dominant frequency
    """
    #do the fft, this code adapted from
    #http://stackoverflow.com/questions/6908540/pyaudio-how-to-tell-frequency-and-amplitude-while-recording
    # Log-magnitude spectrum of the real FFT of the chunk.
    p = 20*np.log10(np.abs(np.fft.rfft(signal)))
    # Frequency (Hz) of each FFT bin, 0 .. Nyquist (RATE/2).
    f = np.linspace(0, RATE/2.0, len(p))
    offset = 1 # remove the first few entries in the fft
    #(first few entries are usually big but not what we're looking for)
    # Skip the DC bin, find the loudest remaining bin, map back to Hz.
    return f[argmax(p[offset:]) + offset]
# Scale factor mapping a signed 16-bit sample into [-1.0, 1.0].
SHORT_NORMALIZE = (1.0/32768.0)
def get_rms(signal):
    """
    Measures the average audio amplitude (RMS) of ``signal``.

    Only a random 10% subsample of the chunk is measured, which is enough
    for the silence check this feeds.
    Adapted from http://stackoverflow.com/questions/4160175/detect-tap-with-pyaudio-from-live-mic#_=_

    Args:
        signal: sequence of signed 16-bit samples.

    Returns:
        RMS amplitude in [0.0, 1.0]; 0.0 for signals too short to subsample.
    """
    # Floor division: len(signal) / 10 is a float under Python 3 and
    # random.sample() requires an integer sample size.
    sample_size = len(signal) // 10
    if sample_size == 0:
        # Fewer than 10 samples: treat as silence rather than dividing by
        # zero below (the original raised ZeroDivisionError here).
        return 0.0
    sum_squares = 0.0
    for sample in random.sample(signal, sample_size):
        # sample is a signed short in +/- 32768.
        # normalize it to 1.0
        n = sample * SHORT_NORMALIZE
        sum_squares += n*n
    return sqrt(sum_squares / sample_size)
# RMS amplitude below which the input is treated as silence (stopped platter).
ZERO_AMPLITUDE_THRESHOLD = .03
def get_relative_velocity(chunk):
    """
    Returns the velocity of the record relative to the base velocity
    i.e. normal speed = 1.0
         reverse at normal speed = -1.0
         stopped = 0.0
    """
    lchunk, rchunk = format_audio(chunk)
    #check the amplitude first
    amplitude = get_rms(lchunk)
    if abs(amplitude) < ZERO_AMPLITUDE_THRESHOLD:
        amplitude = 0
    logging.debug("amplitude = %f" % amplitude)
    if amplitude == 0:
        return 0
    #only need one chunk for frequency
    freq = get_freq(lchunk)
    #determine direction
    # Direction is read from the right channel's sign a quarter period
    # after the left channel's peak — presumably the two channels carry a
    # quadrature (90-degree-shifted) timecode signal; TODO confirm.
    argmax_l = argmax(lchunk[:chunk_size / 2])
    # Quarter period of the detected tone, in samples.
    # NOTE(review): under Python 3 both chunk_size and this offset are
    # floats, which would break the indexing below — Python 2 assumed.
    peak_offset = chunk_size / (seconds_per_sample * freq) / 4
    if rchunk[min(argmax_l + peak_offset, len(rchunk) - 1)] < 0:
        direction = 1
    else:
        direction = -1
    # Speed ratio relative to the reference tone, signed by direction.
    return round(direction * (freq / BASE_FREQUENCY), 1)
# Open the stereo capture stream that feeds the velocity detector.
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
                channels = CHANNELS,
                rate = RATE,
                input = True,
                frames_per_buffer = chunk_size)
logging.info("**waiting for connection**")
#set up a socket
# NOTE(review): this shadows SOCKET_ADDR imported from globalvars above —
# confirm the hard-coded localhost:6666 override is intentional.
SOCKET_ADDR = ("localhost", 6666)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    # Forever: read one chunk, compute relative velocity, push it as a
    # newline-terminated string over UDP.
    while True:
        try:
            data = stream.read(chunk_size)
            velocity = get_relative_velocity(data)
            logging.debug(("velocity = %f" % velocity))
            s.sendto(str(velocity) + "\n", SOCKET_ADDR)
        except IOError:
            # Transient audio overflow/underflow: drop the chunk and keep going.
            pass
            #logging.warning("audio error")
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # it is what lets Ctrl-C fall through to the cleanup below.
    logging.exception("watwat?")
finally:
    # Always release the socket and the audio device.
    s.close()
    print "**socket closed**"
    stream.close()
    p.terminate()
|
{
"content_hash": "71ef015d1912e2886d0985080eec69b3",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 103,
"avg_line_length": 26.18840579710145,
"alnum_prop": 0.6405644714997233,
"repo_name": "zwass/lpControl",
"id": "c5117ad19a76cd55f21f5707aa5052ab4a7277c4",
"size": "3614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12245"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
import random
import re
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class FIFOQueueTest(tf.test.TestCase):
  """Tests for tf.FIFOQueue: construction attrs, enqueue/dequeue, closing."""
  # Verifies the NodeDef attrs emitted for a single-dtype queue.
  def testConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(10, tf.float32, name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list { type: DT_FLOAT } } }
      attr { key: 'shapes' value { list {} } }
      attr { key: 'capacity' value { i: 10 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def)
  # Same, for a multi-component queue with a shared_name.
  def testMultiQueueConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shared_name="foo", name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {} } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: 'foo' } }
      """, q.queue_ref.op.node_def)
  # Same, with explicit per-component shapes serialized into the attrs.
  def testConstructorWithShapes(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {
        shape { dim { size: 1 }
                dim { size: 1 }
                dim { size: 2 }
                dim { size: 3 } }
        shape { dim { size: 5 }
                dim { size: 8 } }
      } } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def)
  # A single enqueue runs without error.
  def testEnqueue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      enqueue_op.run()
  # Shape-checked enqueue: a correctly-shaped element is accepted and a
  # mis-shaped one is rejected at graph-construction time.
  def testEnqueueWithShape(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
      enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
      enqueue_correct_op.run()
      with self.assertRaises(ValueError):
        q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
      self.assertEqual(1, q.size().eval())
  # enqueue_many slices its inputs along dim 0 into individual elements.
  def testEnqueueManyWithShape(self):
    with self.test_session():
      q = tf.FIFOQueue(10, [tf.int32, tf.int32],
                       shapes=[(), (2,)])
      q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
      self.assertEqual(4, q.size().eval())
  # Many producer threads, one consumer: all elements arrive exactly once.
  # NOTE(review): izip/xrange usage elsewhere in this file marks it Python 2.
  def testParallelEnqueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue, args=(e,))
                 for e in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      # Dequeue every element using a single thread.
      results = []
      for _ in xrange(len(elems)):
        results.append(dequeued_t.eval())
      self.assertItemsEqual(elems, results)
  # One producer, many consumer threads: all elements arrive exactly once.
  def testParallelDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Enqueue every element using a single thread.
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      # Run one consumer thread for each element in elems.
      results = []
      def dequeue():
        results.append(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, results)
  # Elements come out in FIFO order.
  def testDequeue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      for i in xrange(len(elems)):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i]], vals)
  # A dequeue on an empty queue blocks until elements are enqueued.
  def testEnqueueAndBlockingDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(3, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      def enqueue():
        # The enqueue_ops should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        for enqueue_op in enqueue_ops:
          sess.run(enqueue_op)
      results = []
      def dequeue():
        for _ in xrange(len(elems)):
          results.append(sess.run(dequeued_t))
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      for elem, result in zip(elems, results):
        self.assertEqual([elem], result)
  # Multi-component tuples round-trip with components kept in sync.
  def testMultiEnqueueAndDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.float32))
      elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
      enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
      dequeued_t = q.dequeue()
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      for i in xrange(len(elems)):
        x_val, y_val = sess.run(dequeued_t)
        x, y = elems[i]
        self.assertEqual([x], x_val)
        self.assertEqual([y], y_val)
  # A fresh queue reports size 0.
  def testQueueSizeEmpty(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      self.assertEqual([0], q.size().eval())
  # size() tracks enqueue and dequeue, and is a scalar.
  def testQueueSizeAfterEnqueueAndDequeue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue()
      size = q.size()
      self.assertEqual([], size.get_shape())
      enqueue_op.run()
      self.assertEqual(1, size.eval())
      dequeued_t.op.run()
      self.assertEqual(0, size.eval())
  # enqueue_many can run repeatedly; elements dequeue in insertion order.
  def testEnqueueMany(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()
      for i in range(8):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i % 4]], vals)
  # enqueue_many with a zero-length batch is a no-op.
  def testEmptyEnqueueMany(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      empty_t = tf.constant([], dtype=tf.float32,
                            shape=[0, 2, 3])
      enqueue_op = q.enqueue_many((empty_t,))
      size_t = q.size()
      self.assertEqual([0], size_t.eval())
      enqueue_op.run()
      self.assertEqual([0], size_t.eval())
  # dequeue_many(0) yields an empty result without blocking.
  def testEmptyDequeueMany(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=())
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_many(0)
      self.assertEqual([], dequeued_t.eval().tolist())
      enqueue_op.run()
      self.assertEqual([], dequeued_t.eval().tolist())
  # dequeue_many requires fully-specified shapes, even for a 0 batch.
  def testEmptyDequeueManyWithNoShape(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      # Expect the operation to fail due to the shape not being constrained.
      with self.assertRaisesOpError("specified shapes"):
        q.dequeue_many(0).eval()
  # Multi-component enqueue_many keeps components paired on dequeue.
  def testMultiEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()
      for i in range(8):
        float_val, int_val = sess.run(dequeued_t)
        self.assertEqual(float_elems[i % 4], float_val)
        self.assertAllEqual(int_elems[i % 4], int_val)
  # dequeue_many returns contiguous FIFO batches.
  def testDequeueMany(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      self.assertAllEqual(elems[0:4], dequeued_t.eval())
      self.assertAllEqual(elems[4:8], dequeued_t.eval())
  # Batched dequeue of multi-component elements; also checks the static
  # shapes inferred for both the batched and single dequeue outputs.
  def testMultiDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32),
                       shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()
      enqueue_op.run()
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)
      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
  # Rank-4 elements round-trip intact.
  def testHighDimension(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)
      enqueue_op.run()
      self.assertAllEqual(dequeued_t.eval(), elems)
  # Runtime shape mismatch on enqueue is rejected (fed via placeholder so
  # it cannot be caught at graph-construction time).
  def testEnqueueWrongShape(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue((elems_ok, elems_bad))
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
  # Same mismatch check for the batched enqueue_many path.
  def testEnqueueDequeueManyWrongShape(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue_many((elems_ok, elems_bad))
      dequeued_t = q.dequeue_many(2)
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          "Shape mismatch in tuple component 1. "
          r"Expected \[2,3,3\], got \[2,3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
        dequeued_t.eval()
  # Concurrent enqueue_many calls from 10 threads all land in the queue.
  def testParallelEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)
      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(dequeued_t.eval(), elems * 10)
  # Concurrent dequeue_many calls partition the contents without loss.
  def testParallelDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)
      enqueue_op.run()
      # Dequeue 100 items in parallel on 10 threads.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  # Simultaneous producers and consumers on a nearly-full queue: every
  # dequeued value is one that was enqueued.
  def testParallelEnqueueAndDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(50, tf.float32, shapes=())
      initial_elements = [10.0] * 49
      q.enqueue_many((initial_elements,)).run()
      enqueue_op = q.enqueue((20.0,))
      dequeued_t = q.dequeue()
      def enqueue():
        for _ in xrange(100):
          sess.run(enqueue_op)
      def dequeue():
        for _ in xrange(100):
          self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
      enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for enqueue_thread in enqueue_threads:
        enqueue_thread.start()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.start()
      for enqueue_thread in enqueue_threads:
        enqueue_thread.join()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.join()
      # Dequeue the initial count of elements to clean up.
      cleanup_elems = q.dequeue_many(49).eval()
      for elem in cleanup_elems:
        self.assertTrue(elem in (10.0, 20.0))
  # Randomly interleaved enqueue/enqueue_many still delivers 0..249 in order
  # to a single consumer.
  def testMixtureOfEnqueueAndEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_placeholder = tf.placeholder(tf.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = tf.placeholder(
          tf.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
      dequeued_t = q.dequeue()
      close_op = q.close()
      def dequeue():
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = range(elements_enqueued, elements_enqueued + count)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
  # Mirror image: randomly interleaved dequeue/dequeue_many drains 0..249
  # in order from a single producer.
  def testMixtureOfDequeueAndDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_op = q.enqueue_many((range(250),))
      dequeued_t = q.dequeue()
      count_placeholder = tf.placeholder(tf.int32, shape=())
      dequeuemany_t = q.dequeue_many(count_placeholder)
      def enqueue():
        sess.run(enqueue_op)
      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()
      elements_dequeued = 0
      while elements_dequeued < 250:
        # With equal probability, run Dequeue or dequeue_many.
        if random.random() > 0.5:
          self.assertEqual(elements_dequeued, dequeued_t.eval())
          elements_dequeued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_dequeued))
          expected_range = range(elements_dequeued, elements_dequeued + count)
          self.assertAllEqual(
              expected_range, dequeuemany_t.eval({count_placeholder: count}))
          elements_dequeued += count
      q.close().run()
      enqueue_thread.join()
      self.assertEqual(0, q.size().eval())
  # dequeue_many blocks until a full batch is available.
  def testBlockingDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)
      dequeued_elems = []
      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      self.assertAllEqual(elems, dequeued_elems)
  # The dequeue_many batch size can itself be a dequeued tensor.
  def testDequeueManyWithTensorParameter(self):
    with self.test_session():
      # Define a first queue that contains integer counts.
      dequeue_counts = [random.randint(1, 10) for _ in range(100)]
      count_q = tf.FIFOQueue(100, tf.int32, ())
      enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
      total_count = sum(dequeue_counts)
      # Define a second queue that contains total_count elements.
      elems = [random.randint(0, 100) for _ in range(total_count)]
      q = tf.FIFOQueue(total_count, tf.int32, ())
      enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then DequeuesMany
      # that number of elements.
      dequeued_t = q.dequeue_many(count_q.dequeue())
      enqueue_counts_op.run()
      enqueue_elems_op.run()
      dequeued_elems = []
      for _ in dequeue_counts:
        dequeued_elems.extend(dequeued_t.eval())
      self.assertEqual(elems, dequeued_elems)
  # A closed queue drains its remaining elements, then raises OutOfRange.
  def testDequeueFromClosedQueue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      close_op.run()
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                   "is closed and has insufficient"):
        dequeued_t.eval()
  # Closing while a consumer thread is mid-drain: it still gets all
  # elements before seeing OutOfRange.
  def testBlockingDequeueFromClosedQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def dequeue():
        for elem in elems:
          self.assertEqual([elem], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  # Closing an empty queue unblocks a pending dequeue with OutOfRange.
  def testBlockingDequeueFromClosedEmptyQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      close_op = q.close()
      dequeued_t = q.dequeue()
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  # A full-batch dequeue_many succeeds before close; a second, partial one
  # fails with OutOfRange.
  def testBlockingDequeueManyFromClosedQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems, sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
    """Interleaves a blocking enqueue_many, a dequeue_many, and close."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue()

      def enqueue():
        sess.run(enqueue_op)

      def dequeue():
        # First three elements arrive; the closed queue then aborts the
        # second dequeue_many, and the leftover element is drained singly.
        self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(dequeued_t)
        self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))

      def close():
        sess.run(close_op)

      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_thread = self.checkedThread(target=close)
      close_thread.start()

      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()
  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    """When close aborts a blocked dequeue_many, the partially-taken
    elements must be restored to the queue in their original order."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()

      enqueue_op.run()

      def dequeue():
        # Only 3 of the 4 requested elements exist, so this blocks and is
        # then aborted by close_op below.
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()

      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    """A dequeue_many blocked on an empty queue must fail once it closes."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue on a full queue blocks until a dequeue makes room."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # The fifth element is the one enqueued by the blocked thread.
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
  def testBlockingEnqueueBeforeClose(self):
    """A blocked enqueue issued before close still completes; close waits."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()

      # All remaining elements, including the late 50.0, drain in order.
      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())
  def testBlockingEnqueueManyBeforeClose(self):
    """A blocked enqueue_many issued before close still completes."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.FIFOQueue(1, tf.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
  def testSharedQueueSameSession(self):
    """Two queues with the same shared_name alias one underlying queue."""
    with self.test_session():
      q1 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")
      q1.enqueue((10.0,)).run()

      q2 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      # An element enqueued via q1 is visible (and dequeuable) via q2...
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q2.dequeue().eval(), [10.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])

      # ...and vice versa.
      q2.enqueue((20.0,)).run()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q1.dequeue().eval(), [20.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
  def testIncompatibleSharedQueueErrors(self):
    """Re-declaring a shared queue with mismatched capacity, dtypes, or
    shapes must fail when the second queue_ref is evaluated."""
    with self.test_session():
      # Mismatched capacity.
      q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
      q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
      q_a_1.queue_ref.eval()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.eval()

      # Mismatched component dtype.
      q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
      q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
      q_b_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_b_2.queue_ref.eval()

      # Shapes supplied only by the second declaration.
      q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
      q_c_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
      q_c_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_c_2.queue_ref.eval()

      # Shapes supplied only by the first declaration.
      q_d_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
      q_d_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.eval()

      # Conflicting shapes.
      q_e_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
      q_e_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.eval()

      # Different number of components.
      q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
      q_f_2 = tf.FIFOQueue(
          10, (tf.float32, tf.int32), shared_name="q_f")
      q_f_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.FIFOQueue(10, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.FIFOQueue(10, tf.float32)
q2 = tf.FIFOQueue(15, tf.float32)
enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    """Runs a dequeue op expected to be cancelled (e.g. by session close)."""
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    """Runs a dequeue_many op expected to be cancelled."""
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_many_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    """Runs an enqueue op expected to be cancelled."""
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    """Runs an enqueue_many op expected to be cancelled."""
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session must cancel all blocked queue operations."""
    with self.test_session() as sess:
      # Dequeues block because this queue is empty.
      q_empty = tf.FIFOQueue(5, tf.float32, ())
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)

      # Enqueues block because this queue is full.
      q_full = tf.FIFOQueue(5, tf.float32)
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))

      threads = [
          self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(self._blockingDequeueMany, args=(sess,
                                                              dequeue_many_op)),
          self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(self._blockingEnqueueMany, args=(sess,
                                                              enqueue_many_op))]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
  def testBigEnqueueMany(self):
    """An enqueue_many of 10 elements into a 5-slot queue makes progress
    incrementally as dequeues free space, completing only near the end."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(5, tf.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()

      enq_done = []
      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()

      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      for _ in range(3):
        results.append(deq.eval())

      time.sleep(0.1)
      # With 4 dequeued, the enqueue_many still has elements left to push,
      # so it must still be blocked and the queue must still be full.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()

      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)

      self.assertAllEqual(elem, results)
  def testBigDequeueMany(self):
    """A dequeue_many larger than capacity completes once enough single
    enqueues have been run."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(2, tf.int32, ((),))
      elem = range(4)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)

      results = []
      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))

      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        # The dequeue must not produce anything until all 4 are enqueued.
        self.assertEqual(len(results), 0)
        sess.run(enq)

      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertAllEqual(elem, results)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
|
{
"content_hash": "23cab438e61c3bb54e6ea6ce66ace7f7",
"timestamp": "",
"source": "github",
"line_count": 1071,
"max_line_length": 80,
"avg_line_length": 34.83940242763772,
"alnum_prop": 0.6023101867981668,
"repo_name": "brendandburns/tensorflow",
"id": "11b7d46318981208dc36a632f5a11a5e71ac3912",
"size": "37313",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/fifo_queue_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127080"
},
{
"name": "C++",
"bytes": "4892887"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "631255"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "44898"
},
{
"name": "Python",
"bytes": "2432627"
},
{
"name": "Shell",
"bytes": "1714"
},
{
"name": "TypeScript",
"bytes": "236089"
}
],
"symlink_target": ""
}
|
# URL configuration for the directory app: key-based login links
# (django-loginurl), faceted haystack search, and person CRUD views.
from django.conf.urls import patterns, url
from loginurl.views import cleanup, login
from haystack.query import SearchQuerySet
from haystack.views import FacetedSearchView

from .views import IndexView, PersonDetailView, PersonCreateView, PersonUpdateView, \
    DirectoryFacetedSearchForm, LoginView

# Base queryset for the faceted search page; each .facet() exposes a
# drill-down field that must exist in the search index.
# NOTE(review): 'openacess_expertise' looks like a typo for
# 'openaccess_expertise', but it has to match the indexed field name --
# verify against the search_indexes definition before renaming.
sqs = SearchQuerySet().facet('region').facet('general_expertise') \
    .facet('oer_expertise').facet('openacess_expertise').facet('mooc_expertise')

urlpatterns = patterns('',
    # One-time/keyed login URLs provided by django-loginurl.
    url(r'^login/cleanup/$', cleanup, name='loginurl-cleanup'),
    url(r'^login/(?P<key>[0-9A-Za-z]+-[a-z0-9-]+)/$', login, name='loginurl-login'),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/directory/'}, name='logout'),
    # Haystack's FacetedSearchView is a callable instance, not a CBV.
    url(r'^search/$', FacetedSearchView(form_class=DirectoryFacetedSearchForm, searchqueryset=sqs), name='haystack_search'),
    url(r'^professional/(?P<slug>[\w-]+)/', PersonDetailView.as_view(), name="person-detail"),
    url(r'^add/$', PersonCreateView.as_view(), name="person-create"),
    url(r'^edit/$', PersonUpdateView.as_view(), name="person-update"),
    url(r'^$', IndexView.as_view(), name="index")
)
|
{
"content_hash": "7d1c7a051e06623341cd859d77487ced",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 121,
"avg_line_length": 43.77777777777778,
"alnum_prop": 0.7115059221658206,
"repo_name": "ocwc/directory",
"id": "56b76818bb0c8998a49a2fc615ca4791843998a8",
"size": "1182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directory/web/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32480"
},
{
"name": "HTML",
"bytes": "17269"
},
{
"name": "JavaScript",
"bytes": "1495"
},
{
"name": "Python",
"bytes": "40560"
}
],
"symlink_target": ""
}
|
__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
import io
import os
import sys
import socket
import struct
import time
import tempfile
import itertools
import _multiprocess as _multiprocessing
from . import reduction
from . import util
from . import AuthenticationError, BufferTooShort
from .reduction import ForkingPickler
try:
import _winapi
from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
if sys.platform == 'win32':
raise
_winapi = None
#
#
#
BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.
_mmap_counter = itertools.count()
default_family = 'AF_INET'
families = ['AF_INET']
if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']
if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']
def _init_timeout(timeout=CONNECTION_TIMEOUT):
return time.time() + timeout
def _check_timeout(t):
return time.time() > t
#
#
#
def arbitrary_address(family):
    '''
    Return an arbitrary free address for the given family
    '''
    if family == 'AF_INET':
        # Port 0 asks the OS to pick any free port on loopback.
        return ('localhost', 0)
    if family == 'AF_UNIX':
        return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
    if family == 'AF_PIPE':
        # Pipe names embed the pid plus a per-process counter so that
        # they never collide.
        prefix = r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter))
        return tempfile.mktemp(prefix=prefix, dir="")
    raise ValueError('unrecognized family')
def _validate_family(family):
'''
Checks if the family is valid for the current environment.
'''
if sys.platform != 'win32' and family == 'AF_PIPE':
raise ValueError('Family %s is not recognized.' % family)
if sys.platform == 'win32' and family == 'AF_UNIX':
# double check
if not hasattr(socket, family):
raise ValueError('Family %s is not recognized.' % family)
def address_type(address):
    '''
    Return the types of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    '''
    # Exact type checks (not isinstance) match the original semantics.
    kind = type(address)
    if kind == tuple:
        return 'AF_INET'
    if kind is str:
        # Windows pipe addresses always start with two backslashes.
        return 'AF_PIPE' if address.startswith('\\\\') else 'AF_UNIX'
    raise ValueError('address type of %r unrecognized' % address)
#
# Connection classes
#
class _ConnectionBase:
    """Shared logic for connection objects.

    Subclasses provide the raw primitives (_close, _send_bytes,
    _recv_bytes, _poll); this base class handles handle bookkeeping,
    readable/writable checks, and the pickle-based send/recv API.
    """
    _handle = None

    def __init__(self, handle, readable=True, writable=True):
        # __index__ accepts ints and int-like handle wrappers.
        handle = handle.__index__()
        if handle < 0:
            raise ValueError("invalid handle")
        if not readable and not writable:
            raise ValueError(
                "at least one of `readable` and `writable` must be True")
        self._handle = handle
        self._readable = readable
        self._writable = writable

    # XXX should we use util.Finalize instead of a __del__?
    def __del__(self):
        if self._handle is not None:
            self._close()

    def _check_closed(self):
        if self._handle is None:
            raise OSError("handle is closed")

    def _check_readable(self):
        if not self._readable:
            raise OSError("connection is write-only")

    def _check_writable(self):
        if not self._writable:
            raise OSError("connection is read-only")

    def _bad_message_length(self):
        # An oversized incoming message leaves the stream unusable for
        # reading; a duplex connection is downgraded to write-only, a
        # read-only one is closed outright.
        if self._writable:
            self._readable = False
        else:
            self.close()
        raise OSError("bad message length")

    @property
    def closed(self):
        """True if the connection is closed"""
        return self._handle is None

    @property
    def readable(self):
        """True if the connection is readable"""
        return self._readable

    @property
    def writable(self):
        """True if the connection is writable"""
        return self._writable

    def fileno(self):
        """File descriptor or handle of the connection"""
        self._check_closed()
        return self._handle

    def close(self):
        """Close the connection"""
        if self._handle is not None:
            try:
                self._close()
            finally:
                # Mark closed even if the underlying close raised.
                self._handle = None

    def send_bytes(self, buf, offset=0, size=None):
        """Send the bytes data from a bytes-like object"""
        self._check_closed()
        self._check_writable()
        m = memoryview(buf)
        # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
        if m.itemsize > 1:
            m = memoryview(bytes(m))
        n = len(m)
        # Validate offset/size against the buffer before slicing.
        if offset < 0:
            raise ValueError("offset is negative")
        if n < offset:
            raise ValueError("buffer length < offset")
        if size is None:
            size = n - offset
        elif size < 0:
            raise ValueError("size is negative")
        elif offset + size > n:
            raise ValueError("buffer length < offset + size")
        self._send_bytes(m[offset:offset + size])

    def send(self, obj):
        """Send a (picklable) object"""
        self._check_closed()
        self._check_writable()
        self._send_bytes(ForkingPickler.dumps(obj))

    def recv_bytes(self, maxlength=None):
        """
        Receive bytes data as a bytes object.
        """
        self._check_closed()
        self._check_readable()
        if maxlength is not None and maxlength < 0:
            raise ValueError("negative maxlength")
        buf = self._recv_bytes(maxlength)
        if buf is None:
            # Subclass signalled an over-long message.
            self._bad_message_length()
        return buf.getvalue()

    def recv_bytes_into(self, buf, offset=0):
        """
        Receive bytes data into a writeable bytes-like object.
        Return the number of bytes read.
        """
        self._check_closed()
        self._check_readable()
        with memoryview(buf) as m:
            # Get bytesize of arbitrary buffer
            itemsize = m.itemsize
            bytesize = itemsize * len(m)
            if offset < 0:
                raise ValueError("negative offset")
            elif offset > bytesize:
                raise ValueError("offset too large")
            result = self._recv_bytes()
            size = result.tell()
            if bytesize < offset + size:
                # Caller's buffer is too small; hand the payload back
                # inside the exception instead of losing it.
                raise BufferTooShort(result.getvalue())
            # Message can fit in dest
            result.seek(0)
            result.readinto(m[offset // itemsize :
                              (offset + size) // itemsize])
            return size

    def recv(self):
        """Receive a (picklable) object"""
        self._check_closed()
        self._check_readable()
        buf = self._recv_bytes()
        return ForkingPickler.loads(buf.getbuffer())

    def poll(self, timeout=0.0):
        """Whether there is any input available to be read"""
        self._check_closed()
        self._check_readable()
        return self._poll(timeout)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
if _winapi:

    class PipeConnection(_ConnectionBase):
        """
        Connection class based on a Windows named pipe.
        Overlapped I/O is used, so the handles must have been created
        with FILE_FLAG_OVERLAPPED.
        """
        # A zero-byte read can be reported by ReadFile; remember it so the
        # next _recv_bytes/_poll can account for the pending empty message.
        _got_empty_message = False

        def _close(self, _CloseHandle=_winapi.CloseHandle):
            _CloseHandle(self._handle)

        def _send_bytes(self, buf):
            ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
            try:
                if err == _winapi.ERROR_IO_PENDING:
                    # Wait for the overlapped write to finish.
                    waitres = _winapi.WaitForMultipleObjects(
                        [ov.event], False, INFINITE)
                    assert waitres == WAIT_OBJECT_0
            except:
                ov.cancel()
                raise
            finally:
                nwritten, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert nwritten == len(buf)

        def _recv_bytes(self, maxsize=None):
            if self._got_empty_message:
                self._got_empty_message = False
                return io.BytesIO()
            else:
                # Read a first chunk; ERROR_MORE_DATA means the message is
                # larger than this buffer and the rest is fetched below.
                bsize = 128 if maxsize is None else min(maxsize, 128)
                try:
                    ov, err = _winapi.ReadFile(self._handle, bsize,
                                               overlapped=True)
                    try:
                        if err == _winapi.ERROR_IO_PENDING:
                            waitres = _winapi.WaitForMultipleObjects(
                                [ov.event], False, INFINITE)
                            assert waitres == WAIT_OBJECT_0
                    except:
                        ov.cancel()
                        raise
                    finally:
                        nread, err = ov.GetOverlappedResult(True)
                        if err == 0:
                            f = io.BytesIO()
                            f.write(ov.getbuffer())
                            return f
                        elif err == _winapi.ERROR_MORE_DATA:
                            return self._get_more_data(ov, maxsize)
                except OSError as e:
                    if e.winerror == _winapi.ERROR_BROKEN_PIPE:
                        # Peer closed its end: translate to EOF.
                        raise EOFError
                    else:
                        raise
            raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")

        def _poll(self, timeout):
            # A buffered empty message or any peeked bytes count as ready.
            if (self._got_empty_message or
                        _winapi.PeekNamedPipe(self._handle)[0] != 0):
                return True
            return bool(wait([self], timeout))

        def _get_more_data(self, ov, maxsize):
            # Fetch the remainder of a message that overflowed the first
            # ReadFile buffer (ERROR_MORE_DATA path).
            buf = ov.getbuffer()
            f = io.BytesIO()
            f.write(buf)
            left = _winapi.PeekNamedPipe(self._handle)[1]
            assert left > 0
            if maxsize is not None and len(buf) + left > maxsize:
                self._bad_message_length()
            ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
            rbytes, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert rbytes == left
            f.write(ov.getbuffer())
            return f
class Connection(_ConnectionBase):
    """
    Connection class based on an arbitrary file descriptor (Unix only), or
    a socket handle (Windows).
    """

    if _winapi:
        def _close(self, _close=_multiprocessing.closesocket):
            _close(self._handle)
        _write = _multiprocessing.send
        _read = _multiprocessing.recv
    else:
        # Binding os.close as a default argument keeps it reachable even
        # during interpreter shutdown, when module globals may be cleared.
        def _close(self, _close=os.close):
            _close(self._handle)
        _write = os.write
        _read = os.read

    def _send(self, buf, write=_write):
        # Loop until the whole buffer is written; partial writes and
        # signal interruptions are both possible.
        remaining = len(buf)
        while True:
            try:
                n = write(self._handle, buf)
            except InterruptedError:
                continue
            remaining -= n
            if remaining == 0:
                break
            buf = buf[n:]

    def _recv(self, size, read=_read):
        # Read exactly `size` bytes, retrying on partial reads/EINTR.
        buf = io.BytesIO()
        handle = self._handle
        remaining = size
        while remaining > 0:
            try:
                chunk = read(handle, remaining)
            except InterruptedError:
                continue
            n = len(chunk)
            if n == 0:
                # EOF at a message boundary is clean; mid-message it is an
                # error.
                if remaining == size:
                    raise EOFError
                else:
                    raise OSError("got end of file during message")
            buf.write(chunk)
            remaining -= n
        return buf

    def _send_bytes(self, buf):
        n = len(buf)
        # For wire compatibility with 3.2 and lower
        header = struct.pack("!i", n)
        if n > 16384:
            # The payload is large so Nagle's algorithm won't be triggered
            # and we'd better avoid the cost of concatenation.
            chunks = [header, buf]
        elif n > 0:
            # Issue # 20540: concatenate before sending, to avoid delays due
            # to Nagle's algorithm on a TCP socket.
            chunks = [header + buf]
        else:
            # This code path is necessary to avoid "broken pipe" errors
            # when sending a 0-length buffer if the other end closed the pipe.
            chunks = [header]
        for chunk in chunks:
            self._send(chunk)

    def _recv_bytes(self, maxsize=None):
        # The wire format is a 4-byte big-endian length header followed by
        # the payload.  Returning None signals "message too long" to the
        # caller, which then invokes _bad_message_length().
        buf = self._recv(4)
        size, = struct.unpack("!i", buf.getvalue())
        if maxsize is not None and size > maxsize:
            return None
        return self._recv(size)

    def _poll(self, timeout):
        r = wait([self], timeout)
        return bool(r)
#
# Public functions
#
class Listener(object):
    '''
    Returns a listener object.

    This is a wrapper for a bound socket which is 'listening' for
    connections, or for a Windows named pipe.
    '''
    def __init__(self, address=None, family=None, backlog=1, authkey=None):
        # Derive the family from the address when not supplied, falling
        # back to the platform default; then pick an address if needed.
        if not family:
            family = (address and address_type(address)) or default_family
        if not address:
            address = arbitrary_address(family)
        _validate_family(family)
        if family == 'AF_PIPE':
            self._listener = PipeListener(address, backlog)
        else:
            self._listener = SocketListener(address, family, backlog)
        if authkey is not None and not isinstance(authkey, bytes):
            raise TypeError('authkey should be a byte string')
        self._authkey = authkey

    def accept(self):
        '''
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        '''
        if self._listener is None:
            raise OSError('listener is closed')
        conn = self._listener.accept()
        if self._authkey:
            # Mutual authentication: challenge the client, then answer the
            # client's challenge of us.
            deliver_challenge(conn, self._authkey)
            answer_challenge(conn, self._authkey)
        return conn

    def close(self):
        '''
        Close the bound socket or named pipe of `self`.
        '''
        if self._listener is None:
            return
        self._listener.close()
        self._listener = None

    @property
    def address(self):
        """Address the underlying listener is bound to."""
        return self._listener._address

    @property
    def last_accepted(self):
        """Address of the most recently accepted client, if any."""
        return self._listener._last_accepted

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
def Client(address, family=None, authkey=None):
    '''
    Returns a connection to the address of a `Listener`
    '''
    family = family or address_type(address)
    _validate_family(family)
    conn = PipeClient(address) if family == 'AF_PIPE' else SocketClient(address)

    if authkey is not None and not isinstance(authkey, bytes):
        raise TypeError('authkey should be a byte string')

    if authkey is not None:
        # Client side answers the server's challenge first, then
        # challenges the server in return.
        answer_challenge(conn, authkey)
        deliver_challenge(conn, authkey)

    return conn
if sys.platform != 'win32':

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        if duplex:
            # A connected socket pair gives bidirectional transport.
            s1, s2 = socket.socketpair()
            s1.setblocking(True)
            s2.setblocking(True)
            c1 = Connection(s1.detach())
            c2 = Connection(s2.detach())
        else:
            # A plain OS pipe: c1 reads, c2 writes.
            fd1, fd2 = os.pipe()
            c1 = Connection(fd1, writable=False)
            c2 = Connection(fd2, readable=False)

        return c1, c2

else:

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        address = arbitrary_address('AF_PIPE')
        if duplex:
            openmode = _winapi.PIPE_ACCESS_DUPLEX
            access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
            obsize, ibsize = BUFSIZE, BUFSIZE
        else:
            # Inbound-only server end; the client end gets write access.
            openmode = _winapi.PIPE_ACCESS_INBOUND
            access = _winapi.GENERIC_WRITE
            obsize, ibsize = 0, BUFSIZE

        h1 = _winapi.CreateNamedPipe(
            address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
            _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
            # default security descriptor: the handle cannot be inherited
            _winapi.NULL
            )
        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
            )
        _winapi.SetNamedPipeHandleState(
            h2, _winapi.PIPE_READMODE_MESSAGE, None, None
            )

        # Complete the server side of the connection handshake.
        overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
        _, err = overlapped.GetOverlappedResult(True)
        assert err == 0

        c1 = PipeConnection(h1, writable=duplex)
        c2 = PipeConnection(h2, readable=duplex)

        return c1, c2
#
# Definitions for connections based on sockets
#
class SocketListener(object):
    '''
    Representation of a socket which is bound to an address and listening
    '''
    def __init__(self, address, family, backlog=1):
        self._socket = socket.socket(getattr(socket, family))
        try:
            # SO_REUSEADDR has different semantics on Windows (issue #2550).
            if os.name == 'posix':
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)
            self._socket.setblocking(True)
            self._socket.bind(address)
            self._socket.listen(backlog)
            self._address = self._socket.getsockname()
        except OSError:
            # Never leak the socket if setup fails part-way.
            self._socket.close()
            raise
        self._family = family
        self._last_accepted = None

        # Only Unix-domain sockets leave a filesystem entry to clean up.
        if family != 'AF_UNIX':
            self._unlink = None
        else:
            self._unlink = util.Finalize(
                self, os.unlink, args=(address,), exitpriority=0
                )

    def accept(self):
        # Retry accept() whenever a signal interrupts it.
        while True:
            try:
                s, self._last_accepted = self._socket.accept()
            except InterruptedError:
                continue
            break
        s.setblocking(True)
        return Connection(s.detach())

    def close(self):
        self._socket.close()
        cleanup = self._unlink
        if cleanup is not None:
            cleanup()
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    sock = socket.socket(getattr(socket, family))
    with sock:
        sock.setblocking(True)
        sock.connect(address)
        # detach() transfers fd ownership to the Connection; the `with`
        # then closes a socket object that no longer owns the descriptor.
        return Connection(sock.detach())
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':

    class PipeListener(object):
        '''
        Representation of a named pipe
        '''
        def __init__(self, address, backlog=None):
            # `backlog` is accepted for interface parity with
            # SocketListener but named pipes have no listen backlog.
            self._address = address
            self._handle_queue = [self._new_handle(first=True)]

            self._last_accepted = None
            util.sub_debug('listener created with address=%r', self._address)
            # close() is a Finalize so the pipe handles are also released
            # at interpreter exit.
            self.close = util.Finalize(
                self, PipeListener._finalize_pipe_listener,
                args=(self._handle_queue, self._address), exitpriority=0
                )

        def _new_handle(self, first=False):
            # Create one more server-side pipe instance; only the first
            # may claim FILE_FLAG_FIRST_PIPE_INSTANCE.
            flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
            if first:
                flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
            return _winapi.CreateNamedPipe(
                self._address, flags,
                _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
                _winapi.PIPE_WAIT,
                _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
                _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
                )

        def accept(self):
            # Keep a spare instance queued so a client always has a pipe
            # instance to connect to.
            self._handle_queue.append(self._new_handle())
            handle = self._handle_queue.pop(0)
            try:
                ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
            except OSError as e:
                if e.winerror != _winapi.ERROR_NO_DATA:
                    raise
                # ERROR_NO_DATA can occur if a client has already connected,
                # written data and then disconnected -- see Issue 14725.
            else:
                try:
                    res = _winapi.WaitForMultipleObjects(
                        [ov.event], False, INFINITE)
                except:
                    ov.cancel()
                    _winapi.CloseHandle(handle)
                    raise
                finally:
                    _, err = ov.GetOverlappedResult(True)
                    assert err == 0
            return PipeConnection(handle)

        @staticmethod
        def _finalize_pipe_listener(queue, address):
            # Close every pipe instance still queued at shutdown.
            util.sub_debug('closing listener with address=%r', address)
            for handle in queue:
                _winapi.CloseHandle(handle)

    def PipeClient(address):
        '''
        Return a connection object connected to the pipe given by `address`
        '''
        # Retry while all pipe instances are busy, up to CONNECTION_TIMEOUT.
        t = _init_timeout()
        while 1:
            try:
                _winapi.WaitNamedPipe(address, 1000)
                h = _winapi.CreateFile(
                    address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
                    0, _winapi.NULL, _winapi.OPEN_EXISTING,
                    _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
                    )
            except OSError as e:
                if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
                                      _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
                    raise
            else:
                break
        else:
            raise

        _winapi.SetNamedPipeHandleState(
            h, _winapi.PIPE_READMODE_MESSAGE, None, None
            )
        return PipeConnection(h)
#
# Authentication stuff
#
# Length of the random challenge nonce, in bytes.
MESSAGE_LENGTH = 20

# Wire markers for the HMAC handshake below.
CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'


def deliver_challenge(connection, authkey):
    """Server half of the handshake: send a random challenge over
    `connection` and verify the peer's HMAC-MD5 response to it.

    Raises AuthenticationError if the response does not match.
    """
    import hmac
    assert isinstance(authkey, bytes)
    message = os.urandom(MESSAGE_LENGTH)
    connection.send_bytes(CHALLENGE + message)
    digest = hmac.new(authkey, message, 'md5').digest()
    response = connection.recv_bytes(256)        # reject large message
    # SECURITY FIX: compare digests in constant time so response timing
    # cannot leak how many leading bytes of the digest were correct
    # (the original `response == digest` short-circuits on mismatch).
    if hmac.compare_digest(response, digest):
        connection.send_bytes(WELCOME)
    else:
        connection.send_bytes(FAILURE)
        raise AuthenticationError('digest received was wrong')


def answer_challenge(connection, authkey):
    """Client half of the handshake: HMAC the server's challenge with
    `authkey` and send the digest back.

    Raises AuthenticationError if the server rejects the digest.
    """
    import hmac
    assert isinstance(authkey, bytes)
    message = connection.recv_bytes(256)         # reject large message
    assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
    message = message[len(CHALLENGE):]
    digest = hmac.new(authkey, message, 'md5').digest()
    connection.send_bytes(digest)
    response = connection.recv_bytes(256)        # reject large message
    # WELCOME/FAILURE are public constants, so a plain comparison is fine.
    if response != WELCOME:
        raise AuthenticationError('digest sent was rejected')
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
    """Proxy around a connection that serializes objects with the supplied
    dumps/loads callables instead of the connection's native mechanism."""

    def __init__(self, conn, dumps, loads):
        self._conn = conn
        self._dumps = dumps
        self._loads = loads
        # Re-export the byte-level API of the wrapped connection unchanged.
        for name in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
            setattr(self, name, getattr(conn, name))

    def send(self, obj):
        self._conn.send_bytes(self._dumps(obj))

    def recv(self):
        return self._loads(self._conn.recv_bytes())
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf-8'))
return obj
class XmlListener(Listener):
    """Listener whose accepted connections (de)serialize via XML-RPC."""

    def accept(self):
        global xmlrpclib
        import xmlrpc.client as xmlrpclib  # lazy: only loaded in XML mode
        conn = Listener.accept(self)
        return ConnectionWrapper(conn, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
    """Like Client(), but the returned connection speaks XML-RPC."""
    global xmlrpclib
    import xmlrpc.client as xmlrpclib  # lazy: only loaded in XML mode
    plain_conn = Client(*args, **kwds)
    return ConnectionWrapper(plain_conn, _xml_dumps, _xml_loads)
#
# Wait
#
if sys.platform == 'win32':
def _exhaustive_wait(handles, timeout):
    # Return ALL handles which are currently signalled.  (Only
    # returning the first signalled might create starvation issues.)
    L = list(handles)
    ready = []
    while L:
        res = _winapi.WaitForMultipleObjects(L, False, timeout)
        if res == WAIT_TIMEOUT:
            break
        elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
            # res encodes the index of the first signalled handle in L.
            res -= WAIT_OBJECT_0
        elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
            # Abandoned-wait results also identify a signalled handle.
            res -= WAIT_ABANDONED_0
        else:
            raise RuntimeError('Should not get here')
        ready.append(L[res])
        # Re-check only the handles after the signalled one, with a zero
        # timeout, so every already-signalled handle is collected without
        # blocking a second time.
        L = L[res+1:]
        timeout = 0
    return ready

# Error codes that mean the remote end has gone away; for wait() purposes
# such a handle counts as "ready" (a read will promptly report closure).
_ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
def wait(object_list, timeout=None):
    '''
    Wait till an object in object_list is ready/readable.

    Returns list of those objects in object_list which are ready/readable.

    `timeout` is in seconds (None means wait forever); it is converted
    below to the milliseconds expected by the Win32 wait functions.
    '''
    if timeout is None:
        timeout = INFINITE
    elif timeout < 0:
        timeout = 0
    else:
        timeout = int(timeout * 1000 + 0.5)   # round to nearest millisecond
    object_list = list(object_list)
    waithandle_to_obj = {}    # wait handle -> original object
    ov_list = []              # pending zero-length overlapped reads
    ready_objects = set()
    ready_handles = set()
    try:
        for o in object_list:
            try:
                fileno = getattr(o, 'fileno')
            except AttributeError:
                # No fileno(): treat the object itself as a waitable handle.
                waithandle_to_obj[o.__index__()] = o
            else:
                # start an overlapped read of length zero
                try:
                    ov, err = _winapi.ReadFile(fileno(), 0, True)
                except OSError as e:
                    # _ready_errors mean the other end is gone, which counts
                    # as readable; anything else propagates immediately.
                    err = e.winerror
                    if err not in _ready_errors:
                        raise
                if err == _winapi.ERROR_IO_PENDING:
                    ov_list.append(ov)
                    waithandle_to_obj[ov.event] = o
                else:
                    # If o.fileno() is an overlapped pipe handle and
                    # err == 0 then there is a zero length message
                    # in the pipe, but it HAS NOT been consumed.
                    ready_objects.add(o)
                    timeout = 0   # something is already ready: just poll
        ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
    finally:
        # request that overlapped reads stop
        for ov in ov_list:
            ov.cancel()
        # wait for all overlapped reads to stop
        for ov in ov_list:
            try:
                _, err = ov.GetOverlappedResult(True)
            except OSError as e:
                err = e.winerror
                if err not in _ready_errors:
                    raise
            if err != _winapi.ERROR_OPERATION_ABORTED:
                # The read completed rather than being cancelled, so the
                # corresponding object is readable.
                o = waithandle_to_obj[ov.event]
                ready_objects.add(o)
                if err == 0:
                    # If o.fileno() is an overlapped pipe handle then
                    # a zero length message HAS been consumed.
                    if hasattr(o, '_got_empty_message'):
                        o._got_empty_message = True
    ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
    # Preserve the caller's ordering of object_list in the result.
    return [o for o in object_list if o in ready_objects]
else:
import selectors

# poll/select have the advantage of not requiring any extra file
# descriptor, contrarily to epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
    _WaitSelector = selectors.PollSelector
else:
    _WaitSelector = selectors.SelectSelector

def wait(object_list, timeout=None):
    '''
    Wait till an object in object_list is ready/readable.

    Returns list of those objects in object_list which are ready/readable.

    `timeout` is in seconds; None means wait forever.  Objects must be
    acceptable to selectors (have fileno() or be integer descriptors).
    '''
    with _WaitSelector() as selector:
        for obj in object_list:
            selector.register(obj, selectors.EVENT_READ)

        # Use a monotonic clock for the deadline: with time.time() a
        # system clock step (NTP, manual change) could lengthen or
        # shorten the wait arbitrarily.
        if timeout is not None:
            deadline = time.monotonic() + timeout

        while True:
            ready = selector.select(timeout)
            if ready:
                return [key.fileobj for (key, events) in ready]
            # select() may wake early (e.g. interrupted by a signal);
            # recompute the remaining time and retry until the deadline.
            if timeout is not None:
                timeout = deadline - time.monotonic()
                if timeout < 0:
                    return ready
#
# Make connection and socket objects sharable if possible
#
if sys.platform == 'win32':
def reduce_connection(conn):
    # Pickling support: duplicate the underlying socket through the
    # resource sharer so another process can rebuild the Connection.
    handle = conn.fileno()
    with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
        from . import resource_sharer
        ds = resource_sharer.DupSocket(s)
        return rebuild_connection, (ds, conn.readable, conn.writable)

def rebuild_connection(ds, readable, writable):
    # Counterpart of reduce_connection: detach the duplicated socket and
    # wrap its raw handle in a new Connection with the same access flags.
    sock = ds.detach()
    return Connection(sock.detach(), readable, writable)

reduction.register(Connection, reduce_connection)

def reduce_pipe_connection(conn):
    # Pickling support for named-pipe connections: duplicate the pipe
    # handle with only the access rights the connection actually uses.
    access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
              (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
    dh = reduction.DupHandle(conn.fileno(), access)
    return rebuild_pipe_connection, (dh, conn.readable, conn.writable)

def rebuild_pipe_connection(dh, readable, writable):
    # Counterpart of reduce_pipe_connection: take ownership of the
    # duplicated handle and wrap it in a PipeConnection.
    handle = dh.detach()
    return PipeConnection(handle, readable, writable)

reduction.register(PipeConnection, reduce_pipe_connection)
else:
def reduce_connection(conn):
    """Pickling helper: represent *conn* as a duplicated file descriptor
    plus its readable/writable flags."""
    dup_fd = reduction.DupFd(conn.fileno())
    return rebuild_connection, (dup_fd, conn.readable, conn.writable)

def rebuild_connection(df, readable, writable):
    """Inverse of reduce_connection: detach the duplicated descriptor and
    wrap it in a fresh Connection."""
    return Connection(df.detach(), readable, writable)

reduction.register(Connection, reduce_connection)
|
{
"content_hash": "1f4ff51d256fb48e2074922cf2db4fb1",
"timestamp": "",
"source": "github",
"line_count": 945,
"max_line_length": 84,
"avg_line_length": 32.15873015873016,
"alnum_prop": 0.5473181967752551,
"repo_name": "seishei/multiprocess",
"id": "a07c89d7c2c6433faa2a90588a20bbf04eb18f03",
"size": "30586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3.4/multiprocess/connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "357144"
},
{
"name": "CSS",
"bytes": "38738"
},
{
"name": "HTML",
"bytes": "1079743"
},
{
"name": "Python",
"bytes": "1946608"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
import numpy
class TestWhileOp(unittest.TestCase):
    """Exercises fluid's While op: two consecutive while loops accumulate
    three input vectors into a tensor array, and the final sum is checked
    against the plain numpy sum of the inputs."""

    def test_simple_forward(self):
        # Three fixed-shape float32 inputs (no implicit batch dimension).
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')

        # Build data_array = [d0, d1, d2] and mem_array = [zeros(10)]
        # using an int64 index that the loops will reuse.
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True   # loop counters are not differentiated
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)
        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)
        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        # First loop: counter i restarts at 0 and runs while i < 1.
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)

        # Second loop: counter j starts at 1 and runs while j < 3.
        j = layers.fill_constant(shape=[1], dtype='int64', value=1)
        j.stop_gradient = True
        array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len2.stop_gradient = True
        cond2 = layers.less_than(x=j, y=array_len2)

        while_op = layers.While(cond=cond)
        while_op2 = layers.While(cond=cond2)

        # Loop body 1: mem_array[i+1] = data_array[i] + mem_array[i].
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])
            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            # Refresh the loop condition in place for the While op.
            layers.less_than(x=i, y=array_len, cond=cond)

        # Loop body 2: continues the same accumulation driven by j.
        with while_op2.block():
            d2 = layers.array_read(array=data_array, i=j)
            prev2 = layers.array_read(array=mem_array, i=j)
            result2 = layers.sums(input=[d2, prev2])
            j = layers.increment(x=j, in_place=True)
            layers.array_write(result2, i=j, array=mem_array)
            layers.less_than(x=j, y=array_len2, cond=cond2)

        sum_result = layers.array_read(array=mem_array, i=j)
        loss = layers.mean(sum_result)
        # Also make sure a backward pass over nested While ops builds.
        append_backward(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)

        d = []
        for i in range(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        # The network's accumulated sum must match numpy's total sum.
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "464451a671543917405c08d6573f30a0",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 34.52873563218391,
"alnum_prop": 0.5709054593874834,
"repo_name": "baidu/Paddle",
"id": "43fd9d425bffb1e0198f4e845da959570a964990",
"size": "3615",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_while_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2771237"
},
{
"name": "CMake",
"bytes": "113670"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40913"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "892636"
},
{
"name": "Shell",
"bytes": "64351"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.