"""
@author: Daniel Antunes & Cédric Hubert
"""
import vrep
import sys
import numpy as np
import random
#import time
import matplotlib.pyplot as plt
# constant declarations
DELAY = 150
NB_ITERATIONS = 1500
NB_ACTIONS_ECHANTILLONAGE = 40
NS = 250
K = 10
# variable declarations
LE = [] #error at each time step
LEm= [] #mean error at each time step
data_MP = [] #rows of the form [speedLeft,speedRight,frequency,prediction error]
data_P = [] #rows of the form [speedLeft,speedRight,frequency,distance to the ball]
vrep.simxFinish(-1) # close any previously open connections
clientID = vrep.simxStart('127.0.0.1',19999,True,True,5000,5) #establish the connection with V-REP
if clientID == -1:
print("La connexion a échouée")
sys.exit("Connexion échouée")
else:
print("Connexion au seveur remote API établie")
# recuperation des "handlers" dans la scene
returnCode, robotHandle = vrep.simxGetObjectHandle(clientID,"Robot", vrep.simx_opmode_oneshot_wait)
returnCode, leftMotor = vrep.simxGetObjectHandle(clientID,"leftMotor", vrep.simx_opmode_oneshot_wait)
returnCode, rightMotor = vrep.simxGetObjectHandle(clientID,"rightMotor", vrep.simx_opmode_oneshot_wait)
returnCode, balle = vrep.simxGetObjectHandle(clientID,"balle", vrep.simx_opmode_oneshot_wait)
def execute_action(cID, leftHandle, rightHandle, action, botHandle, ballHandle):
vrep.simxSetJointTargetVelocity(cID,leftHandle,action[0],vrep.simx_opmode_oneshot)
vrep.simxSetJointTargetVelocity(cID,rightHandle,action[1],vrep.simx_opmode_oneshot)
if action[2] > 0.66 and action[2] <= 1:
# ball jumps to robot
returnCode1, pos1 = vrep.simxGetObjectPosition(cID,botHandle,-1,vrep.simx_opmode_oneshot_wait)
returnCode2, pos2 = vrep.simxGetObjectPosition(cID,ballHandle,-1,vrep.simx_opmode_oneshot_wait)
vrep.simxSetObjectPosition(cID,ballHandle,-1,[pos1[0],pos1[1],pos2[2]],vrep.simx_opmode_oneshot)
if action[2] >= 0 and action[2] <= 0.33:
# ball goes to random position
returnCode1, pos1 = vrep.simxGetObjectPosition(cID,botHandle,-1,vrep.simx_opmode_oneshot_wait)
returnCode2, pos2 = vrep.simxGetObjectPosition(cID,ballHandle,-1,vrep.simx_opmode_oneshot_wait)
vrep.simxSetObjectPosition(cID,ballHandle,-1,[random.uniform(-2.3,2.3),random.uniform(-2.3,2.3),pos2[2]],vrep.simx_opmode_oneshot)
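# Note on the action encoding used by execute_action (summary of the branches
# above): action[0] and action[1] are the target velocities of the left and
# right wheels; action[2] in [0, 1] controls the ball: above 0.66 the ball is
# teleported onto the robot, at or below 0.33 it is teleported to a random
# position in [-2.3, 2.3] x [-2.3, 2.3], and in between the ball is left alone.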
def distance(cID):
retCode, dist = vrep.simxGetDistanceHandle(cID,"distance",vrep.simx_opmode_oneshot)
retCode, distance = vrep.simxReadDistance(cID,dist,vrep.simx_opmode_oneshot)
return distance
def bouclePrincipale():
t=0
while t < NB_ITERATIONS:
possibleActions = [] #candidate actions for this step, drawn at random
LPActions= [] #learning progress computed for each candidate action
'''
Generate the list of candidate actions
'''
for i in range(NB_ACTIONS_ECHANTILLONAGE):
possibleActions.append( [random.uniform(-1,1) , random.uniform(-1,1) , random.uniform(0,1)] )
'''
Action selection
'''
for i in range(len(possibleActions)):
Ep = MetaPredictionMP(possibleActions[i]) #predicted prediction error for this action
tempLE = list(LE) #clone LE
tempLE.append(Ep) #append the predicted error to the cloned copy
if t == 0:
LP = Ep
else:
if t < DELAY:
Emp = np.mean(tempLE)
LP = -(Emp-LEm[0])
else:
Emp= np.mean(tempLE[-DELAY:])
LP = -(Emp-LEm[t-DELAY])
LPActions.append(LP)
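# LP = LEm[t-DELAY] - Emp: the predicted drop of the mean prediction error over
# the sliding window, i.e. the expected learning progress for this candidate action.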
if(random.random() > 0.1): #exploitation
indiceActionChoisie = np.argmax(LPActions)
else: #exploration
indiceActionChoisie = 0
'''
Prediction by machine P
'''
S = PredictionP(possibleActions[indiceActionChoisie])
'''
Execute the action in the simulator
'''
execute_action(clientID,leftMotor,rightMotor,possibleActions[indiceActionChoisie],robotHandle,balle)
#time.sleep(1) # uncomment if the robot should make larger moves between actions
'''
Check the outcome of the action
'''
# distance measurement, "perfect" sensor
Sa = distance(clientID)
#store the sample in data_P
ajoutData=list(possibleActions[indiceActionChoisie])
ajoutData.append(Sa)
data_P.append(ajoutData)
#compute the prediction error
E = abs(S-Sa)
#store the sample in data_MP
ajoutData=list(possibleActions[indiceActionChoisie])
ajoutData.append(E)
data_MP.append(ajoutData)
#update the lists
LE.append(E)
if len(LE) < DELAY:
Em = 0
else:
Em = np.mean(LE[-DELAY:])
LEm.append(Em)
print(t)
t += 1
#Plot the mean error curve
plt.plot(LEm)
plt.show()
return 0
def MetaPredictionMP(action):
d=[] #distance between our action and each example in the database
res=0 #mean value of the K nearest neighbours, to be returned
if len(data_MP) == 0:
return res
if len(data_MP) < K:
for i in range(len(data_MP)):
res += data_MP[i][3]
res = res / len(data_MP)
else:
for i in range(len(data_MP)):
d1=abs(data_MP[i][0]-action[0])
d2=abs(data_MP[i][1]-action[1])
d3=abs(data_MP[i][2]-action[2])
dtot=d1+d2+d3
d.append([dtot,i])
d.sort() #sort by increasing distance
for i in range(K):
res += data_MP[d[i][1]][3]
res = res / K
return res
def PredictionP(action):
d=[] #distance between our action and each example in the database
res=0 #mean value of the K nearest neighbours, to be returned
if len(data_P) == 0:
return res
if len(data_P) < K:
for i in range(len(data_P)):
res += data_P[i][3]
res = res / len(data_P)
else:
for i in range(len(data_P)):
d1=abs(data_P[i][0]-action[0])
d2=abs(data_P[i][1]-action[1])
d3=abs(data_P[i][2]-action[2])
dtot=d1+d2+d3
d.append([dtot,i])
d.sort() #sort by increasing distance
if K <= len(data_P):
for i in range(K):
res += data_P[d[i][1]][3]
res = res / K
return res
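# Illustrative sketch (not part of the original script): MetaPredictionMP and
# PredictionP implement the same K-nearest-neighbour average and differ only in
# the database they query. Assuming the shared row layout [vLeft, vRight, freq,
# value], they could be factored into a single helper such as:
def knn_average(database, action, k=K):
    # mean of the stored value over the k nearest examples (Manhattan distance
    # on the three action components), mirroring the two functions above
    if len(database) == 0:
        return 0
    if len(database) < k:
        return sum(row[3] for row in database) / len(database)
    dists = sorted(
        (abs(row[0] - action[0]) + abs(row[1] - action[1]) + abs(row[2] - action[2]), i)
        for i, row in enumerate(database)
    )
    return sum(database[i][3] for _, i in dists[:k]) / k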
bouclePrincipale()
|
{
"content_hash": "d15e0b3d66937e81ed53729b2b8dd246",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 138,
"avg_line_length": 36.244680851063826,
"alnum_prop": 0.6061050777810391,
"repo_name": "dantunescost/Projet-IAR-intelligent-adaptive-curiosity",
"id": "703685290ce59cab9a744c361348a4e5d4d43801",
"size": "6865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "152297"
}
],
"symlink_target": ""
}
|
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table width="100%" border="1" cellspacing="0" bordercolor="#999999">
<tr style="FONT-WEIGHT: bold; COLOR: #dddddd; BACKGROUND-COLOR: #999999">
<td nowrap>Field Name </td>
<td>Value</td>
</tr>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<td valign="top" nowrap><b>%s</b></td>
<td width="100%%" style="white-space:pre">%s</td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
|
{
"content_hash": "ca27f204831200cdd8a192ace62bda13",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6301369863013698,
"repo_name": "farra/apachecon-consite",
"id": "a66185f1e3823b1a2eccb13c7dcf098630b670e9",
"size": "2067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/javascripts/fckeditor/_samples/py/sampleposteddata.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Support for HDMI CEC."""
from collections import defaultdict
from functools import partial, reduce
import logging
import multiprocessing
from pycec.cec import CecAdapter
from pycec.commands import CecCommand, KeyPressCommand, KeyReleaseCommand
from pycec.const import (
ADDR_AUDIOSYSTEM,
ADDR_BROADCAST,
ADDR_UNREGISTERED,
KEY_MUTE_OFF,
KEY_MUTE_ON,
KEY_MUTE_TOGGLE,
KEY_VOLUME_DOWN,
KEY_VOLUME_UP,
POWER_OFF,
POWER_ON,
STATUS_PLAY,
STATUS_STILL,
STATUS_STOP,
)
from pycec.network import HDMINetwork, PhysicalAddress
from pycec.tcp import TcpAdapter
import voluptuous as vol
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_PLATFORM,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery, event
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
DOMAIN = "hdmi_cec"
_LOGGER = logging.getLogger(__name__)
DEFAULT_DISPLAY_NAME = "HA"
CONF_TYPES = "types"
ICON_UNKNOWN = "mdi:help"
ICON_AUDIO = "mdi:speaker"
ICON_PLAYER = "mdi:play"
ICON_TUNER = "mdi:radio"
ICON_RECORDER = "mdi:microphone"
ICON_TV = "mdi:television"
ICONS_BY_TYPE = {
0: ICON_TV,
1: ICON_RECORDER,
3: ICON_TUNER,
4: ICON_PLAYER,
5: ICON_AUDIO,
}
CEC_DEVICES = defaultdict(list)
CMD_UP = "up"
CMD_DOWN = "down"
CMD_MUTE = "mute"
CMD_UNMUTE = "unmute"
CMD_MUTE_TOGGLE = "toggle mute"
CMD_PRESS = "press"
CMD_RELEASE = "release"
EVENT_CEC_COMMAND_RECEIVED = "cec_command_received"
EVENT_CEC_KEYPRESS_RECEIVED = "cec_keypress_received"
ATTR_PHYSICAL_ADDRESS = "physical_address"
ATTR_TYPE_ID = "type_id"
ATTR_VENDOR_NAME = "vendor_name"
ATTR_VENDOR_ID = "vendor_id"
ATTR_DEVICE = "device"
ATTR_TYPE = "type"
ATTR_KEY = "key"
ATTR_DUR = "dur"
ATTR_SRC = "src"
ATTR_DST = "dst"
ATTR_CMD = "cmd"
ATTR_ATT = "att"
ATTR_RAW = "raw"
ATTR_DIR = "dir"
ATTR_ABT = "abt"
ATTR_NEW = "new"
ATTR_ON = "on"
ATTR_OFF = "off"
ATTR_TOGGLE = "toggle"
_VOL_HEX = vol.Any(vol.Coerce(int), lambda x: int(x, 16))
SERVICE_SEND_COMMAND = "send_command"
SERVICE_SEND_COMMAND_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_CMD): _VOL_HEX,
vol.Optional(ATTR_SRC): _VOL_HEX,
vol.Optional(ATTR_DST): _VOL_HEX,
vol.Optional(ATTR_ATT): _VOL_HEX,
vol.Optional(ATTR_RAW): vol.Coerce(str),
},
extra=vol.PREVENT_EXTRA,
)
SERVICE_VOLUME = "volume"
SERVICE_VOLUME_SCHEMA = vol.Schema(
{
vol.Optional(CMD_UP): vol.Any(CMD_PRESS, CMD_RELEASE, vol.Coerce(int)),
vol.Optional(CMD_DOWN): vol.Any(CMD_PRESS, CMD_RELEASE, vol.Coerce(int)),
vol.Optional(CMD_MUTE): vol.Any(ATTR_ON, ATTR_OFF, ATTR_TOGGLE),
},
extra=vol.PREVENT_EXTRA,
)
SERVICE_UPDATE_DEVICES = "update"
SERVICE_UPDATE_DEVICES_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({})}, extra=vol.PREVENT_EXTRA
)
SERVICE_SELECT_DEVICE = "select_device"
SERVICE_POWER_ON = "power_on"
SERVICE_STANDBY = "standby"
# pylint: disable=unnecessary-lambda
DEVICE_SCHEMA = vol.Schema(
{
vol.All(cv.positive_int): vol.Any(
lambda devices: DEVICE_SCHEMA(devices), cv.string
)
}
)
CONF_DISPLAY_NAME = "osd_name"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_DEVICES): vol.Any(
DEVICE_SCHEMA, vol.Schema({vol.All(cv.string): vol.Any(cv.string)})
),
vol.Optional(CONF_PLATFORM): vol.Any(SWITCH, MEDIA_PLAYER),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_DISPLAY_NAME): cv.string,
vol.Optional(CONF_TYPES, default={}): vol.Schema(
{cv.entity_id: vol.Any(MEDIA_PLAYER, SWITCH)}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
WATCHDOG_INTERVAL = 120
EVENT_HDMI_CEC_UNAVAILABLE = "hdmi_cec_unavailable"
def pad_physical_address(addr):
"""Right-pad a physical address."""
return addr + [0] * (4 - len(addr))
def parse_mapping(mapping, parents=None):
"""Parse configuration device mapping."""
if parents is None:
parents = []
for addr, val in mapping.items():
if isinstance(addr, (str,)) and isinstance(val, (str,)):
yield (addr, PhysicalAddress(val))
else:
cur = parents + [addr]
if isinstance(val, dict):
yield from parse_mapping(val, cur)
elif isinstance(val, str):
yield (val, pad_physical_address(cur))
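# Example (illustrative, hypothetical values): for the mapping
#   {"TV": "0.0.0.0", 1: {1: "Pi"}}
# parse_mapping yields ("TV", PhysicalAddress("0.0.0.0")) for the string/string
# pair, and ("Pi", [1, 1, 0, 0]) for the nested entry, where pad_physical_address
# right-pads the collected keys [1, 1] with zeros to four elements.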
def setup(hass: HomeAssistant, base_config):
"""Set up the CEC capability."""
# Parse configuration into a dict of device name to physical address
# represented as a list of four elements.
device_aliases = {}
devices = base_config[DOMAIN].get(CONF_DEVICES, {})
_LOGGER.debug("Parsing config %s", devices)
device_aliases.update(parse_mapping(devices))
_LOGGER.debug("Parsed devices: %s", device_aliases)
platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)
loop = (
# Create own thread if more than 1 CPU
hass.loop
if multiprocessing.cpu_count() < 2
else None
)
host = base_config[DOMAIN].get(CONF_HOST)
display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)
if host:
adapter = TcpAdapter(host, name=display_name, activate_source=False)
else:
adapter = CecAdapter(name=display_name[:12], activate_source=False)
hdmi_network = HDMINetwork(adapter, loop=loop)
def _adapter_watchdog(now=None):
_LOGGER.debug("Reached _adapter_watchdog")
event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog)
if not adapter.initialized:
_LOGGER.info("Adapter not initialized; Trying to restart")
hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)
adapter.init()
hdmi_network.set_initialized_callback(
partial(event.async_call_later, hass, WATCHDOG_INTERVAL, _adapter_watchdog)
)
def _volume(call):
"""Increase/decrease volume and mute/unmute system."""
mute_key_mapping = {
ATTR_TOGGLE: KEY_MUTE_TOGGLE,
ATTR_ON: KEY_MUTE_ON,
ATTR_OFF: KEY_MUTE_OFF,
}
for cmd, att in call.data.items():
if cmd == CMD_UP:
_process_volume(KEY_VOLUME_UP, att)
elif cmd == CMD_DOWN:
_process_volume(KEY_VOLUME_DOWN, att)
elif cmd == CMD_MUTE:
hdmi_network.send_command(
KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)
)
hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
_LOGGER.info("Audio muted")
else:
_LOGGER.warning("Unknown command %s", cmd)
def _process_volume(cmd, att):
if isinstance(att, (str,)):
att = att.strip()
if att == CMD_PRESS:
hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))
elif att == CMD_RELEASE:
hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
else:
att = 1 if att == "" else int(att)
for _ in range(0, att):
hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))
hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
def _tx(call):
"""Send CEC command."""
data = call.data
if ATTR_RAW in data:
command = CecCommand(data[ATTR_RAW])
else:
if ATTR_SRC in data:
src = data[ATTR_SRC]
else:
src = ADDR_UNREGISTERED
if ATTR_DST in data:
dst = data[ATTR_DST]
else:
dst = ADDR_BROADCAST
if ATTR_CMD in data:
cmd = data[ATTR_CMD]
else:
_LOGGER.error("Attribute 'cmd' is missing")
return False
if ATTR_ATT in data:
if isinstance(data[ATTR_ATT], (list,)):
att = data[ATTR_ATT]
else:
att = reduce(lambda x, y: f"{x}:{y:x}", data[ATTR_ATT])
else:
att = ""
command = CecCommand(cmd, dst, src, att)
hdmi_network.send_command(command)
def _standby(call):
hdmi_network.standby()
def _power_on(call):
hdmi_network.power_on()
def _select_device(call):
"""Select the active device."""
addr = call.data[ATTR_DEVICE]
if not addr:
_LOGGER.error("Device not found: %s", call.data[ATTR_DEVICE])
return
if addr in device_aliases:
addr = device_aliases[addr]
else:
entity = hass.states.get(addr)
_LOGGER.debug("Selecting entity %s", entity)
if entity is not None:
addr = entity.attributes["physical_address"]
_LOGGER.debug("Address acquired: %s", addr)
if addr is None:
_LOGGER.error(
"Device %s has not physical address", call.data[ATTR_DEVICE]
)
return
if not isinstance(addr, (PhysicalAddress,)):
addr = PhysicalAddress(addr)
hdmi_network.active_source(addr)
_LOGGER.info("Selected %s (%s)", call.data[ATTR_DEVICE], addr)
def _update(call):
"""
Update if device update is needed.
Called by service, requests CEC network to update data.
"""
hdmi_network.scan()
def _new_device(device):
"""Handle new devices which are detected by HDMI network."""
key = f"{DOMAIN}.{device.name}"
hass.data[key] = device
ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)
discovery.load_platform(
hass,
ent_platform,
DOMAIN,
discovered={ATTR_NEW: [key]},
hass_config=base_config,
)
def _shutdown(call):
hdmi_network.stop()
def _start_cec(callback_event):
"""Register services and start HDMI network to watch for devices."""
hass.services.register(
DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA
)
hass.services.register(
DOMAIN,
SERVICE_UPDATE_DEVICES,
_update,
schema=SERVICE_UPDATE_DEVICES_SCHEMA,
)
hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)
hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)
hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)
hdmi_network.set_new_device_callback(_new_device)
hdmi_network.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
return True
class CecEntity(Entity):
"""Representation of a HDMI CEC device entity."""
def __init__(self, device, logical) -> None:
"""Initialize the device."""
self._device = device
self._icon = None
self._state = None
self._logical_address = logical
self.entity_id = "%s.%d" % (DOMAIN, self._logical_address)
def _hdmi_cec_unavailable(self, callback_event):
# Change state to unavailable. Without this, entity would remain in
# its last state, since the state changes are pushed.
self._state = STATE_UNAVAILABLE
self.schedule_update_ha_state(False)
def update(self):
"""Update device status."""
device = self._device
if device.power_status in [POWER_OFF, 3]:
self._state = STATE_OFF
elif device.status == STATUS_PLAY:
self._state = STATE_PLAYING
elif device.status == STATUS_STOP:
self._state = STATE_IDLE
elif device.status == STATUS_STILL:
self._state = STATE_PAUSED
elif device.power_status in [POWER_ON, 4]:
self._state = STATE_ON
else:
_LOGGER.warning("Unknown state: %d", device.power_status)
async def async_added_to_hass(self):
"""Register HDMI callbacks after initialization."""
self._device.set_update_callback(self._update)
self.hass.bus.async_listen(
EVENT_HDMI_CEC_UNAVAILABLE, self._hdmi_cec_unavailable
)
def _update(self, device=None):
"""Device status changed, schedule an update."""
self.schedule_update_ha_state(True)
@property
def should_poll(self):
"""
Return false.
CecEntity.update() is called by the HDMI network when there is new data.
"""
return False
@property
def name(self):
"""Return the name of the device."""
return (
f"{self.vendor_name} {self._device.osd_name}"
if (
self._device.osd_name is not None
and self.vendor_name is not None
and self.vendor_name != "Unknown"
)
else "%s %d" % (self._device.type_name, self._logical_address)
if self._device.osd_name is None
else "%s %d (%s)"
% (self._device.type_name, self._logical_address, self._device.osd_name)
)
@property
def vendor_id(self):
"""Return the ID of the device's vendor."""
return self._device.vendor_id
@property
def vendor_name(self):
"""Return the name of the device's vendor."""
return self._device.vendor
@property
def physical_address(self):
"""Return the physical address of device in HDMI network."""
return str(self._device.physical_address)
@property
def type(self):
"""Return a string representation of the device's type."""
return self._device.type_name
@property
def type_id(self):
"""Return the type ID of device."""
return self._device.type
@property
def icon(self):
"""Return the icon for device by its type."""
return (
self._icon
if self._icon is not None
else ICONS_BY_TYPE.get(self._device.type)
if self._device.type in ICONS_BY_TYPE
else ICON_UNKNOWN
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
state_attr = {}
if self.vendor_id is not None:
state_attr[ATTR_VENDOR_ID] = self.vendor_id
state_attr[ATTR_VENDOR_NAME] = self.vendor_name
if self.type_id is not None:
state_attr[ATTR_TYPE_ID] = self.type_id
state_attr[ATTR_TYPE] = self.type
if self.physical_address is not None:
state_attr[ATTR_PHYSICAL_ADDRESS] = self.physical_address
return state_attr
|
{
"content_hash": "0cb3a0743be8b2cf7a1168bd985d0e85",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 87,
"avg_line_length": 31.470347648261757,
"alnum_prop": 0.5932159334589642,
"repo_name": "adrienbrault/home-assistant",
"id": "c7dfd335c3212f7c92d4e2c0229be5efae4447f5",
"size": "15389",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hdmi_cec/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import numpy as np
from nnabla.models.object_detection.draw_utils import DrawBoundingBoxes
from nnabla.utils.image_utils import imresize
def draw_bounding_boxes(img, bboxes, names, colors=None, thresh=0.5):
'''
Draw bounding boxes for the detected objects from the transformed coordinates.
Args:
img (numpy.ndarray) : Input image
bboxes (numpy.ndarray):
Transformed bounding box coordinates from the model.
names (list of str): Names of the categories in the dataset
colors (list of tuple of 3 ints): Colors for bounding boxes
thresh (float): Score threshold for drawing bounding boxes.
'''
if colors is None:
rng = np.random.RandomState(1223)
colors = rng.randint(0, 256, (len(names), 3)).astype(np.uint8)
colors = [tuple(c.tolist()) for c in colors]
im_h, im_w = img.shape[:2]
draw = DrawBoundingBoxes(img, colors)
for bb in bboxes:
x, y, w, h = bb[:4]
dw = w / 2.
dh = h / 2.
x0 = int(np.clip(x - dw, 0, im_w))
y0 = int(np.clip(y - dh, 0, im_h))
x1 = int(np.clip(x + dw, 0, im_w))
y1 = int(np.clip(y + dh, 0, im_h))
det_ind = np.where(bb[5:] > thresh)[0]
if len(det_ind) == 0:
continue
prob = bb[5 + det_ind]
label = ', '.join("{}: {:.2f}%".format(
names[det_ind[j]], prob[j] * 100) for j in range(len(det_ind)))
print("[INFO] {}".format(label))
draw.draw((x0, y0, x1, y1), det_ind[0], label)
return draw.get()
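# Minimal usage sketch (illustrative values, assumes nnabla is installed): boxes
# are given in centre format (x, y, w, h) in pixels, followed by an objectness
# score at index 4 (ignored here) and one probability per class from index 5 on.
def _draw_bounding_boxes_demo():
    img = np.zeros((416, 416, 3), np.uint8)
    # one 100x80 box centred in the image, objectness 0.9, class scores [0.1, 0.8]
    bboxes = np.array([[208.0, 208.0, 100.0, 80.0, 0.9, 0.1, 0.8]])
    return draw_bounding_boxes(img, bboxes, names=['person', 'car'])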
def apply_inverse_letterbox_coordinate_transform(bboxes, im_w, im_h, letterbox_w, letterbox_h):
'''
The bounding box coordinates predicted by the model refer to the pre-processed
(letterboxed) image, not to the original image. This function maps the
coordinates back to the original image by applying the inverse letterbox
coordinate transform.
Args:
bboxes:
The bounding box coordinates predicted by the model.
im_w :
Width of the original input image.
im_h :
Height of the original input image.
letterbox_w :
Fraction of the letterbox width occupied by the resized image.
letterbox_h :
Fraction of the letterbox height occupied by the resized image.
'''
bboxes = bboxes.copy()
for bb in bboxes:
x, y, w, h = bb[:4]
x1 = (x - (1 - letterbox_w) / 2.) / letterbox_w * im_w
y1 = (y - (1 - letterbox_h) / 2.) / letterbox_h * im_h
w1 = w * im_w / letterbox_w
h1 = h * im_h / letterbox_h
bb[:4] = x1, y1, w1, h1
return bboxes
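# Worked example (illustrative): a 640x480 image letterboxed to 416x416 gives
# new_w=416 and new_h=312, hence letterbox_w = 416/416 = 1.0 and
# letterbox_h = 312/416 = 0.75. A normalised centre y of 0.5 then maps back to
# (0.5 - (1 - 0.75)/2) / 0.75 * 480 = 240, the centre row of the original image.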
def letterbox(img_orig, h, w):
'''
The input image is pre-processed before being passed to the network in YoloV2. This function applies that pre-processing to the input image.
Args:
img_orig: Input image
w : Desired width of output image after pre-processing. Should be a multiple of 32.
h : Desired height of output image after pre-processing. Should be a multiple of 32.
'''
assert img_orig.dtype == np.uint8
im_h, im_w, _ = img_orig.shape
if (w * 1.0 / im_w) < (h * 1. / im_h):
new_w = w
new_h = int((im_h * w) / im_w)
else:
new_h = h
new_w = int((im_w * h) / im_h)
patch = imresize(img_orig, (new_w, new_h))
img = np.ones((h, w, 3), np.uint8) * 127
# resize
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
img[y0:y0 + new_h, x0:x0 + new_w] = patch
return img, new_w, new_h
class LetterBoxTransform(object):
'''Create an object holding a new letterboxed image as `image` attribute.
Letterboxing is defined as scaling the input image to fit inside the
desired output image frame (letterbox) while preserving the aspect
ratio of the original image. The pixels that are not filled with the
original image pixels become 127.
The created object also provides a functionality to convert bounding box
coordinates back to the original image frame.
Args:
image (numpy.ndarray): An uint8 3-channel image
height (int): Letterbox height
width (int): Letterbox width
'''
def __init__(self, image, height, width):
self.height, self.width = height, width
self.im_h, self.im_w = image.shape[:2]
self.image, self.new_w, self.new_h = letterbox(image, height, width)
def inverse_coordinate_transform(self, coords):
'''Convert the bounding boxes back to the original image frame.
Args:
coords (numpy.ndarray):
`N` x `M` array where `M >= 4` and first 4 elements
of `M` are `x`, `y` (center coordinates of bounding box),
`w` and `h` (bounding box width and height).
'''
return apply_inverse_letterbox_coordinate_transform(
coords, self.im_w, self.im_h, self.new_w * 1.0 / self.width, self.new_h * 1.0 / self.height)
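# Minimal end-to-end sketch (illustrative; `detect` is a hypothetical stand-in
# for a YOLOv2 forward pass returning boxes in the letterboxed frame):
#
#   lbt = LetterBoxTransform(image, height=416, width=416)
#   raw_boxes = detect(lbt.image)                         # run the network on lbt.image
#   boxes = lbt.inverse_coordinate_transform(raw_boxes)   # back to the original frame
#   out = draw_bounding_boxes(image, boxes, names)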
|
{
"content_hash": "362858629e096c4db119d794d0ee639e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 162,
"avg_line_length": 36.00751879699248,
"alnum_prop": 0.5959490499060347,
"repo_name": "sony/nnabla",
"id": "b8d894a79521bbee42965d57fcea2d5543fe501a",
"size": "5421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/nnabla/models/object_detection/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
}
|
"""This module contains regression tests for stats API handlers."""
from grr.gui import api_regression_test_lib
from grr.gui.api_plugins import stats as stats_plugin
from grr.gui.api_plugins.report_plugins import report_plugins_test_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import stats_store as aff4_stats_store
# TODO(user): Implement unit tests in addition to regression tests.
class ApiListStatsStoreMetricsMetadataHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
api_method = "ListStatsStoreMetricsMetadata"
handler = stats_plugin.ApiListStatsStoreMetricsMetadataHandler
def Run(self):
stats_collector = stats.StatsCollector()
stats_collector.RegisterCounterMetric(
"sample_counter", docstring="Sample counter metric.")
stats_collector.RegisterGaugeMetric(
"sample_gauge_value", str, docstring="Sample gauge metric.")
stats_collector.RegisterEventMetric(
"sample_event", docstring="Sample event metric.")
with utils.Stubber(stats, "STATS", stats_collector):
with aff4.FACTORY.Create(
None, aff4_stats_store.StatsStore, mode="w",
token=self.token) as stats_store:
stats_store.WriteStats(process_id="worker_1", sync=True)
self.Check(
"ListStatsStoreMetricsMetadata",
args=stats_plugin.ApiListStatsStoreMetricsMetadataArgs(
component="WORKER"))
class ApiGetStatsStoreMetricHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
api_method = "GetStatsStoreMetric"
handler = stats_plugin.ApiGetStatsStoreMetricHandler
def Run(self):
stats_collector = stats.StatsCollector()
stats_collector.RegisterCounterMetric(
"sample_counter", docstring="Sample counter metric.")
stats_collector.RegisterGaugeMetric(
"sample_gauge_value", float, docstring="Sample gauge metric.")
stats_collector.RegisterEventMetric(
"sample_event", docstring="Sample event metric.")
with utils.Stubber(stats, "STATS", stats_collector):
for i in range(10):
with test_lib.FakeTime(42 + i * 60):
stats_collector.IncrementCounter("sample_counter")
stats_collector.SetGaugeValue("sample_gauge_value", i * 0.5)
stats_collector.RecordEvent("sample_event", 0.42 + 0.5 * i)
with aff4.FACTORY.Create(
None, aff4_stats_store.StatsStore, mode="w",
token=self.token) as stats_store:
stats_store.WriteStats(process_id="worker_1", sync=True)
self.Check(
"GetStatsStoreMetric",
args=stats_plugin.ApiGetStatsStoreMetricArgs(
component="WORKER",
metric_name="sample_counter",
start=42000000,
end=3600000000))
self.Check(
"GetStatsStoreMetric",
args=stats_plugin.ApiGetStatsStoreMetricArgs(
component="WORKER",
metric_name="sample_counter",
start=42000000,
end=3600000000,
rate="1m"))
self.Check(
"GetStatsStoreMetric",
args=stats_plugin.ApiGetStatsStoreMetricArgs(
component="WORKER",
metric_name="sample_gauge_value",
start=42000000,
end=3600000000))
self.Check(
"GetStatsStoreMetric",
args=stats_plugin.ApiGetStatsStoreMetricArgs(
component="WORKER",
metric_name="sample_event",
start=42000000,
end=3600000000))
self.Check(
"GetStatsStoreMetric",
args=stats_plugin.ApiGetStatsStoreMetricArgs(
component="WORKER",
metric_name="sample_event",
start=42000000,
end=3600000000,
distribution_handling_mode="DH_COUNT"))
class ApiListReportsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
api_method = "ListReports"
handler = stats_plugin.ApiListReportsHandler
def Run(self):
with report_plugins_test_mocks.MockedReportPlugins():
self.Check("ListReports")
class ApiGetReportRegressionTest(api_regression_test_lib.ApiRegressionTest):
api_method = "GetReport"
handler = stats_plugin.ApiGetReportHandler
def Run(self):
with report_plugins_test_mocks.MockedReportPlugins():
self.Check(
"GetReport",
args=stats_plugin.ApiGetReportArgs(
name="BarReportPlugin",
start_time=rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")
.AsMicroSecondsFromEpoch(),
duration="4d"))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "c30c11d3c87de1dfee9361f32a1972c6",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 77,
"avg_line_length": 31.058441558441558,
"alnum_prop": 0.6717541292076102,
"repo_name": "pidydx/grr",
"id": "946a0c5ad798a8c2cf4522738cdcbd223789106d",
"size": "4805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/gui/api_plugins/stats_regression_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304935"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26531"
},
{
"name": "HTML",
"bytes": "175613"
},
{
"name": "JavaScript",
"bytes": "25418"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "308592"
},
{
"name": "Python",
"bytes": "6428769"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40128"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.utils.safestring import mark_safe
from django.utils.text import slugify
# Create your models here.
class ProductQuerySet(models.query.QuerySet):
def active(self):
return self.filter(active=True)
class ProductManager(models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
def all(self, *args, **kwargs):
return self.get_queryset().active()
def get_related(self, instance):
products_one = self.get_queryset().filter(categories__in=instance.categories.all())
products_two = self.get_queryset().filter(default=instance.default)
qs = (products_one | products_two).exclude(id=instance.id).distinct()
return qs
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(blank=True, null=True)
price = models.DecimalField(decimal_places=2, max_digits=20)
active = models.BooleanField(default=True)
categories = models.ManyToManyField('Category', blank=True)
default = models.ForeignKey('Category', related_name='default_category', null=True, blank=True)
objects = ProductManager()
class Meta:
ordering = ["-title"]
def __unicode__(self): #def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("product_detail", kwargs={"pk": self.pk})
def get_image_url(self):
img = self.productimage_set.first()
if img:
return img.image.url
return img #None
class Variation(models.Model):
product = models.ForeignKey(Product)
title = models.CharField(max_length=120)
price = models.DecimalField(decimal_places=2, max_digits=20)
sale_price = models.DecimalField(decimal_places=2, max_digits=20, null=True, blank=True)
active = models.BooleanField(default=True)
inventory = models.IntegerField(null=True, blank=True) # None means unlimited stock
def __unicode__(self):
return self.title
def get_price(self):
if self.sale_price is not None:
return self.sale_price
else:
return self.price
def get_html_price(self):
if self.sale_price is not None:
html_text = "<span class='sale-price'>%s</span> <span class='og-price'>%s</span>" %(self.sale_price, self.price)
else:
html_text = "<span class='price'>%s</span>" %(self.price)
return mark_safe(html_text)
def get_absolute_url(self):
return self.product.get_absolute_url()
def add_to_cart(self):
return "%s?item=%s&qty=1" %(reverse("cart"), self.id)
def remove_from_cart(self):
return "%s?item=%s&qty=1&delete=True" %(reverse("cart"), self.id)
def get_title(self):
return "%s - %s" %(self.product.title, self.title)
def product_post_saved_receiver(sender, instance, created, *args, **kwargs):
product = instance
variations = product.variation_set.all()
if variations.count() == 0:
new_var = Variation()
new_var.product = product
new_var.title = "Default"
new_var.price = product.price
new_var.save()
post_save.connect(product_post_saved_receiver, sender=Product)
def image_upload_to(instance, filename):
title = instance.product.title
slug = slugify(title)
basename, file_extension = filename.rsplit(".", 1) # rsplit so filenames containing dots keep only the real extension
new_filename = "%s-%s.%s" %(slug, instance.id, file_extension)
return "products/%s/%s" %(slug, new_filename)
class ProductImage(models.Model):
product = models.ForeignKey(Product)
image = models.ImageField(upload_to=image_upload_to)
def __unicode__(self):
return self.product.title
# Product Category
class Category(models.Model):
title = models.CharField(max_length=120, unique=True)
slug = models.SlugField(unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("category_detail", kwargs={"slug": self.slug })
def image_upload_to_featured(instance, filename):
title = instance.product.title
slug = slugify(title)
basename, file_extension = filename.rsplit(".", 1) # rsplit so filenames containing dots keep only the real extension
new_filename = "%s-%s.%s" %(slug, instance.id, file_extension)
return "products/%s/featured/%s" %(slug, new_filename)
class ProductFeatured(models.Model):
product = models.ForeignKey(Product)
image = models.ImageField(upload_to=image_upload_to_featured)
title = models.CharField(max_length=120, null=True, blank=True)
text = models.CharField(max_length=220, null=True, blank=True)
text_right = models.BooleanField(default=False)
text_css_color = models.CharField(max_length=6, null=True, blank=True)
show_price = models.BooleanField(default=False)
make_image_background = models.BooleanField(default=False)
active = models.BooleanField(default=True)
def __unicode__(self):
return self.product.title
|
{
"content_hash": "00882ce26e41e81fdcd46a9879b8e89f",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 115,
"avg_line_length": 27.75862068965517,
"alnum_prop": 0.7269151138716357,
"repo_name": "codingforentrepreneurs/ecommerce-2",
"id": "f2b1b412743c0356b0ecd30fa2c6a1d8e36c6b7a",
"size": "4830",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/products/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43915"
},
{
"name": "HTML",
"bytes": "29425"
},
{
"name": "JavaScript",
"bytes": "79967"
},
{
"name": "Python",
"bytes": "74241"
}
],
"symlink_target": ""
}
|
"""The auditing system."""
import os
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestAuditSystem(test_lib.FlowTestsBaseclass):
def testFlowExecution(self):
client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")
for _ in test_lib.TestFlowHelper(
"ListDirectory", client_mock, client_id=self.client_id,
pathspec=rdfvalue.PathSpec(
path=os.path.join(self.base_path, "test_img.dd/test directory"),
pathtype=rdfvalue.PathSpec.PathType.OS),
token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/audit/log", token=self.token)
event = fd[0]
self.assertEqual(event.action, rdfvalue.AuditEvent.Action.RUN_FLOW)
self.assertEqual(event.flow_name, "ListDirectory")
self.assertEqual(event.user, self.token.username)
class FlowTestLoader(test_lib.GRRTestLoader):
base_class = TestAuditSystem
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "54acdd573c31aa2f88e0dbe3152aa29a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 26.25,
"alnum_prop": 0.7082251082251082,
"repo_name": "pchaigno/grreat",
"id": "1a71a7d1ee58064f091b126e3ada687723a6ab65",
"size": "1177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/flows/general/audit_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
}
|
"""Unit test for KNX/IP TimerNotify objects."""
from xknx.knxip import KNXIPFrame, TimerNotify
class TestKNXIPTimerNotify:
"""Test class for KNX/IP TimerNotify objects."""
def test_timer_notify(self):
"""Test parsing and streaming TimerNotify KNX/IP packet."""
message_authentication_code = bytes.fromhex(
"72 12 a0 3a aa e4 9d a8" "56 89 77 4c 1d 2b 4d a4"
)
raw = (
bytes.fromhex(
"06 10 09 55 00 24" # KNXnet/IP header - length 36 octets
"c0 c1 c2 c3 c4 c5" # timer value
"00 fa 12 34 56 78" # KNX serial number
"af fe" # message tag
)
+ message_authentication_code
)
knxipframe = KNXIPFrame()
knxipframe.from_knx(raw)
assert isinstance(knxipframe.body, TimerNotify)
assert knxipframe.body.timer_value == 211938428830917
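# 211938428830917 == 0xc0c1c2c3c4c5, i.e. the six timer-value octets read big-endian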
assert knxipframe.body.serial_number == bytes.fromhex("00 fa 12 34 56 78")
assert knxipframe.body.message_tag == bytes.fromhex("af fe")
assert (
knxipframe.body.message_authentication_code == message_authentication_code
)
assert knxipframe.to_knx() == raw
timer_notify = TimerNotify(
timer_value=211938428830917,
serial_number=bytes.fromhex("00 fa 12 34 56 78"),
message_tag=bytes.fromhex("af fe"),
message_authentication_code=message_authentication_code,
)
knxipframe2 = KNXIPFrame.init_from_body(timer_notify)
assert knxipframe2.to_knx() == raw
|
{
"content_hash": "0875620774927047a0567b60f96dbb8e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 86,
"avg_line_length": 37.348837209302324,
"alnum_prop": 0.599626400996264,
"repo_name": "XKNX/xknx",
"id": "51c3c6be22e4afa2c3e512c3e899243d076f8f58",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/knxip_tests/timer_notify_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1545198"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
from sqlalchemy.orm import scoped_session, create_session
from flask import current_app
db_session = scoped_session(
lambda: create_session(bind=current_app.db_engine, autocommit=False, autoflush=False))
def db_sync():
from sqlalchemy import DDL, event
# Import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling db_sync()
import mushi.core.db.models
event.listen(
mushi.core.db.models.Issue.__table__,
'after_create',
DDL('ALTER TABLE %(table)s AUTO_INCREMENT = 1;').execute_if(
dialect=('postgresql', 'mysql'))
)
mushi.core.db.models.Base.metadata.create_all(bind=current_app.db_engine)
|
{
"content_hash": "c3b58729672400afb11346c26a6bd2a3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 30.72,
"alnum_prop": 0.6927083333333334,
"repo_name": "kyouko-taiga/trexmo",
"id": "4ea6221e9c8899fe98aedf123e788f339f8a8965",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trexmo/core/db/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "624"
},
{
"name": "CSS",
"bytes": "489"
},
{
"name": "HTML",
"bytes": "8983"
},
{
"name": "JavaScript",
"bytes": "87874"
},
{
"name": "Python",
"bytes": "74429"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
}
|
from fluxgui import xfluxcontroller
class FluxController(xfluxcontroller.XfluxController):
"""
FluxController is the same as XfluxController except that it
requires a Settings instance and updates that instance when
relevant controller calls are made.
"""
def __init__(self, settings):
self.settings = settings
super(FluxController, self).__init__(
**self.settings.xflux_settings_dict())
def start(self):
if self.settings.zipcode == "" and self.settings.latitude == "":
raise ValueError("Cannot start xflux, missing zipcode and latitude")
super(FluxController, self).start()
# Controller methods that don't touch xflux
def set_autostart(self, autos):
self.settings.autostart = autos
# xflux methods that should also update settings
def set_xflux_latitude(self, lat):
self.settings.latitude = lat
super(FluxController, self).set_xflux_latitude(lat)
def set_xflux_longitude(self, longit):
self.settings.longitude = longit
super(FluxController, self).set_xflux_longitude(longit)
def set_xflux_zipcode(self, zipc):
self.settings.zipcode = zipc
super(FluxController, self).set_xflux_zipcode(zipc)
def _set_xflux_color(self, col):
self.settings.color = col
super(FluxController, self)._set_xflux_color(col)
def _get_xflux_color(self):
return super(FluxController, self)._get_xflux_color()
color=property(_get_xflux_color, _set_xflux_color)
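# Minimal usage sketch (illustrative): FluxController expects a Settings object
# exposing at least `zipcode`, `latitude`, `longitude`, `color`, `autostart` and
# an `xflux_settings_dict()` method whose result is unpacked into
# XfluxController's constructor, e.g.:
#
#   controller = FluxController(settings)  # settings: a fluxgui Settings instance
#   controller.start()                     # raises ValueError if zipcode and latitude are both empty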
|
{
"content_hash": "40beaf7612ad8f139535c3b79e57fc8b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 34.422222222222224,
"alnum_prop": 0.6726920593931569,
"repo_name": "NHellFire/f.lux-indicator-applet",
"id": "90f8f23e1a23dbe2b2827f5667d68ed058d6cf3d",
"size": "1549",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/fluxgui/fluxcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26732"
}
],
"symlink_target": ""
}
|
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, mutually_exclusive
class Source(AWSProperty):
props = {
'Password': (basestring, False),
'Revision': (basestring, False),
'SshKey': (basestring, False),
'Type': (basestring, False),
'Url': (basestring, False),
'Username': (basestring, False),
}
class SslConfiguration(AWSProperty):
props = {
'Certificate': (basestring, True),
'Chain': (basestring, False),
'PrivateKey': (basestring, True),
}
class ChefConfiguration(AWSProperty):
props = {
'BerkshelfVersion': (basestring, False),
'ManageBerkshelf': (boolean, False),
}
class Recipes(AWSProperty):
props = {
'Configure': ([basestring], False),
'Deploy': ([basestring], False),
'Setup': ([basestring], False),
'Shutdown': ([basestring], False),
'Undeploy': ([basestring], False),
}
def validate_volume_type(volume_type):
volume_types = ('standard', 'io1', 'gp2')
if volume_type not in volume_types:
raise ValueError("VolumeType (given: %s) must be one of: %s" % (
volume_type, ', '.join(volume_types)))
return volume_type
class VolumeConfiguration(AWSProperty):
props = {
'Encrypted': (boolean, False),
'Iops': (integer, False),
'MountPoint': (basestring, True),
'NumberOfDisks': (integer, True),
'RaidLevel': (integer, False),
'Size': (integer, True),
'VolumeType': (validate_volume_type, False)
}
def validate(self):
volume_type = self.properties.get('VolumeType')
iops = self.properties.get('Iops')
if volume_type == 'io1' and not iops:
raise ValueError("Must specify Iops if VolumeType is 'io1'.")
if volume_type != 'io1' and iops:
raise ValueError("Cannot specify Iops if VolumeType is not 'io1'.")
class StackConfigurationManager(AWSProperty):
props = {
'Name': (basestring, False),
'Version': (basestring, False),
}
class TimeBasedAutoScaling(AWSProperty):
props = {
'Monday': (dict, False),
'Tuesday': (dict, False),
'Wednesday': (dict, False),
'Thursday': (dict, False),
'Friday': (dict, False),
'Saturday': (dict, False),
'Sunday': (dict, False),
}
class AutoScalingThresholds(AWSProperty):
props = {
'CpuThreshold': (float, False),
'IgnoreMetricsTime': (integer, False),
'InstanceCount': (integer, False),
'LoadThreshold': (float, False),
'MemoryThreshold': (float, False),
'ThresholdsWaitTime': (integer, False),
}
class Environment(AWSProperty):
props = {
'Key': (basestring, True),
'Secure': (bool, False),
'Value': (basestring, True),
}
class LoadBasedAutoScaling(AWSProperty):
props = {
'DownScaling': (AutoScalingThresholds, False),
'Enable': (bool, False),
'UpScaling': (AutoScalingThresholds, False),
}
def validate_data_source_type(data_source_type):
data_source_types = (
'AutoSelectOpsworksMysqlInstance',
'OpsworksMysqlInstance',
'RdsDbInstance'
)
if data_source_type not in data_source_types:
raise ValueError("Type (given: %s) must be one of: %s" % (
data_source_type, ', '.join(data_source_types)))
return data_source_type
class DataSource(AWSProperty):
props = {
'Arn': (basestring, False),
'DatabaseName': (basestring, False),
'Type': (validate_data_source_type, False)
}
class App(AWSObject):
resource_type = "AWS::OpsWorks::App"
props = {
'AppSource': (Source, False),
'Attributes': (dict, False),
'DataSources': ([DataSource], False),
'Description': (basestring, False),
'Domains': ([basestring], False),
'EnableSsl': (boolean, False),
'Environment': ([Environment], False),
'Name': (basestring, True),
'Shortname': (basestring, False),
'SslConfiguration': (SslConfiguration, False),
'StackId': (basestring, True),
'Type': (basestring, True),
}
class ElasticLoadBalancerAttachment(AWSObject):
resource_type = "AWS::OpsWorks::ElasticLoadBalancerAttachment"
props = {
'ElasticLoadBalancerName': (basestring, True),
'LayerId': (basestring, True),
'Tags': ((Tags, list), False),
}
class EbsBlockDevice(AWSProperty):
props = {
'DeleteOnTermination': (boolean, False),
'Iops': (integer, False),
'SnapshotId': (basestring, False),
'VolumeSize': (integer, False),
'VolumeType': (basestring, False),
}
class BlockDeviceMapping(AWSProperty):
props = {
'DeviceName': (basestring, False),
'Ebs': (EbsBlockDevice, False),
'NoDevice': (basestring, False),
'VirtualName': (basestring, False),
}
def validate(self):
conds = [
'Ebs',
'VirtualName',
]
mutually_exclusive(self.__class__.__name__, self.properties, conds)
class Instance(AWSObject):
resource_type = "AWS::OpsWorks::Instance"
props = {
'AgentVersion': (basestring, False),
'AmiId': (basestring, False),
'Architecture': (basestring, False),
'AutoScalingType': (basestring, False),
'AvailabilityZone': (basestring, False),
'BlockDeviceMappings': ([BlockDeviceMapping], False),
'EbsOptimized': (boolean, False),
'ElasticIps': ([basestring], False),
'Hostname': (basestring, False),
'InstallUpdatesOnBoot': (boolean, False),
'InstanceType': (basestring, True),
'LayerIds': ([basestring], True),
'Os': (basestring, False),
'RootDeviceType': (basestring, False),
'SshKeyName': (basestring, False),
'StackId': (basestring, True),
'SubnetId': (basestring, False),
'Tenancy': (basestring, False),
'TimeBasedAutoScaling': (TimeBasedAutoScaling, False),
'VirtualizationType': (basestring, False),
'Volumes': ([basestring], False),
}
class ShutdownEventConfiguration(AWSProperty):
props = {
'DelayUntilElbConnectionsDrained': (boolean, False),
'ExecutionTimeout': (integer, False),
}
class LifeCycleConfiguration(AWSProperty):
props = {
'ShutdownEventConfiguration': (ShutdownEventConfiguration, False),
}
class Layer(AWSObject):
resource_type = "AWS::OpsWorks::Layer"
props = {
'Attributes': (dict, False),
'AutoAssignElasticIps': (boolean, True),
'AutoAssignPublicIps': (boolean, True),
'CustomInstanceProfileArn': (basestring, False),
'CustomJson': ((basestring, dict), False),
'CustomRecipes': (Recipes, False),
'CustomSecurityGroupIds': ([basestring], False),
'EnableAutoHealing': (boolean, True),
'InstallUpdatesOnBoot': (boolean, False),
'LifecycleEventConfiguration': (LifeCycleConfiguration, False),
'LoadBasedAutoScaling': (LoadBasedAutoScaling, False),
'Name': (basestring, True),
'Packages': ([basestring], False),
'Shortname': (basestring, True),
'StackId': (basestring, True),
'Type': (basestring, True),
'VolumeConfigurations': ([VolumeConfiguration], False),
}
class RdsDbInstance(AWSProperty):
props = {
'DbPassword': (basestring, True),
'DbUser': (basestring, True),
'RdsDbInstanceArn': (basestring, True)
}
class ElasticIp(AWSProperty):
props = {
'Ip': (basestring, True),
'Name': (basestring, False),
}
class Stack(AWSObject):
resource_type = "AWS::OpsWorks::Stack"
props = {
'AgentVersion': (basestring, False),
'Attributes': (dict, False),
'ChefConfiguration': (ChefConfiguration, False),
'CloneAppIds': ([basestring], False),
'ClonePermissions': (boolean, False),
'ConfigurationManager': (StackConfigurationManager, False),
'CustomCookbooksSource': (Source, False),
'CustomJson': ((basestring, dict), False),
'DefaultAvailabilityZone': (basestring, False),
'DefaultInstanceProfileArn': (basestring, True),
'DefaultOs': (basestring, False),
'DefaultRootDeviceType': (basestring, False),
'DefaultSshKeyName': (basestring, False),
'DefaultSubnetId': (basestring, False),
'EcsClusterArn': (basestring, False),
'ElasticIps': ([ElasticIp], False),
'HostnameTheme': (basestring, False),
'Name': (basestring, True),
'RdsDbInstances': ([RdsDbInstance], False),
'ServiceRoleArn': (basestring, True),
'SourceStackId': (basestring, False),
'Tags': ((Tags, list), False),
'UseCustomCookbooks': (boolean, False),
'UseOpsworksSecurityGroups': (boolean, False),
'VpcId': (basestring, False),
}
def validate(self):
if 'VpcId' in self.properties and \
'DefaultSubnetId' not in self.properties:
raise ValueError('Using VpcId requires DefaultSubnetId to be '
'specified')
return True
class UserProfile(AWSObject):
resource_type = "AWS::OpsWorks::UserProfile"
props = {
'AllowSelfManagement': (boolean, False),
'IamUserArn': (basestring, True),
'SshPublicKey': (basestring, False),
'SshUsername': (basestring, False),
}
class Volume(AWSObject):
resource_type = "AWS::OpsWorks::Volume"
props = {
'Ec2VolumeId': (basestring, True),
'MountPoint': (basestring, False),
'Name': (basestring, False),
'StackId': (basestring, True),
}
class EngineAttribute(AWSProperty):
props = {
'Name': (basestring, False),
'Value': (basestring, False),
}
class Server(AWSObject):
resource_type = "AWS::OpsWorksCM::Server"
props = {
'AssociatePublicIpAddress': (boolean, False),
'BackupId': (basestring, False),
'BackupRetentionCount': (integer, False),
'DisableAutomatedBackup': (boolean, False),
'Engine': (basestring, False),
'EngineAttributes': ([EngineAttribute], False),
'EngineModel': (basestring, False),
'EngineVersion': (basestring, False),
'InstanceProfileArn': (basestring, True),
'InstanceType': (basestring, True),
'KeyPair': (basestring, False),
'PreferredBackupWindow': (basestring, False),
'PreferredMaintenanceWindow': (basestring, False),
'SecurityGroupIds': ([basestring], False),
'ServerName': (basestring, False),
'ServiceRoleArn': (basestring, True),
'SubnetIds': ([basestring], False),
}
|
{
"content_hash": "03c5cb594d2d18382f5faf7c35da988b",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 79,
"avg_line_length": 30.431754874651812,
"alnum_prop": 0.5956064073226545,
"repo_name": "johnctitus/troposphere",
"id": "cea522ffbceefdb2b96d25498ed1aad501786a3e",
"size": "11043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troposphere/opsworks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1427"
},
{
"name": "Python",
"bytes": "663167"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import sys
import io
NAME = 'ipwhois'
VERSION = '0.10.3'
AUTHOR = "Philip Hane"
AUTHOR_EMAIL = "secynic AT gmail DOT com"
DESCRIPTION = "Retrieve and parse whois data for IPv4 and IPv6 addresses."
KEYWORDS = [
"Python",
"WHOIS",
"RWhois",
"ASN",
"IP Address",
"IP",
"IPv4",
"IPv6",
"IETF",
"REST",
"Arin",
"Ripe",
"Apnic",
"Lacnic",
"Afrinic",
"NIC"
]
LONG_DESCRIPTION = '\n\n'.join([io.open('README.rst', 'r',
encoding='utf-8').read(),
io.open('CHANGES.rst', 'r',
encoding='utf-8').read()])
LICENSE = io.open('LICENSE.txt', 'r', encoding='utf-8').read()
URL = "https://github.com/secynic/ipwhois"
DOWNLOAD_URL = "https://github.com/secynic/ipwhois/tarball/master"
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet",
"Topic :: Software Development",
]
PACKAGES = ['ipwhois']
PACKAGE_DATA = {'ipwhois': ['data/*.xml', 'data/*.csv']}
INSTALL_REQUIRES = []
if sys.version_info >= (3,):
INSTALL_REQUIRES.append("dnspython3")
else:
INSTALL_REQUIRES.append("dnspython")
if sys.version_info < (3, 3,):
INSTALL_REQUIRES.append("ipaddr")
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
keywords=KEYWORDS,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
packages=PACKAGES,
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES
)
|
{
"content_hash": "cde086bce627bc3835bfa9d69167b496",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 74,
"avg_line_length": 26.05952380952381,
"alnum_prop": 0.6034719049794427,
"repo_name": "athrun/ipwhois",
"id": "15dc8d21fa35bb04a4514e63f1a3fe4e59e5fbf8",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6470"
},
{
"name": "Makefile",
"bytes": "6786"
},
{
"name": "Python",
"bytes": "95878"
}
],
"symlink_target": ""
}
|
class TrunkPayload(object):
"""Payload for trunk-related callback registry notifications."""
def __init__(self, context, trunk_id, current_trunk=None,
original_trunk=None, subports=None):
self.context = context
self.trunk_id = trunk_id
self.current_trunk = current_trunk
self.original_trunk = original_trunk
self.subports = subports if subports else []
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
# (end of neutron/services/trunk/callbacks.py, from the eayunstack/neutron repository, Apache-2.0)
"""This module contains stuff related to the datastore."""
from google.appengine.ext import db
class ZoneRun(db.Model):
"""A zone run is when you water a zone for some amount of time."""
zone = db.IntegerProperty()
runtime_seconds = db.IntegerProperty()
created_at = db.DateTimeProperty(auto_now_add=True)
def get_recent_zone_runs(num_zone_runs_to_show=100):
return list(ZoneRun.gql(
"ORDER BY created_at DESC LIMIT %s" % num_zone_runs_to_show))
# (end of IrrduinoServer/irrduinoserver/model.py, from the google/irrduino repository, Apache-2.0)
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 4):
raise RuntimeError("Python 3.4 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
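# Hedged example of register_loader_type(): this module itself calls
# register_loader_type(object, NullProvider) and
# register_loader_type(zipimport.zipimporter, ZipProvider) further below;
# a custom PEP 302 loader type is hooked up the same way with its own
# IResourceProvider factory.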
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
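# Worked examples derived from the logic above (illustrative only):
#
#   compatible_platforms(None, 'macosx-10.13-x86_64')                    # True  (None matches anything)
#   compatible_platforms('macosx-10.9-x86_64', 'macosx-10.13-x86_64')    # True  (built on 10.9, running 10.13)
#   compatible_platforms('macosx-10.13-x86_64', 'macosx-10.9-x86_64')    # False (egg built for 10.13 cannot run on 10.9)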
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
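# Illustrative sketch of the entry-point helpers above (hypothetical
# distribution "mytool" exposing a "console_scripts" entry point "mytool"):
#
#   main = load_entry_point('mytool', 'console_scripts', 'mytool')
#   info = get_entry_info('mytool', 'console_scripts', 'mytool')   # EntryPoint or None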
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception
if any requirements are found on the path that have the correct name
but the wrong version. Otherwise, if an `installer` is supplied it
will be invoked to obtain the correct version of the requirement and
activate it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
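# Illustrative sketch of resolve(), mirroring what require() below does
# (hypothetical requirement string):
#
#   ws = WorkingSet()
#   for dist in ws.resolve(parse_requirements(['ipwhois>=0.10'])):
#       ws.add(dist)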
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default), call it on all existing distributions as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
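# Illustrative sketch of Environment (hypothetical project name "ipwhois"):
#
#   env = Environment()          # snapshot distributions found on sys.path
#   dists = env['ipwhois']       # newest-to-oldest list, possibly empty
#   for project in env:          # iterate unique project names with entries
#       pass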
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
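# Worked examples for the name/version helpers above (illustrative only):
#
#   safe_name('my_package')      # 'my-package'  (runs outside [A-Za-z0-9.] become '-')
#   safe_version('1.0-beta')     # '1.0b0'       (normalized by packaging.version)
#   safe_extra('Fancy Extra')    # 'fancy_extra'
#   to_filename('my-package')    # 'my_package'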
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
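# Illustrative sketch of the marker helpers above:
#
#   evaluate_marker('python_version >= "2.7"')   # True or False for this interpreter
#   invalid_marker('python_version >= "2.7"')    # False (the marker parses)
#   invalid_marker('python_version >=')          # returns a SyntaxError instance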
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
if six.PY2:
return value
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep) or
posixpath.isabs(path) or
ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
warnings.warn(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
stacklevel=4,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (
entry
for entry in entries
if dist_factory(path_item, entry, only)
)
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
        # Ignore the directory if it does not exist, is not a directory,
        # or permission is denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root, entry, metadata, precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
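# Illustrative sketch, added for clarity and not part of the original module;
# ``MyImporter`` is a hypothetical importer class. A handler returns the
# subpath that child packages should use, or None to decline:
#
#   def my_ns_handler(importer, path_entry, moduleName, module):
#       return None
#   register_namespace_handler(MyImporter, my_ns_handler)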
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains symlink
    components. Using os.path.abspath() works around this limitation. A fix
    in os.getcwd() would probably be better, in Cygwin even more so, except
    that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
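# Added note: EGG_NAME splits an egg basename into its parts, e.g.
# "Sample-1.2-py3.6-linux-x86_64" parses as name 'Sample', ver '1.2',
# pyver '3.6', plat 'linux-x86_64'; the trailing groups are optional, so a
# bare "Sample" matches with only the name set.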
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
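    # Added illustration of the syntax documented above:
    #   ep = EntryPoint.parse("pytest = pytest:main [dev]")
    #   ep.name         -> 'pytest'
    #   ep.module_name  -> 'pytest'
    #   ep.attrs        -> ('main',)
    #   ep.extras       -> ('dev',)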
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it is also unlikely to have been
        # supplied deliberately; more often it comes from setuptools
        # attempting to parse a filename and including it in the list. So we
        # gate this warning on whether the version is non-empty.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(
attr for attr in self._provider.__dir__()
if not attr.startswith('_')
)
)
if not hasattr(object, '__dir__'):
# python 2.7 not supported
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
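    # Added note: for an egg at /site/Foo-1.0.egg whose parent directory
    # /site is already on the path (hypothetical names), insert_on() places
    # the egg just ahead of /site so its contents shadow anything unpacked
    # there, per the rules documented in the docstring above.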
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an old
        safe_version, so their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). Such distributions will not be parsed properly
        downstream by Distribution and safe_version, so take an extra step
        and try to get the version number from the metadata file itself
        instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
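# Illustrative sketch, added for clarity and not part of the original module:
# the parser above accepts a string (or nested iterable), drops " #" comments
# and joins backslash continuations before handing each spec to Requirement.
def _example_parse_requirements():
    specs = "foo>=1.0  # comment dropped\nbar[extra]==2.1"
    return [(r.project_name, r.specs, r.extras) for r in parse_requirements(specs)]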
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
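# Illustrative sketch, added for clarity and not part of the original module:
# split_sections() turns an INI-like blob into (section, lines) pairs, with
# any leading unsectioned lines reported under a section of None.
def _example_split_sections():
    src = "console_scripts\n[dev]\npytest\n[docs]\nsphinx"
    return list(split_sections(src))
    # -> [(None, ['console_scripts']), ('dev', ['pytest']), ('docs', ['sphinx'])]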
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(
dist.activate(replace=False)
for dist in working_set
)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
# --- source: kub/services/archive/cdk/python/sample-app/.env/lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py (repo: randyzingle/tools, license: apache-2.0) ---
from oslo_serialization import jsonutils
import six
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.virt import hardware
@base.NovaObjectRegistry.register
class RequestSpec(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: ImageMeta version 1.6
# Version 1.2: SchedulerRetries version 1.1
# Version 1.3: InstanceGroup version 1.10
# Version 1.4: ImageMeta version 1.7
# Version 1.5: Added get_by_instance_uuid(), create(), save()
VERSION = '1.5'
fields = {
'id': fields.IntegerField(),
'image': fields.ObjectField('ImageMeta', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'project_id': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('Flavor', nullable=False),
'num_instances': fields.IntegerField(default=1),
'ignore_hosts': fields.ListOfStringsField(nullable=True),
'force_hosts': fields.ListOfStringsField(nullable=True),
'force_nodes': fields.ListOfStringsField(nullable=True),
'retry': fields.ObjectField('SchedulerRetries', nullable=True),
'limits': fields.ObjectField('SchedulerLimits', nullable=True),
'instance_group': fields.ObjectField('InstanceGroup', nullable=True),
        # NOTE(sbauza): Since hints depend on which filters are running, we
        # prefer to let the API validate the hints per filter and just give
        # the RequestSpec object a free-form dictionary
'scheduler_hints': fields.DictOfListOfStringsField(nullable=True),
'instance_uuid': fields.UUIDField(),
}
@property
def vcpus(self):
return self.flavor.vcpus
@property
def memory_mb(self):
return self.flavor.memory_mb
@property
def root_gb(self):
return self.flavor.root_gb
@property
def ephemeral_gb(self):
return self.flavor.ephemeral_gb
@property
def swap(self):
return self.flavor.swap
def _image_meta_from_image(self, image):
if isinstance(image, objects.ImageMeta):
self.image = image
elif isinstance(image, dict):
# NOTE(sbauza): Until Nova is fully providing an ImageMeta object
# for getting properties, we still need to hydrate it here
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side and if the image is an ImageMeta
self.image = objects.ImageMeta.from_dict(image)
else:
self.image = None
def _from_instance(self, instance):
if isinstance(instance, obj_instance.Instance):
# NOTE(sbauza): Instance should normally be a NovaObject...
getter = getattr
elif isinstance(instance, dict):
# NOTE(sbauza): ... but there are some cases where request_spec
# has an instance key as a dictionary, just because
# select_destinations() is getting a request_spec dict made by
# sched_utils.build_request_spec()
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side
getter = lambda x, y: x.get(y)
else:
# If the instance is None, there is no reason to set the fields
return
instance_fields = ['numa_topology', 'pci_requests', 'uuid',
'project_id', 'availability_zone']
for field in instance_fields:
if field == 'uuid':
setattr(self, 'instance_uuid', getter(instance, field))
elif field == 'pci_requests':
self._from_instance_pci_requests(getter(instance, field))
elif field == 'numa_topology':
self._from_instance_numa_topology(getter(instance, field))
else:
setattr(self, field, getter(instance, field))
def _from_instance_pci_requests(self, pci_requests):
if isinstance(pci_requests, dict):
pci_req_cls = objects.InstancePCIRequests
self.pci_requests = pci_req_cls.from_request_spec_instance_props(
pci_requests)
else:
self.pci_requests = pci_requests
def _from_instance_numa_topology(self, numa_topology):
if isinstance(numa_topology, dict):
self.numa_topology = hardware.instance_topology_from_instance(
dict(numa_topology=numa_topology))
else:
self.numa_topology = numa_topology
def _from_flavor(self, flavor):
if isinstance(flavor, objects.Flavor):
self.flavor = flavor
elif isinstance(flavor, dict):
# NOTE(sbauza): Again, request_spec is primitived by
# sched_utils.build_request_spec() and passed to
# select_destinations() like this
# TODO(sbauza): To be removed once all RequestSpec hydrations are
# done on the conductor side
self.flavor = objects.Flavor(**flavor)
def _from_retry(self, retry_dict):
self.retry = (SchedulerRetries.from_dict(self._context, retry_dict)
if retry_dict else None)
def _populate_group_info(self, filter_properties):
if filter_properties.get('instance_group'):
# New-style group information as a NovaObject, we can directly set
# the field
self.instance_group = filter_properties.get('instance_group')
elif filter_properties.get('group_updated') is True:
# Old-style group information having ugly dict keys containing sets
# NOTE(sbauza): Can be dropped once select_destinations is removed
policies = list(filter_properties.get('group_policies'))
hosts = list(filter_properties.get('group_hosts'))
members = list(filter_properties.get('group_members'))
self.instance_group = objects.InstanceGroup(policies=policies,
hosts=hosts,
members=members)
# hosts has to be not part of the updates for saving the object
self.instance_group.obj_reset_changes(['hosts'])
else:
# Set the value anyway to avoid any call to obj_attr_is_set for it
self.instance_group = None
def _from_limits(self, limits_dict):
self.limits = SchedulerLimits.from_dict(limits_dict)
def _from_hints(self, hints_dict):
if hints_dict is None:
self.scheduler_hints = None
return
self.scheduler_hints = {
hint: value if isinstance(value, list) else [value]
for hint, value in six.iteritems(hints_dict)}
@classmethod
def from_primitives(cls, context, request_spec, filter_properties):
"""Returns a new RequestSpec object by hydrating it from legacy dicts.
Deprecated. A RequestSpec object is created early in the boot process
using the from_components method. That object will either be passed to
places that require it, or it can be looked up with
get_by_instance_uuid. This method can be removed when there are no
longer any callers. Because the method is not remotable it is not tied
to object versioning.
        This helper is not meant to keep the legacy dicts alive in the nova
        codebase; it only gives a temporary way to populate the Spec object
        until we get rid of scheduler_utils' build_request_spec() and the
        filter_properties hydration in the conductor.
:param context: a context object
:param request_spec: An old-style request_spec dictionary
:param filter_properties: An old-style filter_properties dictionary
"""
num_instances = request_spec.get('num_instances', 1)
spec = cls(context, num_instances=num_instances)
# Hydrate from request_spec first
image = request_spec.get('image')
spec._image_meta_from_image(image)
instance = request_spec.get('instance_properties')
spec._from_instance(instance)
flavor = request_spec.get('instance_type')
spec._from_flavor(flavor)
# Hydrate now from filter_properties
spec.ignore_hosts = filter_properties.get('ignore_hosts')
spec.force_hosts = filter_properties.get('force_hosts')
spec.force_nodes = filter_properties.get('force_nodes')
retry = filter_properties.get('retry', {})
spec._from_retry(retry)
limits = filter_properties.get('limits', {})
spec._from_limits(limits)
spec._populate_group_info(filter_properties)
scheduler_hints = filter_properties.get('scheduler_hints', {})
spec._from_hints(scheduler_hints)
return spec
def get_scheduler_hint(self, hint_name, default=None):
"""Convenient helper for accessing a particular scheduler hint since
it is hydrated by putting a single item into a list.
        To reduce complexity, the helper returns the single string when the
        requested hint is a one-element list, and otherwise returns the value
        as-is (i.e. the list). If the hint does not exist (or scheduler_hints
        is None), the default value is returned.
:param hint_name: name of the hint
:param default: the default value if the hint is not there
"""
if (not self.obj_attr_is_set('scheduler_hints')
or self.scheduler_hints is None):
return default
hint_val = self.scheduler_hints.get(hint_name, default)
return (hint_val[0] if isinstance(hint_val, list)
and len(hint_val) == 1 else hint_val)
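    # Added note: with scheduler_hints = {'group': ['uuid1']} the helper
    # returns the bare string 'uuid1', while a multi-valued hint such as
    # {'hosts': ['h1', 'h2']} is returned as the list itself.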
def _to_legacy_image(self):
return base.obj_to_primitive(self.image) if (
self.obj_attr_is_set('image') and self.image) else {}
def _to_legacy_instance(self):
# NOTE(sbauza): Since the RequestSpec only persists a few Instance
# fields, we can only return a dict.
instance = {}
instance_fields = ['numa_topology', 'pci_requests',
'project_id', 'availability_zone', 'instance_uuid']
for field in instance_fields:
if not self.obj_attr_is_set(field):
continue
if field == 'instance_uuid':
instance['uuid'] = getattr(self, field)
else:
instance[field] = getattr(self, field)
flavor_fields = ['root_gb', 'ephemeral_gb', 'memory_mb', 'vcpus']
if not self.obj_attr_is_set('flavor'):
return instance
for field in flavor_fields:
instance[field] = getattr(self.flavor, field)
return instance
def _to_legacy_group_info(self):
# NOTE(sbauza): Since this is only needed until the AffinityFilters are
# modified by using directly the RequestSpec object, we need to keep
# the existing dictionary as a primitive.
return {'group_updated': True,
'group_hosts': set(self.instance_group.hosts),
'group_policies': set(self.instance_group.policies)}
def to_legacy_request_spec_dict(self):
"""Returns a legacy request_spec dict from the RequestSpec object.
        Since we need to manage backwards compatibility and rolling upgrades
        within our RPC API, we have to provide a helper that turns the
        RequestSpec object back into a legacy primitive dict until we drop
        support for old Scheduler RPC API versions.
If you don't understand why this method is needed, please don't use it.
"""
req_spec = {}
if not self.obj_attr_is_set('num_instances'):
req_spec['num_instances'] = self.fields['num_instances'].default
else:
req_spec['num_instances'] = self.num_instances
req_spec['image'] = self._to_legacy_image()
req_spec['instance_properties'] = self._to_legacy_instance()
if self.obj_attr_is_set('flavor'):
req_spec['instance_type'] = self.flavor
else:
req_spec['instance_type'] = {}
return req_spec
def to_legacy_filter_properties_dict(self):
"""Returns a legacy filter_properties dict from the RequestSpec object.
        Since we need to manage backwards compatibility and rolling upgrades
        within our RPC API, we have to provide a helper that turns the
        RequestSpec object back into a legacy primitive dict until we drop
        support for old Scheduler RPC API versions.
If you don't understand why this method is needed, please don't use it.
"""
filt_props = {}
if self.obj_attr_is_set('ignore_hosts') and self.ignore_hosts:
filt_props['ignore_hosts'] = self.ignore_hosts
if self.obj_attr_is_set('force_hosts') and self.force_hosts:
filt_props['force_hosts'] = self.force_hosts
if self.obj_attr_is_set('force_nodes') and self.force_nodes:
filt_props['force_nodes'] = self.force_nodes
if self.obj_attr_is_set('retry') and self.retry:
filt_props['retry'] = self.retry.to_dict()
if self.obj_attr_is_set('limits') and self.limits:
filt_props['limits'] = self.limits.to_dict()
if self.obj_attr_is_set('instance_group') and self.instance_group:
filt_props.update(self._to_legacy_group_info())
if self.obj_attr_is_set('scheduler_hints') and self.scheduler_hints:
# NOTE(sbauza): We need to backport all the hints correctly since
# we had to hydrate the field by putting a single item into a list.
filt_props['scheduler_hints'] = {hint: self.get_scheduler_hint(
hint) for hint in self.scheduler_hints}
return filt_props
@classmethod
def from_components(cls, context, instance_uuid, image, flavor,
numa_topology, pci_requests, filter_properties, instance_group,
availability_zone):
"""Returns a new RequestSpec object hydrated by various components.
This helper is useful in creating the RequestSpec from the various
objects that are assembled early in the boot process. This method
creates a complete RequestSpec object with all properties set or
intentionally left blank.
:param context: a context object
:param instance_uuid: the uuid of the instance to schedule
:param image: a dict of properties for an image or volume
:param flavor: a flavor NovaObject
:param numa_topology: InstanceNUMATopology or None
:param pci_requests: InstancePCIRequests
:param filter_properties: a dict of properties for scheduling
:param instance_group: None or an instance group NovaObject
:param availability_zone: an availability_zone string
"""
spec_obj = cls(context)
spec_obj.num_instances = 1
spec_obj.instance_uuid = instance_uuid
spec_obj.instance_group = instance_group
spec_obj.project_id = context.project_id
spec_obj._image_meta_from_image(image)
spec_obj._from_flavor(flavor)
spec_obj._from_instance_pci_requests(pci_requests)
spec_obj._from_instance_numa_topology(numa_topology)
spec_obj.ignore_hosts = filter_properties.get('ignore_hosts')
spec_obj.force_hosts = filter_properties.get('force_hosts')
spec_obj.force_nodes = filter_properties.get('force_nodes')
spec_obj._from_retry(filter_properties.get('retry', {}))
spec_obj._from_limits(filter_properties.get('limits', {}))
spec_obj._from_hints(filter_properties.get('scheduler_hints', {}))
spec_obj.availability_zone = availability_zone
return spec_obj
@staticmethod
def _from_db_object(context, spec, db_spec):
spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))
for key in spec.fields:
# Load these from the db model not the serialized object within,
# though they should match.
if key in ['id', 'instance_uuid']:
setattr(spec, key, db_spec[key])
else:
setattr(spec, key, getattr(spec_obj, key))
spec._context = context
spec.obj_reset_changes()
return spec
@staticmethod
@db.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_spec = context.session.query(api_models.RequestSpec).filter_by(
instance_uuid=instance_uuid).first()
if not db_spec:
raise exception.RequestSpecNotFound(
instance_uuid=instance_uuid)
return db_spec
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_spec = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_spec)
@staticmethod
@db.api_context_manager.writer
def _create_in_db(context, updates):
db_spec = api_models.RequestSpec()
db_spec.update(updates)
db_spec.save(context.session)
return db_spec
def _get_update_primitives(self):
"""Serialize object to match the db model.
We store copies of embedded objects rather than
references to these objects because we want a snapshot of the request
at this point. If the references changed or were deleted we would
not be able to reschedule this instance under the same conditions as
it was originally scheduled with.
"""
updates = self.obj_get_changes()
# NOTE(alaski): The db schema is the full serialized object in a
# 'spec' column. If anything has changed we rewrite the full thing.
if updates:
db_updates = {'spec': jsonutils.dumps(self.obj_to_primitive())}
if 'instance_uuid' in updates:
db_updates['instance_uuid'] = updates['instance_uuid']
return db_updates
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self._get_update_primitives()
db_spec = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_spec)
@staticmethod
@db.api_context_manager.writer
def _save_in_db(context, instance_uuid, updates):
# FIXME(sbauza): Provide a classmethod when oslo.db bug #1520195 is
# fixed and released
db_spec = RequestSpec._get_by_instance_uuid_from_db(context,
instance_uuid)
db_spec.update(updates)
db_spec.save(context.session)
return db_spec
@base.remotable
def save(self):
updates = self._get_update_primitives()
db_spec = self._save_in_db(self._context, self.instance_uuid, updates)
self._from_db_object(self._context, self, db_spec)
self.obj_reset_changes()
@base.NovaObjectRegistry.register
class SchedulerRetries(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: ComputeNodeList version 1.14
VERSION = '1.1'
fields = {
'num_attempts': fields.IntegerField(),
# NOTE(sbauza): Even if we are only using host/node strings, we need to
# know which compute nodes were tried
'hosts': fields.ObjectField('ComputeNodeList'),
}
@classmethod
def from_dict(cls, context, retry_dict):
# NOTE(sbauza): We are not persisting the user context since it's only
# needed for hydrating the Retry object
retry_obj = cls()
        if 'num_attempts' not in retry_dict or 'hosts' not in retry_dict:
# NOTE(sbauza): We prefer to return an empty object if the
# primitive is not good enough
return retry_obj
retry_obj.num_attempts = retry_dict.get('num_attempts')
# NOTE(sbauza): each retry_dict['hosts'] item is a list of [host, node]
computes = [objects.ComputeNode(context=context, host=host,
hypervisor_hostname=node)
for host, node in retry_dict.get('hosts')]
retry_obj.hosts = objects.ComputeNodeList(objects=computes)
return retry_obj
def to_dict(self):
legacy_hosts = [[cn.host, cn.hypervisor_hostname] for cn in self.hosts]
return {'num_attempts': self.num_attempts,
'hosts': legacy_hosts}
@base.NovaObjectRegistry.register
class SchedulerLimits(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'numa_topology': fields.ObjectField('NUMATopologyLimits',
nullable=True,
default=None),
'vcpu': fields.IntegerField(nullable=True, default=None),
'disk_gb': fields.IntegerField(nullable=True, default=None),
'memory_mb': fields.IntegerField(nullable=True, default=None),
}
@classmethod
def from_dict(cls, limits_dict):
limits = cls(**limits_dict)
# NOTE(sbauza): Since the limits can be set for each field or not, we
# prefer to have the fields nullable, but default the value to None.
# Here we accept that the object is always generated from a primitive
# hence the use of obj_set_defaults exceptionally.
limits.obj_set_defaults()
return limits
def to_dict(self):
limits = {}
for field in self.fields:
if getattr(self, field) is not None:
limits[field] = getattr(self, field)
return limits
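# Added illustration, not part of the original module: from_dict() fills the
# unset fields with their None defaults and to_dict() only emits the fields
# that are not None, so a round trip keeps just the keys that were supplied:
#   SchedulerLimits.from_dict({'vcpu': 4, 'memory_mb': 2048}).to_dict()
#   -> {'vcpu': 4, 'memory_mb': 2048}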
# --- source: nova/objects/request_spec.py (repo: dims/nova, license: apache-2.0) ---
def rotate(arr):
    """Rotate a 2-D list 90 degrees clockwise."""
    return [list(a) for a in zip(*reversed(arr))]
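# Added usage illustration: rotating a 2x3 grid yields a 3x2 grid.
#   rotate([[1, 2, 3],
#           [4, 5, 6]])  ->  [[4, 1], [5, 2], [6, 3]]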
# --- source: katas/beta/ninety_degrees_rotation.py (repo: the-zebulan/CodeWars, license: mit) ---
from __future__ import absolute_import
import fnmatch
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pytest
from sunpy.database.commands import AddEntry, RemoveEntry, EditEntry,\
AddTag, RemoveTag, NoSuchEntryError, NonRemovableTagError,\
EmptyCommandStackError, CommandManager, CompositeOperation
from sunpy.database.tables import DatabaseEntry, Tag
@pytest.fixture
def session():
# always create an in-memory database with its own new table in each test
engine = create_engine('sqlite:///:memory:')
Session = sessionmaker()
DatabaseEntry.metadata.create_all(bind=engine)
return Session(bind=engine)
@pytest.fixture
def command_manager():
return CommandManager()
def test_add_entry_repr(session):
entry = DatabaseEntry(id=5)
repr_result = repr(AddEntry(session, entry))
expected_repr_result = (
'<AddEntry('
'session <sqlalchemy.orm.session.Session object at *>, '
'entry id 5)>'.format(id(session)))
assert fnmatch.fnmatch(repr_result, expected_repr_result)
def test_add_entry(session):
assert not session.new
entry = DatabaseEntry()
AddEntry(session, entry)()
assert len(session.new) == 1
assert entry.id is None
session.commit()
assert not session.new
assert entry.id == 1
def test_add_entry_undo(session):
entry = DatabaseEntry()
cmd = AddEntry(session, entry)
cmd()
assert session.query(DatabaseEntry).count() == 1
assert entry.id == 1
cmd.undo()
assert entry in session.deleted
assert session.query(DatabaseEntry).count() == 0
def test_add_removed_entry(session):
entry = DatabaseEntry()
AddEntry(session, entry)()
session.commit()
RemoveEntry(session, entry)()
session.commit()
AddEntry(session, entry)()
session.commit()
assert not session.new
assert entry.id == 1
def test_add_entry_undo_precommit(session):
entry = DatabaseEntry()
cmd = AddEntry(session, entry)
cmd()
cmd.undo()
session.commit()
assert session.query(DatabaseEntry).count() == 0
def test_edit_entry_repr():
entry = DatabaseEntry(id=7)
expected_repr_result = "<EditEntry(kwargs {'foo': 'bar'}, entry id 7)>"
assert fnmatch.fnmatch(repr(EditEntry(entry, foo='bar')), expected_repr_result)
def test_edit_entry_invalid(session):
with pytest.raises(ValueError):
EditEntry(DatabaseEntry())
def test_edit_entry(session):
entry = DatabaseEntry()
session.add(entry)
session.commit()
assert entry.id == 1
EditEntry(entry, id=42)()
assert entry.id == 42
def test_edit_entry_undo(session):
entry = DatabaseEntry()
session.add(entry)
session.commit()
cmd = EditEntry(entry, id=42)
cmd()
session.commit()
assert entry.id == 42
cmd.undo()
session.commit()
assert entry.id == 1
def test_remove_entry_repr(session):
entry = DatabaseEntry(id=3)
expected_repr_result = (
'<RemoveEntry('
'session <sqlalchemy.orm.session.Session object at *>, '
'entry <DatabaseEntry(id 3)>)>'.format(id(session)))
assert fnmatch.fnmatch(repr(RemoveEntry(session, entry)), expected_repr_result)
def test_remove_existing_entry(session):
entry = DatabaseEntry()
session.add(entry)
assert session.query(DatabaseEntry).count() == 1
assert entry.id == 1
RemoveEntry(session, entry)()
assert entry in session.deleted
assert session.query(DatabaseEntry).count() == 0
def test_remove_nonexisting_entry(session):
with pytest.raises(NoSuchEntryError):
RemoveEntry(session, DatabaseEntry())()
def test_remove_entry_undo(session):
entry = DatabaseEntry()
session.add(entry)
cmd = RemoveEntry(session, entry)
session.commit()
cmd()
assert session.query(DatabaseEntry).count() == 0
cmd.undo()
assert session.query(DatabaseEntry).count() == 1
def test_add_tag_repr(session):
entry = DatabaseEntry(id=12)
tag = Tag('spam')
expected_repr_result = (
"<AddTag("
"tag 'spam', "
"session <sqlalchemy.orm.session.Session object at *>, "
"entry id 12)>".format(id(session)))
assert fnmatch.fnmatch(repr(AddTag(session, entry, tag)), expected_repr_result)
def test_add_tag(session):
tag = Tag('tag')
entry = DatabaseEntry()
assert entry.tags == []
cmd = AddTag(session, entry, tag)
cmd()
assert tag in entry.tags
def test_add_removed_tag(session):
entry = DatabaseEntry()
tag = Tag('tag')
entry.tags.append(tag)
session.add(tag)
session.commit()
session.delete(tag)
AddTag(session, entry, tag)()
assert tag in entry.tags
def test_add_tag_undo_unsaved_entry(session):
tag = Tag('tag')
entry = DatabaseEntry()
cmd = AddTag(session, entry, tag)
cmd()
cmd.undo()
assert entry.tags == []
cmd()
assert tag in entry.tags
def test_remove_tag_repr(session):
entry = DatabaseEntry(id=8)
tag = Tag('foo')
expected_repr_result = (
"<RemoveTag("
"tag 'foo', "
"session <sqlalchemy.orm.session.Session object at *>, "
"entry id 8)>".format(id(session)))
assert fnmatch.fnmatch(repr(RemoveTag(session, entry, tag)), expected_repr_result)
def test_remove_nonexisting_tag(session):
cmd = RemoveTag(session, DatabaseEntry(), Tag('tag'))
with pytest.raises(NonRemovableTagError):
cmd()
def test_remove_tag_undo(session):
tag = Tag('tag')
entry = DatabaseEntry()
entry.tags.append(tag)
session.add(entry)
session.commit()
assert tag in entry.tags
cmd = RemoveTag(session, entry, tag)
cmd()
assert tag not in entry.tags
cmd.undo()
assert tag in entry.tags
def test_cmd_manager_pop_undo_cmd(session, command_manager):
cmd = AddEntry(session, DatabaseEntry())
command_manager.do(cmd)
popped_cmd = command_manager.pop_undo_command()
assert popped_cmd == cmd
def test_cmd_manager_pop_undo_cmd_empty_stack(command_manager):
with pytest.raises(EmptyCommandStackError):
command_manager.pop_undo_command()
def test_cmd_manager_pop_redo_cmd(command_manager):
with pytest.raises(EmptyCommandStackError):
command_manager.pop_redo_command()
def test_cmd_manager_pop_redo_cmd_empty_stack(session, command_manager):
cmd = AddEntry(session, DatabaseEntry())
command_manager.do(cmd)
command_manager.undo()
popped_cmd = command_manager.pop_redo_command()
assert popped_cmd == cmd
def test_cmd_manager_redo_stack_empty_after_call(session, command_manager):
command_manager.do(AddEntry(session, DatabaseEntry()))
command_manager.do(AddEntry(session, DatabaseEntry()))
assert len(command_manager.undo_commands) == 2
session.commit()
command_manager.undo(2)
assert not command_manager.undo_commands
assert len(command_manager.redo_commands) == 2
command_manager.do(AddEntry(session, DatabaseEntry()))
assert not command_manager.redo_commands
def test_cmd_manager_redo(session, command_manager):
assert command_manager.undo_commands == []
assert command_manager.redo_commands == []
command_manager.do(AddEntry(session, DatabaseEntry()))
command_manager.do(AddEntry(session, DatabaseEntry()))
assert len(command_manager.undo_commands) == 2
assert command_manager.redo_commands == []
session.commit()
command_manager.undo(2)
assert command_manager.undo_commands == []
assert len(command_manager.redo_commands) == 2
command_manager.redo(2)
assert len(command_manager.undo_commands) == 2
assert command_manager.redo_commands == []
def test_undo_redo_multiple_cmds_at_once(session, command_manager):
assert command_manager.undo_commands == []
command_manager.do(CompositeOperation([
AddEntry(session, DatabaseEntry()),
AddEntry(session, DatabaseEntry()),
AddEntry(session, DatabaseEntry())]))
assert len(command_manager.undo_commands) == 1
assert session.query(DatabaseEntry).count() == 3
command_manager.undo()
assert command_manager.undo_commands == []
assert session.query(DatabaseEntry).count() == 0
command_manager.redo()
assert command_manager.redo_commands == []
assert session.query(DatabaseEntry).count() == 3
|
{
"content_hash": "19fe13d74dbe2a72fdec39eda6a6f371",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 86,
"avg_line_length": 28.975778546712803,
"alnum_prop": 0.6750656794841176,
"repo_name": "Alex-Ian-Hamilton/sunpy",
"id": "2e481e70b40cc96c3f56ec238e09ac69daae5d5f",
"size": "8521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunpy/database/tests/test_commands.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "72909"
},
{
"name": "Python",
"bytes": "1505795"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
import unittest
import os
import sys
if __name__ == '__main__':
srqi_containing_dir = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(srqi_containing_dir)
loader = unittest.TestLoader()
suite = loader.discover(os.getcwd(), top_level_dir=os.path.dirname(os.getcwd()))
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "5c3750c1b6081ea627b4d9391b265246",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.6712328767123288,
"repo_name": "tectronics/dicom-sr-qi",
"id": "35efabf9dd9c3c251c29df7882ffd51021df1f33",
"size": "365",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_all.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "830"
},
{
"name": "Python",
"bytes": "186707"
}
],
"symlink_target": ""
}
|
from pprint import pprint
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import os
import math
import argparse
parser = argparse.ArgumentParser("autoscaler.py")
parser.add_argument("-p", "--project_id", help="Project id", type=str)
parser.add_argument("-r", "--region", help="GCP region where the managed instance group is located", type=str)
parser.add_argument("-z", "--zone", help="Name of GCP zone where the managed instance group is located", type=str)
parser.add_argument("-g", "--group_manager", help="Name of the managed instance group", type=str)
parser.add_argument("-c", "--computeinstancelimit", help="Maximum number of compute instances", type=int)
parser.add_argument("-v", "--verbosity", help="Increase output verbosity. 1-show basic debug info. 2-show detail debug info", type=int, choices=[0, 1, 2])
args = parser.parse_args()
# Project ID
project = args.project_id # Ex:'slurm-var-demo'
# Region where the managed instance group is located
region = args.region # Ex: 'us-central1'
# Name of the zone where the managed instance group is located
zone = args.zone # Ex: 'us-central1-f'
# The name of the managed instance group.
instance_group_manager = args.group_manager # Ex: 'condor-compute-igm'
# Default number of cores per instance, will be replaced with the actual value
cores_per_node = 4
# Default number of running instances that the managed instance group should maintain at any given time. This number will go up and down based on the load (number of jobs in the queue)
size = 0
# Debug level: 1 - print debug information, 2 - print detailed debug information
debug = 0
if (args.verbosity):
debug = args.verbosity
# Limit for the maximum number of compute instances. If zero (the default), no limit will be enforced by the script
compute_instance_limit = 0
if (args.computeinstancelimit):
compute_instance_limit = abs(args.computeinstancelimit)
if debug > 1:
print 'Launching autoscaler.py with the following arguments:'
print 'project_id: ' + project
print 'region: ' + region
print 'zone: ' + zone
print 'group_manager: ' + instance_group_manager
print 'computeinstancelimit: ' + str(compute_instance_limit)
print 'debuglevel: ' + str(debug)
# Remove specified instance from MIG and decrease MIG size
def deleteFromMig(instance):
instanceUrl = 'https://www.googleapis.com/compute/v1/projects/' \
+ project + '/zones/' + zone + '/instances/' + instance
instances_to_delete = {'instances': [instanceUrl]}
requestDelInstance = \
service.instanceGroupManagers().deleteInstances(project=project,
zone=zone, instanceGroupManager=instance_group_manager,
body=instances_to_delete)
response = requestDelInstance.execute()
if debug > 0:
print 'Request to delete instance ' + instance
pprint(response)
return response
def getInstanceTemplateInfo():
requestTemplateName = \
service.instanceGroupManagers().get(project=project, zone=zone,
instanceGroupManager=instance_group_manager,
fields='instanceTemplate')
responseTemplateName = requestTemplateName.execute()
template_name = ''
if debug > 1:
print 'Request for the template name'
pprint(responseTemplateName)
if len(responseTemplateName) > 0:
template_url = responseTemplateName.get('instanceTemplate')
template_url_partitioned = template_url.split('/')
template_name = \
template_url_partitioned[len(template_url_partitioned) - 1]
requestInstanceTemplate = \
service.instanceTemplates().get(project=project,
instanceTemplate=template_name, fields='properties')
responseInstanceTemplateInfo = requestInstanceTemplate.execute()
if debug > 1:
print 'Template information'
pprint(responseInstanceTemplateInfo['properties'])
machine_type = responseInstanceTemplateInfo['properties']['machineType']
is_preemtible = responseInstanceTemplateInfo['properties']['scheduling']['preemptible']
if debug > 0:
print 'Machine Type: ' + machine_type
print 'Is preemptible: ' + str(is_preemtible)
request = service.machineTypes().get(project=project, zone=zone,
machineType=machine_type)
response = request.execute()
guest_cpus = response['guestCpus']
if debug > 1:
print 'Machine information'
pprint(responseInstanceTemplateInfo['properties'])
if debug > 0:
print 'Guest CPUs: ' + str(guest_cpus)
instanceTemlateInfo = {'machine_type': machine_type,
'is_preemtible': is_preemtible,
'guest_cpus': guest_cpus}
return instanceTemlateInfo
# Obtain credentials
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
# Get the total number of jobs in the queue: the number of jobs waiting as well as the number of jobs already assigned to nodes
queue_length_req = 'condor_q -totals | tail -n 1'
queue_length_resp = os.popen(queue_length_req).read().split()
if len(queue_length_resp) > 1:
queue = int(queue_length_resp[0])
idle_jobs = int(queue_length_resp[6])
on_hold_jobs = int(queue_length_resp[10])
else:
queue = 0
idle_jobs = 0
print 'Total queue length: ' + str(queue)
print 'Idle jobs: ' + str(idle_jobs)
print 'Jobs on hold: ' + str(on_hold_jobs)
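# Illustrative 'condor_q -totals | tail -n 1' line that the parsing above expects
# (exact wording varies with the HTCondor version, so treat this as an assumption):
#   5 jobs; 0 completed, 0 removed, 3 idle, 2 running, 0 held, 0 suspended
# index [0] -> total jobs, [6] -> idle jobs, [10] -> jobs on hold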
instanceTemlateInfo = getInstanceTemplateInfo()
if debug > 1:
print 'Information about the compute instance template'
pprint(instanceTemlateInfo)
cores_per_node = instanceTemlateInfo['guest_cpus']
print 'Number of CPU per compute node: ' + str(cores_per_node)
# Get state for all jobs in Condor
name_req = 'condor_status -af name state'
slot_names = os.popen(name_req).read().splitlines()
if debug > 1:
print 'Currently running jobs in Condor'
print slot_names
# Adjust the current queue length by the number of jobs that are on hold
queue -=on_hold_jobs
if on_hold_jobs>0:
print "Adjusted queue length: " + str(queue)
# Calculate the number of instances needed to satisfy the current job queue length
if queue > 0:
size = int(math.ceil(float(queue) / float(cores_per_node)))
if debug>0:
print "Calucalting size of MIG: ⌈" + str(queue) + "/" + str(cores_per_node) + "⌉ = " + str(size)
else:
size = 0
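# Worked example of the sizing rule above, with illustrative numbers:
#   queue = 10 runnable jobs and cores_per_node = 4 gives
#   size = ceil(10 / 4) = 3 managed instances.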
# If a compute instance limit is specified, do not start more instances than the limit allows
if compute_instance_limit > 0 and size > compute_instance_limit:
size = compute_instance_limit
print "MIG target size will be limited by " + str(compute_instance_limit)
print 'New MIG target size: ' + str(size)
# Get current number of instances in the MIG
requestGroupInfo = service.instanceGroupManagers().get(project=project,
zone=zone, instanceGroupManager=instance_group_manager)
responseGroupInfo = requestGroupInfo.execute()
currentTarget = int(responseGroupInfo['targetSize'])
print 'Current MIG target size: ' + str(currentTarget)
if debug > 1:
print 'MIG Information:'
print responseGroupInfo
if size == 0 and currentTarget == 0:
print 'No jobs in the queue and no compute instances running. Nothing to do'
exit()
if size == currentTarget:
print 'Running correct number of compute nodes to handle number of jobs in the queue'
exit()
if size < currentTarget:
print 'Scaling down. Looking for nodes that can be shut down'
# Find nodes that are not busy (all slots showing status as "Unclaimed")
node_busy = {}
for slot_name in slot_names:
name_status = slot_name.split()
if len(name_status) > 1:
name = name_status[0]
status = name_status[1]
slot_server = name.split('@')
slot = slot_server[0]
server = slot_server[1].split('.')[0]
if debug > 0:
print slot + ', ' + server + ', ' + status + '\n'
if server not in node_busy:
if status == 'Unclaimed':
node_busy[server] = False
else:
node_busy[server] = True
else:
if status != 'Unclaimed':
node_busy[server] = True
if debug > 1:
print 'Compute node busy status:'
print node_busy
# Shut down nodes that are not busy
for node in node_busy:
if not node_busy[node]:
print 'Will shut down: ' + node + ' ...'
respDel = deleteFromMig(node)
if debug > 1:
print "Shut down request for compute node " + node
pprint(respDel)
if debug > 1:
print "Scaling down complete"
if size > currentTarget:
print "Scaling up. Need to increase number of instances to " + str(size)
#Request to resize
request = service.instanceGroupManagers().resize(project=project,
zone=zone,
instanceGroupManager=instance_group_manager,
size=size)
response = request.execute()
if debug > 1:
print 'Requesting to increase MIG size'
pprint(response)
print "Scaling up complete"
|
{
"content_hash": "7ef84018075f308647c1bf743cab4536",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 184,
"avg_line_length": 36.948,
"alnum_prop": 0.6703475154270867,
"repo_name": "aljim/deploymentmanager-samples",
"id": "2a0458c02199ca5315f3e3ae37e7f2050ffabcda",
"size": "9997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/v2/htcondor/autoscaler/autoscaler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "6428"
},
{
"name": "HTML",
"bytes": "106754"
},
{
"name": "JavaScript",
"bytes": "70015"
},
{
"name": "Makefile",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "443622"
},
{
"name": "Shell",
"bytes": "251698"
}
],
"symlink_target": ""
}
|
import subprocess, tempfile, threading
class ShellOneLiner:
# oneliner : one-liner shell script
# input : input object
# outfile : output file
# reader : python function that parses one output line of the one-liner
# writer : python function that formats one input line for the one-liner
def fieldReader(self, line):
return line[0:-1].split(' ')
def fieldWriter(self, line):
return ' '.join([str(x) for x in line]) + '\n'
def doinput(self, input, inport, writer):
for line in input:
inport.write(writer(line))
inport.close()
def __init__(self, oneliner, input=False, outfile=False, reader=False, writer=False):
if reader:
self.reader = reader
else:
self.reader = self.fieldReader
if writer:
self.writer = writer
else:
self.writer = self.fieldWriter
# subprocess
if outfile:
# output to file
self.outff = True
self.outfn = outfile
self.outfd0 = open(self.outfn, 'w+b')
soutf = self.outfd0
else:
# output to pipe
self.outff = False
soutf = subprocess.PIPE
if input:
sinf = subprocess.PIPE
else:
sinf = None
self.p = subprocess.Popen(oneliner + '| self 1/NF',
shell=True,
stdin=sinf,
stdout=soutf)
# input thread
if input:
self.int = threading.Thread(target=self.doinput,
name="input",
args=(input, self.p.stdin, self.writer) )
self.int.start()
def __iter__(self):
if self.outff:
# wait until processing is done
# and designate the output file descriptor as the output
self.p.wait()
self.outfd0.close()
self.outfd = open(self.outfn, 'r+b')
else:
# and designate the output pipe descriptor as the output
self.outfd = self.p.stdout
return self
def next(self):
return self.reader(self.outfd.next())
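# A minimal usage sketch for the class above (illustrative only; it assumes a
# Unix shell and the usp Tukubai 'self' command used in the Popen pipeline are
# available on PATH, and the input file is hypothetical):
#
#   one = ShellOneLiner("tr ':' ' ' < /etc/passwd")
#   for fields in one:          # each line comes back through fieldReader
#       print fields[0]         # first whitespace-separated field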
|
{
"content_hash": "2d6f950518168775de54836761d31c66",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 86,
"avg_line_length": 21.642857142857142,
"alnum_prop": 0.6364136413641364,
"repo_name": "kaznak/shellOneLiner",
"id": "6904ea03f410ea1f004a3a0152dadfecf42c5642",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shellOneLiner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3531"
},
{
"name": "Shell",
"bytes": "470"
}
],
"symlink_target": ""
}
|
import hashlib
import shlex
import tarfile
import subprocess
from fabric.contrib.files import is_link
from fabric.utils import abort
import os
from fabric.context_managers import show, settings, cd, prefix, lcd
from fabric.contrib import files
from fabric.operations import run, sudo, get, local, put, open_shell
from fabric.state import env
from fabric.api import task
PROJECT_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
REMOTE_REVISION = None
RELEASES_RELATIVE_PATH_DIR = 'releases'
env.use_ssh_config = True
# https://gist.github.com/lost-theory/1831706
class CommandFailed(Exception):
def __init__(self, message, result):
Exception.__init__(self, message)
self.result = result
def erun(*args, **kwargs):
with settings(warn_only=True):
result = run(*args, **kwargs)
if result.failed:
raise CommandFailed("args: %r, kwargs: %r, error code: %r" % (args, kwargs, result.return_code), result)
return result
def esudo(*args, **kwargs):
with settings(warn_only=True):
result = sudo(*args, **kwargs)
if result.failed:
raise CommandFailed("args: %r, kwargs: %r, error code: %r" % (args, kwargs, result.return_code), result)
return result
# http://docs.fabfile.org/en/latest/usage/execution.html#roles
def describe_revision(head='HEAD'):
with lcd(PROJECT_ROOT_DIR):
actual_tag = local('git describe --always %s' % head, capture=True)
return actual_tag
def get_dump_filepath(user, prefix=u'backups'):
return '%s/%s.sql' % (prefix, get_remote_revision(user))
def get_release_filename():
return '%s.tar.gz' % describe_revision()
def get_release_filepath():
return os.path.join(PROJECT_ROOT_DIR, RELEASES_RELATIVE_PATH_DIR, get_release_filename())
@task
def dump_db_snapshot(db_name, user):
remote_tmp_file_path = '/tmp/dump_db.sql' # FIXME: use temporary file
sudo('pg_dump %s > %s' % (db_name, remote_tmp_file_path), user='postgres')
get(remote_path=remote_tmp_file_path, local_path= get_dump_filepath(user))
def reset_db():
local('python manage.py reset_db')
@task
def load_db(user):
local('cat %s | python manage.py dbshell' % get_dump_filepath(user))
@task
def load_db_snapshot(db_name, username):
dump_db_snapshot(db_name, username)
reset_db()
load_db(username)
@task
def create_release_archive(head='HEAD'):
with lcd(PROJECT_ROOT_DIR):
local('mkdir -p %s' % RELEASES_RELATIVE_PATH_DIR)
local('git archive --worktree-attributes --format=tar.gz %s:ohr > %s' % (
head,
get_release_filepath()
))
def sync_virtualenv(virtualenv_path, requirements_path):
if not files.exists(virtualenv_path):
erun('virtualenv --no-site-packages %s' % virtualenv_path)
erun('source %s/bin/activate && pip install -r %s' % (
virtualenv_path,
requirements_path,
))
def virtualenv(virtualenv_path, *args, **kwargs):
prefix('source %s/bin/activate' % virtualenv_path)
def django_collectstatic(virtualenv_path):
erun('source %s/bin/activate && honcho --env ../.env run ./manage.py collectstatic --noinput' % virtualenv_path)
def django_migrate(virtualenv_path):
erun('source %s/bin/activate && honcho --env ../.env run ./manage.py migrate' % virtualenv_path)
def django_createcachetable(virtualenv_path):
erun('source %s/bin/activate && honcho --env ../.env run ./manage.py createcachetable -v 3' % virtualenv_path)
def validate_steps(steps):
'''Normalize a list of steps into callables.
Each step may already be a callable, or a dotted import string such as
'datetime.datetime' that is imported and resolved to a callable; a
ValueError is raised if a step cannot be made callable.
'''
func_steps = []
for step in steps:
func_step = step
# if is a string then import it
if not callable(step) and isinstance(step, basestring):
last_dot = step.rindex('.')
module, func_name = step[:last_dot], step[last_dot + 1:]
func_step = getattr(__import__(module), func_name)
if not callable(func_step):
raise ValueError('You must pass a function')
func_steps.append(func_step)
return func_steps
# https://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
def hashfile(afile, hasher, blocksize=65536):
with open(afile, 'r') as f:
buf = f.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(blocksize)
return hasher.hexdigest()
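# Illustrative use of hashfile, mirroring the call made in _release below
# (the archive path is hypothetical):
#
#   revision = hashfile('releases/myapp.tar.gz', hashlib.sha256())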
@task
def _release(archive, revision=None, web_root=None, **kwargs):
'''
Main task
its role is to decompress an archive to the web root into a directory
named 'app-X' where X identifies the revision; by default the revision
is calculated from the sha256 of the archive when not indicated.
:param revision:
:param archive:
:param web_root:
:param kwargs:
:return:
'''
previous_revision = None
cwd = erun('pwd').stdout if not web_root else web_root
if not os.path.exists(archive):
raise CommandFailed('Archive \'%s\' doesn\'t exist' % archive)
revision = revision or hashfile(archive, hashlib.sha256())
remote_filepath = '%s-%s' % (archive, revision)
app_dir = os.path.join(cwd, 'app-%s' % revision)
app_symlink = os.path.join(cwd, 'app')
put(local_path=archive, remote_path=remote_filepath)
try:
# if exists remove dir
if files.exists(app_dir):
erun('rm -vfr %s' % (
app_dir,
))
# create the remote dir
erun('mkdir -p %s' % app_dir)
erun('tar xf %s -C %s' % (
remote_filepath,
app_dir,
))
# find the previous release and move/unlink it
if files.exists(app_symlink) and is_link(app_symlink):
# TODO: move old deploy in an 'archive' directory
previous_deploy_path = erun('basename $(readlink -f %s)' % app_symlink).stdout
idx = previous_deploy_path.index('-')
previous_revision = previous_deploy_path[idx + 1:]
if previous_revision != revision:
erun('unlink %s' % app_symlink)
erun('mkdir -p old && mv -f %s old/' % previous_deploy_path)
elif files.exists(app_symlink):
raise CommandFailed('app directory already exists and is not a symlink')
erun('ln -s %s %s' % (app_dir, app_symlink))
except CommandFailed as e:
print 'An error occurred: %s' % e
print '''
%s --> %s
''' % (previous_revision or '?', revision)
open_shell('cd %s' % (
app_dir,
))
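# Illustrative invocation of the task above from the command line, using
# Fabric's task:arg syntax (archive path and web root are hypothetical):
#
#   fab _release:archive=releases/myapp.tar.gz,web_root=/var/www/myapp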
# TODO: factorize also steps related to python steps (e.g. virtualenv)
# probably with pre-steps and post-steps
@task
def release(head='HEAD', web_root=None, requirements=u'requirements.txt', envpath='.env', steps=None):
'''Main task for releasing.
Unarchive the release in the webroot, sync_virtualenv and update the app/ directory
to point to the new release and archive in old/.
'''
steps = validate_steps(steps) if steps else []
cwd = erun('pwd').stdout if not web_root else web_root
abs_envpath = os.path.abspath(os.path.join(cwd, envpath))
if not files.exists(abs_envpath):
raise abort('%s doesn\'t exist, create it before release using configure_env task!!!' % abs_envpath)
# locally we create the archive with the app code
create_release_archive(head)
release_filename = get_release_filename()
local_release_filepath = get_release_filepath()
actual_version = describe_revision(head)
previous_version = None
# check that the archive contains the requirements file
tf = tarfile.open(local_release_filepath)
try:
tf.getmember(requirements)
except KeyError as e:
abort('file \'%s\' doesn\'t exist, indicate a requirements file contained into the release archive' % requirements)
finally:
tf.close()
# and upload it to the server
if not files.exists(release_filename):
put(local_path=local_release_filepath)
app_dir = os.path.abspath(os.path.join(cwd, 'app-%s' % describe_revision(head)))
virtualenv_path = os.path.abspath(os.path.join(cwd, '.virtualenv'))
try:
# if exists remove dir
if files.exists(app_dir):
erun('rm -vfr %s' % (
app_dir,
))
# create the remote dir
erun('mkdir -p %s' % app_dir)
erun('tar xf %s -C %s' % (
release_filename,
app_dir,
))
sync_virtualenv(virtualenv_path, '%s/%s' % (app_dir, requirements,))# parametrize
with cd(app_dir):
for step in steps:
step(virtualenv_path)
# find the previous release and move/unlink it
if is_link('app'):
# TODO: move old deploy in an 'archive' directory
previous_deploy_path = erun('basename $(readlink -f app)').stdout
idx = previous_deploy_path.index('-')
previous_version = previous_deploy_path[idx + 1:]
if previous_version != actual_version:
erun('unlink app')
erun('mkdir -p old && mv -f %s old/' % previous_deploy_path)
erun('ln -s %s app' % app_dir)
except CommandFailed as e:
print 'An error occurred: %s' % e
print '''
%s --> %s
Use 'honcho --env ../.env start' inside a screen session
''' % (previous_version, actual_version)
open_shell('cd %s && source %s/bin/activate' % (
app_dir,
virtualenv_path,
))
@task
def shell(revision=None):
'''Open a shell into an app's environment (the enabled one as default)'''
cwd = erun('pwd').stdout
virtualenv_path = os.path.abspath(os.path.join(cwd, '.virtualenv'))
open_shell('cd %s && source %s/bin/activate' % (
'app' if not revision else ('app-%s' % revision),
virtualenv_path,
))
@task
def release_django(head='HEAD', web_root=None, requirements=u'requirements.txt', envpath='.env'):
release(head, web_root, requirements, steps=[django_migrate, django_createcachetable, django_collectstatic,], envpath=envpath)
def get_remote_revision(user):
global REMOTE_REVISION
if not REMOTE_REVISION:
current_app_dir = esudo('cd && basename $(readlink -f app)', user=user)
try:
_, REMOTE_REVISION = current_app_dir.split('-')
except Exception as e:
print e
REMOTE_REVISION = 'unknown'
return REMOTE_REVISION
@task
def configure_env(envpath='.env', local_copy_path='.remote.env', web_root=None):
cwd = erun('pwd').stdout if not web_root else web_root
envpath = os.path.abspath(os.path.join(cwd, envpath))
# first of all, get the file remote side
if files.exists(envpath):
get(remote_path=envpath, local_path=local_copy_path)
# just in case the remote file doesn't exist
with open(local_copy_path, 'a+') as f:
pass
subprocess.call(shlex.split("vim %s" % local_copy_path))
put(local_path=local_copy_path, remote_path=envpath)
os.remove(local_copy_path)
|
{
"content_hash": "e06e388f1b0bd495e506fd9d961cec73",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 130,
"avg_line_length": 32.36151603498542,
"alnum_prop": 0.626936936936937,
"repo_name": "gipi/OHR",
"id": "0ed3ec980909da100d6252a826370857d36b1f81",
"size": "11100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provision/fabfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "957"
},
{
"name": "HTML",
"bytes": "28782"
},
{
"name": "JavaScript",
"bytes": "1956"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "57250"
},
{
"name": "Ruby",
"bytes": "1430"
},
{
"name": "Shell",
"bytes": "3272"
}
],
"symlink_target": ""
}
|
'''
This thread interacts with an OpenFlow Floodlight controller to create dataplane connections
'''
__author__="Pablo Montes, Alfonso Tierno"
__date__ ="17-jul-2015"
#import json
import threading
import time
import Queue
import requests
import logging
class FlowBadFormat(Exception):
'''raise when a bad format of flow is found'''
def change_of2db(flow):
'''Change 'flow' dictionary from openflow format to database format
Basically the change consist of changing 'flow[actions] from a list of
double tuple to a string
from [(A,B),(C,D),..] to "A=B,C=D" '''
action_str_list=[]
if type(flow)!=dict or "actions" not in flow:
raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
try:
for action in flow['actions']:
action_str_list.append( action[0] + "=" + str(action[1]) )
flow['actions'] = ",".join(action_str_list)
except:
raise FlowBadFormat("Unexpected format at 'actions'")
def change_db2of(flow):
'''Change 'flow' dictionary from database format to openflow format
Basically the change consist of changing 'flow[actions]' from a string to
a double tuple list
from "A=B,C=D,..." to [(A,B),(C,D),..]
raise FlowBadFormat '''
actions=[]
if type(flow)!=dict or "actions" not in flow or type(flow["actions"])!=str:
raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
action_list = flow['actions'].split(",")
for action_item in action_list:
action_tuple = action_item.split("=")
if len(action_tuple) != 2:
raise FlowBadFormat("Expected key=value format at 'actions'")
if action_tuple[0].strip().lower()=="vlan":
if action_tuple[1].strip().lower() in ("none", "strip"):
actions.append( ("vlan",None) )
else:
try:
actions.append( ("vlan", int(action_tuple[1])) )
except:
raise FlowBadFormat("Expected integer after vlan= at 'actions'")
elif action_tuple[0].strip().lower()=="out":
actions.append( ("out", str(action_tuple[1])) )
else:
raise FlowBadFormat("Unexpected '%s' at 'actions'"%action_tuple[0])
flow['actions'] = actions
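# Illustrative inverse for change_db2of (values are made up):
#   flow = {'actions': "vlan=None,out=port1/1"}
#   change_db2of(flow)   # flow['actions'] becomes [('vlan', None), ('out', 'port1/1')]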
class of_test_connector():
'''This is a fake openflow connector for testing.
It does nothing and it is used for running openvim without an openflow controller
'''
def __init__(self, params):
self.name = "ofc_test"
self.rules={}
self.logger = logging.getLogger('vim.OF.TEST')
self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR") ) )
def get_of_switches(self):
return 0, ()
def obtain_port_correspondence(self):
return 0, ()
def del_flow(self, flow_name):
if flow_name in self.rules:
self.logger.debug("del_flow OK")
del self.rules[flow_name]
return 0, None
else:
self.logger.warning("del_flow not found")
return -1, "flow %s not found"
def new_flow(self, data):
self.rules[ data["name"] ] = data
self.logger.debug("new_flow OK")
return 0, None
def get_of_rules(self, translate_of_ports=True):
return 0, self.rules
def clear_all_flows(self):
self.logger.debug("clear_all_flows OK")
self.rules={}
return 0, None
class openflow_thread(threading.Thread):
def __init__(self, OF_connector, db, db_lock, of_test, pmp_with_same_vlan, debug='ERROR'):
threading.Thread.__init__(self)
self.db = db
self.pmp_with_same_vlan = pmp_with_same_vlan
self.name = "openflow"
self.test = of_test
self.db_lock = db_lock
self.OF_connector = OF_connector
self.logger = logging.getLogger('vim.OF')
self.logger.setLevel( getattr(logging, debug) )
self.queueLock = threading.Lock()
self.taskQueue = Queue.Queue(2000)
def insert_task(self, task, *aditional):
try:
self.queueLock.acquire()
task = self.taskQueue.put( (task,) + aditional, timeout=5)
self.queueLock.release()
return 1, None
except Queue.Full:
return -1, "timeout inserting a task over openflow thread " + self.name
def run(self):
while True:
self.queueLock.acquire()
if not self.taskQueue.empty():
task = self.taskQueue.get()
else:
task = None
self.queueLock.release()
if task is None:
time.sleep(1)
continue
if task[0] == 'update-net':
r,c = self.update_of_flows(task[1])
#update database status
self.db_lock.acquire()
if r<0:
UPDATE={'status':'ERROR', 'last_error': str(c)}
self.logger.error("processing task 'update-net' %s: %s", str(task[1]), c)
else:
UPDATE={'status':'ACTIVE', 'last_error': None}
self.logger.debug("processing task 'update-net' %s: OK", str(task[1]))
self.db.update_rows('nets', UPDATE, WHERE={'uuid':task[1]})
self.db_lock.release()
elif task[0] == 'clear-all':
r,c = self.clear_all_flows()
if r<0:
self.logger.error("processing task 'clear-all': %s", c)
else:
self.logger.debug("processing task 'clear-all': OK")
elif task[0] == 'exit':
self.logger.debug("exit from openflow_thread")
self.terminate()
return 0
else:
self.logger.error("unknown task %s", str(task))
def terminate(self):
pass
#print self.name, ": exit from openflow_thread"
def update_of_flows(self, net_id):
ports=()
self.db_lock.acquire()
select_= ('type','admin_state_up', 'vlan', 'provider', 'bind_net','bind_type','uuid')
result, nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE={'uuid':net_id} )
#get all the networks binding to this
if result > 0:
if nets[0]['bind_net']:
bind_id = nets[0]['bind_net']
else:
bind_id = net_id
#get our net and all bind_nets
result, nets = self.db.get_table(FROM='nets', SELECT=select_,
WHERE_OR={'bind_net':bind_id, 'uuid':bind_id} )
self.db_lock.release()
if result < 0:
return -1, "DB error getting net: " + nets
#elif result==0:
#net has been deleted
ifaces_nb = 0
database_flows = []
for net in nets:
net_id = net["uuid"]
if net['admin_state_up'] == 'false':
net['ports'] = ()
else:
self.db_lock.acquire()
nb_ports, net_ports = self.db.get_table(
FROM='ports',
SELECT=('switch_port','vlan','uuid','mac','type','model'),
WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} )
self.db_lock.release()
if nb_ports < 0:
#print self.name, ": update_of_flows() ERROR getting ports", ports
return -1, "DB error getting ports from net '%s': %s" % (net_id, net_ports)
#add the binding as an external port
if net['provider'] and net['provider'][:9]=="openflow:":
external_port={"type":"external","mac":None}
external_port['uuid'] = net_id + ".1" #fake uuid
if net['provider'][-5:]==":vlan":
external_port["vlan"] = net["vlan"]
external_port["switch_port"] = net['provider'][9:-5]
else:
external_port["vlan"] = None
external_port["switch_port"] = net['provider'][9:]
net_ports = net_ports + (external_port,)
nb_ports += 1
net['ports'] = net_ports
ifaces_nb += nb_ports
# Get the name of flows that will be affected by this NET
self.db_lock.acquire()
result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':net_id})
self.db_lock.release()
if result < 0:
#print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
return -1, "DB error getting flows from net '%s': %s" %(net_id, database_net_flows)
database_flows += database_net_flows
# Get the name of flows where net_id==NULL that means net deleted (At DB foreign key: On delete set null)
self.db_lock.acquire()
result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':None})
self.db_lock.release()
if result < 0:
#print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
return -1, "DB error getting flows from net 'null': %s" %(database_net_flows)
database_flows += database_net_flows
#Get the existing flows at openflow controller
result, of_flows = self.OF_connector.get_of_rules()
if result < 0:
#print self.name, ": update_of_flows() ERROR getting flows from controller", of_flows
return -1, "OF error getting flows: " + of_flows
if ifaces_nb < 2:
pass
elif net['type'] == 'ptp':
if ifaces_nb > 2:
#print self.name, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+\
# str(ifaces_nb)+' interfaces.'
return -1, "'ptp' type network cannot connect %d interfaces, only 2" % ifaces_nb
elif net['type'] == 'data':
if ifaces_nb > 2 and self.pmp_with_same_vlan:
# check all ports are VLAN (tagged) or none
vlan_tag = None
for port in ports:
if port["type"]=="external":
if port["vlan"] != None:
if port["vlan"]!=net["vlan"]:
text="External port vlan-tag and net vlan-tag must be the same when flag 'of_controller_nets_with_same_vlan' is True"
#print self.name, "Error", text
return -1, text
if vlan_tag == None:
vlan_tag=True
elif vlan_tag==False:
text="Passthrough and external port vlan-tagged can not be connected when flag 'of_controller_nets_with_same_vlan' is True"
#print self.name, "Error", text
return -1, text
else:
if vlan_tag == None:
vlan_tag=False
elif vlan_tag == True:
text="SR-IOV and external port not vlan-tagged can not be connected when flag 'of_controller_nets_with_same_vlan' is True"
#print self.name, "Error", text
return -1, text
elif port["model"]=="PF" or port["model"]=="VFnotShared":
if vlan_tag == None:
vlan_tag=False
elif vlan_tag==True:
text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
#print self.name, "Error", text
return -1, text
elif port["model"] == "VF":
if vlan_tag == None:
vlan_tag=True
elif vlan_tag==False:
text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
#print self.name, "Error", text
return -1, text
else:
return -1, 'Only ptp and data networks are supported for openflow'
# calculate new flows to be inserted
result, new_flows = self._compute_net_flows(nets)
if result < 0:
return result, new_flows
#modify database flows format and get the used names
used_names=[]
for flow in database_flows:
try:
change_db2of(flow)
except FlowBadFormat as e:
self.logger.error("Exception FlowBadFormat: '%s', flow: '%s'",str(e), str(flow))
continue
used_names.append(flow['name'])
name_index=0
#insert at database the new flows, change actions to human text
for flow in new_flows:
#1 check if an equal flow is already present
index = self._check_flow_already_present(flow, database_flows)
if index>=0:
database_flows[index]["not delete"]=True
self.logger.debug("Skipping already present flow %s", str(flow))
continue
#2 look for a non used name
flow_name=flow["net_id"]+"."+str(name_index)
while flow_name in used_names or flow_name in of_flows:
name_index += 1
flow_name=flow["net_id"]+"."+str(name_index)
used_names.append(flow_name)
flow['name'] = flow_name
#3 insert at openflow
result, content = self.OF_connector.new_flow(flow)
if result < 0:
#print self.name, ": Error '%s' at flow insertion" % c, flow
return -1, content
#4 insert at database
try:
change_of2db(flow)
except FlowBadFormat as e:
#print self.name, ": Error Exception FlowBadFormat '%s'" % str(e), flow
return -1, str(e)
self.db_lock.acquire()
result, content = self.db.new_row('of_flows', flow)
self.db_lock.release()
if result < 0:
#print self.name, ": Error '%s' at database insertion" % content, flow
return -1, content
#delete not needed old flows from openflow and from DDBB,
#check that the needed flows at DDBB are present in controller or insert them otherwise
for flow in database_flows:
if "not delete" in flow:
if flow["name"] not in of_flows:
#not in controller, insert it
result, content = self.OF_connector.new_flow(flow)
if result < 0:
#print self.name, ": Error '%s' at flow insertion" % c, flow
return -1, content
continue
#Delete flow
if flow["name"] in of_flows:
result, content = self.OF_connector.del_flow(flow['name'])
if result<0:
self.logger.error("cannot delete flow '%s' from OF: %s", flow['name'], content )
continue #skip deletion from database
#delete from database
self.db_lock.acquire()
result, content = self.db.delete_row_by_key('of_flows', 'id', flow['id'])
self.db_lock.release()
if result<0:
self.logger.error("cannot delete flow '%s' from DB: %s", flow['name'], content )
return 0, 'Success'
def clear_all_flows(self):
try:
if not self.test:
self.OF_connector.clear_all_flows()
#remove from database
self.db_lock.acquire()
self.db.delete_row_by_key('of_flows', None, None) #this will delete all lines
self.db_lock.release()
return 0, None
except requests.exceptions.RequestException as e:
#print self.name, ": clear_all_flows Exception:", str(e)
return -1, str(e)
flow_fields=('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id')
def _check_flow_already_present(self, new_flow, flow_list):
'''check if the same flow is already present in the flow list
The flow is repeated if all the fields, apart from name, are equal
Return the index of matching flow, -1 if not match'''
index=0
for flow in flow_list:
equal=True
for f in self.flow_fields:
if flow.get(f) != new_flow.get(f):
equal=False
break
if equal:
return index
index += 1
return -1
def _compute_net_flows(self, nets):
new_flows=[]
new_broadcast_flows={}
nb_ports = 0
# Check switch_port information is right
self.logger.debug("_compute_net_flows nets: %s", str(nets))
for net in nets:
for port in net['ports']:
nb_ports += 1
if not self.test and str(port['switch_port']) not in self.OF_connector.pp2ofi:
error_text= "switch port name '%s' is not valid for the openflow controller" % str(port['switch_port'])
#print self.name, ": ERROR " + error_text
return -1, error_text
for net_src in nets:
net_id = net_src["uuid"]
for net_dst in nets:
vlan_net_in = None
vlan_net_out = None
if net_src == net_dst:
#intra net rules
priority = 1000
elif net_src['bind_net'] == net_dst['uuid']:
if net_src.get('bind_type') and net_src['bind_type'][0:5] == "vlan:":
vlan_net_out = int(net_src['bind_type'][5:])
priority = 1100
elif net_dst['bind_net'] == net_src['uuid']:
if net_dst.get('bind_type') and net_dst['bind_type'][0:5] == "vlan:":
vlan_net_in = int(net_dst['bind_type'][5:])
priority = 1100
else:
#nets not binding
continue
for src_port in net_src['ports']:
vlan_in = vlan_net_in
if vlan_in == None and src_port['vlan'] != None:
vlan_in = src_port['vlan']
elif vlan_in != None and src_port['vlan'] != None:
#TODO this is something that we can not do. It requires a double VLAN check
#outer VLAN should be src_port['vlan'] and inner VLAN should be vlan_in
continue
# BROADCAST:
broadcast_key = src_port['uuid'] + "." + str(vlan_in)
if broadcast_key in new_broadcast_flows:
flow_broadcast = new_broadcast_flows[broadcast_key]
else:
flow_broadcast = {'priority': priority,
'net_id': net_id,
'dst_mac': 'ff:ff:ff:ff:ff:ff',
"ingress_port": str(src_port['switch_port']),
'actions': []
}
new_broadcast_flows[broadcast_key] = flow_broadcast
if vlan_in is not None:
flow_broadcast['vlan_id'] = str(vlan_in)
for dst_port in net_dst['ports']:
vlan_out = vlan_net_out
if vlan_out == None and dst_port['vlan'] != None:
vlan_out = dst_port['vlan']
elif vlan_out != None and dst_port['vlan'] != None:
#TODO this is something that we can not do. It requires a double VLAN set
#outer VLAN should be dst_port['vlan'] and inner VLAN should be vlan_out
continue
#if src_port == dst_port:
# continue
if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out:
continue
flow = {
"priority": priority,
'net_id': net_id,
"ingress_port": str(src_port['switch_port']),
'actions': []
}
if vlan_in is not None:
flow['vlan_id'] = str(vlan_in)
# allow that one port have no mac
if dst_port['mac'] is None or nb_ports==2: # point to point or nets with 2 elements
flow['priority'] = priority-5 # less priority
else:
flow['dst_mac'] = str(dst_port['mac'])
if vlan_out == None:
if vlan_in != None:
flow['actions'].append( ('vlan',None) )
else:
flow['actions'].append( ('vlan', vlan_out ) )
flow['actions'].append( ('out', str(dst_port['switch_port'])) )
if self._check_flow_already_present(flow, new_flows) >= 0:
self.logger.debug("Skipping repeated flow '%s'", str(flow))
continue
new_flows.append(flow)
# BROADCAST:
if nb_ports <= 2: # point to multipoint or nets with more than 2 elements
continue
out = (vlan_out, str(dst_port['switch_port']))
if out not in flow_broadcast['actions']:
flow_broadcast['actions'].append( out )
#BROADCAST
for flow_broadcast in new_broadcast_flows.values():
if len(flow_broadcast['actions'])==0:
continue #nothing to do, skip
flow_broadcast['actions'].sort()
if 'vlan_id' in flow_broadcast:
previous_vlan = 0 # sentinel: the matched packet arrives vlan-tagged, so the first action must set or strip the vlan
else:
previous_vlan = None
final_actions=[]
action_number = 0
for action in flow_broadcast['actions']:
if action[0] != previous_vlan:
final_actions.append( ('vlan', action[0]) )
previous_vlan = action[0]
if self.pmp_with_same_vlan and action_number:
return -1, "Can not interconnect different vlan tags in a network when flag 'of_controller_nets_with_same_vlan' is True."
action_number += 1
final_actions.append( ('out', action[1]) )
flow_broadcast['actions'] = final_actions
if self._check_flow_already_present(flow_broadcast, new_flows) >= 0:
self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast))
continue
new_flows.append(flow_broadcast)
#UNIFY openflow rules with the same input port and vlan and the same output actions
#These flows differ at the dst_mac; and they are unified by not filtering by dst_mac
#this can happen if there is only two ports. It is converted to a point to point connection
flow_dict={} # use as key vlan_id+ingress_port and as value the list of flows matching these values
for flow in new_flows:
key = str(flow.get("vlan_id"))+":"+flow["ingress_port"]
if key in flow_dict:
flow_dict[key].append(flow)
else:
flow_dict[key]=[ flow ]
new_flows2=[]
for flow_list in flow_dict.values():
convert2ptp=False
if len (flow_list)>=2:
convert2ptp=True
for f in flow_list:
if f['actions'] != flow_list[0]['actions']:
convert2ptp=False
break
if convert2ptp: # add only one unified rule without dst_mac
self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list) )
flow_list[0].pop('dst_mac')
flow_list[0]["priority"] -= 5
new_flows2.append(flow_list[0])
else: # add all the rules
new_flows2 += flow_list
return 0, new_flows2
|
{
"content_hash": "eb86b8593baeb0589575f08f5abc680d",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 155,
"avg_line_length": 45.90217391304348,
"alnum_prop": 0.495264030310206,
"repo_name": "nfvlabs/openmano",
"id": "a002b894dd583eee74ad67103f43995fc4022aa4",
"size": "26168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openvim/openflow_thread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10986"
},
{
"name": "Groff",
"bytes": "2335"
},
{
"name": "HTML",
"bytes": "523"
},
{
"name": "JavaScript",
"bytes": "308315"
},
{
"name": "PHP",
"bytes": "72261"
},
{
"name": "PLpgSQL",
"bytes": "44029"
},
{
"name": "Python",
"bytes": "1135240"
},
{
"name": "Shell",
"bytes": "256275"
}
],
"symlink_target": ""
}
|
"""Python bindings for Mesos."""
from __future__ import print_function
import sys
__all__ = (
'Executor',
'ExecutorDriver'
'Scheduler',
'SchedulerDriver',
)
class Scheduler(object):
"""
Base class for Mesos schedulers. Users' schedulers should extend this
class to get default implementations of methods they don't override.
"""
def registered(self, driver, frameworkId, masterInfo):
"""
Invoked when the scheduler successfully registers with a Mesos master.
It is called with the frameworkId, a unique ID generated by the
master, and the masterInfo which is information about the master
itself.
"""
def reregistered(self, driver, masterInfo):
"""
Invoked when the scheduler re-registers with a newly elected Mesos
master. This is only called when the scheduler has previously been
registered. masterInfo contains information about the newly elected
master.
"""
def disconnected(self, driver):
"""
Invoked when the scheduler becomes disconnected from the master, e.g.
the master fails and another is taking over.
"""
def resourceOffers(self, driver, offers):
"""
Invoked when resources have been offered to this framework. A single
offer will only contain resources from a single slave. Resources
associated with an offer will not be re-offered to _this_ framework
until either (a) this framework has rejected those resources (see
SchedulerDriver.launchTasks) or (b) those resources have been
rescinded (see Scheduler.offerRescinded). Note that resources may be
concurrently offered to more than one framework at a time (depending
on the allocator being used). In that case, the first framework to
launch tasks using those resources will be able to use them while the
other frameworks will have those resources rescinded (or if a
framework has already launched tasks with those resources then those
tasks will fail with a TASK_LOST status and a message saying as much).
"""
def offerRescinded(self, driver, offerId):
"""
Invoked when an offer is no longer valid (e.g., the slave was lost or
another framework used resources in the offer.) If for whatever reason
an offer is never rescinded (e.g., dropped message, failing over
framework, etc.), a framework that attempts to launch tasks using an
invalid offer will receive TASK_LOST status updates for those tasks
(see Scheduler.resourceOffers).
"""
def statusUpdate(self, driver, status):
"""
Invoked when the status of a task has changed (e.g., a slave is lost
and so the task is lost, a task finishes and an executor sends a
status update saying so, etc.) Note that returning from this callback
acknowledges receipt of this status update. If for whatever reason
the scheduler aborts during this callback (or the process exits)
another status update will be delivered. Note, however, that this is
currently not true if the slave sending the status update is lost or
fails during that time.
"""
def frameworkMessage(self, driver, executorId, slaveId, message):
"""
Invoked when an executor sends a message. These messages are best
effort; do not expect a framework message to be retransmitted in any
reliable fashion.
"""
def slaveLost(self, driver, slaveId):
"""
Invoked when a slave has been determined unreachable (e.g., machine
failure, network partition.) Most frameworks will need to reschedule
any tasks launched on this slave on a new slave.
"""
def executorLost(self, driver, executorId, slaveId, status):
"""
Invoked when an executor has exited/terminated. Note that any tasks
running will have TASK_LOST status updates automatically generated.
"""
def error(self, driver, message):
"""
Invoked when there is an unrecoverable error in the scheduler or
scheduler driver. The driver will be aborted BEFORE invoking this
callback.
"""
print("Error from Mesos: %s " % message, file=sys.stderr)
class SchedulerDriver(object):
"""
Interface for Mesos scheduler drivers. Users may wish to implement this
class in mock objects for tests.
"""
def start(self):
"""
Starts the scheduler driver. This needs to be called before any other
driver calls are made.
"""
def stop(self, failover=False):
"""
Stops the scheduler driver. If the 'failover' flag is set to False
then it is expected that this framework will never reconnect to Mesos
and all of its executors and tasks can be terminated. Otherwise, all
executors and tasks will remain running (for some framework specific
failover timeout) allowing the scheduler to reconnect (possibly in the
same process, or from a different process, for example, on a different
machine.)
"""
def abort(self):
"""
Aborts the driver so that no more callbacks can be made to the
scheduler. The semantics of abort and stop have deliberately been
separated so that code can detect an aborted driver (i.e., via the
return status of SchedulerDriver.join), and instantiate and start
another driver if desired (from within the same process.)
"""
def join(self):
"""
Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely. The return status of this function can
be used to determine if the driver was aborted (see mesos.proto for a
description of Status).
"""
def run(self):
"""
Starts and immediately joins (i.e., blocks on) the driver.
"""
def requestResources(self, requests):
"""
Requests resources from Mesos (see mesos.proto for a description of
Request and how, for example, to request resources from specific
slaves.) Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously.
"""
def launchTasks(self, offerIds, tasks, filters=None):
"""
Launches the given set of tasks. Any resources remaining (i.e., not
used by the tasks or their executors) will be considered declined.
The specified filters are applied on all unused resources (see
mesos.proto for a description of Filters.) Invoking this function with
an empty collection of tasks declines the offers in entirety (see
Scheduler.declineOffer). Note that passing a single offer is also
supported.
"""
def killTask(self, taskId):
"""
Kills the specified task. Note that attempting to kill a task is
currently not reliable. If, for example, a scheduler fails over while
it was attempting to kill a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be
dropped (these semantics may be changed in the future).
"""
def declineOffer(self, offerId, filters=None):
"""
Declines an offer in its entirety and applies the specified
filters on the resources (see mesos.proto for a description of
Filters). Note that this can be done at any time, it is not
necessary to do this within the Scheduler::resourceOffers
callback.
"""
def reviveOffers(self):
"""
Removes all filters previously set by the framework (via
launchTasks()). This enables the framework to receive offers from
those filtered slaves.
"""
def sendFrameworkMessage(self, executorId, slaveId, data):
"""
Sends a message from the framework to one of its executors. These
messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
# TODO(bmahler): Add reconcileTasks!
class Executor(object):
"""
Base class for Mesos executors. Users' executors should extend this
class to get default implementations of methods they don't override.
"""
def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
"""
Invoked once the executor driver has been able to successfully connect
with Mesos. In particular, a scheduler can pass some data to its
executors through the FrameworkInfo.ExecutorInfo's data field.
"""
def reregistered(self, driver, slaveInfo):
"""
Invoked when the executor re-registers with a restarted slave.
"""
def disconnected(self, driver):
"""
Invoked when the executor becomes "disconnected" from the slave (e.g.,
the slave is being restarted due to an upgrade).
"""
def launchTask(self, driver, task):
"""
Invoked when a task has been launched on this executor (initiated via
Scheduler.launchTasks). Note that this task can be realized with a
thread, a process, or some simple computation, however, no other
callbacks will be invoked on this executor until this callback has
returned.
"""
def killTask(self, driver, taskId):
"""
Invoked when a task running within this executor has been killed (via
SchedulerDriver.killTask). Note that no status update will be sent on
behalf of the executor, the executor is responsible for creating a new
TaskStatus (i.e., with TASK_KILLED) and invoking ExecutorDriver's
sendStatusUpdate.
"""
def frameworkMessage(self, driver, message):
"""
Invoked when a framework message has arrived for this executor. These
messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
def shutdown(self, driver):
"""
Invoked when the executor should terminate all of its currently
running tasks. Note that after Mesos has determined that an executor
has terminated any tasks that the executor did not send terminal
status updates for (e.g., TASK_KILLED, TASK_FINISHED, TASK_FAILED,
etc) a TASK_LOST status update will be created.
"""
def error(self, driver, message):
"""
Invoked when a fatal error has occurred with the executor and/or
executor driver. The driver will be aborted BEFORE invoking this
callback.
"""
print("Error from Mesos: %s" % message, file=sys.stderr)
class ExecutorDriver(object):
"""
Interface for Mesos executor drivers. Users may wish to extend this
class in mock objects for tests.
"""
def start(self):
"""
Starts the executor driver. This needs to be called before any other
driver calls are made.
"""
def stop(self):
"""
Stops the executor driver.
"""
def abort(self):
"""
Aborts the driver so that no more callbacks can be made to the
executor. The semantics of abort and stop have deliberately been
separated so that code can detect an aborted driver (i.e., via the
return status of ExecutorDriver.join), and instantiate and start
another driver if desired (from within the same process, although this
functionality is currently not supported for executors).
"""
def join(self):
"""
Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely. The return status of this function can
be used to determine if the driver was aborted (see mesos.proto for a
description of Status).
"""
def run(self):
"""
Starts and immediately joins (i.e., blocks on) the driver.
"""
def sendStatusUpdate(self, status):
"""
Sends a status update to the framework scheduler, retrying as
necessary until an acknowledgement has been received or the executor
is terminated (in which case, a TASK_LOST status update will be sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements.
"""
def sendFrameworkMessage(self, data):
"""
Sends a message to the framework scheduler. These messages are best
effort; do not expect a framework message to be retransmitted in any
reliable fashion.
"""
|
{
"content_hash": "be19a4d38522463670f613a148658266",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 76,
"avg_line_length": 37.27522935779817,
"alnum_prop": 0.6935761752399705,
"repo_name": "jmanero/mesos-service",
"id": "818f41b97221abaca9f15d530365937f8515287c",
"size": "13120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesos-0.20.0/src/python/interface/src/mesos/interface/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AGS Script",
"bytes": "5016"
},
{
"name": "C",
"bytes": "702382"
},
{
"name": "C++",
"bytes": "4231114"
},
{
"name": "CSS",
"bytes": "40879"
},
{
"name": "Java",
"bytes": "3691420"
},
{
"name": "JavaScript",
"bytes": "1245478"
},
{
"name": "Perl",
"bytes": "298868"
},
{
"name": "Python",
"bytes": "271683"
},
{
"name": "Ruby",
"bytes": "22808"
},
{
"name": "Shell",
"bytes": "762921"
},
{
"name": "XSLT",
"bytes": "6024"
}
],
"symlink_target": ""
}
|
import time
import logging
logger = logging.getLogger()
import os.path
import hwsim_utils
import hostapd
from utils import HwsimSkip
from wlantest import Wlantest
def check_cipher(dev, ap, cipher):
if cipher not in dev.get_capability("pairwise"):
raise HwsimSkip("Cipher %s not supported" % cipher)
params = { "ssid": "test-wpa2-psk",
"wpa_passphrase": "12345678",
"wpa": "2",
"wpa_key_mgmt": "WPA-PSK",
"rsn_pairwise": cipher }
hapd = hostapd.add_ap(ap['ifname'], params)
dev.connect("test-wpa2-psk", psk="12345678",
pairwise=cipher, group=cipher, scan_freq="2412")
hwsim_utils.test_connectivity(dev, hapd)
def check_group_mgmt_cipher(dev, ap, cipher):
wt = Wlantest()
wt.flush()
wt.add_passphrase("12345678")
if cipher not in dev.get_capability("group_mgmt"):
raise HwsimSkip("Cipher %s not supported" % cipher)
params = { "ssid": "test-wpa2-psk-pmf",
"wpa_passphrase": "12345678",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "WPA-PSK-SHA256",
"rsn_pairwise": "CCMP",
"group_mgmt_cipher": cipher }
hapd = hostapd.add_ap(ap['ifname'], params)
dev.connect("test-wpa2-psk-pmf", psk="12345678", ieee80211w="2",
key_mgmt="WPA-PSK-SHA256",
pairwise="CCMP", group="CCMP", scan_freq="2412")
hwsim_utils.test_connectivity(dev, hapd)
hapd.request("DEAUTHENTICATE ff:ff:ff:ff:ff:ff")
dev.wait_disconnected()
if wt.get_bss_counter('valid_bip_mmie', ap['bssid']) < 1:
raise Exception("No valid BIP MMIE seen")
if wt.get_bss_counter('bip_deauth', ap['bssid']) < 1:
raise Exception("No valid BIP deauth seen")
if cipher == "AES-128-CMAC":
group_mgmt = "BIP"
else:
group_mgmt = cipher
res = wt.info_bss('group_mgmt', ap['bssid']).strip()
if res != group_mgmt:
raise Exception("Unexpected group mgmt cipher: " + res)
def test_ap_cipher_tkip(dev, apdev):
"""WPA2-PSK/TKIP connection"""
check_cipher(dev[0], apdev[0], "TKIP")
def test_ap_cipher_tkip_countermeasures_ap(dev, apdev):
"""WPA-PSK/TKIP countermeasures (detected by AP)"""
testfile = "/sys/kernel/debug/ieee80211/%s/netdev:%s/tkip_mic_test" % (dev[0].get_driver_status_field("phyname"), dev[0].ifname)
if not os.path.exists(testfile):
raise HwsimSkip("tkip_mic_test not supported in mac80211")
params = { "ssid": "tkip-countermeasures",
"wpa_passphrase": "12345678",
"wpa": "1",
"wpa_key_mgmt": "WPA-PSK",
"wpa_pairwise": "TKIP" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("tkip-countermeasures", psk="12345678",
pairwise="TKIP", group="TKIP", scan_freq="2412")
dev[0].dump_monitor()
with open(testfile, "w") as f:
f.write(apdev[0]['bssid'])
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection on first Michael MIC failure")
with open(testfile, "w") as f:
f.write("ff:ff:ff:ff:ff:ff")
ev = dev[0].wait_disconnected(timeout=10,
error="No disconnection after two Michael MIC failures")
if "reason=14" not in ev:
raise Exception("Unexpected disconnection reason: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection during TKIP countermeasures")
def test_ap_cipher_tkip_countermeasures_sta(dev, apdev):
"""WPA-PSK/TKIP countermeasures (detected by STA)"""
params = { "ssid": "tkip-countermeasures",
"wpa_passphrase": "12345678",
"wpa": "1",
"wpa_key_mgmt": "WPA-PSK",
"wpa_pairwise": "TKIP" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
testfile = "/sys/kernel/debug/ieee80211/%s/netdev:%s/tkip_mic_test" % (hapd.get_driver_status_field("phyname"), apdev[0]['ifname'])
if not os.path.exists(testfile):
raise HwsimSkip("tkip_mic_test not supported in mac80211")
dev[0].connect("tkip-countermeasures", psk="12345678",
pairwise="TKIP", group="TKIP", scan_freq="2412")
dev[0].dump_monitor()
with open(testfile, "w") as f:
f.write(dev[0].p2p_dev_addr())
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection on first Michael MIC failure")
with open(testfile, "w") as f:
f.write("ff:ff:ff:ff:ff:ff")
ev = dev[0].wait_disconnected(timeout=10,
error="No disconnection after two Michael MIC failures")
if "reason=14 locally_generated=1" not in ev:
raise Exception("Unexpected disconnection reason: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection during TKIP countermeasures")
def test_ap_cipher_ccmp(dev, apdev):
"""WPA2-PSK/CCMP connection"""
check_cipher(dev[0], apdev[0], "CCMP")
def test_ap_cipher_gcmp(dev, apdev):
"""WPA2-PSK/GCMP connection"""
check_cipher(dev[0], apdev[0], "GCMP")
def test_ap_cipher_ccmp_256(dev, apdev):
"""WPA2-PSK/CCMP-256 connection"""
check_cipher(dev[0], apdev[0], "CCMP-256")
def test_ap_cipher_gcmp_256(dev, apdev):
"""WPA2-PSK/GCMP-256 connection"""
check_cipher(dev[0], apdev[0], "GCMP-256")
def test_ap_cipher_mixed_wpa_wpa2(dev, apdev):
"""WPA2-PSK/CCMP/ and WPA-PSK/TKIP mixed configuration"""
ssid = "test-wpa-wpa2-psk"
passphrase = "12345678"
params = { "ssid": ssid,
"wpa_passphrase": passphrase,
"wpa": "3",
"wpa_key_mgmt": "WPA-PSK",
"rsn_pairwise": "CCMP",
"wpa_pairwise": "TKIP" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect(ssid, psk=passphrase, proto="WPA2",
pairwise="CCMP", group="TKIP", scan_freq="2412")
status = dev[0].get_status()
if status['key_mgmt'] != 'WPA2-PSK':
raise Exception("Incorrect key_mgmt reported")
if status['pairwise_cipher'] != 'CCMP':
raise Exception("Incorrect pairwise_cipher reported")
if status['group_cipher'] != 'TKIP':
raise Exception("Incorrect group_cipher reported")
bss = dev[0].get_bss(apdev[0]['bssid'])
if bss['ssid'] != ssid:
raise Exception("Unexpected SSID in the BSS entry")
if "[WPA-PSK-TKIP]" not in bss['flags']:
raise Exception("Missing BSS flag WPA-PSK-TKIP")
if "[WPA2-PSK-CCMP]" not in bss['flags']:
raise Exception("Missing BSS flag WPA2-PSK-CCMP")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[1].connect(ssid, psk=passphrase, proto="WPA",
pairwise="TKIP", group="TKIP", scan_freq="2412")
status = dev[1].get_status()
if status['key_mgmt'] != 'WPA-PSK':
raise Exception("Incorrect key_mgmt reported")
if status['pairwise_cipher'] != 'TKIP':
raise Exception("Incorrect pairwise_cipher reported")
if status['group_cipher'] != 'TKIP':
raise Exception("Incorrect group_cipher reported")
hwsim_utils.test_connectivity(dev[1], hapd)
hwsim_utils.test_connectivity(dev[0], dev[1])
def test_ap_cipher_bip(dev, apdev):
"""WPA2-PSK with BIP"""
check_group_mgmt_cipher(dev[0], apdev[0], "AES-128-CMAC")
def test_ap_cipher_bip_gmac_128(dev, apdev):
"""WPA2-PSK with BIP-GMAC-128"""
check_group_mgmt_cipher(dev[0], apdev[0], "BIP-GMAC-128")
def test_ap_cipher_bip_gmac_256(dev, apdev):
"""WPA2-PSK with BIP-GMAC-256"""
check_group_mgmt_cipher(dev[0], apdev[0], "BIP-GMAC-256")
def test_ap_cipher_bip_cmac_256(dev, apdev):
"""WPA2-PSK with BIP-CMAC-256"""
check_group_mgmt_cipher(dev[0], apdev[0], "BIP-CMAC-256")
|
{
"content_hash": "6f083f3f24fbecf4f20ca7505873827d",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 135,
"avg_line_length": 40.36180904522613,
"alnum_prop": 0.6078187250996016,
"repo_name": "wangybgit/Chameleon",
"id": "a1ddcaddb9111ccbf050d62f3ef6843751802aeb",
"size": "8204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hostapd-OpenWrt/tests/hwsim/test_ap_ciphers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16438785"
},
{
"name": "C++",
"bytes": "156647"
},
{
"name": "Gnuplot",
"bytes": "254"
},
{
"name": "Java",
"bytes": "42014"
},
{
"name": "M4",
"bytes": "4564"
},
{
"name": "Makefile",
"bytes": "128574"
},
{
"name": "Objective-C",
"bytes": "5563"
},
{
"name": "PHP",
"bytes": "26703"
},
{
"name": "Python",
"bytes": "2121905"
},
{
"name": "QMake",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "87205"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import zlib
import ctypes
import os.path
import sqlite3
try:
import pefile
except ImportError, err:
print "Error while importing pefile module: %s" % str(err)
print "Please make sure it is installed: http://code.google.com/p/pefile/"
sys.exit(1)
# This is a list of interesting DLLs to use if not traversing a directory
INTERESTING_DLLS = [
'kernel32.dll', 'comctl32.dll', 'advapi32.dll', 'comdlg32.dll',
'gdi32.dll', 'msvcrt.dll', 'netapi32.dll', 'ntdll.dll',
'ntoskrnl.exe', 'oleaut32.dll', 'psapi.dll', 'shell32.dll',
'shlwapi.dll', 'srsvc.dll', 'urlmon.dll', 'user32.dll',
'winhttp.dll', 'wininet.dll', 'ws2_32.dll', 'wship6.dll',
'advpack.dll',
]
VERBOSE=False
############################################################
# SQL queries
############################################################
sql_testTableExists='''
SELECT name
FROM sqlite_master
WHERE name=?;
'''
sql_create_tables='''
create table symbol_hashes (
hash_key integer primary key,
hash_val integer,
hash_type integer,
lib_key integer,
symbol_name varchar(256)
);
create table source_libs (
lib_key integer primary key,
lib_name varchar(256)
);
create table hash_types (
hash_type integer primary key,
hash_size integer,
hash_name varchar(256),
hash_code text
);
--Index just the hash vals for when we don't know the hash type
create index idx_hash_val on symbol_hashes (hash_val);
--Index with hash_type prefix for when we know the type we're
-- looking for
create index idx_hash_type_hash_val on symbol_hashes (hash_type, hash_val);
'''
sql_add_hash_type='''
insert into hash_types (
hash_size,
hash_name,
hash_code
) values (?,?,?);
'''
sql_get_hash_type='''
select
hash_type
from hash_types
where hash_name=?;
'''
sql_get_hash_type_hash_size='''
select
hash_type
from hash_types
where hash_name=? and hash_size=?;
'''
sql_add_source_lib='''
insert into source_libs (
lib_name
) values (?);
'''
sql_add_symbol_hash='''
insert into symbol_hashes (
hash_val,
hash_type,
lib_key,
symbol_name
) values (?,?,?,?);
'''
sql_lookup_hash_value='''
select
hash_key,
hash_val,
hash_type,
source_lib,
symbol_name
from symbol_hashes
where hash_val=?;
'''
sql_lookup_hash_value_hash_type='''
select
hash_key,
hash_val,
hash_type,
source_lib,
symbol_name
from symbol_hashes
where hash_val=? and hash_type=?;
'''
sql_find_source_lib_by_name='''
select
lib_key
from source_libs
where lib_name=?;
'''
sql_find_symbol_hash_type_lib_symbol='''
select
hash_key
from symbol_hashes
where hash_val=? and hash_type=? and lib_key=? and symbol_name=?;
'''
############################################################
# Start of functions to implement operator primitives
############################################################
ROTATE_BITMASK = {
8 : 0xff,
16 : 0xffff,
32 : 0xffffffff,
64 : 0xffffffffffffffff,
}
def rcr(inVal, numShifts, cb, dataSize=32):
'''rotate carry right instruction emulation'''
    if numShifts == 0:
        # no rotation: return value and carry unchanged, as a tuple like the normal path
        return (inVal, cb & 1)
if (numShifts < 0) or (numShifts > dataSize):
raise ValueError('Bad numShifts')
#make sure carry in bit is only 0 or 1
cb = cb & 1
if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
raise ValueError('Bad dataSize')
#or the carry value in there
bitMask = ROTATE_BITMASK[dataSize]
inVal = inVal | (cb << dataSize)
x = (dataSize - numShifts) + 1
res = (inVal >> numShifts) | (inVal << x)
return (bitMask & res, 1 & (res >> dataSize))
def ror(inVal, numShifts, dataSize=32):
'''rotate right instruction emulation'''
if numShifts == 0:
return inVal
if (numShifts < 0) or (numShifts > dataSize):
raise ValueError('Bad numShifts')
if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
raise ValueError('Bad dataSize')
bitMask = ROTATE_BITMASK[dataSize]
return bitMask & ((inVal >> numShifts) | (inVal << (dataSize-numShifts)))
def rol(inVal, numShifts, dataSize=32):
'''rotate left instruction emulation'''
if numShifts == 0:
return inVal
if (numShifts < 0) or (numShifts > dataSize):
raise ValueError('Bad numShifts')
if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
raise ValueError('Bad dataSize')
bitMask = ROTATE_BITMASK[dataSize]
currVal = inVal
return bitMask & ((inVal << numShifts) | (inVal >> (dataSize-numShifts)))
############################################################
# Start of hash implementations
############################################################
def poisonIvyHash(inStr,fName):
#need a null at the end of the string
if inStr[-1] != '\x00':
inStr = inStr + '\x00'
cx = 0xffff
dx = 0xffff
for b1 in inStr:
bx = 0
ax = ord(b1) ^ (cx & 0xff)
cx = ((cx>>8)&0xff) | ((dx&0xff)<<8)
dx = ((dx>>8)&0xff) | 0x800
while (dx & 0xff00) != 0:
c_in = bx & 1
bx = bx >> 1
ax, c_out = rcr(ax, 1, c_in, 16)
if c_out != 0:
ax = ax ^ 0x8320
bx = bx ^ 0xedb8
dx = (dx&0xff) | (((((dx>>8)&0xff)-1)&0xff)<<8)
cx = cx ^ ax
dx = dx ^ bx
dx = 0xffff & ~dx
cx = 0xffff & ~cx
return 0xffffffff & ((dx<<16) | cx)
pseudocode_poisonIvyHash = '''Too hard to explain.\nString hash function from POISON IVY RAT.\nSee code for information'''
def rol3XorEax(inString,fName):
if inString is None:
return 0
ecx = 0
eax = 0
for i in inString:
eax = eax | ord(i)
ecx = ecx ^ eax
ecx = rol(ecx, 0x3, 32)
ecx += 1
eax = 0xffffffff & (eax << 8)
return ecx
pseudocode_rol3XorEax = '''eax := 0;
ecx := 0;
for c in input_string {
eax := eax | c ;
ecx := ecx ^ eax;
ecx := ROL(ecx, 0x3);
    ecx := ecx + 1;
eax := 0xffffffff & (eax << 8);
};
return ecx;
'''
def rol7AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = rol(val, 0x7, 32)
val += ord(i)
return val
pseudocode_rol7AddHash32 = '''acc := 0;
for c in input_string {
    acc := ROL(acc, 7);
acc := acc + c;
}
'''
def rol5AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = rol(val, 0x5, 32)
val += ord(i)
return val
pseudocode_rol5AddHash32 = '''acc := 0;
for c in input_string {
    acc := ROL(acc, 5);
acc := acc + c;
}
'''
def ror7AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = ror(val, 0x7, 32)
val += ord(i)
return val
pseudocode_ror7AddHash32 = '''acc := 0;
for c in input_string {
    acc := ROR(acc, 7);
acc := acc + c;
}
'''
def ror9AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = ror(val, 0x9, 32)
val += ord(i)
return val
pseudocode_ror9AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 9);
acc := acc + c;
}
'''
def ror11AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = ror(val, 0xb, 32)
val += ord(i)
return val
pseudocode_ror11AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 11);
acc := acc + c;
}
'''
def ror13AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
val = ror(val, 0xd, 32)
val += ord(i)
return val
pseudocode_ror13AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
'''
def ror13AddWithNullHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString + "\x00":
val = ror(val, 0xd, 32)
val += ord(i)
return val
pseudocode_ror13AddWithNullHash32 = '''acc := 0;
for c in input_string_with_trailing_NULL {
acc := ROR(acc, 13);
acc := acc + c;
}
'''
def ror13AddHash32Sub1(inString,fName):
'''Same as ror13AddHash32, but subtract 1 afterwards'''
return ror13AddHash32(inString,fName) - 1
pseudocode_ror13AddHash32Sub1 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
acc := acc - 1;
'''
def shl7shr19Hash32(inString,fName):
val = 0
for i in inString:
edx = 0xffffffff & (val << 7)
ecx = 0xffffffff & (val >> 0x19)
eax = edx | ecx
t = 0xff & (ord(i) ^ 0xf4)
val = eax ^ t
return val
pseudocode_shl7shr19Hash32 = '''acc := 0;
for c in input_string {
t0 = (acc << 7);
t1 = (acc >> 0x19);
t2 = t0 | t1;
acc = t2 ^ c ^ 0xf4;
}
'''
def sll1AddHash32(inString,fName):
if inString is None:
return 0
val = 0
for i in inString:
b = ord(i)
b = 0xff & (b | 0x60)
val = val + b
val = val << 1
val = 0xffffffff & val
return val
pseudocode_sll1AddHash32 = '''acc := 0;
for c in input_string {
acc = acc + (c | 0x60);
acc = acc << 1;
}
'''
def playWith0xedb88320Hash(inString,fName):
esi = 0xFFFFFFFF
for d in inString:
c = ord(d)
for i in range(8):
eax = c
eax ^= esi
b0 = eax & 0xFF
b0 &= 0x01
b0 = -b0
if b0 % 2 == 0: # sbb eax, eax
eax = 0
else:
eax = 0xFFFFFFFF
eax &= 0xedb88320
esi >>= 1
esi ^= eax
c >>= 1
return esi ^ 0xFFFFFFFF
pseudocode_playWith0xedb88320Hash = \
'''Too hard to explain, AND's with 0xedb88320, though.
String hash function from Gatak sample.
See code for information'''
def crc32(inString,fName):
return 0xffffffff & (zlib.crc32(inString))
def ror13AddHash32AddDll(inString,fName):
dllHash = 0
for c in fName:
dllHash = ror(dllHash, 0xd, 32)
if ord(c) < 97:
dllHash = int(dllHash) + ord(c)
else:
dllHash = int(dllHash) + ord(c) - 32
dllHash = ror(dllHash, 0xd, 32)
dllHash = ror(dllHash, 0xd, 32)
dllHash = ror(dllHash, 0xd, 32)
if inString is None:
return 0
val = 0
for i in inString:
val = ror(val, 0xd, 32)
val += ord(i)
val = ror(val, 0xd, 32)
val += dllHash
if val >= 4294967296:
val -= 4294967296
return val
pseudocode_ror13AddHash32AddDll = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
acc := acc + ror13add(DllName);
'''
def mult21AddHash32(inString,fName):
acc = 0
for i in inString:
acc = 0xffffffff & (acc * 0x21)
acc = 0xffffffff & (acc + ord(i))
return acc
pseudocode_hashMult21 = '''acc := 0;
for c in input_string {
acc := acc * 0x21;
acc := acc + c;
}
'''
def add1505Shl5Hash32(inString,fName):
val = 0x1505
for ch in inString:
val += (val << 5)
val &= 0xFFFFFFFF
val += ord(ch)
val &= 0xFFFFFFFF
return val
pseudocode_add1505Shl5Hash32 = '''val := 0x1505;
for c in input_string {
val := val + (val << 5);
val := val + c;
}
'''
# The list of tuples of (supported hash name, hash size, pseudo_code)
HASH_TYPES = [
('ror13AddHash32', 32, pseudocode_ror13AddHash32),
('ror13AddWithNullHash32', 32, pseudocode_ror13AddWithNullHash32),
('ror13AddHash32AddDll', 32, pseudocode_ror13AddHash32AddDll),
('poisonIvyHash', 32, pseudocode_poisonIvyHash),
('rol7AddHash32', 32, pseudocode_rol7AddHash32),
('rol5AddHash32', 32, pseudocode_rol5AddHash32),
('rol3XorEax', 32, pseudocode_rol3XorEax),
('ror7AddHash32', 32, pseudocode_ror7AddHash32),
('ror9AddHash32', 32, pseudocode_ror9AddHash32),
('ror11AddHash32', 32, pseudocode_ror11AddHash32),
    ('ror13AddHash32Sub1', 32, pseudocode_ror13AddHash32Sub1),
('shl7shr19Hash32', 32, pseudocode_shl7shr19Hash32),
('sll1AddHash32', 32, pseudocode_sll1AddHash32),
('playWith0xedb88320Hash', 32, pseudocode_playWith0xedb88320Hash),
('crc32', 32, 'Standard crc32'),
('mult21AddHash32', 32, pseudocode_hashMult21),
('add1505Shl5Hash32', 32, pseudocode_add1505Shl5Hash32),
]
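# A small convenience sketch (not part of the original tool): look up one of
# the hash implementations above by name and apply it to a symbol/DLL pair.
# Handy for spot-checking a single value without building the database.
def computeHashByName(hashName, symbolName, dllName):
    meth = globals().get(hashName)
    if meth is None:
        raise ValueError('Unknown hash name: %s' % hashName)
    return meth(symbolName, dllName)
# Example (prints whatever the chosen algorithm yields for this symbol):
#   print '%08x' % computeHashByName('ror13AddHash32', 'LoadLibraryA', 'kernel32.dll')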
############################################################
# Database creator
############################################################
class ShellcodeDbCreator(object):
def __init__(self, dbPath, dirName):
self.dbPath = dbPath
self.dirName = dirName
self.conn = sqlite3.connect(dbPath)
self.initDb()
self.initHashesDict()
def close(self):
self.conn.close()
self.conn = None
def run(self):
#process all the files in the given directory
self.processDir(self.dirName)
def initDb(self):
#check for tables, create if not present
if not self.checkForTable('symbol_hashes'):
cur = self.conn.executescript(sql_create_tables)
self.conn.commit()
#add the known hashtypes
for hashName, hashSize, hashCode in HASH_TYPES:
self.addHashType(hashName, hashSize, hashCode)
def initHashesDict(self):
        #The hashes dict stores tuples of (hashtype_key, hash method),
        # indexed by name. Used to iterate over when processing export names.
self.hashes = {}
for hashName, hashSize, hashCode in HASH_TYPES:
try:
meth = globals()[hashName]
hashType = self.getHashTypeByName(hashName)
self.hashes[hashName] = (hashType, meth)
            except (KeyError, AttributeError), err:
print "Could not find method %s" % hashName
def processDir(self, dirName):
for fName in os.listdir(dirName):
filePath = os.path.join(dirName, fName)
if not os.path.isfile(filePath):
#print "Could not find file: %s. Skipping" % fName
continue
try:
peFile = pefile.PE(filePath)
if ((not hasattr(peFile, "DIRECTORY_ENTRY_EXPORT")) or (peFile.DIRECTORY_ENTRY_EXPORT is None)):
if VERBOSE:
print "No exports: %s" % filePath
else:
#add the library to the lib table
print "Processing file %s" % filePath
time1 = time.time()
libKey = self.addSourceLib(fName)
symCount = 0
for sym in peFile.DIRECTORY_ENTRY_EXPORT.symbols:
if sym.name is not None:
symCount += 1
for hashName in self.hashes.keys():
hashType, hashMeth = self.hashes[hashName]
#print "Trying to hash: %s:%s" % (hashName, sym.name)
symHash = hashMeth(sym.name,fName)
#print " Done hashing: %08x:%s" % (symHash, sym.name)
if symHash is not None:
self.addSymbolHash(symHash, hashType, libKey, sym.name)
#commit outstanding transaction
self.conn.commit()
time2 = time.time()
timeDiff = time2 - time1
print "Processed %d export symbols in %.02f seconds: %s" % (symCount, timeDiff, filePath)
except pefile.PEFormatError, err:
if VERBOSE:
print "Skipping non-PE file %s: %s" % (filePath, str(err))
except Exception, err:
if VERBOSE:
print "Skipping %s: %s" % (filePath, str(err))
raise
def addHashType(self, hashName, hashSize, code):
#check if the hashname already exists
cur = self.conn.execute(sql_get_hash_type_hash_size, (hashName, hashSize))
retList = cur.fetchall()
if len(retList) > 0:
return
cur = self.conn.execute(sql_add_hash_type, (hashSize, hashName, code))
self.conn.commit()
if cur is None:
raise RuntimeError("Cursor is None following hash type insert")
if cur.lastrowid is None:
raise RuntimeError("lastrowid is None following hash type insert")
return cur.lastrowid
def getHashTypeByName(self, hashName):
'''
Returns None if the hashName is not found, else returns
the integer hash type key for the requested hash
'''
cur = self.conn.execute(sql_get_hash_type, (hashName, ))
retList = cur.fetchall()
if len(retList) == 0:
return None
elif len(retList) > 1:
print "ERROR: database in odd state. Multiple entries for hash name: %s" % hashName
#always return first entry, even on error
return retList[0][0]
def getSourceLibByName(self, libName):
'''
Returns None if the libName is not found, else returns
        the integer key for the requested source lib.
'''
cur = self.conn.execute(sql_find_source_lib_by_name, (libName, ))
retList = cur.fetchall()
if len(retList) == 0:
return None
elif len(retList) > 1:
print "ERROR: database in odd state. Multiple entries for source lib: %s" % libName
#always return first entry, even on error
return retList[0][0]
def addSourceLib(self, libName):
'''
Adds the given source lib to the db (if not already present) & returns the lib key.
'''
#lookup the library, insert if it doesn't exist
libKey = self.getSourceLibByName(libName)
if libKey is None:
cur = self.conn.execute(sql_add_source_lib, (libName, ))
self.conn.commit()
if cur is None:
raise RuntimeError("Cursor is None following source lib insert")
if cur.lastrowid is None:
raise RuntimeError("lastrowid is None following source lib insert")
return cur.lastrowid
else:
return libKey
def addSymbolHash(self, hashVal, hashType, libKey, symbolName):
'''Note: requires explicit commit afterwards by caller'''
#determine if tuple (hashVal, hashType, libKey, symbolName) already exists or not
#print "Trying to add symbol: %s %s, %s %s, %s %s, %s %s" % (
# type(hashVal), str(hashVal),
# type(hashType), str(hashType),
# type(libKey), str(libKey),
# type(symbolName), str(symbolName))
cur = self.conn.execute(sql_find_symbol_hash_type_lib_symbol,
(hashVal, hashType, libKey, symbolName)
)
retList = cur.fetchall()
if len(retList) == 0:
#insert it now
cur = self.conn.execute(sql_add_symbol_hash,
(hashVal, hashType, libKey, symbolName)
)
if cur is None:
raise RuntimeError("Cursor is None following symbol hash insert")
if cur.lastrowid is None:
raise RuntimeError("lastrowid is None following symbol hash insert")
return cur.lastrowid
else:
#print "Skipping duplicate hash: %08x %08x %08x %s" % (hashVal, hashType, libKey, symbolName)
pass
def checkForTable(self, tableName):
'''
Returns True if the given table name already exists, else returns False.
'''
cur = self.conn.execute(sql_testTableExists, (tableName,))
row = cur.fetchone()
if row is None:
#raise UnpreparedDatabaseException("Missing database table: %s" % tableName)
return False
return True
if __name__ == '__main__':
if len(sys.argv) != 3:
print "python %s <db_path> <dll_dir>" % sys.argv[0]
sys.exit(1)
dbPath = sys.argv[1]
walkPath = sys.argv[2]
hasher = ShellcodeDbCreator(dbPath, walkPath)
hasher.run()
hasher.close()
print "Done with symbol name hashing"
|
{
"content_hash": "a4bf578568a6cbedc6a4f7d4d8a707ab",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 122,
"avg_line_length": 28.82608695652174,
"alnum_prop": 0.5552960638349632,
"repo_name": "meilinxiaoxue/flare-ida",
"id": "e748bf41c08c2f63bc08ca20817fe3670ecc9ace",
"size": "21583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shellcode_hashes/make_sc_hash_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "205802"
}
],
"symlink_target": ""
}
|
from .settings import *
DEBUG = os.environ.get('DEBUG', False)
EMBEDLY_KEY = os.environ.get('EMBEDLY_KEY')
SECRET_KEY = os.environ.get('SECRET_KEY')
# Amazon credentials
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'moscowdjango-staging'
AWS_QUERYSTRING_AUTH = False
AWS_CALLING_FORMAT = 2 # SUBDOMAIN
AWS_S3_SECURE_URLS = True
# Media & static
DEFAULT_FILE_STORAGE = 'moscowdjango.amazon.DefaultStorage'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
DEFAULT_S3_PATH = "media"
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = 'https://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME
# Django compressor
COMPRESS_ENABLED = False
|
{
"content_hash": "57a98fe40da117f1c4b8be13ccc2934a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 32.041666666666664,
"alnum_prop": 0.7464239271781534,
"repo_name": "VladimirFilonov/moscowdjango",
"id": "4249b99ba1671553a7e98c423696b48c5152b056",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moscowdjango/settings_staging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84132"
},
{
"name": "HTML",
"bytes": "59019"
},
{
"name": "JavaScript",
"bytes": "43423"
},
{
"name": "Python",
"bytes": "71927"
}
],
"symlink_target": ""
}
|
"""
Functionality for different replica types in the cloud storage system.
"""
##########################################################################
## Imports
##########################################################################
from .base import *
from .store import *
from .access import *
from .consensus import RaftReplica
from .consensus import TagReplica
from .consensus import FloatedRaftReplica
from .consensus import TieredRaftReplica
from .eventual import EventualReplica
from .federated import FederatedRaftReplica
from .federated import FederatedEventualReplica
from .federated import StentorEventualReplica
from cloudscope.config import settings
from cloudscope.exceptions import ImproperlyConfigured
##########################################################################
## Type Factory
##########################################################################
ReplicaTypes = {
'default': {
Consistency.STRONG: RaftReplica,
Consistency.EVENTUAL: EventualReplica,
Consistency.STENTOR: StentorEventualReplica,
Consistency.TAG: TagReplica,
Consistency.RAFT: RaftReplica,
},
'tiered': {
Consistency.STRONG: TieredRaftReplica,
},
'floated': {
Consistency.STRONG: FloatedRaftReplica,
},
'federated': {
Consistency.STRONG: FederatedRaftReplica,
Consistency.EVENTUAL: FederatedEventualReplica,
Consistency.STENTOR: StentorEventualReplica,
Consistency.RAFT: FederatedRaftReplica,
},
}
def replica_factory(simulation, **kwargs):
"""
Factory to create a replica with the correct type, based on consistency.
"""
# Determine the consistency level of the simulation
consistency = Consistency.get(kwargs.get(
'consistency', settings.simulation.default_consistency
))
# Determine the integration level of the simulation
integration = settings.simulation.integration
if integration not in ReplicaTypes:
raise ImproperlyConfigured(
'Integration "{}" not recognized, use one of {}'.format(
integration, ", ".join(ReplicaTypes.keys())
)
)
    # Check that the desired consistency matches the integration level
# If not fall back to the default replica type with a warning
if consistency not in ReplicaTypes[integration]:
simulation.logger.warn(
'Consistency level "{}" not implemented in {}'.format(
consistency, integration
)
)
integration = 'default'
# Return a replica with the given consistency level for the integration.
return ReplicaTypes[integration][consistency](simulation, **kwargs)
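# Hedged usage sketch (illustrative names only): the simulation object and the
# node kwargs normally come from a cloudscope topology description, e.g.
#   replica = replica_factory(sim, consistency='eventual', label='r0')
# Unknown consistency/integration pairs fall back to the 'default' table above
# with a warning, as implemented in the function.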
|
{
"content_hash": "ccaca74eca266f630a4eafbd4ce111db",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 32.734939759036145,
"alnum_prop": 0.6231137283768863,
"repo_name": "bbengfort/cloudscope",
"id": "b25325ac4d9ff46e8b997dc9f5367a96a6dd831b",
"size": "3045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudscope/replica/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2096"
},
{
"name": "HTML",
"bytes": "14259"
},
{
"name": "JavaScript",
"bytes": "30593"
},
{
"name": "Jupyter Notebook",
"bytes": "22404160"
},
{
"name": "Makefile",
"bytes": "832"
},
{
"name": "Python",
"bytes": "757413"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Takes two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied, or have operations
    that affect the state but not the database, and so on.
"""
def __init__(self, database_operations=None, state_operations=None):
self.database_operations = database_operations or []
self.state_operations = state_operations or []
def deconstruct(self):
kwargs = {}
if self.database_operations:
kwargs['database_operations'] = self.database_operations
if self.state_operations:
kwargs['state_operations'] = self.state_operations
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
for database_operation in self.database_operations:
to_state = from_state.clone()
database_operation.state_forwards(app_label, to_state)
database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
from_state = to_state
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
base_state = to_state
for pos, database_operation in enumerate(reversed(self.database_operations)):
to_state = base_state.clone()
for dbop in self.database_operations[:-(pos + 1)]:
dbop.state_forwards(app_label, to_state)
from_state = base_state.clone()
database_operation.state_forwards(app_label, from_state)
database_operation.database_backwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Custom state/database change combination"
class RunSQL(Operation):
"""
Runs some raw SQL. A reverse SQL statement may be provided.
Also accepts a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ''
def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None):
self.sql = sql
self.reverse_sql = reverse_sql
self.state_operations = state_operations or []
self.hints = hints or {}
def deconstruct(self):
kwargs = {
'sql': self.sql,
}
if self.reverse_sql is not None:
kwargs['reverse_sql'] = self.reverse_sql
if self.state_operations:
kwargs['state_operations'] = self.state_operations
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_sql is not None
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if self.allowed_to_migrate(schema_editor.connection.alias, None, hints=self.hints):
self._run_sql(schema_editor, self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_sql is None:
raise NotImplementedError("You cannot reverse this operation")
if self.allowed_to_migrate(schema_editor.connection.alias, None, hints=self.hints):
self._run_sql(schema_editor, self.reverse_sql)
def describe(self):
return "Raw SQL operation"
def _run_sql(self, schema_editor, sqls):
if isinstance(sqls, (list, tuple)):
for sql in sqls:
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
schema_editor.execute(sql, params=params)
elif sqls != RunSQL.noop:
statements = schema_editor.connection.ops.prepare_sql_script(sqls)
for statement in statements:
schema_editor.execute(statement, params=None)
class RunPython(Operation):
"""
Runs Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
def __init__(self, code, reverse_code=None, atomic=True, hints=None):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
def deconstruct(self):
kwargs = {
'code': self.code,
}
if self.reverse_code is not None:
kwargs['reverse_code'] = self.reverse_code
if self.atomic is not True:
kwargs['atomic'] = self.atomic
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if self.allowed_to_migrate(schema_editor.connection.alias, None, hints=self.hints):
# We now execute the Python code in a context that contains a 'models'
# object, representing the versioned models as an app registry.
# We could try to override the global cache, but then people will still
# use direct imports, so we go with a documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if self.allowed_to_migrate(schema_editor.connection.alias, None, hints=self.hints):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None
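# Hedged illustration (not part of this module): how these operations typically
# appear in a project migration. The app, model and SQL names below are invented
# for the example; kept as comments to avoid importing django.db here.
#
#   from django.db import migrations
#
#   def forwards(apps, schema_editor):
#       # Use the versioned model from the migration state, not a direct import.
#       Author = apps.get_model('library', 'Author')
#       Author.objects.filter(name='').update(name='unknown')
#
#   class Migration(migrations.Migration):
#       dependencies = [('library', '0001_initial')]
#       operations = [
#           migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),
#           migrations.RunSQL("CREATE INDEX author_name_idx ON library_author (name);",
#                             reverse_sql="DROP INDEX author_name_idx;"),
#       ]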
|
{
"content_hash": "d7aaf5f513ad6e6407e2642db94a2aa9",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 97,
"avg_line_length": 38.15625,
"alnum_prop": 0.6179361179361179,
"repo_name": "runekaagaard/django-contrib-locking",
"id": "91261f724fa93668c2a82281880e291e90375ae3",
"size": "7326",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/db/migrations/operations/special.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53566"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10638047"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
import asyncio
from asyncio import coroutine
from zeroservices import BaseService
from zeroservices.resources import NoActionHandler
from ..utils import test_medium, TestCase, _async_test, _create_test_resource_service
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class _BaseCollectionTestCase(TestCase):
def setUp(self):
asyncio.set_event_loop(None)
self.loop = asyncio.new_event_loop()
self.service = _create_test_resource_service('test_service', self.loop)
self.loop.run_until_complete(self.service.start())
self.service2 = _create_test_resource_service('test_listener', self.loop)
self.loop.run_until_complete(self.service2.start())
# Resource
self.resource_id = 'UUID-1'
self.resource_data = {'field1': 1, 'field2': 2}
self.resource_name = 'test_collection'
self.event_payload = {'resource_id': self.resource_id,
'resource_name': self.resource_name}
self.maxDiff = None
def tearDown(self):
self.service.close()
self.service2.close()
self.loop.stop()
self.loop.close()
self.service.medium.check_leak()
self.service2.medium.check_leak()
def _create(self, resource_data, resource_id):
message = {'action': 'create', 'resource_id': resource_id,
'resource_data': resource_data}
yield from self.collection.on_message(**message)
@_async_test
def test_create(self):
message = {'action': 'create', 'resource_id': self.resource_id,
'resource_data': self.resource_data}
result = yield from self.collection.on_message(**message)
self.assertEqual(result, {'resource_id': self.resource_id})
expected_payload = self.event_payload.copy()
expected_payload.update({'action': 'create',
'resource_data': self.resource_data})
event_topic = '%s.create.%s' % (self.resource_name, self.resource_id)
self.service2.on_event_mock.assert_called_once_with(event_topic,
**expected_payload)
self.service2.on_event_mock.reset_mock()
@_async_test
def test_get(self):
yield from self.test_create()
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
{'resource_id': self.resource_id,
'resource_data': self.resource_data})
@_async_test
def test_update(self):
yield from self.test_create()
patch = {'field3': 3, 'field4': 4}
query = {'$set': patch}
message = {'action': 'patch', 'resource_id': self.resource_id,
'patch': query}
expected_document = self.resource_data.copy()
expected_document.update(patch)
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
expected_document)
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
{'resource_id': self.resource_id,
'resource_data': expected_document})
expected_payload = self.event_payload.copy()
expected_payload.update({'action': 'patch', 'patch': query})
event_topic = '%s.patch.%s' % (self.resource_name, self.resource_id)
self.service2.on_event_mock.assert_called_once_with(event_topic,
**expected_payload)
@_async_test
def test_delete(self):
yield from self.test_create()
message = {'action': 'delete', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
'OK')
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
'NOK')
expected_payload = self.event_payload.copy()
expected_payload.update({'action': 'delete'})
event_topic = '%s.delete.%s' % (self.resource_name, self.resource_id)
self.service2.on_event_mock.assert_called_once_with(event_topic,
**expected_payload)
# Add another link on same relation
@_async_test
def test_add_link(self):
yield from self.test_create()
relation = 'relation_type'
target_id = ['collection', 'target']
title = 'title'
message = {'action': 'add_link', 'resource_id': self.resource_id,
'relation': relation, 'target_id': target_id,
'title': title}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
"OK")
# Check that document is updated
expected_data = self.resource_data.copy()
expected_data.update({'_links':
{relation: [{"target_id": target_id,
"title": title}],
'latest': {target_id[0]: target_id}}})
expected_document = {'resource_id': self.resource_id,
'resource_data': expected_data}
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
expected_document)
# Check event payload
expected_payload = self.event_payload.copy()
expected_payload.update({'action': 'add_link', 'target_id': target_id,
'title': title, 'relation': relation})
event_topic = '%s.add_link.%s' % (self.resource_name, self.resource_id)
self.service2.on_event_mock.assert_called_once_with(event_topic,
**expected_payload)
# Add another link on same relation
relation = 'relation_type'
target_id2 = ['collection', 'target2']
title2 = 'title2'
message = {'action': 'add_link', 'resource_id': self.resource_id,
'relation': relation, 'target_id': target_id2,
'title': title2}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
"OK")
# Check that document is updated
expected_data = self.resource_data.copy()
expected_data.update({'_links':
{relation: [{"target_id": target_id,
"title": title},
{"target_id": target_id2,
"title": title2}],
'latest': {target_id2[0]: target_id2}}})
expected_document = {'resource_id': self.resource_id,
'resource_data': expected_data}
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
expected_document)
# Add a third link on another relation
relation2 = 'relation_type2'
target_id3 = ['foo', 'bar']
title3 = 'title3'
message = {'action': 'add_link', 'resource_id': self.resource_id,
'relation': relation2, 'target_id': target_id3,
'title': title3}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
"OK")
# Check that document is updated
expected_data = self.resource_data.copy()
expected_data.update({'_links':
{relation: [{"target_id": target_id,
"title": title},
{"target_id": target_id2,
"title": title2}],
relation2: [{"target_id": target_id3,
"title": title3}],
'latest': {target_id2[0]: target_id2,
target_id3[0]: target_id3}}})
expected_document = {'resource_id': self.resource_id,
'resource_data': expected_data}
message = {'action': 'get', 'resource_id': self.resource_id}
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
expected_document)
@_async_test
def test_list(self):
message = {'action': 'list'}
# Check that list doesn't return anything
result = yield from self.collection.on_message(**message)
self.assertEqual(result, [])
# Create a doc
yield from self.test_create()
# Check that list return the document
result = yield from self.collection.on_message(**message)
self.assertEqual(result,
[{'resource_id': self.resource_id,
'resource_data': self.resource_data}])
@_async_test
def test_list_filter(self):
doc_1 = ({'field1': 1, 'field2': 2}, 'UUID-1')
doc_2 = ({'field1': 3, 'field2': 2}, 'UUID-2')
doc_3 = ({'field1': 1, 'field2': 4}, 'UUID-3')
docs = (doc_1, doc_2, doc_3)
for doc in docs:
yield from self._create(*doc)
# All docs
message = {'action': 'list'}
expected = [{'resource_id': x[1], 'resource_data': x[0]} for x in
docs]
result = yield from self.collection.on_message(**message)
self.assertItemsEqual(result,
expected)
# Field1 = 1
message = {'action': 'list', 'where': {'field1': 1}}
expected = [{'resource_id': x[1], 'resource_data': x[0]} for x in
docs if x[0]['field1'] == 1]
result = yield from self.collection.on_message(**message)
self.assertItemsEqual(result,
expected)
# Field1 = 3
message = {'action': 'list', 'where': {'field1': 3}}
expected = [{'resource_id': x[1], 'resource_data': x[0]} for x in
docs if x[0]['field1'] == 3]
result = yield from self.collection.on_message(**message)
self.assertItemsEqual(result,
expected)
# Field2 = 2
message = {'action': 'list', 'where': {'field2': 2}}
expected = [{'resource_id': x[1], 'resource_data': x[0]} for x in
docs if x[0]['field2'] == 2]
result = yield from self.collection.on_message(**message)
self.assertItemsEqual(result,
expected)
@_async_test
def test_bad_action(self):
message = {'action': 'unknown', 'resource_id': self.resource_id,
'resource_data': self.resource_data}
with self.assertRaises(NoActionHandler):
yield from self.collection.on_message(**message)
|
{
"content_hash": "ad4ba316e119f370dae0f120da2f281f",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 85,
"avg_line_length": 39.28668941979522,
"alnum_prop": 0.5287985405264529,
"repo_name": "Lothiraldan/ZeroServices",
"id": "01c74c6d72e9911aa772dead2ec9622fbecec538",
"size": "11511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/backend/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1716"
},
{
"name": "Python",
"bytes": "127573"
}
],
"symlink_target": ""
}
|
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TensorTypeAndShape(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsTensorTypeAndShape(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TensorTypeAndShape()
x.Init(buf, n + offset)
return x
@classmethod
def TensorTypeAndShapeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# TensorTypeAndShape
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TensorTypeAndShape
def ElemType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# TensorTypeAndShape
def Shape(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.experimental.fbs.Shape import Shape
obj = Shape()
obj.Init(self._tab.Bytes, x)
return obj
return None
def TensorTypeAndShapeStart(builder): builder.StartObject(2)
def TensorTypeAndShapeAddElemType(builder, elemType): builder.PrependInt32Slot(0, elemType, 0)
def TensorTypeAndShapeAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def TensorTypeAndShapeEnd(builder): return builder.EndObject()
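# Hedged construction sketch (not part of the generated code): build a table
# with only elem_type set, leaving the shape offset unset. The value 1 is an
# assumption standing in for a real TensorDataType enum member.
def _example_build_tensor_type_and_shape():
    builder = flatbuffers.Builder(0)
    TensorTypeAndShapeStart(builder)
    TensorTypeAndShapeAddElemType(builder, 1)
    offset = TensorTypeAndShapeEnd(builder)
    builder.Finish(offset)
    # Round-trip: read the elem_type back from the finished buffer.
    buf = builder.Output()
    obj = TensorTypeAndShape.GetRootAsTensorTypeAndShape(buf, 0)
    return obj.ElemType()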
|
{
"content_hash": "4f0ccd4dc33abb484291eac961bd5c1e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 144,
"avg_line_length": 38.75,
"alnum_prop": 0.6862170087976539,
"repo_name": "ryfeus/lambda-packs",
"id": "481a1b2bf178dc84762593632d3db0c94e58d112",
"size": "1793",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ONNX/lambda-onnx/onnxruntime/tools/ort_format_model/ort_flatbuffers_py/experimental/fbs/TensorTypeAndShape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from ._compat import unittest
from ._adapt import DEFAULT_URI, drop, IS_MSSQL, IS_IMAP, IS_GAE, IS_TERADATA, IS_ORACLE
from pydal import DAL, Field
from pydal._compat import PY2
@unittest.skipIf(IS_IMAP, "Reference not Null unsupported on IMAP")
@unittest.skipIf(IS_ORACLE, "Reference Not Null unsupported on Oracle")
class TestReferenceNOTNULL(unittest.TestCase):
# 1:N not null
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt", Field("vv"), Field("tt_id", "%s tt" % ref, notnull=True)
)
self.assertRaises(Exception, db.ttt.insert, vv="pydal")
            # The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Reference Unique unsupported on IMAP")
@unittest.skipIf(IS_GAE, "Reference Unique unsupported on GAE")
@unittest.skipIf(IS_ORACLE, "Reference Unique unsupported on Oracle")
class TestReferenceUNIQUE(unittest.TestCase):
# 1:1 relation
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt",
Field("vv"),
Field("tt_id", "%s tt" % ref, unique=True),
Field("tt_uq", "integer", unique=True),
)
id_1 = db.tt.insert(vv="pydal")
id_2 = db.tt.insert(vv="pydal")
# Null tt_id
db.ttt.insert(vv="pydal", tt_uq=1)
# first insert is OK
db.ttt.insert(tt_id=id_1, tt_uq=2)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_1, tt_uq=3)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_2, tt_uq=2)
            # The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Reference Unique not Null unsupported on IMAP")
@unittest.skipIf(IS_GAE, "Reference Unique not Null unsupported on GAE")
@unittest.skipIf(IS_ORACLE, "Reference Unique not Null unsupported on Oracle")
class TestReferenceUNIQUENotNull(unittest.TestCase):
# 1:1 relation not null
def testRun(self):
for ref, bigint in [("reference", False), ("big-reference", True)]:
db = DAL(DEFAULT_URI, check_reserved=["all"], bigint_id=bigint)
if bigint and "big-id" not in db._adapter.types:
continue
db.define_table("tt", Field("vv"))
db.define_table(
"ttt",
Field("vv"),
Field("tt_id", "%s tt" % ref, unique=True, notnull=True),
)
self.assertRaises(Exception, db.ttt.insert, vv="pydal")
db.commit()
id_i = db.tt.insert(vv="pydal")
# first insert is OK
db.ttt.insert(tt_id=id_i)
self.assertRaises(Exception, db.ttt.insert, tt_id=id_i)
            # The following is mandatory for backends such as PG to close the aborted transaction
db.commit()
drop(db.ttt)
drop(db.tt)
db.close()
@unittest.skipIf(IS_IMAP, "Skip unicode on IMAP")
@unittest.skipIf(IS_MSSQL and not PY2, "Skip unicode on py3 and MSSQL")
class TestUnicode(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
db.define_table("tt", Field("vv"))
vv = "ἀγοραζε"
id_i = db.tt.insert(vv=vv)
row = db(db.tt.id == id_i).select().first()
self.assertEqual(row.vv, vv)
db.commit()
drop(db.tt)
db.close()
class TestParseDateTime(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
#: skip for adapters that use drivers for datetime parsing
if db._adapter.parser.registered.get("datetime") is None:
return
parse = lambda v: db._adapter.parser.parse(v, "datetime", "datetime")
dt = parse("2015-09-04t12:33:36.223245")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 12)
dt = parse("2015-09-04t12:33:36.223245Z")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 12)
dt = parse("2015-09-04t12:33:36.223245-2:0")
self.assertEqual(dt.microsecond, 223245)
self.assertEqual(dt.hour, 10)
dt = parse("2015-09-04t12:33:36+1:0")
self.assertEqual(dt.microsecond, 0)
self.assertEqual(dt.hour, 13)
dt = parse("2015-09-04t12:33:36.123")
self.assertEqual(dt.microsecond, 123000)
dt = parse("2015-09-04t12:33:36.00123")
self.assertEqual(dt.microsecond, 1230)
dt = parse("2015-09-04t12:33:36.1234567890")
self.assertEqual(dt.microsecond, 123456)
db.close()
@unittest.skipIf(IS_IMAP, "chained join unsupported on IMAP")
@unittest.skipIf(IS_TERADATA, "chained join unsupported on TERADATA")
class TestChainedJoinUNIQUE(unittest.TestCase):
# 1:1 relation
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=["all"])
db.define_table("aa", Field("name"))
db.define_table("bb", Field("aa", "reference aa"), Field("name"))
for k in ("x", "y", "z"):
i = db.aa.insert(name=k)
for j in ("u", "v", "w"):
db.bb.insert(aa=i, name=k + j)
db.commit()
rows = db(db.aa).select()
rows.join(db.bb.aa, fields=[db.bb.name], orderby=[db.bb.name])
self.assertEqual(rows[0].bb[0].name, "xu")
self.assertEqual(rows[0].bb[1].name, "xv")
self.assertEqual(rows[0].bb[2].name, "xw")
self.assertEqual(rows[1].bb[0].name, "yu")
self.assertEqual(rows[1].bb[1].name, "yv")
self.assertEqual(rows[1].bb[2].name, "yw")
self.assertEqual(rows[2].bb[0].name, "zu")
self.assertEqual(rows[2].bb[1].name, "zv")
self.assertEqual(rows[2].bb[2].name, "zw")
rows = db(db.bb).select()
rows.join(db.aa.id, fields=[db.aa.name])
self.assertEqual(rows[0].aa.name, "x")
self.assertEqual(rows[1].aa.name, "x")
self.assertEqual(rows[2].aa.name, "x")
self.assertEqual(rows[3].aa.name, "y")
self.assertEqual(rows[4].aa.name, "y")
self.assertEqual(rows[5].aa.name, "y")
self.assertEqual(rows[6].aa.name, "z")
self.assertEqual(rows[7].aa.name, "z")
self.assertEqual(rows[8].aa.name, "z")
rows_json = rows.as_json()
drop(db.bb)
drop(db.aa)
db.close()
class TestNullAdapter(unittest.TestCase):
# Test that NullAdapter can define tables
def testRun(self):
db = DAL(None)
db.define_table("no_table", Field("aa"))
self.assertIsInstance(db.no_table.aa, Field)
self.assertIsInstance(db.no_table["aa"], Field)
db.close()
|
{
"content_hash": "66c25dcc2f399d80d74d7d9d8dadd3f7",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 92,
"avg_line_length": 37.83756345177665,
"alnum_prop": 0.5818352562382614,
"repo_name": "willimoa/pydal",
"id": "96ce79985f51dfaa69cdff27a108ecb9e146fd99",
"size": "7487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "334"
},
{
"name": "Python",
"bytes": "1291647"
}
],
"symlink_target": ""
}
|
from taskplus.core.shared.action import Action
from taskplus.core.shared.request import Request
from taskplus.core.shared.response import ResponseSuccess
class DeleteTaskStatusAction(Action):
def __init__(self, repo):
super().__init__()
self.statuses_repo = repo
def process_request(self, request):
self._call_before_execution_hooks(dict(request=request, status=None))
status_id = request.id
status = self.statuses_repo.delete(status_id)
self._call_after_execution_hooks(dict(request=request, status=status))
return ResponseSuccess(status)
class DeleteTaskStatusRequest(Request):
def __init__(self, id):
super().__init__()
self.id = id
self._validate()
def _validate(self):
self.errors = []
if not self.id:
self._add_error('id', 'is required')
|
{
"content_hash": "bb095d05c669ad6cbe7abe514e50075f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 27.4375,
"alnum_prop": 0.6469248291571754,
"repo_name": "Himon-SYNCRAFT/taskplus",
"id": "bf0b372ed9190ce9526d536f5c0219ea25fb8b19",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskplus/core/actions/delete_task_status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "246743"
}
],
"symlink_target": ""
}
|
import json
from writers import template_writer
def GetWriter(config):
  '''Factory method for creating RegWriter objects.
  See the constructor of TemplateWriter for description of
  arguments.
  '''
  return RegWriter(['win', 'win7'], config)


class RegWriter(template_writer.TemplateWriter):
  '''Class for generating policy example files in .reg format (for Windows).
  The generated files will define all the supported policies with example
  values set for them. This class is used by PolicyTemplateGenerator to
  write .reg files.
  '''

  NEWLINE = '\r\n'

  def _QuoteAndEscapeString(self, string):
    assert isinstance(string, str)
    return json.dumps(string)

  def _StartBlock(self, key, suffix, list):
    key = 'HKEY_LOCAL_MACHINE\\' + key
    if suffix:
      key = key + '\\' + suffix
    if key != self._last_key.get(id(list), None):
      list.append('')
      list.append('[%s]' % key)
      self._last_key[id(list)] = key

  def PreprocessPolicies(self, policy_list):
    return self.FlattenGroupsAndSortPolicies(policy_list,
                                             self.GetPolicySortingKey)

  def GetPolicySortingKey(self, policy):
    '''Extracts a sorting key from a policy. These keys can be used for
    list.sort() methods to sort policies.
    See TemplateWriter.SortPoliciesGroupsFirst for usage.
    '''
    is_list = policy['type'] in ('list', 'string-enum-list')
    # Lists come after regular policies.
    return (is_list, policy['name'])

  def _WritePolicy(self, policy, key, list):
    example_value = policy['example_value']
    if policy['type'] in ('list', 'string-enum-list'):
      self._StartBlock(key, policy['name'], list)
      i = 1
      for item in example_value:
        list.append('"%d"=%s' % (i, self._QuoteAndEscapeString(item)))
        i = i + 1
    else:
      self._StartBlock(key, None, list)
      if policy['type'] in ('string', 'string-enum'):
        example_value_str = self._QuoteAndEscapeString(example_value)
      elif policy['type'] in ('dict', 'external'):
        example_value_str = self._QuoteAndEscapeString(
            json.dumps(example_value, sort_keys=True))
      elif policy['type'] in ('main', 'int', 'int-enum'):
        example_value_str = 'dword:%08x' % int(example_value)
      else:
        raise Exception('unknown policy type %s:' % policy['type'])
      list.append('"%s"=%s' % (policy['name'], example_value_str))

  def WriteComment(self, comment):
    self._prefix.append('; ' + comment)

  def WritePolicy(self, policy):
    if self.CanBeMandatory(policy):
      self._WritePolicy(policy, self._winconfig['reg_mandatory_key_name'],
                        self._mandatory)

  def WriteRecommendedPolicy(self, policy):
    self._WritePolicy(policy, self._winconfig['reg_recommended_key_name'],
                      self._recommended)

  def BeginTemplate(self):
    pass

  def EndTemplate(self):
    pass

  def Init(self):
    self._mandatory = []
    self._recommended = []
    self._last_key = {}
    self._prefix = []
    self._winconfig = self.config['win_config']['win']

  def GetTemplateText(self):
    self._prefix.append('Windows Registry Editor Version 5.00')
    if self._GetChromiumVersionString() is not None:
      self.WriteComment(self.config['build'] + ' version: ' + \
          self._GetChromiumVersionString())
    all = self._prefix + self._mandatory + self._recommended
    return self.NEWLINE.join(all)
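

# Illustrative sketch, not part of the Chromium source: the value-formatting
# rules that _WritePolicy applies, restated as a standalone helper so they can
# be tried without a PolicyTemplateGenerator config. Policy types mirror the
# branches above; the sample values are made up.
def _format_reg_value(policy_type, example_value):
  if policy_type in ('string', 'string-enum'):
    return json.dumps(example_value)                 # quoted, escaped REG_SZ text
  if policy_type in ('dict', 'external'):
    return json.dumps(json.dumps(example_value, sort_keys=True))
  if policy_type in ('main', 'int', 'int-enum'):
    return 'dword:%08x' % int(example_value)         # REG_DWORD in hex form
  raise Exception('unknown policy type %s:' % policy_type)

# _format_reg_value('string', 'https://example.com')  -> '"https://example.com"'
# _format_reg_value('main', 1)                        -> 'dword:00000001'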
|
{
"content_hash": "bb8e6b14bb0fde7330d697802810991a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 33.37864077669903,
"alnum_prop": 0.6410703897614892,
"repo_name": "ric2b/Vivaldi-browser",
"id": "3fbd0a6b3583b7ddb6c3c0072ba982d2adc725dd",
"size": "3628",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/components/policy/tools/template_writers/writers/reg_writer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from poppy.model.helpers import domain
def load_from_json(json_data):
    domain_name = json_data.get('domain')
    protocol = json_data.get('protocol', 'http')
    certification_option = json_data.get('certificate', None)

    return domain.Domain(domain_name, protocol, certification_option)
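

# Minimal usage sketch, not part of the original module; the payload below is
# made up and simply mirrors the keys load_from_json() reads.
def _example_load():
    payload = {'domain': 'www.example.com',
               'protocol': 'https',
               'certificate': 'san'}
    return load_from_json(payload)   # -> poppy Domain model instance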
|
{
"content_hash": "8e575fbab99a6ab502dfb8a13d4ac1a1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 36.875,
"alnum_prop": 0.7254237288135593,
"repo_name": "obulpathi/poppy",
"id": "d4d03069b9ceb8857a2d83ca384458ba9ab9ba3a",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poppy/transport/pecan/models/request/domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1281"
},
{
"name": "PigLatin",
"bytes": "808"
},
{
"name": "Python",
"bytes": "1265113"
},
{
"name": "Shell",
"bytes": "12042"
}
],
"symlink_target": ""
}
|
from jsonrpc import jsonrpc_method
from gallery.models import Event, Picture
@jsonrpc_method('events.get_all', authenticated=True)
def get_events(request, null):
    return [i.to_json() for i in Event.objects.all()]


#marker: https://docs.djangoproject.com/en/1.8/topics/db/queries/#lookups-that-span-relationships
@jsonrpc_method('pictures.find', authenticated=True)
def get_pictures(request, events, offset, limit, order_by):
    print('Offset=%d Limit=%d' % (offset, limit))

    query = Picture.objects
    if events:
        query = query.filter(event_id__in=events)
    query = query.filter(exiftag__name='DateTimeOriginal')
    query = query.order_by('-exiftag__value')

    return [i.to_json() for i in query[offset:offset+limit]]
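

# Illustrative sketch (an assumption, not taken from this repo): the JSON-RPC 2.0
# payload a client might POST to invoke pictures.find above. Positional params
# follow the function signature after `request`; any credentials required by
# authenticated=True are omitted here.
EXAMPLE_PICTURES_FIND_CALL = {
    'jsonrpc': '2.0',
    'method': 'pictures.find',
    'params': [[1, 2], 0, 10, None],  # events, offset, limit, order_by
    'id': 1,
}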
|
{
"content_hash": "c723df09f061dfc7b372d43248066ccc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 97,
"avg_line_length": 33.95238095238095,
"alnum_prop": 0.7419354838709677,
"repo_name": "Noneus/NoPidi",
"id": "2ec4a4255402ca9c8faf3d99059956fc7e3dbdc7",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gallery/views/jsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44788"
},
{
"name": "HTML",
"bytes": "4805"
},
{
"name": "JavaScript",
"bytes": "80384"
},
{
"name": "Python",
"bytes": "30211"
}
],
"symlink_target": ""
}
|
'''
This script reads data from an xlsx file and stores it in MongoDB.
'''
import pyexcel
import logging
import models
from transform_date import *
from accent_remover import *
logging.basicConfig(filename='logs/apc.info.txt', level=logging.INFO)
logger = logging.getLogger(__name__)
# Add APC (article processing charge) data for journals
def apc(filename):
    apc = pyexcel.get_sheet(
        file_name=filename,
        sheet_name='import',
        name_columns_by_row=0)

    apc_json = apc.to_records()

    for rec in apc_json:
        # remove empty keys
        # rec = {k: v for k, v in rec.items() if v or v == 0}
        print(rec['issn'])

        query = models.Scielo.objects.filter(issn_scielo=rec['issn'])

        if len(query) == 1:
            doc = query[0]
            print(query[0]['issn_scielo'])

            data = {'apc': {}}
            data['apc'] = dict(rec)

            if data:
                doc.modify(**data)
                doc.save()


def main():
    # SciELO APC xlsx
    apc('data/scielo/APC-SciELO-Brasil.xlsx')


if __name__ == "__main__":
    main()
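

# Illustrative note (an assumption, not part of the original script): pyexcel's
# to_records() yields one dict per spreadsheet row, keyed by the header row,
# which is why rec['issn'] works above. A tiny in-memory equivalent:
def _records_demo():
    rows = [['issn', 'apc'], ['0001-3714', 'yes']]
    sheet = pyexcel.get_sheet(array=rows, name_columns_by_row=0)
    return list(sheet.to_records())   # e.g. [{'issn': '0001-3714', 'apc': 'yes'}]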
|
{
"content_hash": "ff3fbe07313cc4b5d6c88488429a7a6f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 69,
"avg_line_length": 22.70212765957447,
"alnum_prop": 0.569821930646673,
"repo_name": "scieloorg/journals-catalog",
"id": "9dbc10655188f4652bb28b2493ea5492311db480",
"size": "1083",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jcatalog/transform/scielo_apc_update.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "112941"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    result = Intangible()

    result.template = "object/draft_schematic/clothing/shared_clothing_ith_pants_field_15.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "4c51717b9e656bf1998a32d18ed1be3d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.7,
"repo_name": "obi-two/Rebelion",
"id": "ee39845c4ec95e7f732073ef3142f437fb5e164e",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_pants_field_15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import datetime
import unittest
import perlinpinpin
class TestPerlinpinpin(unittest.TestCase):

    def _make_date(self):
        class MockDate(datetime.date):
            @classmethod
            def today(cls):
                return datetime.date(2009, 3, 6)
        return MockDate

    def setUp(self):
        self.old_date = datetime.date
        datetime.date = self._make_date()
        self.perlinpinpin = perlinpinpin.perlinpinpin

    def tearDown(self):
        datetime.date = self.old_date

    def test_exception(self):
        self.assertRaises(ValueError, self.perlinpinpin, u"4")
        self.assertRaises(ValueError, self.perlinpinpin, u"35 Jnaier")
        self.assertRaises(ValueError, self.perlinpinpin, u"Luni prochain")
        self.assertRaises(ValueError, self.perlinpinpin, u"supercalifragilisticexpialidocious")

    def test_now(self):
        self.assertEqual(self.perlinpinpin(u"maintenant"), datetime.date(2009, 3, 6))

    def test_today(self):
        self.assertEqual(self.perlinpinpin(u"aujourd'hui"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"aujourdhui"), datetime.date(2009, 3, 6))

    def test_this_morning(self):
        self.assertEqual(self.perlinpinpin(u"matin"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"ce matin"), datetime.date(2009, 3, 6))

    def test_this_afternoon(self):
        self.assertEqual(self.perlinpinpin(u"apres-midi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"cet apres-midi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"apresmidi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"cet apresmidi"), datetime.date(2009, 3, 6))

    def test_this_evening(self):
        self.assertEqual(self.perlinpinpin(u"soir"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"ce soir"), datetime.date(2009, 3, 6))

    def test_yesterday(self):
        self.assertEqual(self.perlinpinpin(u"hier"), datetime.date(2009, 3, 5))

    def test_before_yesterday(self):
        self.assertEqual(self.perlinpinpin(u"avant-hier"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"avant hier"), datetime.date(2009, 3, 4))

    def test_tomorrow(self):
        self.assertEqual(self.perlinpinpin(u"demain"), datetime.date(2009, 3, 7))

    def test_after_tomorrow(self):
        self.assertEqual(self.perlinpinpin(u"après-demain"), datetime.date(2009, 3, 8))
        self.assertEqual(self.perlinpinpin(u"après demain"), datetime.date(2009, 3, 8))

    def test_last_tuesday(self):
        self.assertEqual(self.perlinpinpin(u"mardi dernier"), datetime.date(2009, 3, 3))

    def test_next_tuesday(self):
        self.assertEqual(self.perlinpinpin(u"mardi prochain"), datetime.date(2009, 3, 10))
        self.assertEqual(self.perlinpinpin(u"mardi suivant"), datetime.date(2009, 3, 10))

    def test_last_week(self):
        self.assertEqual(self.perlinpinpin(u"la semaine dernière"), datetime.date(2009, 2, 27))
        self.assertEqual(self.perlinpinpin(u"semaine dernière"), datetime.date(2009, 2, 27))

    def test_next_week(self):
        self.assertEqual(self.perlinpinpin(u"la semaine prochaine"), datetime.date(2009, 3, 13))
        self.assertEqual(self.perlinpinpin(u"semaine prochaine"), datetime.date(2009, 3, 13))

    def test_day(self):
        self.assertEqual(self.perlinpinpin(u"vendredi 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le vendredi 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er"), datetime.date(2009, 3, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er"), datetime.date(2009, 3, 1))
        self.assertEqual(self.perlinpinpin(u"le 1ier"), datetime.date(2009, 3, 1))

    def test_day_and_month(self):
        self.assertEqual(self.perlinpinpin(u"4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"Mardi 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le mardi 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"4 Fevrier"), datetime.date(2009, 2, 4))
        self.assertEqual(self.perlinpinpin(u"4 Février"), datetime.date(2009, 2, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er février"), datetime.date(2009, 2, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er février"), datetime.date(2009, 2, 1))

    def test_day_and_month_and_year(self):
        self.assertEqual(self.perlinpinpin(u"4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"Vendredi 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le Mardi 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er février 2008"), datetime.date(2008, 2, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er février 2008"), datetime.date(2008, 2, 1))

    def test_european_style(self):
        self.assertEqual(self.perlinpinpin(u"02/03/2009"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"2/3/2009"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"le 02/03/2009"), datetime.date(2009, 3, 2))

    def test_european_short_style(self):
        self.assertEqual(self.perlinpinpin(u"02/03/09"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"2/3/09"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"le 02/03/09"), datetime.date(2009, 3, 2))

    def test_american_style(self):
        self.assertEqual(self.perlinpinpin(u"01/24/2009"), datetime.date(2009, 1, 24))
        self.assertEqual(self.perlinpinpin(u"1/24/2009"), datetime.date(2009, 1, 24))

    def test_american_short_style(self):
        self.assertEqual(self.perlinpinpin(u"01/24/09"), datetime.date(2009, 1, 24))
        self.assertEqual(self.perlinpinpin(u"1/24/09"), datetime.date(2009, 1, 24))

    def test_iso_style(self):
        self.assertEqual(self.perlinpinpin(u"2009-01-09"), datetime.date(2009, 1, 9))
        self.assertEqual(self.perlinpinpin(u"2009-1-9"), datetime.date(2009, 1, 9))

    def test_time_ahead(self):
        self.assertEqual(self.perlinpinpin(u"dans 2 jours"), datetime.date(2009, 3, 8))
        self.assertEqual(self.perlinpinpin(u"dans 1 semaine"), datetime.date(2009, 3, 13))
        self.assertEqual(self.perlinpinpin(u"dans 2 semaines"), datetime.date(2009, 3, 20))
        self.assertEqual(self.perlinpinpin(u"dans 1 semaine et 3 jours"), datetime.date(2009, 3, 16))

    def test_time_ago(self):
        self.assertEqual(self.perlinpinpin(u"il y a 2 jours"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"il y a 1 semaine"), datetime.date(2009, 2, 27))
        self.assertEqual(self.perlinpinpin(u"il y a 2 semaines"), datetime.date(2009, 2, 20))
        self.assertEqual(self.perlinpinpin(u"il y a 1 semaine et 3 jours"), datetime.date(2009, 2, 24))


if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "70afd589d7bb67dd6d7f8519fea38ac3",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 103,
"avg_line_length": 52.207142857142856,
"alnum_prop": 0.6768367765768231,
"repo_name": "cyberdelia/perlinpinpin",
"id": "feae2655faa2254ebae1bce6c1630ce9e8579405",
"size": "7342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32288"
}
],
"symlink_target": ""
}
|
"""
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
import textwrap
docdict = {}
def get(name):
    return docdict.get(name)

# common parameter text to all ufuncs
subst = {
    'PARAMS': textwrap.dedent("""
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    where : array_like, optional
        Values of True indicate to calculate the ufunc at that position, values
        of False indicate to leave the value in the output alone.
    **kwargs
        For other keyword-only arguments, see the
        :ref:`ufunc docs <ufuncs.kwargs>`.
    """).strip(),
    'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
                        "broadcastable to a common shape (which becomes the "
                        "shape of the output)."),
    'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
    'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
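
# Small demonstration (standard NumPy behaviour, not part of this file) of the
# `out` and `where` keywords documented in the PARAMS text above: positions
# where the condition is False are left untouched in `out`.
#
#     >>> import numpy as np
#     >>> x = np.array([1.0, 4.0, 9.0])
#     >>> out = np.zeros_like(x)
#     >>> np.sqrt(x, out=out, where=x > 1)
#     array([0., 2., 3.])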
def add_newdoc(place, name, doc):
    doc = textwrap.dedent(doc).strip()

    if name[0] != '_' and name != 'matmul':
        # matmul is special, it does not use the OUT_SCALAR replacement strings
        if '\nx :' in doc:
            assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
        elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
            assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
        else:
            assert False, "Could not detect number of inputs in {}".format(name)

    for k, v in subst.items():
        doc = doc.replace('$' + k, v)

    docdict['.'.join((place, name))] = doc
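
# Illustrative sketch, not part of the original file: names starting with an
# underscore skip the OUT_SCALAR sanity check above, so a throwaway entry can
# show how the $-placeholders get expanded into docdict.
add_newdoc('numpy.core.umath', '_placeholder_demo',
    """
    Demo entry only; never exported.

    Parameters
    ----------
    x : array_like
        Input array.
    $PARAMS
    """)
assert '$PARAMS' not in get('numpy.core.umath._placeholder_demo')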
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. $BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. $BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
0 if x1 < 0
heaviside(x1, x2) = x2 if x1 == 0
1 if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller than or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s). $BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity; infinity,
however, is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values. $BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values. $BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or `None`, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `x1` is not the same size as
the second-to-last dimension of `x2`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0 , :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
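Examples
--------
A minimal illustration (input values chosen arbitrarily):
>>> np.positive([1., -1.])
array([ 1., -1.])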
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
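Negative integer exponents with integer bases raise an error, as noted
above (illustrative):
>>> np.power(2, -1)
Traceback (most recent call last):
...
ValueError: Integers to negative integer powers are not allowed.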
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
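The integer behaviour described in the Notes, shown as an illustrative doctest:
>>> np.reciprocal(2)
0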
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
* Python 3.7's `math.remainder` and C's ``remainder``, which
computes the IEEE remainder, which are the complement to
``round(x1 / x2)``.
* The MATLAB ``rem`` function and the C ``%`` operator, which are the
complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values. $BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values. $BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
""")
|
{
"content_hash": "0db577975e71253786686605144b1100",
"timestamp": "",
"source": "github",
"line_count": 3924,
"max_line_length": 83,
"avg_line_length": 25.239041794087665,
"alnum_prop": 0.562329610856439,
"repo_name": "shoyer/numpy",
"id": "8b1a5a3db2705094400c433c4211c0b5a84fa963",
"size": "99038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/core/code_generators/ufunc_docstrings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8752922"
},
{
"name": "C++",
"bytes": "186633"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "7609450"
},
{
"name": "Shell",
"bytes": "9102"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
"""This module contains Google Cloud Vision operators."""
from typing import TYPE_CHECKING, Dict, Optional, Sequence, Union
from google.api_core.retry import Retry
from google.cloud.videointelligence_v1 import enums
from google.cloud.videointelligence_v1.types import VideoContext
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.video_intelligence import CloudVideoIntelligenceHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudVideoIntelligenceDetectVideoLabelsOperator(BaseOperator):
"""
Performs video annotation, annotating video labels.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoLabelsOperator`.
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_labels_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_labels_template_fields]
def __init__(
self,
*,
input_uri: str,
input_content: Optional[bytes] = None,
output_uri: Optional[str] = None,
video_context: Union[Dict, VideoContext] = None,
location: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.input_content = input_content
self.output_uri = output_uri
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[enums.Feature.LABEL_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for label annotations")
result = MessageToDict(operation.result())
self.log.info("Finished processing.")
return result
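# Illustrative usage sketch (not part of the original module): wiring the label
# detection operator into a DAG. The task_id and bucket path below are hypothetical
# placeholders, not values from this repository.
#
#   detect_video_labels = CloudVideoIntelligenceDetectVideoLabelsOperator(
#       task_id="detect_video_labels",
#       input_uri="gs://example-bucket/example-video.mp4",
#       output_uri=None,
#       video_context=None,
#   )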
class CloudVideoIntelligenceDetectVideoExplicitContentOperator(BaseOperator):
"""
Performs video annotation, annotating explicit content.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoExplicitContentOperator`
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_explicit_content_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_explicit_content_template_fields]
def __init__(
self,
*,
input_uri: str,
output_uri: Optional[str] = None,
input_content: Optional[bytes] = None,
video_context: Union[Dict, VideoContext] = None,
location: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.output_uri = output_uri
self.input_content = input_content
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[enums.Feature.EXPLICIT_CONTENT_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for explicit content annotations")
result = MessageToDict(operation.result())
self.log.info("Finished processing.")
return result
class CloudVideoIntelligenceDetectVideoShotsOperator(BaseOperator):
"""
Performs video annotation, annotating video shots.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoShotsOperator`
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_video_shots_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_video_shots_template_fields]
def __init__(
self,
*,
input_uri: str,
output_uri: Optional[str] = None,
input_content: Optional[bytes] = None,
video_context: Union[Dict, VideoContext] = None,
location: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.output_uri = output_uri
self.input_content = input_content
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[enums.Feature.SHOT_CHANGE_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for video shots annotations")
result = MessageToDict(operation.result())
self.log.info("Finished processing.")
return result
|
{
"content_hash": "d18a6294dd0b992d6301fa0c33c1ef3e",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 109,
"avg_line_length": 47,
"alnum_prop": 0.6734080144350049,
"repo_name": "bolkedebruin/airflow",
"id": "6f2cd220626463336d7974458b54a77501c042c1",
"size": "14088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/operators/video_intelligence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
from contextlib import closing
import getpass
import sys
import psycopg2
DB_CXN = {
'user' : getpass.getuser(),
'host' : 'lon.lib.virginia.edu'
}
def is_gis_db(cxn, db_name):
"""This checks that the database has a geometry_columns table. """
with closing(cxn.cursor()) as c:
c.execute('''
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_schema='public'
AND table_name='geometry_columns';
''')
return (c.fetchone()[0] == 1)
def missing_gis_tables(cxn, db_name):
"""\
This checks that the tables listed in geometry_columns.f_table_name
actually exist.
"""
with closing(cxn.cursor()) as c:
c.execute('''
SELECT f_table_name
FROM geometry_columns
WHERE f_table_name NOT IN (
SELECT distinct table_name
FROM information_schema.tables
WHERE table_schema='public'
)
ORDER BY f_table_name;
''')
for (table_name,) in c:
yield table_name
def main():
dbs = []
with closing(psycopg2.connect(database='postgres', **DB_CXN)) as cxn:
with closing(cxn.cursor()) as c:
c.execute('''
SELECT datname FROM pg_database
WHERE datistemplate=false;
''')
dbs = [ db_name for (db_name,) in c ]
for db_name in dbs:
with closing(psycopg2.connect(database=db_name, **DB_CXN)) as cxn:
if is_gis_db(cxn, db_name):
for table_name in missing_gis_tables(cxn, db_name):
sys.stdout.write('{0}.{1}\n'.format(db_name, table_name))
if __name__ == '__main__':
main()
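# Illustrative invocation (hypothetical database and table names); each output line
# has the form "<database>.<missing table>":
#
#   $ python list_missing_tables.py
#   city_gis.roads_1998
#   city_gis.parcels_old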
|
{
"content_hash": "e88d2ec4eb9dbaa779fb84d9fe7ed071",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 28.125,
"alnum_prop": 0.5266666666666666,
"repo_name": "erochest/problayers",
"id": "bd9e70971a4b29650f64e1ff28e85129e0167b93",
"size": "1824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "list_missing_tables.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Haskell",
"bytes": "5215"
},
{
"name": "Python",
"bytes": "5673"
}
],
"symlink_target": ""
}
|
"""Functions and utilities to apply aesthetic styling to plots."""
from itertools import cycle
from functools import wraps
import matplotlib.pyplot as plt
from neurodsp.plts.settings import AXIS_STYLE_ARGS, LINE_STYLE_ARGS, CUSTOM_STYLE_ARGS, STYLE_ARGS
from neurodsp.plts.settings import (LABEL_SIZE, LEGEND_SIZE, LEGEND_LOC,
TICK_LABELSIZE, TITLE_FONTSIZE)
###################################################################################################
###################################################################################################
def check_style_options():
"""Check the list of valid style arguments that can be passed into plot functions."""
print('Valid style arguments:')
for label, options in zip(['Axis', 'Line', 'Custom'],
[AXIS_STYLE_ARGS, LINE_STYLE_ARGS, CUSTOM_STYLE_ARGS]):
print(' ', label, '\t', ', '.join(options))
def apply_axis_style(ax, style_args=AXIS_STYLE_ARGS, **kwargs):
"""Apply axis plot style.
Parameters
----------
ax : matplotlib.Axes
Figure axes to apply style to.
style_args : list of str
A list of arguments to be sub-selected from `kwargs` and applied as axis styling.
**kwargs
Keyword arguments that define plot style to apply.
"""
# Apply any provided axis style arguments
plot_kwargs = {key : val for key, val in kwargs.items() if key in style_args}
ax.set(**plot_kwargs)
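# Minimal usage sketch (assumes 'xlabel', 'ylabel' and 'xlim' are listed in
# AXIS_STYLE_ARGS, as in the neurodsp defaults):
#
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [0, 1, 4])
#   apply_axis_style(ax, xlabel='Time (s)', ylabel='Amplitude', xlim=(0, 2))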
def apply_line_style(ax, style_args=LINE_STYLE_ARGS, **kwargs):
"""Apply line plot style.
Parameters
----------
ax : matplotlib.Axes
Figure axes to apply style to.
style_args : list of str
A list of arguments to be sub-selected from `kwargs` and applied as line styling.
**kwargs
Keyword arguments that define line style to apply.
"""
# Check how many lines are from the current plot call, to apply style to
# If available, this indicates the apply styling to the last 'n' lines
n_lines_apply = kwargs.pop('n_lines_apply', 0)
# Get the line related styling arguments from the keyword arguments
line_kwargs = {key : val for key, val in kwargs.items() if key in style_args}
# Apply any provided line style arguments
for style, value in line_kwargs.items():
        # Values should be either a single value, applied to all lines, or a list with one value per line
# This line checks type, and makes a cycle-able / loop-able object out of the values
values = cycle([value] if isinstance(value, (int, float, str)) else value)
for line in ax.lines[-n_lines_apply:]:
line.set(**{style : next(values)})
def apply_custom_style(ax, **kwargs):
"""Apply custom plot style.
Parameters
----------
ax : matplotlib.Axes
Figure axes to apply style to.
**kwargs
Keyword arguments that define custom style to apply.
"""
# If a title was provided, update the size
if ax.get_title():
ax.title.set_size(kwargs.pop('title_fontsize', TITLE_FONTSIZE))
# Settings for the axis labels
label_size = kwargs.pop('label_size', LABEL_SIZE)
ax.xaxis.label.set_size(label_size)
ax.yaxis.label.set_size(label_size)
# Settings for the axis ticks
ax.tick_params(axis='both', which='major',
labelsize=kwargs.pop('tick_labelsize', TICK_LABELSIZE))
# If labels were provided, add a legend
if ax.get_legend_handles_labels()[0]:
ax.legend(prop={'size': kwargs.pop('legend_size', LEGEND_SIZE)},
loc=kwargs.pop('legend_loc', LEGEND_LOC))
plt.tight_layout()
def plot_style(ax, axis_styler=apply_axis_style, line_styler=apply_line_style,
custom_styler=apply_custom_style, **kwargs):
"""Apply plot style to a figure axis.
Parameters
----------
ax : matplotlib.Axes
Figure axes to apply style to.
axis_styler, line_styler, custom_styler : callable, optional
Functions to apply style to aspects of the plot.
**kwargs
Keyword arguments that define style to apply.
Notes
-----
This function wraps sub-functions which apply style to different plot elements.
Each of these sub-functions can be replaced by passing in a replacement callable.
"""
axis_styler(ax, **kwargs)
line_styler(ax, **kwargs)
custom_styler(ax, **kwargs)
def style_plot(func, *args, **kwargs):
"""Decorator function to apply a plot style function, after plot generation.
Parameters
----------
func : callable
The plotting function for creating a plot.
*args, **kwargs
Arguments & keyword arguments.
These should include any arguments for the plot, and those for applying plot style.
Notes
-----
This decorator works by:
- catching all inputs that relate to plot style
- creating a plot, using the passed in plotting function & passing in all non-style arguments
- passing the style related arguments into a `plot_style` function which applies plot styling
By default, this function applies styling with the `plot_style` function. Custom
functions for applying style can be passed in using `plot_style` as a keyword argument.
    The `plot_style` function calls sub-functions for applying style to different plot elements,
and these sub-functions can be overridden by passing in alternatives for `axis_styler`,
`line_styler`, and `custom_styler`.
"""
@wraps(func)
def decorated(*args, **kwargs):
# Grab a custom style function, if provided, and grab any provided style arguments
style_func = kwargs.pop('plot_style', plot_style)
style_args = kwargs.pop('style_args', STYLE_ARGS)
style_kwargs = {key : kwargs.pop(key) for key in style_args if key in kwargs}
# Check how many lines are already on the plot, if it exists already
n_lines_pre = len(kwargs['ax'].lines) if 'ax' in kwargs and kwargs['ax'] is not None else 0
# Create the plot
func(*args, **kwargs)
# Get plot axis, if a specific one was provided, or if not, grab the current axis
cur_ax = kwargs['ax'] if 'ax' in kwargs and kwargs['ax'] is not None else plt.gca()
# Check how many lines were added to the plot, and make info available to plot styling
n_lines_apply = len(cur_ax.lines) - n_lines_pre
style_kwargs['n_lines_apply'] = n_lines_apply
# Apply the styling function
style_func(cur_ax, **style_kwargs)
return decorated
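# A minimal usage sketch (an added illustration, not part of neurodsp itself).
# It assumes 'xlabel' / 'ylabel' are registered in AXIS_STYLE_ARGS and 'lw' /
# 'alpha' in LINE_STYLE_ARGS (and therefore in STYLE_ARGS); if a name is not
# registered, the decorator leaves it in **plot_kwargs below and the sketch
# still runs, it just is not styled.
if __name__ == '__main__':
    @style_plot
    def _plot_two_lines(xs, ys1, ys2, ax=None, **plot_kwargs):
        """Plot two lines on the given axes (or the current axes)."""
        ax = ax if ax is not None else plt.gca()
        ax.plot(xs, ys1)
        ax.plot(xs, ys2)
    # Style keywords are stripped out by the decorator, the plot is drawn, and
    # then plot_style applies them: axis labels via ax.set, and per-line values
    # (here a different alpha per line) cycled over the newly added lines.
    _plot_two_lines([0, 1, 2], [0, 1, 4], [4, 1, 0],
                    xlabel='Time', ylabel='Amplitude',
                    lw=2, alpha=[1.0, 0.5])
    plt.show()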
|
{
"content_hash": "9ade22c69cb230d886d22a35a17fdef5",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 99,
"avg_line_length": 36.8268156424581,
"alnum_prop": 0.6339502427184466,
"repo_name": "voytekresearch/neurodsp",
"id": "dca350cbe1408659063a7e0190867b3a2cdda5f5",
"size": "6592",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "neurodsp/plts/style.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2726"
},
{
"name": "Python",
"bytes": "421656"
},
{
"name": "TeX",
"bytes": "6424"
}
],
"symlink_target": ""
}
|
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from plugins.tff_backend.bizz.agenda import list_events, get_event, put_event, list_participants
from plugins.tff_backend.bizz.audit.audit import audit
from plugins.tff_backend.bizz.audit.mapping import AuditLogType
from plugins.tff_backend.bizz.authentication import Scopes
from plugins.tff_backend.to.agenda import EventTO, EventParticipantListTO
@rest('/agenda-events', 'get', Scopes.BACKEND_READONLY, silent_result=True)
@returns([EventTO])
@arguments(past=bool)
def api_list_events(past=False):
return [EventTO.from_model(model) for model in list_events(past)]
@rest('/agenda-events', 'post', Scopes.BACKEND_ADMIN)
@returns(EventTO)
@arguments(data=EventTO)
def api_create_event(data):
return EventTO.from_model(put_event(data))
@rest('/agenda-events/<event_id:[^/]+>/participants', 'get', Scopes.BACKEND_READONLY, silent_result=True)
@returns(EventParticipantListTO)
@arguments(event_id=(int, long), cursor=unicode, page_size=(int, long))
def api_list_event_participants(event_id, cursor=None, page_size=50):
return list_participants(event_id, cursor, page_size)
@rest('/agenda-events/<event_id:[^/]+>', 'get', Scopes.BACKEND_READONLY)
@returns(EventTO)
@arguments(event_id=(int, long))
def api_get_event(event_id):
return EventTO.from_model(get_event(event_id))
@audit(AuditLogType.UPDATE_AGENDA_EVENT, 'event_id')
@rest('/agenda-events/<event_id:[^/]+>', 'put', Scopes.BACKEND_ADMIN)
@returns(EventTO)
@arguments(event_id=(int, long), data=EventTO)
def api_put_event(event_id, data):
data.id = event_id
return EventTO.from_model(put_event(data))
|
{
"content_hash": "0c1de159c043f4a618d9522b032032de",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 105,
"avg_line_length": 37.68181818181818,
"alnum_prop": 0.7490952955367913,
"repo_name": "threefoldfoundation/app_backend",
"id": "357a4568c8b6a282c85f604cabaf6af6a414feed",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/tff_backend/api/agenda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8307"
},
{
"name": "HTML",
"bytes": "88477"
},
{
"name": "JavaScript",
"bytes": "1870"
},
{
"name": "Python",
"bytes": "430948"
},
{
"name": "TypeScript",
"bytes": "217217"
}
],
"symlink_target": ""
}
|
"""
PosteriorContainer saves (usually sampled) inputs to the generative model and their likelihood of producing some observed data.
Comes with a bunch of supporting methods to analyze the samples.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os.path
import math
class PosteriorContainer(object):
"""
PosteriorContainer saves (usually sampled) inputs to the generative model and their likelihood of producing some observed data.
Comes with a bunch of supporting methods to analyze the samples.
"""
    def __init__(self, C, R, L, ActionSequence, Planner=None):
"""
Create an object that stores inputs to generative model.
Args:
C (list): List of cost samples
R (list): List of reward samples
L (list): List of log-likelihoods
ActionSequence (list): List of actions
Planner (Planner): (optional) Planner object (The generative model)
"""
self.CostSamples = C
self.RewardSamples = R
self.LogLikelihoods = L
self.CostDimensions = self.CostSamples.shape[1]
self.RewardDimensions = self.RewardSamples.shape[1]
self.Samples = self.RewardSamples.shape[0]
self.Actions = ActionSequence
self.MapFile = None
# Extract information from the planner object
if Planner is not None:
self.CostNames = Planner.Map.StateNames
self.StartingPoint = Planner.Map.StartingPoint
self.ActionNames = Planner.Map.ActionNames
self.ObjectLocations = Planner.Map.ObjectLocations
self.ObjectTypes = Planner.Map.ObjectTypes
self.ObjectNames = Planner.Map.ObjectNames
self.Organic = Planner.Map.Organic
self.SurvivalProb = Planner.Map.SurvivalProb
self.SoftChoice = Planner.Agent.SoftmaxChoice
self.SoftAction = Planner.Agent.SoftmaxAction
self.actionTau = Planner.Agent.actionTau
self.choiceTau = Planner.Agent.choiceTau
self.Method = Planner.Method
        else:
            self.CostNames = None
            self.StartingPoint = None
            self.ActionNames = None
            self.ObjectLocations = None
            self.ObjectTypes = None
            self.ObjectNames = None
            self.Organic = None
            self.SurvivalProb = None
            self.SoftChoice = None
            self.SoftAction = None
            self.actionTau = None
            self.choiceTau = None
            self.Method = "Unknown"
def SaveCSV(self, filename, overwrite=False):
"""
Export PosteriorContainer samples as a .csv file
Args:
filename (str): Filename
overwrite (bool): Overwrite file if it exists?
"""
if os.path.isfile(filename) and not overwrite:
print(("ERROR: File exists, type SaveCSV(\"" + filename + "\",True) to overwrite file."))
else:
f = open(filename, 'w')
# Create header
if self.ObjectNames is not None:
for i in range(len(self.ObjectNames)):
if i == 0:
Header = str(self.ObjectNames[i])
else:
Header = Header + "," + str(self.ObjectNames[i])
else:
for i in range(self.RewardDimensions):
if i == 0:
Header = "Object" + str(i)
else:
Header = Header + ",Object" + str(i)
if self.CostNames is not None:
for i in self.CostNames:
Header = Header + "," + str(i)
else:
for i in range(self.CostDimensions):
Header = Header + ",Terrain" + str(i)
Header = Header + ",LogLikelihood\n"
f.write(Header)
# Now add the samples
for i in range(self.CostSamples.shape[0]):
for j in range(self.RewardDimensions):
if j == 0:
NewLine = str(self.RewardSamples[i, j])
else:
NewLine = NewLine + "," + str(self.RewardSamples[i, j])
for j in range(self.CostDimensions):
NewLine = NewLine + "," + str(self.CostSamples[i, j])
NewLine = NewLine + "," + str(self.LogLikelihoods[i]) + "\n"
f.write(NewLine)
f.close()
def AssociateMap(self, MapName):
"""
        Add the map's name to the object. This allows you to later reload the full details of whatever you were running.
Args:
MapName (string): Name of map to use.
"""
self.MapFile = MapName
def LongSummary(self):
"""
LongSummary prints a summary of the samples, the convergence analysis,
and it plots the posterior distributions.
"""
self.Summary()
self.AnalyzeConvergence()
# 10 is the default input. Just sending it to avoid the print message
self.PlotCostPosterior(10)
self.PlotRewardPosterior(10)
def CompareRewards(self):
"""
Create a matrix where (i,j) is the probability that object i has a
higher or equal reward than object j.
"""
RewardComparison = np.zeros(
(self.RewardDimensions, self.RewardDimensions))
for i in range(self.RewardDimensions):
for j in range(i, self.RewardDimensions):
for s in range(self.Samples):
if (self.RewardSamples[s, i] >= self.RewardSamples[s, j]):
RewardComparison[i][
j] += np.exp(self.LogLikelihoods[s])
else:
RewardComparison[j][
i] += np.exp(self.LogLikelihoods[s])
return RewardComparison
def CompareCosts(self):
"""
Create a matrix where (i,j) is the probability that terrain i has a
higher or equal cost than terrain j.
"""
CostComparison = np.zeros((self.CostDimensions, self.CostDimensions))
for i in range(self.CostDimensions):
for j in range(i, self.CostDimensions):
for s in range(self.Samples):
if (self.CostSamples[s, i] >= self.CostSamples[s, j]):
CostComparison[i][j] += np.exp(self.LogLikelihoods[s])
else:
CostComparison[j][i] += np.exp(self.LogLikelihoods[s])
return CostComparison
def GetExpectedCosts(self, limit=None):
"""
Calculate the expected costs using the first N samples (used for timeseries).
Args:
limit (int): Number of samples to use. If set to None, function uses all samples.
"""
ExpectedCosts = []
if limit is None:
limit = self.Samples - 1
for i in range(self.CostDimensions):
NL = np.exp(self.LogLikelihoods[0:(limit + 1)])
if sum(NL) == 0:
print("WARNING: All likelihoods are zero up to this point. POSTERIORCONTAINER-001")
                NL = np.ones(NL.shape[0]) / NL.shape[0]
else:
NL = NL / sum(NL)
a = self.CostSamples[0:(limit + 1), i]
b = NL
res = sum([float(a[i]) * float(b[i]) for i in range(limit + 1)])
ExpectedCosts.append(res)
return ExpectedCosts
def GetExpectedRewards(self, limit=None):
"""
Calculate the expected rewards using the first N samples (used for timeseries).
Args:
limit (int): Number of samples to use. If set to None, function uses all samples.
"""
ExpectedRewards = []
if limit is None:
limit = self.Samples - 1
for i in range(self.RewardDimensions):
NL = np.exp(self.LogLikelihoods[0:(limit + 1)])
if sum(NL) == 0:
print("WARNING: All likelihoods are zero up to this point. POSTERIORCONTAINER-001")
                NL = np.ones(NL.shape[0]) / NL.shape[0]
else:
NL = NL / sum(NL)
a = self.RewardSamples[0:(limit + 1), i]
b = NL
res = sum([float(a[i]) * float(b[i]) for i in range(limit + 1)])
ExpectedRewards.append(res)
return ExpectedRewards
def PlotCostPosterior(self, bins=None):
"""
Plot posterior distribution of cost samples.
Args:
bins (int): Number of bins to use
"""
if bins is None:
print("Number of bins not specified. Defaulting to 10.")
bins = 10
maxval = np.amax(self.CostSamples)
binwidth = maxval * 1.0 / bins + 0.00001
xvals = [binwidth * (i + 0.5) for i in range(bins)]
for i in range(self.CostDimensions):
yvals = [0] * bins
insert_indices = [int(math.floor(j / binwidth))
for j in self.CostSamples[:, i]]
for j in range(self.Samples):
yvals[insert_indices[j]] += np.exp(self.LogLikelihoods[j])
plt.plot(xvals, yvals)
if self.CostNames is not None:
plt.legend(self.CostNames, loc='upper left')
else:
plt.legend([str(i)
for i in range(self.CostDimensions)], loc='upper left')
plt.xlabel("Cost")
plt.ylabel("Probability")
plt.title("Posterior distribution of terrain costs")
plt.show()
def PlotRewardPosterior(self, bins=None):
"""
Plot posterior distribution of reward samples.
Args:
bins (int): Number of bins to use
"""
if bins is None:
print("Number of bins not specified. Defaulting to 10.")
bins = 10
maxval = np.amax(self.RewardSamples)
binwidth = maxval * 1.0 / bins + 0.00001
xvals = [binwidth * (i + 0.5) for i in range(bins)]
for i in range(self.RewardDimensions):
yvals = [0] * bins
insert_indices = [int(math.floor(j / binwidth))
for j in self.RewardSamples[:, i]]
for j in range(self.Samples):
yvals[insert_indices[j]] += np.exp(self.LogLikelihoods[j])
plt.plot(xvals, yvals)
if self.ObjectNames is not None:
plt.legend(self.ObjectNames, loc='upper left')
else:
plt.legend([str(i)
for i in range(self.RewardDimensions)], loc='upper left')
plt.xlabel("Reward")
plt.ylabel("Probability")
plt.title("Posterior distribution of rewards")
plt.show()
def Summary(self, human=True, Id=None):
"""
Print summary of samples.
Args:
            human (bool): When true, the function prints a human-readable format.
                When false, it prints a compressed csv format (suitable for merging many runs)
Id (string): Optional string. When provided the function simply adds it to the summary (Helpful for adding names to certain action sequences).
"""
ExpectedRewards = self.GetExpectedRewards()
RewardMatrix = self.CompareRewards()
ExpectedCosts = self.GetExpectedCosts()
CostMatrix = self.CompareCosts()
# Combine all functions to print summary
if not np.any(self.LogLikelihoods):
sys.stdout.write(
"All samples have likelihood 0. Ensure the observed path is rational or raise the choice and/or action softmax parameters")
return None
if human:
if Id is not None:
sys.stdout.write("Id: " + str(Id) + "\n")
if self.MapFile is not None:
sys.stdout.write("Map: " + str(self.MapFile) + "\n")
sys.stdout.write(
"To see map details run Bishop.LoadObserverFromPC(self).\n")
sys.stdout.write(
"Object locations: " + str(self.ObjectLocations) + "\n")
sys.stdout.write(
"Object types: " + str(self.ObjectTypes) + "\n")
sys.stdout.write(
"Organic: " + str(self.Organic) + " (survival probability = " + str(self.SurvivalProb) + ")\n")
sys.stdout.write(
"Results using " + str(self.Samples) + " samples.\n")
sys.stdout.write("\nPATH INFORMATION\n\n")
sys.stdout.write(
"Starting position: " + str(self.StartingPoint) + "\n")
sys.stdout.write("Actions: " +
str(self.ActionNames) + ".\n")
if self.SoftChoice:
sys.stdout.write("Softmaxed choices.\n")
else:
sys.stdout.write("Optimal choices.\n")
if self.SoftAction:
sys.stdout.write("Softmaxed actions.\n")
else:
sys.stdout.write("Optimal actions.\n")
usefulsamples = len(
[i for i in self.LogLikelihoods if i != (- sys.maxsize - 1)])
sys.stdout.write("\nNumber of useful samples: " +
str(usefulsamples) + "(" + str(usefulsamples * 100.0 / self.Samples) + "%)\n")
sys.stdout.write("\n Maximum likelihood result\n\n")
self.ML()
sys.stdout.write("\nINFERRED REWARDS\n\n")
if (self.ObjectNames is not None):
for i in range(self.RewardDimensions):
sys.stdout.write(
str(self.ObjectNames[i]) + ": " + str(ExpectedRewards[i]) + "\n")
sys.stdout.write(str(self.ObjectNames) + "\n")
else:
sys.stdout.write(str(ExpectedRewards) + "\n")
sys.stdout.write(
"Reward comparison matrix: i, j = p( R(i)>=R(j) )\n")
sys.stdout.write(str(RewardMatrix) + "\n")
sys.stdout.write("\nINFERRED COSTS\n\n")
if (self.CostNames is not None):
for i in range(self.CostDimensions):
sys.stdout.write(
str(self.CostNames[i]) + ": " + str(ExpectedCosts[i]) + "\n")
sys.stdout.write(str(self.CostNames) + "\n")
else:
sys.stdout.write(str(ExpectedCosts) + "\n")
sys.stdout.write(
"Cost comparison matrix: i, j = p( C(i)>=C(j) )\n")
sys.stdout.write(str(CostMatrix) + "\n")
else:
# Print file header
###################
if Id is not None:
sys.stdout.write("Id,")
sys.stdout.write(
"Samples,UsefulSamples,StartingPoint,ObjectLocations,ObjectTypes,OrganicObjects,SurvivalProbability,SoftmaxAction,ActionTau,SoftmaxChoice,ChoiceTau,Actions")
# Add names for objects and terrains
if self.ObjectNames is not None:
for i in range(self.RewardDimensions):
sys.stdout.write("," + str(self.ObjectNames[i]))
else:
for i in range(self.RewardDimensions):
sys.stdout.write(",Object" + str(i))
if self.CostNames is not None:
for i in range(self.CostDimensions):
sys.stdout.write("," + str(self.CostNames[i]))
else:
for i in range(self.CostDimensions):
sys.stdout.write(",Terrain" + str(i))
# Add names for objects and terrains prefixed with ML (headers for
# the Maximum likelihood samples)
if self.ObjectNames is not None:
for i in range(self.RewardDimensions):
sys.stdout.write(",ML_" + str(self.ObjectNames[i]))
else:
for i in range(self.RewardDimensions):
sys.stdout.write(",ML_Object" + str(i))
if self.CostNames is not None:
for i in range(self.CostDimensions):
sys.stdout.write(",ML_" + str(self.CostNames[i]))
else:
for i in range(self.CostDimensions):
sys.stdout.write(",ML_Terrain" + str(i))
# Names for reward tradeoffs
for i in range(self.RewardDimensions):
for j in range(i + 1, self.RewardDimensions):
if i != j:
if self.ObjectNames is not None:
sys.stdout.write(
"," + str(self.ObjectNames[i]) + "." + str(self.ObjectNames[j]))
else:
sys.stdout.write(",R" + str(i) + ".R" + str(j))
# Names for cost tradeoffs
for i in range(self.CostDimensions):
for j in range(i + 1, self.CostDimensions):
if i != j:
if self.CostNames is not None:
sys.stdout.write(
"," + str(self.CostNames[i]) + "." + str(self.CostNames[j]))
else:
sys.stdout.write(",O" + str(i) + ".O" + str(j))
sys.stdout.write("\n")
###############
###############
# Print results
###############
###############
# Print general info
if Id is not None:
sys.stdout.write(str(Id) + ",")
usefulsamples = len(
[i for i in self.LogLikelihoods if i != (- sys.maxsize - 1)])
sys.stdout.write(
str(self.Samples) + "," + str(usefulsamples) + "," + str(self.StartingPoint) + ",")
# print object locations
for i in range(len(self.ObjectLocations)):
if i < (len(self.ObjectLocations) - 1):
sys.stdout.write(str(self.ObjectLocations[i]) + ".")
else:
sys.stdout.write(str(self.ObjectLocations[i]))
sys.stdout.write(",")
# print object types
for i in range(len(self.ObjectTypes)):
if i < (len(self.ObjectTypes) - 1):
sys.stdout.write(str(self.ObjectTypes[i]) + ".")
else:
sys.stdout.write(str(self.ObjectTypes[i]))
sys.stdout.write(",")
# print object organicity
for i in range(len(self.Organic)):
if i < (len(self.Organic) - 1):
sys.stdout.write(str(self.Organic[i]) + ".")
else:
sys.stdout.write(str(self.Organic[i]))
# print survival prob and softmax information
sys.stdout.write("," + str(self.SurvivalProb) + "," + str(self.SoftAction) + "," + str(
self.actionTau) + "," + str(self.SoftChoice) + "," + str(self.choiceTau) + ",")
for i in range(len(self.Actions)):
if i < (len(self.Actions) - 1):
sys.stdout.write(str(self.Actions[i]) + ".")
else:
sys.stdout.write(str(self.Actions[i]))
# Print expected costs and rewards
for i in range(self.RewardDimensions):
sys.stdout.write("," + str(ExpectedRewards[i]))
for i in range(self.CostDimensions):
sys.stdout.write("," + str(ExpectedCosts[i]))
# Print maximum likelihood costs and rewards
# First two parameters don't matter because human is set to false.
[C, R] = self.ML(1, 2, False)
for i in range(self.RewardDimensions):
sys.stdout.write("," + str(R[0, i]))
for i in range(self.CostDimensions):
sys.stdout.write("," + str(C[0, i]))
# Print reward tradeoffs
RewardM = self.CompareRewards()
for i in range(self.RewardDimensions):
for j in range(i + 1, self.RewardDimensions):
if i != j:
sys.stdout.write("," + str(RewardM[i][j]))
# Print cost tradeoffs
CostM = self.CompareCosts()
for i in range(self.CostDimensions):
for j in range(i + 1, self.CostDimensions):
if i != j:
sys.stdout.write("," + str(CostM[i][j]))
sys.stdout.write("\n")
def AnalyzeConvergence(self, jump=None):
"""
        Plot estimates as a function of the number of samples to visually determine whether the samples have converged.
Args:
            jump (int): Number of samples between each recomputation of the expected values.
"""
NL = np.exp(self.LogLikelihoods)
if sum(NL) == 0:
print("ERROR: All likelihoods are zero up to this point. Cannot analyze convergence POSTERIORCONTAINER-002")
return None
# jump indicates how often to recompute the expected value
if jump is None:
if self.Samples > 100:
print("Recomputing expected value after every 20 samples")
jump = int(round(self.Samples * 1.0 / 20))
else:
print("Recomputing expected value after every sample")
jump = 1
rangevals = list(range(0, self.Samples, jump))
ycostvals = [self.GetExpectedCosts(i) for i in rangevals]
ycostvals = np.array(ycostvals)
yrewardvals = [self.GetExpectedRewards(i) for i in rangevals]
yrewardvals = np.array(yrewardvals)
# break it into plots.
# Costs
f, axarr = plt.subplots(1, 2)
for i in range(self.CostDimensions):
axarr[0].plot(rangevals, ycostvals[:, i])
if self.CostNames is not None:
axarr[0].legend(self.CostNames, loc='upper left')
else:
axarr[0].legend(
[str(i) for i in range(self.CostDimensions)], loc='upper left')
# Rewards
for i in range(self.RewardDimensions):
axarr[1].plot(rangevals, yrewardvals[:, i])
if self.ObjectNames is not None:
axarr[1].legend(self.ObjectNames, loc='upper left')
else:
axarr[1].legend(
[str(i) for i in range(self.RewardDimensions)], loc='upper left')
plt.show()
def ML(self, n=1, roundparam=2, human=True):
"""
Print maximum likelihood sample(s)
        Args:
            n (int): Print top n samples (if n exceeds the number of samples, the function prints all samples)
            roundparam (int): Number of decimal places to round the samples to
            human (bool): When set to true prints nicely, when set to false returns the values in a format suitable for csv output (in this case n is treated as 1 and values aren't rounded)
"""
indices = self.LogLikelihoods.argsort()[-n:]
likelihoods = np.exp(self.LogLikelihoods[indices])
Costs = self.CostSamples[indices]
Rewards = self.RewardSamples[indices]
if human:
# Print header
if self.CostNames is not None:
for i in range(self.CostDimensions):
sys.stdout.write(str(self.CostNames[i]) + "\t")
else:
for i in range(self.CostDimensions):
sys.stdout.write("Terrain" + str(i) + "\t")
if self.ObjectNames is not None:
for i in range(self.RewardDimensions):
sys.stdout.write(str(self.ObjectNames[i]) + "\t")
else:
for i in range(self.RewardDimensions):
sys.stdout.write("Object" + str(i) + "\t")
sys.stdout.write("Likelihood\n")
# Print data
for top in range(min(n, self.Samples)):
# Print cost samples
for i in range(self.CostDimensions):
sys.stdout.write(
str(np.round(Costs[top, i], roundparam)) + "\t")
# Print reward samples
for i in range(self.RewardDimensions):
sys.stdout.write(
str(np.round(Rewards[top, i], roundparam)) + "\t")
sys.stdout.write(
str(np.round(likelihoods[top], roundparam)) + "\n")
else:
return [Costs[0], Rewards[0]]
def Display(self, Full=False):
"""
Print object attributes.
.. Warning::
This function is for internal use only.
Args:
Full (bool): When set to False, function only prints attribute names. Otherwise, it also prints its values.
Returns:
standard output summary
"""
if Full:
for (property, value) in vars(self).items():
print((property, ': ', value))
else:
for (property, value) in vars(self).items():
print(property)
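# A small self-contained sketch (an added illustration, not part of Bishop): it
# reproduces the likelihood weighting used by GetExpectedCosts and
# GetExpectedRewards on synthetic samples -- normalize exp(log-likelihood) into
# weights and take the weighted average of each sampled dimension.
if __name__ == '__main__':
    _samples = np.array([[1.0, 2.0],
                         [3.0, 4.0],
                         [5.0, 6.0]])  # 3 samples, 2 dimensions
    _loglik = np.log(np.array([0.1, 0.3, 0.6]))
    _weights = np.exp(_loglik)
    _weights = _weights / sum(_weights)  # normalized likelihoods
    _expected = [sum(_samples[:, d] * _weights) for d in range(_samples.shape[1])]
    # With weights (0.1, 0.3, 0.6) the expected values are 4.0 and 5.0.
    print("Expected values per dimension:", _expected)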
|
{
"content_hash": "fe13773c653fdf9818c3b831350c91a4",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 173,
"avg_line_length": 43.56643356643357,
"alnum_prop": 0.5268057784911717,
"repo_name": "julianje/Bishop",
"id": "1b8963546b0fe36b58452257919cb08add0397a7",
"size": "24945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bishop/PosteriorContainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "336173"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Network(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param broadcast: {"default": 0, "not-list": ["non-broadcast", "point-to-point", "point-to-multipoint"], "type": "number", "description": "Specify OSPF broadcast multi-access network", "format": "flag"}
:param point_to_multipoint: {"default": 0, "not-list": ["broadcast", "non-broadcast", "point-to-point"], "type": "number", "description": "Specify OSPF point-to-multipoint network", "format": "flag"}
:param non_broadcast: {"default": 0, "not-list": ["broadcast", "point-to-point", "point-to-multipoint"], "type": "number", "description": "Specify OSPF NBMA network", "format": "flag"}
:param point_to_point: {"default": 0, "not-list": ["broadcast", "non-broadcast", "point-to-multipoint"], "type": "number", "description": "Specify OSPF point-to-point network", "format": "flag"}
:param p2mp_nbma: {"default": 0, "type": "number", "description": "Specify non-broadcast point-to-multipoint network", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "network"
self.DeviceProxy = ""
self.broadcast = ""
self.point_to_multipoint = ""
self.non_broadcast = ""
self.point_to_point = ""
self.p2mp_nbma = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AuthenticationCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param authentication: {"default": 0, "type": "number", "description": "Enable authentication", "format": "flag"}
:param value: {"enum": ["message-digest", "null"], "type": "string", "description": "'message-digest': Use message-digest authentication; 'null': Use no authentication; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "authentication-cfg"
self.DeviceProxy = ""
self.authentication = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class DatabaseFilterCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param database_filter: {"enum": ["all"], "type": "string", "description": "'all': Filter all LSA; ", "format": "enum"}
:param out: {"default": 0, "type": "number", "description": "Outgoing LSA", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "database-filter-cfg"
self.DeviceProxy = ""
self.database_filter = ""
self.out = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class BfdCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param disable: {"default": 0, "type": "number", "description": "Disable BFD", "format": "flag"}
:param bfd: {"default": 0, "type": "number", "description": "Bidirectional Forwarding Detection (BFD)", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "bfd-cfg"
self.DeviceProxy = ""
self.disable = ""
self.bfd = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Md5(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param md5_value: {"minLength": 1, "maxLength": 16, "type": "string", "description": "The OSPF password (1-16)", "format": "password"}
:param encrypted: {"type": "encrypted", "description": "Do NOT use this option manually. (This is an A10 reserved keyword.) (The ENCRYPTED password string)", "format": "encrypted"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "md5"
self.DeviceProxy = ""
self.md5_value = ""
self.encrypted = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MessageDigestCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param message_digest_key: {"description": "Message digest authentication password (key) (Key id)", "minimum": 1, "type": "number", "maximum": 255, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "message-digest-cfg"
self.DeviceProxy = ""
self.message_digest_key = ""
self.md5 = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class OspfGlobal(A10BaseClass):
"""Class Description::
Global setting for Open Shortest Path First for IPv4 (OSPF).
Class ospf-global supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param dead_interval: {"description": "Interval after which a neighbor is declared dead (Seconds)", "format": "number", "default": 40, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}
:param authentication_key: {"description": "Authentication password (key) (The OSPF password (key))", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 8, "type": "string"}
:param mtu_ignore: {"default": 0, "optional": true, "type": "number", "description": "Ignores the MTU in DBD packets", "format": "flag"}
:param retransmit_interval: {"description": "Time between retransmitting lost link state advertisements (Seconds)", "format": "number", "default": 5, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}
:param transmit_delay: {"description": "Link state transmit delay (Seconds)", "format": "number", "default": 1, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param disable: {"optional": true, "enum": ["all"], "type": "string", "description": "'all': All functionality; ", "format": "enum"}
:param cost: {"description": "Interface cost", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}
:param hello_interval: {"description": "Time between HELLO packets (Seconds)", "format": "number", "default": 10, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}
:param mtu: {"description": "OSPF interface MTU (MTU size)", "format": "number", "type": "number", "maximum": 65535, "minimum": 576, "optional": true}
:param message_digest_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"message-digest-key": {"description": "Message digest authentication password (key) (Key id)", "minimum": 1, "type": "number", "maximum": 255, "format": "number"}, "optional": true, "md5": {"type": "object", "properties": {"md5-value": {"minLength": 1, "maxLength": 16, "type": "string", "description": "The OSPF password (1-16)", "format": "password"}, "encrypted": {"type": "encrypted", "description": "Do NOT use this option manually. (This is an A10 reserved keyword.) (The ENCRYPTED password string)", "format": "encrypted"}}}}}]}
:param priority: {"description": "Router priority", "format": "number", "default": 1, "optional": true, "maximum": 255, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ethernet/{ifnum}/ip/ospf/ospf-global`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ospf-global"
self.a10_url="/axapi/v3/interface/ethernet/{ifnum}/ip/ospf/ospf-global"
self.DeviceProxy = ""
self.dead_interval = ""
self.authentication_key = ""
self.network = {}
self.mtu_ignore = ""
self.retransmit_interval = ""
self.transmit_delay = ""
self.uuid = ""
self.disable = ""
self.authentication_cfg = {}
self.database_filter_cfg = {}
self.bfd_cfg = {}
self.cost = ""
self.hello_interval = ""
self.mtu = ""
self.message_digest_cfg = []
self.priority = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
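# A tiny self-contained illustration (not part of the A10 SDK) of the
# kwargs-to-attribute pattern that every class above uses: any keyword passed to
# the constructor becomes an instance attribute via setattr, which is how the
# schema fields documented in the docstrings get populated.
if __name__ == '__main__':
    class _KwargsToAttrs(object):
        def __init__(self, **kwargs):
            for keys, value in kwargs.items():
                setattr(self, keys, value)
    _cfg = _KwargsToAttrs(hello_interval=10, dead_interval=40, mtu_ignore=1)
    assert _cfg.hello_interval == 10 and _cfg.dead_interval == 40
    print("kwargs-to-attribute sketch ok")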
|
{
"content_hash": "7887edc4e8e713e13a1a95b2a47dca8e",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 675,
"avg_line_length": 44.766666666666666,
"alnum_prop": 0.6174875013296458,
"repo_name": "amwelch/a10sdk-python",
"id": "b3c91a301f3c5a4eed7bd0d4e60d78cabce7f088",
"size": "9401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/interface/interface_ethernet_ip_ospf_ospf_global.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
from collections import Counter
def scramble(s1, s2):
# Minor refactor of a solution by 00kevin on CodeWars
return not(Counter(s2) - Counter(s1))
# def scramble(s1, s2):
# cnt = 0
# length = len(s2)
# s2 = sorted(s2)
# for letter in sorted(s1):
# if letter == s2[cnt]:
# cnt += 1
# if cnt == length:
# return True
# return False
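# A minimal check of the Counter-subtraction idea (an added illustration): the
# difference Counter(s2) - Counter(s1) is empty exactly when every letter of s2,
# counted with multiplicity, is available in s1 -- and an empty Counter is falsy,
# so not(...) turns that into True.
if __name__ == '__main__':
    assert scramble('rkqodlw', 'world')    # every letter of 'world' appears in s1
    assert not scramble('katas', 'steak')  # 'e' is missing from s1
    print('scramble checks passed')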
|
{
"content_hash": "a2e7ae918e73578c0765cb226658c605",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 57,
"avg_line_length": 22.72222222222222,
"alnum_prop": 0.5403422982885085,
"repo_name": "the-zebulan/CodeWars",
"id": "24cd5b1e1deb06e70477d252adc179e762e9bb54",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_5/scramblies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
import json
import os
from swiftclient import exceptions as swiftexp
from solum.api.handlers import handler
from solum.common import exception as exc
from solum.common import solum_swiftclient
from solum import objects
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class UserlogHandler(handler.Handler):
def get_all(self):
"""Return all userlogs, based on the query provided."""
return objects.registry.UserlogList.get_all(self.context)
def get_all_by_id(self, resource_uuid):
return objects.registry.UserlogList.get_all_by_id(
self.context, resource_uuid=resource_uuid)
def delete(self, resource_uuid):
"""Delete existing logs."""
ulogs = objects.registry.UserlogList.get_all_by_id(
self.context, resource_uuid=resource_uuid)
# Delete log files
swift = solum_swiftclient.SwiftClient(self.context)
for ulog in ulogs:
location = ulog.location
strategy = ulog.strategy
strategy_info = json.loads(ulog.strategy_info)
if strategy == 'swift':
# Delete logs from swift
try:
swift.delete_object(strategy_info['container'],
location)
except swiftexp.ClientException:
raise exc.AuthorizationFailure(
client='swift',
message="Unable to delete logs from swift.")
elif strategy == 'local':
# Delete logs from local filesystem
# This setting is exclusively used for single node deployments.
try:
os.remove(location)
except OSError:
pass
# Delete the log reference from db.
ulog.destroy(self.context)
return
|
{
"content_hash": "808b3d6058004069e38625ffb845f966",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 33.6140350877193,
"alnum_prop": 0.5913361169102297,
"repo_name": "devdattakulkarni/test-solum",
"id": "f547c40777a0f5d989a7bac060f9bfdc53d42a1a",
"size": "2501",
"binary": false,
"copies": "1",
"ref": "refs/heads/add-virt-driver",
"path": "solum/api/handlers/userlog_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1240622"
},
{
"name": "Shell",
"bytes": "82235"
}
],
"symlink_target": ""
}
|
import smtplib
import os
from django.shortcuts import render
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def contact(request):
if request.method == 'GET':
context = None
return render(request, 'contact.html', context)
if request.method == 'POST':
# Collect the submitted data and email it to administrator.
subName = request.POST['realname']
subEmail = request.POST['email']
subSubject = request.POST['subject']
subBody = request.POST['body']
msg = MIMEMultipart('alternative')
msg['Subject'] = "You have a new message on APChemHelp!"
msg['From'] = subEmail
msg['To'] = 'grantrygh@gmail.com'
text = "Name: %s \nSubject: %s \nMessage: %s" % (subName, subSubject, subBody)
part1 = MIMEText(text, 'plain')
username = os.getenv('MAND_SMTP_USER')
password = os.getenv('MAND_SMTP_PASS')
msg.attach(part1)
s = smtplib.SMTP('smtp.mandrillapp.com', 587)
s.login(username, password)
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.quit()
context = {'contactComplete': True}
return render(request, 'contact.html', context)
|
{
"content_hash": "9e636372b70343ed0f39574f41e60386",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 33.75675675675676,
"alnum_prop": 0.6212970376301041,
"repo_name": "grantrygh/apchemhelp",
"id": "be6c3568c7c89355e1d27d1b4e1e040b04a978a1",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13011"
},
{
"name": "JavaScript",
"bytes": "38491"
},
{
"name": "Python",
"bytes": "12965"
}
],
"symlink_target": ""
}
|
import copy
import os
import re
import sys
import time
from conary.deps import deps
from conary_test import recipes
from testrunner import testhelp
from rmake import compat
from rmake.build import builder
from rmake.lib import logfile
from rmake.lib import repocache
from rmake_test import resources
from rmake_test import rmakehelp
workingRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase'
version = '1.0'
# cross requirements shouldn't matter since we're not
# crosscompiling
crossReqs = ['nonexistant:runtime']
clearBuildReqs()
def setup(r):
if Use.ssl:
r.Create('/foo', contents='foo')
else:
r.Create('/bar', contents='foo')
"""
macrosRecipe = r"""\
class TestRecipe(PackageRecipe):
name = 'testcase'
version = '1.0'
clearBuildReqs()
def setup(r):
r.macros.foo = 'ssl'
# this recipe only passes if r.macros.foo == 'readline'
# and r.macros.multi == 'line1\nline2'
if r.macros.foo == 'readline' and r.macros.multi == 'line1\nline2':
r.Create('/foo', contents='foo')
"""
failingRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase2'
version = '1.0'
clearBuildReqs()
def setup(r):
r.Create('/foo', contents='foo')
r.Run('exit 1')
"""
failedSetupRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase3'
version = '1.0'
clearBuildReqs()
def setup(r):
a = b
r.Create('/foo', contents='foo')
"""
failedLoadRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase4'
version = '1.0'
a = b # NameError
clearBuildReqs()
def setup(r):
r.Create('/foo', contents='foo')
"""
failedBuildReqRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase5'
version = '1.0'
clearBuildReqs()
buildRequires = ['bbbbbbb:devel']
def setup(r):
r.Create('/foo', contents='foo')
"""
groupRecipe = """\
class TestRecipe(GroupRecipe):
name = 'group-foo'
version = '1.0'
clearBuildReqs()
def setup(r):
# sneak in a test of r.macros.buildlabel
r.setSearchPath(r.macros.buildlabel)
r.add('simple:runtime')
r.add('other:runtime')
"""
redirectRecipe = """\
class TestRecipe(RedirectRecipe):
name = 'redirect'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addRedirect("target", '/localhost@rpl:linux')
"""
filesetRecipe = """\
class TestRecipe(FilesetRecipe):
name = 'fileset-foo'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addFile('/bin/foo', "orig", '/localhost@rpl:linux')
"""
infoRecipe = """\
class Info(GroupInfoRecipe):
name = 'info-sys'
version = '1'
clearBuildReqs()
def setup(r):
r.Group('sys', 3)
"""
derivedRecipe = """\
class TestRecipe(DerivedPackageRecipe):
name = 'testcase'
version = '1.0'
clearBuildReqs()
def setup(r):
r.Create('/bam', contents='foo')
"""
buildReqsRecipe = """\
class TestRecipe(PackageRecipe):
name = 'testcase'
version = '1.0'
clearBuildReqs()
buildRequires = [ 'foo:runtime', 'foo:runtime[cross]']
def setup(r):
r.Create('/bar', contents='foo')
"""
class BuilderTest(rmakehelp.RmakeHelper):
def testBasic(self):
# FIXME: this is really slow - ~20 seconds.
# Perhaps we need to make hooks to make this test faster?
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', workingRecipe)])
trv2 = self.addComponent('testcase2:source', '1.0-1', '',
[('testcase2.recipe', failingRecipe)])
trv3 = self.addComponent('testcase3:source', '1.0-1', '',
[('testcase3.recipe', failedSetupRecipe)])
trv4 = self.addComponent('testcase4:source', '1.0-1', '',
[('testcase4.recipe', failedLoadRecipe)])
trv5 = self.addComponent('testcase5:source', '1.0-1', '',
[('testcase5.recipe', failedBuildReqRecipe)])
self.openRmakeRepository()
troveList = [
(trv.getName(), trv.getVersion(), deps.parseFlavor('!ssl')),
trv2.getNameVersionFlavor(),
trv3.getNameVersionFlavor(),
trv4.getNameVersionFlavor(),
trv5.getNameVersionFlavor(),
]
db = self.openRmakeDatabase()
job = self.newJob(*troveList)
db.subscribeToJob(job)
b = builder.Builder(self.rmakeCfg, job)
self.logFilter.add()
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
try:
b.build()
except Exception:
b.worker.stopAllCommands()
raise
logFile.restoreOutput()
assert(set([x.getName() for x in b.dh.depState.getFailedTroves()])
== set([trv2.getName(), trv3.getName(), trv4.getName(),
trv5.getName()]))
repos = self.openRepository()
results = repos.findTrove(None, ('testcase', 'rmakehost@local:linux',
deps.parseFlavor('!ssl')),
self.buildCfg.flavor)
assert(len(results) == 1)
assert(results[0][2] == deps.parseFlavor('~!ssl'))
troveDict = dict((x.getName(), x)
for x in db.getJob(1).iterFailedTroves())
assert(len(troveDict) == 4)
trv2 = troveDict['testcase2:source']
failureReason = str(trv2.getFailureReason())
# remove the arch-specific flavor here, we're not testing that
failureReason = re.sub(r'\[.*\]', '[FLAVOR]', failureReason)
assert(str(failureReason) == 'Failed while building: Error building recipe testcase2:source=/localhost@rpl:linux/1.0-1[FLAVOR]: Shell command "exit 1" exited with exit code 1')
trv3 = troveDict['testcase3:source']
assert(str(trv3.getFailureReason()) == "Failed while loading recipe: global name 'b' is not defined")
trv4 = troveDict['testcase4:source']
failureReason = str(trv4.getFailureReason())
failureReason = re.sub('/tmp.*\.recipe',
'TEMP.recipe',
failureReason)
failureReason = re.sub('temp-testcase4.*\.recipe',
'testcase4.recipe',
failureReason)
errStr = '''\
Failed while loading recipe: unable to load recipe file /varTEMP.recipe:
Error in recipe file "testcase4.recipe":
Traceback (most recent call last):
File "/varTEMP.recipe", line 1, in ?
class TestRecipe(PackageRecipe):
File "/varTEMP.recipe", line 4, in TestRecipe
a = b # NameError
NameError: name 'b' is not defined
'''
if sys.version_info > (2, 5):
errStr = errStr.replace('?', '<module>')
self.assertEquals(failureReason, errStr)
trv5 = troveDict['testcase5:source']
self.assertEquals(str(trv5.getFailureReason()),
'Could not satisfy build requirements: bbbbbbb:devel=[]')
assert(str(b.job.getFailureReason()) == """\
Failed while building: Build job had failures:
* testcase2:source: Error building recipe testcase2:source=/localhost@rpl:linux/1.0-1[%s]: Shell command "exit 1" exited with exit code 1
* testcase3:source: Failed while loading recipe
* testcase4:source: Failed while loading recipe
* testcase5:source: Could not satisfy build requirements: bbbbbbb:devel=[]
""" % self.getArchFlavor())
def testBuildReqs(self):
self.addComponent('foo:runtime[!cross]')
self.addComponent('foo:runtime=2[cross]', filePrimer=1)
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', buildReqsRecipe)])
self.openRmakeRepository()
db = self.openRmakeDatabase()
job = self.newJob(trv.getNameVersionFlavor())
db.subscribeToJob(job)
b = builder.Builder(self.rmakeCfg, job)
self.logFilter.add()
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
try:
b.build()
except Exception:
b.worker.stopAllCommands()
raise
logFile.restoreOutput()
assert b.job.isBuilt(), b.job.troves.values()[0].getFailureReason().getTraceback()
trv = self.findAndGetTrove('testcase=rmakehost@local:linux')
buildReqs = trv.getBuildRequirements()
assert len(buildReqs) == 2, "Got cross req twice!"
def testLoadInstalled(self):
# Test to ensure that when we are building package a
# and package b is installed in that root, the version of
# package b installed in that root is loaded and read for
# class info.
loadInstalledRecipe = """\
loadInstalled('buildreq')
class TestRecipe(PackageRecipe):
name = 'loadreq'
version = '1.0'
clearBuildReqs()
buildRequires = ['buildreq:runtime']
def setup(r):
r.Create(BuildReq.filePath)
"""
buildreqRecipe = """\
class BuildReq(PackageRecipe):
name = 'buildreq'
version = '1.0'
filePath = '/foo1.0'
clearBuildReqs()
def setup(r):
r.Create('/foo', contents='foo')
"""
buildreqRecipe2 = buildreqRecipe.replace('1.0', '2.0')
br = self.addComponent('buildreq:source', '1.0-1', '',
[('buildreq.recipe', buildreqRecipe)])
br2 = self.addComponent('buildreq:source', '2.0-1', '',
[('buildreq.recipe', buildreqRecipe2)])
li = self.addComponent('loadreq:source', '2.0-1', '',
[('loadreq.recipe', loadInstalledRecipe)])
self.addComponent('buildreq:runtime', '1.0-1-1')
self.addCollection('buildreq', '1.0-1-1', [':runtime'])
job = self.buildTroves(li.getNameVersionFlavor())
assert(job.isBuilt())
liBuilt = job.getTrove(*(job.getTrovesByName('loadreq')[0]))
# get built version info
v, f = liBuilt.iterBuiltTroves().next()[1:]
repos = repocache.CachingTroveSource(self.openRepository(),
self.rmakeCfg.getCacheDir())
trv = repos.getTrove('loadreq:runtime', v, f, withFiles=True)
files = [x[1] for x in trv.iterFileList()]
# make sure that we loaded version 1.0 when building, even though
# version 2.0 was available.
assert('/foo1.0' in files)
def testCopyInPolicy(self):
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', workingRecipe)])
self.openRmakeRepository()
troveList = [ trv.getNameVersionFlavor() ]
db = self.openRmakeDatabase()
buildCfg = copy.deepcopy(self.buildCfg)
buildCfg.strictMode = False
buildCfg.copyInConary = True
fakePolicyPath = resources.get_archive('policy')
buildCfg.policyDirs = buildCfg.policyDirs + [ fakePolicyPath ]
job = self.newJob(buildConfig=buildCfg, *troveList)
b = builder.Builder(self.rmakeCfg, job)
self.logFilter.add()
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
trove = b.job.troves.values()[0]
assert(str(trove.getFailureReason()).endswith(
'This fake policy always breaks.'))
def testGroupRecipe(self):
if compat.ConaryVersion().conaryVersion[0:2] == [1,2]:
raise testhelp.SkipTestException('test fails on 1.2')
repos = self.openRmakeRepository()
db = self.openRmakeDatabase()
self.buildCfg.shortenGroupFlavors = True
self.buildCfg.setSection('foo') # add context foo
self.buildCfg.configLine('buildFlavor desktop is:x86')
self.buildCfg.setSection('bar')
self.buildCfg.configLine('buildFlavor !desktop is:x86')
simple = self.addComponent('simple:runtime', '1.0-1-1', '')
other1 = self.addComponent('other:runtime', '1.0-1-1', '!desktop',
filePrimer=1)
other2 = self.addComponent('other:runtime', '1.0-1-1', 'desktop',
filePrimer=1)
# Prevent the desktop flag from being pre-filtered out
recipe = groupRecipe + '\n if Use.desktop: pass\n'
trv = self.addComponent('group-foo:source',
'/localhost@rpl:linux//rmakehost@local:linux/1:1.0-1',
[('group-foo.recipe', recipe)])
troveList = [
trv.getNameVersionFlavor() + ('foo',),
trv.getNameVersionFlavor() + ('bar',),
]
job = self.newJob(*troveList)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert b.job.isBuilt(), str(b.job.getFailureReason())
assert len(b.job.getBuiltTroveList()) == 2, b.job.getBuiltTroveList()
for tup in b.job.getBuiltTroveList():
groupTrove = repos.getTrove(*tup)
# this is just a very basic test of builder -> group build.
# tests of the group cook code's ability to include the right
# version in particular cases should be in cooktest.py
if '!desktop' in str(groupTrove.getFlavor()):
other = other1
else:
other = other2
self.assertEqual(sorted(groupTrove.iterTroveList(strongRefs=True)),
[other.getNameVersionFlavor(),
simple.getNameVersionFlavor()])
def testRedirectRecipe(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
self.addComponent('redirect:run', '1')
self.addCollection('redirect', '1', [':run'])
self.addComponent('target:run', '1')
self.addCollection('target', '1', [':run'])
# simulate building a checkout
trv = self.addComponent(
'redirect:source=/localhost@rpl:linux//rmakehost@local:linux/1.0-1',
[('redirect.recipe', redirectRecipe)])
os.chdir(self.workDir)
troveList = [ trv.getNameVersionFlavor() ]
job = self.newJob(*troveList)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert b.job.isBuilt(), b.job.getFailureReason()
def testFilesetRecipe(self):
self.openRmakeRepository()
self.addComponent('orig:run', '1', ['/bin/foo'])
self.addCollection('orig', '1', [':run'])
trv = self.addComponent('fileset-foo:source', '1.0-1', '',
[('fileset-foo.recipe', filesetRecipe)])
troveList = [ trv.getNameVersionFlavor() ]
job = self.newJob(*troveList)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert b.job.isBuilt(), b.job.troves.values()[0].getFailureReason()
def testInfoRecipe(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
trv = self.addComponent('info-sys:source', '1.0-1', '',
[('info-sys.recipe', infoRecipe)])
troveList = [ trv.getNameVersionFlavor() ]
job = self.newJob(*troveList)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert(b.job.isBuilt())
def testMacrosRecipe(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', macrosRecipe)])
self.buildCfg.configLine('macros foo readline')
self.buildCfg.macros['multi'] = 'line1\nline2'
job = self.newJob(trv)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert(b.job.isBuilt())
# this recipe will only work if r.macros.foo has been successfully
# set to readline
def testDerivedRecipe(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', workingRecipe)])
self.addComponent('testcase:run', '1.0-1-1',
['/foo', '/bar'])
self.addCollection('testcase', '1.0-1-1', [':run'])
trv = self.addComponent('testcase:source',
'/localhost@rpl:linux//branch/1.0-1',
[('testcase.recipe', workingRecipe)])
trv = self.addComponent('testcase:source',
'/localhost@rpl:linux//branch/1.0-1.1',
[('testcase.recipe', derivedRecipe)])
troveList = [ trv.getNameVersionFlavor() ]
job = self.newJob(*troveList)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert(b.job.isBuilt())
def testDerivedRecipeCheckout(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', workingRecipe)])
self.addComponent('testcase:run', '1.0-1-1',
['/foo', '/bar'])
self.addCollection('testcase', '1.0-1-1', [':run'])
trv = self.addComponent('testcase:source',
'/localhost@rpl:linux//branch/1.0-1',
[('testcase.recipe', workingRecipe)])
trv = self.addComponent('testcase:source',
'/localhost@rpl:linux//branch/1.0-1.1',
[('testcase.recipe', derivedRecipe)])
os.chdir(self.workDir)
self.checkout('testcase=:branch')
os.chdir('testcase')
self.writeFile('testcase.recipe', '#comment\n' + derivedRecipe)
helper = self.getRmakeHelper()
server = helper.client.uri.server
jobId = self.discardOutput(helper.buildTroves, ['testcase.recipe'])
job = helper.getJob(jobId)
while not job.isFinished():
server._serveLoopHook()
job = helper.getJob(jobId)
time.sleep(.5)
assert(job.isBuilt()), job.getFailureReason()
server._halt = 1
server._serveLoopHook()
time.sleep(2)
def testMultipleDelayedRecipes(self):
self.openRmakeRepository()
db = self.openRmakeDatabase()
trv = self.addComponent('redirect:source', '1.0-1', '',
[('redirect.recipe', redirectRecipe)])
trv2 = self.addComponent('fileset-foo:source', '1.0-1', '',
[('fileset-foo.recipe', filesetRecipe)])
troveList = [ trv.getNameVersionFlavor(), trv2.getNameVersionFlavor() ]
job = self.newJob(*troveList)
db.subscribeToJob(job)
b = builder.Builder(self.rmakeCfg, job)
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
b.build()
logFile.restoreOutput()
assert(b.job.isFailed())
failedTroves = list(b.job.iterFailedTroves())
assert(len(failedTroves) == 2)
group = [ x for x in failedTroves if x.getName() == trv.getName()][0]
assert(str(group.getFailureReason()) == 'Trove failed sanity check: redirect and fileset packages must be alone in their own job')
assert(str(b.job.getFailureReason()) == 'Job failed sanity check: redirect and fileset packages must be alone in their own job: fileset-foo, redirect')
def testPrebuiltBinaries(self):
trv = self.addComponent('testcase:source', '1.0-1', '',
[('testcase.recipe', workingRecipe)])
trv2 = self.addComponent('simple:source', '1.0-2',
[('simple.recipe', recipes.simpleRecipe)])
binTrv = self.addCollection('testcase=1.0-1-1[ssl]', [':runtime'],
createComps=True)
binTrv2 = self.addCollection('testcase=1.0-1-1[!ssl]', [':runtime'],
createComps=True)
binTrv3 = self.addCollection('testcase=1.0-1-1', [':runtime'],
createComps=True)
binTrv4 = self.addCollection('simple=1.0-1-1', [':runtime'])
job = self.newJob(trv, trv2)
prebuilt = [ x.getNameVersionFlavor() for x in
(binTrv, binTrv2, binTrv3, binTrv4)]
job.getMainConfig().prebuiltBinaries = prebuilt
b = builder.Builder(self.rmakeCfg, job)
b.initializeBuild()
for trv in job.iterTroves():
if trv.getName().split(':')[0] in ['testcase']:
assert(trv.isPrebuilt())
assert(trv.prebuiltIsSourceMatch())
else:
assert(trv.isPrebuilt())
assert(not trv.prebuiltIsSourceMatch())
job = self.newJob(trv2)
job.getMainConfig().prebuiltBinaries = prebuilt
b = builder.Builder(self.rmakeCfg, job)
b.initializeBuild()
for trv in job.iterTroves():
assert(trv.isPrebuilt())
assert(not trv.prebuiltIsSourceMatch())
binTrv5 = self.addCollection('simple=1.0-2-1', [':runtime'],
loadedReqs=[binTrv.getNameVersionFlavor()])
prebuilt.append(binTrv5.getNameVersionFlavor())
job.getMainConfig().prebuiltBinaries = prebuilt
b = builder.Builder(self.rmakeCfg, job)
b.initializeBuild()
for trv in job.iterTroves():
assert(trv.isPrebuilt())
assert(not trv.prebuiltIsSourceMatch())
job.getMainConfig().ignoreAllRebuildDeps = True
b = builder.Builder(self.rmakeCfg, job)
b.initializeBuild()
for trv in job.iterTroves():
assert(trv.isPrebuilt())
def testPrebuiltGroups(self):
"""
Groups should never be pre-built.
@tests: RMK-903
"""
self.addComponent('foo:runtime')
groupSource = self.addComponent('group-foo:source=1.0-1',
[('group-foo.recipe', groupRecipe)])
groupTrove = self.addCollection('group-foo=1.0-1-1',
['foo:runtime'])
job = self.newJob(groupSource)
job.getMainConfig().prebuiltBinaries = [
groupTrove.getNameVersionFlavor()]
build = builder.Builder(self.rmakeCfg, job)
build.initializeBuild()
for buildTrove in job.iterTroves():
self.failIf(buildTrove.isPrebuilt(), "Group trove is pre-built")
def testBuildImages(self):
rbuildServer = self.startMockRbuilder()
oldSleep = time.sleep
self.mock(time, 'sleep', lambda x: oldSleep(.1))
self.addComponent('foo:run')
trv = self.addCollection('group-foo', ['foo:run'])
db = self.openRmakeDatabase()
job = self.newJob()
trv = self.newImageTrove(job.jobId, productName='product', imageType='imageType',
imageOptions={},
*trv.getNameVersionFlavor())
trv.setConfig(self.buildCfg)
job.addBuildTrove(trv)
job.setBuildTroves([trv])
b = builder.Builder(self.rmakeCfg, job)
self.logFilter.add()
logFile = logfile.LogFile(self.workDir + '/buildlog')
logFile.redirectOutput()
try:
b.build()
except Exception:
b.worker.stopAllCommands()
raise
log = db.getTroveBuildLog(1, job.troves.values()[0].getNameVersionFlavor(), 0)[1]
expectedLog = '''\
0: Working: 51
0: Working: 101
0: Working: 151
0: Finished.
'''
assert(expectedLog in log)
def testBuildImagesMultinode(self):
raise testhelp.SkipTestException()
rbuildServer = self.startMockRbuilder()
oldSleep = time.sleep
self.mock(time, 'sleep', lambda x: oldSleep(.1))
self.addComponent('foo:run')
trv = self.addCollection('group-foo', ['foo:run'])
rmakeClient = self.startRmakeServer(multinode=True)
self.startNode()
helper = self.getRmakeHelper(rmakeClient.uri)
job = helper.createImageJob('project', [('group-foo', 'imageType', {})])
jobId = helper.buildJob(job)
helper.waitForJob(jobId)
db = self.openRmakeDatabase()
trove = job.troves.values()[0]
nvfc = list(trove.getNameVersionFlavor()) + [trove.getContext()]
log = db.getTroveBuildLog(1, nvfc, 0)[1]
expectedLog = '''\
0: Working: 51
0: Working: 101
0: Working: 151
0: Finished.
'''
assert(expectedLog in log)
def testBuildImagesWithBuildName(self):
raise testhelp.SkipTestException()
rbuildServer = self.startMockRbuilder()
oldSleep = time.sleep
self.mock(time, 'sleep', lambda x: oldSleep(.1))
self.addComponent('foo:run')
trv = self.addCollection('group-foo', ['foo:run'])
rmakeClient = self.startRmakeServer(multinode=True)
self.startNode()
helper = self.getRmakeHelper(rmakeClient.uri)
job = helper.createImageJob('project', [
('group-foo', 'imageType', {}, 'Image Name'),
('group-foo', 'imageType', {}, 'Image Name'),
('group-foo', 'imageType', {}),
('group-foo', 'imageType', {})
])
self.assertEquals(
[x[3] for x in job.troves.keys()],
['Image_3', 'Image_2', 'Image_Name_(1)', 'Image_Name'])
jobId = helper.buildJob(job)
helper.waitForJob(jobId)
db = self.openRmakeDatabase()
expectedLog = '''\
0: Working: 51
0: Working: 101
0: Working: 151
0: Finished.
'''
for trove in job.troves.values():
nvfc = list(trove.getNameVersionFlavor()) + [trove.getContext()]
log = db.getTroveBuildLog(1, nvfc, 0)[1]
assert(expectedLog in log)
|
{
"content_hash": "a204fca364bbca2e768f95d8f5ef86a8",
"timestamp": "",
"source": "github",
"line_count": 712,
"max_line_length": 184,
"avg_line_length": 37.77949438202247,
"alnum_prop": 0.5796498011078479,
"repo_name": "fedora-conary/rmake-2",
"id": "b34e9321a15f6e71db069953ddf2f1f48dbcfe7d",
"size": "27486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rmake_test/functional_test/buildtest/buildertest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35796"
},
{
"name": "C++",
"bytes": "3953"
},
{
"name": "Python",
"bytes": "1682020"
},
{
"name": "Shell",
"bytes": "12415"
}
],
"symlink_target": ""
}
|
"""
=======================
Triggered File Reader
=======================
This component accepts a filepath as an "inbox" message, and outputs the
contents of that file to "outbox". All requests are processed sequentially.
This component does not terminate.
"""
from Axon.Component import component
class TriggeredFileReader(component):
"""\
    TriggeredFileReader() -> component that reads files on request
"""
Inboxes = { "inbox" : "filepaths to read",
"control" : "UNUSED"
}
Outboxes = { "outbox" : "file contents, 1 per message",
"signal" : "UNUSED"
}
def __init__(self):
super(TriggeredFileReader, self).__init__()
def readFile(self, filename):
"""Read data out of a file"""
file = open(filename, "rb", 0)
data = file.read()
file.close()
return data
def main(self):
"""Main loop"""
while 1:
yield 1
while self.dataReady("inbox"):
command = self.recv("inbox")
#print "Read file " + command
self.send(self.readFile(command), "outbox")
self.pause()
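# Minimal usage sketch (not part of the original module): exercise readFile()
# directly against a temporary file. In a real Kamaelia system this component
# would be wired into a pipeline and driven by the Axon scheduler; the code
# below only demonstrates the read-on-request behaviour described above.
if __name__ == "__main__":
    import os
    import tempfile
    handle, path = tempfile.mkstemp()
    os.write(handle, b"hello world")
    os.close(handle)
    reader = TriggeredFileReader()
    print(reader.readFile(path))  # raw file contents, since the file is opened "rb"
    os.unlink(path)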
|
{
"content_hash": "bc3974b1f961733d746c447507ce8f20",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 26.5,
"alnum_prop": 0.5523156089193825,
"repo_name": "bbc/kamaelia",
"id": "aef9406bec21766b196ca7d6acd23226e29dcc25",
"size": "2073",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/RJL/Util/TriggeredFileReader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
try:
import djsqla_query_operations
except ImportError:
djsqla_query_operations = None
|
{
"content_hash": "564ba0062e9efa73c8f07c698be76572",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 23.75,
"alnum_prop": 0.7578947368421053,
"repo_name": "rjusher/djsqla-rest",
"id": "a2adefa8c1a90a53163c5bf72b33755811e58209",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djsqla_rest/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63301"
}
],
"symlink_target": ""
}
|
"""
The PyBuilder execution module.
Deals with the execution of a PyBuilder process by
running tasks, actions and initializers in the correct
order regarding dependencies.
"""
import inspect
import re
import types
import copy
from pybuilder.errors import (CircularTaskDependencyException,
DependenciesNotResolvedException,
InvalidNameException,
MissingTaskDependencyException,
MissingActionDependencyException,
NoSuchTaskException)
from pybuilder.utils import as_list, Timer
from pybuilder.graph_utils import Graph, GraphHasCycles
def as_task_name_list(mixed):
result = []
for item in as_list(mixed):
if isinstance(item, types.FunctionType):
result.append(item.__name__)
else:
result.append(str(item))
return result
class Executable(object):
NAME_PATTERN = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]+$")
def __init__(self, name, callable, description=""):
if not Executable.NAME_PATTERN.match(name):
raise InvalidNameException(name)
self._name = name
self.description = description
self.callable = callable
if hasattr(callable, "__module__"):
self.source = callable.__module__
else:
self.source = "n/a"
if isinstance(self.callable, types.FunctionType):
self.parameters = inspect.getargspec(self.callable).args
else:
raise TypeError("Don't know how to handle callable %s" % callable)
@property
def name(self):
return self._name
def execute(self, argument_dict):
arguments = []
for parameter in self.parameters:
if parameter not in argument_dict:
raise ValueError("Invalid parameter '%s' for %s %s" % (parameter, self.__class__.__name__, self.name))
arguments.append(argument_dict[parameter])
self.callable(*arguments)
class Action(Executable):
def __init__(self, name, callable, before=None, after=None, description="", only_once=False):
super(Action, self).__init__(name, callable, description)
self.execute_before = as_task_name_list(before)
self.execute_after = as_task_name_list(after)
self.only_once = only_once
class Task(object):
def __init__(self, name, callable, dependencies=None, description=""):
self.name = name
self.executables = [Executable(name, callable, description)]
self.dependencies = as_task_name_list(dependencies)
self.description = [description]
def __eq__(self, other):
if isinstance(other, Task):
return self.name == other.name
return False
def __hash__(self):
return 9 * hash(self.name)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, Task):
return self.name < other.name
return self.name < other
def extend(self, task):
self.executables += task.executables
self.dependencies += task.dependencies
self.description += task.description
def execute(self, logger, argument_dict):
for executable in self.executables:
logger.debug("Executing subtask from %s", executable.source)
executable.execute(argument_dict)
class Initializer(Executable):
def __init__(self, name, callable, environments=None, description=""):
super(Initializer, self).__init__(name, callable, description)
self.environments = environments
def is_applicable(self, environments=None):
if not self.environments:
return True
for environment in as_list(environments):
if environment in self.environments:
return True
class TaskExecutionSummary(object):
def __init__(self, task, number_of_actions, execution_time):
self.task = task
self.number_of_actions = number_of_actions
self.execution_time = execution_time
class ExecutionManager(object):
def __init__(self, logger):
self.logger = logger
self._tasks = {}
self._task_dependencies = {}
self._actions = {}
self._execute_before = {}
self._execute_after = {}
self._initializers = []
self._dependencies_resolved = False
self._actions_executed = []
self._tasks_executed = []
self._current_task = None
@property
def initializers(self):
return self._initializers
@property
def tasks(self):
return list(self._tasks.values())
@property
def task_names(self):
return sorted(self._tasks.keys())
def register_initializer(self, initializer):
self.logger.debug("Registering initializer '%s'", initializer.name)
self._initializers.append(initializer)
def register_action(self, action):
self.logger.debug("Registering action '%s'", action.name)
self._actions[action.name] = action
def register_task(self, *tasks):
for task in tasks:
self.logger.debug("Registering task '%s'", task.name)
if task.name in self._tasks:
self._tasks[task.name].extend(task)
else:
self._tasks[task.name] = task
def execute_initializers(self, environments=None, **keyword_arguments):
for initializer in self._initializers:
if not initializer.is_applicable(environments):
message = "Not going to execute initializer '%s' from '%s' as environments do not match."
self.logger.debug(message, initializer.name, initializer.source)
else:
self.logger.debug("Executing initializer '%s' from '%s'",
initializer.name, initializer.source)
initializer.execute(keyword_arguments)
def assert_dependencies_resolved(self):
if not self._dependencies_resolved:
raise DependenciesNotResolvedException()
def execute_task(self, task, **keyword_arguments):
self.assert_dependencies_resolved()
self.logger.debug("Executing task '%s'",
task.name)
timer = Timer.start()
number_of_actions = 0
self._current_task = task
for action in self._execute_before[task.name]:
if self.execute_action(action, keyword_arguments):
number_of_actions += 1
task.execute(self.logger, keyword_arguments)
for action in self._execute_after[task.name]:
if self.execute_action(action, keyword_arguments):
number_of_actions += 1
self._current_task = None
if task not in self._tasks_executed:
self._tasks_executed.append(task)
timer.stop()
return TaskExecutionSummary(task.name, number_of_actions, timer.get_millis())
def execute_action(self, action, arguments):
if action.only_once and action in self._actions_executed:
message = "Action %s has been executed before and is marked as only_once, so will not be executed again"
self.logger.debug(message, action.name)
return False
self.logger.debug("Executing action '%s' from '%s' before task", action.name, action.source)
action.execute(arguments)
self._actions_executed.append(action)
return True
def execute_execution_plan(self, execution_plan, **keyword_arguments):
self.assert_dependencies_resolved()
summaries = []
for task in execution_plan:
summaries.append(self.execute_task(task, **keyword_arguments))
return summaries
def get_task(self, name):
if not self.has_task(name):
raise NoSuchTaskException(name)
return self._tasks[name]
def has_task(self, name):
return name in self._tasks
def _collect_transitive_tasks(self, task, visited=None):
if not visited:
visited = set()
if task in visited:
return visited
visited.add(task)
dependencies = [self.get_task(dependency_name) for dependency_name in task.dependencies]
for dependency in dependencies:
self._collect_transitive_tasks(dependency, visited)
return visited
def collect_all_transitive_tasks(self, task_names):
all_tasks = set()
for task_name in task_names:
all_tasks.update(self._collect_transitive_tasks(self.get_task(task_name)))
return all_tasks
def build_execution_plan(self, task_names):
self.assert_dependencies_resolved()
execution_plan = []
dependency_edges = {}
for task in self.collect_all_transitive_tasks(as_list(task_names)):
dependency_edges[task.name] = task.dependencies
try:
Graph(dependency_edges).assert_no_cycles_present()
except GraphHasCycles as cycles:
raise CircularTaskDependencyException(str(cycles))
for task_name in as_list(task_names):
self.enqueue_task(execution_plan, task_name)
return execution_plan
def build_shortest_execution_plan(self, task_names):
"""
Finds the shortest execution plan taking into the account tasks already executed
This is useful when you want to execute tasks dynamically without repeating pre-requisite
tasks you've already executed
"""
execution_plan = self.build_execution_plan(task_names)
shortest_plan = copy.copy(execution_plan)
for executed_task in self._tasks_executed:
candidate_task = shortest_plan[0]
if candidate_task.name not in task_names and candidate_task == executed_task:
shortest_plan.pop(0)
else:
break
if self._current_task and self._current_task in shortest_plan:
raise CircularTaskDependencyException("Task '%s' attempted to invoke tasks %s, "
"resulting in plan %s, creating circular dependency" %
(self._current_task, task_names, shortest_plan))
return shortest_plan
def enqueue_task(self, execution_plan, task_name):
task = self.get_task(task_name)
if task in execution_plan:
return
for dependency in self._task_dependencies[task.name]:
self.enqueue_task(execution_plan, dependency.name)
execution_plan.append(task)
def resolve_dependencies(self):
for task in self._tasks.values():
self._execute_before[task.name] = []
self._execute_after[task.name] = []
self._task_dependencies[task.name] = []
for d in task.dependencies:
if not self.has_task(d):
raise MissingTaskDependencyException(task.name, d)
self._task_dependencies[task.name].append(self.get_task(d))
for action in self._actions.values():
for task in action.execute_before:
if not self.has_task(task):
raise MissingActionDependencyException(action.name, task)
self._execute_before[task].append(action)
for task in action.execute_after:
if not self.has_task(task):
raise MissingActionDependencyException(action.name, task)
self._execute_after[task].append(action)
self._dependencies_resolved = True
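# Minimal usage sketch (not from the original module): register two tasks with a
# dependency between them, resolve dependencies and run the resulting plan. Task
# and ExecutionManager are the classes defined above; the plain stdlib logger is
# an assumption about the "logger" collaborator (only debug() is used here).
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("execution-sketch")
    def compile_sources(logger):
        logger.debug("compiling")
    def run_unit_tests(logger):
        logger.debug("testing")
    manager = ExecutionManager(logger)
    manager.register_task(Task("compile_sources", compile_sources))
    manager.register_task(Task("run_unit_tests", run_unit_tests,
                               dependencies=["compile_sources"]))
    manager.resolve_dependencies()
    plan = manager.build_execution_plan(["run_unit_tests"])
    # the plan lists "compile_sources" before "run_unit_tests"
    for summary in manager.execute_execution_plan(plan, logger=logger):
        print("%s took %s ms" % (summary.task, summary.execution_time))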
|
{
"content_hash": "8caf0434e00f153bce09d1815f5681cc",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 118,
"avg_line_length": 35.01497005988024,
"alnum_prop": 0.6108593415989739,
"repo_name": "Designist/pybuilder",
"id": "087487b2d1d42fa291df68e0e13f4cb917da8954",
"size": "12364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/execution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "2886"
},
{
"name": "Python",
"bytes": "516058"
}
],
"symlink_target": ""
}
|
import clusto
from clusto.drivers.base import ResourceManager
from clusto.exceptions import ResourceException
class SimpleNameManagerException(ResourceException):
pass
class SimpleNameManager(ResourceManager):
"""
    SimpleNameManager - manage the generation of names with a common
prefix and an incrementing integer component.
e.g foo001, foo002, foo003, etc.
"""
_driver_name = "simplenamemanager"
_properties = {'basename':'',
'digits':2,
'next':1,
'leadingZeros':int(True)}
_record_allocations = True
_attr_name = 'simplename'
def allocator(self, thing=None):
clusto.flush()
counter = clusto.Counter.get(self.entity, 'next', default=self.next)
num = str(counter.value)
if self.leadingZeros:
num = num.rjust(self.digits, '0')
if len(num) > self.digits:
raise SimpleNameManagerException("Out of digits for the integer. "
"Max of %d digits and we're at "
"number %s." % (self.digits, num))
nextname = self.basename + num
counter.next()
return (nextname, True)
class SimpleEntityNameManager(SimpleNameManager):
_driver_name = "simpleentitynamemanager"
_record_allocations = False
def allocate(self, clustotype, resource=None, number=True):
"""allocates a resource element to the given thing.
        resource - if passed as an argument, it will be checked
        before assignment.
        refattr - the attribute name on the entity that will refer back
        to this resource manager.
returns the resource that was either passed in and processed
or generated.
"""
if not isinstance(clustotype, type):
raise TypeError("thing is not a Driver class")
try:
clusto.begin_transaction()
if not resource:
name, num = self.allocator()
newobj = clustotype(name)
else:
name = resource
newobj = clustotype(resource)
super(SimpleEntityNameManager, self).allocate(newobj, name)
clusto.commit()
except Exception, x:
clusto.rollback_transaction()
raise
return newobj
def deallocate(self, thing, resource=None, number=True):
raise Exception("can't deallocate an entity name, delete the entity instead.")
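# Naming-scheme sketch (not part of the original driver): allocator() above
# builds names by zero-padding a counter to `digits` places and appending it to
# `basename`. The same formatting, reproduced here without a clusto database:
if __name__ == "__main__":
    basename, digits = "foo", 3
    for value in (1, 2, 42):
        print(basename + str(value).rjust(digits, "0"))  # foo001, foo002, foo042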
|
{
"content_hash": "0fefd0c21198f23b767ebadebd55f61b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 86,
"avg_line_length": 26.90721649484536,
"alnum_prop": 0.5812260536398467,
"repo_name": "motivator/clusto",
"id": "8f0fa2126774e465560ac3f8c92536f45970d079",
"size": "2610",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/clusto/drivers/resourcemanagers/simplenamemanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1496"
},
{
"name": "HTML",
"bytes": "5677"
},
{
"name": "Python",
"bytes": "346861"
}
],
"symlink_target": ""
}
|
import mythboxtest
import unittest2 as unittest
from mockito import Mock, when, any
from mythbox.feeds import TwitterFeed, FeedHose
log = mythboxtest.getLogger('mythbox.unittest')
class FeedHoseTest(unittest.TestCase):
def test_getLatestEntries_None(self):
# Setup
settings = Mock()
when(settings).get(any()).thenReturn('blah')
feedHose = FeedHose(settings=settings, bus=Mock())
# Test
entries = feedHose.getLatestEntries()
# Verify
self.assertTrue(len(entries) == 0)
def test_getLatestEntries_Sorted_from_newest_to_oldest(self):
pass
class TwitterFeedTest(unittest.TestCase):
def test_getEntries(self):
feed = TwitterFeed('mythboxfeed', sinceDays=99999)
s = feed.getEntries()
log.debug('feed text = %s' % s)
self.assertTrue(s)
|
{
"content_hash": "a1b556ccf7c670be1ee263376cf44232",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 25.6,
"alnum_prop": 0.6328125,
"repo_name": "GetSomeBlocks/Score_Soccer",
"id": "11fed84d3eb300625e3efaacf44a5baa061d3537",
"size": "1712",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "resources/test/mythboxtest/test_feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "930"
},
{
"name": "C",
"bytes": "293000"
},
{
"name": "C#",
"bytes": "9664"
},
{
"name": "CSS",
"bytes": "24716"
},
{
"name": "D",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "374176"
},
{
"name": "Java",
"bytes": "206"
},
{
"name": "Objective-C",
"bytes": "9421"
},
{
"name": "Python",
"bytes": "8744725"
},
{
"name": "Ruby",
"bytes": "6773"
},
{
"name": "Shell",
"bytes": "13600"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='nirvaris-dictionary',
version='1.9.8',
packages=['dictionary'],
include_package_data=True,
license='MIT License', # example license
    description='A simple Django app for posts and comments on the website with some meta-tags.',
long_description=README,
url='https://github.com/nirvaris/nirvaris-dictionary',
author='Nirvaris',
author_email='contact@nirvaris.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
|
{
"content_hash": "e4387a424bc6bb0bf960a0bb86de2809",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 96,
"avg_line_length": 35.05882352941177,
"alnum_prop": 0.6484899328859061,
"repo_name": "nirvaris/nirvaris-dictionary",
"id": "0f9d56dd724f8fbb1c8e081efffd32d57da48c57",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4056"
},
{
"name": "HTML",
"bytes": "19724"
},
{
"name": "JavaScript",
"bytes": "882"
},
{
"name": "Python",
"bytes": "63545"
}
],
"symlink_target": ""
}
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
# ``__setattr__``. This is the reason why the code sometimes uses
# ``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file', 'netcdf_variable']
import warnings
import weakref
from operator import mul
from platform import python_implementation
import mmap as mm
import numpy as np
from numpy import frombuffer, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
IS_PYPY = python_implementation() == 'PyPy'
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
FILLMAP = {NC_BYTE: FILL_BYTE,
NC_CHAR: FILL_CHAR,
NC_SHORT: FILL_SHORT,
NC_INT: FILL_INT,
NC_FLOAT: FILL_FLOAT,
NC_DOUBLE: FILL_DOUBLE}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
class netcdf_file:
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<https://www.unidata.ucar.edu/software/netcdf/guide_toc.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
    by the variables. Each variable then indicates which dimensions it
    uses and any attributes such as data units, along with containing the data
    values for the variable. It is good practice to include a
    variable with the same name as a dimension to provide the values for
    that axis. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from mmapped Netcdf file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf_file
>>> import numpy as np
>>> f = netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf_file
>>> f = netcdf_file('simple.nc', 'r')
>>> print(f.history)
b'Created for a test'
>>> time = f.variables['time']
>>> print(time.units)
b'days since 2008-01-01'
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf_file
>>> with netcdf_file('simple.nc', 'r') as f:
... print(f.history)
b'Created for a test'
"""
def __init__(self, filename, mode='r', mmap=None, version=1,
maskandscale=False):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
# Mmapped files on PyPy cannot be usually closed
# before the GC runs, so it's better to use mmap=False
# as the default.
mmap = (not IS_PYPY)
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.maskandscale = maskandscale
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = {}
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if hasattr(self, 'fp') and not self.fp.closed:
try:
self.flush()
finally:
self.variables = {}
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for NumPy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions,
maskandscale=self.maskandscale)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tobytes())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_att_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(nc_type)
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tobytes())
count = var.data.size * var.data.itemsize
self._write_var_padding(var, var._vsize - count)
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
var.data.resize(shape)
except ValueError:
var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tobytes())
# Padding
count = rec.size * rec.itemsize
self._write_var_padding(var, var._vsize - count)
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_var_padding(self, var, size):
encoded_fill_value = var._get_encoded_fill_value()
num_fills = size // len(encoded_fill_value)
self.fp.write(encoded_fill_value * num_fills)
def _write_att_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, (str, bytes)):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(nc_type)
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tobytes())
count = values.size * values.itemsize
self.fp.write(b'\x00' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = self._unpack_string().decode('latin1')
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = self._unpack_string().decode('latin1')
attributes[name] = self._read_att_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = frombuffer(self.fp.read(a_size), dtype=dtype_
).copy()
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes,
maskandscale=self.maskandscale)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
dtype=dtypes).copy()
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
name = self._unpack_string().decode('latin1')
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_att_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
if typecode != 'c':
values = frombuffer(values, dtype='>%s' % typecode).copy()
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tobytes())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tobytes())
def _unpack_int64(self):
return frombuffer(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(s.encode('latin1'))
self.fp.write(b'\x00' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable:
"""
A data object for netcdf files.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
"""
def __init__(self, data, typecode, size, shape, dimensions,
attributes=None,
maskandscale=False):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self.maskandscale = maskandscale
self._attributes = attributes or {}
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return bool(self.data.shape) and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
This is a read-only attribute and can not be modified in the
same manner of other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of NumPy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
The character typecode of the variable (e.g., 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
The element size of the variable (e.g., 8 for float64).
"""
return self._size
def __getitem__(self, index):
if not self.maskandscale:
return self.data[index]
data = self.data[index].copy()
missing_value = self._get_missing_value()
data = self._apply_missing_value(data, missing_value)
scale_factor = self._attributes.get('scale_factor')
add_offset = self._attributes.get('add_offset')
if add_offset is not None or scale_factor is not None:
data = data.astype(np.float64)
if scale_factor is not None:
data = data * scale_factor
if add_offset is not None:
data += add_offset
return data
def __setitem__(self, index, data):
if self.maskandscale:
missing_value = (
self._get_missing_value() or
getattr(data, 'fill_value', 999999))
self._attributes.setdefault('missing_value', missing_value)
self._attributes.setdefault('_FillValue', missing_value)
data = ((data - self._attributes.get('add_offset', 0.0)) /
self._attributes.get('scale_factor', 1.0))
data = np.ma.asarray(data).filled(missing_value)
if self._typecode not in 'fd' and data.dtype.kind == 'f':
data = np.round(data)
# Expand data for record vars?
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
self.data.resize(shape)
except ValueError:
self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
self.data[index] = data
def _default_encoded_fill_value(self):
"""
The default encoded fill-value for this Variable's data type.
"""
nc_type = REVERSE[self.typecode(), self.itemsize()]
return FILLMAP[nc_type]
def _get_encoded_fill_value(self):
"""
Returns the encoded fill value for this variable as bytes.
This is taken from either the _FillValue attribute, or the default fill
value for this variable's data type.
"""
if '_FillValue' in self._attributes:
fill_value = np.array(self._attributes['_FillValue'],
dtype=self.data.dtype).tobytes()
if len(fill_value) == self.itemsize():
return fill_value
else:
return self._default_encoded_fill_value()
else:
return self._default_encoded_fill_value()
def _get_missing_value(self):
"""
Returns the value denoting "no data" for this variable.
If this variable does not have a missing/fill value, returns None.
If both _FillValue and missing_value are given, give precedence to
_FillValue. The netCDF standard gives special meaning to _FillValue;
missing_value is just used for compatibility with old datasets.
"""
if '_FillValue' in self._attributes:
missing_value = self._attributes['_FillValue']
elif 'missing_value' in self._attributes:
missing_value = self._attributes['missing_value']
else:
missing_value = None
return missing_value
@staticmethod
def _apply_missing_value(data, missing_value):
"""
Applies the given missing value to the data array.
Returns a numpy.ma array, with any value equal to missing_value masked
out (unless missing_value is None, in which case the original array is
returned).
"""
if missing_value is None:
newdata = data
else:
try:
missing_value_isnan = np.isnan(missing_value)
except (TypeError, NotImplementedError):
# some data types (e.g., characters) cannot be tested for NaN
missing_value_isnan = False
if missing_value_isnan:
mymask = np.isnan(data)
else:
mymask = (data == missing_value)
newdata = np.ma.masked_where(mymask, data)
return newdata
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
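# Record-dimension sketch (not part of the original module): the Notes above
# describe flagging one unbounded "record" dimension; passing length None to
# createDimension does exactly that, and assigning past the current end of a
# record variable grows the file. The file name and values are illustrative.
if __name__ == "__main__":
    with netcdf_file('record_demo.nc', 'w') as f:
        f.createDimension('time', None)  # None marks the unlimited/record dimension
        time = f.createVariable('time', 'i', ('time',))
        time[:3] = [0, 1, 2]             # three records so far
        time[3:5] = [3, 4]               # writing past the end appends records
    with netcdf_file('record_demo.nc', 'r', mmap=False) as f:
        print(f.variables['time'][:])    # -> [0 1 2 3 4]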
|
{
"content_hash": "4f000f07f0aae2373b22cc0a5f9fe534",
"timestamp": "",
"source": "github",
"line_count": 1088,
"max_line_length": 107,
"avg_line_length": 35.923713235294116,
"alnum_prop": 0.5705769476781374,
"repo_name": "scipy/scipy",
"id": "64b64324f06b53f0f78b4b64b41222f11db52ae1",
"size": "39085",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scipy/io/_netcdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4626319"
},
{
"name": "C++",
"bytes": "965867"
},
{
"name": "Cython",
"bytes": "1062129"
},
{
"name": "Dockerfile",
"bytes": "10630"
},
{
"name": "Fortran",
"bytes": "5212087"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "147826"
},
{
"name": "Python",
"bytes": "15773035"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "17744"
},
{
"name": "Starlark",
"bytes": "1757"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""Circuits Library - Web
circuits.web contains the circuits full stack web server that is HTTP
and WSGI compliant.
"""
from utils import url
from loggers import Logger
from sessions import Sessions
from controllers import expose, Controller
from events import Request, Response
from servers import BaseServer, Server
from errors import HTTPError, Forbidden, NotFound, Redirect
from dispatchers import Static, Dispatcher, VirtualHosts, XMLRPC
try:
from dispatchers import JSONRPC
except ImportError:
pass
try:
from controllers import JSONController
except ImportError:
pass
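# Minimal usage sketch (not part of the original package), the canonical
# circuits.web "hello world". Component composition with "+" and .run() come
# from circuits core rather than from this module, so treat this as an
# assumption about the surrounding circuits API of that era:
#
#     from circuits.web import Server, Controller
#
#     class Root(Controller):
#         def index(self):
#             return "Hello World!"
#
#     (Server(8000) + Root()).run()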
|
{
"content_hash": "7cdf5b3da1064a3f62a85ca408811904",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 24.75,
"alnum_prop": 0.8013468013468014,
"repo_name": "antont/tundra",
"id": "85650c208a44dd86d0781c18d09d7786c149f1b6",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/tundra2",
"path": "src/Application/PythonScriptModule/pymodules_old/circuits/web/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "110345"
},
{
"name": "C#",
"bytes": "76173"
},
{
"name": "C++",
"bytes": "4959154"
},
{
"name": "CoffeeScript",
"bytes": "2229"
},
{
"name": "JavaScript",
"bytes": "316308"
},
{
"name": "Objective-C",
"bytes": "222359"
},
{
"name": "Python",
"bytes": "999850"
},
{
"name": "Shell",
"bytes": "8224"
},
{
"name": "TypeScript",
"bytes": "230019"
}
],
"symlink_target": ""
}
|
import os
import glob
import six
from st2common import log as logging
from st2common.constants.pack import MANIFEST_FILE_NAME
from st2common.content.loader import MetaLoader
from st2common.content.loader import ContentPackLoader
from st2common.models.api.pack import PackAPI
from st2common.persistence.pack import Pack
from st2common.util.file_system import get_file_list
__all__ = [
'ResourceRegistrar'
]
LOG = logging.getLogger(__name__)
# Note: We use the cache to avoid manipulating the DB object for the same pack multiple times
# during the same register-content run.
# This works fine since those classes are only used from register-content, which is a script and not
# a long running process.
REGISTERED_PACKS_CACHE = {}
EXCLUDE_FILE_PATTERNS = [
'*.pyc'
]
class ResourceRegistrar(object):
ALLOWED_EXTENSIONS = []
def __init__(self, use_pack_cache=True, fail_on_failure=False):
"""
        :param use_pack_cache: True to cache which packs have been registered in memory and make
                               sure packs are only registered once.
:type use_pack_cache: ``bool``
:param fail_on_failure: Throw an exception if resource registration fails.
:type fail_on_failure: ``bool``
"""
self._use_pack_cache = use_pack_cache
self._fail_on_failure = fail_on_failure
self._meta_loader = MetaLoader()
self._pack_loader = ContentPackLoader()
def get_resources_from_pack(self, resources_dir):
resources = []
for ext in self.ALLOWED_EXTENSIONS:
resources_glob = resources_dir
if resources_dir.endswith('/'):
resources_glob = resources_dir + ext
else:
resources_glob = resources_dir + '/*' + ext
resource_files = glob.glob(resources_glob)
resources.extend(resource_files)
resources = sorted(resources)
return resources
def register_packs(self, base_dirs):
"""
Register packs in all the provided directories.
"""
packs = self._pack_loader.get_packs(base_dirs=base_dirs)
registered_count = 0
for pack_name, pack_path in six.iteritems(packs):
self.register_pack(pack_name=pack_name, pack_dir=pack_path)
registered_count += 1
return registered_count
def register_pack(self, pack_name, pack_dir):
"""
Register pack in the provided directory.
"""
if self._use_pack_cache and pack_name in REGISTERED_PACKS_CACHE:
# This pack has already been registered during this register content run
return
LOG.debug('Registering pack: %s' % (pack_name))
REGISTERED_PACKS_CACHE[pack_name] = True
try:
pack_db = self._register_pack(pack_name=pack_name, pack_dir=pack_dir)
except Exception:
LOG.exception('Failed to register pack "%s"' % (pack_name))
return None
return pack_db
def _register_pack(self, pack_name, pack_dir):
"""
Register a pack (create a DB object in the system).
Note: Pack registration now happens when registering the content and not when installing
a pack using packs.install. Eventually this will be moved to the pack management API.
"""
manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)
if not os.path.isfile(manifest_path):
raise ValueError('Pack "%s" is missing %s file' % (pack_name, MANIFEST_FILE_NAME))
content = self._meta_loader.load(manifest_path)
if not content:
raise ValueError('Pack "%s" metadata file is empty' % (pack_name))
content['ref'] = pack_name
# Include a list of pack files
pack_file_list = get_file_list(directory=pack_dir, exclude_patterns=EXCLUDE_FILE_PATTERNS)
content['files'] = pack_file_list
pack_api = PackAPI(**content)
pack_db = PackAPI.to_model(pack_api)
try:
pack_db.id = Pack.get_by_ref(pack_name).id
except ValueError:
LOG.debug('Pack %s not found. Creating new one.', pack_name)
pack_db = Pack.add_or_update(pack_db)
LOG.debug('Pack %s registered.' % (pack_name))
return pack_db
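# --- Illustrative usage, not part of the original module ---
# A minimal sketch of how a concrete registrar is meant to be driven, assuming
# a subclass that declares the extensions it cares about, a configured st2
# database, and a local packs directory (all hypothetical here):
#
#     class YamlResourceRegistrar(ResourceRegistrar):
#         ALLOWED_EXTENSIONS = ['.yaml', '.yml']
#
#     registrar = YamlResourceRegistrar(use_pack_cache=True)
#     count = registrar.register_packs(base_dirs=['/opt/stackstorm/packs'])
#     LOG.info('Registered %d pack(s)', count)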
|
{
"content_hash": "44191aac48876037c17e0734df00bf59",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 99,
"avg_line_length": 33.41085271317829,
"alnum_prop": 0.6320185614849188,
"repo_name": "armab/st2",
"id": "e28827d2dbe66bb167234bd5a105ef2d65c6a41a",
"size": "5090",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2common/st2common/bootstrap/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "36807"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3259877"
},
{
"name": "Shell",
"bytes": "27345"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.engine.target import BoolField
class SkipYapfField(BoolField):
alias = "skip_yapf"
default = False
help = "If true, don't run yapf on this target's code."
def rules():
return [
PythonSourcesGeneratorTarget.register_plugin_field(SkipYapfField),
PythonSourceTarget.register_plugin_field(SkipYapfField),
PythonTestsGeneratorTarget.register_plugin_field(SkipYapfField),
PythonTestTarget.register_plugin_field(SkipYapfField),
PythonTestUtilsGeneratorTarget.register_plugin_field(SkipYapfField),
]
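# --- Illustrative usage, not part of the original plugin ---
# With the plugin field registered by rules() above, a BUILD file can opt a
# target out of yapf formatting (target name is made up for illustration):
#
#     python_sources(
#         name="lib",
#         skip_yapf=True,
#     )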
|
{
"content_hash": "050cdbc35daa1939ec10d534e228a331",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 31.875,
"alnum_prop": 0.7516339869281046,
"repo_name": "benjyw/pants",
"id": "bd6c595e87cce3866f239eb45e484f3b8eb715cc",
"size": "897",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/lint/yapf/skip_field.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
}
|
import os
class ArgHandlerWithParam:
'''
Handler for arguments that need a value.
'''
def __init__(self, arg_name, convert_val=None, default_val=None):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.convert_val = convert_val
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v is not None and v != self.default_val:
lst.append(self.arg_v_rep)
lst.append('%s' % (v,))
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
val = argv[i]
if self.convert_val:
val = self.convert_val(val)
setup[self.arg_name] = val
del argv[i]
class ArgHandlerBool:
'''
If a given flag is received, mark it as 'True' in setup.
'''
def __init__(self, arg_name, default_val=False):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v:
lst.append(self.arg_v_rep)
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
setup[self.arg_name] = True
def convert_ppid(ppid):
ret = int(ppid)
if ret != 0:
if ret == os.getpid():
raise AssertionError(
'ppid passed is the same as the current process pid (%s)!' % (ret,))
return ret
ACCEPTED_ARG_HANDLERS = [
ArgHandlerWithParam('port', int, 0),
ArgHandlerWithParam('ppid', convert_ppid, 0),
ArgHandlerWithParam('vm_type'),
ArgHandlerWithParam('client'),
ArgHandlerWithParam('access-token'),
ArgHandlerWithParam('client-access-token'),
ArgHandlerBool('server'),
ArgHandlerBool('DEBUG_RECORD_SOCKET_READS'),
ArgHandlerBool('multiproc'), # Used by PyCharm (reuses connection: ssh tunneling)
ArgHandlerBool('multiprocess'), # Used by PyDev (creates new connection to ide)
ArgHandlerBool('save-signatures'),
ArgHandlerBool('save-threading'),
ArgHandlerBool('save-asyncio'),
ArgHandlerBool('print-in-debugger-startup'),
ArgHandlerBool('cmd-line'),
ArgHandlerBool('module'),
ArgHandlerBool('skip-notify-stdin'),
# The ones below should've been just one setting to specify the protocol, but for compatibility
# reasons they're passed as a flag but are mutually exclusive.
ArgHandlerBool('json-dap'), # Protocol used by ptvsd to communicate with pydevd (a single json message in each read)
ArgHandlerBool('json-dap-http'), # Actual DAP (json messages over http protocol).
ArgHandlerBool('protocol-quoted-line'), # Custom protocol with quoted lines.
ArgHandlerBool('protocol-http'), # Custom protocol with http.
]
ARGV_REP_TO_HANDLER = {}
for handler in ACCEPTED_ARG_HANDLERS:
ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler
def get_pydevd_file():
import pydevd
f = pydevd.__file__
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
return f
def setup_to_argv(setup):
'''
:param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG.
'''
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup:
handler.to_argv(ret, setup)
return ret
def process_command_line(argv):
""" parses the arguments.
removes our arguments from the command line """
setup = {}
for handler in ACCEPTED_ARG_HANDLERS:
setup[handler.arg_name] = handler.default_val
setup['file'] = ''
setup['qt-support'] = ''
i = 0
del argv[0]
while i < len(argv):
handler = ARGV_REP_TO_HANDLER.get(argv[i])
if handler is not None:
handler.handle_argv(argv, i, setup)
elif argv[i].startswith('--qt-support'):
# The --qt-support is special because we want to keep backward compatibility:
# Previously, just passing '--qt-support' meant that we should use the auto-discovery mode
# whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where
# mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside', 'pyside2'.
if argv[i] == '--qt-support':
setup['qt-support'] = 'auto'
elif argv[i].startswith('--qt-support='):
qt_support = argv[i][len('--qt-support='):]
valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside', 'pyside2')
if qt_support not in valid_modes:
raise ValueError("qt-support mode invalid: " + qt_support)
if qt_support == 'none':
# On none, actually set an empty string to evaluate to False.
setup['qt-support'] = ''
else:
setup['qt-support'] = qt_support
else:
raise ValueError("Unexpected definition for qt-support flag: " + argv[i])
del argv[i]
elif argv[i] == '--file':
# --file is special because it's the last one (so, no handler for it).
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG':
from pydevd import set_debug
del argv[i]
set_debug(setup)
else:
raise ValueError("Unexpected option: " + argv[i])
return setup
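# --- Illustrative usage, not part of the original module ---
# A small demo of the argv -> setup round trip using only the helpers above;
# the argument values are made up for illustration. setup_to_argv(setup) would
# rebuild the pydevd portion of the command line (it does not re-add --file).
if __name__ == '__main__':
    demo_argv = ['pydevd.py', '--port', '5678', '--server',
                 '--qt-support=pyqt5', '--file', 'my_script.py']
    demo_setup = process_command_line(demo_argv)
    print(demo_setup['port'])        # 5678 (converted to int)
    print(demo_setup['server'])      # True
    print(demo_setup['qt-support'])  # 'pyqt5'
    print(demo_setup['file'])        # 'my_script.py'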
|
{
"content_hash": "01161cd35dd1bb32b72d99b567372d48",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 121,
"avg_line_length": 33.94219653179191,
"alnum_prop": 0.5667574931880109,
"repo_name": "glenngillen/dotfiles",
"id": "82dd47546895a4cd1f3fec8ed8509ef069f6ebac",
"size": "5872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".vscode/extensions/ms-python.python-2022.2.1924087327/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_command_line_handling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "3634"
},
{
"name": "Shell",
"bytes": "4225"
},
{
"name": "Vim script",
"bytes": "16306"
}
],
"symlink_target": ""
}
|
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
{
"content_hash": "8e677ec00247038b8418efff06801562",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 26.513513513513512,
"alnum_prop": 0.7043832823649337,
"repo_name": "pywikibot/core-migration-example",
"id": "66c5d19bed1769f9a8d33285d9ec1b447591165d",
"size": "1006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2685207"
}
],
"symlink_target": ""
}
|
from math import factorial
def memoize_factorial_sum(func):
cache={}
def inner(n, length=None):
if not isinstance(n, str):
n=str(n)
if not n in cache:
cache[n]=func(n, length)
return cache[n]
return inner
@memoize_factorial_sum
def factorial_sum(n, length=None):
if not isinstance(n, str):
n=str(n)
if not length:
length=len(n)
if length==1:
return factorial(int(n))
else:
return factorial(int(n[0]))+factorial_sum(n[1:], length-1)
def memoize(func):
cache={}
def inner(n, prev):
if not n in cache:
cache[n]=func(n, prev)
return cache[n]
return inner
#@memoize
def length_factorial_chain(n, previous):
if n in previous:
return 0
else:
previous.add(n)
return 1+length_factorial_chain(factorial_sum(n), previous)
def main(length=60, upper=10**6):
total=0
for i in range(1,upper):
if length_factorial_chain(i, set())==length:
total+=1
return total
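# --- Illustrative check, not part of the original solution ---
# The chain starting at 69 is 69 -> 363600 -> 1454 -> 169 -> 363601 -> (1454
# repeats), i.e. five non-repeating terms, which the helpers above reproduce.
if __name__ == '__main__':
    assert factorial_sum(69) == 363600
    assert length_factorial_chain(69, set()) == 5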
|
{
"content_hash": "2e1b8272654c310ecfbd31febecc897d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 23.155555555555555,
"alnum_prop": 0.5738963531669866,
"repo_name": "Bolt64/my_code",
"id": "70cd33cd71da1a347d98dee2ed7e840eb2a5269e",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "euler/digit_factorial_chain_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "57094"
},
{
"name": "C++",
"bytes": "12255"
},
{
"name": "Haskell",
"bytes": "27215"
},
{
"name": "Jupyter Notebook",
"bytes": "18418"
},
{
"name": "Python",
"bytes": "308871"
},
{
"name": "Racket",
"bytes": "3888"
},
{
"name": "Rust",
"bytes": "22856"
},
{
"name": "Scala",
"bytes": "51026"
},
{
"name": "Shell",
"bytes": "514"
},
{
"name": "Vim script",
"bytes": "341"
}
],
"symlink_target": ""
}
|
import logging
from django.db.utils import DataError
from scholarly_citation_finder.tools.nameparser.AuthorNameParser import AuthorNameParser
from scholarly_citation_finder.apps.core.models import Author, AuthorNameBlock, AuthorNameVariation
from scholarly_citation_finder.apps.parser.Exceptions import ParserDataError
logger = logging.getLogger(__name__)
class AuthorParser:
'''
Parse an author.
'''
def __init__(self, database):
'''
Create object.
:param database: Database name
'''
self.database = database
def parse(self, name):
'''
Parse an author.
:param name: Author name as string
'''
name = AuthorNameParser(name, normalize=True)
if name.title and not name.first:
name.first = name.title
#name.title = ''
name_middle = name.middle if name.middle else None
name_suffix = name.suffix if name.suffix else None
name_nickname = name.nickname if name.nickname else None
if name.last and name.first:
try:
# Get block
block, _ = AuthorNameBlock.objects.using(self.database).get_or_create(name='%s,%s' % (name.last, name.first[0]))
# Get or create name variation
variation = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)[:1]
if variation:
return variation[0].author_id
else:
variation_short = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first[0],
middle=name_middle[0] if name_middle else None,
last=name.last)[:1]
if variation_short:
author_id = variation_short[0].author_id
else:
#name.capitalize()
author = Author.objects.using(self.database).create(name=str(name).title())
author_id = author.id
if len(name.first) > 1: # Otherwise this version was already stored above
self.__store_shortname_variation(block.id, author_id, name.first, name_middle, name.last)
AuthorNameVariation.objects.using(self.database).create(block_id=block.id,
author_id=author_id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)
return author_id
except(DataError) as e:
raise ParserDataError('Author name is invalid: %s' % str(e))
else:
raise ParserDataError('Author name has no last or first name: %s' % name)
def __store_shortname_variation(self, block_id, author_id, first, middle, last):
'''
Store the short version of the name variation.
:param block_id: ID of the block
:param author_id: ID of the author
:param first: First name
:param middle: Middle name
:param last: Last name
'''
middle = middle[0] if middle else None
AuthorNameVariation.objects.using(self.database).get_or_create(block_id=block_id,
author_id=author_id,
first=first[0],
middle=middle,
last=last)
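# --- Illustrative usage, not part of the original module ---
# A minimal sketch, assuming a configured Django database alias (here the
# hypothetical 'storage') with the Author* models migrated on it:
#
#     parser = AuthorParser(database='storage')
#     try:
#         author_id = parser.parse('John Ronald Reuel Tolkien')
#     except ParserDataError as e:
#         logger.warning('Could not parse author: %s', e)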
|
{
"content_hash": "2688b0d739786045ac1fea53ce05d344",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 141,
"avg_line_length": 51.287234042553195,
"alnum_prop": 0.4266749637004771,
"repo_name": "citationfinder/scholarly_citation_finder",
"id": "596e015784d55cc57f0fe3507cc0b55c334ed05b",
"size": "4863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scholarly_citation_finder/apps/parser/AuthorParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251904"
}
],
"symlink_target": ""
}
|
import factory
from factory.django import DjangoModelFactory
from .. import models
class AccountFactory(DjangoModelFactory):
# pylint: disable=R0903
class Meta:
model = models.Account
username = factory.Faker('user_name')
email = factory.LazyAttribute(
lambda f: '{0}.{1}@example.com'.format(
f.first_name, f.last_name
).lower()
)
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name')
is_staff = False
is_superuser = False
password = factory.Faker('password')
|
{
"content_hash": "d51e51348a060ad70284da8341a3e6d2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 47,
"avg_line_length": 24.652173913043477,
"alnum_prop": 0.6490299823633157,
"repo_name": "ahmed-taj/test_travis",
"id": "68504e2cf3349b48e7cb731b72894b4991f00445",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/backend/api/accounts/tests/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "824"
},
{
"name": "Nginx",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "20095"
},
{
"name": "Shell",
"bytes": "3353"
}
],
"symlink_target": ""
}
|
import itertools
from heapq import heappop, heappush, heapify
# Based on:
# docs.python.org/2/library/heapq.html#priority-queue-implementation-notes
class PriorityQueue:
def __init__(self):
# entry: [priority, count from counter, task]
self.heap = [] # list of entries arranged in a heap
self.entry_finder = {} # mapping of tasks to entries
self.counter = itertools.count() # unique sequence count
def __getitem__(self, task):
return self.entry_finder[task][0]
# def __iter__(self):
# return iter(self.entry_finder)
def populate(self, iterable):
assert not self.entry_finder, 'The queue is supposed to be empty!'
heap = [ ]
for task, priority in iterable:
entry = [priority, next(self.counter), task]
heap.append(entry)
self.entry_finder[task] = entry
heapify(heap)
self.heap = heap
def __len__(self):
return len(self.entry_finder)
def peekitem(self):
while self.heap and self.heap[0][2] is None:
heappop(self.heap)
if self.heap:
priority, _, task = self.heap[0]
return (task, priority)
raise KeyError('the priority queue is empty')
def __setitem__(self, task, priority):
'Add a new task or update the priority of an existing task.'
if task in self.entry_finder:
entry = self.entry_finder.pop(task)
entry[-1] = None
count = next(self.counter)
entry = [priority, count, task]
self.entry_finder[task] = entry
heappush(self.heap, entry)
def batch_update(self, iterable_task_newpriority):
# Assumes that each task is already in the queue!
entry_finder, counter, heap = self.entry_finder, self.counter, self.heap
for task, priority in iterable_task_newpriority:
entry = entry_finder.pop(task)
entry[-1] = None
entry = [priority, next(counter), task]
entry_finder[task] = entry
heappush(heap, entry)
#heap.append(entry)
#heapify(heap)
def __delitem__(self, task):
'Mark an existing task as removed (None). Raise KeyError if not found.'
entry = self.entry_finder.pop(task)
entry[-1] = None
def popitem(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.heap:
priority, _, task = heappop(self.heap)
if task is not None:
del self.entry_finder[task]
return priority, task
raise KeyError('pop from an empty priority queue')
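# --- Illustrative usage, not part of the original module ---
# A small, self-contained demo of the intended workflow: bulk-load with
# populate(), reprioritise with item assignment, lazily delete, then drain.
if __name__ == '__main__':
    pq = PriorityQueue()
    pq.populate([('a', 3), ('b', 1), ('c', 2)])
    pq['a'] = 0            # update the priority of an existing task
    del pq['c']            # lazy removal: the heap entry is only marked dead
    print(pq.peekitem())   # ('a', 0)
    while pq:
        print(pq.popitem())  # (0, 'a') then (1, 'b')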
|
{
"content_hash": "e4d99ec4f9b611ca3df03577084d13a5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 80,
"avg_line_length": 36.078947368421055,
"alnum_prop": 0.5802334062727936,
"repo_name": "baharev/sdopt-tearing",
"id": "506bbdda64321d25c3d35d0a068cfe41fc90ec0a",
"size": "2874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pqueue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "985"
},
{
"name": "Modelica",
"bytes": "17648"
},
{
"name": "Python",
"bytes": "506293"
}
],
"symlink_target": ""
}
|
from twisted.trial.unittest import TestCase
from txjsonrpc.jsonrpc import BaseProxy, BaseQueryFactory
from txjsonrpc.jsonrpclib import Fault, VERSION_PRE1, VERSION_1, VERSION_2
class BaseQueryFactoryTestCase(TestCase):
def test_creation(self):
factory = BaseQueryFactory("someMethod")
self.assertTrue(factory.payload is not None)
self.assertTrue(factory.deferred is not None)
def test_buildVersionedPayloadPre1(self):
factory = BaseQueryFactory("someMethod",version=VERSION_PRE1)
payload = factory._buildVersionedPayload()
self.assertEquals(
payload, '{"params": [], "method": ""}')
def test_buildVersionedPayload1(self):
factory = BaseQueryFactory("someMethod", version=VERSION_1)
payload = factory._buildVersionedPayload()
self.assertEquals(
payload,
'{"params": [], "method": "", "id": 1}')
def test_buildVersionedPayload2(self):
factory = BaseQueryFactory("someMethod", version=VERSION_2)
payload = factory._buildVersionedPayload()
self.assertEquals(
payload,
'{"params": [], "jsonrpc": "2.0", "method": "", "id": 1}')
def test_parseResponseNoJSON(self):
def check_error(error):
self.assertEquals(
error.value.message, "No JSON object could be decoded")
factory = BaseQueryFactory("someMethod")
d = factory.deferred
factory.parseResponse("oops")
return d.addErrback(check_error)
def test_parseResponseRandomJSON(self):
def check_result(result):
self.assertEquals(
result, {u'something': 1})
factory = BaseQueryFactory("someMethod")
d = factory.deferred
factory.parseResponse('{"something": 1}')
return d.addCallback(check_result)
def test_parseResponseFaultData(self):
def check_error(error):
self.assertTrue(isinstance(error.value, Fault))
self.assertEquals(error.value.faultCode, 1)
self.assertEquals(error.value.faultString, u"oops")
factory = BaseQueryFactory("someMethod")
d = factory.deferred
factory.parseResponse(
'{"fault": "Fault", "faultCode": 1, "faultString": "oops"}')
return d.addErrback(check_error)
class BaseProxyTestCase(TestCase):
def test_creation(self):
proxy = BaseProxy()
self.assertEquals(proxy.version, VERSION_PRE1)
self.assertEquals(proxy.factoryClass, None)
def test_getVersionDefault(self):
proxy = BaseProxy()
version = proxy._getVersion({})
self.assertEquals(version, VERSION_PRE1)
def test_getVersionPre1(self):
proxy = BaseProxy()
version = proxy._getVersion({"version": VERSION_PRE1})
self.assertEquals(version, VERSION_PRE1)
def test_getVersion1(self):
proxy = BaseProxy()
version = proxy._getVersion({"version": VERSION_1})
self.assertEquals(version, VERSION_1)
def test_getFactoryClassDefault(self):
proxy = BaseProxy()
factoryClass = proxy._getFactoryClass({})
self.assertEquals(factoryClass, None)
def test_getFactoryClassPassed(self):
class FakeFactory(object):
pass
proxy = BaseProxy()
factoryClass = proxy._getFactoryClass({"factoryClass": FakeFactory})
self.assertEquals(factoryClass, FakeFactory)
|
{
"content_hash": "0fe5ce1a4c7df1540a449c2f9818452a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 76,
"avg_line_length": 33.34615384615385,
"alnum_prop": 0.6401384083044983,
"repo_name": "medialab/txjsonrpc",
"id": "780c81e742af8d1322d08be20ade215d7b9132e0",
"size": "3468",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "build/lib/txjsonrpc/test/test_jsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126343"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.views.generic import DetailView, ListView, CreateView, UpdateView, DeleteView
from nectr.tutor.models import Tutor
def get_tutor_profile_by_username(request, username):
tutor = get_object_or_404(Tutor, user__username=username)
courses = tutor.courses
return render(request, template_name='tutors/tutor_detail.html', context={'tutor': tutor, 'courses': courses})
class TutorDetailView(DetailView):
model = Tutor
# These next two lines tell the view to index lookups by username
# slug_field = 'user'
slug_url_kwarg = 'username'
def get_queryset(self):
return Tutor.objects.filter(user__username=self.get_slug_field())
class TutorListView(ListView):
model = Tutor
template_name = 'search/tutor/tutor_search_result_list.html'
paginate_by = 10
context_object_name = 'tutors_list'
def get_queryset(self):
query = self.request.GET.get('search_text')
return Tutor.objects.filter(courses=query)
class TutorCreate(CreateView):
model = Tutor
class TutorUpdate(UpdateView):
model = Tutor
class TutorDelete(DeleteView):
model = Tutor
class TutorProfile(object):
model = Tutor
|
{
"content_hash": "3b3ab2d4b45009ea281f841cbff505cd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 114,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.7142857142857143,
"repo_name": "nectR-Tutoring/nectr",
"id": "b94950f77f66ab909fd2fc23bb5244c8befa6452",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/new_development",
"path": "nectr/tutor/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "331817"
},
{
"name": "Gherkin",
"bytes": "21392"
},
{
"name": "HTML",
"bytes": "573719"
},
{
"name": "JavaScript",
"bytes": "62996"
},
{
"name": "Nginx",
"bytes": "1243"
},
{
"name": "PHP",
"bytes": "225"
},
{
"name": "Python",
"bytes": "151273"
},
{
"name": "Shell",
"bytes": "8726"
}
],
"symlink_target": ""
}
|
import unittest
from logEntry import LogEntry
class TestLogEntry(unittest.TestCase):
def test_parse_log_1(self):
line = '188.45.108.168 - - [12/Dec/2015:19:44:09 +0100] "GET /images/stories/raith/almhuette_raith.jpg HTTP/1.1" 200 43300 "http://www.almhuette-raith.at/" "Mozilla/5.0 (Linux; Android 4.4.2; de-at; SAMSUNG GT-I9301I Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/1.5 Chrome/28.0.1500.94 Mobile Safari/537.36" "-"'
entry = LogEntry(line)
self.assertEqual(entry.clientIp,'188.45.108.168')
self.assertEqual(entry.clientId, '-')
self.assertEqual(entry.userName, '-')
self.assertEqual(entry.requestLine, 'GET /images/stories/raith/almhuette_raith.jpg HTTP/1.1')
self.assertEqual(entry.requestUrl, '/images/stories/raith/almhuette_raith.jpg')
self.assertEqual(entry.urlSection, '/images/')
self.assertEqual(entry.statusCode, 200)
self.assertEqual(entry.sizeBytes, 43300)
def test_parse_log_2(self):
line = 'hmu4.cs.auckland.ac.nz - - [09/Feb/2016:02:50:20 -0500] "GET /docs/GCDOAR/EnergyStar.html HTTP/1.0" 200 6829'
entry = LogEntry(line)
self.assertEqual(entry.clientIp, 'hmu4.cs.auckland.ac.nz')
self.assertEqual(entry.clientId, '-')
self.assertEqual(entry.userName, '-')
self.assertEqual(entry.requestLine, 'GET /docs/GCDOAR/EnergyStar.html HTTP/1.0')
self.assertEqual(entry.requestUrl, '/docs/GCDOAR/EnergyStar.html')
self.assertEqual(entry.urlSection, '/docs/')
self.assertEqual(entry.statusCode, 200)
self.assertEqual(entry.sizeBytes, 6829)
def test_parse_log_3(self):
line = '2607:f0d0:1002:0051:0000:0000:0000:0004 - - [23/Jan/2016:15:41:52 +0100] "POST /administrator/index.php HTTP/1.1" 200 "-" "-" "-" "-"'
entry = LogEntry(line)
self.assertEqual(entry.clientIp, '2607:f0d0:1002:0051:0000:0000:0000:0004')
self.assertEqual(entry.clientId, '-')
self.assertEqual(entry.userName, '-')
self.assertEqual(entry.requestLine, 'POST /administrator/index.php HTTP/1.1')
self.assertEqual(entry.requestUrl, '/administrator/index.php')
self.assertEqual(entry.urlSection, '/administrator/')
self.assertEqual(entry.statusCode, 200)
self.assertEqual(entry.sizeBytes, 0)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b57b45c9b188d2918484725649ec05a4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 336,
"avg_line_length": 54.044444444444444,
"alnum_prop": 0.6591282894736842,
"repo_name": "luchasei/http-log-app",
"id": "b3f79e68c9f2e7ca216e612d04e7992c2c89749b",
"size": "2432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/logEntryTests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12359"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
}
|
"""Utils for metrics used in eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numpy as np
import six
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import bleu_hook
from tensor2tensor.utils import registry
from tensor2tensor.utils import rouge
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
class Metrics(object):
"""Available evaluation metrics."""
# Entries here should match the keys in METRICS_FNS below
ACC = "accuracy"
ACC_TOP5 = "accuracy_top5"
ACC_PER_SEQ = "accuracy_per_sequence"
ACC_MULTILABEL_MATCH3 = "accuracy_multilabel_match3"
NEG_LOG_PERPLEXITY = "neg_log_perplexity"
APPROX_BLEU = "approx_bleu_score"
RMSE = "rmse"
LOG_POISSON = "log_poisson"
R2 = "r_squared"
ROUGE_2_F = "rouge_2_fscore"
ROUGE_L_F = "rouge_L_fscore"
EDIT_DISTANCE = "edit_distance"
SET_PRECISION = "set_precision"
SET_RECALL = "set_recall"
SOFTMAX_CROSS_ENTROPY_ONE_HOT = "softmax_cross_entropy_one_hot"
SIGMOID_ACCURACY_ONE_HOT = "sigmoid_accuracy_one_hot"
SIGMOID_RECALL_ONE_HOT = "sigmoid_recall_one_hot"
SIGMOID_PRECISION_ONE_HOT = "sigmoid_precision_one_hot"
SIGMOID_CROSS_ENTROPY_ONE_HOT = "sigmoid_cross_entropy_one_hot"
ROC_AUC = "roc_auc"
IMAGE_SUMMARY = "image_summary"
DMOL_PERPLEXITY = "disc_mol_neg_log_perplexity"
ABS_ERR = "mean_absolute_error"
IMAGE_RMSE = "image_rmse"
def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
"""RMSE but will argmax if last dim is not 1."""
if common_layers.shape_list(predictions)[-1] == 1:
predictions = tf.squeeze(predictions, axis=[-1])
else:
predictions = tf.argmax(predictions, axis=-1)
return padded_rmse(predictions, labels, weights_fn)
def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all):
predictions = tf.to_float(predictions)
labels = tf.to_float(labels)
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
weights = weights_fn(labels)
error = tf.pow(predictions - labels, 2)
error_sqrt = tf.sqrt(tf.reduce_mean(error * weights))
return error_sqrt, tf.reduce_sum(weights)
def abs_error(predictions, labels, weights_fn=None):
"""Computes mean(abs(preds-target))."""
del weights_fn # Unused
targets = tf.squeeze(labels, axis=[2, 3])
batch_abs_error = tf.abs(predictions - targets)
den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32)
return (batch_abs_error, den)
def padded_log_poisson(predictions,
labels,
weights_fn=common_layers.weights_all):
# Expects predictions to already be transformed into log space
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
lp_loss = tf.nn.log_poisson_loss(targets, predictions, compute_full_loss=True)
return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
def padded_variance_explained(predictions,
labels,
weights_fn=common_layers.weights_all):
"""Explained variance, also known as R^2."""
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
y_bar = tf.reduce_mean(weights * targets)
tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
r2 = 1. - res_ss / tot_ss
return r2, tf.reduce_sum(weights)
def padded_accuracy_topk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
effective_k = tf.minimum(k,
common_layers.shape_list(padded_predictions)[-1])
_, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(padded_labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
return padded_accuracy_topk(predictions, labels, 5, weights_fn)
def rounding_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Sequence accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions), axis=-1)
weights = weights_fn(labels)
labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
# If the last dimension is 1 then we're using L1/L2 loss.
if common_layers.shape_list(predictions)[-1] == 1:
return rounding_sequence_accuracy(
predictions, labels, weights_fn=weights_fn)
with tf.variable_scope(
"padded_sequence_accuracy", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
# Flatten, keeping batch dim (and num_classes dim for predictions)
# TPU argmax can only deal with a limited number of dimensions
predictions_shape = common_layers.shape_list(padded_predictions)
batch_size = predictions_shape[0]
num_classes = predictions_shape[-1]
flat_size = common_layers.list_product(
common_layers.shape_list(padded_labels)[1:])
padded_predictions = tf.reshape(
padded_predictions,
[batch_size, common_layers.list_product(predictions_shape[1:-1]),
num_classes])
padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
weights = tf.reshape(weights, [batch_size, flat_size])
outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
padded_labels = tf.to_int32(padded_labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def sequence_edit_distance(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average edit distance, ignoring padding 0s.
The score returned is the edit distance divided by the total length of
reference truth and the weight returned is the total length of the truth.
Args:
predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
type tf.float32 representing the logits, 0-padded.
labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
representing the labels of same length as logits and 0-padded.
weights_fn: ignored. The weights returned are the total length of the ground
truth labels, excluding 0-paddings.
Returns:
(edit distance / reference length, reference length)
Raises:
ValueError: if weights_fn is not common_layers.weights_nonzero.
"""
if weights_fn is not common_layers.weights_nonzero:
raise ValueError("Only weights_nonzero can be used for this metric.")
with tf.variable_scope("edit_distance", values=[predictions, labels]):
# Transform logits into sequence classes by taking max at every step.
predictions = tf.to_int32(
tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3)))
nonzero_idx = tf.where(tf.not_equal(predictions, 0))
sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(predictions, nonzero_idx),
tf.shape(predictions, out_type=tf.int64))
labels = tf.squeeze(labels, axis=(2, 3))
nonzero_idx = tf.where(tf.not_equal(labels, 0))
label_sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(labels, nonzero_idx),
tf.shape(labels, out_type=tf.int64))
distance = tf.reduce_sum(
tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False))
reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0])
return distance / reference_length, reference_length
def padded_neg_log_perplexity(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average log-perplexity exluding padding 0s. No smoothing."""
num, den = common_layers.padded_cross_entropy(
predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False)
return (-num, den)
def dmol_neg_log_perplexity(predictions,
labels,
weights_fn=None):
"""Average log-perplexity excluding padding 0s. No smoothing."""
del weights_fn # Unused
num, den = common_layers.dml_loss(
predictions, labels, reduce_sum=False)
return (-num, den)
def rounding_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Rounding accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions))
labels = tf.squeeze(labels)
weights = weights_fn(labels)
labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, labels)), weights
def padded_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels on non-0s."""
# If the last dimension is 1 then we're using L1/L2 loss.
if common_layers.shape_list(predictions)[-1] == 1:
return rounding_accuracy(predictions, labels, weights_fn=weights_fn)
with tf.variable_scope("padded_accuracy", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
padded_labels = tf.to_int32(padded_labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def multilabel_accuracy_matchk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Used to evaluate the VQA accuracy.
Let n be the number of times the prediction appears in the labels; the final
score is min(n/k, 1).
Refer to https://arxiv.org/pdf/1505.00468.pdf.
Args:
predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size].
labels: A tensor with shape [batch_size, length, 1, 1].
k: A tensor constant.
weights_fn: weight function.
Returns:
scores: min(n/k, 1).
weights: returns all ones.
"""
predictions = tf.to_int32(tf.argmax(predictions, axis=-1))
scores = tf.to_float(tf.equal(predictions, labels))
# those label == 0 do not count
weights = weights_fn(labels)
scores *= weights
scores = tf.reduce_sum(scores, axis=[1, 2, 3])
scores = tf.minimum(scores / tf.to_float(k), 1)
# every sample count
weights = tf.ones(tf.shape(scores), dtype=tf.float32)
return scores, weights
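# Worked example (illustration only): with k == 3, a prediction that matches
# 2 of the ground-truth annotations scores min(2/3, 1) = 2/3, while one that
# matches 4 of them scores min(4/3, 1) = 1.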
def multilabel_accuracy_match3(predictions, labels,
weights_fn=common_layers.weights_nonzero):
return multilabel_accuracy_matchk(predictions, labels, 3, weights_fn)
def set_precision(predictions, labels,
weights_fn=common_layers.weights_nonzero):
"""Precision of set predictions.
Args:
predictions : A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A function to weight the elements.
Returns:
hits: A Tensor of shape [batch, nlabels].
weights: A Tensor of shape [batch, nlabels].
"""
with tf.variable_scope("set_precision", values=[predictions, labels]):
labels = tf.squeeze(labels, [2, 3])
weights = weights_fn(labels)
labels = tf.one_hot(labels, predictions.shape[-1])
labels = tf.reduce_max(labels, axis=1)
labels = tf.cast(labels, tf.bool)
return tf.to_float(tf.equal(labels, predictions)), weights
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
"""Recall of set predictions.
Args:
predictions : A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A function to weight the elements.
Returns:
hits: A Tensor of shape [batch, nlabels].
weights: A Tensor of shape [batch, nlabels].
"""
with tf.variable_scope("set_recall", values=[predictions, labels]):
labels = tf.squeeze(labels, [2, 3])
weights = weights_fn(labels)
labels = tf.one_hot(labels, predictions.shape[-1])
labels = tf.reduce_max(labels, axis=1)
labels = tf.cast(labels, tf.bool)
return tf.to_float(tf.equal(labels, predictions)), weights
def image_summary(predictions, targets, hparams):
"""Reshapes predictions and passes it to tensorboard.
Args:
predictions : The predicted image (logits).
targets : The ground truth.
hparams: model hparams.
Returns:
summary_proto: containing the summary images.
weights: A Tensor of zeros of the same shape as predictions.
"""
del hparams
results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
gold = tf.cast(targets, tf.uint8)
summary1 = tf.summary.image("prediction", results, max_outputs=2)
summary2 = tf.summary.image("data", gold, max_outputs=2)
summary = tf.summary.merge([summary1, summary2])
return summary, tf.zeros_like(predictions)
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate softmax cross entropy given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross-entropy (scalar), weights
"""
with tf.variable_scope("softmax_cross_entropy_one_hot",
values=[logits, labels]):
del weights_fn
cross_entropy = tf.losses.softmax_cross_entropy(
onehot_labels=labels, logits=logits)
return cross_entropy, tf.constant(1.0)
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
"""Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
"""
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0)
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
"""Calculate precision for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
precision (scalar), weights
"""
with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
del weights_fn
num_classes = logits.shape[-1]
predictions = tf.nn.sigmoid(logits)
predictions = tf.argmax(predictions, -1)
predictions = tf.one_hot(predictions, num_classes)
_, precision = tf.metrics.precision(labels=labels, predictions=predictions)
return precision, tf.constant(1.0)
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
"""Calculate recall for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
recall (scalar), weights
"""
with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
del weights_fn
num_classes = logits.shape[-1]
predictions = tf.nn.sigmoid(logits)
predictions = tf.argmax(predictions, -1)
predictions = tf.one_hot(predictions, num_classes)
_, recall = tf.metrics.recall(labels=labels, predictions=predictions)
return recall, tf.constant(1.0)
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate sigmoid cross entropy for one-hot lanels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross_entropy (scalar), weights
"""
with tf.variable_scope("sigmoid_cross_entropy_one_hot",
values=[logits, labels]):
del weights_fn
cross_entropy = tf.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits)
return cross_entropy, tf.constant(1.0)
def roc_auc(logits, labels, weights_fn=None):
"""Calculate ROC AUC.
Requires binary classes.
Args:
logits: Tensor of size [batch_size, 1, 1, num_classes]
labels: Tensor of size [batch_size, 1, 1, num_classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
ROC AUC (scalar), weights
"""
del weights_fn
with tf.variable_scope("roc_auc", values=[logits, labels]):
predictions = tf.argmax(logits, axis=-1)
_, auc = tf.metrics.auc(labels, predictions, curve="ROC")
return auc, tf.constant(1.0)
def create_evaluation_metrics(problems, model_hparams):
"""Creates the evaluation metrics for the model.
Args:
problems: List of Problem instances.
model_hparams: a set of hparams.
Returns:
dict<metric name, metric function>. The metric functions have signature
(Tensor predictions, features) -> (metric Tensor, update op), where features
is a dict with keys {targets}.
Raises:
ValueError: if the metrics specified by a problem are not recognized (i.e.
are not defined in the Metrics enum).
"""
def reduce_dimensions(predictions, labels):
"""Reduce dimensions for high-dimensional predictions and labels."""
# We will treat first dimensions as batch. One example are video frames.
if len(predictions.get_shape()) > 5:
predictions_shape = common_layers.shape_list(predictions)
predictions = tf.reshape(
predictions, [predictions_shape[0], predictions_shape[1], -1,
predictions_shape[-1]])
labels_shape = common_layers.shape_list(labels)
labels = tf.reshape(
labels, [labels_shape[0], labels_shape[1], -1])
return predictions, labels
def make_problem_specific_metric_fn(metric_fn, weights_fn):
"""Create a metric fn."""
def problem_metric_fn(predictions, features, labels):
"""Metric fn."""
# Send along the entire features dict if the metric fn has the kwarg
# "features".
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
predictions, labels = reduce_dimensions(predictions, labels)
scores, weights = metric_fn(predictions, labels,
weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def make_image_wrapped_metric_fn(metric_fn):
"""Metric fn without tf.metrics.mean."""
def image_wrapped_metric_fn(predictions,
features,
labels,
weights_fn=common_layers.weights_all):
del weights_fn
del features
predictions, labels = reduce_dimensions(predictions, labels)
return metric_fn(predictions, labels, model_hparams)
return image_wrapped_metric_fn
def weights_fn_for_mp(problem_task_id):
return lambda x: common_layers.weights_multi_problem(x, problem_task_id)
eval_metrics = dict()
for problem_instance in problems:
problem_name = problem_instance.name
metrics = problem_instance.eval_metrics()
if hasattr(model_hparams.problem, "task_list"):
metrics = model_hparams.problem.eval_metrics()
if not all([m in METRICS_FNS for m in metrics]):
error_str = ("Unrecognized metric. Problem %s specified metrics "
"%s. Recognized metrics are %s.")
raise ValueError(error_str % (problem_name,
metrics,
list(METRICS_FNS.keys())))
tm = problem_instance.get_hparams().target_modality
if not isinstance(tm, dict):
tm = {"targets": tm}
for target_name, modality in six.iteritems(tm):
if isinstance(modality, tuple):
modality = registry.create_modality(modality, model_hparams)
weights_fn = modality.targets_weights_fn
if hasattr(model_hparams.problem, "task_list"):
ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop
weights_fn = weights_fn_for_mp(ptid)
for metric in metrics:
metric_fn = METRICS_FNS[metric]
metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric)
if metric == Metrics.IMAGE_SUMMARY:
eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn)
else:
eval_metrics[metric_name] = make_problem_specific_metric_fn(
metric_fn, weights_fn)
return eval_metrics
def create_eager_metrics_for_problem(problem, model_hparams=None):
"""See create_eager_metrics."""
metric_names = problem.eval_metrics()
tm = problem.get_hparams().target_modality
if isinstance(tm, tuple):
assert model_hparams is not None
tm = registry.create_modality(tm, model_hparams)
return create_eager_metrics(metric_names, weights_fn=tm.targets_weights_fn)
def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all):
"""Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding.
Returns:
(accum_fn(predictions, targets) => None,
result_fn() => dict<str metric_name, float avg_val>)
"""
metric_fns = dict(
[(name, METRICS_FNS[name]) for name in metric_names])
tfe_metrics = dict()
for name in metric_names:
tfe_metrics[name] = tfe.metrics.Mean(name=name)
def metric_accum(predictions, targets):
for name, metric_fn in metric_fns.items():
val, weight = metric_fn(predictions, targets,
weights_fn=weights_fn)
tfe_metrics[name](np.squeeze(val), np.squeeze(weight))
def metric_means():
avgs = {}
for name in metric_names:
avgs[name] = tfe_metrics[name].result().numpy()
return avgs
return metric_accum, metric_means
# Metrics are functions that take predictions and labels and return
# a tensor of metrics and a tensor of weights.
# If the function has "features" as an argument, it will receive the whole
# features dict as well.
# The results are passed to tf.metrics.mean to accumulate properly.
METRICS_FNS = {
Metrics.ACC: padded_accuracy,
Metrics.ACC_TOP5: padded_accuracy_top5,
Metrics.ACC_PER_SEQ: padded_sequence_accuracy,
Metrics.ACC_MULTILABEL_MATCH3: multilabel_accuracy_match3,
Metrics.NEG_LOG_PERPLEXITY: padded_neg_log_perplexity,
Metrics.APPROX_BLEU: bleu_hook.bleu_score,
Metrics.RMSE: padded_rmse,
Metrics.LOG_POISSON: padded_log_poisson,
Metrics.R2: padded_variance_explained,
Metrics.ROUGE_2_F: rouge.rouge_2_fscore,
Metrics.ROUGE_L_F: rouge.rouge_l_fscore,
Metrics.EDIT_DISTANCE: sequence_edit_distance,
Metrics.SOFTMAX_CROSS_ENTROPY_ONE_HOT: softmax_cross_entropy_one_hot,
Metrics.SIGMOID_ACCURACY_ONE_HOT: sigmoid_accuracy_one_hot,
Metrics.SIGMOID_RECALL_ONE_HOT: sigmoid_recall_one_hot,
Metrics.SIGMOID_PRECISION_ONE_HOT: sigmoid_precision_one_hot,
Metrics.SIGMOID_CROSS_ENTROPY_ONE_HOT: sigmoid_cross_entropy_one_hot,
Metrics.SET_PRECISION: set_precision,
Metrics.SET_RECALL: set_recall,
Metrics.ROC_AUC: roc_auc,
Metrics.IMAGE_SUMMARY: image_summary,
Metrics.DMOL_PERPLEXITY: dmol_neg_log_perplexity,
Metrics.ABS_ERR: abs_error,
Metrics.IMAGE_RMSE: image_rmse,
}
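# --- Illustrative usage, not part of the original module ---
# A minimal sketch of the eager-mode accumulate/report pattern documented in
# create_eager_metrics above; `eval_batches` is a hypothetical iterable of
# (predictions, targets) pairs shaped as the chosen metrics expect.
#
#     accum_fn, result_fn = create_eager_metrics(
#         [Metrics.ACC, Metrics.NEG_LOG_PERPLEXITY],
#         weights_fn=common_layers.weights_nonzero)
#     for predictions, targets in eval_batches:
#         accum_fn(predictions, targets)
#     print(result_fn())  # e.g. {'accuracy': 0.91, 'neg_log_perplexity': -2.3}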
|
{
"content_hash": "85fca98f0842cbc98547453fda490378",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 80,
"avg_line_length": 38.5625,
"alnum_prop": 0.6719147950914564,
"repo_name": "vthorsteinsson/tensor2tensor",
"id": "db9ddb183181a2d73a8eee54e4c1bae07c3645b8",
"size": "26519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/utils/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2423366"
},
{
"name": "Python",
"bytes": "3566836"
},
{
"name": "Shell",
"bytes": "7888"
}
],
"symlink_target": ""
}
|
from pystorm.bolt import BasicBolt
class SentenceSplitterBolt(BasicBolt):
def process(self, tup):
sentence = tup.values[0]
for word in sentence.split(' '):
self.emit([word])
if __name__ == '__main__':
SentenceSplitterBolt().run()
|
{
"content_hash": "e2819d8b0581da6848e5dc95fb0ec934",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 40,
"avg_line_length": 21.076923076923077,
"alnum_prop": 0.6240875912408759,
"repo_name": "thedrow/streamparse",
"id": "a83e9aed8b692aeee7764377422f256af3ca5a25",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/wordcount/multilang/resources/sentence_splitter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
def get_support_mail():
"""Returns the most appropriate support email address"""
from sentry.options import get
return get("system.support-email") or get("system.admin-email") or None
|
{
"content_hash": "5ca9ba394bd858f34e1a5f90c94328f3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 29.75,
"alnum_prop": 0.7184873949579832,
"repo_name": "mvaled/sentry",
"id": "8b23d0e6724d1f949b1a38f3bbecaffb2b535e5d",
"size": "238",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/utils/support.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.db import connection
from django.test import TestCase
from django_pg.models.fields.json import JSONField
from tests.jsont.models import Song
import math
class JSONSuite(TestCase):
"""Test suite for testing the specialized JSON field."""
def setUp(self):
"""Create test data."""
Song.objects.create(
title='Song of the Lonely Mountain',
data={
'sung_by': 'Dwarves',
'verses': 10,
},
sample_lines=[
'Far over the misty mountains cold,',
'In dungeons deep, and caverns old,',
'We must away, ere break of day,',
'To seek the pale-enchanted gold.',
],
)
Song.objects.create(
title="The Elves' Lullaby",
data={
'sung_by': 'Elves',
'verses': 4,
},
sample_lines=[
'Sing now ye joyful, now sing all together!',
"The wind's in the tree-top, the wind's in the eather;",
'The stars are in blossom, the moon is in flower,',
'And bright are the windows of Night in her tower.',
],
stuff=5,
)
Song.objects.create(
title='All that is Gold does not Glitter',
data={
'written_by': 'Bilbo Baggins',
},
stuff='About Aragorn',
)
Song.objects.create(
title='Chip the Glasses and Crack the Plates',
sample_lines=[
'Chip the glasses and crack the plates!',
'Blunt the knives and bend the forks!',
"That's what Bilbo Baggins hates-",
'Smash the bottles and burn the corks!',
],
stuff=6.8
)
Song.objects.create(
title='O! What are you doing?',
sample_lines=[
'O! What are you doing,',
'And where are you going?',
'Your ponies need shoeing!',
'The river is flowing!',
'O! tra-la-la-lally here down in the valley!',
],
stuff=True,
)
def test_list_access(self):
"""Test that I access an item saved as a list and it remains
a list.
"""
# Assert that a simple retreival on a list works.
song = Song.objects.get(title='Song of the Lonely Mountain')
self.assertEqual(song.sample_lines[0],
'Far over the misty mountains cold,')
def test_empty_list_default(self):
"""Test that accessing a field with an empty list as the default
properly returns empty list.
"""
# Assert that a retreival of a default value from a database
# save returns what we expect.
song = Song.objects.get(title='All that is Gold does not Glitter')
self.assertEqual(song.sample_lines, [])
def test_creation_default_list(self):
"""Run an awkward test to ensure that setting default to
empty list doesn't cross-contaminate models that use it.
"""
# Assert that using a default list doesn't cross-contaminate.
song1 = Song(title='The Long List of the Ents')
song2 = Song(title='The Ent and the Entwife')
song1.sample_lines.append('Learn now the Lore of Living Creatures!')
self.assertEqual(song2.sample_lines, [])
def test_ints(self):
"""Test that we get a value saved as an integer back
as an integer.
"""
# Test the save and load case.
song = Song.objects.get(title="The Elves' Lullaby")
self.assertEqual(song.stuff, 5)
self.assertEqual(song.data, {
'sung_by': 'Elves',
'verses': 4,
})
# Test the assignment-as-string case.
dummy = Song(stuff='8')
self.assertEqual(dummy.stuff, 8)
def test_floats(self):
"""Test that we get a value saved as a float back
as a float.
"""
# Test the save and load case.
song = Song.objects.get(title='Chip the Glasses and Crack the Plates')
self.assertEqual(song.stuff, 6.8)
# Test the assignment-as-string case.
dummy = Song(stuff='42.7')
self.assertEqual(dummy.stuff, 42.7)
def test_bools(self):
"""Test that we get a value saved as a boolean back
as a boolean.
"""
# Test the save and load case.
song = Song.objects.get(title='O! What are you doing?')
self.assertEqual(song.stuff, True)
# Test the assignment-as-string case.
dummy = Song(stuff='false')
self.assertEqual(dummy.stuff, False)
def test_strs(self):
"""Test that we get a value saved as a string back
as a string.
"""
# Test the save and load case.
song = Song.objects.get(title='All that is Gold does not Glitter')
self.assertEqual(song.stuff, 'About Aragorn')
# Test the assignment-within-JSON-string case.
dummy = Song(stuff='"The Hobbit"')
self.assertEqual(dummy.stuff, 'The Hobbit')
def test_nones(self):
"""Test that we get a value saved as None back as None,
and test that assigning null returns None.
"""
# Test the save and load case.
song = Song.objects.get(title='Song of the Lonely Mountain')
self.assertEqual(song.stuff, None)
# Test the assignment-as-string case.
dummy = Song(stuff='null')
self.assertEqual(dummy.stuff, None)
def test_lookups(self):
"""Test that lookups raise TypeError, as PostgreSQL 9.2 does not
support lookups of any kind on JSON fields.
"""
with self.assertRaises(TypeError):
Song.objects.get(stuff=5)
with self.assertRaises(TypeError):
Song.objects.get(stuff__contains={ 'verses': 10 })
def test_invalid_assignment(self):
"""Establish that assignment of an invalid value to a JSON
field raises ValidationError.
"""
with self.assertRaises(TypeError):
song = Song(sample_lines={'foo': 'bar'})
def test_validation_falsy_coercion(self):
"""Establish that if we begin with a falsy value on a typed JSONField,
that the value is converted to the correct type, and no error is
raised.
"""
song = Song(title='Something', sample_lines='')
self.assertIsInstance(song.sample_lines, list)
self.assertEqual(song.sample_lines, [])
class SupportSuite(TestCase):
"""Suite for testing more rarely-accessed aspects of JSON fields."""
def test_invalid_type_assignment(self):
"""Establish that if we attempt to instantiate a JSONField
with a type that doesn't readily serialize to JSON, that we
raise TypeError.
"""
with self.assertRaises(TypeError):
JSONField(type=object)
def test_empty_string_assign(self):
"""Test that assignment of empty string works as expected."""
Song.objects.create(
title='Fifteen Birds in Five Fir Trees',
stuff='',
)
# Prove that the empty string is preserved when we get the
# value back.
song = Song.objects.get(title='Fifteen Birds in Five Fir Trees')
self.assertEqual(song.stuff, '')
def test_nan_and_inf(self):
"""Test that receipt of special JavaScript values are handled
as they should be.
"""
song = Song(
title='Fifteen Birds in Five Fir Trees',
stuff='{ "foo": NaN, "bar": Infinity }',
)
assert math.isnan(song.stuff['foo']), ' '.join((
'Expected nan; got %r.' % song.stuff['foo'],
))
assert math.isinf(song.stuff['bar']), ' '.join((
'Expected inf; got %r.' % song.stuff['bar'],
))
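# --- Illustrative sketch (added for clarity; not part of the upstream suite) ---
# The coercion behaviour exercised above can be seen without touching the
# database: a JSON string assigned to the untyped `stuff` field is decoded,
# while a falsy value on the list-typed `sample_lines` field is coerced to [].
# `Song` is the same test model imported at the top of this module.
def _illustrate_json_coercion():
    song = Song(title='Example', stuff='{"verses": 3}')
    assert song.stuff == {'verses': 3}
    empty = Song(title='Empty', sample_lines='')
    assert empty.sample_lines == []
    return song, empty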
|
{
"content_hash": "f6bf2ca5f991509c3e7b150f80cae1a6",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 78,
"avg_line_length": 35.33913043478261,
"alnum_prop": 0.5670521653543307,
"repo_name": "lukesneeringer/django-pgfields",
"id": "6f966085078944d24cb30440d2b1eadf4b26c17e",
"size": "8128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/jsont/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "83"
},
{
"name": "Python",
"bytes": "143630"
}
],
"symlink_target": ""
}
|
from .plotter import (plot_with_plotly, plot_embedding_with_plotly,
plot_with_matplotlib, plot_embedding_with_matplotlib)
|
{
"content_hash": "e71ecc7748411449e25432a5af58ca21",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 67,
"avg_line_length": 63,
"alnum_prop": 0.7857142857142857,
"repo_name": "mmp2/megaman",
"id": "ab2ce1e6b215a0a46547a29657b4924b91343d9d",
"size": "205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "megaman/plotter/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "5781"
},
{
"name": "Makefile",
"bytes": "645"
},
{
"name": "Matlab",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "331044"
},
{
"name": "Shell",
"bytes": "1274"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
import vtrace.qt
import vdb.qt.base
from vqt.main import *
class VdbRegistersWindow(vdb.qt.base.VdbWidgetWindow):
def __init__(self, db, dbt, parent=None):
vdb.qt.base.VdbWidgetWindow.__init__(self, db, dbt, parent=parent)
self.regsWidget = vtrace.qt.RegistersView(trace=dbt, parent=parent)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.regsWidget)
self.setLayout(vbox)
self.setWindowTitle('Registers')
vqtconnect(self.vqLoad, 'vdb:setregs')
vqtconnect(self.vqLoad, 'vdb:setthread')
def vqLoad(self):
'''
the widgets in RegistersView already register for notifications.
'''
self.regsWidget.reglist.vqLoad()
|
{
"content_hash": "f475296a75dc4b524a61c14ac46ac7b7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 26.892857142857142,
"alnum_prop": 0.6613545816733067,
"repo_name": "HackerTool/vivisect",
"id": "0428513ba2bc94838b5524fb24fe7003f6896962",
"size": "753",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "vdb/qt/registers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11384786"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
import json
import tempfile
import uuid
import nose.exc
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.policy.backends import rules
import test_v3
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class IdentityTestProtectedCase(test_v3.RestfulTestCase):
"""Test policy protection of a sample of v3 identity apis"""
def setUp(self):
"""Setup for Identity Protection Test Cases.
As well as the usual housekeeping, create a set of domains,
users, roles and projects for the subsequent tests:
- Three domains: A,B & C. C is disabled.
- DomainA has user1, DomainB has user2 and user3
- DomainA has group1 and group2, DomainB has group3
- User1 has a role on DomainA
Remember that there will also be a fourth domain in existence,
the default domain.
"""
# Ensure that test_v3.RestfulTestCase doesn't load its own
# sample data, which would make checking the results of our
# tests harder
super(IdentityTestProtectedCase, self).setUp(load_sample_data=False)
# Start by creating a couple of domains
self.domainA = self.new_domain_ref()
domainA_ref = self.identity_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = self.new_domain_ref()
domainB_ref = self.identity_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = self.new_domain_ref()
self.domainC['enabled'] = False
domainC_ref = self.identity_api.create_domain(self.domainC['id'],
self.domainC)
# Now create some users, one in domainA and two of them in domainB
self.user1 = self.new_user_ref(
domain_id=self.domainA['id'])
self.user1['password'] = uuid.uuid4().hex
user_ref = self.identity_api.create_user(self.user1['id'],
self.user1)
self.user2 = self.new_user_ref(
domain_id=self.domainB['id'])
self.user2['password'] = uuid.uuid4().hex
user_ref = self.identity_api.create_user(self.user2['id'],
self.user2)
self.user3 = self.new_user_ref(
domain_id=self.domainB['id'])
self.user3['password'] = uuid.uuid4().hex
user_ref = self.identity_api.create_user(self.user3['id'],
self.user3)
self.group1 = self.new_group_ref(
domain_id=self.domainA['id'])
user_ref = self.identity_api.create_group(self.group1['id'],
self.group1)
self.group2 = self.new_group_ref(
domain_id=self.domainA['id'])
user_ref = self.identity_api.create_group(self.group2['id'],
self.group2)
self.group3 = self.new_group_ref(
domain_id=self.domainB['id'])
user_ref = self.identity_api.create_group(self.group3['id'],
self.group3)
self.role = self.new_role_ref()
self.identity_api.create_role(self.role['id'], self.role)
self.identity_api.create_grant(self.role['id'],
user_id=self.user1['id'],
domain_id=self.domainA['id'])
# Initialize the policy engine and allow us to write to a temp
# file in each test to create the policies
self.orig_policy_file = CONF.policy_file
rules.reset()
_unused, self.tmpfilename = tempfile.mkstemp()
self.opt(policy_file=self.tmpfilename)
# A default auth request we can use - un-scoped user token
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'])
def tearDown(self):
super(IdentityTestProtectedCase, self).tearDown()
rules.reset()
self.opt(policy_file=self.orig_policy_file)
def _get_id_list_from_ref_list(self, ref_list):
result_list = []
for x in ref_list:
result_list.append(x['id'])
return result_list
def _set_policy(self, new_policy):
with open(self.tmpfilename, "w") as policyfile:
policyfile.write(jsonutils.dumps(new_policy))
def test_list_users_unprotected(self):
"""GET /users (unprotected)
Test Plan:
- Update policy so api is unprotected
- Use an un-scoped token to make sure we can get back all
the users independent of domain
"""
self._set_policy({"identity:list_users": []})
r = self.get('/users', auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.body.get('users'))
self.assertIn(self.user1['id'], id_list)
self.assertIn(self.user2['id'], id_list)
self.assertIn(self.user3['id'], id_list)
def test_list_users_filtered_by_domain(self):
"""GET /users?domain_id=mydomain (filtered)
Test Plan:
- Update policy so api is unprotected
- Use an un-scoped token to make sure we can filter the
users by domainB, getting back the 2 users in that domain
"""
self._set_policy({"identity:list_users": []})
url_by_name = '/users?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth)
# We should get back two users, those in DomainB
id_list = self._get_id_list_from_ref_list(r.body.get('users'))
self.assertIn(self.user2['id'], id_list)
self.assertIn(self.user3['id'], id_list)
def test_get_user_protected_match_id(self):
"""GET /users/{id} (match payload)
Test Plan:
- Update policy to protect api by user_id
- List users with user_id of user1 as filter, to check that
this will correctly match user_id in the flattened
payload
"""
# TODO (henry-nash, ayoung): It would be good to expand this
# test for further test flattening, e.g. protect on, say, an
# attribute of an object being created
new_policy = {"identity:get_user": [["user_id:%(user_id)s"]]}
self._set_policy(new_policy)
url_by_name = '/users/%s' % self.user1['id']
r = self.get(url_by_name, auth=self.auth)
body = r.body
self.assertEquals(self.user1['id'], body['user']['id'])
def test_list_users_protected_by_domain(self):
"""GET /users?domain_id=mydomain (protected)
Test Plan:
- Update policy to protect api by domain_id
- List users using a token scoped to domainA with a filter
specifying domainA - we should only get back the one user
that is in domainA.
- Try and read the users from domainB - this should fail since
we don't have a token scoped for domainB
"""
new_policy = {"identity:list_users": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/users?domain_id=%s' % self.domainA['id']
r = self.get(url_by_name, auth=self.auth)
# We should only get back one user, the one in DomainA
id_list = self._get_id_list_from_ref_list(r.body.get('users'))
self.assertEqual(len(id_list), 1)
self.assertIn(self.user1['id'], id_list)
# Now try for domainB, which should fail
url_by_name = '/users?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
def test_list_groups_protected_by_domain(self):
"""GET /groups?domain_id=mydomain (protected)
Test Plan:
- Update policy to protect api by domain_id
- List groups using a token scoped to domainA and make sure
we only get back the two groups that are in domainA
- Try and read the groups from domainB - this should fail since
we don't have a token scoped for domainB
"""
new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/groups?domain_id=%s' % self.domainA['id']
r = self.get(url_by_name, auth=self.auth)
# We should only get back two groups, the ones in DomainA
id_list = self._get_id_list_from_ref_list(r.body.get('groups'))
self.assertEqual(len(id_list), 2)
self.assertIn(self.group1['id'], id_list)
self.assertIn(self.group2['id'], id_list)
# Now try for domainB, which should fail
url_by_name = '/groups?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
def test_list_groups_protected_by_domain_and_filtered(self):
"""GET /groups?domain_id=mydomain&name=myname (protected)
Test Plan:
- Update policy to protect api by domain_id
- List groups using a token scoped to domainA with a filter
specifying both domainA and the name of group.
- We should only get back the group in domainA that matches
the name
"""
new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/groups?domain_id=%s&name=%s' % (
self.domainA['id'], self.group2['name'])
r = self.get(url_by_name, auth=self.auth)
# We should only get back one group, the one in DomainA that matches
# the name supplied
id_list = self._get_id_list_from_ref_list(r.body.get('groups'))
self.assertEqual(len(id_list), 1)
self.assertIn(self.group2['id'], id_list)
def test_list_filtered_domains(self):
"""GET /domains?enabled=0
Test Plan:
- Update policy for no protection on api
- Filter by the 'enabled' boolean to get disabled domains, which
should return just domainC
- Try the filter using different ways of specifying 'true'
to test that our handling of booleans in filter matching is
correct
"""
new_policy = {"identity:list_domains": []}
self._set_policy(new_policy)
r = self.get('/domains?enabled=0', auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.body.get('domains'))
self.assertEqual(len(id_list), 1)
self.assertIn(self.domainC['id'], id_list)
# Now try a few ways of specifying 'true' when we should get back
# the other two domains, plus the default domain
r = self.get('/domains?enabled=1', auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.body.get('domains'))
self.assertEqual(len(id_list), 3)
self.assertIn(self.domainA['id'], id_list)
self.assertIn(self.domainB['id'], id_list)
self.assertIn(DEFAULT_DOMAIN_ID, id_list)
r = self.get('/domains?enabled', auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.body.get('domains'))
self.assertEqual(len(id_list), 3)
self.assertIn(self.domainA['id'], id_list)
self.assertIn(self.domainB['id'], id_list)
self.assertIn(DEFAULT_DOMAIN_ID, id_list)
def test_multiple_filters(self):
"""GET /domains?enabled&name=myname
Test Plan:
- Update policy for no protection on api
- Filter by the 'enabled' boolean and name - this should
return a single domain
"""
new_policy = {"identity:list_domains": []}
self._set_policy(new_policy)
my_url = '/domains?enabled&name=%s' % self.domainA['name']
r = self.get(my_url, auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.body.get('domains'))
self.assertEqual(len(id_list), 1)
self.assertIn(self.domainA['id'], id_list)
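# --- Illustrative sketch (not part of the upstream suite) ---
# The policies written by _set_policy() above are plain JSON rule maps.  An
# empty rule list leaves an API unprotected, while a rule string such as
# "domain_id:%(domain_id)s" requires the token's domain scope to match the
# domain_id in the request, as exercised by the protected tests above.
_EXAMPLE_POLICY = {
    "identity:list_users": ["domain_id:%(domain_id)s"],
    "identity:list_domains": [],
}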
|
{
"content_hash": "55368ff80cc72ee14a88cf2d8d36845a",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 76,
"avg_line_length": 41.061290322580646,
"alnum_prop": 0.5892057506481263,
"repo_name": "paypal/keystone",
"id": "ea38b24c47976b1225ddc42bdcbde587114048ee",
"size": "13380",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_v3_protection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""labquiz URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('login.urls')),
url(r'^teacher/', include('teacher.urls', namespace='teacher')),
url(r'^student/', include('student.urls', namespace='student')),
]
|
{
"content_hash": "33367f22c754890791990d0de6a5c781",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 38.30769230769231,
"alnum_prop": 0.6947791164658634,
"repo_name": "sehgalayush1/labquiz",
"id": "7501d5bcf6953a129b910065a89c4b344a72722c",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labquiz/labquiz/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14085"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "29653"
}
],
"symlink_target": ""
}
|
"""Tensor and Operation class for computation declaration."""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
from ._ffi.node import NodeBase, NodeGeneric, register_node, convert_to_node
from . import _api_internal
from . import make as _make
from . import expr as _expr
class TensorSlice(NodeGeneric, _expr.ExprOp):
"""Auxiliary data structure for enable slicing syntax from tensor."""
def __init__(self, tensor, indices):
if not isinstance(indices, tuple):
indices = (indices,)
self.tensor = tensor
self.indices = indices
def __getitem__(self, indices):
if not isinstance(indices, tuple):
indices = (indices,)
return TensorSlice(self.tensor, self.indices + indices)
def asnode(self):
"""Convert slice to node."""
return self.tensor(*self.indices)
@property
def dtype(self):
"""Data content of the tensor."""
return self.tensor.dtype
@register_node
class TensorIntrinCall(NodeBase):
"""Intermediate structure for calling a tensor intrinsic."""
itervar_cls = None
@register_node
class Tensor(NodeBase, _expr.ExprOp):
"""Tensor object, to construct, see function.Tensor"""
def __call__(self, *indices):
ndim = self.ndim
if len(indices) != ndim:
raise ValueError("Need to provide %d index in tensor slice" % ndim)
indices = convert_to_node(indices)
args = []
for x in indices:
if isinstance(x, _expr.Expr):
args.append(x)
elif isinstance(x, itervar_cls):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _make.Call(self.dtype, self.op.name,
args, _expr.Call.Halide,
self.op, self.value_index)
def __getitem__(self, indices):
return TensorSlice(self, indices)
def __hash__(self):
return _api_internal._TensorHash(self)
def __eq__(self, other):
if not isinstance(other, Tensor):
if isinstance(other, _expr.ExprOp):
return _expr.EqualOp(self, other)
return False
if self.ndim == 0 and other.ndim == 0:
raise ValueError("Equal == comparison among rank-0 tensor is ambiguous, "
"use Tensor.equal for content expression equvalence, "
"use Tensor.same_as for exact reference comparison")
return _api_internal._TensorEqual(self, other)
@property
def ndim(self):
"""Dimension of the tensor."""
return len(self.shape)
@property
def axis(self):
"""Axis of the tensor."""
return self.__getattr__("axis")
@property
def op(self):
"""The corressponding :any:`Operation`."""
return self.__getattr__("op")
@property
def value_index(self):
"""The output value index the tensor corresponds to."""
return self.__getattr__("value_index")
@property
def shape(self):
"""The output shape of the tensor."""
return self.__getattr__("shape")
@property
def name(self):
op = self.op
if op.num_outputs == 1:
return op.name
return "%s.v%d" % (op.name, self.value_index)
class Operation(NodeBase):
"""Represent an operation that generates a tensor"""
def output(self, index):
"""Get the index-th output of the operation
Parameters
----------
index : int
The index of the output.
Returns
-------
out : Tensor
The i-th output.
"""
return _api_internal._OpGetOutput(self, index)
@property
def num_outputs(self):
"""Number of outputs from this op."""
return _api_internal._OpNumOutputs(self)
@property
def input_tensors(self):
"""List of input tensors to this op."""
return _api_internal._OpInputTensors(self)
@register_node
class PlaceholderOp(Operation):
"""Placeholder operation."""
@register_node
class BaseComputeOp(Operation):
"""Compute operation."""
@property
def axis(self):
"""Represent the IterVar axis, defined when it is a ComputeOp"""
return self.__getattr__("axis")
@property
def reduce_axis(self):
"""Represent axis of reductions, only defined when it is a ComputeOp"""
return self.__getattr__("reduce_axis")
@register_node
class ComputeOp(BaseComputeOp):
"""Scalar operation."""
pass
@register_node
class TensorComputeOp(BaseComputeOp):
"""Tensor operation."""
@register_node
class ScanOp(Operation):
"""Scan operation."""
@property
def scan_axis(self):
"""Represent the scan axis, only defined when it is a ScanOp"""
return self.__getattr__("scan_axis")
@register_node
class ExternOp(Operation):
"""External operation."""
@register_node
class HybridOp(Operation):
"""Hybrid operation."""
@property
def axis(self):
"""Represent the IterVar axis, also defined when it is a HybridOp"""
return self.__getattr__("axis")
@register_node
class Layout(NodeBase):
"""Layout is composed of upper cases, lower cases and numbers,
where upper case indicates a primal axis and
the corresponding lower case with factor size indicates the subordinate axis.
For example, NCHW16c can describe a 5-D tensor of
[batch_size, channel, height, width, channel_block].
Here subordinate axis channel_block=16 is the factor size of the primal axis C (channel).
Do not construct directly, use :any:`layout` instead.
See the documentation of :any:`layout` for more details.
See Also
--------
layout : Declare a layout
"""
def __len__(self):
return _api_internal._LayoutNdim(self)
def __contains__(self, axis):
return len(axis) == 1 and axis[0].isalpha() and axis[0] in self.name
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Layout index out of range")
return _api_internal._LayoutGetItem(self, index)
def index_of(self, axis):
"""Get the index of an axis
Parameters
----------
axis : str
The axis name, need to be [a-z,A-Z]
Returns
-------
index : int
The index of the axis, -1 if not found.
"""
return _api_internal._LayoutIndexOf(self, axis)
def factor_of(self, axis):
"""Get the factor size of the subordinate axis.
Parameters
----------
axis : str
The axis name, need to be [a-z,A-Z]
Returns
-------
factor : int
the size of the subordinate-axis of axis (if axis is a primal-axis),
or the size of axis itself (if axis is a subordinate-axis).
Return -1 if axis is not in the layout.
"""
return _api_internal._LayoutFactorOf(self, axis)
@register_node
class BijectiveLayout(NodeBase):
"""Bijective mapping for two layouts (src-layout and dst-layout).
It provides shape and index conversion between each other.
Do not construct directly, use :any:`bijective_layout` instead.
See the documentation of :any:`bijective_layout` for more details.
See Also
--------
bijective_layout : Declare a bijective layout converter
"""
def forward_index(self, index):
"""Given the indices of the src-layout, infer the dst index.
Parameters
----------
index: Array of Expr
The indices in src-layout.
Returns
-------
dst_index: Array of Expr
The inferred indices in dst-layout.
"""
return _api_internal._BijectiveLayoutForwardIndex(self, index)
def backward_index(self, index):
"""Given the indices of the dst-layout, infer the src index.
Parameters
----------
index: Array of Expr
The indices in dst-layout.
Returns
-------
src_index: Array of Expr
The inferred indices in src-layout.
"""
return _api_internal._BijectiveLayoutBackwardIndex(self, index)
def forward_shape(self, shape):
"""Given the shape of the src-layout, infer the dst shape.
Parameters
----------
shape: Array of Expr
The shape in src-layout.
Returns
-------
dst_shape: Array of Expr
The inferred shape in dst-layout.
"""
return _api_internal._BijectiveLayoutForwardShape(self, shape)
def backward_shape(self, shape):
"""Given the shape of the dst-layout, infer the src shape.
Parameters
----------
shape: Array of Expr
The shape in dst-layout.
Returns
-------
src_shape: Array of Expr
The inferred shape in src-layout.
"""
return _api_internal._BijectiveLayoutBackwardShape(self, shape)
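# --- Illustrative usage sketch (not part of the upstream module) ---
# How the classes above are normally reached from the public API; this assumes
# the top-level `tvm` namespace exposes `var`, `placeholder` and `compute` as
# used elsewhere in this code base.
def _example_tensor_usage():
    import tvm
    n = tvm.var("n")
    a = tvm.placeholder((n, n), name="A")  # Tensor produced by a PlaceholderOp
    b = tvm.compute((n, n), lambda i, j: a[i, j] * 2, name="B")
    # a[i, j] builds a TensorSlice; calling a(i, j) emits a Halide Call node.
    # b.op is a ComputeOp whose IterVars are available as b.op.axis.
    return a, b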
|
{
"content_hash": "0188b0df9316c55fe02824b4909b83b1",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 93,
"avg_line_length": 28.033742331288344,
"alnum_prop": 0.5895612211401685,
"repo_name": "Huyuwei/tvm",
"id": "1cadf0621823e7db7479a1ef2d7cb3013444e2ab",
"size": "9924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tvm/tensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
"""Provide pre-made queries on top of the recorder component."""
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE)
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
from homeassistant.components.recorder.util import session_scope, execute
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history'
CONF_ORDER = 'use_include_order'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: recorder.FILTER_SCHEMA.extend({
vol.Optional(CONF_ORDER, default=False): cv.boolean,
})
}, extra=vol.ALLOW_EXTRA)
SIGNIFICANT_DOMAINS = ('thermostat', 'climate', 'water_heater')
IGNORE_DOMAINS = ('zone', 'scene',)
def get_significant_states(hass, start_time, end_time=None, entity_ids=None,
filters=None, include_start_time_state=True):
"""
Return state changes during the UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
timer_start = time.perf_counter()
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.domain.in_(SIGNIFICANT_DOMAINS) |
(States.last_changed == States.last_updated)) &
(States.last_updated > start_time))
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(States.last_updated < end_time)
query = query.order_by(States.last_updated)
states = (
state for state in execute(query)
if (_is_significant(state) and
not state.attributes.get(ATTR_HIDDEN, False)))
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'get_significant_states took %fs', elapsed)
return states_to_json(
hass, states, start_time, entity_ids, filters,
include_start_time_state)
def state_changes_during_period(hass, start_time, end_time=None,
entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
from homeassistant.components.recorder.models import States
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.last_changed == States.last_updated) &
(States.last_updated > start_time))
if end_time is not None:
query = query.filter(States.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
entity_ids = [entity_id] if entity_id is not None else None
states = execute(
query.order_by(States.last_updated))
return states_to_json(hass, states, start_time, entity_ids)
def get_last_state_changes(hass, number_of_states, entity_id):
"""Return the last number_of_states."""
from homeassistant.components.recorder.models import States
start_time = dt_util.utcnow()
with session_scope(hass=hass) as session:
query = session.query(States).filter(
(States.last_changed == States.last_updated))
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
entity_ids = [entity_id] if entity_id is not None else None
states = execute(
query.order_by(States.last_updated.desc()).limit(number_of_states))
return states_to_json(hass, reversed(states),
start_time,
entity_ids,
include_start_time_state=False)
def get_states(hass, utc_point_in_time, entity_ids=None, run=None,
filters=None):
"""Return the states at a specific point in time."""
from homeassistant.components.recorder.models import States
if run is None:
run = recorder.run_information(hass, utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
with session_scope(hass=hass) as session:
if entity_ids and len(entity_ids) == 1:
# Use an entirely different (and extremely fast) query if we only
# have a single entity id
most_recent_state_ids = session.query(
States.state_id.label('max_state_id')
).filter(
(States.last_updated < utc_point_in_time) &
(States.entity_id.in_(entity_ids))
).order_by(
States.last_updated.desc())
most_recent_state_ids = most_recent_state_ids.limit(1)
else:
# We have more than one entity to look at (most commonly we want
# all entities,) so we need to do a search on all states since the
# last recorder run started.
most_recent_states_by_date = session.query(
States.entity_id.label('max_entity_id'),
func.max(States.last_updated).label('max_last_updated')
).filter(
(States.last_updated >= run.start) &
(States.last_updated < utc_point_in_time)
)
if entity_ids:
most_recent_states_by_date = most_recent_states_by_date.filter(
    States.entity_id.in_(entity_ids))
most_recent_states_by_date = most_recent_states_by_date.group_by(
States.entity_id)
most_recent_states_by_date = most_recent_states_by_date.subquery()
most_recent_state_ids = session.query(
func.max(States.state_id).label('max_state_id')
).join(most_recent_states_by_date, and_(
States.entity_id == most_recent_states_by_date.c.max_entity_id,
States.last_updated == most_recent_states_by_date.c.
max_last_updated))
most_recent_state_ids = most_recent_state_ids.group_by(
States.entity_id)
most_recent_state_ids = most_recent_state_ids.subquery()
query = session.query(States).join(
most_recent_state_ids,
States.state_id == most_recent_state_ids.c.max_state_id
).filter((~States.domain.in_(IGNORE_DOMAINS)))
if filters:
query = filters.apply(query, entity_ids)
return [state for state in execute(query)
if not state.attributes.get(ATTR_HIDDEN, False)]
def states_to_json(
hass,
states,
start_time,
entity_ids,
filters=None,
include_start_time_state=True):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
# Get the states at the start time
timer_start = time.perf_counter()
if include_start_time_state:
for state in get_states(hass, start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'getting %d first datapoints took %fs', len(result), elapsed)
# Append all changes to it
for ent_id, group in groupby(states, lambda state: state.entity_id):
result[ent_id].extend(group)
return result
def get_state(hass, utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(hass, utc_point_in_time, (entity_id,), run))
return states[0] if states else None
async def async_setup(hass, config):
"""Set up the history hooks."""
filters = Filters()
conf = config.get(DOMAIN, {})
exclude = conf.get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude.get(CONF_ENTITIES, [])
filters.excluded_domains = exclude.get(CONF_DOMAINS, [])
include = conf.get(CONF_INCLUDE)
if include:
filters.included_entities = include.get(CONF_ENTITIES, [])
filters.included_domains = include.get(CONF_DOMAINS, [])
use_include_order = conf.get(CONF_ORDER)
hass.http.register_view(HistoryPeriodView(filters, use_include_order))
hass.components.frontend.async_register_built_in_panel(
'history', 'history', 'hass:poll-box')
return True
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = '/api/history/period'
name = 'api:history:view-period'
extra_urls = ['/api/history/period/{datetime}']
def __init__(self, filters, use_include_order):
"""Initialize the history period view."""
self.filters = filters
self.use_include_order = use_include_order
async def get(self, request, datetime=None):
"""Return history over a period of time."""
timer_start = time.perf_counter()
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
now = dt_util.utcnow()
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = now - one_day
if start_time > now:
return self.json([])
end_time = request.query.get('end_time')
if end_time:
end_time = dt_util.parse_datetime(end_time)
if end_time:
end_time = dt_util.as_utc(end_time)
else:
return self.json_message('Invalid end_time', HTTP_BAD_REQUEST)
else:
end_time = start_time + one_day
entity_ids = request.query.get('filter_entity_id')
if entity_ids:
entity_ids = entity_ids.lower().split(',')
include_start_time_state = 'skip_initial_state' not in request.query
hass = request.app['hass']
result = await hass.async_add_job(
get_significant_states, hass, start_time, end_time,
entity_ids, self.filters, include_start_time_state)
result = list(result.values())
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'Extracted %d states in %fs', sum(map(len, result)), elapsed)
# Optionally reorder the result to respect the ordering given
# by any entities explicitly included in the configuration.
if self.use_include_order:
sorted_result = []
for order_entity in self.filters.included_entities:
for state_list in result:
if state_list[0].entity_id == order_entity:
sorted_result.append(state_list)
result.remove(state_list)
break
sorted_result.extend(result)
result = sorted_result
return await hass.async_add_job(self.json, result)
class Filters:
"""Container for the configured include and exclude filters."""
def __init__(self):
"""Initialise the include and exclude filters."""
self.excluded_entities = []
self.excluded_domains = []
self.included_entities = []
self.included_domains = []
def apply(self, query, entity_ids=None):
"""Apply the include/exclude filter on domains and entities on query.
Following rules apply:
* only the include section is configured - just query the specified
entities or domains.
* only the exclude section is configured - filter the specified
entities and domains from all the entities in the system.
* if include and exclude is defined - select the entities specified in
the include and filter out the ones from the exclude list.
"""
from homeassistant.components.recorder.models import States
# specific entities requested - do not in/exclude anything
if entity_ids is not None:
return query.filter(States.entity_id.in_(entity_ids))
query = query.filter(~States.domain.in_(IGNORE_DOMAINS))
filter_query = None
# filter if only excluded domain is configured
if self.excluded_domains and not self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= States.entity_id.in_(self.included_entities)
# filter if only included domain is configured
elif not self.excluded_domains and self.included_domains:
filter_query = States.domain.in_(self.included_domains)
if self.included_entities:
filter_query |= States.entity_id.in_(self.included_entities)
# filter if included and excluded domain is configured
elif self.excluded_domains and self.included_domains:
filter_query = ~States.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= (States.domain.in_(self.included_domains) |
States.entity_id.in_(self.included_entities))
else:
filter_query &= (States.domain.in_(self.included_domains) & ~
States.domain.in_(self.excluded_domains))
# no domain filter just included entities
elif not self.excluded_domains and not self.included_domains and \
self.included_entities:
filter_query = States.entity_id.in_(self.included_entities)
if filter_query is not None:
query = query.filter(filter_query)
# finally apply excluded entities filter if configured
if self.excluded_entities:
query = query.filter(~States.entity_id.in_(self.excluded_entities))
return query
def _is_significant(state):
"""Test if state is significant for history charts.
Will only test for things that are not filtered out in SQL.
"""
# scripts that are not cancellable will never change state
return (state.domain != 'script' or
state.attributes.get(script.ATTR_CAN_CANCEL))
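# --- Illustrative sketch (not part of the component) ---
# How the include/exclude rules in Filters.apply() combine, using a manually
# populated Filters object; the entity and domain names are made up.  With
# only an include section plus excluded entities, apply() keeps states from
# the included domains and then drops the excluded entity ids.
def _example_filters():
    filters = Filters()
    filters.included_domains = ['sensor', 'light']
    filters.excluded_entities = ['sensor.noisy_debug']
    return filters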
|
{
"content_hash": "d881ae61e5784e4d0237db8ca846a567",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 79,
"avg_line_length": 37.549751243781095,
"alnum_prop": 0.6211990725405764,
"repo_name": "jabesq/home-assistant",
"id": "d0dd098638f62fb05a3f34ef5d3c98b2322b13bc",
"size": "15095",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/history/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
}
|
"""
**Note**: this connector is in beta!
The ``@dockerssh`` connector allows you to run commands on Docker containers on a remote machine.
.. code:: shell
# A Docker base image must be provided
pyinfra @dockerssh/remotehost:alpine:3.8 ...
# pyinfra can run on multiple Docker images in parallel
pyinfra @dockerssh/remotehost:alpine:3.8,@dockerssh/remotehost:ubuntu:bionic ...
"""
import os
from tempfile import mkstemp
from typing import TYPE_CHECKING
import click
from pyinfra import logger
from pyinfra.api import QuoteString, StringCommand
from pyinfra.api.connectors import BaseConnectorMeta
from pyinfra.api.exceptions import ConnectError, InventoryError, PyinfraError
from pyinfra.api.util import get_file_io, memoize
from pyinfra.progress import progress_spinner
from . import ssh
from .util import make_unix_command_for_host
if TYPE_CHECKING:
from pyinfra.api.host import Host
from pyinfra.api.state import State
class Meta(BaseConnectorMeta):
handles_execution = True
def remote_remove(
state: "State", host: "Host", filename, print_output: bool = False, print_input: bool = False
):
"""
Deletes a file on a remote machine over ssh.
"""
remove_status, _, remove_stderr = ssh.run_shell_command(
state,
host,
"rm -f {0}".format(filename),
print_output=print_output,
print_input=print_input,
)
if not remove_status:
raise IOError("\n".join(remove_stderr))
@memoize
def show_warning():
logger.warning("The @dockerssh connector is in beta!")
def make_names_data(host_image_str):
try:
hostname, image = host_image_str.split(":", 1)
except (AttributeError, ValueError): # failure to parse the host_image_str
raise InventoryError("No ssh host or docker base image provided!")
if not image:
raise InventoryError("No docker base image provided!")
show_warning()
yield (
"@dockerssh/{0}:{1}".format(hostname, image),
{"ssh_hostname": hostname, "docker_image": image},
["@dockerssh"],
)
def connect(state: "State", host: "Host"):
if not host.connection:
host.connection = ssh.connect(state, host)
if "docker_container_id" in host.host_data: # user can provide a docker_container_id
return host.connection
try:
with progress_spinner({"docker run"}):
# last line is the container ID
status, stdout, stderr = ssh.run_shell_command(
state,
host,
"docker run -d {0} tail -f /dev/null".format(host.data.docker_image),
)
if not status:
raise IOError("\n".join(stderr))
container_id = stdout[-1]
except PyinfraError as e:
host.connection = None # fail connection
raise ConnectError(e.args[0])
host.host_data["docker_container_id"] = container_id
return host.connection
def disconnect(state: "State", host: "Host"):
container_id = host.host_data["docker_container_id"][:12]
with progress_spinner({"docker commit"}):
image_id = ssh.run_shell_command(state, host, "docker commit {0}".format(container_id))[1][
-1
][
7:19
] # last line is the image ID, get sha256:[XXXXXXXXXX]...
with progress_spinner({"docker rm"}):
ssh.run_shell_command(
state,
host,
"docker rm -f {0}".format(container_id),
)
logger.info(
"{0}docker build complete, image ID: {1}".format(
host.print_prefix,
click.style(image_id, bold=True),
),
)
def run_shell_command(
state: "State",
host: "Host",
command,
get_pty: bool = False,
timeout=None,
stdin=None,
success_exit_codes=None,
print_output: bool = False,
print_input: bool = False,
return_combined_output=False,
**command_kwargs,
):
container_id = host.host_data["docker_container_id"]
# Don't sudo/su in Docker - is this the right thing to do? Makes deploys that
# target SSH systems work w/Docker out of the box (ie most docker commands
# are run as root).
for key in ("sudo", "su_user"):
command_kwargs.pop(key, None)
command = make_unix_command_for_host(state, host, command, **command_kwargs)
command = QuoteString(command)
docker_flags = "-it" if get_pty else "-i"
docker_command = StringCommand(
"docker",
"exec",
docker_flags,
container_id,
"sh",
"-c",
command,
)
return ssh.run_shell_command(
state,
host,
docker_command,
timeout=timeout,
stdin=stdin,
success_exit_codes=success_exit_codes,
print_output=print_output,
print_input=print_input,
return_combined_output=return_combined_output,
)
def put_file(
state: "State",
host: "Host",
filename_or_io,
remote_filename,
remote_temp_filename=None,
print_output: bool = False,
print_input: bool = False,
**kwargs, # ignored (sudo/etc)
):
"""
Upload a file/IO object to the target Docker container by copying it to a
temporary location and then uploading it into the container using ``docker cp``.
"""
fd, local_temp_filename = mkstemp()
remote_temp_filename = remote_temp_filename or state.get_temp_filename(local_temp_filename)
# Load our file or IO object and write it to the temporary file
with get_file_io(filename_or_io) as file_io:
with open(local_temp_filename, "wb") as temp_f:
data = file_io.read()
if isinstance(data, str):
data = data.encode()
temp_f.write(data)
# upload file to remote server
ssh_status = ssh.put_file(state, host, local_temp_filename, remote_temp_filename)
if not ssh_status:
raise IOError("Failed to copy file over ssh")
try:
docker_id = host.host_data["docker_container_id"]
docker_command = "docker cp {0} {1}:{2}".format(
remote_temp_filename,
docker_id,
remote_filename,
)
status, _, stderr = ssh.run_shell_command(
state,
host,
docker_command,
print_output=print_output,
print_input=print_input,
)
finally:
os.close(fd)
os.remove(local_temp_filename)
remote_remove(
state,
host,
remote_temp_filename,  # remove the copy uploaded to the remote host
print_output=print_output,
print_input=print_input,
)
if not status:
raise IOError("\n".join(stderr))
if print_output:
click.echo(
"{0}file uploaded to container: {1}".format(
host.print_prefix,
remote_filename,
),
err=True,
)
return status
def get_file(
state: "State",
host: "Host",
remote_filename,
filename_or_io,
remote_temp_filename=None,
print_output: bool = False,
print_input: bool = False,
**kwargs, # ignored (sudo/etc)
):
"""
Download a file from the target Docker container by copying it to a temporary
location and then reading that into our final file/IO object.
"""
remote_temp_filename = remote_temp_filename or state.get_temp_filename(remote_filename)
try:
docker_id = host.host_data["docker_container_id"]
docker_command = "docker cp {0}:{1} {2}".format(
docker_id,
remote_filename,
remote_temp_filename,
)
status, _, stderr = ssh.run_shell_command(
state,
host,
docker_command,
print_output=print_output,
print_input=print_input,
)
ssh_status = ssh.get_file(state, host, remote_temp_filename, filename_or_io)
finally:
remote_remove(
state,
host,
remote_temp_filename,
print_output=print_output,
print_input=print_input,
)
if not ssh_status:
raise IOError("failed to copy file over ssh")
if not status:
raise IOError("\n".join(stderr))
if print_output:
click.echo(
"{0}file downloaded from container: {1}".format(
host.print_prefix,
remote_filename,
),
err=True,
)
return status
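# --- Illustrative sketch (not part of the connector) ---
# What make_names_data() above yields for a single @dockerssh inventory entry;
# the host and image names are made up.
def _example_names_data():
    (name, data, groups), = make_names_data("remotehost:alpine:3.8")
    assert name == "@dockerssh/remotehost:alpine:3.8"
    assert data == {"ssh_hostname": "remotehost", "docker_image": "alpine:3.8"}
    assert groups == ["@dockerssh"]
    return name, data, groups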
|
{
"content_hash": "d09c011202f9cfb73e338a67ca36629d",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 99,
"avg_line_length": 27.169329073482427,
"alnum_prop": 0.5940733772342427,
"repo_name": "Fizzadar/pyinfra",
"id": "5aedd4a66a1e11bcc9581b71c60a081c7cecf816",
"size": "8504",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.x",
"path": "pyinfra/connectors/dockerssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "57"
},
{
"name": "Python",
"bytes": "861601"
},
{
"name": "Shell",
"bytes": "3448"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ocd.validation import enhance_html
import random
import string
class HTMLField(models.TextField):
"""
A string field for HTML content.
"""
description = _("HTML content")
def formfield(self, **kwargs):
ff = super(HTMLField, self).formfield(**kwargs)
if 'class' in ff.widget.attrs:
ff.widget.attrs['class'] += " wysiwyg"
else:
ff.widget.attrs['class'] = "wysiwyg"
return ff
def clean(self, value, model_instance):
value = super(HTMLField, self).clean(value, model_instance)
return enhance_html(value)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^ocd\.base_models\.HTMLField"])
except ImportError:
pass
UID_CHARS = string.lowercase + string.digits
UID_LENGTH = 24
def create_uid(length=UID_LENGTH):
"""
Creates a random code of lowercase letters and numbers
"""
return "".join(random.choice(UID_CHARS) for _x in xrange(length))
class UIDManager(models.Manager):
def get_by_natural_key(self, uid):
return self.get(uid=uid)
class UIDMixin(models.Model):
uid = models.CharField(max_length=UID_LENGTH, unique=True,
default=create_uid)
objects = UIDManager()
def natural_key(self):
return (self.uid,)
class Meta:
abstract = True
class ConfidentialMixin(models.Model):
class Meta:
abstract = True
confidential_reason = models.ForeignKey(
'communities.CommunityConfidentialReason',
blank=True,
null=True,)
is_confidential = models.BooleanField(
_('Is Confidential'),
default=False,
editable=False,)
def enforce_confidential_rules(self):
if self.confidential_reason is None:
self.is_confidential = False
else:
self.is_confidential = True
def save(self, *args, **kwargs):
self.enforce_confidential_rules()
return super(ConfidentialMixin, self).save(*args, **kwargs)
class ConfidentialByRelationMixin(models.Model):
confidential_from = None
class Meta:
abstract = True
is_confidential = models.BooleanField(
_('Is Confidential'),
default=False,
editable=False,)
def enforce_confidential_rules(self):
if not self.confidential_from:
# if the model is misconfigured in any way with respect to
# confidentiality, we want to raise an error here.
raise ValueError(_('Models with ConfidentialByRelationMixin must '
'declare a valid field which can pass on '
'confidentiality.'))
else:
# things seem good, so let's apply the confidential object logic.
confidential_relation = getattr(self, self.confidential_from)
if confidential_relation.is_confidential is True:
# when the confidential_relation is True, this *must* be true.
self.is_confidential = True
else:
self.is_confidential = False
def save(self, *args, **kwargs):
self.enforce_confidential_rules()
return super(ConfidentialByRelationMixin, self).save(*args, **kwargs)
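# --- Illustrative sketch (not part of this module) ---
# A hypothetical model combining the mixins above.  `confidential_from` names
# a relation whose target carries ConfidentialMixin, so saving the child
# copies the parent's confidentiality; the model and field names are made up.
#
# class IssueAttachment(UIDMixin, ConfidentialByRelationMixin):
#     confidential_from = 'issue'
#     issue = models.ForeignKey('issues.Issue')  # target uses ConfidentialMixin
#     uploaded_file = models.FileField(upload_to='attachments')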
|
{
"content_hash": "d57d64da3f4ff0e8616455a7dc17a048",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 27.737704918032787,
"alnum_prop": 0.6270685579196218,
"repo_name": "hasadna/OpenCommunity",
"id": "5b38bdf7247c00a2a9017d97a7428018ca9c979b",
"size": "3384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ocd/base_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "596925"
},
{
"name": "HTML",
"bytes": "235437"
},
{
"name": "JavaScript",
"bytes": "2238750"
},
{
"name": "Python",
"bytes": "1264867"
},
{
"name": "Shell",
"bytes": "699"
}
],
"symlink_target": ""
}
|
"""Defines utility things.
"""
import os
import shelve
import inspect
import hashlib
import datetime
from enum import Enum
from django.conf import settings
class ChoiceEnum(Enum):
"""Enum class which can be used in django field choices.
"""
@classmethod
def choices(cls):
# get all members of the class
members = inspect.getmembers(cls, lambda m: not inspect.isroutine(m))
# filter down to just properties
props = [m for m in members if m[0][:2] != '__']
# format into django choice tuple
return tuple([(str(p[1].value), p[0]) for p in props])
def gen_random_md5(len_=32):
return hashlib.md5(str(datetime.datetime.now())).hexdigest()[:len_]
class FileBroker(object):
"""For debug mode, we don't need to use asynchronous search with celery
and redis broker.
To achieve this without modifying client code, server mimics celery
behavior and replaces redis with txt file.
NOTE that `shelve` isn't thread safe. That being said, this object should
never be used in production env.
"""
_FILENAME = 'filebroker'
def __init__(self):
self._broker = shelve.open(
os.path.join(settings.BASE_DIR, self._FILENAME))
def get(self, key):
if self._broker.has_key(key):
return self._broker[key]
def set(self, value):
"""`key` is generated by random hashing and returned.
"""
key = gen_random_md5()
self._broker[key] = value
return key
def close(self):
self._broker.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
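# --- Illustrative sketch (not part of this module) ---
# How ChoiceEnum.choices() is meant to be consumed: subclass it and feed the
# generated tuples to a Django field's `choices`.  The member names and values
# below are made up; choices() lists members alphabetically by name.
class ExampleStatus(ChoiceEnum):
    draft = 0
    published = 1
# ExampleStatus.choices() == (('0', 'draft'), ('1', 'published'))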
|
{
"content_hash": "631b2c89dedb57cac4914f6beaf25336",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 25.772727272727273,
"alnum_prop": 0.6272780717225162,
"repo_name": "daftshady/dropbeat",
"id": "8e65cbf0b8833d4a063dd07ba8a0c8bb01cc5628",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dropbeat/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32824"
},
{
"name": "HTML",
"bytes": "8274"
},
{
"name": "JavaScript",
"bytes": "221186"
},
{
"name": "Python",
"bytes": "51130"
},
{
"name": "Shell",
"bytes": "307"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst', 'rb') as readme_file:
readme = readme_file.read().decode('utf8')
with open('HISTORY.rst', 'rb') as history_file:
history = history_file.read().decode('utf8')
requirements = [
"marshmallow>=3.10.0",
"pymongo>=3.7.0",
]
setup(
name='umongo',
version='3.1.0',
description="sync/async MongoDB ODM, yes.",
long_description=readme + '\n\n' + history,
author="Emmanuel Leblond, Jérôme Lafréchoux",
author_email='jerome@jolimont.fr',
url='https://github.com/touilleMan/umongo',
packages=['umongo', 'umongo.frameworks'],
include_package_data=True,
python_requires='>=3.7',
install_requires=requirements,
extras_require={
'motor': ['motor>=2.0,<3.0'],
'txmongo': ['txmongo>=19.2.0'],
'mongomock': ['mongomock'],
},
license="MIT",
zip_safe=False,
keywords='umongo mongodb pymongo txmongo motor mongomock asyncio twisted',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
],
)
|
{
"content_hash": "5d72d5263366ab1f4a4f4684930dc10d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 30.653061224489797,
"alnum_prop": 0.6138482023968043,
"repo_name": "Scille/umongo",
"id": "aa5b05c308daba4e7abf64430a144f168fc20e58",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2591"
},
{
"name": "Python",
"bytes": "411515"
}
],
"symlink_target": ""
}
|
import pkg_resources
from turbodbc_intern import Megabytes, Rows
from .api_constants import apilevel, paramstyle, threadsafety
from .connect import connect
from .constructors import Date, Time, Timestamp
from .data_types import BINARY, DATETIME, NUMBER, ROWID, STRING
from .exceptions import DatabaseError, Error, InterfaceError, ParameterError
from .options import make_options
try:
__version__ = pkg_resources.get_distribution(__name__).version
except: # noqa: E722
__version__ = "unknown"
__all__ = [
"Megabytes",
"Rows",
"apilevel",
"paramstyle",
"threadsafety",
"connect",
"Date",
"Time",
"Timestamp",
"BINARY",
"DATETIME",
"NUMBER",
"ROWID",
"STRING",
"DatabaseError",
"Error",
"InterfaceError",
"ParameterError",
"make_options",
]
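# --- Illustrative sketch (not part of this package) ---
# Typical use of the names re-exported above; the DSN is made up and the
# option names follow the turbodbc documentation (read_buffer_size measured
# with the Megabytes helper, options passed via `turbodbc_options`).
def _example_connection():
    options = make_options(read_buffer_size=Megabytes(42))
    connection = connect(dsn="example_dsn", turbodbc_options=options)
    cursor = connection.cursor()
    cursor.execute("SELECT 42")
    return cursor.fetchall()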
|
{
"content_hash": "f1bafa9c473b1eb853d05c8777c3f1d5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 22.944444444444443,
"alnum_prop": 0.6634382566585957,
"repo_name": "blue-yonder/turbodbc",
"id": "f0b9a14c0deec54e330e13489d4988dc0c94d5ac",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/turbodbc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "285"
},
{
"name": "C",
"bytes": "1227"
},
{
"name": "C++",
"bytes": "599284"
},
{
"name": "CMake",
"bytes": "32908"
},
{
"name": "Earthly",
"bytes": "15678"
},
{
"name": "Makefile",
"bytes": "8082"
},
{
"name": "Python",
"bytes": "134301"
},
{
"name": "Shell",
"bytes": "357"
}
],
"symlink_target": ""
}
|
import numpy as np
import os.path
from pickle_ import dump, load
# Logger
from log import getLogger
logger = getLogger('basic')
# all
__all__ = [
'coefficient',
'points',
'required_total',
'required_weight'
]
# Source files
WILKS_TABLE_MEN = 'wilks_data/wilks_men.txt'
WILKS_TABLE_WOMEN = 'wilks_data/wilks_women.txt'
# Working file(s)
WILKS_DICTIONARY_FILE = 'wilks_data/wilks_dictionary.dat'
# Global dictionary
WILKS_DICTIONARY = None
# Parse functions
# parse_wilks_table
def parse_wilks_table(fid):
# Open file if required
own_fid = False
if isinstance(fid, basestring):
fid = open(fid, 'r')
own_fid = True
# Skip first line
fid.readline()
W = []
C = []
for line in fid:
as_floats = [float(x) for x in line.split('\t')]
# Get base weight and coefficients
base_weight = as_floats[0]
coefficients = as_floats[1:]
# Get all weights
all_weights = base_weight + np.linspace(0.,1.,10,endpoint=False)
# Save
W.append(all_weights)
C.append(coefficients)
# Close file if required
if own_fid:
fid.close()
return np.hstack(W), np.hstack(C)
# update_wilks_dictionary
def update_wilks_dictionary():
global WILKS_DICTIONARY
# Parse each set of wilks coefficients
wilks_ = {}
wilks_['M'] = parse_wilks_table(WILKS_TABLE_MEN)
wilks_['F'] = parse_wilks_table(WILKS_TABLE_WOMEN)
# Dump to file
dump(WILKS_DICTIONARY_FILE, wilks_)
# Reset WILKS_DICTIONARY
WILKS_DICTIONARY = wilks_
# Access function
# wilks_dictionary
def wilks_dictionary(update=False):
global WILKS_DICTIONARY
# Update if required or file is not present
if update or not os.path.exists(WILKS_DICTIONARY_FILE):
update_wilks_dictionary()
# Load the file if it is None
if WILKS_DICTIONARY is None:
WILKS_DICTIONARY = load(WILKS_DICTIONARY_FILE)
return WILKS_DICTIONARY
# Calculation functions
# coefficient
def coefficient(g, w):
# Get the Wilks dictionary
d = wilks_dictionary()
# Get the weight and coefficient arrays for the given gender
try:
W,C = d[g]
except KeyError:
        raise KeyError('Gender "%s" not recognised' % g)
# Check bounds
if w < W[0] or w > W[-1]:
logger.warning('Input weight %.1f is outside of range: '\
'[%.1f, %.1f]', w, W[0], W[-1])
# Find closest weight
index = np.argmin(np.abs(w - W))
logger.debug('Closest weight: %f -> %.1f', w, W[index])
# Return the coefficient
return C[index]
# points
def points(g, w, total):
return coefficient(g, w) * total
# required_total
def required_total(g, w, points):
return float(points) / coefficient(g, w)
# required_weight
def required_weight(g, total, points):
# Get the Wilks dictionary
d = wilks_dictionary()
# Get the weight and coefficient arrays for the given gender
try:
W,C = d[g]
except KeyError:
        raise KeyError('Gender "%s" not recognised' % g)
# Calculate desired coefficient
coeff = float(points) / total
# Find closest coefficient at the largest weight
# NOTE May not be as accurate at the fringes of the wilks coefficients
# where the function is non-monotonic
indices = np.nonzero(C >= coeff)[0]
try:
max_index = np.amax(indices)
except ValueError:
        raise ValueError('Coefficient required (%.3f) is too large' % coeff)
return W[max_index]
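# --- Hedged usage sketch (not part of the original module) ---
# Shows how the calculation functions above fit together; the gender code,
# bodyweight and totals are illustrative values, and this assumes the
# wilks_data source files are present so wilks_dictionary() can be built.
if __name__ == '__main__':
    print 'coefficient:', coefficient('M', 82.5)
    print 'points for a 500 kg total:', points('M', 82.5, 500.0)
    print 'total needed for 350 points:', required_total('M', 82.5, 350.0)
    print 'heaviest weight making 350 points from 500 kg:', required_weight('M', 500.0, 350.0)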
|
{
"content_hash": "c048be25789dd73fc2cf7636aa3a9e65",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 74,
"avg_line_length": 22.928571428571427,
"alnum_prop": 0.6335315774568111,
"repo_name": "rstebbing/powerlifting-meet-manager",
"id": "b682c2b158de9195fe53d639de67a987bee48a68",
"size": "3987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wilks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52506"
}
],
"symlink_target": ""
}
|
from utils import CanadianScraper, CanadianPerson as Person
from pupa.scrape import Organization
import re
COUNCIL_PAGE = 'http://www.gov.pe.ca/mapp/municipalitites.php'
class PrinceEdwardIslandMunicipalitiesPersonScraper(CanadianScraper):
def scrape(self):
page = self.lxmlize(COUNCIL_PAGE)
districts = page.xpath('//div[@id="left-content" or @id="right-content"]//a')
for district in districts:
url = district.attrib['href']
page = self.lxmlize(url)
org = Organization(name=district.text_content() + ' Council', classification='legislature', jurisdiction_id=self.jurisdiction.jurisdiction_id)
org.add_source(url)
yield org
info = page.xpath('//div[@style="WIDTH:750"]/dl')
for contact in info:
contact_type = contact.xpath('./dt')[0].text_content()
contact = contact.xpath('./dd')[0].text_content().replace('(', '').replace(') ', '-')
if 'Officials' in contact_type:
break
if 'Tel' in contact_type:
phone = contact
if 'Fac' in contact_type:
fax = contact
if 'Address' in contact_type:
address = contact
if 'Email' in contact_type:
email = contact
if 'Website' in contact_type:
site = contact
councillors = page.xpath('//div[@style="WIDTH:750"]/dl/dt[contains(text(), "Elected Officials")]/parent::dl/dd/pre/text()')[0].splitlines(True)
for councillor in councillors:
name = councillor.replace('(Mayor)', '').replace('(Deputy Mayor)', '').replace('(Chairperson)', '').strip()
role = re.sub(r'\(|\)', '', councillor.replace(name, '').strip())
if not role:
role = 'Councillor'
p = Person(primary_org='legislature', name=name, district=district.text_content())
p.add_source(COUNCIL_PAGE)
p.add_source(url)
membership = p.add_membership(org, role=role, district=district.text_content())
membership.add_contact_detail('voice', self.clean_telephone_number(phone), 'legislature')
membership.add_contact_detail('fax', self.clean_telephone_number(fax), 'legislature')
membership.add_contact_detail('address', self.clean_address(address), 'legislature')
membership.add_contact_detail('email', email)
if site:
p.add_link(site)
yield p
|
{
"content_hash": "88b78d846cf52c81d47b4ef1784d41c0",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 155,
"avg_line_length": 48.45454545454545,
"alnum_prop": 0.5579737335834897,
"repo_name": "opencivicdata/scrapers-ca",
"id": "0a199b912e34ef8fd24e4b7512ea234b5482cdbc",
"size": "2665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disabled/ca_pe_municipalities/people.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "832"
},
{
"name": "Python",
"bytes": "374889"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
}
|
"""Tests for deCONZ config flow."""
import asyncio
import pydeconz
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.deconz import config_flow
from homeassistant.components.deconz.config_flow import (
CONF_MANUAL_INPUT,
CONF_SERIAL,
DECONZ_MANUFACTURERURL,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_MASTER_GATEWAY,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from .test_gateway import API_KEY, BRIDGEID, setup_deconz_integration
from tests.async_mock import patch
async def test_flow_discovered_bridges(hass, aioclient_mock):
"""Test that config flow works for discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[
{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80},
{"id": "1234E567890A", "internalipaddress": "5.6.7.8", "internalport": 80},
],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration_decision(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: CONF_MANUAL_INPUT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration(hass, aioclient_mock):
"""Test that config flow works with manual configuration after no discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_manual_configuration_after_discovery_timeout(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=asyncio.TimeoutError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_after_discovery_ResponseError(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=config_flow.ResponseError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_update_configuration(hass, aioclient_mock):
"""Test that manual configuration can update existing config entry."""
gateway = await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "2.3.4.5", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://2.3.4.5:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://2.3.4.5:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
async def test_manual_configuration_dont_update_configuration(hass, aioclient_mock):
"""Test that _create_entry work and that bridgeid can be requested."""
await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_manual_configuration_timeout_get_bridge(hass, aioclient_mock):
"""Test that _create_entry handles a timeout."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config", exc=asyncio.TimeoutError
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "no_bridges"
async def test_link_get_api_key_ResponseError(hass, aioclient_mock):
"""Test config flow should abort if no API key was possible to retrieve."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post("http://1.2.3.4:80/api", exc=pydeconz.errors.ResponseError)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
assert result["errors"] == {"base": "no_key"}
async def test_flow_ssdp_discovery(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_ssdp_discovery_not_deconz_bridge(hass):
"""Test a non deconz bridge being discovered over ssdp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={ssdp.ATTR_UPNP_MANUFACTURER_URL: "not deconz bridge"},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_deconz_bridge"
async def test_ssdp_discovery_update_configuration(hass):
"""Test if a discovered bridge is configured but updates with new attributes."""
gateway = await setup_deconz_integration(hass)
with patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://2.3.4.5:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
assert len(mock_setup_entry.mock_calls) == 1
async def test_ssdp_discovery_dont_update_configuration(hass):
"""Test if a discovered bridge has already been configured."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_ssdp_discovery_dont_update_existing_hassio_configuration(hass):
"""Test to ensure the SSDP discovery does not update an Hass.io entry."""
gateway = await setup_deconz_integration(hass, source="hassio")
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_flow_hassio_discovery(hass):
"""Test hassio discovery flow works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
"addon": "Mock Addon",
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_SERIAL: BRIDGEID,
CONF_API_KEY: API_KEY,
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "Mock Addon"}
with patch(
"homeassistant.components.deconz.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_hassio_discovery_update_configuration(hass):
"""Test we can update an existing config entry."""
gateway = await setup_deconz_integration(hass)
with patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "2.3.4.5",
CONF_PORT: 8080,
CONF_API_KEY: "updated",
CONF_SERIAL: BRIDGEID,
},
context={"source": "hassio"},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
assert gateway.config_entry.data[CONF_PORT] == 8080
assert gateway.config_entry.data[CONF_API_KEY] == "updated"
assert len(mock_setup_entry.mock_calls) == 1
async def test_hassio_discovery_dont_update_configuration(hass):
"""Test we can update an existing config entry."""
await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
CONF_SERIAL: BRIDGEID,
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_option_flow(hass):
"""Test config flow options."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.options.async_init(gateway.config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "deconz_devices"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ALLOW_CLIP_SENSOR: False, CONF_ALLOW_DECONZ_GROUPS: False},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_ALLOW_CLIP_SENSOR: False,
CONF_ALLOW_DECONZ_GROUPS: False,
CONF_MASTER_GATEWAY: True,
}
|
{
"content_hash": "a78f52ad727200193e7760169ead1cae",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 92,
"avg_line_length": 32.64685314685315,
"alnum_prop": 0.6230052479383099,
"repo_name": "tchellomello/home-assistant",
"id": "43536a44bbece1979595de6a87a124f0c169b6a5",
"size": "18674",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/deconz/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Vm(Object):
__typename__ = 'vm'
def __init__(self, env, name, boot=None):
self.env = env
self.name = name
self.interfaces = []
self.disks = []
self.boot = boot or 'hd'
def add_interface(self, **kwargs):
if 'interface' in kwargs:
interface = kwargs['interface']
else:
interface = Interface(**kwargs)
self.interfaces.append(interface)
return interface
def add_disk(self, **kwargs):
if 'disk' in kwargs:
disk = kwargs['disk']
else:
disk = Disk(**kwargs)
self.disks.append(disk)
return disk
def start(self):
if not self.status():
LOG.debug('Starting virtual machine %s' % self.name)
self.env.driver.vm_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping virtual machine %s' % self.name)
self.env.driver.vm_stop(self)
def status(self):
status = self.env.driver.vm_status(self)
LOG.debug('Virtual machine %s status %s' % (self.name, status))
return status
class Interface(object):
def __init__(self, mac, network):
self.mac = mac
self.network = network
class Disk(object):
def __init__(self, size=None, base=None):
self.size = size
self.base = base
|
{
"content_hash": "a6840a66b1d2b7f44ad832a48f03690a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 71,
"avg_line_length": 25,
"alnum_prop": 0.559322033898305,
"repo_name": "Axam/nsx-web",
"id": "2e7c847cefc70b46bb5e2cec94b37cb00259c70c",
"size": "2053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fuel_agent_ci/fuel_agent_ci/objects/vm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99402"
},
{
"name": "JavaScript",
"bytes": "553275"
},
{
"name": "Python",
"bytes": "2623980"
},
{
"name": "Ruby",
"bytes": "33345"
},
{
"name": "Shell",
"bytes": "29681"
}
],
"symlink_target": ""
}
|
import SimpleHTTPServer
import SocketServer
import requests
import md5
def calc_hash_terrain(s):
# When Ian built the hash for terrain tiles he used the path without
# the leading slash and the first 6 chars of the hex digest instead of 5
m = md5.new()
m.update(s[1:])
md5_hash = m.hexdigest()
return md5_hash[:6]
def calc_hash_vector(s):
m = md5.new()
m.update(s)
md5_hash = m.hexdigest()
return md5_hash[:5]
date_prefix = ''
base_url = ''
calc_hash = None
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
server_version = "0.1"
def do_GET(self):
query_params = self.path.split('?')
old_path = query_params[0]
md5_hash = calc_hash(old_path)
new_path = '%(date)s/%(md5)s%(path)s' % dict(
date=date_prefix,
md5=md5_hash,
path=self.path
)
url = '%s/%s' % (base_url, new_path)
res = requests.get(url)
self.send_response(res.status_code, res.reason)
for k, v in res.headers.iteritems():
if k != 'Server' and k != 'Date':
self.send_header(k, v)
if 'access-control-allow-origin' not in res.headers:
self.send_header('access-control-allow-origin', '*')
self.end_headers()
        kilobyte = 1024
chunk_size = 16 * kilobyte
for chunk in res.iter_content(chunk_size):
self.wfile.write(chunk)
self.wfile.close()
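# --- Hedged illustration (not part of the original script) ---
# Shows how the handler above assembles a proxied URL for a sample vector-tile
# path; the sample path, date prefix and bucket URL are assumptions. It mirrors
# do_GET: hash the path, then join base URL, date prefix, hash and path.
def example_proxied_url(path='/osm/all/1/0/0.json',
                        date='20150101',
                        bucket='https://example-tiles.s3.amazonaws.com'):
    return '%s/%s/%s%s' % (bucket, date, calc_hash_vector(path), path)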
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'port',
type=int,
help="Port to listen on")
parser.add_argument(
'date_prefix',
help="Date prefix string to append to the base URL")
parser.add_argument(
'base_url',
help="Base S3 URL to make requests to")
parser.add_argument(
'--terrain',
dest='variant',
action='store_const',
const='terrain',
default='vector',
help="Use Terrain tiles variant of hashing")
args = parser.parse_args()
date_prefix = args.date_prefix
base_url = args.base_url
if args.variant == 'vector':
calc_hash = calc_hash_vector
elif args.variant == 'terrain':
calc_hash = calc_hash_terrain
else:
print "Uh oh I don't know how to hash %s" % args.variant
httpd = SocketServer.TCPServer(("", args.port), Handler)
print "Serving at http://localhost:%d/" % args.port
httpd.serve_forever()
|
{
"content_hash": "cfef23e9ab444b30d77f5abc43fec9d1",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 76,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.584256329113924,
"repo_name": "mapzen/tile-hash-proxy",
"id": "3ee9e1a1f6e9caa15e30202e5ef379c24877bc94",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tile-hash-proxy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2062"
}
],
"symlink_target": ""
}
|
from mpi4py import MPI
def bcast_if_comm_is_not_null(x, comm):
"""
MPI broadcast. Broadcast object over given communicator only if the
communicator is not null.
Parameters
----------
x : any object
The object would shared over given communicator.
comm : MPI_Comm
MPI communicator. Could be a valid communicator or null communicator.
The object `x` will be broadcast only if `comm` is not null.
Returns
-------
x : any object | None
Return the object `x` if `comm` is not MPI.COMM_NULL, or return None if
`comm` is MPI.COMM_NULL.
"""
if comm != MPI.COMM_NULL:
x = comm.bcast(x)
else:
x = None
return x
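# --- Hedged usage sketch (not part of the original module) ---
# Splits COMM_WORLD so that odd ranks receive MPI.COMM_NULL, then calls the
# helper above; run it under mpiexec with at least two processes. The payload
# dictionary is purely illustrative.
if __name__ == '__main__':
    world = MPI.COMM_WORLD
    color = 0 if world.rank % 2 == 0 else MPI.UNDEFINED
    sub_comm = world.Split(color, world.rank)
    payload = {'value': 42} if color == 0 and sub_comm.rank == 0 else None
    print(bcast_if_comm_is_not_null(payload, sub_comm))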
|
{
"content_hash": "57387b0217ad99097ff19af01900662e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.603905160390516,
"repo_name": "ibara1454/pyss",
"id": "1c5963fc17264bcaa0660b1397df9e0f180c3821",
"size": "764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyss/mpi/util/communicative.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "14795"
},
{
"name": "Python",
"bytes": "68840"
},
{
"name": "Shell",
"bytes": "1098"
}
],
"symlink_target": ""
}
|
import inspect
import six
from oslo_utils import strutils
from oslo_utils import uuidutils
import wsme
from wsme import types as wtypes
from magnum.common import exception
from magnum.common import utils
from magnum.i18n import _
class DNSListType(wtypes.UserType):
"""A comman delimited dns nameserver list"""
basetype = six.string_types
name = "dnslist"
@staticmethod
def validate(value):
return utils.validate_dns(value)
class MacAddressType(wtypes.UserType):
"""A simple MAC address type."""
basetype = wtypes.text
name = 'macaddress'
@staticmethod
def validate(value):
return utils.validate_and_normalize_mac(value)
@staticmethod
def frombasetype(value):
if value is None:
return None
return MacAddressType.validate(value)
class NameType(wtypes.UserType):
"""A logical name type."""
basetype = wtypes.text
name = 'name'
@staticmethod
def validate(value):
if not utils.is_name_safe(value):
raise exception.InvalidName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return NameType.validate(value)
class UuidType(wtypes.UserType):
"""A simple UUID type."""
basetype = wtypes.text
name = 'uuid'
@staticmethod
def validate(value):
if not uuidutils.is_uuid_like(value):
raise exception.InvalidUUID(uuid=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidType.validate(value)
class BooleanType(wtypes.UserType):
"""A simple boolean type."""
basetype = wtypes.text
name = 'boolean'
@staticmethod
def validate(value):
try:
return strutils.bool_from_string(value, strict=True)
except ValueError as e:
# raise Invalid to return 400 (BadRequest) in the API
raise exception.Invalid(e)
@staticmethod
def frombasetype(value):
if value is None:
return None
return BooleanType.validate(value)
class MultiType(wtypes.UserType):
"""A complex type that represents one or more types.
Used for validating that a value is an instance of one of the types.
:param types: Variable-length list of types.
"""
basetype = wtypes.text
def __init__(self, *types):
self.types = types
def __str__(self):
return ' | '.join(map(str, self.types))
def validate(self, value):
for t in self.types:
try:
return wtypes.validate_value(t, value)
except (exception.InvalidUUID, ValueError):
pass
else:
raise ValueError(
_("Wrong type. Expected '%(type)s', got '%(value)s'")
% {'type': self.types, 'value': type(value)})
dns_list = DNSListType()
macaddress = MacAddressType()
uuid = UuidType()
name = NameType()
uuid_or_name = MultiType(UuidType, NameType)
boolean = BooleanType()
class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation."""
path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
mandatory=True)
op = wtypes.wsattr(wtypes.Enum(wtypes.text, 'add', 'replace', 'remove'),
mandatory=True)
value = MultiType(wtypes.text, int)
# The class of the objects being patched. Override this in subclasses.
# Should probably be a subclass of magnum.api.controllers.base.APIBase.
_api_base = None
# Attributes that are not required for construction, but which may not be
# removed if set. Override in subclasses if needed.
_extra_non_removable_attrs = set()
# Set of non-removable attributes, calculated lazily.
_non_removable_attrs = None
@staticmethod
def internal_attrs():
"""Returns a list of internal attributes.
Internal attributes can't be added, replaced or removed. This
method may be overwritten by derived class.
"""
return ['/created_at', '/id', '/links', '/updated_at',
'/uuid', '/project_id', '/user_id']
@classmethod
def non_removable_attrs(cls):
"""Returns a set of names of attributes that may not be removed.
Attributes whose 'mandatory' property is True are automatically added
to this set. To add additional attributes to the set, override the
field _extra_non_removable_attrs in subclasses, with a set of the form
{'/foo', '/bar'}.
"""
if cls._non_removable_attrs is None:
cls._non_removable_attrs = cls._extra_non_removable_attrs.copy()
if cls._api_base:
fields = inspect.getmembers(cls._api_base,
lambda a: not inspect.isroutine(a))
for name, field in fields:
if getattr(field, 'mandatory', False):
cls._non_removable_attrs.add('/%s' % name)
return cls._non_removable_attrs
@staticmethod
def validate(patch):
if patch.path in patch.internal_attrs():
msg = _("'%s' is an internal attribute and can not be updated")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.path in patch.non_removable_attrs() and patch.op == 'remove':
msg = _("'%s' is a mandatory attribute and can not be removed")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.op != 'remove':
if patch.value is None or patch.value == wtypes.Unset:
msg = _("'add' and 'replace' operations needs value")
raise wsme.exc.ClientSideError(msg)
ret = {'path': patch.path, 'op': patch.op}
if patch.value is not None and patch.value != wtypes.Unset:
ret['value'] = patch.value
return ret
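# --- Hedged illustration (not part of the original module) ---
# Sketches how a controller typically specialises JsonPatchType, following the
# class-level notes above; ClusterPatchType and the '/stack_id' attribute are
# assumptions for illustration only.
class ClusterPatchType(JsonPatchType):
    """Example json-patch type that never allows '/name' to be removed."""
    _extra_non_removable_attrs = {'/name'}
    @staticmethod
    def internal_attrs():
        return JsonPatchType.internal_attrs() + ['/stack_id']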
|
{
"content_hash": "658cbedde85f18c7e052fc664abf493c",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 29.307317073170733,
"alnum_prop": 0.6126830892143809,
"repo_name": "ArchiFleKs/magnum",
"id": "5fa6c51fe45b1a680c82bc237bfb7094b3df7eba",
"size": "6637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/api/controllers/v1/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "9809"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "2366803"
},
{
"name": "Shell",
"bytes": "569794"
}
],
"symlink_target": ""
}
|
import getpass
import os
import json
import requests
import uuid
class GithubException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Github:
def get_credential(self,scopes,save_path):
if os.path.isfile(save_path):
with open(save_path) as f:
return json.loads(f.read())
user = auth()
hash_str = uuid.uuid1()
payload = {"scopes": scopes, "note": "use gist_it #%s" % hash_str}
response = requests.post("https://api.github.com/authorizations", data=json.dumps(payload), auth=(user[0], user[1]))
        if response.status_code != 201:
raise GithubException("github oauth api error: %s" % response.text)
with open(save_path, "w") as f:
f.write(response.text)
return json.loads(response.text)
def auth():
user_name = raw_input('user_name : ')
password = getpass.getpass('password : ')
if not user_name or not password:
raise GithubException("username or password is not found")
return user_name, password
if __name__ == '__main__':
pass
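    # --- Hedged usage sketch (not part of the original script) ---
    # The scope list and cache filename are assumptions; this prompts for
    # GitHub credentials and calls the since-retired authorizations API, so
    # treat it as an illustration of the intended flow rather than a live demo.
    credential = Github().get_credential(["gist"], ".gist_it_credential")
    print credential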
|
{
"content_hash": "02244509b0f6aa7f561774a075cfc3c2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 124,
"avg_line_length": 27.88095238095238,
"alnum_prop": 0.609735269000854,
"repo_name": "gk024kfd/gist_it",
"id": "b590a230c1456dda2dbe0a9c6d9f02032a459c9b",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4407"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.context_processors import csrf
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
try:
from json import dumps as to_json
except ImportError:
from simplejson import dumps as to_json
from uuid import UUID
import cql, threading
__local = threading.local()
def __get_cursor():
if not hasattr(__local, "conn") or not getattr(__local, "conn"):
__local.conn = cql.connect(settings.CASSANDRA_HOST,
settings.CASSANDRA_PORT)
return __local.conn.cursor()
def __execute(query, keyspace=None):
# Lazily assign a cursor instance to a thread-local variable
cursor = __get_cursor()
if keyspace:
cursor.execute("USE " + keyspace)
cursor.execute(query)
return cursor
def __serialize(cursor):
def marshal(value):
if isinstance(value, (UUID,long)):
return str(value)
return value
if not hasattr(cursor, "result"):
return to_json({"void": "Success"})
rows = {}
for x in range(cursor.rowcount):
r = cursor.fetchone()
rows[r[0]] = []
for (j, column_value) in enumerate(r[1:]):
column_name = cursor.description[j+1][0]
rows[r[0]].append({"name": marshal(column_name),
"value": marshal(column_value)})
return to_json({"rows": rows})
# View methods
def index(request):
return render_to_response("index.html", csrf(request))
def query(request):
query_string = request.POST['post_data']
try:
if query_string.upper().startswith("USE"):
__execute(query_string)
keyspace = query_string.split()[1].strip(";")
# Giving people access to the system keyspace would be Bad
if keyspace == "system":
raise cql.DatabaseError("No. Not as stupid as I look.");
request.session["current_keyspace"] = keyspace
json = to_json({"void": "Using keyspace %s" % keyspace})
elif query_string.split()[1].upper().startswith("COUNT"):
current_keyspace = request.session.get("current_keyspace", None)
cursor = __execute(query_string, current_keyspace)
r = cursor.fetchone()
json = to_json({"int": r[0]})
else:
current_keyspace = request.session.get("current_keyspace", None)
cursor = __execute(query_string, current_keyspace)
json = __serialize(cursor)
    except cql.DatabaseError as error:
json = to_json({"exception": str(error)})
return HttpResponse(json, mimetype="application/json")
def describe_keyspaces(request):
cursor = __get_cursor()
# ...because it's amazing how much random shit broke between 1.0.3 and 1.0.5
if hasattr(cursor, "_connection"): client = cursor._connection.client
else: client = cursor.parent_connection.client
schema = {}
for ksdef in client.describe_keyspaces():
if ksdef.name == "system":
continue
schema[ksdef.name] = {}
for cfdef in ksdef.cf_defs:
schema[ksdef.name][cfdef.name] = None
return HttpResponse(to_json(schema), mimetype="application/json")
|
{
"content_hash": "5c1d7e2e71105adc973d66b5f423cb6c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 80,
"avg_line_length": 32.801980198019805,
"alnum_prop": 0.614246906127377,
"repo_name": "eevans/caqel",
"id": "5c0e86bea84d4ddba94395313aa5915d9bc978e4",
"size": "3314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "query/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8197"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ITDB_Main', '0010_auto_20150822_1221'),
]
operations = [
migrations.AddField(
model_name='cast',
name='end_year',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='cast',
name='is_alternating',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='cast',
name='is_understudy',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='cast',
name='start_year',
field=models.IntegerField(default=0),
),
]
|
{
"content_hash": "cbb4b2c34022a7cf3f2f718978103d0c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 53,
"avg_line_length": 25.696969696969695,
"alnum_prop": 0.5436320754716981,
"repo_name": "Plaudenslager/ITDB",
"id": "a0f01f66466fbfbf36099db17065e28f9bcdb073",
"size": "872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ITDB_Main/migrations/0011_auto_20150831_0148.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42485"
},
{
"name": "HTML",
"bytes": "12875"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "34623"
}
],
"symlink_target": ""
}
|
import struct
class Particle(object):
"""Information used to restart a specific particle that caused a simulation to
fail.
Parameters
----------
filename : str
Path to the particle restart file
Attributes
----------
current_batch : int
The batch containing the particle
gen_per_batch : int
Number of generations per batch
current_gen : int
The generation containing the particle
n_particles : int
Number of particles per generation
run_mode : int
Type of simulation (criticality or fixed source)
id : long
Identifier of the particle
weight : float
Weight of the particle
energy : float
Energy of the particle in MeV
xyz : list of float
Position of the particle
uvw : list of float
Directional cosines of the particle
"""
def __init__(self, filename):
import h5py
self._f = h5py.File(filename, 'r')
# Ensure filetype and revision are correct
if 'filetype' not in self._f or self._f[
'filetype'].value.decode() != 'particle restart':
raise IOError('{} is not a particle restart file.'.format(filename))
if self._f['revision'].value != 1:
raise IOError('Particle restart file has a file revision of {} '
'which is not consistent with the revision this '
'version of OpenMC expects ({}).'.format(
self._f['revision'].value, 1))
@property
def current_batch(self):
return self._f['current_batch'].value
@property
def current_gen(self):
return self._f['current_gen'].value
@property
def energy(self):
return self._f['energy'].value
@property
def gen_per_batch(self):
return self._f['gen_per_batch'].value
@property
def id(self):
return self._f['id'].value
@property
def n_particles(self):
return self._f['n_particles'].value
@property
def run_mode(self):
return self._f['run_mode'].value.decode()
@property
def uvw(self):
return self._f['uvw'].value
@property
def weight(self):
return self._f['weight'].value
@property
def xyz(self):
return self._f['xyz'].value
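# --- Hedged usage sketch (not part of the original module) ---
# Opens a particle restart file written by OpenMC and prints the fields the
# properties above expose; the filename is an assumption for illustration.
if __name__ == '__main__':
    p = Particle('particle_10_744.h5')
    print('batch {}, generation {} of {}'.format(
        p.current_batch, p.current_gen, p.gen_per_batch))
    print('particle {}: weight {}, energy {} MeV'.format(p.id, p.weight, p.energy))
    print('position {}, direction {}'.format(p.xyz, p.uvw))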
|
{
"content_hash": "838dd756ee30dcf21f578903915c1375",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 82,
"avg_line_length": 26.2,
"alnum_prop": 0.5797285835453775,
"repo_name": "kellyrowland/openmc",
"id": "72bf3ac3dec80b20e034aa7e1542a32ba86a3e31",
"size": "2358",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openmc/particle_restart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "32103"
},
{
"name": "FORTRAN",
"bytes": "1300206"
},
{
"name": "Makefile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "867931"
},
{
"name": "Shell",
"bytes": "1831"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
os.pathsep
a = np.complex(1,1)
|
{
"content_hash": "803ad2e128644ca87e87adc093e72b2f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 19,
"avg_line_length": 10.666666666666666,
"alnum_prop": 0.6875,
"repo_name": "gregnordin/micropython_pyboard",
"id": "df26e189436dbf282169de5f2e870f9eb1ac1615",
"size": "64",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "150729_pyboard_to_pyqtgraph/test_atom_autosuggest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16459"
}
],
"symlink_target": ""
}
|
from .connectivity_test import ConnectivityTest, Endpoint, ReachabilityDetails
from .reachability import (
CreateConnectivityTestRequest,
DeleteConnectivityTestRequest,
GetConnectivityTestRequest,
ListConnectivityTestsRequest,
ListConnectivityTestsResponse,
OperationMetadata,
RerunConnectivityTestRequest,
UpdateConnectivityTestRequest,
)
from .trace import (
AbortInfo,
CloudSQLInstanceInfo,
DeliverInfo,
DropInfo,
EndpointInfo,
FirewallInfo,
ForwardInfo,
ForwardingRuleInfo,
GKEMasterInfo,
InstanceInfo,
LoadBalancerBackend,
LoadBalancerInfo,
NetworkInfo,
RouteInfo,
Step,
Trace,
VpnGatewayInfo,
VpnTunnelInfo,
)
__all__ = (
"ConnectivityTest",
"Endpoint",
"ReachabilityDetails",
"CreateConnectivityTestRequest",
"DeleteConnectivityTestRequest",
"GetConnectivityTestRequest",
"ListConnectivityTestsRequest",
"ListConnectivityTestsResponse",
"OperationMetadata",
"RerunConnectivityTestRequest",
"UpdateConnectivityTestRequest",
"AbortInfo",
"CloudSQLInstanceInfo",
"DeliverInfo",
"DropInfo",
"EndpointInfo",
"FirewallInfo",
"ForwardInfo",
"ForwardingRuleInfo",
"GKEMasterInfo",
"InstanceInfo",
"LoadBalancerBackend",
"LoadBalancerInfo",
"NetworkInfo",
"RouteInfo",
"Step",
"Trace",
"VpnGatewayInfo",
"VpnTunnelInfo",
)
|
{
"content_hash": "a7f2f3ed868518203f29dbfe46aa2dbf",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 22.88888888888889,
"alnum_prop": 0.7045769764216366,
"repo_name": "googleapis/python-network-management",
"id": "d19cc05aa65dc989154dbae751ee06fbefee96d0",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/network_management_v1/types/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "349982"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.db import models
"""@package docstring
Documentation.
"""
class SCUser(AbstractBaseUser,PermissionsMixin):
"""La classe SCUser e' la rappresentazione della nostra classe utente che va a sostituire la classe utente default di
django, garantendo maggiore flessibilita' nella gestione dell'utente e la possibilita' di estendere i normali campi
della clsse User
"""
username = models.CharField(unique=True,max_length=20)
email = models.CharField (verbose_name='email address',unique=True,max_length=255)
first_name = models.CharField(max_length=30,null=True)
last_name = models.CharField(max_length=30,null=True)
date_joined = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True,null=False)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False,null=False)
profile_image = models.ImageField(upload_to="media/avatar/",blank=False,null=False,default="media/avatar/unknow_user.jpg")
user_bio = models.CharField(max_length=600,blank=True)
address = models.CharField(max_length=30,null=True,blank=True)
birth_date = models.DateField(null=False)
user = models.ManyToManyField('self',blank=True)
REQUIRED_FIELDS = ['email','first_name','last_name','birth_date']
USERNAME_FIELD = 'username'
objects = UserManager()
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
return self.first_name
def __unicode__(self):
return self.username
    def date(self, date=None):
        if date is not None:
            self.birth_date = date
        return str(self.birth_date)
    def get_set_address(self, address=None):
        if address is not None:
            self.address = address
        return str(self.address)
def set_name(self,first_name=None, last_name = None):
if first_name != None:
self.first_name = first_name
if last_name != None:
self.last_name = last_name
def set_email (self,email):
self.email = email
def set_username (self,username):
self.username = username
class Post (models.Model):
"""La classe Post gestisce le pubblicazioni di un certo utente. Essa e' in rapport ManyToMany con la classe SCUser
perche' e' prevista la possibilita' di condividere un certo post. Il contenuto di un post puo' essere testuale oppure
multimediale
"""
post_date = models.DateTimeField(auto_now_add=True)
TEXT= 'is_text'
VIDEO = 'is_video'
PHOTO = 'is_photo'
POST_CHOICES = (
(TEXT, 'Text'),
(VIDEO, 'Video'),
(PHOTO, 'Photo'),
)
post_type = models.CharField(max_length=20, choices=POST_CHOICES,default=TEXT)
content = models.CharField(max_length=600,null=True,blank=True,default="")
file = models.FileField(upload_to='media/%Y/%m/%d', blank=True)
post_user = models.ManyToManyField(SCUser)
def __unicode__(self):
return 'Post ID: '+ str(self.id)
class Like (models.Model):
"""La classe like tiene traccia dei like che un certo utente ha inserito e a quali posts sono riferiti
"""
like_date = models.DateTimeField(auto_now_add=True)
like_user = models.ForeignKey(SCUser)
like_post = models.ForeignKey(Post)
def __unicode__(self):
return unicode(self.like_date) + " | "+ self.like_user.username
class ChatRoom(models.Model):
"""La classe ChatRoom tiene traccia del nome della "stanza" e degli utenti che ne fanno parte. E' in relazione
ManyToMany con SCUser di modo da poter aver potenzialmente N utenti all'interno della stessa stanza. In realta' la view
e soprattuto il template ne limitano il potenziale non gestendo chat di gruppo ma solo chat composte da 2 SCUser.
"""
name = models.CharField(max_length=200)
user = models.ManyToManyField(SCUser)
def __unicode__(self):
return self.name
class Crono_chat(models.Model):
"""Questa classe si occupa di mantenere una cronologia della chat in una certa chatRoom
"""
mex = models.CharField(max_length=300)
chatRoom = models.ForeignKey(ChatRoom)
receiver = models.ForeignKey(SCUser,related_name="receiver")
sender = models.ForeignKey(SCUser,related_name="sender")
|
{
"content_hash": "468b90d25b8fa2be3ef9431ddcf4b8da",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 126,
"avg_line_length": 36.79338842975206,
"alnum_prop": 0.681266846361186,
"repo_name": "samzek/social_circle",
"id": "91b34917e1eab15e1b31a7dd264b40d9f8e066ab",
"size": "4452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SN/socialcircle/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52713"
},
{
"name": "HTML",
"bytes": "792546"
},
{
"name": "JavaScript",
"bytes": "72014"
},
{
"name": "Python",
"bytes": "48878"
}
],
"symlink_target": ""
}
|
"""Test PyMongo's SlaveOkay with:
- A direct connection to a standalone.
- A direct connection to a slave.
- A direct connection to a mongos.
"""
import itertools
import unittest
from mockupdb import MockupDB, going
from pymongo import MongoClient
from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name
from pymongo.topology_description import TOPOLOGY_TYPE
from .operations import operations
def topology_type_name(client):
topology_type = client._topology._description.topology_type
return TOPOLOGY_TYPE._fields[topology_type]
class TestSlaveOkaySingle(unittest.TestCase):
def setUp(self):
self.server = MockupDB()
self.server.run()
self.addCleanup(self.server.stop)
def create_slave_ok_single_test(mode, server_type, ismaster, operation):
def test(self):
ismaster_with_version = ismaster.copy()
ismaster_with_version["minWireVersion"] = 2
ismaster_with_version["maxWireVersion"] = 6
self.server.autoresponds("ismaster", **ismaster_with_version)
self.assertIn(
operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary")
)
pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None)
client = MongoClient(self.server.uri, read_preference=pref)
self.addCleanup(client.close)
with going(operation.function, client):
request = self.server.receive()
request.reply(operation.reply)
self.assertIn(topology_type_name(client), ["Sharded", "Single"])
return test
def generate_slave_ok_single_tests():
modes = "primary", "secondary", "nearest"
server_types = [
("standalone", {"ismaster": True}),
("slave", {"ismaster": False}),
("mongos", {"ismaster": True, "msg": "isdbgrid"}),
]
matrix = itertools.product(modes, server_types, operations)
for entry in matrix:
mode, (server_type, ismaster), operation = entry
test = create_slave_ok_single_test(mode, server_type, ismaster, operation)
test_name = "test_%s_%s_with_mode_%s" % (
operation.name.replace(" ", "_"),
server_type,
mode,
)
test.__name__ = test_name
setattr(TestSlaveOkaySingle, test_name, test)
generate_slave_ok_single_tests()
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "631600f3d73c53a8ba7a76b772e42942",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 96,
"avg_line_length": 29.463414634146343,
"alnum_prop": 0.652317880794702,
"repo_name": "ShaneHarvey/mongo-python-driver",
"id": "bd36c77a048049d554e1230cc51b433a36adf991",
"size": "2990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mockupdb/test_slave_okay_single.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "183641"
},
{
"name": "Python",
"bytes": "2982667"
},
{
"name": "Shell",
"bytes": "30026"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ChatAppConfig(AppConfig):
name = "openslides.chat"
verbose_name = "OpenSlides Chat"
def ready(self):
# Import all required stuff.
from ..utils.rest_api import router
from . import serializers # noqa
from .views import (
ChatGroupViewSet,
ChatMessageViewSet,
)
# Register viewsets.
router.register(
self.get_model("ChatGroup").get_collection_string(),
ChatGroupViewSet,
)
router.register(
self.get_model("ChatMessage").get_collection_string(), ChatMessageViewSet
)
def get_startup_elements(self):
"""
        Yields all Cachables required on startup, i.e. when opening the
        websocket connection.
"""
yield self.get_model("ChatGroup")
yield self.get_model("ChatMessage")
|
{
"content_hash": "5f9f52eea24622a4798e0148198c6797",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 85,
"avg_line_length": 28.28125,
"alnum_prop": 0.5966850828729282,
"repo_name": "CatoTH/OpenSlides",
"id": "71cede8377487c5c6a9372c8ae67c270ab5d62bc",
"size": "905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/openslides/chat/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114698"
},
{
"name": "Dockerfile",
"bytes": "853"
},
{
"name": "HTML",
"bytes": "417865"
},
{
"name": "JavaScript",
"bytes": "159617"
},
{
"name": "Python",
"bytes": "1185432"
},
{
"name": "Smarty",
"bytes": "7188"
},
{
"name": "TypeScript",
"bytes": "2327301"
}
],
"symlink_target": ""
}
|