text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import threading
import time
from datetime import datetime
from ginga.misc import Bunch, Datasrc, Callback, Future
class ChannelError(Exception):
    """Raised for channel-related errors (e.g. switching to an unknown
    image that cannot be reloaded or reconstructed)."""
class Channel(Callback.Callbacks):
    """Class to manage a channel.

    Parameters
    ----------
    name : str
        Name of the channel.

    fv : `~ginga.rv.Control.GingaShell`
        The reference viewer shell.

    settings : `~ginga.misc.Settings.SettingGroup`
        Channel settings.

    datasrc : `~ginga.misc.Datasrc.Datasrc`
        Data cache.
    """

    def __init__(self, name, fv, settings, datasrc=None):
        super(Channel, self).__init__()

        self.logger = fv.logger
        self.fv = fv
        self.settings = settings
        self.lock = threading.RLock()

        # CHANNEL ATTRIBUTES
        self.name = name
        self.widget = None
        self.container = None
        self.workspace = None
        self.opmon = None
        # this is the image viewer we are connected to
        self.fitsimage = None
        # this is the currently active viewer
        self.viewer = None
        self.viewers = []
        self.viewer_dict = {}
        if datasrc is None:
            num_images = self.settings.get('numImages', 1)
            datasrc = Datasrc.Datasrc(num_images)
        self.datasrc = datasrc
        # index into self.history of the currently shown image (-1: none)
        self.cursor = -1
        self.history = []
        # maps image name -> info Bunch for everything ever loaded here
        self.image_index = {}
        # external entities can attach stuff via this attribute
        self.extdata = Bunch.Bunch()

        self._configure_sort()
        self.settings.getSetting('sort_order').add_callback(
            'set', self._sort_changed_ext_cb)

    def connect_viewer(self, viewer):
        """Register a viewer with this channel, keyed by its ``vname``."""
        if viewer not in self.viewers:
            self.viewers.append(viewer)
            self.viewer_dict[viewer.vname] = viewer

    def move_image_to(self, imname, channel):
        """Move image `imname` to another channel (copy, then remove here)."""
        if self == channel:
            return

        self.copy_image_to(imname, channel)
        self.remove_image(imname)

    def copy_image_to(self, imname, channel, silent=False):
        """Copy image `imname` to `channel`.

        If the image has been evicted from our data cache, only the
        bookkeeping info (with its reload future) is transferred.
        """
        if self == channel:
            return

        try:
            # copy image to other channel's datasrc if still
            # in memory
            image = self.datasrc[imname]

        except KeyError:
            # transfer image info
            info = self.image_index[imname]
            channel._add_info(info)
            return

        channel.add_image(image, silent=silent)

    def remove_image(self, imname):
        """Drop `imname` from the cache and history; return its info (or None)."""
        if self.datasrc.has_key(imname):
            self.datasrc.remove(imname)

        info = self.remove_history(imname)
        return info

    def get_image_names(self):
        """Return the image names in history order."""
        return [info.name for info in self.history]

    def get_loaded_image(self, imname):
        """Get an image from memory.

        Parameters
        ----------
        imname : str
            Key, usually image name and extension.

        Returns
        -------
        image
            Image object.

        Raises
        ------
        KeyError
            Image is not in memory.
        """
        image = self.datasrc[imname]
        return image

    def add_image(self, image, silent=False, bulk_add=False):
        """Add `image` to the channel's cache and history.

        With `silent` no GUI updates are made; with `bulk_add` the GUI
        update is deferred via gui_do() so repeated adds coalesce.
        """
        imname = image.get('name', None)
        assert imname is not None, \
            ValueError("image has no name")

        self.logger.debug("Adding image '%s' in channel %s" % (
            imname, self.name))

        self.datasrc[imname] = image

        idx = image.get('idx', None)
        path = image.get('path', None)
        image_loader = image.get('image_loader', None)
        image_future = image.get('image_future', None)
        info = self.add_history(imname, path,
                                image_loader=image_loader,
                                image_future=image_future,
                                idx=idx)

        # we'll get notified if an image changes and can update
        # metadata and make a chained callback
        image.add_callback('modified', self._image_modified_cb)

        if not silent:
            if not bulk_add:
                self._add_image_update(image, info)
                return

            # By using gui_do() here, more images may be bulk added
            # before the _add_image_update executes--it will then
            # only update the gui for the latest image, which saves
            # work
            self.fv.gui_do(self._add_image_update, image, info)

    def add_image_info(self, info):
        """Record metadata-only info for an image (no pixel data loaded)."""
        image_loader = info.get('image_loader', self.fv.load_image)

        # create an image_future if one does not exist
        image_future = info.get('image_future', None)
        if (image_future is None) and (info.path is not None):
            image_future = Future.Future()
            image_future.freeze(image_loader, info.path)

        info = self.add_history(info.name, info.path,
                                image_loader=image_loader,
                                image_future=image_future)
        self.fv.make_async_gui_callback('add-image-info', self, info)

    def get_image_info(self, imname):
        """Return the info Bunch for `imname` (KeyError if unknown)."""
        return self.image_index[imname]

    def _add_image_update(self, image, info):
        """GUI follow-up for add_image(): callbacks, optional switch/raise."""
        self.fv.make_async_gui_callback('add-image', self.name, image, info)

        current = self.datasrc.youngest()
        curname = current.get('name')
        self.logger.debug("image=%s youngest=%s" % (image.get('name'), curname))
        # only react if this image is still the most recently added one
        if current != image:
            return

        # switch to current image?
        if self.settings['switchnew']:
            self.logger.debug("switching to new image '%s'" % (curname))
            self.switch_image(image)

        if self.settings['raisenew']:
            channel = self.fv.get_current_channel()
            if channel != self:
                self.fv.change_channel(self.name)

    def _image_modified_cb(self, image):
        """Stamp modification time and chain an 'add-image-info' callback."""
        imname = image.get('name')
        info = self.image_index[imname]
        info.time_modified = datetime.utcnow()
        self.logger.debug("image modified; making chained callback")
        self.fv.make_async_gui_callback('add-image-info', self, info)

    def refresh_cursor_image(self):
        """Show the image the history cursor points at, reloading if evicted."""
        info = self.history[self.cursor]
        if self.datasrc.has_key(info.name):
            # image still in memory
            image = self.datasrc[info.name]
            self.switch_image(image)
        else:
            self.switch_name(info.name)

    def prev_image(self, loop=True):
        """Move the cursor to the previous image (wrapping if `loop`)."""
        with self.lock:
            self.logger.debug("Previous image")
            if self.cursor <= 0:
                n = len(self.history) - 1
                if (not loop) or (n < 0):
                    self.logger.error("No previous image!")
                    return True
                self.cursor = n
            else:
                self.cursor -= 1

            self.refresh_cursor_image()

        return True

    def next_image(self, loop=True):
        """Move the cursor to the next image (wrapping if `loop`)."""
        with self.lock:
            self.logger.debug("Next image")
            n = len(self.history) - 1
            if self.cursor >= n:
                if (not loop) or (n < 0):
                    self.logger.error("No next image!")
                    return True
                self.cursor = 0
            else:
                self.cursor += 1

            self.refresh_cursor_image()

        return True

    def _add_info(self, info):
        """Insert `info` into history/index if not already present.

        NOTE: the original tested ``info in self.image_index``, which
        compares the info Bunch against the dict's *string* keys and so
        never matched, allowing duplicate history entries; keying on the
        name gives the intended duplicate check.
        """
        if info.name not in self.image_index:
            self.history.append(info)
            self.image_index[info.name] = info

            if self.hist_sort is not None:
                self.history.sort(key=self.hist_sort)

    def add_history(self, imname, path, idx=None,
                    image_loader=None, image_future=None):
        """Record `imname` in the history; return its (possibly existing) info."""
        if imname not in self.image_index:
            if image_loader is None:
                image_loader = self.fv.load_image
            # create an image_future if one does not exist
            if (image_future is None) and (path is not None):
                image_future = Future.Future()
                image_future.freeze(image_loader, path)

            info = Bunch.Bunch(name=imname, path=path,
                               idx=idx,
                               image_loader=image_loader,
                               image_future=image_future,
                               time_added=time.time(),
                               time_modified=None)
            self._add_info(info)
        else:
            # already in history
            info = self.image_index[imname]
        return info

    def remove_history(self, imname):
        """Remove `imname` from history; return its info, or None if absent."""
        if imname in self.image_index:
            info = self.image_index[imname]
            del self.image_index[imname]
            self.history.remove(info)
            return info
        return None

    def get_current_image(self):
        """Return the image currently shown in the active viewer."""
        return self.viewer.get_image()

    def view_object(self, dataobj):
        """Display `dataobj` in a suitable viewer, installing one if needed."""
        # find available viewers that can view this kind of object
        vnames = self.fv.get_viewer_names(dataobj)
        if len(vnames) == 0:
            raise ValueError("I don't know how to view objects of type '%s'" % (
                str(type(dataobj))))
        self.logger.debug("available viewers are: %s" % (str(vnames)))

        # for now, pick first available viewer that can view this type
        vname = vnames[0]
        # if we don't have this viewer type then install one in the channel
        if vname not in self.viewer_dict:
            self.fv.make_viewer(vname, self)

        self.viewer = self.viewer_dict[vname]
        # find this viewer and raise it
        idx = self.viewers.index(self.viewer)
        self.widget.set_index(idx)

        # and load the data
        self.viewer.set_image(dataobj)

    def switch_image(self, image):
        """Make `image` the displayed image; update cursor and preload queue."""
        with self.lock:
            curimage = self.get_current_image()
            if curimage != image:
                self.logger.debug("updating viewer...")
                self.view_object(image)

                # update cursor to match image
                imname = image.get('name')
                if imname in self.image_index:
                    info = self.image_index[imname]
                    if info in self.history:
                        self.cursor = self.history.index(info)

                self.fv.channel_image_updated(self, image)

                # Check for preloading any images into memory
                preload = self.settings.get('preload_images', False)
                if not preload:
                    return

                # queue next and previous files for preloading
                index = self.cursor
                if index < len(self.history) - 1:
                    info = self.history[index + 1]
                    if info.path is not None:
                        self.fv.add_preload(self.name, info)

                if index > 0:
                    info = self.history[index - 1]
                    if info.path is not None:
                        self.fv.add_preload(self.name, info)
            else:
                self.logger.debug("Apparently no need to set image.")

    def switch_name(self, imname):
        """Switch to image `imname`, reloading it from its future or path
        if it has been evicted from memory.

        Raises
        ------
        ChannelError
            If the name is unknown or the image cannot be recreated.
        """
        if self.datasrc.has_key(imname):
            # Image is still in the heap
            image = self.datasrc[imname]
            self.switch_image(image)
            return

        if imname not in self.image_index:
            errmsg = "No image by the name '%s' found" % (imname)
            self.logger.error("Can't switch to image '%s': %s" % (
                imname, errmsg))
            raise ChannelError(errmsg)

        # Do we have a way to reconstruct this image from a future?
        info = self.image_index[imname]
        if info.image_future is not None:
            self.logger.info("Image '%s' is no longer in memory; attempting "
                             "reloader" % (imname))
            # TODO: recode this--it's a bit messy

            def _switch(image):
                # this will be executed in the gui thread
                self.add_image(image, silent=True)
                self.switch_image(image)

                # reset modified timestamp
                info.time_modified = None
                self.fv.make_async_gui_callback('add-image-info', self, info)

            def _load_n_switch(imname, path, image_future):
                # this will be executed in a non-gui thread
                # reconstitute the image
                image = self.fv.error_wrap(image_future.thaw)
                if isinstance(image, Exception):
                    errmsg = "Error reconstituting image: %s" % (str(image))
                    self.logger.error(errmsg)
                    raise image

                # perpetuate the image_future
                image.set(image_future=image_future, name=imname, path=path)
                self.fv.gui_do(_switch, image)

            self.fv.nongui_do(_load_n_switch, imname, info.path,
                              info.image_future)
        elif info.path is not None:
            # Do we have a path? We can try to reload it
            self.logger.debug("Image '%s' is no longer in memory; attempting "
                              "to load from %s" % (imname, info.path))
            # NOTE(review): no ``load_file`` method is defined on Channel in
            # this file -- presumably provided elsewhere, or this should be
            # ``self.fv.load_file``; confirm before relying on this path.
            self.fv.nongui_do(self.load_file, info.path, chname=self.name)
        else:
            raise ChannelError("No way to recreate image '%s'" % (imname))

    def _configure_sort(self):
        """Pick the history sort key from the 'sort_order' setting."""
        self.hist_sort = lambda info: info.time_added
        # set sorting function
        sort_order = self.settings.get('sort_order', 'loadtime')
        if sort_order == 'alpha':
            # sort history alphabetically
            self.hist_sort = lambda info: info.name

    def _sort_changed_ext_cb(self, setting, value):
        """Callback: re-sort history when the sort_order setting changes."""
        self._configure_sort()

        self.history.sort(key=self.hist_sort)
# END
|
{
"content_hash": "817c9c6c605b84cb6cf304b4166676c6",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 80,
"avg_line_length": 32.959715639810426,
"alnum_prop": 0.5413041915306636,
"repo_name": "stscieisenhamer/ginga",
"id": "f1c79a9fd38ed446cddfa861ef40f5b43086dfcb",
"size": "14085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/rv/Channel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2763201"
}
],
"symlink_target": ""
}
|
"""This file contains the USBStor keys plugins."""
import logging
from plaso.events import windows_events
from plaso.lib import eventdata
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides (david.nides@gmail.com)'
class USBStorPlugin(interface.KeyPlugin):
  """USBStor key plugin.

  Produces events for USB storage devices recorded under the
  USBSTOR Registry key: a first-connected event per device class
  subkey, a last-connected event per device instance, and extra
  first-insertion events from the instance's support subkeys.
  """

  NAME = 'winreg_usbstor'
  DESCRIPTION = u'Parser for USB storage Registry data.'

  REG_KEYS = [u'\\{current_control_set}\\Enum\\USBSTOR']
  REG_TYPE = 'SYSTEM'

  def GetEntries(
      self, parser_context, key=None, registry_type=None, **unused_kwargs):
    """Collect Values under USBStor and return an event object for each one.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      key: Optional Registry key (instance of winreg.WinRegKey).
           The default is None.
      registry_type: Optional Registry type string. The default is None.
    """
    for subkey in key.GetSubkeys():
      text_dict = {}
      text_dict['subkey_name'] = subkey.name

      # Time last USB device of this class was first inserted.
      event_object = windows_events.WindowsRegistryEvent(
          subkey.last_written_timestamp, key.path, text_dict,
          usage=eventdata.EventTimestamp.FIRST_CONNECTED, offset=key.offset,
          registry_type=registry_type,
          source_append=': USBStor Entries')
      parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

      # TODO: Determine if these 4 fields always exist.
      try:
        device_type, vendor, product, revision = subkey.name.split('&')
      except ValueError as exception:
        logging.warning(
            u'Unable to split string: {0:s} with error: {1:s}'.format(
                subkey.name, exception))
      else:
        # BUG FIX: these assignments originally ran unconditionally after
        # the except clause, raising NameError when the split failed;
        # only record the fields when the split actually succeeded.
        text_dict['device_type'] = device_type
        text_dict['vendor'] = vendor
        text_dict['product'] = product
        text_dict['revision'] = revision

      for devicekey in subkey.GetSubkeys():
        text_dict['serial'] = devicekey.name

        friendly_name_value = devicekey.GetValue('FriendlyName')
        if friendly_name_value:
          text_dict['friendly_name'] = friendly_name_value.data
        else:
          # remove a stale value carried over from a previous device.
          text_dict.pop('friendly_name', None)

        # ParentIdPrefix applies to Windows XP Only.
        parent_id_prefix_value = devicekey.GetValue('ParentIdPrefix')
        if parent_id_prefix_value:
          text_dict['parent_id_prefix'] = parent_id_prefix_value.data
        else:
          text_dict.pop('parent_id_prefix', None)

        # Win7 - Last Connection.
        # Vista/XP - Time of an insert.
        event_object = windows_events.WindowsRegistryEvent(
            devicekey.last_written_timestamp, key.path, text_dict,
            usage=eventdata.EventTimestamp.LAST_CONNECTED, offset=key.offset,
            registry_type=registry_type,
            source_append=': USBStor Entries')
        parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

        # Build list of first Insertion times.
        first_insert = []
        device_parameter_key = devicekey.GetSubkey('Device Parameters')
        if device_parameter_key:
          first_insert.append(device_parameter_key.last_written_timestamp)

        log_configuration_key = devicekey.GetSubkey('LogConf')
        if (log_configuration_key and
            log_configuration_key.last_written_timestamp not in first_insert):
          first_insert.append(log_configuration_key.last_written_timestamp)

        properties_key = devicekey.GetSubkey('Properties')
        if (properties_key and
            properties_key.last_written_timestamp not in first_insert):
          first_insert.append(properties_key.last_written_timestamp)

        # Add first Insertion times.
        for timestamp in first_insert:
          event_object = windows_events.WindowsRegistryEvent(
              timestamp, key.path, text_dict,
              usage=eventdata.EventTimestamp.LAST_CONNECTED, offset=key.offset,
              registry_type=registry_type,
              source_append=': USBStor Entries')
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)


winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
|
{
"content_hash": "4d9141345f28c2489439d973bb3bd9e6",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 38.69724770642202,
"alnum_prop": 0.6645329540066383,
"repo_name": "cvandeplas/plaso",
"id": "1fc820277d981bb9c586ad116d0e0dc082d6c0a9",
"size": "4915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/winreg_plugins/usbstor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2812257"
},
{
"name": "Shell",
"bytes": "22724"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import collections
try: # pragma: no cover
from collections import OrderedDict # NOQA
except ImportError: # pragma: no cover
from ordereddict import OrderedDict # NOQA
import six
from six.moves import configparser
try: # pragma: no cover
from urllib import urlretrieve # NOQA
except ImportError: # pragma: no cover
# PY3K
from urllib.request import urlretrieve # NOQA
from .bobexceptions import ConfigurationError
def nest_variables(variables):
    """Expand dotted keys into nested dictionaries.

    ``{'a.b': 1}`` becomes ``{'a': {'b': 1}}``.  Raises
    ConfigurationError when a value would overwrite an existing
    subgroup, or a subgroup would overwrite an existing value.
    """
    result = dict()
    for dotted, value in variables.items():
        parts = dotted.split('.')
        target = result
        # walk/create the intermediate group dictionaries
        for part in parts[:-1]:
            target = target.setdefault(part, dict())
            if not isinstance(target, dict):
                raise ConfigurationError('Cannot assign "%s" to group "%s", subgroup is already used.' % (value, dotted))
        leaf = parts[-1]
        if isinstance(target.get(leaf, None), dict):
            raise ConfigurationError('Cannot assign "%s" to group "%s", subgroup is already used.' % (value, leaf))
        if six.PY3:  # pragma: no cover
            target[leaf] = value
        else:  # pragma: no cover
            target[leaf] = value.decode('utf-8')
    return result
def parse_config(configname):
    """Parse an mr.bob INI config (local path or http(s) URL) into a dict.

    Returns a dict with keys 'variables', 'defaults', 'mr.bob',
    'questions', 'template' (each a possibly-nested dict) plus
    'questions_order', a list of question names in file order.
    """
    tmpfile = None
    # remote configs are fetched to a temp file first
    if configname.startswith('http'):
        tmpfile = tempfile.NamedTemporaryFile()
        urlretrieve(configname, tmpfile.name)
        configname = tmpfile.name

    if not os.path.exists(configname):
        raise ConfigurationError('config file does not exist: %s' % configname)

    # SafeConfigParser + OrderedDict keeps option order; kept (despite
    # deprecation in py3) for python 2 compatibility via six
    parser = configparser.SafeConfigParser(dict_type=OrderedDict)
    parser.read(configname)
    config = dict()

    for section in ['variables', 'defaults', 'mr.bob', 'questions', 'template']:
        if parser.has_section(section):
            items = parser.items(section)
            if section == 'questions':
                # strip the '.question' suffix to recover question names
                config[section + "_order"] = [key[:-9] for key, value in items if key.endswith('.question')]
            if section in ['variables', 'defaults']:
                # flat sections: plain key -> value mapping
                if six.PY3:  # pragma: no cover
                    config[section] = dict(items)
                else:  # pragma: no cover
                    config[section] = dict([(key, value.decode('utf-8')) for key, value in items])
            else:
                # dotted keys become nested groups
                config[section] = nest_variables(dict(items))
        else:
            config[section] = {}

    if tmpfile:
        tmpfile.close()
    return config
def write_config(fs_config, section, data):
    """Write the *data* mapping as *section* of the INI file *fs_config*."""
    parser = configparser.SafeConfigParser(dict_type=OrderedDict)
    parser.add_section(section)
    for option, raw_value in data.items():
        # coerce non-strings, and encode on python 2
        value = raw_value if isinstance(raw_value, six.string_types) else str(raw_value)
        if not six.PY3:  # pragma: no cover
            value = value.encode('utf-8')
        parser.set(section, option, value)
    with open(fs_config, 'w') as f:
        parser.write(f)
def update_config(first_config, second_config):
    """Recursively merge *second_config* into *first_config*.

    Nested mappings are merged key by key; any other value in
    *second_config* overwrites the corresponding entry.  Mutates and
    returns *first_config*.
    """
    # ``collections.Mapping`` was removed in Python 3.10; import from
    # collections.abc, falling back for Python 2 where only the old
    # location exists.
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover
        from collections import Mapping
    for k, v in second_config.items():
        if isinstance(v, Mapping):
            first_config[k] = update_config(first_config.get(k, {}), v)
        else:
            first_config[k] = v
    return first_config
def pretty_format_config(config):
    """Flatten a nested config into sorted ``group.key = value`` lines."""
    lines = []

    def walk(mapping, prefix=''):
        # depth-first walk, accumulating dotted key paths
        for key, value in mapping.items():
            dotted = (prefix + ".%s" % key) if prefix else key
            if isinstance(value, dict):
                walk(value, prefix=dotted)
            else:
                lines.append("%s = %s" % (dotted, value))

    walk(config)
    return sorted(lines)
|
{
"content_hash": "843c7b3165762aefffd3a7af7c04bd91",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 118,
"avg_line_length": 32.9051724137931,
"alnum_prop": 0.5939219282158763,
"repo_name": "domenkozar/mr.bob",
"id": "17b0750116d04a24c76ce5f0a0ce34d63706d602",
"size": "3817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mrbob/parsing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "147"
},
{
"name": "Python",
"bytes": "90824"
}
],
"symlink_target": ""
}
|
import collections
import sys
import json
import psycopg2
def organizer(funkcja, pelnaLinia):
"""make new admin"""
if pelnaLinia[funkcja]["secret"] == "d8578edf8458ce06fbc5bb76a58c5ca4":
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user where login = '{0}' ;"""
.format(pelnaLinia[funkcja]["newlogin"]))
wynik = cur.fetchall()
if wynik == [(0,)]:
x_string = """
INSERT INTO con_user(login,password,role) VALUES
('{0}', '{1}','A');
""".format(pelnaLinia[funkcja]["newlogin"], pelnaLinia[funkcja]["newpassword"])
cur.execute(x_string)
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
cur.close()
conn.commit()
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def event(funkcja, pelnaLinia):
"""make new event"""
try:
cur = conn.cursor()
cur.execute("""SELECT role from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [("A",)]:
x_string = """
INSERT INTO event(event_name,s_date,e_date) VALUES
('{0}', '{1}','{2}');
""".format(pelnaLinia[funkcja]["eventname"],
pelnaLinia[funkcja]["start_timestamp"],
pelnaLinia[funkcja]["end_timestamp"])
cur.execute(x_string)
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
cur.close()
conn.commit()
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.commit()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def user(funkcja, pelnaLinia):
"""make new user"""
cur = conn.cursor()
cur.execute("""SELECT role from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [("A",)]:
try:
x_string = """
INSERT INTO con_user(login,password,role) VALUES
('{0}', '{1}','U');
""".format(pelnaLinia[funkcja]["newlogin"],
pelnaLinia[funkcja]["newpassword"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def talk(funkcja, pelnaLinia):
    """make new talk or accept egsisting talk

    Admin-only.  If the talk id already exists with a non-accepted
    status, it is promoted to 'P' (published/accepted) and an initial
    rating is recorded; otherwise a new accepted talk is inserted.
    Prints a JSON status object.

    WARNING(review): all SQL below is built with str.format from request
    input -- SQL injection risk; should use parameterized queries.
    """
    try:
        cur = conn.cursor()
        # check credentials/role of the caller
        cur.execute("""SELECT role from con_user
                    where login = '{0}' and password = '{1}' ;"""
                    .format(pelnaLinia[funkcja]["login"],
                            pelnaLinia[funkcja]["password"]))
        uprawnienia = cur.fetchall()
        # does a not-yet-accepted talk with this id already exist?
        cur.execute("""SELECT count(*) from talk
                    where id = '{0}'
                    and status!='P';"""
                    .format(pelnaLinia[funkcja]["talk"]))
        ifEgsisting = cur.fetchall()
        if uprawnienia == [("A",)]:
            if ifEgsisting == [(1,)]:
                # accept an existing (waiting) talk
                if pelnaLinia[funkcja]["eventname"] == "":
                    # no event given: keep the talk's current event
                    egsist_string = """
                    UPDATE talk SET status='P', room='{4}' WHERE id = '{0}';
                    INSERT INTO raiting_by_user(talk_id, user_login, raiting, a_date) VALUES
                    ('{0}', '{1}','{2}','{3}');
                    """.format(pelnaLinia[funkcja]["talk"],
                               pelnaLinia[funkcja]["login"],
                               pelnaLinia[funkcja]["initial_evaluation"],
                               pelnaLinia[funkcja]["start_timestamp"],
                               pelnaLinia[funkcja]["room"])
                    try:
                        cur.execute(egsist_string)
                        cur.close()
                        conn.commit()
                        dict1 = {'status': "OK"}
                        r = json.dumps(dict1)
                        print r
                    except:
                        conn.rollback()
                        dict1 = {'status': "ERROR"}
                        r = json.dumps(dict1)
                        print r
                else:
                    # event given: also move the talk onto that event
                    egsist_string = """
                    UPDATE talk SET status='P', event_name='{4}', room='{5}' WHERE id = '{0}';
                    INSERT INTO raiting_by_user(talk_id, user_login, raiting, a_date) VALUES
                    ('{0}', '{1}','{2}','{3}');
                    """.format(pelnaLinia[funkcja]["talk"],
                               pelnaLinia[funkcja]["login"],
                               pelnaLinia[funkcja]["initial_evaluation"],
                               pelnaLinia[funkcja]["start_timestamp"],
                               pelnaLinia[funkcja]["eventname"],
                               pelnaLinia[funkcja]["room"])
                    try:
                        cur.execute(egsist_string)
                        cur.close()
                        conn.commit()
                        dict1 = {'status': "OK"}
                        r = json.dumps(dict1)
                        print r
                    except:
                        conn.rollback()
                        dict1 = {'status': "ERROR"}
                        r = json.dumps(dict1)
                        print r
            else:
                # brand-new talk: insert directly as accepted ('P')
                new_string = """
                INSERT INTO talk(id, event_name, speaker_login, title, s_date, room, status) VALUES
                ('{0}', '{1}','{2}','{3}','{4}','{5}','P');
                INSERT INTO raiting_by_user(talk_id, user_login, raiting, a_date) VALUES
                ('{0}', '{7}','{6}','{4}');
                """.format(pelnaLinia[funkcja]["talk"],
                           pelnaLinia[funkcja]["eventname"],
                           pelnaLinia[funkcja]["speakerlogin"],
                           pelnaLinia[funkcja]["title"],
                           pelnaLinia[funkcja]["start_timestamp"],
                           pelnaLinia[funkcja]["room"],
                           pelnaLinia[funkcja]["initial_evaluation"],
                           pelnaLinia[funkcja]["login"])
                try:
                    cur.execute(new_string)
                    cur.close()
                    conn.commit()
                    dict1 = {'status': "OK"}
                    r = json.dumps(dict1)
                    print r
                except:
                    conn.rollback()
                    dict1 = {'status': "ERROR"}
                    r = json.dumps(dict1)
                    print r
        else:
            # caller is not an admin
            dict1 = {'status': "ERROR"}
            r = json.dumps(dict1)
            print r
    except:
        conn.rollback()
        dict1 = {'status': "ERROR"}
        r = json.dumps(dict1)
        print r
def register_user_for_event(funkcja, pelnaLinia):
"""register user for event"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
INSERT INTO user_on_event(login,event_name) VALUES
('{0}', '{1}');
""".format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["eventname"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def attendance(funkcja, pelnaLinia):
"""check attendance"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
INSERT INTO attendance(talk_id,user_login) VALUES
('{0}', '{1}');
""".format(pelnaLinia[funkcja]["talk"],
pelnaLinia[funkcja]["login"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def evaluation(funkcja, pelnaLinia):
"""add talk raiting by user"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)] and pelnaLinia[funkcja]["rating"] >= 0 and \
pelnaLinia[funkcja]["rating"] <= 10:
x_string = """
INSERT INTO raiting_by_user(talk_id,user_login,raiting) VALUES
('{0}', '{1}', '{2}');
""".format(pelnaLinia[funkcja]["talk"],
pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["rating"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def reject(funkcja, pelnaLinia):
"""reject talk from waiting list"""
try:
cur = conn.cursor()
cur.execute("""SELECT role from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [("A",)]:
x_string = """
UPDATE talk SET status='R'
WHERE id = '{0}';
""".format(pelnaLinia[funkcja]["talk"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def proposal(funkcja, pelnaLinia):
"""add talk to waiting list"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
INSERT INTO talk(id, speaker_login, title, s_date, status) VALUES
('{0}', '{1}','{2}','{3}', 'W');
""".format(pelnaLinia[funkcja]["talk"],
pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["title"],
pelnaLinia[funkcja]["start_timestamp"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def friends(funkcja, pelnaLinia):
"""add friend request"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login1"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
INSERT INTO friend_request(login1, login2) VALUES
('{0}', '{1}');
""".format(pelnaLinia[funkcja]["login1"],
pelnaLinia[funkcja]["login2"])
cur.execute(x_string)
cur.close()
conn.commit()
dict1 = {'status': "OK"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def user_plan(funkcja, pelnaLinia):
"""show user plan"""
try:
cur = conn.cursor()
x_string = """
SELECT distinct u.login, t.id, t.s_date, t.title, t.room
FROM user_on_event u
join event e using (event_name)
join talk t using (event_name)
where u.login = '{0}'
AND t.status='P'
order BY 3 desc
""".format(pelnaLinia[funkcja]["login"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('login', 'talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def day_plan(funkcja, pelnaLinia):
"""show day plan"""
try:
cur = conn.cursor()
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room
FROM event e
join talk t using (event_name)
where date_trunc('day',t.s_date) = date_trunc('day',timestamp '{0}')
AND t.status='P'
order BY 4,2 desc;
""".format(pelnaLinia[funkcja]["timestamp"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def best_talks(funkcja, pelnaLinia):
"""show best talks"""
cur = conn.cursor()
if pelnaLinia[funkcja]["all"] == 1:
try:
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room, AVG(rbu.raiting)
FROM talk t
join raiting_by_user rbu on (t.id=rbu.talk_id)
where t.s_date >= '{0}'
and t.s_date <= '{1}'
AND t.status='P'
group by t.id
order BY 3 desc
""".format(pelnaLinia[funkcja]["start_timestamp"], pelnaLinia[funkcja]["end_timestamp"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
else:
try:
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room, AVG(rbu.raiting)
FROM talk t
join attendance a on (t.id=a.talk_id)
join raiting_by_user rbu on (rbu.user_login = a.user_login)
where t.s_date >= '{0}'
and t.s_date <= '{1}'
AND t.status='P'
group by t.id
order BY 3 desc
""".format(pelnaLinia[funkcja]["start_timestamp"], pelnaLinia[funkcja]["end_timestamp"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def most_popular_talks(funkcja, pelnaLinia):
"""show most_popular_talks"""
try:
cur = conn.cursor()
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room, count(t.id)
FROM talk t
join attendance a on (t.id=a.talk_id)
where t.s_date >= '{0}'
and t.s_date <= '{1}'
AND t.status='P'
group by t.id
order BY 5 desc
""".format(pelnaLinia[funkcja]["start_timestamp"], pelnaLinia[funkcja]["end_timestamp"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def attended_talks(funkcja, pelnaLinia):
"""show attended_talks"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room
FROM talk t
join attendance a on (t.id=a.talk_id)
where a.user_login = '{0}'
AND t.status='P';
--order BY 5 desc
""".format(pelnaLinia[funkcja]["login"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def abandoned_talks(funkcja, pelnaLinia):
"""show abandoned_talks"""
try:
cur = conn.cursor()
cur.execute("""SELECT role from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [("A",)]:
x_string = """
SELECT distinct t.id, t.s_date, t.title, t.room, count_on_event(t.event_name) - count(t.id) as number
FROM talk t
join attendance a on (t.id=a.talk_id)
where t.status='P'
group by t.id
order BY 5 desc
"""
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'start_timestamp', 'title', 'room', 'number')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def recently_added_talks(funkcja, pelnaLinia):
"""show recently added talks"""
try:
cur = conn.cursor()
x_string = """
SELECT distinct t.id, t.speaker_login, t.s_date, t.title, t.room
FROM talk t
where t.status='P'
order BY 3 desc
"""
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'speakerlogin', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def rejected_talks(funkcja, pelnaLinia):
"""show rejected_talks"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
SELECT distinct t.id, t.speaker_login, t.s_date, t.title
FROM talk t
where t.status = 'R'
--order BY 5 desc
"""
if uprawnienia == [("A",)]:
x_string = x_string + ";"
else:
x_string = x_string + """AND t.speaker_login = '{0}';
""".format(pelnaLinia[funkcja]["login"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'speakerlogin', 'start_timestamp', 'title')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def proposals(funkcja, pelnaLinia):
"""show proposals"""
try:
cur = conn.cursor()
cur.execute("""SELECT role from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [("A",)]:
x_string = """
SELECT distinct t.id, t.speaker_login, t.s_date, t.title
FROM talk t
where t.status = 'W';
--order BY 5 desc
"""
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'speakerlogin', 'start_timestamp', 'title')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def friends_talks(funkcja, pelnaLinia):
"""show friends_talks"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
SELECT distinct t.id, f.login2, t.s_date, t.title, t.room
FROM friends f
join talk t on (t.speaker_login=f.login2)
where t.s_date >= '{0}'
and t.s_date <= '{1}'
and f.login1 = '{2}'
AND t.status='P'
order BY 3 desc
""".format(pelnaLinia[funkcja]["start_timestamp"],
pelnaLinia[funkcja]["end_timestamp"],
pelnaLinia[funkcja]["login"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'speakerlogin', 'start_timestamp', 'title', 'room')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def friends_events(funkcja, pelnaLinia):
"""show friends_events"""
try:
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
x_string = """
SELECT distinct f.login1, u.event_name , f.login2
FROM friends f
join user_on_event u on (f.login2 = u.login)
where f.login1 = '{0}'
and u.event_name = '{1}';
--order BY 5 desc
""".format(pelnaLinia[funkcja]["login"], pelnaLinia[funkcja]["eventname"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('login', 'eventname', 'friendlogin')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def recommended_talks(funkcja, pelnaLinia):
"""show recomanded talks"""
cur = conn.cursor()
cur.execute("""SELECT count(*) from con_user
where login = '{0}' and password = '{1}' ;"""
.format(pelnaLinia[funkcja]["login"],
pelnaLinia[funkcja]["password"]))
uprawnienia = cur.fetchall()
if uprawnienia == [(1,)]:
try:
x_string = """
SELECT distinct t.id, t.speaker_login, t.s_date, t.title, t.room, score_of_talk('{2}', t.id)
FROM talk t
where t.s_date >= '{0}'
and t.s_date <= '{1}'
AND t.status='P'
order BY 6 desc
""".format(pelnaLinia[funkcja]["start_timestamp"],
pelnaLinia[funkcja]["end_timestamp"],
pelnaLinia[funkcja]["login"])
if pelnaLinia[funkcja]["limit"] == "0":
x_string = x_string+";"
else:
x_string = x_string+"limit {0};".format(pelnaLinia[funkcja]["limit"])
#print x_string
cur.execute(x_string)
rows = cur.fetchall()
cur.close()
conn.commit()
columns = ('talk', 'speakerlogin', 'start_timestamp', 'title', 'room', 'score')
results = []
for t_row in rows:
temp_dict1 = collections.OrderedDict()
for i in range(len(columns)):
temp_dict1[columns[i]] = str(t_row[i])
results.append(temp_dict1)
#print "dupa ", temp_dict1
dict1 = {'status': "OK", "data": results}
r = json.dumps(dict1)
print r
except:
conn.rollback()
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
else:
dict1 = {'status': "ERROR"}
r = json.dumps(dict1)
print r
def notImplemented(nOf, fFL):
dict1 = {'status': "NOT IMPLEMENTED"}
r = json.dumps(dict1)
print r
def API_func(nOf, fFL):
    """nOf-name of function, fFL-full function line

    Dispatches one decoded API call to its handler by name; any
    unrecognized name falls through to notImplemented.
    """
    handlers = {
        "organizer": organizer,
        "event": event,
        "user": user,
        "talk": talk,
        "register_user_for_event": register_user_for_event,
        "attendance": attendance,
        "evaluation": evaluation,
        "reject": reject,
        "proposal": proposal,
        "friends": friends,
        "user_plan": user_plan,
        "day_plan": day_plan,
        "best_talks": best_talks,
        "most_popular_talks": most_popular_talks,
        "attended_talks": attended_talks,
        "abandoned_talks": abandoned_talks,
        "recently_added_talks": recently_added_talks,
        "rejected_talks": rejected_talks,
        "proposals": proposals,
        "friends_talks": friends_talks,
        "friends_events": friends_events,
        "recommended_talks": recommended_talks,
    }
    handlers.get(nOf, notImplemented)(nOf, fFL)
# Start of the database session: the first stdin line carries the
# connection parameters as JSON.
first_line = sys.stdin.readline()
data = json.loads(first_line)
# Connect to the database and report OK/ERROR as a JSON status line.
try:
    conn = psycopg2.connect("""dbname='{0}' user='{1}' host='localhost' password='{2}'"""
                            .format(data["open"]["baza"],
                                    data["open"]["login"],
                                    data["open"]["password"]))
    temp_dict = {'status': "OK"}
    temp_r = json.dumps(temp_dict)
    print temp_r
except:
    # NOTE(review): if the connection fails, `conn` is never bound, so every
    # later use of it raises NameError instead of a clean error -- confirm
    # whether the protocol expects the process to exit here.
    temp_dict = {'status': "ERROR"}
    temp_r = json.dumps(temp_dict)
    print temp_r
# Create the database schema from the bundled SQL script (best effort:
# a failure, e.g. tables already existing, is rolled back and ignored).
try:
    cursor = conn.cursor()
    cursor.execute(open("../baza/baza.sql", "r").read())
    cursor.close()
    conn.commit()
except:
    conn.rollback()
# Main loop: each remaining stdin line is one JSON API call whose single
# top-level key names the function to dispatch.
for line in sys.stdin:
    data = json.loads(line)
    x = data.keys()[0]  # NOTE(review): Python 2 only -- keys() is a view in Python 3
    API_func(x, data)
|
{
"content_hash": "6e3663676af54024a942ffa97826eff2",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 113,
"avg_line_length": 32.146701388888886,
"alnum_prop": 0.46150730429616826,
"repo_name": "iCarrrot/Current_university",
"id": "139dd5a12fadd3f287f628f43fbfe11c9ff33d76",
"size": "37055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BD/Projekt/program/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5946"
},
{
"name": "C++",
"bytes": "78067"
},
{
"name": "Makefile",
"bytes": "1857"
},
{
"name": "PLSQL",
"bytes": "4195"
},
{
"name": "PLpgSQL",
"bytes": "4720"
},
{
"name": "Python",
"bytes": "37055"
},
{
"name": "SQLPL",
"bytes": "581"
},
{
"name": "Shell",
"bytes": "2062"
},
{
"name": "TeX",
"bytes": "2240"
}
],
"symlink_target": ""
}
|
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
X, y = datasets.load_diabetes(return_X_y=True)
print("Computing regularization path using the LARS ...")
_, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
|
{
"content_hash": "53c4fff32ccb99f436b3d70807c30d64",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.702729044834308,
"repo_name": "bnaul/scikit-learn",
"id": "06d4c94bbed70501c7051d5a4513e1e24e9ae5e4",
"size": "1048",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "examples/linear_model/plot_lasso_lars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
}
|
import os
import subprocess
number_of_subjects = 5
subjects_data = [filename.split('.')[0].split('_') for filename in os.listdir('corpora/RAVDESS') if filename.endswith('.wav')]
dummy_files = ["should_be_ignored.txt"]
for id, emotion, scenario, repitition in subjects_data:
subject_number = int(id[len('RAVDESS'):])
if subject_number <= number_of_subjects and scenario == 'kid' and repitition == '1' and emotion in ['happy', 'sad']:
print subject_number
dummy_files.append("_".join([id[len('RAVDESS'):], emotion, scenario, repitition]) + ".wav")
subprocess.call(['touch'] + map(lambda x: 'test/dummy_data/' + x, dummy_files))
print dummy_files
print len(dummy_files)
|
{
"content_hash": "a94d8fa467fe6c40ad9d381c51d7e079",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 126,
"avg_line_length": 34.9,
"alnum_prop": 0.6762177650429799,
"repo_name": "coopie/lazychef",
"id": "0f8b559b928d23b7d409ded00ed6932e085b5b67",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/make_dummy_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19791"
}
],
"symlink_target": ""
}
|
from save_beer import save_beer
from lookup_beer import lookup_beer
|
{
"content_hash": "59d8c18dd9e315883a3b88e85ac79808",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 35,
"avg_line_length": 33.5,
"alnum_prop": 0.835820895522388,
"repo_name": "stormpython/brewmaster",
"id": "38b3dd136911f4288f1b4dc88054c2215fd8b97a",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brewmaster/database/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1199"
},
{
"name": "JavaScript",
"bytes": "795797"
},
{
"name": "Python",
"bytes": "19337"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class TemplateLink(Model):
    """
    Entity representing the reference to the template.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar uri: URI referencing the template. Default value:
     "https://azuresdkci.blob.core.windows.net/templatehost/CreateVnetGateway_2016-08-08/azuredeploy.json"
     .
    :vartype uri: str
    :param content_version: If included it must match the ContentVersion in
     the template.
    :type content_version: str
    """
    # msrest validation map: 'uri' is required and a constant (clients
    # cannot override it).
    _validation = {
        'uri': {'required': True, 'constant': True},
    }
    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'uri': {'key': 'uri', 'type': 'str'},
        'content_version': {'key': 'contentVersion', 'type': 'str'},
    }
    # Class-level constant serialized for every instance.
    uri = "https://azuresdkci.blob.core.windows.net/templatehost/CreateVnetGateway_2016-08-08/azuredeploy.json"

    def __init__(self, content_version=None):
        # NOTE(review): does not call super().__init__() -- confirm the
        # msrest Model base class tolerates being skipped here.
        self.content_version = content_version
|
{
"content_hash": "25262f6bb3c383471015fbedda16841c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 111,
"avg_line_length": 31,
"alnum_prop": 0.6602822580645161,
"repo_name": "BurtBiel/azure-cli",
"id": "859cb6718b5276db510b491f8a654b963a91e324",
"size": "1680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_vnet_gateway/lib/models/template_link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "2108820"
},
{
"name": "Shell",
"bytes": "3300"
}
],
"symlink_target": ""
}
|
from itertools import count
__author__ = 'litleleprikon'
import json
class FrequencyCounter:
    """Count character frequencies in text and persist the ranking as JSON."""

    def __init__(self):
        # Mapping of character -> number of occurrences seen so far.
        self.letters = {}
        # List of (character, count) pairs, most frequent first; None
        # until sort() is called.
        self.sorted = None

    def count_from_file(self, file_name):
        """Count every character of the named text file."""
        with open(file_name) as file:
            self.count(file.read())

    def add(self, letter):
        """Record one occurrence of *letter*."""
        # dict.get with a default replaces the old None-check branching.
        self.letters[letter] = self.letters.get(letter, 0) + 1

    def count(self, text):
        """Record every character of *text*."""
        for letter in text:
            self.add(letter)

    def sort(self):
        """Build ``self.sorted``: (character, count) pairs, most frequent first."""
        self.sorted = sorted(self.letters.items(),
                             key=lambda pair: pair[1], reverse=True)

    def save(self, file_name):
        """Dump the sorted ranking to *file_name* as JSON (call sort() first)."""
        with open(file_name, 'w') as file:
            json.dump(self.sorted, file)

    @staticmethod
    def load_from_file(file_name):
        """Load a previously saved ranking from *file_name*."""
        with open(file_name) as file:
            return json.load(file)
def main():
    """Rank the characters of 'Open text.txt' and write the result to stat.json."""
    freq = FrequencyCounter()
    freq.count_from_file('Open text.txt')
    freq.sort()
    freq.save('stat.json')
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt as ex:
print("Keyboard interrupt")
|
{
"content_hash": "5345fb113faa2caebd122f9481aa6ee5",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 65,
"avg_line_length": 22.796296296296298,
"alnum_prop": 0.5767668562144598,
"repo_name": "litleleprikon/information_security_labs",
"id": "8f2aed8a51408e7fac0b0651ea7034ddfcda3dd9",
"size": "1276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fifth_lab/frequency_counter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25917"
}
],
"symlink_target": ""
}
|
from django import template
register=template.Library()
@register.filter
def autowidth(lst, total=100.0):
    """Template filter: split *total* (percent) evenly over the items of *lst*.

    *lst* may be a sized collection or a bare count; values below 1 are
    clamped so the filter never divides by zero.
    """
    count = len(lst) if hasattr(lst, '__len__') else lst
    if not isinstance(count, float):
        count = float(count)
    if count < 1:
        count = 1
    return round(total / count, 1)
|
{
"content_hash": "43a5a50804c1d0fc7c2f0466fe58402f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.6811023622047244,
"repo_name": "kensonman/webframe",
"id": "237ba8a204e1a51da828b2ee48f009a44ba429a6",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatetags/autowidth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6263"
},
{
"name": "HTML",
"bytes": "58285"
},
{
"name": "JavaScript",
"bytes": "377759"
},
{
"name": "Python",
"bytes": "262504"
},
{
"name": "SCSS",
"bytes": "3786"
},
{
"name": "Vue",
"bytes": "13512"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``ypad`` property of layout.coloraxis.colorbar."""

    def __init__(
        self, plotly_name="ypad", parent_name="layout.coloraxis.colorbar", **kwargs
    ):
        # Fill in defaults only where the caller did not override them,
        # then delegate everything to the base NumberValidator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "86c4791589ed283d4f39574343639277",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 33.86666666666667,
"alnum_prop": 0.5846456692913385,
"repo_name": "plotly/python-api",
"id": "f2f9d76e9e4deb26f94bbe531ffe4300778ac0c6",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_ypad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""
On the long term this will implement several methods to make videos
out of VideoClips
"""
import subprocess as sp
import os
import numpy as np
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from moviepy.config import get_setting
from moviepy.tools import verbose_print
class FFMPEG_VideoWriter:
    """ A class for FFMPEG-based video writing.
    A class to write videos using ffmpeg. ffmpeg will write in a large
    choice of formats.
    Parameters
    -----------
    filename
      Any filename like 'video.mp4' etc. but if you want to avoid
      complications it is recommended to use the generic extension
      '.avi' for all your videos.
    size
      Size (width,height) of the output video in pixels.
    fps
      Frames per second in the output video file.
    codec
      FFMPEG codec. It seems that in terms of quality the hierarchy is
      'rawvideo' = 'png' > 'mpeg4' > 'libx264'
      'png' manages the same lossless quality as 'rawvideo' but yields
      smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list
      of accepted codecs.
      Note for default 'libx264': by default the pixel format yuv420p
      is used. If the video dimensions are not both even (e.g. 720x405)
      another pixel format is used, and this can cause problem in some
      video readers.
    audiofile
      Optional: The name of an audio file that will be incorporated
      to the video.
    preset
      Sets the time that FFMPEG will take to compress the video. The slower,
      the better the compression rate. Possibilities are: ultrafast,superfast,
      veryfast, faster, fast, medium (default), slow, slower, veryslow,
      placebo.
    bitrate
      Only relevant for codecs which accept a bitrate. "5000k" offers
      nice results in general.
    withmask
      Boolean. Set to ``True`` if there is a mask in the video to be
      encoded.
    """

    def __init__(self, filename, size, fps, codec="libx264", audiofile=None,
                 preset="medium", bitrate=None, withmask=False,
                 logfile=None, threads=None, ffmpeg_params=None):
        # Default: capture ffmpeg's stderr through a pipe so write_frame()
        # can read the error text back when a write fails.
        if logfile is None:
            logfile = sp.PIPE
        self.filename = filename
        self.codec = codec
        self.ext = self.filename.split(".")[-1]
        # order is important
        # Input side: raw RGB(A) frames streamed over stdin ('-i -'),
        # with no input audio ('-an').
        cmd = [
            get_setting("FFMPEG_BINARY"),
            '-y',
            '-loglevel', 'error' if logfile == sp.PIPE else 'info',
            '-f', 'rawvideo',
            '-vcodec', 'rawvideo',
            '-s', '%dx%d' % (size[0], size[1]),
            '-pix_fmt', 'rgba' if withmask else 'rgb24',
            '-r', '%.02f' % fps,
            '-i', '-', '-an',
        ]
        if audiofile is not None:
            # Mux the given audio file in unchanged.
            cmd.extend([
                '-i', audiofile,
                '-acodec', 'copy'
            ])
        cmd.extend([
            '-vcodec', codec,
            '-preset', preset,
        ])
        if ffmpeg_params is not None:
            cmd.extend(ffmpeg_params)
        if bitrate is not None:
            cmd.extend([
                '-b', bitrate
            ])
        if threads is not None:
            cmd.extend(["-threads", str(threads)])
        # libx264 with even dimensions: force yuv420p for maximum player
        # compatibility (see the class docstring).
        if ((codec == 'libx264') and
                (size[0] % 2 == 0) and
                (size[1] % 2 == 0)):
            cmd.extend([
                '-pix_fmt', 'yuv420p'
            ])
        cmd.extend([
            filename
        ])
        popen_params = {"stdout": DEVNULL,
                        "stderr": logfile,
                        "stdin": sp.PIPE}
        # This was added so that no extra unwanted window opens on windows
        # when the child process is created
        if os.name == "nt":
            popen_params["creationflags"] = 0x08000000  # CREATE_NO_WINDOW
        self.proc = sp.Popen(cmd, **popen_params)

    def write_frame(self, img_array):
        """ Writes one frame in the file."""
        try:
            # NOTE(review): ndarray.tostring() is deprecated in modern
            # NumPy (removed in 2.0); tobytes() is the byte-identical
            # replacement -- confirm before upgrading NumPy.
            self.proc.stdin.write(img_array.tostring())
        except IOError as err:
            # ffmpeg died or rejected the frame: read its stderr and wrap
            # it in a message that diagnoses the most common causes.
            ffmpeg_error = self.proc.stderr.read()
            error = (str(err) + ("\n\nMoviePy error: FFMPEG encountered "
                                 "the following error while writing file %s:"
                                 "\n\n %s" % (self.filename, ffmpeg_error)))
            if "Unknown encoder" in ffmpeg_error:
                error = error+("\n\nThe video export "
                    "failed because FFMPEG didn't find the specified "
                    "codec for video encoding (%s). Please install "
                    "this codec or change the codec when calling "
                    "write_videofile. For instance:\n"
                    " >>> clip.write_videofile('myvid.webm', codec='libvpx')")%(self.codec)
            elif "incorrect codec parameters ?" in ffmpeg_error:
                error = error+("\n\nThe video export "
                    "failed, possibly because the codec specified for "
                    "the video (%s) is not compatible with the given "
                    "extension (%s). Please specify a valid 'codec' "
                    "argument in write_videofile. This would be 'libx264' "
                    "or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx for webm. "
                    "Another possible reason is that the audio codec was not "
                    "compatible with the video codec. For instance the video "
                    "extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a"
                    "video codec."
                    )%(self.codec, self.ext)
            elif "encoder setup failed" in ffmpeg_error:
                error = error+("\n\nThe video export "
                    "failed, possibly because the bitrate you specified "
                    "was too high or too low for the video codec.")
            elif "Invalid encoder type" in ffmpeg_error:
                error = error + ("\n\nThe video export failed because the codec "
                    "or file extension you provided is not a video")
            raise IOError(error)

    def close(self):
        """Finish the stream: close stdin so ffmpeg flushes and exits."""
        self.proc.stdin.close()
        if self.proc.stderr is not None:
            self.proc.stderr.close()
        self.proc.wait()
        del self.proc
def ffmpeg_write_video(clip, filename, fps, codec="libx264", bitrate=None,
                       preset="medium", withmask=False, write_logfile=False,
                       audiofile=None, verbose=True, threads=None, ffmpeg_params=None):
    """ Write the clip to a videofile. See VideoClip.write_videofile for details
    on the parameters.

    Iterates over the clip's frames at the requested fps and streams each
    one to an FFMPEG_VideoWriter; when ``withmask`` is set, the clip mask
    is appended as an alpha channel.

    (Cleanup: removed an unused ``import sys`` and an unused ``nframes``
    local from the loop body.)
    """
    if write_logfile:
        logfile = open(filename + ".log", 'w+')
    else:
        logfile = None
    verbose_print(verbose, "[MoviePy] Writing video %s\n" % filename)
    writer = FFMPEG_VideoWriter(filename, clip.size, fps, codec=codec,
                                preset=preset, bitrate=bitrate, logfile=logfile,
                                audiofile=audiofile, threads=threads,
                                ffmpeg_params=ffmpeg_params)
    for t, frame in clip.iter_frames(progress_bar=True, with_times=True,
                                     fps=fps, dtype="uint8"):
        if withmask:
            # Scale the mask (0..1 floats) to 0..255 and stack it as alpha.
            mask = 255 * clip.mask.get_frame(t)
            if mask.dtype != "uint8":
                mask = mask.astype("uint8")
            frame = np.dstack([frame, mask])
        writer.write_frame(frame)
    print("")
    writer.close()
    if write_logfile:
        logfile.close()
    verbose_print(verbose, "[MoviePy] Done.\n")
def ffmpeg_write_image(filename, image, logfile=False):
    """ Writes an image (HxWx3 or HxWx4 numpy array) to a file, using
    ffmpeg.

    Raises IOError with ffmpeg's stderr text if the command fails.
    Fixes: uses tobytes() instead of the deprecated ndarray.tostring(),
    and closes the log file handle (previously leaked) on every path.
    """
    if image.dtype != 'uint8':
        image = image.astype("uint8")
    cmd = [get_setting("FFMPEG_BINARY"), '-y',
           '-s', "%dx%d" % (image.shape[:2][::-1]),
           "-f", 'rawvideo',
           # 4 channels means the image carries an alpha plane.
           '-pix_fmt', "rgba" if (image.shape[2] == 4) else "rgb24",
           '-i', '-', filename]
    if logfile:
        log_file = open(filename + ".log", 'w+')
    else:
        log_file = sp.PIPE
    popen_params = {"stdout": DEVNULL,
                    "stderr": log_file,
                    "stdin": sp.PIPE}
    # Prevent a console window from popping up on Windows.
    if os.name == "nt":
        popen_params["creationflags"] = 0x08000000
    proc = sp.Popen(cmd, **popen_params)
    try:
        out, err = proc.communicate(image.tobytes())
        if proc.returncode:
            err = "\n".join(["[MoviePy] Running : %s\n" % cmd,
                             "WARNING: this command returned an error:",
                             err.decode('utf8')])
            raise IOError(err)
    finally:
        # Close the log handle if we opened one (was leaked before).
        if logfile:
            log_file.close()
    del proc
|
{
"content_hash": "58f580a2b20f2152a4196a599065242e",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 90,
"avg_line_length": 32.85660377358491,
"alnum_prop": 0.5457677730561618,
"repo_name": "jonathanlurie/timelapseComposer",
"id": "948a355efb82d792e348ddf00c97b2d3efdd4219",
"size": "8707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/moviepy/video/io/ffmpeg_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "413983"
},
{
"name": "Shell",
"bytes": "1434"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from .models import elbv2_backends
from ..core.models import base_decorator
# Backend instance for the default region, exported for direct use.
elb_backend = elbv2_backends["us-east-1"]
# NOTE(review): base_decorator presumably wraps tests so boto3 ELBv2 calls
# hit the in-memory backends -- confirm against moto's core module.
mock_elbv2 = base_decorator(elbv2_backends)
|
{
"content_hash": "9db1738d0b8568b1830e99b3343fdde8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 43,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.7783251231527094,
"repo_name": "william-richard/moto",
"id": "61c4a37ff9dda5a345b7096e1773480ca0947980",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/elbv2/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
# Interactive loop: prompt for a pin and a level, configure the pin as an
# output, and drive it.  Runs until interrupted (no GPIO.cleanup() on exit).
while True:
    pin = int(input("pin"))
    state = int(input("state"))
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, state)
|
{
"content_hash": "93fe5329338f95a4fe4b4d33f98db82f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 31,
"avg_line_length": 17.454545454545453,
"alnum_prop": 0.6510416666666666,
"repo_name": "gideontong/RaspiRobot",
"id": "7066f2ed6d50cab14ca2b43e106e8639ad9b8a9c",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pin_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16803"
}
],
"symlink_target": ""
}
|
'''
Written by JT Fuchs, UNC.

PURPOSE: This program takes ZZ Ceti observations with Goodman and runs the full pipeline on a night. Uses ReduceSpec.py, spectral_extraction.py, Wavelength_Calibration.py, continuum_normalization.py, flux_calibration.py, and diagnostics.py (and all dependencies therein).

DIRECTORY FILES THAT SHOULD EXIST:
listZero - text file containing list of bias images to combine
listFlat - text file containing list of flat field images to combine. If both blue and red set, give all blue files first, then all red files.
listSpec - text file containing list of spectra to combine. Organize by target.
listFe - text file containing list of Iron lamps to combine. If both blue and red set, give all blue files first, then all red files.

NOTE: Written for Python 2 (print statements).
'''
import ReduceSpec
import spectral_extraction
import Wavelength_Calibration
import continuum_normalization
import flux_calibration
import diagnostics
from glob import glob

#=========================
#Begin Fits Reduction
#=========================
# Combine biases/flats, then bias-subtract and flatfield spectra and lamps.
ReduceSpec.reduce_now(['script_name','listZero','listFlat','listSpec','listFe'])

#========================
#Begin Spectral Extraction
#========================
print 'Beginning spectral extraction.'
spec_files = sorted(glob('cftb*fits'))
# Drop the individual (un-combined) exposures, named cftb.0*, cftb.1*,
# cftb.2*; only the combined spectra are extracted.
single_spec_list = []
for x in spec_files:
    if ('cftb.0' in x) or ('cftb.1' in x) or ('cftb.2' in x):
        single_spec_list.append(x)
for x in single_spec_list:
    spec_files.remove(x)
spec_files = sorted(spec_files)
lamp_file_blue = sorted(glob('tFe*blue*fits'))
lamp_file_red = sorted(glob('tFe*red*fits'))
#Search for FWHM and trace file for each spectrum. If it does not exist, these go to None and will be fit and saved during the extraction.
trace_files = []
FWHM_files = []
for x in spec_files:
    # x[5:-5] strips the 'cftb.' prefix and '.fits' suffix to obtain the
    # spectrum base name used to match its trace/FWHM side files.
    trace_name = '*' + x[5:-5] + '*trace.npy'
    new_trace = glob(trace_name)
    if len(new_trace) == 0:
        trace_files.append(None)
    else:
        trace_files.append(new_trace[0])
    fwhm_name = '*' + x[5:-5] + '*poly.npy'
    new_fwhm = glob(fwhm_name)
    if len(new_fwhm) == 0:
        FWHM_files.append(None)
    else:
        FWHM_files.append(new_fwhm[0])
for x in spec_files:
    # Pick the matching lamp by setup color. NOTE(review): if a file name
    # contains neither 'blue' nor 'red', lamp_file keeps its previous value
    # (or is unbound on the first pass) -- confirm naming guarantees this
    # cannot happen.
    if 'blue' in x.lower():
        lamp_file = lamp_file_blue[0]
    elif 'red' in x.lower():
        lamp_file = lamp_file_red[0]
    FWHM_thisfile = FWHM_files[spec_files.index(x)]
    trace_thisfile = trace_files[spec_files.index(x)]
    if trace_thisfile != None:
        trace_exist_file = True
    else:
        trace_exist_file = False
    print ''
    print x, lamp_file,trace_thisfile, FWHM_thisfile
    #Must add in option of not have trace file or FWHM file
    #if no FWHMfile, FWHMfile=None
    spectral_extraction.extract_now(x,lamp_file,FWHMfile=FWHM_thisfile,tracefile=trace_thisfile,trace_exist=trace_exist_file)

#=========================
# Begin Wavelength Calibration
#=========================
print '\n Beginning Wavelength Calibration'
spec_files = sorted(glob('cftb*ms.fits'))
lamp_files = sorted(glob('tFe*ms.fits'))
offset_file = glob('offsets.txt') #Offset file must be structured as blue, then red
if len(offset_file) == 0:
    offset_file = None
else:
    offset_file = offset_file[0]
#print spec_files
#print lamp_files
#Need to carefully match up the correct lamp and spectrum files. This seems to work well.
for x in lamp_files:
    if 'blue' in x.lower():
        lamp_color = 'blue'
    elif 'red' in x.lower():
        lamp_color = 'red'
    for y in spec_files:
        # Match lamp to spectrum by color and by target name (between the
        # 'cftb.' prefix and the '_930' grating marker).
        ###if (y[5:y.find('_930')] in x) and (y[y.find('_930'):y.find('_930')+8] in x):
        if (lamp_color in y.lower()) and (y[5:y.find('_930')] in x):
            print x, y, offset_file
            # Without a saved offsets file, show interactive plots.
            if offset_file == None:
                plotalot = True
            else:
                plotalot = False
            Wavelength_Calibration.calibrate_now(x,y,'no','yes',offset_file,plotall=plotalot)

#=========================
#Begin Continuum Normalization
#=========================
print '\n Begin continuum normalization.'
continuum_files = sorted(glob('wcftb*ms.fits'))
#print continuum_files
# Walk the sorted list pairing blue/red setups of the same target (same
# prefix before '930'); pairs are normalized together, singletons alone.
x = 0
while x < len(continuum_files):
    if x == len(continuum_files)-1:
        #print continuum_files[x]
        continuum_normalization.normalize_now(continuum_files[x],None,False,plotall=False)
        x += 1
    elif continuum_files[x][0:continuum_files[x].find('930')] == continuum_files[x+1][0:continuum_files[x].find('930')]:
        #print continuum_files[x],continuum_files[x+1]
        continuum_normalization.normalize_now(continuum_files[x],continuum_files[x+1],True,plotall=False)
        x += 2
    else:
        #print continuum_files[x]
        continuum_normalization.normalize_now(continuum_files[x],None,False,plotall=False)
        x += 1

#=========================
#Begin Flux Calibration
#=========================
print '\nBegin flux calibration.'
#We should use the same files as for the continuum normalization. But if you want to change that for some reason, adjust below.
'''
continuum_files = sorted(glob('wcftb*ms.fits'))
single_spec_list = []
for x in continuum_files:
    if 'flux' in x:
        single_spec_list.append(x)
for x in single_spec_list:
    continuum_files.remove(x)
continuum_files = sorted(continuum_files)
#print continuum_files
'''
stdlist = None
fluxlist = None
flux_calibration.flux_calibrate_now(stdlist,fluxlist,continuum_files,extinct_correct=True,masterresp=True)

#=========================
#Begin Diagnostics
#=========================
print 'Running diagnostics.'
diagnostics.diagnostic_now()
|
{
"content_hash": "f66d0accd0772485436ea253f353518c",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 271,
"avg_line_length": 33.493975903614455,
"alnum_prop": 0.6440647482014389,
"repo_name": "joshfuchs/ZZCeti_pipeline",
"id": "2d42938697ecedcb1ae85977539025d90b599607",
"size": "5560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reduceall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "318991"
}
],
"symlink_target": ""
}
|
import json
from pprint import pprint
from flask import Response
import requests
from urllib.parse import quote_plus, unquote_plus, urlencode
from .base_class import ZmirrorTestBase
from .utils import *
class TestVerification(ZmirrorTestBase):
    """testing using https://httpbin.org/"""

    class C(ZmirrorTestBase.C):
        # Mirror/site configuration shared by every test in this case.
        my_host_name = 'b.test.com'
        my_host_scheme = 'https://'
        target_domain = 'httpbin.org'
        target_scheme = 'https://'
        external_domains = ('eu.httpbin.org',)
        force_https_domains = 'ALL'
        enable_automatic_domains_whitelist = False
        # verbose_level = 4
        possible_charsets = None

        # Enable the human-verification flow under test.
        human_ip_verification_enabled = True
        identity_verify_required = True
        enable_custom_access_cookie_generate_and_verify = True
        human_ip_verification_questions = (
            ('Unittest question one', '答案', 'Placeholder (Optional)'),
        )
        human_ip_verification_identity_record = (
            ("Please input your student/teacher ID number", "student_id", "text"),
            ("Please input your student/teacher password", "password", "password"),
        )
        must_verify_cookies = True

    class CaseCfg(ZmirrorTestBase.CaseCfg):
        # Tip text expected on the verification page
        # ("you need to answer ALL of the following questions").
        tip_texts_in_verification_page = "你需要回答出以下<b>所有问题</b>"

    def setUp(self):
        """Prepare the query string and verify-page URL shared by the tests."""
        super().setUp()
        self.query_string_dict = {
            "zmirror": "love_lucia",
            "zhi": "撒吱吱pr(顺便测试中文)",
        }
        self.query_string = urlencode(self.query_string_dict)  # type: str
        # ?origin= carries a base64 encoding of the URL the visitor
        # originally requested.
        self.verify_page_url = self.url(
            "/ip_ban_verify_page?origin=aHR0cDovL2Iud"
            "GVzdC5jb20vZ2V0P3ptaXJyb3I9bG92ZV9sdWNpY"
            "SZ6aGk95pKS5ZCx5ZCxcHIo6aG65L6_5rWL6K-V5"
            "Lit5paHKQ=="
        )  # type: str
        self.origin = self.verify_page_url[self.verify_page_url.find("origin=") + 7:]  # type: str

    def test_redirect_to_verification_page(self):
        """https://httpbin.org/get?zmirror=love_lucia"""
        self.rv = self.client.get(
            self.url("/get"),
            query_string=self.query_string_dict,
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response

        # When verification is required a redirect to the verify page occurs.
        self.assertEqual(302, self.rv.status_code, msg=self.dump())
        self.assertIn(
            "/ip_ban_verify_page?origin=",
            self.rv.location, msg=self.dump())
        self.assertIn(b"Redirecting...", self.rv.data, msg=self.dump())

        # verify_page_url = self.rv.location  # type: str
        # print("verify_page_url", verify_page_url)

    def test_verification_page(self):
        """The verification page itself."""
        self.rv = self.client.get(
            self.verify_page_url,
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response
        page_content = self.rv.data.decode()  # type: str

        # The page must show the tip text, the title, each question with its
        # placeholder, and the identity-record form fields.
        self.assertIn(self.CaseCfg.tip_texts_in_verification_page,
                      page_content, msg=self.dump())
        self.assertIn(self.zmirror.human_ip_verification_title,
                      page_content, msg=self.dump())
        self.assertIn(self.C.human_ip_verification_questions[0][0],
                      page_content, msg=self.dump())
        self.assertIn(self.C.human_ip_verification_questions[0][2],
                      page_content, msg=self.dump())
        self.assertIn('type="text" name="0"',
                      page_content, msg=self.dump())
        self.assertIn(self.C.human_ip_verification_identity_record[0][0],
                      page_content, msg=self.dump())
        self.assertIn(self.C.human_ip_verification_identity_record[1][0],
                      page_content, msg=self.dump())
        self.assertIn('type="password"', page_content, msg=self.dump())
        self.assertIn('type="hidden" name="origin"', page_content, msg=self.dump())
        self.assertIn('name="{}"'.format(self.C.human_ip_verification_identity_record[0][1]),
                      page_content, msg=self.dump())
        self.assertIn('name="{}"'.format(self.C.human_ip_verification_identity_record[1][1]),
                      page_content, msg=self.dump())
        self.assertIn("<form method='post'>", page_content, msg=self.dump())

    def test_not_answer_question(self):
        """Posting without answering the question."""
        self.rv = self.client.post(
            self.verify_page_url,
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response
        page_content = self.rv.data.decode()  # type: str
        self.assertIn("Please answer question: " +
                      self.C.human_ip_verification_questions[0][0],
                      page_content, msg=self.dump())

    def test_wrong_answer(self):
        """Posting a wrong answer."""
        self.rv = self.client.post(
            self.verify_page_url,
            data={
                "0": "错误的答案",
                "origin": self.origin,
            },
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response
        page_content = self.rv.data.decode()  # type: str
        self.assertIn("Wrong answer in: " +
                      self.C.human_ip_verification_questions[0][0],
                      page_content, msg=self.dump())

    def test_lost_identity(self):
        """Correct answer, but the [student/teacher ID number] field is missing."""
        self.rv = self.client.post(
            self.verify_page_url,
            data={
                "0": self.C.human_ip_verification_questions[0][1],
                "origin": self.origin,
            },
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response
        page_content = self.rv.data.decode()  # type: str
        self.assertIn("Param Missing or Blank: " +
                      self.C.human_ip_verification_identity_record[0][0],
                      page_content, msg=self.dump())

    def test_correct_answer(self):
        """Correct answer with complete identity information."""
        self.rv = self.client.post(
            self.verify_page_url,
            data={
                "0": self.C.human_ip_verification_questions[0][1],
                self.C.human_ip_verification_identity_record[0][1]: "Unittest",
                self.C.human_ip_verification_identity_record[1][1]: "!Password1",
                "origin": self.origin,
            },
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response
        page_content = self.rv.data.decode()  # type: str
        self.assertIn("Page Redirect", page_content, msg=self.dump())
        self.assertIn(self.zmirror.human_ip_verification_success_msg, page_content, msg=self.dump())
        # The redirect page must carry the original query parameters through.
        self.assertIn(self.query_string_dict["zhi"], page_content, msg=self.dump())
        self.assertIn(self.query_string_dict["zmirror"], page_content, msg=self.dump())
        # Both the IP whitelist and the identity log must be written to disk.
        self.assertTrue(os.path.exists(zmirror_file("ip_whitelist.log")), msg=self.dump())
        self.assertTrue(os.path.exists(zmirror_file("ip_whitelist.txt")), msg=self.dump())

        with open(zmirror_file("ip_whitelist.txt"), 'r', encoding='utf-8') as fp:
            self.assertIn(
                "1.2.3.4",
                fp.read(), msg=self.dump()
            )
        with open(zmirror_file("ip_whitelist.log"), 'r', encoding='utf-8') as fp:
            self.assertIn(
                "Unittest",
                fp.read(), msg=self.dump()
            )
        with open(zmirror_file("ip_whitelist.log"), 'r', encoding='utf-8') as fp:
            self.assertIn(
                "!Password1",
                fp.read(), msg=self.dump()
            )

        self.assertIn("zmirror_verify=", self.rv.headers.get("Set-Cookie"))

        # Request httpbin once more to confirm this IP is now authorized.
        self.rv2 = self.client.get(
            self.url("/get"),
            query_string=self.query_string_dict,
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response

    def test_add_whitelist_by_cookie(self):
        """An unknown IP presenting a valid cookie is verified and allowed through."""
        # First obtain a verification cookie by answering correctly.
        self.rv = self.client.post(
            self.verify_page_url,
            data={
                "0": self.C.human_ip_verification_questions[0][1],
                self.C.human_ip_verification_identity_record[0][1]: "Unittest",
                self.C.human_ip_verification_identity_record[1][1]: "!Password1",
                "origin": self.origin,
            },
            environ_base=env(
                ip='1.2.3.4'
            ),
            headers=headers(),
        )  # type: Response

        self.rv2 = self.client.get(
            self.url("/get"),
            query_string=self.query_string_dict,
            environ_base=env(
                ip='2.33.233.233'  # a different IP
            ),
            headers=headers(),
        )  # type: Response
class TestVerificationSingleAnswer(TestVerification):
    """testing using https://httpbin.org/"""

    class C(TestVerification.C):
        # In this variant, answering ANY single question correctly is enough.
        human_ip_verification_answer_any_one_questions_is_ok = True
        enable_custom_access_cookie_generate_and_verify = False
        human_ip_verification_questions = (
            ('Unittest question one', '答案', 'Placeholder (Optional)'),
            ('Unittest question two', '答案2', ''),
        )
        human_ip_verification_identity_record = (
            ("Id verify question 1 stuid", "student_id", "text"),
            ("Id verify question 2 passwd", "password", "password"),
        )

    class CaseCfg(TestVerification.CaseCfg):
        # Tip text ("you only need to answer ANY ONE of the questions").
        tip_texts_in_verification_page = "只需要回答出以下<b>任意一个</b>问题即可"

    def test_not_answer_question(self):
        """Posting without answering any question."""
        with self.app.test_client() as c:
            self.rv = c.post(
                self.verify_page_url,
                environ_base=env(
                    ip='1.2.3.4'
                ),
                headers=headers(),
            )  # type: Response
            page_content = self.rv.data.decode()  # type: str
            self.assertIn("Please answer at least ONE question",
                          page_content, msg=self.dump())
|
{
"content_hash": "5180df690f0e2fe24757fbad3d2923fc",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 100,
"avg_line_length": 35.944250871080136,
"alnum_prop": 0.5368359829391237,
"repo_name": "Aploium/EasyWebsiteMirror",
"id": "7c8a40f7e783b823edb4e85efc2d85892151683a",
"size": "10597",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_verification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159149"
}
],
"symlink_target": ""
}
|
import os.path
from flask import Blueprint, render_template
from werkzeug.exceptions import NotFound
from browsepy import stream_template, get_cookie_browse_sorting, \
browse_sortkey_reverse
from browsepy.file import OutsideDirectoryBase
from .playable import PlayableFile, PlayableDirectory, \
PlayListFile, detect_playable_mimetype
# Absolute directory of this module, used to locate bundled assets.
__basedir__ = os.path.dirname(os.path.abspath(__file__))

# Blueprint serving the player pages and static assets under /play.
player = Blueprint(
    'player',
    __name__,
    url_prefix='/play',
    template_folder=os.path.join(__basedir__, 'templates'),
    static_folder=os.path.join(__basedir__, 'static'),
)
@player.route('/audio/<path:path>')
def audio(path):
    """Render the audio player page for the playable file at *path*.

    Responds 404 when the path escapes the base directory or does not
    refer to a regular file.
    """
    try:
        node = PlayableFile.from_urlpath(path)
        if node.is_file:
            return render_template('audio.player.html', file=node)
    except OutsideDirectoryBase:
        pass
    return NotFound()
@player.route('/list/<path:path>')
def playlist(path):
    """Stream the audio player page for the playlist file at *path*.

    Responds 404 when the path escapes the base directory or does not
    refer to a regular file.
    """
    try:
        node = PlayListFile.from_urlpath(path)
        if node.is_file:
            return stream_template('audio.player.html', file=node, playlist=True)
    except OutsideDirectoryBase:
        pass
    return NotFound()
@player.route("/directory", defaults={"path": ""})
@player.route('/directory/<path:path>')
def directory(path):
sort_property = get_cookie_browse_sorting(path, 'text')
sort_fnc, sort_reverse = browse_sortkey_reverse(sort_property)
try:
file = PlayableDirectory.from_urlpath(path)
if file.is_directory:
return stream_template(
'audio.player.html',
file=file,
sort_property=sort_property,
sort_fnc=sort_fnc,
sort_reverse=sort_reverse,
playlist=True
)
except OutsideDirectoryBase:
pass
return NotFound()
def register_arguments(manager):
    '''
    Register arguments using given plugin manager.

    This method is called before `register_plugin`.

    :param manager: plugin manager
    :type manager: browsepy.manager.PluginManager
    '''
    # Arguments are forwarded to argparse:ArgumentParser.add_argument,
    # https://docs.python.org/3.7/library/argparse.html#the-add-argument-method
    manager.register_argument(
        '--player-directory-play', action='store_true',
        help='enable directories as playlist'
    )
def register_plugin(manager):
    '''
    Register blueprints and actions using given plugin manager.

    Registers the player blueprint, the playable-mimetype detector, and the
    stylesheet/link/button widgets that surface the player in the UI.

    :param manager: plugin manager
    :type manager: browsepy.manager.PluginManager
    '''
    manager.register_blueprint(player)
    manager.register_mimetype_function(detect_playable_mimetype)

    # add style tag
    manager.register_widget(
        place='styles',
        type='stylesheet',
        endpoint='player.static',
        filename='css/browse.css'
    )

    # register link actions
    manager.register_widget(
        place='entry-link',
        type='link',
        endpoint='player.audio',
        filter=PlayableFile.detect
    )
    manager.register_widget(
        place='entry-link',
        icon='playlist',
        type='link',
        endpoint='player.playlist',
        filter=PlayListFile.detect
    )

    # register action buttons
    manager.register_widget(
        place='entry-actions',
        css='play',
        type='button',
        endpoint='player.audio',
        filter=PlayableFile.detect
    )
    manager.register_widget(
        place='entry-actions',
        css='play',
        type='button',
        endpoint='player.playlist',
        filter=PlayListFile.detect
    )

    # check argument (see `register_arguments`) before registering
    if manager.get_argument('player_directory_play'):
        # register header button
        manager.register_widget(
            place='header',
            type='button',
            endpoint='player.directory',
            text='Play directory',
            filter=PlayableDirectory.detect
        )
|
{
"content_hash": "ecd2a04aa898cdfbbaf874228df5638d",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 27.486486486486488,
"alnum_prop": 0.6157817109144543,
"repo_name": "ergoithz/browsepy",
"id": "357b8bca3da26320f887bff4864a0362285475bc",
"size": "4115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browsepy/plugin/player/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19357"
},
{
"name": "HTML",
"bytes": "81226"
},
{
"name": "JavaScript",
"bytes": "2119"
},
{
"name": "Makefile",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "200446"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from glob import glob
import os
import time
try:
from xml.etree.ElementTree import ElementTree
except ImportError:
try:
from elementtree import ElementTree
except ImportError:
pass
from monasca_agent.collector.checks import AgentCheck
from monasca_agent.common.util import get_hostname
class Skip(Exception):
    """Signals that a build or job directory should be ignored.

    Raised by :class:`Jenkins` while scanning build directories.
    """

    def __init__(self, reason, dir_name):
        # Compose a human-readable explanation of why this entry is skipped.
        Exception.__init__(
            self, 'skipping build or job at %s because %s' % (dir_name, reason))
class Jenkins(AgentCheck):
    """Collect build results from a Jenkins server's on-disk job directories.

    Walks ``<jenkins_home>/jobs/*/builds/<timestamp>`` and yields one output
    dict per build newer than the per-job high-water mark.
    """

    # Jenkins names build directories with this timestamp format.
    datetime_format = '%Y-%m-%d_%H-%M-%S'

    def __init__(self, name, init_config, agent_config):
        AgentCheck.__init__(self, name, init_config, agent_config)
        # instance name -> {job name -> timestamp of newest build seen}
        self.high_watermarks = {}

    def _extract_timestamp(self, dir_name):
        # Parse the build time encoded in the directory name; non-directories
        # (stray files under builds/) are skipped outright.
        if not os.path.isdir(dir_name):
            raise Skip('its not a build directory', dir_name)
        try:
            # Parse the timestamp from the directory name
            date_str = os.path.basename(dir_name)
            time_tuple = time.strptime(date_str, self.datetime_format)
            return time.mktime(time_tuple)
        except ValueError:
            raise Exception("Error with build directory name, not a parsable date: %s" % (dir_name))

    def _get_build_metadata(self, dir_name):
        # Archived builds have already been reported elsewhere; skip them.
        if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')):
            raise Skip('the build has already been archived', dir_name)

        # Read the build.xml metadata file that Jenkins generates
        build_metadata = os.path.join(dir_name, 'build.xml')

        if not os.access(build_metadata, os.R_OK):
            self.log.debug("Can't read build file at %s" % (build_metadata))
            raise Exception("Can't access build.xml at %s" % (build_metadata))
        else:
            tree = ElementTree()
            tree.parse(build_metadata)

            # Pull the basic result fields, dropping any that are absent.
            keys = ['result', 'number', 'duration']

            kv_pairs = ((k, tree.find(k)) for k in keys)
            d = dict([(k, v.text) for k, v in kv_pairs if v is not None])

            try:
                # Dig the git branch name out of the nested git-plugin XML;
                # best-effort only (non-git builds simply lack this key).
                d['branch'] = tree.find('actions') \
                    .find('hudson.plugins.git.util.BuildData') \
                    .find('buildsByBranchName') \
                    .find('entry') \
                    .find('hudson.plugins.git.util.Build') \
                    .find('revision') \
                    .find('branches') \
                    .find('hudson.plugins.git.Branch') \
                    .find('name') \
                    .text
            except Exception:
                pass
            return d

    def _get_build_results(self, instance_key, job_dir):
        """Yield one output dict per new build of the job at *job_dir*,
        newest first, updating the high-water mark as builds are reported.
        """
        job_name = os.path.basename(job_dir)

        try:
            dirs = glob(os.path.join(job_dir, 'builds', '*_*'))
            if len(dirs) > 0:
                # Newest build directory first.
                dirs = sorted(dirs, reverse=True)
                # We try to get the last valid build
                # NOTE(review): xrange is Python 2 only, and the upper bound
                # len(dirs) - 1 excludes the final (oldest) directory --
                # confirm whether the oldest build is meant to be skipped.
                for index in xrange(0, len(dirs) - 1):
                    dir_name = dirs[index]
                    try:
                        timestamp = self._extract_timestamp(dir_name)
                    except Skip:
                        continue
                    # Check if it's a new build
                    if timestamp > self.high_watermarks[instance_key][job_name]:
                        # If we can't get build metadata, we try the previous one
                        try:
                            build_metadata = self._get_build_metadata(dir_name)
                        except Exception:
                            continue

                        output = {
                            'job_name': job_name,
                            'timestamp': timestamp,
                            'event_type': 'build result'
                        }
                        output.update(build_metadata)
                        self.high_watermarks[instance_key][job_name] = timestamp
                        yield output
                    # If it not a new build, stop here
                    else:
                        break
        except Exception as e:
            self.log.error("Error while working on job %s, exception: %s" % (job_name, e))

    def check(self, instance):
        """Scan every job directory of the configured Jenkins instance."""
        if self.high_watermarks.get(instance.get('name'), None) is None:
            # On the first run of check(), prime the high_watermarks dict
            # (Setting high_watermarks in the next statement prevents
            # any kind of infinite loop (assuming nothing ever sets
            # high_watermarks to None again!))
            self.high_watermarks[instance.get('name')] = defaultdict(lambda: 0)
            self.check(instance)

        jenkins_home = instance.get('jenkins_home', None)

        if not jenkins_home:
            raise Exception("No jenkins_home directory set in the config file")

        jenkins_jobs_dir = os.path.join(jenkins_home, 'jobs', '*')
        job_dirs = glob(jenkins_jobs_dir)

        if not job_dirs:
            raise Exception('No jobs found in `%s`! '
                            'Check `jenkins_home` in your config' % (jenkins_jobs_dir))

        for job_dir in job_dirs:
            for output in self._get_build_results(instance.get('name'), job_dir):
                # NOTE(review): the output dict is annotated with the host but
                # never emitted/submitted here -- confirm nothing was lost.
                output['host'] = get_hostname()
|
{
"content_hash": "aad85fb322c53fed3cd6e671b8a1b3fa",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 100,
"avg_line_length": 38.13286713286713,
"alnum_prop": 0.5332844305886668,
"repo_name": "sapcc/monasca-agent",
"id": "c5876fd0f3db795e249e3212f1abda79d4c46c21",
"size": "5530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monasca_agent/collector/checks_d/jenkins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "1855"
},
{
"name": "Makefile",
"bytes": "3221"
},
{
"name": "Nginx",
"bytes": "1211"
},
{
"name": "PowerShell",
"bytes": "2396"
},
{
"name": "Python",
"bytes": "1280190"
},
{
"name": "Roff",
"bytes": "2000"
},
{
"name": "Shell",
"bytes": "39112"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.core.tasks.task import Task
from pants.base.payload_field import DeferredSourcesField
from pants.build_graph.address_lookup_error import AddressLookupError
logger = logging.getLogger(__name__)
class DeferredSourcesMapper(Task):
    """Map DeferredSourcesFields to files that produce product 'unpacked_archives', like UnpackJars.

    If you want a task to be able to map sources like this, make it require the
    'deferred_sources' product.
    """

    class SourcesTargetLookupError(AddressLookupError):
        """Raised when the referenced target cannot be found in the build graph"""
        pass

    class NoUnpackedSourcesError(AddressLookupError):
        """Raised when there are no files found unpacked from the archive"""
        pass

    @classmethod
    def product_types(cls):
        """Declare the product produced by this task.

        deferred_sources does not have any data associated with it. Downstream
        tasks can depend on it just to make sure that this task completes first.

        :return: the product type names
        """
        return ['deferred_sources']

    @classmethod
    def prepare(cls, options, round_manager):
        # Ensure archive-unpacking tasks (e.g. UnpackJars) run before us.
        round_manager.require_data('unpacked_archives')

    def execute(self):
        """Populate every DeferredSourcesField reachable from the context's
        targets with the sources unpacked for its referenced target.

        :raises SourcesTargetLookupError: if a referenced sources target is
            not in the build graph.
        :raises NoUnpackedSourcesError: if the referenced target did not
            unpack any sources.
        """
        deferred_sources_fields = []

        def find_deferred_sources_fields(target):
            # Collect (target, field name, field) for every deferred field.
            for name, payload_field in target.payload.fields:
                if isinstance(payload_field, DeferredSourcesField):
                    deferred_sources_fields.append((target, name, payload_field))

        addresses = [target.address for target in self.context.targets()]
        self.context.build_graph.walk_transitive_dependency_graph(
            addresses, find_deferred_sources_fields)

        unpacked_sources = self.context.products.get_data('unpacked_archives')
        for target, name, payload_field in deferred_sources_fields:
            sources_target = self.context.build_graph.get_target(payload_field.address)
            if not sources_target:
                raise self.SourcesTargetLookupError(
                    "Couldn't find {sources_spec} referenced from {target} field {name} in build graph"
                    .format(sources_spec=payload_field.address.spec,
                            target=target.address.spec, name=name))
            if sources_target not in unpacked_sources:
                # BUG FIX: the format string expects {sources_spec}, but the
                # value was previously passed as spec=..., so raising this
                # error crashed with KeyError instead of the intended message.
                raise self.NoUnpackedSourcesError(
                    "Target {sources_spec} referenced from {target} field {name} did not unpack any sources"
                    .format(sources_spec=sources_target.address.spec,
                            target=target.address.spec, name=name))
            sources, rel_unpack_dir = unpacked_sources[sources_target]
            # We have no idea if rel_unpack_dir matches any of our source root
            # patterns, so we explicitly register it here.
            self.context.source_roots.add_source_root(rel_unpack_dir)
            payload_field.populate(sources, rel_unpack_dir)
|
{
"content_hash": "f442ce4aafc3b0ce70c1e21f5bb18a48",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 98,
"avg_line_length": 42.405797101449274,
"alnum_prop": 0.7115516062884484,
"repo_name": "megaserg/pants",
"id": "d68c55cf07f1036c86712b29774d81d2a3e7cb59",
"size": "3073",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/tasks/deferred_sources_mapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "307373"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4127534"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49640"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
"""
Helper for looping over sequences, particular in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
... print loop.number, item
... if not loop.last:
... print '---'
1 a
---
2 b
---
3 c
"""
import sys
from compat3 import basestring_
__all__ = ['looper']
class looper(object):
    """Wrap a sequence so template loops can inspect loop context.

    Use like::

        for loop, item in looper(seq):
            if loop.first:
                ...
    """

    def __init__(self, seq):
        self.seq = seq

    def __iter__(self):
        # Each iteration pass gets a fresh stateful iterator.
        return looper_iter(self.seq)

    def __repr__(self):
        return '<%s for %r>' % (self.__class__.__name__, self.seq)
class looper_iter(object):
    """Iterator yielding ``(loop_pos, item)`` pairs for :class:`looper`."""

    def __init__(self, seq):
        # Materialize the sequence so positions can be revisited.
        self.seq = list(seq)
        self.pos = 0

    def __iter__(self):
        return self

    def __next__(self):
        current = self.pos
        if current >= len(self.seq):
            raise StopIteration
        self.pos = current + 1
        return loop_pos(self.seq, current), self.seq[current]

    if sys.version < "3":
        # Python 2 iterator protocol compatibility.
        next = __next__
class loop_pos(object):
    """View of one position in a sequence, exposing loop context
    (index, number, first/last, neighbors, grouping helpers) to templates.
    """

    def __init__(self, seq, pos):
        self.seq = seq
        self.pos = pos

    def __repr__(self):
        return '<loop pos=%r at %r>' % (
            self.seq[self.pos], self.pos)

    def index(self):
        # 0-based position in the sequence.
        return self.pos
    index = property(index)

    def number(self):
        # 1-based position (handy for display).
        return self.pos + 1
    number = property(number)

    def item(self):
        # The current item itself.
        return self.seq[self.pos]
    item = property(item)

    def __next__(self):
        # The following item, or None at the end. NOTE: exposed as a
        # *property* below, so ``loop.__next__`` (and ``loop.next`` on
        # Python 2) is an attribute access, not a method call.
        try:
            return self.seq[self.pos + 1]
        except IndexError:
            return None
    __next__ = property(__next__)

    if sys.version < "3":
        next = __next__

    def previous(self):
        # The preceding item, or None at the start.
        if self.pos == 0:
            return None
        return self.seq[self.pos - 1]
    previous = property(previous)

    def odd(self):
        # True when the 1-based number is odd (position 0 counts as odd).
        return not self.pos % 2
    odd = property(odd)

    def even(self):
        # Truthy (1) when the 1-based number is even; returns int, not bool.
        return self.pos % 2
    even = property(even)

    def first(self):
        # True at the first position.
        return self.pos == 0
    first = property(first)

    def last(self):
        # True at the final position.
        return self.pos == len(self.seq) - 1
    last = property(last)

    def length(self):
        return len(self.seq)
    length = property(length)

    def first_group(self, getter=None):
        """
        Returns true if this item is the start of a new group,
        where groups mean that some attribute has changed. The getter
        can be None (the item itself changes), an attribute name like
        ``'.attr'``, a function, or a dict key or list index.
        """
        if self.first:
            return True
        return self._compare_group(self.item, self.previous, getter)

    def last_group(self, getter=None):
        """
        Returns true if this item is the end of a new group,
        where groups mean that some attribute has changed. The getter
        can be None (the item itself changes), an attribute name like
        ``'.attr'``, a function, or a dict key or list index.
        """
        if self.last:
            return True
        return self._compare_group(self.item, self.__next__, getter)

    def _compare_group(self, item, other, getter):
        # Dispatch on getter type: None -> direct compare; '.attr' or
        # '.meth()' string -> attribute access; callable -> call it;
        # anything else -> subscript (dict key / list index).
        if getter is None:
            return item != other
        elif (isinstance(getter, basestring_)
              and getter.startswith('.')):
            getter = getter[1:]
            if getter.endswith('()'):
                getter = getter[:-2]
                return getattr(item, getter)() != getattr(other, getter)()
            else:
                return getattr(item, getter) != getattr(other, getter)
        elif hasattr(getter, '__call__'):
            return getter(item) != getter(other)
        else:
            return item[getter] != other[getter]
|
{
"content_hash": "826e71e0dbf1d844a190fe5fc58791d2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 74,
"avg_line_length": 25.478527607361965,
"alnum_prop": 0.5463520346737298,
"repo_name": "ravenac95/virtstrap",
"id": "ee47adb5a8b94b30e219e637dae26cdf0e8a1ab2",
"size": "4153",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "virtstrap-core/virtstrap/packages/tempita/_looper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "566172"
},
{
"name": "Ruby",
"bytes": "54"
},
{
"name": "Shell",
"bytes": "37428"
}
],
"symlink_target": ""
}
|
from CIM15.IEC61970.Wires.RegulatingControl import RegulatingControl
class TapChangerControl(RegulatingControl):
"""TapChangerControl discribe behaviour specific to tap changers, e.g. how the voltage at the end of a line varies with the load level and compensation of the voltage drop by tap adjustment.TapChangerControl discribe behaviour specific to tap changers, e.g. how the voltage at the end of a line varies with the load level and compensation of the voltage drop by tap adjustment.
"""
def __init__(self, lineDropR=0.0, lineDropX=0.0, reverseLineDropX=0.0, reverseLineDropR=0.0, lineDropCompensation=False, limitVoltage=0.0, TapChanger=None, *args, **kw_args):
"""Initialises a new 'TapChangerControl' instance.
@param lineDropR: Line drop compensator resistance setting for normal (forward) power flow.
@param lineDropX: Line drop compensator reactance setting for normal (forward) power flow.
@param reverseLineDropX: Line drop compensator reactance setting for reverse power flow.
@param reverseLineDropR: Line drop compensator resistance setting for reverse power flow.
@param lineDropCompensation: If true, the line drop compensation is to be applied.
@param limitVoltage: Maximum allowed regulated voltage on the PT secondary base, regardless of line drop compensation. Sometimes referred to as first-house protection.
@param TapChanger: copy from reg conduting eq
"""
#: Line drop compensator resistance setting for normal (forward) power flow.
self.lineDropR = lineDropR
#: Line drop compensator reactance setting for normal (forward) power flow.
self.lineDropX = lineDropX
#: Line drop compensator reactance setting for reverse power flow.
self.reverseLineDropX = reverseLineDropX
#: Line drop compensator resistance setting for reverse power flow.
self.reverseLineDropR = reverseLineDropR
#: If true, the line drop compensation is to be applied.
self.lineDropCompensation = lineDropCompensation
#: Maximum allowed regulated voltage on the PT secondary base, regardless of line drop compensation. Sometimes referred to as first-house protection.
self.limitVoltage = limitVoltage
self._TapChanger = []
self.TapChanger = [] if TapChanger is None else TapChanger
super(TapChangerControl, self).__init__(*args, **kw_args)
_attrs = ["lineDropR", "lineDropX", "reverseLineDropX", "reverseLineDropR", "lineDropCompensation", "limitVoltage"]
_attr_types = {"lineDropR": float, "lineDropX": float, "reverseLineDropX": float, "reverseLineDropR": float, "lineDropCompensation": bool, "limitVoltage": float}
_defaults = {"lineDropR": 0.0, "lineDropX": 0.0, "reverseLineDropX": 0.0, "reverseLineDropR": 0.0, "lineDropCompensation": False, "limitVoltage": 0.0}
_enums = {}
_refs = ["TapChanger"]
_many_refs = ["TapChanger"]
def getTapChanger(self):
"""copy from reg conduting eq
"""
return self._TapChanger
def setTapChanger(self, value):
for x in self._TapChanger:
x.TapChangerControl = None
for y in value:
y._TapChangerControl = self
self._TapChanger = value
TapChanger = property(getTapChanger, setTapChanger)
def addTapChanger(self, *TapChanger):
    """Attach each given TapChanger to this control via its
    TapChangerControl attribute.
    """
    for tap_changer in TapChanger:
        tap_changer.TapChangerControl = self
def removeTapChanger(self, *TapChanger):
    """Detach each given TapChanger by clearing its TapChangerControl."""
    for tap_changer in TapChanger:
        tap_changer.TapChangerControl = None
|
{
"content_hash": "32b9ad56fd94eade609ebb653017cb01",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 381,
"avg_line_length": 51.94202898550725,
"alnum_prop": 0.705078125,
"repo_name": "rwl/PyCIM",
"id": "7cca9a04e0f1d7903280b382b83f86e67c6705a9",
"size": "4684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Wires/TapChangerControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
import unittest
from abelfunctions.differentials import (
mnuk_conditions,
recenter_curve,
differentials_numerators,
differentials,
validate_differentials,
Differential
)
from abelfunctions.riemann_surface import RiemannSurface
from abelfunctions.tests.test_abelfunctions import AbelfunctionsTestCase
from sage.all import QQ, CC
class DummyRS:
    """Minimal stand-in for a RiemannSurface: only carries the curve ``f``."""

    def __init__(self, f):
        self.f = f
class TestDifferentialsNumerators(AbelfunctionsTestCase):
    """Check differentials_numerators() on the fixture curves f1..f8."""

    def test_f1(self):
        x, y = self.f1.parent().gens()
        result = differentials_numerators(self.f1)
        self.assertEqual(result, [])

    def test_f2(self):
        x, y = self.f2.parent().gens()
        result = differentials_numerators(self.f2)
        expected = [x*y, x**3]
        self.assertEqual(result, expected)

    def test_f4(self):
        x, y = self.f4.parent().gens()
        result = differentials_numerators(self.f4)
        self.assertEqual(result, [])

    # def test_f5(self):
    #     x,y = self.f5.parent().gens()
    #     a = differentials_numerators(self.f5)
    #     b = [(x**2 + y**2)]
    #     self.assertEqual(a,b)

    def test_f7(self):
        x, y = self.f7.parent().gens()
        result = differentials_numerators(self.f7)
        expected = [1, y, x, x**2]
        self.assertEqual(result, expected)

    @unittest.skip("Takes too much time")
    def test_f8(self):
        x, y = self.f8.parent().gens()
        result = differentials_numerators(self.f8)
        expected = [y, x*y**3, x*y**4]
        self.assertEqual(result, expected)
class TestDifferentials(AbelfunctionsTestCase):
    """Check differentials() output and validate_differentials() guards."""

    def test_f1(self):
        x, y = self.f1.parent().gens()
        dfdy = self.f1.derivative(y)
        surface = DummyRS(self.f1)
        result = [omega.as_expression() for omega in differentials(surface)]
        self.assertEqual(result, [])

    def test_f2(self):
        x, y = self.f2.parent().gens()
        dfdy = self.f2.derivative(y)
        surface = DummyRS(self.f2)
        result = [omega.as_expression() for omega in differentials(surface)]
        expected = [x*y/dfdy, x**3/dfdy]
        self.assertEqual(result, expected)

    def test_validation_success(self):
        x, y = self.f2.parent().gens()
        dfdy = self.f2.derivative(y)
        surface = DummyRS(self.f2)
        diffs = [Differential(surface, x*y, dfdy),
                 Differential(surface, x**3, dfdy)]
        self.assertTrue(validate_differentials(diffs, 2))

    def test_validation_failures(self):
        x, y = self.f2.parent().gens()
        dfdy = self.f2.derivative(y)
        surface_a = DummyRS(self.f2)
        surface_b = DummyRS(self.f2)
        diffs_a = [Differential(surface_a, x*y, dfdy),
                   Differential(surface_a, x**3, dfdy)]
        diffs_b = [Differential(surface_b, x*y, dfdy),
                   Differential(surface_b, x**3, dfdy)]
        g = len(diffs_a)
        # Type failure: add a non-Differential value
        self.assertFalse(validate_differentials(diffs_a[:-1] + [0], g))
        # Surface failure: defined on different Riemann surfaces
        self.assertFalse(validate_differentials(diffs_a[:-1] + diffs_b[-1:], g))
        # Genus failure: too few or too many differentials
        self.assertFalse(validate_differentials(diffs_a, g + 1))
        self.assertFalse(validate_differentials(diffs_a, g - 1))
class TestCenteredAtRegularPlace(AbelfunctionsTestCase):
    # Differentials should evaluate consistently at regular places on the
    # Riemann surface (see Issue #123): omega at the (x, y)-projection of a
    # place must be numerically close to the t-centered expansion at t = 0.

    def _assert_centered_values_match(self, omegas, places):
        # Compare omega(x, y) against the series centered at each place.
        for place in places:
            a, b = place.x, place.y
            for omega in omegas:
                centered = omega.centered_at_place(place)
                self.assertLess(abs(omega(a, b) - centered(CC(0))), 1e-8)

    def test_f1_regular_places(self):
        X = RiemannSurface(self.f1)
        omegas = differentials(X)
        # the places above x = -1 are regular
        self._assert_centered_values_match(omegas, X(-1))

    def test_f2_regular_places(self):
        X = self.X2
        omegas = differentials(X)
        # the places above x = 1 are regular
        self._assert_centered_values_match(omegas, X(1))

    def test_hyperelliptic_regular_places(self):
        R = QQ['x,y']
        x, y = R.gens()
        X = RiemannSurface(y**2 - (x+1)*(x-1)*(x-2)*(x+2))
        omegas = differentials(X)
        # the places above x = 0 are regular
        self._assert_centered_values_match(omegas, X(0))

        # the places above x = oo are regular: P = (1/t, \pm 1/t**2 + O(1)),
        # although all places at infinity are treated as discriminant.
        #
        # in this particular example, omega[0] = 1/(2*y); at the places at
        # infinity these are equal to \mp 0.5 respectively (the sign switch
        # comes from the derivative dxdt = -1/t**2).
        for place in X('oo'):
            sign = place.puiseux_series.ypart[-2]
            for omega in omegas:
                centered = omega.centered_at_place(place)
                self.assertLess(abs(-sign*0.5 - centered(CC(0))), 1e-8)
|
{
"content_hash": "422309dcea8a93ccc3ccc25c1b1fc1e8",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 32.52046783625731,
"alnum_prop": 0.5641071749685308,
"repo_name": "abelfunctions/abelfunctions",
"id": "3c331153284c27ddaca01cfb9fefa9b8ff7fcc41",
"size": "5561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abelfunctions/tests/test_differentials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17832"
},
{
"name": "Cython",
"bytes": "60524"
},
{
"name": "Jupyter Notebook",
"bytes": "116412"
},
{
"name": "Python",
"bytes": "585240"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from datetime import date, datetime, timedelta
from smtplib import SMTPException
from slacker import Error as SlackerError
import icalendar
from django.contrib.auth import models as auth_models
from django.contrib.auth.models import Group
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django_date_extensions.fields import ApproximateDate, ApproximateDateField
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
from .slack_client import user_invite
from .validators import validate_approximatedate
from .default_eventpage_content import (
get_default_eventpage_data,
get_default_menu,
)
from .emails import notify_existing_user, notify_new_user
class UserManager(auth_models.BaseUserManager):
    """Manager creating users keyed by e-mail address (no username)."""

    def create_user(self, email, password=None):
        """Create and persist a regular user; *email* is mandatory."""
        if not email:
            raise ValueError("Users must have an email address")
        new_user = self.model(email=self.normalize_email(email))
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password):
        """Create a user and promote it to staff + superuser."""
        superuser = self.create_user(email, password=password)
        superuser.is_superuser = superuser.is_staff = True
        superuser.save(using=self._db)
        return superuser
@python_2_unicode_compatible
class User(auth_models.AbstractBaseUser, auth_models.PermissionsMixin):
    """Organizer account, authenticated by e-mail instead of a username."""

    email = models.EmailField(unique=True)
    first_name = models.CharField(max_length=30, blank=True)
    last_name = models.CharField(max_length=30, blank=True)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    date_joined = models.DateTimeField(auto_now_add=True)

    objects = UserManager()

    # E-mail doubles as the login identifier; no extra required fields.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = "Organizer"
        verbose_name_plural = "Organizers"

    def invite_to_slack(self):
        # Delegates to the Slack helper; may raise on network/API errors
        # (callers in Event.invite_organizer_to_team catch these).
        user_invite(self.email, self.first_name)

    def generate_password(self):
        """Set and persist a random password; return the plaintext."""
        password = User.objects.make_random_password()
        self.set_password(password)
        self.save()
        return password

    def add_to_organizers_group(self):
        # Silently no-op when the "Organizers" group does not exist.
        try:
            group = Group.objects.get(name="Organizers")
        except Group.DoesNotExist:
            return
        self.groups.add(group)

    def __str__(self):
        # Fall back to the bare e-mail when no name parts are set.
        if self.first_name == '' and self.last_name == '':
            return '{0}'.format(self.email)
        return '{0} ({1})'.format(self.get_full_name(), self.email)

    def get_short_name(self):
        return self.first_name

    def get_full_name(self):
        return "{0} {1}".format(self.first_name, self.last_name)
class EventManager(models.Manager):
    """Manager that hides soft-deleted events from every query."""

    def get_queryset(self):
        # Soft delete support: Event.delete() only flags is_deleted.
        queryset = super(EventManager, self).get_queryset()
        return queryset.filter(is_deleted=False)

    def public(self):
        """
        Only include events that are on the homepage.
        """
        return self.get_queryset().filter(is_on_homepage=True)

    def future(self):
        # Public events dated today or later, soonest first.
        today = datetime.now().strftime("%Y-%m-%d")
        return self.public().filter(date__gte=today).order_by("date")

    def past(self):
        # Public events dated strictly before today, most recent first.
        today = datetime.now().strftime("%Y-%m-%d")
        return self.public().filter(
            date__isnull=False,
            date__lt=today
        ).order_by("-date")
# Event date can't be a year only
@python_2_unicode_compatible
class Event(models.Model):
    """A single Django Girls workshop: city data plus its generated website."""

    name = models.CharField(max_length=200, null=False, blank=False)
    # Approximate: month/day may be unknown (see date_is_approximate).
    date = ApproximateDateField(
        null=True, blank=False, validators=[validate_approximatedate])
    city = models.CharField(max_length=200, null=False, blank=False)
    country = models.CharField(max_length=200, null=False, blank=False)
    latlng = models.CharField("latitude and longitude", max_length=30, null=True, blank=True)
    photo = models.ImageField(upload_to="event/cities/", null=True, blank=True,
                              help_text="The best would be 356 x 210px")
    photo_credit = models.CharField(
        max_length=200, null=True, blank=True,
        help_text=mark_safe(
            "Only use pictures with a "
            "<a href='https://creativecommons.org/licenses/'>Creative Commons license</a>."))
    photo_link = models.URLField("photo URL", null=True, blank=True)
    email = models.EmailField(
        "event email", max_length=75, null=True, blank=True)
    main_organizer = models.ForeignKey(
        User, null=True, blank=True, related_name="main_organizer")
    team = models.ManyToManyField(User, blank=True)
    is_on_homepage = models.BooleanField("visible on homepage?", default=True)
    is_deleted = models.BooleanField("deleted?", default=False)
    created_at = models.DateTimeField(auto_now_add=True, null=True)

    # Per-event website settings (rendered at djangogirls.org/<page_url>/).
    page_title = models.CharField("title", max_length=200, blank=True)
    page_description = models.TextField(
        "description", blank=True,
        default="Django Girls is a one-day workshop about programming "
        "in Python and Django tailored for women.")
    page_main_color = models.CharField(
        "main color", max_length=6, blank=True,
        help_text="Main color of the chapter in HEX", default="FF9400")
    page_custom_css = models.TextField("custom CSS rules", blank=True)
    page_url = models.CharField(
        "URL slug", max_length=200, blank=True,
        help_text="Will be used as part of the event URL (djangogirls.org/______/)")
    is_page_live = models.BooleanField("Website is ready", default=False)

    attendees_count = models.IntegerField(
        null=True, blank=True, verbose_name="Number of attendees")
    applicants_count = models.IntegerField(
        null=True, blank=True, verbose_name="Number of applicants")

    # Default manager hides soft-deleted rows; all_objects sees everything.
    objects = EventManager()
    all_objects = models.Manager()  # This includes deleted objects

    # Flags for email states
    thank_you_email_sent = models.DateTimeField(null=True, blank=True)
    submit_information_email_sent = models.DateTimeField(null=True, blank=True)
    offer_help_email_sent = models.DateTimeField(null=True, blank=True)

    def __str__(self):
        return '{}, {}'.format(self.name, self.date)

    class Meta:
        ordering = ('-date', )
        verbose_name_plural = "List of events"

    def is_upcoming(self):
        """Whether the event date is strictly in the future (False if unset)."""
        if not self.date:
            return False
        now = timezone.now()
        now = ApproximateDate(year=now.year, month=now.month, day=now.day)
        if now < self.date:
            return True
        return False

    @property
    def ical_uid(self):
        # Stable per-event identifier for iCalendar exports.
        return "event%d@djangogirls.org" % self.pk

    @property
    def date_is_approximate(self):
        # True when any of year/month/day is missing or zero.
        if not self.date:
            return True
        if not all((self.date.year, self.date.month, self.date.day)):
            return True
        return False

    def as_ical(self):
        """
        Return a representation of the current event as an icalendar.Event.

        Returns None when the date is approximate (no exact day to export).
        """
        if self.date_is_approximate:
            return None
        ymd = (self.date.year, self.date.month, self.date.day)
        event_date = date(*ymd)
        event = icalendar.Event()
        event.add("dtstart", event_date)
        event.add("dtend", event_date + timedelta(days=1))
        event.add("uid", self.ical_uid)
        event.add("summary", "Django Girls %s" % self.city)
        event.add("location", "%s, %s" % (self.country, self.city))
        return event

    def organizers(self):
        # Comma-separated "Full Name <email>" list of the whole team.
        members = ["{} <{}>".format(x.get_full_name(), x.email)
                   for x in self.team.all()]
        return ", ".join(members)

    @property
    def has_stats(self):
        return bool(self.applicants_count and self.attendees_count)

    def delete(self):
        # Soft delete: flag the row instead of removing it (EventManager
        # filters flagged rows out of default queries).
        self.is_deleted = True
        self.save()

    def add_default_content(self):
        """Populate EventPageContent with default layout"""
        data = get_default_eventpage_data()
        for i, section in enumerate(data):
            section['position'] = i
            section['content'] = render_to_string(section['template'])
            del section['template']
            self.content.create(**section)

    def add_default_menu(self):
        """Populate EventPageMenu with default links"""
        data = get_default_menu()
        for i, link in enumerate(data):
            link['position'] = i
            self.menu.create(**link)

    def invite_organizer_to_team(self, user, is_new_user, password):
        """Add *user* to the team and send the matching notification e-mail.

        New users also get a Slack invite; a failed invite is reported in
        the notification rather than raised.
        """
        self.team.add(user)
        if is_new_user:
            errors = []
            try:
                user.invite_to_slack()
            except (ConnectionError, SlackerError) as e:
                errors.append(
                    'Slack invite unsuccessful, reason: {}'.format(e)
                )
            notify_new_user(user, event=self, password=password, errors=errors)
        else:
            notify_existing_user(user, event=self)

    def add_organizer(self, email, first_name, last_name):
        """
        Add organizer to the event.

        TODO: we need to think if create_organizers and create_events
        are the best place for these logic. Maybe we should move it back to
        the models.
        """
        defaults = {
            "first_name": first_name,
            "last_name": last_name,
            "is_staff": True,
            "is_active": True
        }
        user, created = User.objects.get_or_create(
            email=email,
            defaults=defaults
        )
        password = None
        if created:
            # Brand-new account: random password + Organizers group.
            password = user.generate_password()
            user.add_to_organizers_group()
        self.invite_organizer_to_team(user, created, password)
        return user
@python_2_unicode_compatible
class ContactEmail(models.Model):
    """Record of a message submitted via the contact form.

    Saving the model also sends the e-mail (see save()).
    """

    CHAPTER, SUPPORT = 'chapter', 'support'
    CONTACT_TYPE_CHOICES = (
        (CHAPTER, 'Django Girls Local Organizers'),
        (SUPPORT, 'Django Girls HQ (Support Team)'),
    )
    name = models.CharField(max_length=128)
    email = models.EmailField(max_length=128)
    # Resolved destination address, filled in by save().
    sent_to = models.EmailField(max_length=128)
    message = models.TextField()
    event = models.ForeignKey(
        'core.Event', help_text='required for contacting a chapter',
        null=True, blank=True
    )
    contact_type = models.CharField(
        verbose_name="Who do you want to contact?",
        max_length=20, choices=CONTACT_TYPE_CHOICES, blank=False,
        default=CHAPTER
    )
    created_at = models.DateTimeField(auto_now_add=True)
    sent_successfully = models.BooleanField(default=True)

    class Meta:
        ordering = ('-created_at',)

    def __str__(self):
        return "%s to %s" % (self.email, self.sent_to)

    def save(self, *args, **kwargs):
        """Send the message by e-mail, then persist the record.

        SMTP failures are recorded in ``sent_successfully`` rather than
        raised. NOTE(review): the e-mail is sent on *every* save, not only
        on creation — confirm this is intended.
        """
        self.sent_to = self._get_to_email()
        email = EmailMessage(
            self._get_subject(),
            self.message,
            "Django Girls Contact <hello@djangogirls.org>",
            [self.sent_to],
            reply_to=["{} <{}>".format(self.name, self.email)],
            headers={'Reply-To': "{} <{}>".format(self.name, self.email)}
            # Seems like this is needed for Mandrill
        )
        try:
            email.send(fail_silently=False)
        except SMTPException:
            self.sent_successfully = False
        super(ContactEmail, self).save(*args, **kwargs)

    def _get_to_email(self):
        # Chapter messages go to the event's own address when one is set;
        # everything else goes to the HQ inbox.
        if self.event and self.event.email:
            return self.event.email
        return 'hello@djangogirls.org'

    def _get_subject(self):
        return "%s - from the djangogirls.org website" % self.name
@python_2_unicode_compatible
class EventPageContent(models.Model):
    """One content section of an event website, ordered by ``position``."""

    event = models.ForeignKey(Event, null=False,
                              blank=False, related_name="content")
    name = models.CharField(null=False, blank=False, max_length=100)
    content = models.TextField(
        null=False, blank=False, help_text="HTML allowed")
    background = models.ImageField(
        upload_to="event/backgrounds/", null=True, blank=True,
        help_text="Optional background photo")
    position = models.PositiveIntegerField(
        null=False, blank=False,
        help_text="Position of the block on the website")
    is_public = models.BooleanField(null=False, blank=False, default=False)
    coaches = models.ManyToManyField("coach.Coach", verbose_name='Coaches')
    sponsors = models.ManyToManyField("sponsor.Sponsor", verbose_name='Sponsors')

    def __str__(self):
        return "%s at %s" % (self.name, self.event)

    class Meta:
        ordering = ("position", )
        verbose_name = "Website Content"
@python_2_unicode_compatible
class EventPageMenu(models.Model):
    """One navigation link on an event website, ordered by ``position``."""

    event = models.ForeignKey(Event, null=False,
                              blank=False, related_name="menu")
    title = models.CharField(max_length=255, null=False, blank=False)
    # Stored as a path fragment, appended to the event page URL.
    url = models.CharField(
        max_length=255, null=False, blank=False,
        help_text="http://djangogirls.org/city/<the value you enter here>")
    position = models.PositiveIntegerField(
        null=False, blank=False, help_text="Order of menu")

    def __str__(self):
        return self.title

    class Meta:
        ordering = ("position", )
        verbose_name = "Website Menu"
|
{
"content_hash": "44f8aee81ce79f68600aa99c59f92638",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 93,
"avg_line_length": 35.19845360824742,
"alnum_prop": 0.6302262575968368,
"repo_name": "patjouk/djangogirls",
"id": "0e593ca72ef8deed8e20787d3f5ff2e3e44f9c46",
"size": "13657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32625"
},
{
"name": "HTML",
"bytes": "285328"
},
{
"name": "JavaScript",
"bytes": "12129"
},
{
"name": "Python",
"bytes": "396039"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
}
|
import simplejson
import json
import urllib
import time
import re
import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
import hashlib
import nose
from nose.tools import assert_equals
import sys
import os
import ConfigParser
# This hack is to add current path when running script from command line
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import BasePlugin
from BasePlugin.BasePlugin import BasePluginClass
from BasePlugin.BasePlugin import TestBasePluginClass
# Permissions: RWX for owner, WX for others. Set this here so that .pyc are created with these permissions
os.umask(022)
# Conforms to API specified here: https://github.com/mhahnel/Total-Impact/wiki/Plugin-requirements
# To do automated tests with nosy
# nosy SlidesharePlugin.py -A \'not skip\'
def skip(f):
    """Flag *f* so the nose runner can exclude it (run with -A 'not skip')."""
    setattr(f, "skip", True)
    return f
class PluginClass(BasePluginClass):
    """Total-Impact metrics plugin for Slideshare.

    Given slideshow URLs, fetches download/view/comment/favorite counts
    from the Slideshare API (see get_page / extract_stats).
    """

    # each plugin needs to customize this stuff
    SOURCE_NAME = "Slideshare"
    SOURCE_DESCRIPTION = "The best way to share presentations, documents and professional videos."
    SOURCE_URL = "http://www.slideshare.net/"
    SOURCE_ICON = "http://www.slideshare.net/favicon.ico"  #rgb(244, 138, 52)
    SOURCE_METRICS = dict(downloads="the number of downloads of the presentation",
                          favorites="the number of times a presentation has been favorited",
                          comments="the number of comments on the presentation",
                          views="the number of views of the presentation")

    DEBUG = False

    # Filled in by __init__ from config/creds.ini.
    SLIDESHARE_KEY = ""
    SLIDESHARE_SECRET = ""
    SLIDESHARE_API_URL = ""
    SLIDESHARE_URL_PATTERN = re.compile("http://www.slideshare.net/.+")

    def __init__(self):
        config = ConfigParser.ConfigParser()
        config.readfp(open('../../../config/creds.ini'))
        self.SLIDESHARE_SECRET = config.get('apis', 'Slideshare_secret')
        # Keep three literal '%s' placeholders for (ts, hash, slideshow_url),
        # substituted per request in get_page.
        url_str = "http://www.slideshare.net/api/2/get_slideshow?api_key=%s&detailed=1&ts=%s&hash=%s&slideshow_url=%s"
        self.SLIDESHARE_API_URL = url_str % (config.get('apis', 'Slideshare_key'), '%s', '%s', '%s')

    # each plugin needs to write one of these
    def get_page(self, id):
        """Fetch the raw API response for slideshow URL *id*; None on failure."""
        if not id:
            return(None)
        ts = time.time()
        # Slideshare API auth scheme: sha1(shared_secret + timestamp).
        hash_combo = hashlib.sha1(self.SLIDESHARE_SECRET + str(ts)).hexdigest()
        query_url = self.SLIDESHARE_API_URL % (ts, hash_combo, id)
        try:
            response = self.get_cache_timeout_response(query_url)
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.
            response = None
        return(response)

    def get_as_int(self, mystr):
        """Best-effort int() of a soup node's text; None when unparseable."""
        try:
            response = int(mystr.text)
        except Exception:
            # BUG FIX: narrowed from a bare "except:".
            response = None
        return(response)

    def extract_stats(self, page, id=None):
        """Parse a (header, xml) pair from get_page into a metrics dict."""
        if not page:
            return(None)
        (header, xml) = page
        soup = BeautifulStoneSoup(xml)
        #print(soup)

        downloads = self.get_as_int(soup.numdownloads)
        views = self.get_as_int(soup.numviews)
        comments = self.get_as_int(soup.numcomments)
        favorites = self.get_as_int(soup.numfavorites)

        try:
            title = soup.title.text
            title = title.encode("latin1")
        except Exception:
            # BUG FIX: narrowed from a bare "except:".
            title = ""
        try:
            upload_year = soup.created.text[-4:]
        except Exception:
            # BUG FIX: narrowed from a bare "except:".
            upload_year = ""

        response = {"show_details_url":id, "upload_year":upload_year, "downloads":downloads, "views":views, "comments":comments, "favorites":favorites, "title":title}
        return(response)

    # each plugin needs to write relevant versions of this
    def is_slideshare_url(self, id):
        response = (self.SLIDESHARE_URL_PATTERN.search(id) != None)
        return(response)

    # each plugin needs to write relevant versions of this
    def artifact_type_recognized(self, id):
        if id:
            is_recognized = self.is_slideshare_url(id)
        else:
            is_recognized = False
        return(is_recognized)

    # list of possible ids should be in order of preference, most prefered first
    # returns the first valid one, or None if none are valid
    def get_valid_id(self, list_of_possible_ids):
        for id in list_of_possible_ids:
            if (self.artifact_type_recognized(id)):
                return(id)
        return(None)

    ## this changes for every plugin
    def build_artifact_response(self, id):
        """Metrics dict for one artifact, tagged with type="slides"."""
        if not id:
            return(None)
        metrics_response = self.get_metric_values(id)
        if not metrics_response:
            return(None)
        response = dict(type="slides")
        response.update(metrics_response)
        return(response)

    ## every plugin should check API limitations and make sure they are respected here
    def get_artifacts_metrics(self, query):
        """Collect metrics for every artifact in *query*.

        Returns (response_dict, error_msg); error_msg is "TIMEOUT" when
        MAX_ELAPSED_TIME is exceeded before all artifacts are processed.
        """
        response_dict = dict()
        error_msg = None
        time_started = time.time()
        for artifact_id in query:
            (artifact_id, lookup_id) = self.get_relevant_id(artifact_id, query[artifact_id], ["url"])
            if (artifact_id):
                artifact_response = self.build_artifact_response(lookup_id)
                if artifact_response:
                    response_dict[artifact_id] = artifact_response
            if (time.time() - time_started > self.MAX_ELAPSED_TIME):
                error_msg = "TIMEOUT"
                break
        return(response_dict, error_msg)
class TestPluginClass(TestBasePluginClass):
def setup(self):
self.plugin = SlidesharePluginClass()
self.test_parse_input = self.testinput.TEST_INPUT_SLIDESHARE_URL
## this changes for every plugin
def test_build_artifact_response(self):
response = self.plugin.build_artifact_response("http://www.slideshare.net/phylogenomics/eisen")
assert_equals(response, {'favorites': 2, 'upload_year': u'2010', 'title': 'Jonathan Eisen talk at #ievobio 2010', 'downloads': 10, 'views': 71984, 'type': 'slides', 'comments': 0})
## this changes for every plugin
def test_get_artifacts_metrics(self):
response = self.plugin.get_artifacts_metrics(self.test_parse_input)
print self.test_parse_input
assert_equals(response, ({'http://www.slideshare.net/phylogenomics/eisen': {'favorites': 2, 'upload_year': u'2010', 'title': 'Jonathan Eisen talk at #ievobio 2010', 'downloads': 10, 'views': 71984, 'type': 'slides', 'comments': 0}}, None))
#each plugin should make sure its range of inputs are covered
def test_run_plugin_slideshare(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_SLIDESHARE_URL))
print response
assert_equals(len(response), 852)
def test_run_plugin_doi(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DOI))
print response
assert_equals(len(response), 649)
def test_run_plugin_pmid(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_PMID))
print response
assert_equals(len(response), 649)
def test_run_plugin_url(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_URL))
print response
assert_equals(len(response), 649)
def test_run_plugin_invalid_id(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DUD))
print response
assert_equals(len(response), 649)
def test_run_plugin_multiple(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_ALL))
print response
assert_equals(len(response), 852)
|
{
"content_hash": "0f601e239bf8477e86d9e9a2bf522cc8",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 247,
"avg_line_length": 39.18407960199005,
"alnum_prop": 0.6253174200101574,
"repo_name": "figshare/Total-Impact",
"id": "36b10267dbcb2fcea3841af07d831baad6e7704e",
"size": "7898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/metrics/Slideshare/Plugin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "586556"
},
{
"name": "PHP",
"bytes": "3194227"
},
{
"name": "Python",
"bytes": "178602"
},
{
"name": "Shell",
"bytes": "11494"
}
],
"symlink_target": ""
}
|
"""Support for Melnor RainCloud sprinkler water timer."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import (
ALLOWED_WATERING_TIME, ATTRIBUTION, CONF_WATERING_TIME, DATA_RAINCLOUD,
DEFAULT_WATERING_TIME, SWITCHES, RainCloudEntity)
_LOGGER = logging.getLogger(__name__)

# Platform configuration: which switch types to expose (defaults to all
# SWITCHES) and the manual watering duration, restricted to the values
# in ALLOWED_WATERING_TIME.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SWITCHES)):
        vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
    vol.Optional(CONF_WATERING_TIME, default=DEFAULT_WATERING_TIME):
        vol.All(vol.In(ALLOWED_WATERING_TIME)),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a sensor for a raincloud device."""
    raincloud = hass.data[DATA_RAINCLOUD].data
    default_watering_timer = config.get(CONF_WATERING_TIME)

    # One switch entity per (sensor type, faucet zone) combination.
    entities = [
        RainCloudSwitch(default_watering_timer, zone, sensor_type)
        for sensor_type in config.get(CONF_MONITORED_CONDITIONS)
        for zone in raincloud.controller.faucet.zones
    ]
    add_entities(entities, True)
class RainCloudSwitch(RainCloudEntity, SwitchDevice):
    """A switch implementation for raincloud device."""

    def __init__(self, default_watering_timer, *args):
        """Initialize a switch for raincloud device."""
        super().__init__(*args)
        self._default_watering_timer = default_watering_timer

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the device on: start manual watering or enable the schedule."""
        sensor = self._sensor_type
        if sensor == 'manual_watering':
            self.data.watering_time = self._default_watering_timer
        elif sensor == 'auto_watering':
            self.data.auto_watering = True
        self._state = True

    def turn_off(self, **kwargs):
        """Turn the device off: stop manual watering or disable the schedule."""
        sensor = self._sensor_type
        if sensor == 'manual_watering':
            self.data.watering_time = 'off'
        elif sensor == 'auto_watering':
            self.data.auto_watering = False
        self._state = False

    def update(self):
        """Refresh the cached on/off state from the RainCloud data."""
        _LOGGER.debug("Updating RainCloud switch: %s", self._name)
        sensor = self._sensor_type
        if sensor == 'manual_watering':
            self._state = bool(self.data.watering_time)
        elif sensor == 'auto_watering':
            self._state = self.data.auto_watering

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
        attributes['default_manual_timer'] = self._default_watering_timer
        attributes['identifier'] = self.data.serial
        return attributes
|
{
"content_hash": "5422a3316f294a20f0446cd3973cb39c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 35.674698795180724,
"alnum_prop": 0.6511313745356299,
"repo_name": "molobrakos/home-assistant",
"id": "e320a956f118006e791f196a16239dff934d7472",
"size": "2961",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/raincloud/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import pytimeparse
from ast import literal_eval
from distutils.util import strtobool
from flask import current_app
from .base import db
class Setting(db.Model):
# Key/value store: each setting is one row; ``value`` holds everything as
# text and is parsed by the accessor methods.
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
value = db.Column(db.Text())

# Fallback values used when a setting has no row in the database yet.
defaults = {
    # --- general UI / behaviour toggles ---
    'maintenance': False,
    'fullscreen_layout': True,
    'record_helper': True,
    'login_ldap_first': True,
    'default_record_table_size': 15,
    'default_domain_table_size': 10,
    'auto_ptr': False,
    'record_quick_edit': True,
    'pretty_ipv6_ptr': False,
    'dnssec_admins_only': False,
    'allow_user_create_domain': False,
    'allow_user_remove_domain': False,
    'allow_user_view_history': False,
    'delete_sso_accounts': False,
    'bg_domain_updates': False,
    'enable_api_rr_history': True,
    'site_name': 'PowerDNS-Admin',
    'site_url': 'http://localhost:9191',
    'session_timeout': 10,
    'warn_session_timeout': True,
    # --- PowerDNS API connection ---
    'pdns_api_url': '',
    'pdns_api_key': '',
    'pdns_api_timeout': 30,
    'pdns_version': '4.1.1',
    'verify_ssl_connections': True,
    # --- local accounts ---
    'local_db_enabled': True,
    'signup_enabled': True,
    'autoprovisioning': False,
    'urn_value':'',
    'autoprovisioning_attribute': '',
    'purge': False,
    'verify_user_email': False,
    # --- LDAP authentication ---
    'ldap_enabled': False,
    'ldap_type': 'ldap',
    'ldap_uri': '',
    'ldap_base_dn': '',
    'ldap_admin_username': '',
    'ldap_admin_password': '',
    'ldap_filter_basic': '',
    'ldap_filter_group': '',
    'ldap_filter_username': '',
    'ldap_filter_groupname': '',
    'ldap_sg_enabled': False,
    'ldap_admin_group': '',
    'ldap_operator_group': '',
    'ldap_user_group': '',
    'ldap_domain': '',
    # --- GitHub OAuth ---
    'github_oauth_enabled': False,
    'github_oauth_key': '',
    'github_oauth_secret': '',
    'github_oauth_scope': 'email',
    'github_oauth_api_url': 'https://api.github.com/user',
    'github_oauth_token_url':
    'https://github.com/login/oauth/access_token',
    'github_oauth_authorize_url':
    'https://github.com/login/oauth/authorize',
    # --- Google OAuth ---
    'google_oauth_enabled': False,
    'google_oauth_client_id': '',
    'google_oauth_client_secret': '',
    'google_token_url': 'https://oauth2.googleapis.com/token',
    'google_oauth_scope': 'openid email profile',
    'google_authorize_url': 'https://accounts.google.com/o/oauth2/v2/auth',
    'google_base_url': 'https://www.googleapis.com/oauth2/v3/',
    # --- Azure AD OAuth ([tenancy] is substituted by the admin) ---
    'azure_oauth_enabled': False,
    'azure_oauth_key': '',
    'azure_oauth_secret': '',
    'azure_oauth_scope': 'User.Read openid email profile',
    'azure_oauth_api_url': 'https://graph.microsoft.com/v1.0/',
    'azure_oauth_token_url':
    'https://login.microsoftonline.com/[tenancy]/oauth2/v2.0/token',
    'azure_oauth_authorize_url':
    'https://login.microsoftonline.com/[tenancy]/oauth2/v2.0/authorize',
    'azure_sg_enabled': False,
    'azure_admin_group': '',
    'azure_operator_group': '',
    'azure_user_group': '',
    'azure_group_accounts_enabled': False,
    'azure_group_accounts_name': 'displayName',
    'azure_group_accounts_name_re': '',
    'azure_group_accounts_description': 'description',
    'azure_group_accounts_description_re': '',
    # --- generic OpenID Connect ---
    'oidc_oauth_enabled': False,
    'oidc_oauth_key': '',
    'oidc_oauth_secret': '',
    'oidc_oauth_scope': 'email',
    'oidc_oauth_api_url': '',
    'oidc_oauth_token_url': '',
    'oidc_oauth_authorize_url': '',
    'oidc_oauth_logout_url': '',
    'oidc_oauth_username': 'preferred_username',
    'oidc_oauth_firstname': 'given_name',
    'oidc_oauth_last_name': 'family_name',
    'oidc_oauth_email': 'email',
    'oidc_oauth_account_name_property': '',
    'oidc_oauth_account_description_property': '',
    # Record types editable in forward zones.
    'forward_records_allow_edit': {
        'A': True,
        'AAAA': True,
        'AFSDB': False,
        'ALIAS': False,
        'CAA': True,
        'CERT': False,
        'CDNSKEY': False,
        'CDS': False,
        'CNAME': True,
        'DNSKEY': False,
        'DNAME': False,
        'DS': False,
        'HINFO': False,
        'KEY': False,
        'LOC': True,
        'LUA': False,
        'MX': True,
        'NAPTR': False,
        'NS': True,
        'NSEC': False,
        'NSEC3': False,
        'NSEC3PARAM': False,
        'OPENPGPKEY': False,
        'PTR': True,
        'RP': False,
        'RRSIG': False,
        'SOA': False,
        'SPF': True,
        'SSHFP': False,
        'SRV': True,
        'TKEY': False,
        'TSIG': False,
        'TLSA': False,
        'SMIMEA': False,
        'TXT': True,
        'URI': False
    },
    # Record types editable in reverse (PTR) zones.
    'reverse_records_allow_edit': {
        'A': False,
        'AAAA': False,
        'AFSDB': False,
        'ALIAS': False,
        'CAA': False,
        'CERT': False,
        'CDNSKEY': False,
        'CDS': False,
        'CNAME': False,
        'DNSKEY': False,
        'DNAME': False,
        'DS': False,
        'HINFO': False,
        'KEY': False,
        'LOC': True,
        'LUA': False,
        'MX': False,
        'NAPTR': False,
        'NS': True,
        'NSEC': False,
        'NSEC3': False,
        'NSEC3PARAM': False,
        'OPENPGPKEY': False,
        'PTR': True,
        'RP': False,
        'RRSIG': False,
        'SOA': False,
        'SPF': False,
        'SSHFP': False,
        'SRV': False,
        'TKEY': False,
        'TSIG': False,
        'TLSA': False,
        'SMIMEA': False,
        'TXT': True,
        'URI': False
    },
    'ttl_options': '1 minute,5 minutes,30 minutes,60 minutes,24 hours',
    'otp_field_enabled': True,
    'custom_css': '',
    'max_history_records': 1000
}
# NOTE(review): a previous overload of __init__ that accepted an explicit
# ``id`` argument was immediately shadowed by this definition (two defs with
# the same name in one class body); the dead first definition is removed.
def __init__(self, name=None, value=None):
    """Create a Setting row.

    The primary key is intentionally left unset so the database
    autoincrement assigns its own ID.
    """
    self.id = None
    self.name = name
    self.value = value
def set_maintenance(self, mode):
    """Persist the 'maintenance' flag, creating the row if missing.

    Returns True on success (including no-op when the value is
    unchanged), False when the commit fails.
    """
    maintenance = Setting.query.filter(
        Setting.name == 'maintenance').first()

    if maintenance is None:
        # Seed the row from the defaults table on first use.
        value = self.defaults['maintenance']
        maintenance = Setting(name='maintenance', value=str(value))
        db.session.add(maintenance)

    mode = str(mode)

    try:
        if maintenance.value != mode:
            maintenance.value = mode
            db.session.commit()
        return True
    except Exception as e:
        current_app.logger.error('Cannot set maintenance to {0}. DETAIL: {1}'.format(
            mode, e))
        # BUGFIX: was traceback.format_exec() — no such function exists;
        # format_exc() returns the formatted traceback string.
        current_app.logger.debug(traceback.format_exc())
        db.session.rollback()
        return False
def toggle(self, setting):
    """Flip a boolean setting stored as the string "True"/"False".

    Creates the row from defaults if it does not exist yet.
    Returns True on success, False when the commit fails.
    """
    current_setting = Setting.query.filter(Setting.name == setting).first()

    if current_setting is None:
        value = self.defaults[setting]
        current_setting = Setting(name=setting, value=str(value))
        db.session.add(current_setting)

    try:
        if current_setting.value == "True":
            current_setting.value = "False"
        else:
            current_setting.value = "True"
        db.session.commit()
        return True
    except Exception as e:
        current_app.logger.error('Cannot toggle setting {0}. DETAIL: {1}'.format(
            setting, e))
        # BUGFIX: was traceback.format_exec() — typo for format_exc().
        current_app.logger.debug(traceback.format_exc())
        db.session.rollback()
        return False
def set(self, setting, value):
    """Store `value` (stringified) under `setting`, creating the row if needed.

    Returns True on success, False when the commit fails.
    """
    current_setting = Setting.query.filter(Setting.name == setting).first()

    if current_setting is None:
        current_setting = Setting(name=setting, value=None)
        db.session.add(current_setting)

    value = str(value)

    try:
        current_setting.value = value
        db.session.commit()
        return True
    except Exception as e:
        current_app.logger.error('Cannot edit setting {0}. DETAIL: {1}'.format(
            setting, e))
        # BUGFIX: was traceback.format_exec() — typo for format_exc().
        current_app.logger.debug(traceback.format_exc())
        db.session.rollback()
        return False
def get(self, setting):
    """Resolve a setting value.

    Precedence: Flask app config (upper-cased key) > database row >
    built-in default. String "True"/"False" values are converted to
    booleans. Unknown settings are logged and yield None.
    """
    if setting not in self.defaults:
        current_app.logger.error('Unknown setting queried: {0}'.format(setting))
        return None

    if setting.upper() in current_app.config:
        result = current_app.config[setting.upper()]
    else:
        result = self.query.filter(Setting.name == setting).first()

    if result is None:
        # No override anywhere — fall back to the shipped default.
        return self.defaults[setting]

    if hasattr(result, 'value'):
        # Database row: unwrap the stored string.
        result = result.value
    return strtobool(result) if result in ['True', 'False'] else result
def get_records_allow_to_edit(self):
    """Union of forward and reverse editable record types, deduplicated."""
    forward = self.get_forward_records_allow_to_edit()
    reverse = self.get_reverse_records_allow_to_edit()
    return list(set(forward + reverse))
def get_forward_records_allow_to_edit(self):
    """Return the record types enabled for forward-zone editing."""
    records = self.get('forward_records_allow_edit')
    # The setting may come back as a stringified dict from the DB.
    if isinstance(records, str):
        records = literal_eval(records)
    names = [rtype for rtype, allowed in records.items() if allowed]
    # Dicts only preserve insertion order from 3.6 on; sort for older runtimes.
    if sys.version_info < (3, 6):
        names.sort()
    return names
def get_reverse_records_allow_to_edit(self):
    """Return the record types enabled for reverse-zone editing."""
    records = self.get('reverse_records_allow_edit')
    # The setting may come back as a stringified dict from the DB.
    if isinstance(records, str):
        records = literal_eval(records)
    names = [rtype for rtype, allowed in records.items() if allowed]
    # Dicts only preserve insertion order from 3.6 on; sort for older runtimes.
    if sys.version_info < (3, 6):
        names.sort()
    return names
def get_ttl_options(self):
    """Parse the comma-separated 'ttl_options' setting.

    Returns a list of (seconds, label) pairs, seconds parsed by pytimeparse.
    """
    labels = self.get('ttl_options').split(',')
    return [(pytimeparse.parse(label), label) for label in labels]
|
{
"content_hash": "53f5f9857007b9c72c6f69f557541e22",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 89,
"avg_line_length": 34.557993730407524,
"alnum_prop": 0.5122460087082729,
"repo_name": "ngoduykhanh/PowerDNS-Admin",
"id": "a46cfb6bb067a3f758b31d57c7dc3c2e5d229692",
"size": "11024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerdnsadmin/models/setting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "558"
},
{
"name": "Dockerfile",
"bytes": "4237"
},
{
"name": "HTML",
"bytes": "280959"
},
{
"name": "JavaScript",
"bytes": "11375"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "387609"
},
{
"name": "Shell",
"bytes": "1483"
},
{
"name": "TSQL",
"bytes": "3112"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from tables.tests import common
from tables.tests.common import verbose
from tables.tests.common import unittest
from tables.tests.common import PyTablesTestCase as TestCase
# Check indexes from PyTables version 2.0
class IndexesTestCase(common.TestFileMixin, TestCase):
    """Back-compat checks for indexes created by older PyTables versions.

    Subclasses point ``h5fname`` at a fixture file; each test compares an
    indexed selection on ``table1`` against a plain Python scan of ``table2``.
    """

    def setUp(self):
        super(IndexesTestCase, self).setUp()
        self.table1 = self.h5file.root.table1
        self.table2 = self.h5file.root.table2
        # Selection limits: lower bound 0, upper bound one index slice.
        self.il = 0
        self.sl = self.table1.cols.var1.index.slicesize

    def test00_version(self):
        """Checking index version."""
        t1var1 = self.table1.cols.var1
        # The fixture filename encodes which PyTables version wrote it.
        if "2_0" in self.h5fname:
            self.assertEqual(t1var1.index._v_version, "2.0")
        elif "2_1" in self.h5fname:
            self.assertEqual(t1var1.index._v_version, "2.1")

    def test01_string(self):
        """Checking string indexes."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_string..." % self.__class__.__name__)

        table1 = self.table1
        table2 = self.table2

        # Convert the limits to the appropriate type
        il = str(self.il).encode('ascii')
        sl = str(self.sl).encode('ascii')

        # Do some selections and check the results
        # First selection
        t1var1 = table1.cols.var1
        self.assertTrue(t1var1 is not None)
        results1 = [p["var1"] for p in
                    table1.where('(il<=t1var1)&(t1var1<=sl)')]
        results2 = [p["var1"] for p in table2 if il <= p["var1"] <= sl]
        # Index selections do not guarantee row order; sort before comparing.
        results1.sort()
        results2.sort()
        if verbose:
            print("Should look like:", results2)
            print("Length results:", len(results1))
            print("Should be:", len(results2))
        self.assertEqual(len(results1), len(results2))
        self.assertEqual(results1, results2)

    def test02_bool(self):
        """Checking bool indexes."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_bool..." % self.__class__.__name__)

        table1 = self.table1
        table2 = self.table2

        # Do some selections and check the results
        t1var2 = table1.cols.var2
        self.assertTrue(t1var2 is not None)
        results1 = [p["var2"] for p in table1.where('t1var2 == True')]
        results2 = [p["var2"] for p in table2 if p["var2"] is True]
        if verbose:
            print("Selection results (index):", results1)
            print("Should look like:", results2)
            print("Length results:", len(results1))
            print("Should be:", len(results2))
        self.assertEqual(len(results1), len(results2))
        self.assertEqual(results1, results2)

    def test03_int(self):
        """Checking int indexes."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test03_int..." % self.__class__.__name__)

        table1 = self.table1
        table2 = self.table2

        # Convert the limits to the appropriate type
        il = int(self.il)
        sl = int(self.sl)

        # Do some selections and check the results
        t1col = table1.cols.var3
        self.assertTrue(t1col is not None)

        # First selection
        results1 = [p["var3"] for p in table1.where('(il<=t1col)&(t1col<=sl)')]
        results2 = [p["var3"] for p in table2
                    if il <= p["var3"] <= sl]
        # sort lists (indexing does not guarantee that rows are returned in
        # order)
        results1.sort()
        results2.sort()
        if verbose:
            print("Length results:", len(results1))
            print("Should be:", len(results2))
        self.assertEqual(len(results1), len(results2))
        self.assertEqual(results1, results2)

    def test04_float(self):
        """Checking float indexes."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test04_float..." % self.__class__.__name__)

        table1 = self.table1
        table2 = self.table2

        # Convert the limits to the appropriate type
        il = float(self.il)
        sl = float(self.sl)

        # Do some selections and check the results
        t1col = table1.cols.var4
        self.assertTrue(t1col is not None)

        # First selection
        results1 = [p["var4"] for p in table1.where('(il<=t1col)&(t1col<=sl)')]
        results2 = [p["var4"] for p in table2
                    if il <= p["var4"] <= sl]
        # sort lists (indexing does not guarantee that rows are returned in
        # order)
        results1.sort()
        results2.sort()
        if verbose:
            print("Length results:", len(results1))
            print("Should be:", len(results2))
        self.assertEqual(len(results1), len(results2))
        # BUGFIX: was assertEqual(results1.sort(), results2.sort()) —
        # list.sort() returns None, so that assertion compared None to None
        # and could never fail. The lists are already sorted above.
        self.assertEqual(results1, results2)
# Check indexes from PyTables version 2.0
class Indexes2_0TestCase(IndexesTestCase):
    # Fixture file written by PyTables 2.0; the version string in the
    # filename is also what test00_version keys on.
    h5fname = TestCase._testFilename("indexes_2_0.h5")
# Check indexes from PyTables version 2.1
class Indexes2_1TestCase(IndexesTestCase):
    # Fixture file written by PyTables 2.1; the version string in the
    # filename is also what test00_version keys on.
    h5fname = TestCase._testFilename("indexes_2_1.h5")
def suite():
    """Assemble the unittest suite covering both back-compat fixtures."""
    the_suite = unittest.TestSuite()
    # Single iteration by default; bump the range to stress-test repeatedly.
    for _ in range(1):
        the_suite.addTest(unittest.makeSuite(Indexes2_0TestCase))
        the_suite.addTest(unittest.makeSuite(Indexes2_1TestCase))
    return the_suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import sys
    # Strip/interpret PyTables-specific CLI flags before unittest sees them.
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
|
{
"content_hash": "402a519583f1fa0153b45a22aac1e116",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.5870583994232156,
"repo_name": "jack-pappas/PyTables",
"id": "fc188bd42172f7a50b1e5744983f5e4d7e9e19c1",
"size": "5573",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tables/tests/test_index_backcompat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "901914"
},
{
"name": "C++",
"bytes": "97381"
},
{
"name": "CSS",
"bytes": "2717"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "JavaScript",
"bytes": "3491"
},
{
"name": "Makefile",
"bytes": "11351"
},
{
"name": "Objective-C",
"bytes": "31966"
},
{
"name": "Python",
"bytes": "3594491"
},
{
"name": "Shell",
"bytes": "23613"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from app import app
import unittest
class BaseTestCase(unittest.TestCase):
    """Shared base for the app's test cases.

    ``__test__ = False`` keeps collectors from running this class itself.
    """

    __test__ = False

    def setUp(self):
        # Switch to the testing configuration before anything else.
        app.config.from_object('config.TestingConfig')
        self.app = app.test_client()
        # Push a request context so request-bound helpers work inside tests.
        ctx = app.test_request_context()
        ctx.push()
|
{
"content_hash": "ae2d06977715f352049843cdf06a01c5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 24.294117647058822,
"alnum_prop": 0.6755447941888619,
"repo_name": "pipex/gitbot",
"id": "254c743e8b17477de89ce2a7cf802469ab70b93f",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10"
},
{
"name": "Python",
"bytes": "47243"
},
{
"name": "Shell",
"bytes": "88"
}
],
"symlink_target": ""
}
|
def unique(iterable, key=None):
    """Lazily yield elements of *iterable* in order, skipping duplicates.

    When *key* is given, two items are considered duplicates if their
    keys are equal; the first item with a given key wins.
    """
    seen = set()
    for item in iterable:
        marker = item if key is None else key(item)
        if marker not in seen:
            seen.add(marker)
            yield item
# Inline smoke tests: run this file directly to execute them.
if __name__ == "__main__":
    assert list(unique("abcd")) == list("abcd"), (list(unique("abcd")), "abcd".split())
    assert list(unique("abca")) == list("abc")
    assert list(unique("baaca")) == list("bac")
    assert list(unique("")) == []
    assert list(unique("to be or not to be".split())) == "to be or not".split()
    # With key=len, only the first word of each length survives.
    assert list(unique("to be or not to be".split(), key=len)) == "to not".split()
    # Iteration order of a set is arbitrary but stable within one run.
    assert list(unique(set("cabbage"))) == list(unique(set("cabbage")))
    print("All tests passed.")
|
{
"content_hash": "f85ef161c0fbcc3f1564c1749f1db3f2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.5304812834224599,
"repo_name": "ActiveState/code",
"id": "74304e46567a4e2cd24dfbc36c6b25fededb61fc",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577768_lazy_ordered_unique_elements/recipe-577768.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
import numpy as np
from ..base import BaseEstimator
from ._base import SelectorMixin
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(SelectorMixin, BaseEstimator):
    """Feature selector that removes all low-variance features.

    Unsupervised: only the features (X) are examined, never the targets,
    so it can run before any labelling exists.

    Read more in the :ref:`User Guide <variance_threshold>`.

    Parameters
    ----------
    threshold : float, default=0
        Features whose training-set variance does not exceed this value
        are dropped. The default keeps every feature with non-zero
        variance, i.e. removes features constant across all samples.

    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.

    Notes
    -----
    Allows NaN in the input.
    Raises ValueError if no feature in X meets the variance threshold.

    Examples
    --------
    The following dataset has integer features, two of which are the same
    in every sample. These are removed with the default setting for threshold::

        >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
        >>> selector = VarianceThreshold()
        >>> selector.fit_transform(X)
        array([[2, 0],
               [1, 4],
               [1, 1]])
    """

    def __init__(self, threshold=0.):
        self.threshold = threshold

    def fit(self, X, y=None):
        """Learn empirical variances from X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Sample vectors from which to compute variances.

        y : any, default=None
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.

        Returns
        -------
        self
        """
        X = self._validate_data(X, accept_sparse=('csr', 'csc'),
                                dtype=np.float64,
                                force_all_finite='allow-nan')

        sparse_input = hasattr(X, "toarray")
        if sparse_input:
            _, self.variances_ = mean_variance_axis(X, axis=0)
        else:
            self.variances_ = np.nanvar(X, axis=0)

        if self.threshold == 0:
            # Peak-to-peak catches constant features that finite-precision
            # variance might report as a tiny non-zero value.
            if sparse_input:
                mins, maxes = min_max_axis(X, axis=0)
                peak_to_peaks = maxes - mins
            else:
                peak_to_peaks = np.ptp(X, axis=0)
            compare_arr = np.array([self.variances_, peak_to_peaks])
            self.variances_ = np.nanmin(compare_arr, axis=0)

        if np.all(~np.isfinite(self.variances_) |
                  (self.variances_ <= self.threshold)):
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))

        return self

    def _get_support_mask(self):
        # Mask of features whose variance strictly exceeds the threshold.
        check_is_fitted(self)
        return self.variances_ > self.threshold

    def _more_tags(self):
        return {'allow_nan': True}
|
{
"content_hash": "8dd97e7cd2cd02ea4ceb30297d775774",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 33.46,
"alnum_prop": 0.5777047220561865,
"repo_name": "ryfeus/lambda-packs",
"id": "39892876a6478f9d326bd62775e87d742c4f1684",
"size": "3395",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/sklearn/feature_selection/_variance_threshold.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
class Solution(object):
    """LeetCode 292 (Nim Game)."""

    def canWinNim(self, n):
        """Return True when the first player can force a win with n stones.

        :type n: int
        :rtype: bool
        """
        # Losing positions are exactly the multiples of 4.
        return n % 4 != 0
|
{
"content_hash": "7519bfb0cfb0f55f2307ef227a7e250f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 59,
"avg_line_length": 25.25,
"alnum_prop": 0.504950495049505,
"repo_name": "tedye/leetcode",
"id": "90e1076fa09e9db3bb2e67c3d76e6facf87a4d79",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/leetcode.292.nim-game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "188491"
},
{
"name": "Python",
"bytes": "219034"
},
{
"name": "SQLPL",
"bytes": "424"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
}
|
"""
lighting.py
--------------
Hold basic information about lights.
Forked from the light model in `pyrender`:
https://github.com/mmatl/pyrender
"""
import numpy as np
from .. import util
from .. import visual
from .. import transformations
# default light color
_DEFAULT_RGBA = np.array([60, 60, 60, 255], dtype=np.uint8)
class Light(util.ABC):
    """
    Base class for all light objects.

    Attributes
    ----------
    name : str, optional
      Name of the light; auto-generated when not supplied.
    color : (4,) uint8
      RGBA value for the light's color in linear space.
    intensity : float
      Brightness of the light; units depend on the light type
      (candela for point/spot, lux for directional).
    radius : float
      Cutoff distance past which intensity is treated as zero.
      None means infinite.
    """

    def __init__(self,
                 name=None,
                 color=None,
                 intensity=None,
                 radius=None):
        if name is None:
            # no name given: generate something unique
            self.name = 'light_{}'.format(util.unique_id(6).upper())
        else:
            self.name = name
        self.color = color
        self.intensity = intensity
        self.radius = radius

    @property
    def color(self):
        """RGBA color as a (4,) uint8 array."""
        return self._color

    @color.setter
    def color(self, value):
        if value is None:
            self._color = _DEFAULT_RGBA
            return
        rgba = visual.to_rgba(value)
        # to_rgba may return a (n, 4) array; take the first row.
        if len(rgba.shape) == 2:
            rgba = rgba[0]
        if rgba.shape != (4,):
            raise ValueError("couldn't convert color to RGBA!")
        self._color = rgba

    @property
    def intensity(self):
        """Brightness; defaults to 1.0 when set to None."""
        return self._intensity

    @intensity.setter
    def intensity(self, value):
        self._intensity = 1.0 if value is None else float(value)

    @property
    def radius(self):
        """Cutoff distance, or None for infinite."""
        return self._radius

    @radius.setter
    def radius(self, value):
        if value is None or value < 0.0:
            # NOTE(review): negative values are stored as-is here even though
            # the docstring says radius must be > 0 — confirm intent upstream.
            self._radius = value
        else:
            self._radius = float(value)
class DirectionalLight(Light):
    """
    A light that acts as though infinitely far away, emitting along the
    local -z axis. Orientation comes from the owning node; position and
    scale are ignored. Not attenuated with distance; intensity is in
    lux (lm/m2).

    Attributes
    ----------
    name : str, optional
      Name of the light.
    color : (4,) uint8
      RGBA value for the light's color in linear space.
    intensity : float
      Brightness; directional lights use illuminance in lux (lm/m2).
    radius : float
      Cutoff distance (point/spot only); None means infinite.
    """

    def __init__(self,
                 name=None,
                 color=None,
                 intensity=None,
                 radius=None):
        # No extra state: everything is handled by the base class.
        super(DirectionalLight, self).__init__(name=name,
                                               color=color,
                                               intensity=intensity,
                                               radius=radius)
class PointLight(Light):
    """
    A light that emits in all directions from its position; rotation and
    scale are ignored. Brightness falls off with the inverse square of
    distance. Intensity is in candela (lm/sr).

    Attributes
    ----------
    name : str, optional
      Name of the light.
    color : (4,) uint8
      RGBA value for the light's color in linear space.
    intensity : float
      Brightness; point lights use luminous intensity in candela (lm/sr).
    radius : float
      Cutoff distance; None means infinite.
    """

    def __init__(self,
                 name=None,
                 color=None,
                 intensity=None,
                 radius=None):
        # No extra state: everything is handled by the base class.
        super(PointLight, self).__init__(name=name,
                                         color=color,
                                         intensity=intensity,
                                         radius=radius)
class SpotLight(Light):
    """
    A light emitting in a cone along the local -z axis. The cone is
    described by `innerConeAngle` (full brightness inside) and
    `outerConeAngle` (falloff ends). Brightness falls off with the
    inverse square of distance; intensity is in candela (lm/sr).
    Position and orientation come from the owning node; scale is
    ignored except for its effect on position/orientation.

    Attributes
    ----------
    name : str, optional
      Name of the light.
    color : (4,) uint8
      RGBA value for the light's color in linear space.
    intensity : float
      Brightness in candela (lm/sr).
    radius : float
      Cutoff distance; None means infinite.
    innerConeAngle : float
      Radians from the axis where falloff begins;
      0 <= innerConeAngle < outerConeAngle.
    outerConeAngle : float
      Radians from the axis where falloff ends;
      innerConeAngle < outerConeAngle <= pi/2.
    """

    def __init__(self,
                 name=None,
                 color=None,
                 intensity=None,
                 radius=None,
                 innerConeAngle=0.0,
                 outerConeAngle=np.pi / 4.0):
        super(SpotLight, self).__init__(name=name,
                                        color=color,
                                        intensity=intensity,
                                        radius=radius)
        # Set the outer angle first: the inner-angle setter validates
        # against self.outerConeAngle, which must already exist.
        self.outerConeAngle = outerConeAngle
        self.innerConeAngle = innerConeAngle

    @property
    def innerConeAngle(self):
        """Falloff start angle, in radians from the spotlight axis."""
        return self._innerConeAngle

    @innerConeAngle.setter
    def innerConeAngle(self, value):
        if value < 0.0 or value > self.outerConeAngle:
            raise ValueError('Invalid value for inner cone angle')
        self._innerConeAngle = float(value)

    @property
    def outerConeAngle(self):
        """Falloff end angle, in radians; at most pi/2 (with tolerance)."""
        return self._outerConeAngle

    @outerConeAngle.setter
    def outerConeAngle(self, value):
        # Small epsilon tolerates floating-point representations of pi/2.
        if value < 0.0 or value > np.pi / 2.0 + 1e-9:
            raise ValueError('Invalid value for outer cone angle')
        self._outerConeAngle = float(value)
def autolight(scene):
    """
    Generate a list of lights for a scene that looks decent.

    Parameters
    --------------
    scene : trimesh.Scene
      Scene with geometry

    Returns
    --------------
    lights : [Light]
      List of light objects
    transforms : (len(lights), 4, 4) float
      Transformation matrices for light positions.
    """
    # one default point light per corner of the scene's AABB
    lights = [PointLight() for _ in range(2)]
    transforms = [transformations.translation_matrix(corner)
                  for corner in scene.bounds]
    return lights, transforms
|
{
"content_hash": "b6fdcc330a10800a9ce13a707ef77468",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 83,
"avg_line_length": 32.47653429602888,
"alnum_prop": 0.6159404179635394,
"repo_name": "mikedh/trimesh",
"id": "83c90ca3cf76dd6be041873bcd320b9f03dee08a",
"size": "8996",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "trimesh/scene/lighting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "5887"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "2142314"
},
{
"name": "Shell",
"bytes": "5161"
}
],
"symlink_target": ""
}
|
import re
import logging
from datetime import datetime
from xml.etree import ElementTree as etree
from unidecode import unidecode
class Result(object):
    """Wrap a Conscribo XML API response for convenient inspection."""

    def __init__(self, xml):
        # Log the raw payload for debugging before parsing it.
        logging.info(xml)
        self.root = etree.fromstring(bytes(xml))

    @property
    def success(self):
        """True when the response's <success> element is "1"."""
        return self.root.findall("success")[0].text == "1"

    @property
    def notifications(self):
        """Texts of all <notification> elements under <notifications>."""
        nodes = self.root.findall("notifications/notification")
        return [node.text for node in nodes]

    def raise_for_status(self):
        """Raise ResultException with all notifications on failure."""
        if not self.success:
            raise ResultException(self.notifications)
class ResultException(Exception):
    """Raised when the API reports failure; carries all notifications."""

    def __init__(self, notifications):
        joined = "\n".join(notifications)
        super(ResultException, self).__init__(
            "API exception(s) occurred:\n" + joined)
class Request(object):
    """Build a Conscribo API request document around a <command>."""

    def __init__(self, command, **kwargs):
        root = etree.Element("request")
        etree.SubElement(root, "command").text = command
        # Extra keyword arguments become child elements; None means "omit".
        for key, val in kwargs.items():
            if val is not None:
                etree.SubElement(root, key).text = val
        self.request = root

    def get(self):
        """Serialize the request, logging the document that goes out."""
        document = self._get()
        logging.info(document)
        return document

    def _get(self):
        # ElementTree's unicode serialization omits the declaration; add it.
        header = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        return header + etree.tostring(self.request, encoding='unicode')
class AuthenticateRequest(Request):
    """Request for the authenticateWithUserAndPass command."""

    def __init__(self, key, passphrase):
        super(AuthenticateRequest, self).__init__(
            "authenticateWithUserAndPass",
            userName=key,
            passPhrase=passphrase)
class AuthenticateResult(Result):
    """Response to an authentication request."""

    @property
    def sessionId(self):
        """Session token returned by the API on successful login."""
        return self.root.findall("sessionId")[0].text
class TransactionRequest(Request):
    """Request for listTransactions, with optional paging and filters."""

    def __init__(self, limit=None, offset=None):
        super(TransactionRequest, self).__init__(
            "listTransactions", limit=limit, offset=offset)
        # Filters are appended under a shared <filters> container.
        self.filters = etree.SubElement(self.request, "filters")

    def filterDate(self, date_start=None, date_end=None):
        """Add a date-range filter; either bound may be omitted."""
        node = etree.SubElement(self.filters, "filter")
        for tag, bound in (("dateStart", date_start), ("dateEnd", date_end)):
            if bound is not None:
                etree.SubElement(node, tag).text = bound.strftime("%Y-%m-%d")
class TransactionResult(Result):
    """Response to listTransactions, filtered to Tantalus-tagged entries."""

    @property
    def transactions(self):
        """Wrap every interesting <transaction> node in a TransactionXML."""
        nodes = self.root.findall("transactions/transaction")
        return [TransactionXML(node) for node in nodes
                if self.is_interesting_transaction(node)]

    @staticmethod
    def is_interesting_transaction(transaction):
        """True when the description carries a Tantalus tag "T#[digits]"."""
        description = transaction.findall("description")[0].text
        return re.search(r"T\#\[\d*\]", description) is not None
class TransactionXML:
    """A bookkeeping transaction, convertible to/from Conscribo XML.

    Construct either from a Tantalus integer id (fresh transaction) or
    from a parsed <transaction> element (existing transaction whose
    description embeds "Tantalus ID: T#[<id>]").
    """

    def __init__(self, node_or_id, reference="", description=""):
        # FIX: use isinstance instead of `type(x) == int` (idiomatic type check).
        if isinstance(node_or_id, int):
            # Fresh transaction: dated today, no rows, no server id yet.
            self.identifier = node_or_id
            self.reference = reference
            self.description = description
            self.date = datetime.now().date()
            self.rows = []
            self.transactionid = None
        else:
            # Parse an existing <transaction> node.
            self.node = node_or_id
            regexp = r"Tantalus ID\:\s*T\#\[(\d*)\]"
            self.description = re.sub(regexp, "", self.node.findall("description")[0].text)
            self.identifier = int(re.search(regexp, self.node.findall("description")[0].text).group(1))
            self.date = datetime.strptime(self.node.findall("date")[0].text, "%Y-%m-%d").date()
            self.rows = [TransactionXMLRow(row) for row in self.node.findall("transactionRows")[0]]
            # All rows share the same reference; take it from the first.
            self.reference = self.rows[0].reference
            self.transactionid = int(self.node.findall("transactionId")[0].text)

    def toxml(self):
        """Serialize back to a <transaction> element (id tag re-embedded)."""
        transaction = etree.Element("transaction")
        if self.transactionid is not None:
            etree.SubElement(transaction, "transactionId").text = str(self.transactionid)
        etree.SubElement(transaction, "description").text = "{}\nTantalus ID: T#[{}]".format(unidecode(self.description), self.identifier)
        etree.SubElement(transaction, "date").text = self.date.strftime("%Y-%m-%d")
        xmlrows = etree.SubElement(transaction, "transactionRows")
        for row in self.rows:
            row.toxml("{}".format(self.reference), xmlrows)
        return transaction

    def __eq__(self, other):
        # Identity is the Tantalus identifier, not the server transactionid.
        return other.identifier == self.identifier
def int_to_money(x):
    """Format an integer amount of cents as a Dutch money string "U,CC".

    Examples: 1234 -> "12,34", 5 -> "0,05", 0 -> "0,00".
    FIX: the previous string-slicing version produced garbage for
    negative amounts (e.g. -5 -> "0,-5"); signs are now handled.
    """
    cents = int(x)
    sign = "-" if cents < 0 else ""
    cents = abs(cents)
    return f"{sign}{cents // 100},{cents % 100:02d}"
def money_to_int(s):
    """Parse a Dutch money string ("U,CC", "U,C", "U," or "U") to cents.

    Examples: "12,34" -> 1234, "12,3" -> 1230, "12," -> 1200, "12" -> 1200.
    FIX: the trailing-comma case previously returned the unit amount
    without the *100 scaling ("12," -> 12), inconsistent with the
    other branches.
    """
    if ',' not in s:
        return int(s) * 100
    if s[-1] == ',':
        return int(s[:-1]) * 100
    if s[-2] == ',':
        # one decimal digit given: tens of cents
        return int(f"{s[:-2]}{s[-1]}") * 10
    if s[-3] == ',':
        return int(f"{s[:-3]}{s[-2]}{s[-1]}")
class TransactionXMLRow:
    """One ledger line of a transaction: amount, side, account, VAT."""

    def __init__(self, node=None, amount=0, account=999, credit=True, vatcode="", vat=0):
        if node is None:
            # Build a fresh row from the keyword arguments.
            self.amount = amount
            self.credit = credit
            self.account = account
            self.reference = ""
            self.vatAmount = vat
            self.vatCode = vatcode
        else:
            # Parse an existing <transactionRow> node.
            self.credit = node.findall("side")[0].text == "credit"
            self.account = int(node.findall("accountNr")[0].text)
            self.reference = node.findall("reference")[0].text
            self.vatCode = node.findall("vatCode")[0].text
            self.vatAmount = money_to_int(node.findall("vatAmount")[0].text)
            # Stored amount excludes VAT; the XML amount includes it.
            self.amount = money_to_int(node.findall("amount")[0].text) - self.vatAmount

    def toxml(self, reference, addto):
        """Append this row as a <transactionRow> child of `addto`."""
        node = etree.SubElement(addto, "transactionRow")
        etree.SubElement(node, "amount").text = int_to_money(self.amount + self.vatAmount)
        etree.SubElement(node, "side").text = "credit" if self.credit else "debet"
        etree.SubElement(node, "accountNr").text = str(self.account)
        etree.SubElement(node, "reference").text = reference
        if self.vatCode != "":
            etree.SubElement(node, "vatCode").text = self.vatCode
            etree.SubElement(node, "vatAmount").text = int_to_money(self.vatAmount)

    def __repr__(self):
        side = "Credit" if self.credit else "Debet"
        return "{} to {} {}".format(self.amount, self.account, side)

    def __str__(self):
        return self.__repr__()
class ListAccountsRequest(Request):
    """Request the account list, defaulting to today's date."""

    def __init__(self, date=None):
        super(ListAccountsRequest, self).__init__("listAccounts")
        when = datetime.now().date() if date is None else date
        etree.SubElement(self.request, "date").text = when.strftime("%Y-%m-%d")
class ListAccountsResult(Result):
    """Response to listAccounts."""

    @property
    def accounts(self):
        """All <account> nodes wrapped as AccountXML objects."""
        nodes = self.root.findall("accounts/account")
        return [AccountXML(node) for node in nodes]
class AccountXML:
    """A ledger account parsed from an <account> element."""

    def __init__(self, node):
        self.account = int(node.findall("accountNr")[0].text)
        self.name = node.findall("accountName")[0].text
        # Accounts are either result (P&L) or balance accounts.
        self.result = node.findall("type")[0].text == "result"

    def __repr__(self):
        kind = "Result" if self.result else "Balance"
        return "{} {}[{}]".format(kind, self.name, self.account)

    def __str__(self):
        return str(self.account)

    def __int__(self):
        return self.account
class TransactionPutRequest(Request):
    """Request that adds or changes a transaction (addChangeTransaction)."""

    def __init__(self, transaction):
        super(TransactionPutRequest, self).__init__("addChangeTransaction")
        # Splice the serialized transaction's children into the request body.
        self.request.extend(transaction.toxml())
class TransactionPutResult(Result):
    """Response to addChangeTransaction; writes the assigned id back."""

    def __init__(self, data, transaction):
        super(TransactionPutResult, self).__init__(data)
        if self.success:
            # The server assigns the bookkeeping id; mirror it locally.
            transaction.transactionid = int(
                self.root.findall("transactionId")[0].text)
class ListVatCodesRequest(Request):
    """Request the VAT code list, defaulting to today's date."""

    def __init__(self, date=None):
        super(ListVatCodesRequest, self).__init__("listVatCodes")
        when = datetime.now().date() if date is None else date
        etree.SubElement(self.request, "date").text = when.strftime("%Y-%m-%d")
class ListVatCodeResult(Result):
    """Response to listVatCodes."""

    def __init__(self, data):
        # Pure delegation; kept for symmetry with the other result classes.
        super(ListVatCodeResult, self).__init__(data)
|
{
"content_hash": "d60d269b0c596459ffd28b9a7baaad71",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 138,
"avg_line_length": 34.791489361702126,
"alnum_prop": 0.6103228962818004,
"repo_name": "thijsmie/tantalus",
"id": "54f5abd17a4304cf08f097c57ac0809eab49148a",
"size": "8176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ConscriboPyAPI/conscribo_mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3241"
},
{
"name": "Dockerfile",
"bytes": "805"
},
{
"name": "HTML",
"bytes": "89630"
},
{
"name": "JavaScript",
"bytes": "21325"
},
{
"name": "Python",
"bytes": "149799"
},
{
"name": "Shell",
"bytes": "363"
}
],
"symlink_target": ""
}
|
""" CircuitDict class """
# pylint: disable=no-member
from chartingperformance import db_session
from chartingperformance.models import Circuits
from flask import jsonify
class CircuitDict(object):
    """ Helper query and response methods. Used for circuits endpoint and in ViewUsage class. """
    def __init__(self, house_id):
        self.circuits = self.get_circuits(house_id)
    def get_circuits(self, house_id):
        """ Get, store and return list of circuits from database. """
        circuits = db_session.query(Circuits). \
            filter(Circuits.house_id == house_id).all()
        # Cache a JSON-serializable summary for get_response().
        self.json_items = [{'circuit_id': circuit.circuit_id,
                            'name': circuit.name,
                            'description': circuit.description}
                           for circuit in circuits]
        return circuits
    def get_response(self):
        """ Return response in json format. """
        return jsonify(circuits=self.json_items)
|
{
"content_hash": "1cb5d1c7be3897989b3f116239ac5946",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 97,
"avg_line_length": 31.5,
"alnum_prop": 0.6299603174603174,
"repo_name": "netplusdesign/home-performance-flask-api",
"id": "e20b517823f7fd9297078795c9b44d1183bde916",
"size": "1008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chartingperformance/views/circuit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98416"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import, print_function
import ctypes
from ctypes.util import find_library
from ctypes import c_void_p, c_char_p, c_uint32, POINTER, c_long
# Locate and load the CoreFoundation framework (macOS only).
cf_path = find_library('CoreFoundation')
CoreFoundation = ctypes.CDLL(cf_path, use_errno=True)
# Scalar typedefs mirroring CoreFoundation's CFBase types.
CFIndex = c_long
CFStringEncoding = c_uint32
# Opaque CF object handles -- plain void pointers at the ABI level.
CFString = c_void_p
CFArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFAllocatorRef = c_void_p
# Pointer-to-opaque reference types used in the function signatures below.
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFTypeRef = POINTER(CFType)
# Declare argtypes/restype so ctypes marshals values correctly.
CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
CoreFoundation.CFStringGetCString.argtypes = [CFStringRef, c_char_p, CFIndex, CFStringEncoding]
CoreFoundation.CFStringGetCString.restype = ctypes.c_bool
CoreFoundation.CFStringCreateWithCString.argtypes = [CFAllocatorRef, c_char_p, CFStringEncoding]
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
CoreFoundation.CFArrayGetCount.restype = CFIndex
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
CoreFoundation.CFArrayGetValueAtIndex.restype = CFTypeRef
# UTF-8 encoding constant (kCFStringEncodingUTF8 from CFString.h).
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
# Re-export commonly used symbols as attributes of the loaded library object.
setattr(CoreFoundation, 'kCFAllocatorDefault', CFAllocatorRef.in_dll(CoreFoundation, 'kCFAllocatorDefault'))
setattr(CoreFoundation, 'CFIndex', CFIndex)
setattr(CoreFoundation, 'CFStringRef', CFStringRef)
setattr(CoreFoundation, 'CFTypeRef', CFTypeRef)
setattr(CoreFoundation, 'CFAllocatorRef', CFAllocatorRef)
setattr(CoreFoundation, 'CFArrayRef', CFArrayRef)
setattr(CoreFoundation, 'CFDictionaryRef', CFDictionaryRef)
setattr(CoreFoundation, 'CFErrorRef', CFErrorRef)
def cfstring_to_unicode(value):
    """
    Creates a python unicode string from a CoreFoundation CFStringRef

    :param value:
        A CFStringRef

    :return:
        A unicode string
    """
    cf_string = ctypes.cast(value, CFStringRef)
    # Fast path: CFStringGetCStringPtr may hand back the internal buffer.
    c_str = CoreFoundation.CFStringGetCStringPtr(
        cf_string,
        kCFStringEncodingUTF8
    )
    if c_str is None:
        # Slow path: copy the string contents into a local buffer.
        buf = ctypes.create_string_buffer(1024)
        ok = CoreFoundation.CFStringGetCString(
            cf_string,
            buf,
            1024,
            kCFStringEncodingUTF8
        )
        if not ok:
            raise OSError('Error copying C string from CFStringRef')
        c_str = buf.value
    return c_str.decode('utf-8') if c_str is not None else None
def unicode_to_cfstring(value):
    """
    Creates a CoreFoundation CFStringRef from a python unicode string

    :param value:
        A unicode string

    :return:
        A CFStringRef
    """
    utf8_bytes = value.encode('utf-8')
    return CoreFoundation.CFStringCreateWithCString(
        CoreFoundation.kCFAllocatorDefault,
        utf8_bytes,
        kCFStringEncodingUTF8,
    )
|
{
"content_hash": "5abaf11ab23db2dff56100f61a995bca",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 108,
"avg_line_length": 30.19191919191919,
"alnum_prop": 0.7433924389427903,
"repo_name": "alvarolm/GoRename",
"id": "110f92e758ea0d89911b673bef72aa4095b0ff0f",
"size": "3005",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dep/shellenv/_osx/core_foundation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37094"
}
],
"symlink_target": ""
}
|
import logging
# Verbose logging so protocol-level XML can be inspected during interop runs.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
logger = logging.getLogger('spyne.test.interop.server.soap_http_basic')
from spyne.server.wsgi import WsgiApplication
from spyne.test.interop.server._service import services
from spyne.application import Application
from spyne.protocol.soap import Soap12
# SOAP 1.2 interop application; inbound messages are validated with lxml.
soap12_application = Application(services, 'spyne.test.interop.server',
        in_protocol=Soap12(validator='lxml', cleanup_namespaces=True),
        out_protocol=Soap12())
# Bind address for the demo server below.
host = '127.0.0.1'
port = 9754
def main():
    """Serve the SOAP 1.2 interop application over HTTP until interrupted.

    Binds a wsgiref server to the module-level ``host``/``port`` and runs
    forever; prints an error and returns if wsgiref is unavailable.
    """
    try:
        from wsgiref.simple_server import make_server
        from wsgiref.validate import validator

        wsgi_application = WsgiApplication(soap12_application)
        # validator() wraps the app with WSGI compliance checks.
        server = make_server(host, port, validator(wsgi_application))
        # Log the actual bind address (was hard-coded '0.0.0.0', 9754,
        # which disagreed with the host/port the server really uses).
        logger.info('Starting interop server at %s:%s.' % (host, port))
        logger.info('WSDL is at: /?wsdl')
        server.serve_forever()
    except ImportError:
        print("Error: example server code requires Python >= 2.5")


if __name__ == '__main__':
    main()
|
{
"content_hash": "47cb044b7fab388d12210f25733f922a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 31.72222222222222,
"alnum_prop": 0.6935201401050788,
"repo_name": "deevarvar/myLab",
"id": "27fe6f037ee71d5bef4f7cf523c0ece1fc3fafae",
"size": "1938",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "baidu_code/soap_mockserver/spyne/test/interop/server/soap12/soap_http_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "850"
},
{
"name": "C",
"bytes": "856044"
},
{
"name": "C++",
"bytes": "2988"
},
{
"name": "CSS",
"bytes": "6488"
},
{
"name": "DIGITAL Command Language",
"bytes": "282400"
},
{
"name": "HTML",
"bytes": "119253"
},
{
"name": "JavaScript",
"bytes": "445705"
},
{
"name": "Makefile",
"bytes": "20119"
},
{
"name": "Objective-C",
"bytes": "108"
},
{
"name": "PHP",
"bytes": "2502"
},
{
"name": "Python",
"bytes": "2305843"
},
{
"name": "Roff",
"bytes": "106"
},
{
"name": "Ruby",
"bytes": "478"
},
{
"name": "Shell",
"bytes": "68858"
}
],
"symlink_target": ""
}
|
"""Walkers based on an actuated jumping ball."""
import os
from dm_control import composer
from dm_control import mjcf
from dm_control.locomotion.walkers import legacy_base
import numpy as np
# Directory holding the jumping-ball XML model and its assets.
_ASSETS_PATH = os.path.join(os.path.dirname(__file__),
                            'assets/jumping_ball')
class JumpingBallWithHead(legacy_base.Walker):
  """A rollable and jumpable ball with a head."""

  def _build(self, name='walker', marker_rgba=None, camera_control=False,
             initializer=None, add_ears=False, camera_height=None):
    """Build a JumpingBallWithHead.

    Args:
      name: name of the walker.
      marker_rgba: RGBA value set to walker.marker_geoms to distinguish between
        walkers (in multi-agent setting).
      camera_control: If `True`, the walker exposes two additional actuated
        degrees of freedom to control the egocentric camera height and tilt.
      initializer: (Optional) A `WalkerInitializer` object.
      add_ears: a boolean. If `True`, red/blue marker balls ("ears") are placed
        to the left/right of the agent's head. Better for egocentric vision.
      camera_height: A float specifying the height of the camera, or `None` if
        the camera height should be left as specified in the XML model.
    """
    super()._build(initializer=initializer)
    # Fixed: the original statement assigned self._mjcf_root twice
    # (`self._mjcf_root = self._mjcf_root = ...`).
    self._mjcf_root = mjcf.from_path(self._xml_path)
    if name:
      self._mjcf_root.model = name

    if camera_height is not None:
      self._mjcf_root.find('body', 'egocentric_camera').pos[2] = camera_height

    if add_ears:
      # Large ears
      head = self._mjcf_root.find('body', 'head_body')
      head.add('site', type='sphere', size=(.26,),
               pos=(.22, 0, 0),
               rgba=(.7, 0, 0, 1))
      head.add('site', type='sphere', size=(.26,),
               pos=(-.22, 0, 0),
               rgba=(0, 0, .7, 1))
    # Set corresponding marker color if specified.
    if marker_rgba is not None:
      for geom in self.marker_geoms:
        geom.set_attributes(rgba=marker_rgba)

    self._root_joints = None
    self._camera_control = camera_control
    if not camera_control:
      # Loop variable renamed so it no longer shadows the `name` argument.
      for camera_dof in ('camera_height', 'camera_tilt'):
        self._mjcf_root.find('actuator', camera_dof).remove()
        self._mjcf_root.find('joint', camera_dof).remove()

  @property
  def _xml_path(self):
    return os.path.join(_ASSETS_PATH, 'jumping_ball_with_head.xml')

  @property
  def marker_geoms(self):
    return [self._mjcf_root.find('geom', 'head')]

  def create_root_joints(self, attachment_frame):
    """Adds three translational (x, y, z) slide joints to the frame."""
    root_class = self._mjcf_root.find('default', 'root')
    root_x = attachment_frame.add(
        'joint', name='root_x', type='slide', axis=[1, 0, 0], dclass=root_class)
    root_y = attachment_frame.add(
        'joint', name='root_y', type='slide', axis=[0, 1, 0], dclass=root_class)
    root_z = attachment_frame.add(
        'joint', name='root_z', type='slide', axis=[0, 0, 1], dclass=root_class)
    self._root_joints = [root_x, root_y, root_z]

  def set_pose(self, physics, position=None, quaternion=None):
    """Sets position directly on root joints (if any) and applies yaw only."""
    if position is not None:
      if self._root_joints is not None:
        physics.bind(self._root_joints).qpos = position
      else:
        super().set_pose(physics, position, quaternion=None)
    # Zero all joints before applying the optional yaw below.
    physics.bind(self._mjcf_root.find_all('joint')).qpos = 0.
    if quaternion is not None:
      # This walker can only rotate along the z-axis, so we extract only that
      # component from the quaternion.
      z_angle = np.arctan2(
          2 * (quaternion[0] * quaternion[3] + quaternion[1] * quaternion[2]),
          1 - 2 * (quaternion[2] ** 2 + quaternion[3] ** 2))
      physics.bind(self._mjcf_root.find('joint', 'steer')).qpos = z_angle

  def initialize_episode(self, physics, unused_random_state):
    # gravity compensation for the actuated camera, if enabled
    if self._camera_control:
      gravity = np.hstack([physics.model.opt.gravity, [0, 0, 0]])
      comp_bodies = physics.bind(self._mjcf_root.find('body',
                                                      'egocentric_camera'))
      comp_bodies.xfrc_applied = -gravity * comp_bodies.mass[..., None]

  @property
  def mjcf_model(self):
    return self._mjcf_root

  @composer.cached_property
  def actuators(self):
    return self._mjcf_root.find_all('actuator')

  @composer.cached_property
  def root_body(self):
    return self._mjcf_root.find('body', 'head_body')

  @composer.cached_property
  def end_effectors(self):
    return [self._mjcf_root.find('body', 'head_body')]

  @composer.cached_property
  def observable_joints(self):
    return [self._mjcf_root.find('joint', 'kick')]

  @composer.cached_property
  def egocentric_camera(self):
    return self._mjcf_root.find('camera', 'egocentric')

  @composer.cached_property
  def ground_contact_geoms(self):
    return (self._mjcf_root.find('geom', 'shell'),)
class RollingBallWithHead(JumpingBallWithHead):
  """A rollable ball with a head."""

  def _build(self, **kwargs):
    super()._build(**kwargs)
    # Strip the jumping degree of freedom: without the 'kick' actuator and
    # joint this walker can only roll.
    for element_kind in ('actuator', 'joint'):
      self._mjcf_root.find(element_kind, 'kick').remove()

  @composer.cached_property
  def observable_joints(self):
    # Nothing observable remains once 'kick' is removed.
    return []
|
{
"content_hash": "9ad81740f78e86b7b81805a1d5ac83b7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 36.06993006993007,
"alnum_prop": 0.6392012407910043,
"repo_name": "deepmind/dm_control",
"id": "058ee472ccdcb321d319154b86334b0a325fe51a",
"size": "5824",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dm_control/locomotion/walkers/jumping_ball.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136624"
},
{
"name": "Python",
"bytes": "2097331"
}
],
"symlink_target": ""
}
|
import unittest
from mock import MagicMock
from mock import call
from alvi.client.containers.tree import Tree
from alvi.client.api import Pipe
class TestTree(unittest.TestCase):
    """Unit tests for the Tree client container.

    Uses assertEqual throughout (assertEquals is a deprecated alias that
    emits DeprecationWarning on modern Python).
    """

    def setUp(self):
        # Stub the pipe transport so emitted messages can be inspected.
        self.pipe = Pipe("test_scene")
        self.pipe.send = MagicMock()

    def create_sample_tree(self):
        """Build a small fixture tree: 0 -> (1 -> (3, 4 -> (5, 6)), 2)."""
        tree = Tree(self.pipe)
        root = tree.create_root(0)
        node1 = root.children.create(1)
        root.children.create(2)
        node1.children.create(3)
        node4 = node1.children.create(4)
        node4.children.create(5)
        node4.children.create(6)
        return tree

    def test_create_root(self):
        tree = Tree(self.pipe)
        root = tree.create_root(0)
        self.assertEqual(tree.root, root)
        self.assertEqual(root.value, 0)
        # Creating a second root must fail.
        self.assertRaises(RuntimeError, tree.create_root, 0)

    def test_create_tree(self):
        tree = self.create_sample_tree()
        self.assertEqual(len(tree.root.children), 2)
        node1 = tree.root.children[0]
        self.assertEqual(len(node1.children), 2)
        self.assertEqual(node1.value, 1)
        self.assertEqual(len(tree.root.children[1].children), 0)
        # Every node creation must have been announced over the pipe.
        expected_calls = [
            call('create_node', (0,), {'parent_id': 0, 'value': 0, 'id': 0}),
            call('create_node', (1,), {'parent_id': 0, 'value': 1, 'id': 1}),
            call('create_node', (2,), {'parent_id': 0, 'value': 2, 'id': 2}),
            call('create_node', (3,), {'parent_id': 1, 'value': 3, 'id': 3}),
            call('create_node', (4,), {'parent_id': 1, 'value': 4, 'id': 4}),
            call('create_node', (5,), {'parent_id': 4, 'value': 5, 'id': 5}),
            call('create_node', (6,), {'parent_id': 4, 'value': 6, 'id': 6})
        ]
        self.pipe.send.assert_has_calls(expected_calls)

    def test_change_parent(self):
        tree = self.create_sample_tree()
        node1 = tree.root.children[0]
        self.assertEqual(len(node1.children), 2)
        node2 = tree.root.children[1]
        node4 = tree.root.children[0].children[1]
        self.assertEqual(node4.value, 4)
        self.assertEqual(node4.id, 4)
        self.assertEqual(node4.parent, node1)
        # Re-parenting node4 from node1 to node2 must emit insert_child.
        node2.children.append(node4)
        self.pipe.send.assert_called_with('insert_child', (4,), {'parent_id': 2, 'child_id': 4, 'index': 0})
        self.assertEqual(node4.parent, node2)
        self.assertEqual(len(node1.children), 1)
        self.assertEqual(len(node2.children), 1)
|
{
"content_hash": "386fe9fce511a2a269dea7762e51f780",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 108,
"avg_line_length": 38.90625,
"alnum_prop": 0.5887550200803213,
"repo_name": "alviproject/alvi",
"id": "d5d7d78642b3b2671a81b73c55e56af2965dac79",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alvi/client/containers/tests/test_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42568"
},
{
"name": "HTML",
"bytes": "35975"
},
{
"name": "JavaScript",
"bytes": "152425"
},
{
"name": "Python",
"bytes": "108114"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
## =======[ IMPORT & CONSTANTS ]========= ##
import errno
import fnmatch
import os
import pip
import re
import shutil
import sys
import time
import zipfile
__version__ = 2.0
__author__ = 'Cashiuus'
VERBOSE = 1
DEBUG = 0
# ========================[ CORE UTILITY FUNCTIONS ]======================== #
def install_pkg(package):
    """Install *package* with pip into the current interpreter.

    Uses ``python -m pip`` via subprocess because ``pip.main()`` was removed
    from pip's public API in pip 10; shelling out to the module is the
    officially supported way to invoke pip programmatically.

    :param package: name of the pip package to install.
    """
    import subprocess
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
### Imports with exception handling
# colorama is auto-installed on first failure; the second import attempt
# confirms the install worked, otherwise we bail out with an error.
try: from colorama import init, Fore
except ImportError: install_pkg('colorama')

try: from colorama import init, Fore
except ImportError:
    print("[ERROR] Unable to locate or install pip package 'colorama'")
    exit(1)
def check_ccleaner():
    """Export CCleaner's .ini settings files by invoking CCleaner.exe.

    NOTE(review): assumes CCleaner.exe is on PATH or in the cwd -- confirm.
    """
    # If the .ini file does not exist, run the command to create them
    os.system('CCleaner.exe /EXPORT')
def create_file(path):
    """
    Create a file if it doesn't already exist, seeding it with a default
    settings.py template for this backup script.

    :param path: A full path to the desired file.
    :return: True if the file was created, False if it already existed.
    """
    # O_EXCL makes creation atomic: open fails instead of clobbering.
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    try:
        file_handle = os.open(path, flags)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # The file already exists
            pass
            return False
        else:
            # Something went wrong, troubleshoot error
            raise
    else:
        # No exception, so file was hopefully created.
        with os.fdopen(file_handle, 'w') as file_obj:
            file_obj.write("### Default Settings for Windows Backup Script\n\n"
                           "import os\n\n"
                           "# If any source files are on a USB Drive, specify its letter here.\n"
                           "# The script checks that it's connected to avoid a conflict if it's\n"
                           "# not connected. If not using a USB, just leave variable blank.\n"
                           "USB_DRIVE = ''\n\n"
                           "BACKUP_PATH = os.path.dirname(os.path.abspath(__file__))\n"
                           "#BACKUP_PATH = os.path.join(os.path.expanduser('~'), 'Backups', "
                           "'Windows')\n\n"
                           "# Designate a file prefix for the output archive. This prefix will\n"
                           "# prepend a datestamp; e.g. 'Backup-Windows-20160503.zip\n"
                           "BACKUP_PREFIX = 'Backup-Windows-'\n\n"
                           "# Populate the empty list below with files you want to backup\n"
                           "LIST_BACKUP_FILES = [\n"
                           "    # Win Example: r'C:\Windows\System32\drivers\etc\hosts',\n"
                           "]\n")
        return True
def check_python_binary():
    """Redirect stdout/stderr when the script runs under pythonw.exe.

    pythonw has no console, so print()/sys.stdout writes can fail silently;
    discard stdout and send stderr to a temp-file log instead.
    """
    if not sys.executable.endswith("pythonw.exe"):
        return
    sys.stdout = open(os.devnull, "w")
    stderr_name = "stderr-" + os.path.basename(sys.argv[0])
    sys.stderr = open(os.path.join(os.getenv("TEMP"), stderr_name), "w")
def banner():
    """Build the colored startup banner string (plain fallback without colorama)."""
    # TODO: Adjust this to size according to terminal width
    line = '=' * 80
    try:
        # init() enables ANSI color handling on Windows consoles.
        init()
        border = Fore.GREEN + "===============================================================================================" + Fore.RESET
        # ASCII Art Generator: http://patorjk.com/software/taag/#p=display&f=Graffiti&t=Type%20Something%20
        banner_msg = Fore.WHITE + """
 __ __.__ .___ __________ __
 / \ / \__| ____ __| _/______ _ ________ \______ \_____ ____ | | ____ ________
 \ \/\/ / |/ \ / __ |/ _ \ \/ \/ / ___/ | | _/\__ \ _/ ___\| |/ / | \____ \\
 \ /| | | \/ /_/ ( <_> ) /\___ \ | | \ / __ \ \___| <| | / |_> >
 \__/\ / |__|___| /\____ |\____/ \/\_//____ > |______ /(____ /\___ >__|_ \____/| __/
 \/ \/ \/ \/ \/ \/ \/ \/ |__|
 v {0}\n""".format(__version__)
    except ImportError:
        # No colorama available: fall back to a plain-text banner.
        border = line
        banner_msg = "Windows Backup Assist -- You should 'pip install colorama' for some flair!"
        banner_msg += "\t\t\t\tv {0}".format(__version__)
    return border + banner_msg + border
class ProgressBar(object):
    """
    A progress bar framework for use in file copying operations
    """
    def __init__(self, message, width=20, progressSymbol=u'\u00bb ', emptySymbol=u'\u002e '):
        # Clamp negative widths to zero so rendering never underflows.
        self.width = max(width, 0)
        self.message = message
        self.progressSymbol = progressSymbol
        self.emptySymbol = emptySymbol

    def update(self, progress):
        """Render the bar for *progress* percent (0-100) on the current line."""
        filled = int(round(progress / (100 / float(self.width))))
        empty = self.width - filled
        rendered = Fore.CYAN + self.progressSymbol * filled + self.emptySymbol * empty
        if not self.message:
            self.message = u''
        sys.stdout.write(u'\r{0} {1} {2}{3}%'.format(self.message, rendered, Fore.RESET, progress))
        sys.stdout.flush()

    def calculate_update(self, done, total):
        """Convert done/total into an integer percentage and redraw."""
        self.update(int(round((done / float(total)) * 100)))
def count_files(files):
    """Return how many entries are in the given file list."""
    file_total = len(files)
    return file_total
def create_input_list(input_list):
    """
    Receive an input list and clean up files and directories to build a clean list of files w/o any directory entries.

    This will serve to be a cleaned input file list for the backup zip file, which doesn't easily compress entire directories.

    :param input_list: mixed list of file and directory paths.
    :return: flat list of file paths, with excluded patterns filtered out.

    NOTE(review): relies on the module-global LIST_EXCLUDES (presumably from
    settings.py via the star import in __main__) -- confirm it is defined.
    """
    # Enumerate the input file list and build a proper input list of files
    verified_list = []
    # Transform excludes glob patterns to regular expressions
    # ('$.' matches nothing, so an empty exclude list excludes nothing).
    excludes = r'|'.join([fnmatch.translate(x) for x in LIST_EXCLUDES]) or r'$.'
    for item in input_list:
        # Check if input 'file' is a directory or file
        if os.path.isdir(item):
            for root, dirs, filenames in os.walk(item):
                # Exclude dirs we don't want before processing the walk
                # (assigning dirs[:] in place prunes os.walk's recursion).
                dirs[:] = [os.path.join(root, d) for d in dirs]
                dirs[:] = [d for d in dirs if not re.match(excludes, d)]
                # exclude from files iter
                filenames = [os.path.join(root, f) for f in filenames]
                filenames = [f for f in filenames if not re.match(excludes, f)]
                #filenames = [f for f in filenames if re.match(includes, f)]
                for f in filenames:
                    verified_list.append(os.path.join(root, f))
        else:
            verified_list.append(item)
    if DEBUG:
        print(Fore.YELLOW + " [DEBUG :: create_input_list] " + Fore.RESET)
        print(verified_list)
        print("")
    return verified_list
def backup_to_zip(files, dest):
    """
    This function will receive a list of files to backup
    and will write them into a dated compressed archive under BACKUP_PATH.

    Usage: backup_to_zip(<list of files>, <backup destination folder path>)

    :param files: iterable of file paths to add to the archive.
    :param dest: backup destination folder path (kept for interface
        compatibility; the archive itself is written to BACKUP_PATH).
    """
    # Check for removable device (defined in defaults.py or settings.py)
    if not os.path.exists(USB_DRIVE):
        if VERBOSE:
            print(Fore.RED + "[WARN]" + Fore.RESET + " USB Drive is not currently connected. Files will be skipped...")

    # Build the archive's resulting file name for the backup
    zip_name = BACKUP_PATH + os.sep + time.strftime('%Y%m%d') + '.zip'
    # Context manager guarantees the archive is closed even if a write raises
    # (the original leaked the handle on any unexpected exception).
    with zipfile.ZipFile(zip_name, 'w') as z:
        for file in files:
            # Filter out any patterns we want to skip
            if os.path.basename(file).startswith('~'):
                continue
            if VERBOSE:
                print(Fore.GREEN + "[*]" + Fore.RESET + " Copying file: {}".format(str(file)))
            try:
                # Will fail if file is open or locked
                z.write(file)
                if DEBUG:
                    print(Fore.YELLOW + " [DEBUG : backup_to_zip]" + Fore.RESET + " Copied: {}".format(str(file)))
            except Exception as e:
                # Best-effort backup: report and keep going.
                if VERBOSE or DEBUG:
                    print(Fore.RED + "[ERROR]" + Fore.RESET + " Error copying file: ", e)
    return
def copy_files_with_progress(files, dst):
    """
    Take a list of files and copies them to a specified destination,
    while showing a progress bar for longer copy operations.

    Usage: copy_files_with_progress(<list of files>, <backup destination path>)

    :param files: list of source file paths; entries that fail to copy are
        removed from this list in place.
    :param dst: destination directory path.
    :return: the (possibly shortened) list of files that were copied.
    """
    numfiles = count_files(files)
    numcopied = 0
    copy_error = []
    if numfiles > 0:
        # Iterate over a shallow copy: the original code called
        # files.remove(file) while iterating `files` itself, which skips
        # the element immediately after every failed copy.
        for file in files[:]:
            destfile = os.path.join(dst, os.path.basename(file))
            try:
                shutil.copy2(file, destfile)
                numcopied += 1
                if DEBUG:
                    print(" [DEBUG :: copy_files_with_progress] Copied: {}".format(str(file)))
            except (OSError, shutil.Error):
                # Narrowed from a bare except: only copy failures are expected.
                copy_error.append(file)
                files.remove(file)
                numfiles -= 1
                if DEBUG:
                    print(" [DEBUG :: copy_files_with_progress] Copy failed exception, file: {}".format(str(file)))
            p.calculate_update(numcopied, numfiles)
    print("\n")
    for f in copy_error:
        print("# ----[ Error copying file: {}".format(f))
    # Return the list, which may have removed files that were missing
    # This way, the next function to zip all files won't include them
    return files
def prune_old_backups(search_path, archive_pattern, keep_archives=10):
    """
    Parse the backups directory and collect archives so old ones can be
    removed to keep size consumed down.

    :param search_path: directory tree to scan for backup archives.
    :param archive_pattern: glob-style archive naming pattern (reserved for
        the planned pruning logic below; not applied yet).
    :param keep_archives: number of recent archives to keep (not yet used).
    :return: list of full paths to all '.zip' files found under search_path.
    """
    matches = []
    for root, dirs, filenames in os.walk(search_path):
        for f in filenames:
            # Note: endswith(('.zip')) in the original was just a string in
            # parentheses, not a tuple -- pass the suffix directly.
            if f.endswith('.zip'):
                matches.append(os.path.join(root, f))
    # TODO: Process this list of identified backup archives to
    # 1) identify valid ones based on our naming variable, and
    # 2) identify the 10 most recent (possibly exclude archives with identical hashes), and delete the rest.
    return matches
if __name__ == '__main__':
    # See if we are running this with python.exe or pythonw.exe
    check_python_binary()
    print(banner())
    # Import settings.py that we don't want stored in version control
    try:
        from settings import *
        # Copy the list of files to destination
        # (fixed 'Beging' typo in the user-facing progress message)
        p = ProgressBar('# ----[ Begin Backup & Copy Procedures ]----')
        my_files = copy_files_with_progress(LIST_COPY_FILES, BACKUP_PATH)
        # Backup our larger list of input files to compressed archive
        my_file_list = create_input_list(LIST_BACKUP_FILES)
        backup_to_zip(my_file_list, BACKUP_PATH)
    except ImportError:
        # First time running script or for some reason settings.py doesn't exist
        # Create a fresh settings file with some defaults
        create_file('settings.py')
        print("\n [FIRST-RUN] A default 'settings.py' file has been created for you.")
        print(" [FIRST-RUN] Please update this file with a list of files and backup output path.")
        print(" [FIRST-RUN] Once 'settings.py' has been updated, run this script once more. "
              "Exiting...\n\n")
|
{
"content_hash": "5cd25d4852eda148933a83176678c46c",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 140,
"avg_line_length": 37.848684210526315,
"alnum_prop": 0.5548409525464975,
"repo_name": "Cashiuus/penprep",
"id": "82d0d93d912d4a4214ea5c834f788f2feec1e5f2",
"size": "13574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system-setup/windows/backup-windows/backup-files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "5990"
},
{
"name": "Python",
"bytes": "51388"
},
{
"name": "Shell",
"bytes": "755384"
}
],
"symlink_target": ""
}
|
"""Example: A Simple class to get & read FaceDetected Events"""
import argparse
import sys
import time
import qi
IP_ADDR = "192.168.0.107"
PORT = 9559
class HumanGreeter(object):
    """
    A simple class to react to face detection events.

    Subscribes to NAOqi's FaceDetected and WordRecognized ALMemory events
    and greets recognized people by name via text-to-speech.
    (Python 2 code -- uses print statements.)
    """
    def __init__(self, app):
        """
        Initialisation of qi framework and event detection.
        """
        super(HumanGreeter, self).__init__()
        app.start()
        session = app.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        self.speech_to_text = session.service("ALSpeechRecognition")
        self.tts = session.service("ALTextToSpeech")
        self.face_detection = session.service("ALFaceDetection")
        # init asr_service
        # Pause recognition while (re)configuring language and vocabulary.
        self.speech_to_text.pause(True)
        self.speech_to_text.setLanguage("English")
        self.vocab = ["pranshu", "rolly", "esin"]
        self.speech_to_text.setVocabulary(self.vocab, False)
        # Add subscription for face detection
        self.face_subscriber = self.memory.subscriber("FaceDetected")
        self.face_subscriber.signal.connect(self.on_human_tracked)
        self.got_face = False
        self.last_face_detected = 0
        # Add subscription for speech detection
        self.speech_subscriber = self.memory.subscriber("WordRecognized")
        self.speech_subscriber.signal.connect(self.on_word_recognized)
        self.got_word = False
        self.last_word_detected = 0
        # Subscribe to the face_detection
        self.face_detection.subscribe("HumanGreeter")
        self.speech_to_text.setAudioExpression(True)
        self.speech_to_text.pause(False)
    def on_word_recognized(self, value):
        # Callback for WordRecognized; value appears to be [word, confidence].
        print value
        if value[1] > 0.35:  # confidence threshold -- presumably hand-tuned, TODO confirm
            # Pause ASR while speaking so the robot does not hear itself.
            self.speech_to_text.pause(True)
            self.tts.say("Hi {}".format(value[0]))
            self.speech_to_text.pause(False)
    def on_human_tracked(self, value):
        """
        Callback for event FaceDetected.
        """
        if value == []:  # empty value when the face disappears
            self.got_face = False
        elif not self.got_face and (time.time() - self.last_face_detected >= 10):  # only speak the first time a face appears
            self.got_face = True
            print "I saw a face!"
            self.tts.say("Hello! what is your name?")
            self.last_face_detected = time.time()
    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        print "Starting HumanGreeter"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping HumanGreeter"
            # Unsubscribe so the robot's face tracker is released cleanly.
            self.face_detection.unsubscribe("HumanGreeter")
            #stop
            sys.exit(0)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default=IP_ADDR,
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=PORT,
                        help="Naoqi port number")
    args = parser.parse_args()
    try:
        # Initialize qi framework.
        connection_url = "tcp://" + args.ip + ":" + str(args.port)
        app = qi.Application(["HumanGreeter", "--qi-url=" + connection_url])
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    # Run the greeter loop until interrupted (Ctrl-C).
    human_greeter = HumanGreeter(app)
    human_greeter.run()
|
{
"content_hash": "468c7a5312a0b218f54eaf036d28567f",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 125,
"avg_line_length": 34.271028037383175,
"alnum_prop": 0.591764385055904,
"repo_name": "pban1993/naoqi_robot",
"id": "0be4a3d09808305b681f4fabb9edf6942a918ea1",
"size": "3717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facerecognition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3973"
}
],
"symlink_target": ""
}
|
from .. import hook, bar
from . import base
class WindowName(base._TextBox):
    """
    Displays the name of the window that currently has focus.
    """
    def __init__(self, width=bar.STRETCH, **config):
        base._TextBox.__init__(self, width=width, **config)

    def _configure(self, qtile, bar):
        base._TextBox._configure(self, qtile, bar)
        # Redraw whenever the focused window or its name/float state changes.
        for event in ('window_name_change', 'focus_change', 'float_change'):
            getattr(hook.subscribe, event)(self.update)

    def update(self):
        window = self.bar.screen.group.currentWindow
        # State prefix markers: '[] ' maximized, '_ ' minimized, 'V ' floating.
        prefix = ''
        if window is not None:
            if window.maximized:
                prefix = '[] '
            elif window.minimized:
                prefix = '_ '
            elif window.floating:
                prefix = 'V '
        title = window.name if window and window.name else " "
        self.text = "%s%s" % (prefix, title)
        self.bar.draw()
|
{
"content_hash": "369f74f5e2d6b997e47ae1e11897ad51",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 29.766666666666666,
"alnum_prop": 0.5599104143337066,
"repo_name": "kiniou/qtile",
"id": "c690d54ab6fc854b70c23a7014e34ef805a26d04",
"size": "2187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libqtile/widget/windowname.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1110"
},
{
"name": "Python",
"bytes": "811761"
},
{
"name": "Shell",
"bytes": "2645"
}
],
"symlink_target": ""
}
|
import itertools as it
from computation.split_and_merge.util.node_border_iterator import iterate_node_border
class MergeGroup(object):
"""
Class that collects a group of homogenous nodes. Groups that are divided by the periodic border can be
organized in subgroups.
The object is intended to be used in two phases:
1. Collecting nodes: All nodes are collected in one subgroup. Multiple subgroups are not allowed in this phase.
2. Merging with other MergeGroup objects: Two objects can be merged resulting in one object with separate
subgroups. When merging has been started, no additional nodes can be added.
"""
class SharedAttributes(object):
"""
Instances of this class can be used to store key value pairs (like in dicts). One instance can be shared between
multiple MergeGroup instances so an attribute update will automtically be published to all merge groups that
hold a reference to the same shared attributes object.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
def __contains__(self, item):
return item in self.__dict__
    def __init__(self, initial_node=None):
        """Create a merge group, optionally seeded with a single initial node."""
        # One SharedAttributes object may later be shared by several
        # MergeGroup instances after merging (see merge()).
        self._shared_attributes = MergeGroup.SharedAttributes(
            subgroups=[],
            translation_vectors=[],  # offset that can be applied to the i-th subgroup to merge it with the next
                                     # subgroup in the list
            all_merge_groups=[self],
            is_cyclic=False
        )
        self._index_of_primary_subgroup = None  # Saves the index of the first subgroup in the subgroups list that has
                                                # been assigned to this MergeGroup object
        if initial_node is not None:
            self.add(set([initial_node]))
def add(self, nodes):
"""
Adds nodes from another MergeGroup or a single node.
"""
if len(self._subgroups) > 1:
raise AddingNodesNotAllowedError('In the merge phase adding nodes is not longer possible.')
if isinstance(nodes, type(self)):
# only the nodes of the second merge group are collected. That is NOT a merging operation!
self.add(list(nodes))
else:
if not isinstance(nodes, (set, list)):
nodes = [nodes]
if len(self._subgroups) == 0:
self._subgroups.append(set())
self._index_of_primary_subgroup = 0
self._subgroups[0].update(nodes)
def merge(self, merge_group, translation_vector):
"""
Merges two MergeGroups by adding the _subgroups of the other merge_group respectively. Therefore, both
MergeGroups are changed.
It is supposed that the given translation_vector can be applied on the primary subgroup of self to merge it with
the primary subgroup of merge_group.
"""
def subtract_vector_list(translation_vector, *vector_lists):
for vector in it.chain(*vector_lists):
translation_vector = tuple(tc - vc for tc, vc in zip(translation_vector, vector))
return translation_vector
subgroup_count_before_merge = len(self._subgroups)
self._subgroups.extend(merge_group._subgroups)
subtraction_vectors = self._translation_vectors[self._index_of_primary_subgroup:]
subtraction_vectors.extend(merge_group._translation_vectors[:merge_group._index_of_primary_subgroup])
self._translation_vectors.append(subtract_vector_list(translation_vector, subtraction_vectors))
self._translation_vectors.extend(merge_group._translation_vectors)
self._all_merge_groups.extend(merge_group._all_merge_groups)
merge_group._update_all_merge_groups(self._shared_attributes, subgroup_count_before_merge)
def set_cyclic(self):
self._shared_attributes.is_cyclic = True
def _update_all_merge_groups(self, shared_attributes, index_shift):
for merge_group in self._all_merge_groups[:]:
merge_group._shared_attributes = shared_attributes
merge_group._index_of_primary_subgroup += index_shift
def __getattr__(self, attr):
if attr.startswith('_'):
if hasattr(self, '_shared_attributes') and attr[1:] in self._shared_attributes:
return getattr(self._shared_attributes, attr[1:])
return super(MergeGroup, self).__getattribute__(attr)
def __contains__(self, node):
return any(node in subgroup for subgroup in self._subgroups)
def __len__(self):
return sum(len(subgroup) for subgroup in self._subgroups)
def __delitem__(self, item):
for subgroup in self._subgroups:
if item in subgroup:
del subgroup[item]
def __iter__(self):
"""
Iterator for the nodes of all subgroups 'as is' -> translation is not applied.
"""
return it.chain(*(subgroup for subgroup in self._subgroups))
def iter_with_applied_translation(self, iter_with_non_translated_nodes=False,
keep_largest_volume_within_cell=False):
"""
Iterator for the nodes of all subgroups with applied translation -> the result is a continuous volume
"""
def create_node_generator(subgroup, translation_vector):
# generator expression must be created in a nested function to avoid late variable binding problems with
# "combined_translation_vector"; without a nested function all generator expressions would get the last
# value that was assigned to "combined_translation_vector" during the loop iterations in the outer function
return ((tuple(nc - tc for nc, tc in zip(pos, translation_vector)), dim)
for pos, dim in subgroup)
def get_generators_from_start_index(index, reverse=False):
generators = []
if not reverse:
factor = 1
subgroups = self._subgroups[index:]
translation_vectors = self._translation_vectors[index:]
combined_translation_vector = (0, 0, 0)
else:
factor = -1
subgroups = self._subgroups[index-1::-1] if index > 0 else []
translation_vectors = self._translation_vectors[index-2::-1] if index > 1 else []
combined_translation_vector = (tuple(-c for c in self._translation_vectors[index-1])
if index > 0 else (0, 0, 0))
iterator = it.izip_longest(subgroups, translation_vectors)
for subgroup, translation_vector in iterator:
generator = create_node_generator(subgroup, combined_translation_vector)
if iter_with_non_translated_nodes:
generators.append(it.izip(subgroup, generator))
else:
generators.append(generator)
if translation_vector is not None:
combined_translation_vector = tuple(cc + factor * tc for cc, tc in zip(combined_translation_vector,
translation_vector))
return generators
if keep_largest_volume_within_cell:
index_of_non_translated_subgroup = max(range(len(self._subgroups)), key=lambda i: len(self._subgroups[i]))
else:
index_of_non_translated_subgroup = self._index_of_primary_subgroup
generators = []
backwards_generators = get_generators_from_start_index(index_of_non_translated_subgroup, reverse=True)
generators.extend(list(reversed(backwards_generators)))
forwards_generators = get_generators_from_start_index(index_of_non_translated_subgroup)
generators.extend(forwards_generators)
return it.chain(*generators)
@property
def is_cyclic(self):
return self._is_cyclic
class Graph(object):
    """Undirected graph: maps each node to the set of its neighbors."""
    def __init__(self):
        self.nodes = {}
    def add_node(self, node, neighbors=None):
        """Add `node` with the given optional neighbors, replacing any
        previous entry for `node`."""
        if neighbors is None:
            neighbors = set()
        else:
            neighbors = set(neighbors)
        if node in self.nodes:
            self.remove_node(node)
        self.nodes[node] = set()
        self.add_neighbors(node, neighbors)
    def add_neighbors(self, node, neighbors):
        """Connect `node` with every node in `neighbors` (both directions);
        unknown neighbors are added to the graph first."""
        for neighbor in neighbors:
            if neighbor in self.nodes:
                self.nodes[neighbor].add(node)
            else:
                self.add_node(neighbor, [node])
        self.nodes[node] |= set(neighbors)
    def remove_node(self, node):
        """Remove `node` and all its edges; return its former neighbors."""
        # Copy before disconnecting: remove_neighbors() empties the stored
        # adjacency set in place, so returning self.nodes[node] directly
        # (as the original code did) always yielded an empty set.
        neighbors = set(self.nodes[node])
        self.remove_neighbors(node, neighbors)
        del self.nodes[node]
        return neighbors
    def remove_neighbors(self, node, neighbors):
        """Disconnect `node` from every node in `neighbors` (both
        directions); return the remaining neighbors of `node`."""
        for neighbor in neighbors:
            self.nodes[neighbor] -= set([node])
        self.nodes[node] -= set(neighbors)
        return self.nodes[node]
    def get_neighbors(self, node):
        return self.nodes[node]
    def __contains__(self, node):
        return node in self.nodes
    def __len__(self):
        return len(self.nodes)
    def __iter__(self):
        return self.nodes.__iter__()
    def iterkeys(self):
        # Python 2 style iterators, consistent with the rest of the module.
        return self.nodes.iterkeys()
    def iteritems(self):
        return self.nodes.iteritems()
    def itervalues(self):
        return self.nodes.itervalues()
class GraphForSplitAndMerge(Graph):
    '''
    Nodes must be tuples of the format ((x, y, z), (width, height, depth)).
    Graph class which starts with just one initial node. That node can be split
    multiple times at a specified split point (resulting in max 8 sub nodes).
    Neighboring relationships of the sub nodes are determined automatically.
    After the split phase, boundary nodes can be detected. If periodic boundary
    conditions should be considered, the neighboring relationships can be updated
    manually with the found boundary nodes.
    Afterwards, single nodes can be merged together. Internally, they stay as
    single nodes and merges are logged with other data structures (sets).
    The split method can only be used until merge is called for the first time!
    '''
    def __init__(self, data, mask, get_translation_vector, is_relevant_part, initial_node=None):
        Graph.__init__(self)
        # 3d volume; sliced as data[x:x+w, y:y+h, z:z+d] in split_node().
        self.data = data
        # Indexed as mask[x, y, z]; a truthy value marks a cell beyond the
        # periodic border -- presumably a "ghost"/outside cell, TODO confirm.
        self.mask = mask
        # Callable mapping a position to the periodic translation vector.
        self.get_translation_vector = get_translation_vector
        # Predicate deciding whether a homogeneous sub-volume is kept.
        self.is_relevant_part = is_relevant_part
        # Maps each node to the MergeGroup it currently belongs to.
        self.merged_nodes = {}
        # Lazily computed by __mark_border_nodes(); reset on every split.
        self.border_nodes = None
        self.border_node_translation_vectors = None
        self.border_node_pair_translations = None
        # Phase flags: splitting is forbidden once merging starts, and some
        # operations become periodic-only once periodic data is involved.
        self.split_allowed = True
        self.adding_non_periodic_neighbors_allowed = True
        self.merging_non_periodic_neighbors_allowed = True
        self.initial_node_set = False
        if initial_node is not None:
            self.set_initial_node(initial_node)
    def set_initial_node(self, initial_node):
        """Set the single starting node; may only be called once."""
        if not self.initial_node_set:
            self.add_node(initial_node, set())
            self.initial_node_set = True
        else:
            # NOTE(review): raised without a message argument.
            raise InitialNodeAlreadySetError
    def add_node(self, node, potential_neighbors=None):
        """Add `node`, auto-detecting neighbors among `potential_neighbors`."""
        neighbors = self.__find_neighbors(node, potential_neighbors)
        Graph.add_node(self, node, neighbors)
        # The new node must be merged with itself because later other nodes could share the set of merged nodes:
        self.merged_nodes[node] = MergeGroup(node)
    def add_neighbors(self, node, neighbors, translation_vectors=None):
        """Connect `node` with `neighbors`; passing `translation_vectors`
        declares the relationship as crossing the periodic border."""
        periodic_border_relationship = (translation_vectors is not None)
        if not self.adding_non_periodic_neighbors_allowed:
            if node not in self.nodes:
                raise AddingNodesNotAllowedError
            elif not periodic_border_relationship:
                raise AddingNonPeriodicNeighborsNotAllowedError
        Graph.add_neighbors(self, node, neighbors)
        if periodic_border_relationship:
            # From the first periodic relationship on, only periodic
            # neighbor additions are accepted.
            self.adding_non_periodic_neighbors_allowed = False
            if self.border_nodes is None:
                self.__mark_border_nodes()
            self.border_nodes[node] |= set(neighbors)
            # Store the translation for both directions of each node pair.
            for neighbor, translation_vector in zip(neighbors, translation_vectors):
                self.border_node_pair_translations[(neighbor, node)] = translation_vector
                self.border_node_pair_translations[(node, neighbor)] = tuple(-c for c in translation_vector)
    def remove_node(self, node):
        """Remove `node` from the graph and drop its merge-group entry."""
        Graph.remove_node(self, node)
        del self.merged_nodes[node]
    def split_node(self, node, split_point_rel):
        '''
        split_point_rel is that point relative to the left top corner of the node that contains the first
        inhomogeneity.
        It is implicated that the node data is stored in C order. Therefore, it is possible to split the node into 4
        homogeneous and 4 potential inhomogeneous sub nodes to speed which causes a great speedup of the whole
        algorithm.
        '''
        if self.split_allowed:
            # Splitting invalidates any previously computed border data.
            self.border_nodes = None
            self.border_node_translation_vectors = None
            self.border_node_pair_translations = None
            x, y, z = node[0]
            w, h, d = node[1]
            x_inh, y_inh, z_inh = split_point_rel
            # NOTE(review): the asymmetric +1 offsets below encode the C-order
            # scan direction; they look intentional but were not re-derived here.
            potential_new_homogen_nodes = (((x, y, z ), (x_inh+1, y_inh+1, z_inh)),
                                           ((x, y, z+z_inh), (x_inh+1, y_inh, d-z_inh)),
                                           ((x, y+y_inh+1, z ), (x_inh, h-y_inh-1, z_inh)),
                                           ((x, y+y_inh, z+z_inh), (x_inh, h-y_inh, d-z_inh)))
            potential_new_inhomogen_nodes = (((x+x_inh+1, y, z ), (w-x_inh-1, y_inh+1, z_inh)),
                                             ((x+x_inh+1, y, z+z_inh), (w-x_inh-1, y_inh, d-z_inh)),
                                             ((x+x_inh, y+y_inh+1, z ), (w-x_inh, h-y_inh-1, z_inh)),
                                             ((x+x_inh, y+y_inh, z+z_inh), (w-x_inh, h-y_inh, d-z_inh)))
            def get_relevant_nodes(potential_nodes, hom_nodes=False):
                # Keep only non-degenerate sub nodes; homogeneous candidates
                # must additionally be relevant and not masked out.
                new_nodes = []
                for n in potential_nodes:
                    x, y, z = n[0]
                    w, h, d = n[1]
                    if w > 0 and h > 0 and d > 0:
                        # implication "a => b": the extra checks apply only when hom_nodes is True
                        if not hom_nodes or (self.is_relevant_part(self.data[x:x+w, y:y+h, z:z+d]) and
                                             not bool(self.mask[x, y, z])):
                            new_nodes.append(n)
                return new_nodes
            new_homogen_nodes = get_relevant_nodes(potential_new_homogen_nodes, hom_nodes=True)
            new_inhomogen_nodes = get_relevant_nodes(potential_new_inhomogen_nodes)
            all_new_nodes = set(new_homogen_nodes) | set(new_inhomogen_nodes)
            # Candidates for neighborship: the old node's neighbors plus all siblings.
            potential_neighbors = self.get_neighbors(node) | all_new_nodes
            self.remove_node(node)
            for n in all_new_nodes:
                self.add_node(n, potential_neighbors - set([n]))
            return new_inhomogen_nodes
        else:
            # NOTE(review): raised without a message argument.
            raise SplitNotAllowedError
    def get_border_nodes(self):
        """Return the nodes that touch the periodic border (lazy computation)."""
        if self.border_nodes is None:
            self.__mark_border_nodes()
        return self.border_nodes.keys()
    def get_border_node_translation_vectors(self):
        """Return the dict node -> set of translation vectors at its border."""
        if self.border_node_translation_vectors is None:
            self.__mark_border_nodes()
        return self.border_node_translation_vectors
    def __mark_border_nodes(self):
        # A node is a border node if a masked cell touches one of the corners
        # of its one-cell-enlarged bounding box; for such nodes the complete
        # border is then scanned for translation vectors.
        translation_vectors = None
        def func(border_x, border_y, border_z):
            # Closure over translation_vectors: rebound for every border node
            # below before iterate_node_border() invokes this callback.
            if bool(self.mask[border_x, border_y, border_z]):
                translation_vectors.add(tuple(self.get_translation_vector((border_x, border_y, border_z))))
        self.border_nodes = {}
        self.border_node_translation_vectors = {}
        self.border_node_pair_translations = {}
        for node in self:
            node_x, node_y, node_z = node[0]
            node_w, node_h, node_d = node[1]
            for x, y, z in it.product((node_x-1, node_x+node_w), (node_y-1, node_y+node_h), (node_z-1, node_z+node_d)):
                if(bool(self.mask[x, y, z])):
                    translation_vectors = set()
                    iterate_node_border(node, func)
                    self.border_nodes[node] = set()
                    self.border_node_translation_vectors[node] = translation_vectors
                    break
    def forbid_splitting(self):
        """Irreversibly end the split phase."""
        self.split_allowed = False
    def merge_nodes(self, node1, node2):
        '''
        Only nodes can be merged that are neighboring.
        Returns True if node1 and node2 belong to the same group afterwards.
        '''
        # Neighbors?
        if node2 not in self.nodes[node1]:
            # NOTE(review): raised without a message argument.
            raise NotNeighboringError
        self.forbid_splitting()
        if not self.is_merged(node1, node2, detect_cyclic_merge=True):
            separated_by_periodic_boundary_condition = (self.border_nodes is not None and
                                                        node1 in self.border_nodes and
                                                        node2 in self.border_nodes[node1])
            if not separated_by_periodic_boundary_condition:
                if not self.merging_non_periodic_neighbors_allowed:
                    raise MergingNonBorderNodesNotAllowedError
                # Record all merged nodes of node2 as merged nodes of node1 (node2 is already included)
                self.merged_nodes[node1].add(self.merged_nodes[node2])
                # Since the merged nodes of node1 have a reference to self.merged_nodes[node1], they have been already
                # updated. So, it is only necessary to update the merged_nodes of node2 (which contain node2 already).
                for merged_node in self.merged_nodes[node2]:
                    self.merged_nodes[merged_node] = self.merged_nodes[node1]
            else:
                # Merging across the periodic border: from now on, only
                # periodic merges are allowed, and the groups are merged with
                # the stored pair translation.
                self.merging_non_periodic_neighbors_allowed = False
                self.merged_nodes[node1].merge(self.merged_nodes[node2], self.border_node_pair_translations[(node1,
                                                                                                            node2)])
        return node2 in self.merged_nodes[node1]
    def is_merged(self, node1, node2, detect_cyclic_merge=False):
        # If node2 is not merged with node1, then node1 is not merged with node2, either.
        is_merged = node2 in self.merged_nodes[node1]
        if is_merged and detect_cyclic_merge and self.merged_nodes[node1] is self.merged_nodes[node2]:
            # Both nodes already share one group -> the new edge closes a cycle.
            self.merged_nodes[node1].set_cyclic()
        return is_merged
    def get_all_areas(self, apply_translation=False, with_non_translated_nodes=False, mark_cyclic_areas=False):
        '''
        Returns a list of sets, each describing a merged area of nodes. If "apply_translation" is set to True,
        all subgroups of an area are translated with respect to the periodic border condition. As a result, all areas
        are contiguous in space. If "with_non_translated_nodes" is set additionally, a second list is returned that
        contains the same areas without applied translations. If "mark_cyclic_areas" is set to True, the function has
        a further list as second/third return value indicating which areas (given by index) are cyclic (have infinite
        extent).
        '''
        if self.split_allowed:
            # Nothing was merged yet -> no areas.
            return []
        areas = []
        # Saves non translated areas if "apply_translation" and "with_non_translated_nodes" are set to true
        alt_areas = [] if with_non_translated_nodes else None
        cyclic_areas = [] if mark_cyclic_areas else None
        visited_nodes = set()
        for node in self:
            if node not in visited_nodes:
                area = set()
                if apply_translation:
                    node_iterator = self.merged_nodes[node].iter_with_applied_translation(
                        iter_with_non_translated_nodes=True, keep_largest_volume_within_cell=True
                    )
                    if with_non_translated_nodes:
                        alt_area = set()
                        for merged_node, translated_node in node_iterator:
                            area.add(translated_node)
                            alt_area.add(merged_node)
                            visited_nodes.add(merged_node)
                        alt_areas.append(alt_area)
                    else:
                        for merged_node, translated_node in node_iterator:
                            area.add(translated_node)
                            visited_nodes.add(merged_node)
                else:
                    node_iterator = self.merged_nodes[node]
                    for merged_node in node_iterator:
                        area.add(merged_node)
                        visited_nodes.add(merged_node)
                areas.append(area)
                if mark_cyclic_areas and self.merged_nodes[node].is_cyclic:
                    cyclic_areas.append(len(areas) - 1)
        if apply_translation and with_non_translated_nodes and mark_cyclic_areas:
            return areas, alt_areas, cyclic_areas
        elif mark_cyclic_areas:
            return areas, cyclic_areas
        else:
            return areas
    def __find_neighbors(self, node, potential_neighbors):
        # Two axis-aligned boxes are neighbors when their one-cell-enlarged
        # extents overlap on all three axes (the interval test below).
        x_, y_, z_ = node[0]
        w_, h_, d_ = node[1]
        neighbors = set()
        if potential_neighbors is None:
            potential_neighbors = self.nodes.keys()
        for n in potential_neighbors:
            x, y, z = n[0]
            w, h, d = n[1]
            if x-w_ <= x_ <= x+w and y-h_ <= y_ <= y+h and z-d_ <= z_ <= z+d:
                neighbors.add(n)
        return neighbors
    def iter_border_items(self):
        """Iterate (border node, periodic neighbors) pairs."""
        self.get_border_nodes()  # ensure that self.border_nodes is set correctly
        return self.border_nodes.iteritems()
class NotNeighboringError(Exception):
    """Raised when two nodes that are not neighbors should be merged."""
    # A default message is required because the exception is raised bare
    # (``raise NotNeighboringError``) elsewhere in this module; the original
    # mandatory ``msg`` argument turned that into a TypeError.
    def __init__(self, msg='Only neighboring nodes can be merged.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class InitialNodeAlreadySetError(Exception):
    """Raised when the initial node of a graph is set a second time."""
    # Default message so bare raises (``raise InitialNodeAlreadySetError``)
    # in this module work; the original mandatory ``msg`` made them fail.
    def __init__(self, msg='The initial node has already been set.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class SplitNotAllowedError(Exception):
    """Raised when split_node() is called after merging has started."""
    # Default message so the bare ``raise SplitNotAllowedError`` in
    # split_node() works; the original mandatory ``msg`` made it fail.
    def __init__(self, msg='Splitting is not allowed after merging has started.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class AddingNodesNotAllowedError(Exception):
    """Raised when nodes are added although the collecting phase is over."""
    # Default message so bare raises (``raise AddingNodesNotAllowedError``)
    # in this module work; the original mandatory ``msg`` made them fail.
    def __init__(self, msg='Adding nodes is not allowed anymore.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class AddingNonPeriodicNeighborsNotAllowedError(Exception):
    """Raised when a non-periodic neighbor relation is added after periodic
    border relations have been introduced."""
    # Default message so bare raises in this module work; the original
    # mandatory ``msg`` made them fail.
    def __init__(self, msg='Adding non-periodic neighbors is not allowed anymore.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class MergingNonBorderNodesNotAllowedError(Exception):
    """Raised when non-border nodes are merged after a periodic merge."""
    # Default message so the bare raise in merge_nodes() works; the original
    # mandatory ``msg`` made it fail.
    def __init__(self, msg='Merging non-border nodes is not allowed anymore.'):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
|
{
"content_hash": "9584819f5ce94eeeb500be36d888e255",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 120,
"avg_line_length": 44.553639846743295,
"alnum_prop": 0.5854581416347766,
"repo_name": "sciapp/pyMolDyn",
"id": "e1aa621cd032cacb09c9e8e63f1192e55109449c",
"size": "23257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/computation/split_and_merge/util/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1998"
},
{
"name": "C",
"bytes": "32038"
},
{
"name": "CSS",
"bytes": "911"
},
{
"name": "HTML",
"bytes": "21678"
},
{
"name": "Makefile",
"bytes": "7526"
},
{
"name": "Python",
"bytes": "664457"
},
{
"name": "Ruby",
"bytes": "516"
},
{
"name": "Shell",
"bytes": "21156"
}
],
"symlink_target": ""
}
|
import tests.periodicities.period_test as per
# Exercise the period model for a 50-minute cycle, 360 data points.
per.buildModel((360, 'T', 50))
|
{
"content_hash": "1cfd5474ca732f4d8026c0531e2479e4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 20.5,
"alnum_prop": 0.7073170731707317,
"repo_name": "antoinecarme/pyaf",
"id": "beb916976c6c3f7575fb9970e8713d1fad8b5685",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/periodicities/Minute/Cycle_Minute_50_T_360.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""Automated tests (as opposed to human-verified test patterns)
It was tempting to mock out curses to get predictable output from ``tigetstr``,
but there are concrete integration-testing benefits in not doing so. For
instance, ``tigetstr`` changed its return type in Python 3.2.3. So instead, we
simply create all our test ``Terminal`` instances with a known terminal type.
All we require from the host machine is that a standard terminfo definition of
xterm-256color exists.
"""
from __future__ import with_statement # Make 2.5-compatible
from curses import tigetstr, tparm
from functools import partial
from StringIO import StringIO
import sys
from nose import SkipTest
from nose.tools import eq_
# This tests that __all__ is correct, since we use below everything that should
# be imported:
from blessings import *
# All test terminals use one fixed, widely available terminfo entry so the
# expected escape sequences are reproducible across host machines.
TestTerminal = partial(Terminal, kind='xterm-256color')
def unicode_cap(cap):
    """Look up capability ``cap`` with ``tigetstr`` and return it as Unicode."""
    raw = tigetstr(cap)
    return raw.decode('utf-8')
def unicode_parm(cap, *parms):
    """Parametrize capability ``cap`` with ``tparm`` and return it as Unicode."""
    raw = tparm(tigetstr(cap), *parms)
    return raw.decode('utf-8')
def test_capability():
    """Check that a capability lookup works.
    Also test that Terminal grabs a reasonable default stream. This test
    assumes it will be run from a tty.
    """
    term = TestTerminal()
    expected = unicode_cap('sc')
    eq_(term.save, expected)
    # A second lookup exercises the caching path.
    eq_(term.save, expected)
def test_capability_without_tty():
    """Capability templates should be empty when the stream is not a tty."""
    term = TestTerminal(stream=StringIO())
    eq_(term.save, u'')
    eq_(term.red, u'')
def test_capability_with_forced_tty():
    """With forced styling, capabilities had better not (generally) be empty."""
    term = TestTerminal(stream=StringIO(), force_styling=True)
    eq_(term.save, unicode_cap('sc'))
def test_parametrization():
    """Test parametrizing a capability."""
    expected = unicode_parm('cup', 3, 4)
    eq_(TestTerminal().cup(3, 4), expected)
def height_and_width():
    """Assert that ``height_and_width()`` returns ints.
    NOTE(review): this function lacks the ``test_`` prefix, so the test
    runner never collects it; the name is kept to avoid changing the
    module's public interface, but it should probably be renamed.
    """
    t = TestTerminal()  # kind shouldn't matter.
    # isinstance() takes the object first and the type second; the original
    # calls had the arguments swapped and would always raise a TypeError.
    assert isinstance(t.height, int)
    assert isinstance(t.width, int)
def test_stream_attr():
    """Terminal should expose a ``stream`` attribute defaulting to something sane."""
    eq_(Terminal().stream, sys.__stdout__)
def test_location():
    """Make sure ``location()`` does what it claims."""
    term = TestTerminal(stream=StringIO(), force_styling=True)
    with term.location(3, 4):
        term.stream.write(u'hi')
    # Expect save cursor, move (row 4, col 3), payload, restore cursor.
    expected = (unicode_cap('sc') +
                unicode_parm('cup', 4, 3) +
                u'hi' +
                unicode_cap('rc'))
    eq_(term.stream.getvalue(), expected)
def test_horizontal_location():
    """Make sure we can move the cursor horizontally without changing rows."""
    term = TestTerminal(stream=StringIO(), force_styling=True)
    with term.location(x=5):
        pass
    expected = unicode_cap('sc') + unicode_parm('hpa', 5) + unicode_cap('rc')
    eq_(term.stream.getvalue(), expected)
def test_null_location():
    """``location()`` with no args should just save and restore the position."""
    term = TestTerminal(stream=StringIO(), force_styling=True)
    with term.location():
        pass
    eq_(term.stream.getvalue(), unicode_cap('sc') + unicode_cap('rc'))
def test_zero_location():
    """Make sure ``location()`` pays attention to 0-valued args."""
    term = TestTerminal(stream=StringIO(), force_styling=True)
    with term.location(0, 0):
        pass
    expected = (unicode_cap('sc') +
                unicode_parm('cup', 0, 0) +
                unicode_cap('rc'))
    eq_(term.stream.getvalue(), expected)
def test_null_fileno():
    """Make sure ``Terminal`` works when ``fileno`` is ``None``.
    This simulates piping output to another program.
    """
    stream = StringIO()
    stream.fileno = None
    term = TestTerminal(stream=stream)
    eq_(term.save, u'')
def test_mnemonic_colors():
    """Make sure color shortcuts work."""
    def fg(num):
        return unicode_parm('setaf', num)
    def bg(num):
        return unicode_parm('setab', num)
    # Avoid testing red, blue, yellow, and cyan, since they might someday
    # change depending on terminal type.
    term = TestTerminal()
    eq_(term.white, fg(7))
    eq_(term.green, fg(2))  # Make sure it's different than white.
    eq_(term.on_black, bg(0))
    eq_(term.on_green, bg(2))
    eq_(term.bright_black, fg(8))
    eq_(term.bright_green, fg(10))
    eq_(term.on_bright_black, bg(8))
    eq_(term.on_bright_green, bg(10))
def test_callable_numeric_colors():
    """``color(n)`` should return a formatting wrapper."""
    term = TestTerminal()
    eq_(term.color(5)('smoo'), term.magenta + 'smoo' + term.normal)
    eq_(term.color(5)('smoo'), term.color(5) + 'smoo' + term.normal)
    eq_(term.on_color(2)('smoo'), term.on_green + 'smoo' + term.normal)
    eq_(term.on_color(2)('smoo'), term.on_color(2) + 'smoo' + term.normal)
def test_null_callable_numeric_colors():
    """``color(n)`` should be a no-op on null terminals."""
    term = TestTerminal(stream=StringIO())
    eq_(term.color(5)('smoo'), 'smoo')
    eq_(term.on_color(6)('smoo'), 'smoo')
def test_naked_color_cap():
    """``term.color`` should return a stringlike capability."""
    term = TestTerminal()
    eq_(term.color + '', term.setaf + '')
def test_number_of_colors_without_tty():
    """``number_of_colors`` should return 0 when there's no tty."""
    # Hypothesis: once setupterm() has run and decided the tty supports 256
    # colors, it never changes its mind.
    raise SkipTest
    # NOTE(review): everything below is intentionally unreachable while the
    # SkipTest above stands; kept for when the hypothesis is resolved.
    t = TestTerminal(stream=StringIO())
    eq_(t.number_of_colors, 0)
    t = TestTerminal(stream=StringIO(), force_styling=True)
    eq_(t.number_of_colors, 0)
def test_number_of_colors_with_tty():
    """``number_of_colors`` should work."""
    eq_(TestTerminal().number_of_colors, 256)
def test_formatting_functions():
    """Test crazy-ass formatting wrappers, both simple and compound."""
    term = TestTerminal()
    # By now, it should be safe to use sugared attributes. Other tests test those.
    normal = term.normal
    eq_(term.bold(u'hi'), term.bold + u'hi' + normal)
    eq_(term.green('hi'), term.green + u'hi' + normal)  # Plain strs for Python 2.x
    # Test some non-ASCII chars, probably not necessary:
    eq_(term.bold_green(u'boö'), term.bold + term.green + u'boö' + normal)
    eq_(term.bold_underline_green_on_red('boo'),
        term.bold + term.underline + term.green + term.on_red + u'boo' + normal)
    # Don't spell things like this:
    eq_(term.on_bright_red_bold_bright_green_underline('meh'),
        term.on_bright_red + term.bold + term.bright_green + term.underline + u'meh' + normal)
def test_formatting_functions_without_tty():
    """Formatting wrappers should pass text through untouched without a tty."""
    term = TestTerminal(stream=StringIO())
    eq_(term.bold(u'hi'), u'hi')
    eq_(term.green('hi'), u'hi')
    # Test non-ASCII chars, no longer really necessary:
    eq_(term.bold_green(u'boö'), u'boö')
    eq_(term.bold_underline_green_on_red('loo'), u'loo')
    eq_(term.on_bright_red_bold_bright_green_underline('meh'), u'meh')
def test_nice_formatting_errors():
    """Make sure you get nice hints if you misspell a formatting wrapper."""
    t = TestTerminal()
    # A misspelled compound wrapper should raise a TypeError whose message
    # contains a "probably misspelled" hint...
    try:
        t.bold_misspelled('hey')
    except TypeError, e:
        assert 'probably misspelled' in e.args[0]
    try:
        t.bold_misspelled(u'hey')  # unicode
    except TypeError, e:
        assert 'probably misspelled' in e.args[0]
    # ...but not when the call itself is malformed (non-string or >1 arg).
    try:
        t.bold_misspelled(None)  # an arbitrary non-string
    except TypeError, e:
        assert 'probably misspelled' not in e.args[0]
    try:
        t.bold_misspelled('a', 'b')  # >1 string arg
    except TypeError, e:
        assert 'probably misspelled' not in e.args[0]
def test_init_descriptor_always_initted():
    """We should be able to get a height and width even on no-tty Terminals."""
    term = Terminal(stream=StringIO())
    eq_(type(term.height), int)
|
{
"content_hash": "34ce004844b9edfdcc4ad572bf3c07a8",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 92,
"avg_line_length": 32.34538152610442,
"alnum_prop": 0.6247827166625279,
"repo_name": "sgml/popcorn_maker",
"id": "817e766573e4920459357d6a24d8ade3a9ef717d",
"size": "8082",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/blessings/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "389985"
},
{
"name": "Groff",
"bytes": "14527"
},
{
"name": "HTML",
"bytes": "121483"
},
{
"name": "JavaScript",
"bytes": "1418029"
},
{
"name": "Nginx",
"bytes": "1717"
},
{
"name": "Puppet",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "5106648"
},
{
"name": "Shell",
"bytes": "2419"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
"""pyrflink - Python implementation of the RFlink SerialGateway."""
import time
import threading
import logging
import pickle
import os
import json
import socket
import select
import re
from queue import Queue
from importlib import import_module
import serial
LOGGER = logging.getLogger(__name__)
class Gateway(object):
"""Base implementation for a RFlink Gateway."""
# pylint: disable=too-many-instance-attributes
def __init__(self, event_callback=None, persistence=False,
persistence_file='RFlink.pickle', rflink_version='43'):
"""Setup Gateway."""
self.queue = Queue()
self.lock = threading.Lock()
self.event_callback = event_callback
self.sensors = {}
self.metric = True # if true - use metric, if false - use imperial
self.debug = True # if true - print all received messages
self.persistence = persistence # if true - save sensors to disk
self.persistence_file = persistence_file # path to persistence file
self.persistence_bak = '{}.bak'.format(self.persistence_file)
if persistence:
self._safe_load_sensors()
if rflink_version <= '43':
_const = import_module('rflink.const_43')
#elif rflink_version >= '44':
# _const = import_module('rflink.const_43')
self.const = _const
def _handle_presentation(self, msg):
"""Process a presentation message."""
if msg.child_id == 255:
# this is a presentation of the sensor platform
self.add_sensor(msg.node_id)
self.sensors[msg.node_id].type = msg.sub_type
self.sensors[msg.node_id].rflink_version = msg.payload
self.alert(msg.node_id)
else:
# this is a presentation of a child sensor
if not self.is_sensor(msg.node_id):
LOGGER.error('Node %s is unknown, will not add child sensor.',
msg.node_id)
return
self.sensors[msg.node_id].add_child_sensor(msg.child_id,
msg.sub_type)
self.alert(msg.node_id)
def _handle_set(self, msg):
"""Process a set message."""
if self.is_sensor(msg.node_id, msg.child_id):
self.sensors[msg.node_id].set_child_value(
msg.child_id, msg.sub_type, msg.payload)
self.alert(msg.node_id)
def _handle_req(self, msg):
"""Process a req message.
This will return the value if it exists. If no value exists,
nothing is returned.
"""
if self.is_sensor(msg.node_id, msg.child_id):
value = self.sensors[msg.node_id].children[
msg.child_id].values.get(msg.sub_type)
if value:
return msg.copy(type=self.const.MessageType.set, payload=value)
def _handle_internal(self, msg):
"""Process an internal protocol message."""
if msg.sub_type == self.const.Internal.I_ID_REQUEST:
return msg.copy(ack=0,
sub_type=self.const.Internal.I_ID_RESPONSE,
payload=self.add_sensor())
elif msg.sub_type == self.const.Internal.I_SKETCH_NAME:
if self.is_sensor(msg.node_id):
self.sensors[msg.node_id].sketch_name = msg.payload
self.alert(msg.node_id)
elif msg.sub_type == self.const.Internal.I_SKETCH_VERSION:
if self.is_sensor(msg.node_id):
self.sensors[msg.node_id].sketch_version = msg.payload
self.alert(msg.node_id)
elif msg.sub_type == self.const.Internal.I_CONFIG:
return msg.copy(ack=0, payload='M' if self.metric else 'I')
elif msg.sub_type == self.const.Internal.I_BATTERY_LEVEL:
if self.is_sensor(msg.node_id):
self.sensors[msg.node_id].battery_level = int(msg.payload)
self.alert(msg.node_id)
elif msg.sub_type == self.const.Internal.I_TIME:
return msg.copy(ack=0, payload=int(time.time()))
elif msg.sub_type == self.const.Internal.I_LOG_MESSAGE and self.debug:
LOGGER.info('n:%s c:%s t:%s s:%s p:%s',
msg.node_id,
msg.child_id,
msg.type,
msg.sub_type,
msg.payload)
def send(self, message):
"""Should be implemented by a child class."""
raise NotImplementedError
def logic(self, data):
"""Parse the data and respond to it appropriately.
Response is returned to the caller and has to be sent
data as a RFlink command string.
"""
try:
msg = Message(data)
except ValueError:
return None
if msg.type == self.const.MessageType.presentation:
self._handle_presentation(msg)
elif msg.type == self.const.MessageType.set:
self._handle_set(msg)
elif msg.type == self.const.MessageType.req:
return self._handle_req(msg)
elif msg.type == self.const.MessageType.internal:
return self._handle_internal(msg)
return None
def _save_pickle(self, filename):
"""Save sensors to pickle file."""
with open(filename, 'wb') as file_handle:
pickle.dump(self.sensors, file_handle, pickle.HIGHEST_PROTOCOL)
file_handle.flush()
os.fsync(file_handle.fileno())
def _load_pickle(self, filename):
"""Load sensors from pickle file."""
try:
with open(filename, 'rb') as file_handle:
self.sensors = pickle.load(file_handle)
except IOError:
pass
def _save_json(self, filename):
"""Save sensors to json file."""
with open(filename, 'w') as file_handle:
json.dump(self.sensors, file_handle, cls=RFlinkJSONEncoder)
file_handle.flush()
os.fsync(file_handle.fileno())
def _load_json(self, filename):
"""Load sensors from json file."""
with open(filename, 'r') as file_handle:
self.sensors = json.load(file_handle, cls=RFlinkJSONDecoder)
def _save_sensors(self):
"""Save sensors to file."""
fname = os.path.realpath(self.persistence_file)
exists = os.path.isfile(fname)
dirname = os.path.dirname(fname)
if exists and os.access(fname, os.W_OK) and \
os.access(dirname, os.W_OK) or \
not exists and os.access(dirname, os.W_OK):
split_fname = os.path.splitext(fname)
tmp_fname = '{}.tmp{}'.format(split_fname[0], split_fname[1])
self._perform_file_action(tmp_fname, 'save')
if exists:
os.rename(fname, self.persistence_bak)
os.rename(tmp_fname, fname)
if exists:
os.remove(self.persistence_bak)
else:
LOGGER.error('Permission denied when writing to %s', fname)
def _load_sensors(self, path=None):
"""Load sensors from file."""
if path is None:
path = self.persistence_file
exists = os.path.isfile(path)
if exists and os.access(path, os.R_OK):
if path in self.persistence_bak:
os.rename(path, self.persistence_file)
path = self.persistence_file
self._perform_file_action(path, 'load')
return True
else:
LOGGER.warning('File does not exist or is not readable: %s', path)
return False
    def _safe_load_sensors(self):
        """Load sensors from file, falling back to the backup file.

        A ValueError from the parser means the file content is corrupt;
        in that case (or when the main file is unreadable) the backup
        file is tried.  A corrupt main file is removed so the next save
        can recreate it cleanly.
        """
        try:
            loaded = self._load_sensors()
        except ValueError:
            LOGGER.error('Bad file contents: %s', self.persistence_file)
            loaded = False
        if not loaded:
            LOGGER.warning('Trying backup file: %s', self.persistence_bak)
            try:
                if not self._load_sensors(self.persistence_bak):
                    LOGGER.warning('Failed to load sensors from file: %s',
                                   self.persistence_file)
            except ValueError:
                # Backup is corrupt too: drop the main file so a fresh
                # save can start over.
                LOGGER.error('Bad file contents: %s', self.persistence_file)
                LOGGER.warning('Removing file: %s', self.persistence_file)
                os.remove(self.persistence_file)
def _perform_file_action(self, filename, action):
"""Perform action on specific file types.
Dynamic dispatch function for performing actions on
specific file types.
"""
ext = os.path.splitext(filename)[1]
func = getattr(self, '_%s_%s' % (action, ext[1:]), None)
if func is None:
raise Exception('Unsupported file type %s' % ext[1:])
func(filename)
    def alert(self, nid):
        """Tell anyone who wants to know that a sensor was updated.

        Also save sensors if persistence is enabled.

        nid -- id of the sensor that changed.
        """
        if self.event_callback is not None:
            try:
                self.event_callback('sensor_update', nid)
            except Exception as exception:  # pylint: disable=W0703
                # The callback is third-party code: log the failure but
                # never let it break gateway processing.
                LOGGER.exception(exception)
        if self.persistence:
            self._save_sensors()
def get_next_id(self):
"""Return the next available sensor id."""
if len(self.sensors):
next_id = max(self.sensors.keys()) + 1
else:
next_id = 1
if next_id <= 254:
return next_id
return None
def add_sensor(self, sensorid=None):
"""Add a sensor to the gateway."""
if sensorid is None:
sensorid = self._get_next_id()
if sensorid is not None and sensorid not in self.sensors:
self.sensors[sensorid] = Sensor(sensorid)
return sensorid
return None
def is_sensor(self, sensorid, child_id=None):
"""Return True if a sensor and its child exist."""
if sensorid not in self.sensors:
LOGGER.info('SensorID: %s',sensorid)
return False
if child_id is not None:
return child_id in self.sensors[sensorid].children
return True
def setup_logging(self):
"""Set the logging level to debug."""
if self.debug:
logging.basicConfig(level=logging.DEBUG)
def handle_queue(self, queue=None):
"""Handle queue.
If queue is not empty, get the function and any args and kwargs
from the queue. Run the function and return output.
"""
if queue is None:
queue = self.queue
if not queue.empty():
func, args, kwargs = queue.get()
reply = func(*args, **kwargs)
queue.task_done()
return reply
return None
def fill_queue(self, func, args=None, kwargs=None, queue=None):
"""Put a function in a queue.
Put the function 'func', a tuple of arguments 'args' and a dict
of keyword arguments 'kwargs', as a tuple in the queue.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if queue is None:
queue = self.queue
queue.put((func, args, kwargs))
    def set_child_value(
            self, sensor_id, child_id, value_type, value, **kwargs):
        """Add a command to set a sensor value, to the queue.

        A queued command will be sent to the sensor, when the gateway
        thread has sent all previously queued commands to the FIFO queue.

        Silently does nothing when the sensor/child pair does not exist.
        Keyword argument ``ack`` (default 0) is forwarded to the sensor.
        """
        ack = kwargs.get('ack', 0)
        if self.is_sensor(sensor_id, child_id):
            self.fill_queue(self.sensors[sensor_id].set_child_value,
                            (child_id, value_type, value), {'ack': ack})
class SerialGateway(Gateway, threading.Thread):
    """Serial gateway for RFlink.

    Runs a background thread that keeps the serial connection alive,
    drains the outgoing command queue and hands incoming lines to the
    gateway logic.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, port, event_callback=None,
                 persistence=False, persistence_file='RFlink.pickle',
                 rflink_version='43', baud=57600, timeout=3.0,
                 reconnect_timeout=10.0):
        """Setup serial gateway.

        port -- serial device to open (e.g. /dev/ttyUSB0).
        timeout -- serial read timeout in seconds.
        reconnect_timeout -- sleep between reconnect attempts in seconds.
        """
        threading.Thread.__init__(self)
        Gateway.__init__(self, event_callback, persistence,
                         persistence_file, rflink_version)
        # pyserial handle; None whenever we are disconnected.
        self.serial = None
        self.port = port
        self.baud = baud
        self.timeout = timeout
        self.reconnect_timeout = reconnect_timeout
        self._stop_event = threading.Event()
    def connect(self):
        """Connect to the serial port. Return True on success."""
        if self.serial:
            LOGGER.info('Already connected to %s', self.port)
            return True
        try:
            LOGGER.info('Trying to connect to %s', self.port)
            self.serial = serial.Serial(self.port, self.baud,
                                        timeout=self.timeout)
            # NOTE(review): isOpen() is the pre-3.0 pyserial spelling of
            # is_open — confirm the pinned pyserial version supports it.
            if self.serial.isOpen():
                LOGGER.info('%s is open...', self.serial.name)
                LOGGER.info('Connected to %s', self.port)
            else:
                LOGGER.info('%s is not open...', self.serial.name)
                self.serial = None
                return False
        except serial.SerialException:
            LOGGER.error('Unable to connect to %s', self.port)
            return False
        return True
    def disconnect(self):
        """Disconnect from the serial port."""
        if self.serial is not None:
            LOGGER.info('Disconnecting from %s', self.serial.name)
            self.serial.close()
            self.serial = None
    def stop(self):
        """Stop the background thread."""
        self.disconnect()
        LOGGER.info('Stopping thread')
        self._stop_event.set()
    def run(self):
        """Background thread that reads messages from the gateway."""
        self.setup_logging()
        while not self._stop_event.is_set():
            # (Re)establish the connection before doing any work.
            if self.serial is None and not self.connect():
                time.sleep(self.reconnect_timeout)
                continue
            # Send at most one queued command per loop iteration.
            response = self.handle_queue()
            if response is not None:
                self.send(response.encode())
            try:
                line = self.serial.readline()
                if not line:
                    # Read timed out with no data; poll the queue again.
                    continue
            except serial.SerialException:
                LOGGER.exception('Serial exception')
                continue
            except TypeError:
                # pyserial has a bug that causes a TypeError to be thrown when
                # the port disconnects instead of a SerialException
                self.disconnect()
                continue
            try:
                string = line.decode('utf-8')
            except ValueError:
                LOGGER.warning(
                    'Error decoding message from gateway, '
                    'probably received bad byte.')
                continue
            # Defer protocol handling to the gateway logic via the queue.
            self.fill_queue(self.logic, (string,))
    def send(self, message):
        """Write a Message to the gateway."""
        if not message or not isinstance(message, str):
            LOGGER.warning('Missing string! No message sent!')
            return
        # Lock to make sure only one thread writes at a time to serial port.
        with self.lock:
            self.serial.write(message.encode())
class Sensor:
    """Represent a sensor node and the child sensors attached to it."""

    def __init__(self, sensor_id):
        """Initialize the sensor with empty metadata."""
        self.sensor_id = sensor_id
        self.children = {}
        self.type = None
        self.sketch_name = None
        self.sketch_version = None
        self.battery_level = 0
        self.rflink_version = None

    def add_child_sensor(self, child_id, child_type):
        """Create and register a new child sensor under this node."""
        if child_id not in self.children:
            self.children[child_id] = ChildSensor(child_id, child_type)
            return
        LOGGER.warning(
            'child_id %s already exists in children, '
            'cannot add child', child_id)

    def set_child_value(self, child_id, value_type, value, **kwargs):
        """Store a child's value and return the Message announcing it.

        Returns None when the child does not exist.
        """
        if child_id not in self.children:
            # TODO: Handle error # pylint: disable=W0511
            return None
        self.children[child_id].values[value_type] = value
        reply = Message()
        return reply.copy(node_id=self.sensor_id, child_id=child_id,
                          type=kwargs.get('msg_type', 1),
                          ack=kwargs.get('ack', 0),
                          sub_type=value_type, payload=value)
class ChildSensor:
    """Represent a child sensor."""
    # pylint: disable=too-few-public-methods
    def __init__(self, child_id, child_type):
        """Setup child sensor.

        child_id -- id of this child on its parent node.
        child_type -- type identifier for this child.
        """
        # pylint: disable=invalid-name
        self.id = child_id
        self.type = child_type
        # Mapping of value_type -> last reported value.
        self.values = {}
class Message:
    """Represent a message from the gateway."""
    def __init__(self, data=None):
        """Setup message.

        All fields default to the empty string; *data*, when given, is
        decoded immediately.
        """
        self.node_id = ''
        # Bug fix: child_id was never initialized, so encode() and
        # copy() raised AttributeError on a freshly created Message.
        self.child_id = ''
        self.message_id = ''
        self.type = ''
        self.ack = ''
        self.sub_type = ''
        self.payload = ''  # All data except payload are integers
        if data is not None:
            self.decode(data)
    def copy(self, **kwargs):
        """Copy a message, optionally replace attributes with kwargs."""
        msg = Message(self.encode())
        for key, val in kwargs.items():
            setattr(msg, key, val)
        return msg
    def decode(self, data):
        """Decode a message from a semicolon-separated command string.

        Raises ValueError when the data cannot be parsed.
        """
        try:
            list_data = re.split(';', data)
            if len(list_data) > 4:
                # Sensor data frame: strip the leading "20;<seq>" fields
                # and the trailing empty field after the final ';'.
                del list_data[0]
                del list_data[0]
                del list_data[-1]
                self.node_id = list_data[0]
                del list_data[0]
                self.child_id = list_data[0]
                del list_data[0]
                self.type = '0'
                self.ack = ''
                self.sub_type = ''
                self.payload = list_data
                LOGGER.info('Sensor: %s', self.payload)
            elif len(list_data) == 4:
                # Short frame from the gateway itself (e.g. version info).
                del list_data[0]
                del list_data[0]
                del list_data[-1]
                self.node_id = '255'
                self.child_id = '255'
                self.type = '3'  # internal message
                self.ack = ''
                self.sub_type = ''
                self.payload = list_data[0]
                LOGGER.info('Internal Message: %s', self.payload)
        except ValueError:
            LOGGER.warning('Error decoding message from gateway, '
                           'bad data received: %s', data)
            # Re-raise the original ValueError (the old code raised a
            # fresh, message-less one).
            raise
    def encode(self):
        """Encode a command string from message.

        Returns None when type/sub_type are not integer-convertible.
        """
        try:
            return ';'.join([str(f) for f in [
                self.node_id,
                self.child_id,
                int(self.type),
                self.ack,
                int(self.sub_type),
                self.payload,
            ]]) + '\n\r'
        except ValueError:
            LOGGER.exception('Error encoding message to gateway')
            return None
class RFlinkJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize gateway objects."""

    def default(self, obj):  # pylint: disable=E0202
        """Turn Sensor/ChildSensor instances into plain dicts.

        Anything else is delegated to the base encoder (which raises
        TypeError for unsupported types).
        """
        if isinstance(obj, Sensor):
            return {
                'sensor_id': obj.sensor_id,
                'children': obj.children,
                'type': obj.type,
                'sketch_name': obj.sketch_name,
                'sketch_version': obj.sketch_version,
                'battery_level': obj.battery_level,
                'rflink_version': obj.rflink_version,
            }
        if isinstance(obj, ChildSensor):
            return {
                'id': obj.id,
                'type': obj.type,
                'values': obj.values,
            }
        return json.JSONEncoder.default(self, obj)
class RFlinkJSONDecoder(json.JSONDecoder):
    """JSON decoder that rebuilds gateway objects from plain dicts."""

    def __init__(self):
        """Install the object hook that revives gateway objects."""
        json.JSONDecoder.__init__(self, object_hook=self.dict_to_object)

    def dict_to_object(self, obj):  # pylint: disable=R0201
        """Convert a decoded dict back into the matching object.

        Dicts whose keys are all digits (e.g. sensor tables keyed by
        id) get their keys converted back to int.
        """
        if not isinstance(obj, dict):
            return obj
        if 'sensor_id' in obj:
            sensor = Sensor(obj['sensor_id'])
            sensor.__dict__.update(obj)
            return sensor
        if all(key in obj for key in ('id', 'type', 'values')):
            child = ChildSensor(obj['id'], obj['type'])
            child.values = obj['values']
            return child
        if all(key.isdigit() for key in obj):
            return {int(key): val for key, val in obj.items()}
        return obj
|
{
"content_hash": "664d41d7517ebd4a9025bb298ca44654",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 79,
"avg_line_length": 36.797891036906854,
"alnum_prop": 0.5446078899608368,
"repo_name": "matt2005/pyrflink",
"id": "1525f8e3380c6653e00f89627ade09dac1f523bc",
"size": "20938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rflink/rflink_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "63919"
}
],
"symlink_target": ""
}
|
import warnings
from functools import partial
from logging import getLogger
from django.conf import settings
from django.contrib import messages
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import NoReverseMatch
from django.utils.translation import ugettext_lazy as _
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import load
from cms.utils.moderator import use_draft
from menus.base import Menu
from menus.exceptions import NamespaceAlreadyRegistered
from menus.models import CacheKey
logger = getLogger('menus')
def _build_nodes_inner_for_one_menu(nodes, menu_class_name):
'''
This is an easier to test "inner loop" building the menu tree structure
for one menu (one language, one site)
'''
done_nodes = {} # Dict of node.id:Node
final_nodes = []
# This is to prevent infinite loops - we need to compare the number of
# times we see a specific node to "something", and for the time being,
# it's the total number of nodes
list_total_length = len(nodes)
while nodes:
# For when the node has a parent_id but we haven't seen it yet.
# We must not append it to the final list in this case!
should_add_to_final_list = True
node = nodes.pop(0)
# Increment the "seen" counter for this specific node.
node._counter = getattr(node, '_counter', 0) + 1
# Implicit namespacing by menu.__name__
if not node.namespace:
node.namespace = menu_class_name
if node.namespace not in done_nodes:
# We need to create the namespace dict to avoid KeyErrors
done_nodes[node.namespace] = {}
# If we have seen the parent_id already...
if node.parent_id in done_nodes[node.namespace]:
# Implicit parent namespace by menu.__name__
if not node.parent_namespace:
node.parent_namespace = menu_class_name
parent = done_nodes[node.namespace][node.parent_id]
parent.children.append(node)
node.parent = parent
# If it has a parent_id but we haven't seen it yet...
elif node.parent_id:
# We check for infinite loops here, by comparing the number of
# times we "saw" this node to the number of nodes in the list
if node._counter < list_total_length:
nodes.append(node)
# Never add this node to the final list until it has a real
# parent (node.parent)
should_add_to_final_list = False
if should_add_to_final_list:
final_nodes.append(node)
# add it to the "seen" list
done_nodes[node.namespace][node.id] = node
return final_nodes
def _get_menu_class_for_instance(menu_class, instance):
"""
Returns a new menu class that subclasses
menu_class but is bound to instance.
This means it sets the "instance" attribute of the class.
"""
attrs = {'instance': instance}
class_name = menu_class.__name__
meta_class = type(menu_class)
return meta_class(class_name, (menu_class,), attrs)
class MenuRenderer(object):
    # The main logic behind this class is to decouple
    # the singleton menu pool from the menu rendering logic.
    # By doing this we can be sure that each request has it's
    # private instance that will always have the same attributes.
    def __init__(self, pool, request):
        """Bind the renderer to *pool* and to a single *request*."""
        self.pool = pool
        # It's important this happens on init
        # because we need to make sure that a menu renderer
        # points to the same registered menus as long as the
        # instance lives.
        self.menus = pool.get_registered_menus(for_rendering=True)
        self.request = request
        self.language = get_language_from_request(request)
        self.site = Site.objects.get_current(request)
        self.draft_mode_active = use_draft(request)
    def _get_cache_key(self, site_id):
        """Build the menu-node cache key for this request and site."""
        # This internal will change to a cached property on 3.5
        prefix = getattr(settings, 'CMS_CACHE_PREFIX', 'menu_cache_')
        key = '%smenu_nodes_%s_%s' % (prefix, self.language, site_id)
        # Authenticated users get a private cache entry: the rendered
        # menu can differ per user.
        if self.request.user.is_authenticated():
            key += '_%s_user' % self.request.user.pk
        if self.draft_mode_active:
            key += ':draft'
        else:
            key += ':public'
        return key
    def _is_cached(self, site_id):
        """Return True if a CacheKey DB row exists for our cache key."""
        # This internal will change to a cached property on 3.5
        _internal_cache = '_is_cached_{}'.format(site_id)
        if not hasattr(self, _internal_cache):
            cache_key = self._get_cache_key(site_id=site_id)
            db_cache_key_lookup = CacheKey.objects.filter(
                key=cache_key,
                language=self.language,
                site=site_id,
            )
            # Cache the lookup to avoid a query on every call to a menu tag
            setattr(self, _internal_cache, db_cache_key_lookup.exists())
        return getattr(self, _internal_cache)
    def _build_nodes(self, site_id):
        """
        This is slow. Caching must be used.
        One menu is built per language and per site.
        Namespaces: they are ID prefixes to avoid node ID clashes when plugging
        multiple trees together.
        - We iterate on the list of nodes.
        - We store encountered nodes in a dict (with namespaces):
            done_nodes[<namespace>][<node's id>] = node
        - When a node has a parent defined, we lookup that parent in done_nodes
            if it's found:
                set the node as the node's parent's child (re-read this)
            else:
                the node is put at the bottom of the list
        """
        if site_id:
            warnings.warn(
                "The site_id parameter to _build_nodes has been deprecated. "
                "It will be removed in django CMS 3.5",
                PendingDeprecationWarning
            )
        else:
            site_id = self.site.pk
        key = self._get_cache_key(site_id)
        cached_nodes = cache.get(key, None)
        if cached_nodes and self._is_cached(site_id):
            # Only use the cache if the key is present in the database.
            # This prevents a condition where keys which have been removed
            # from the database due to a change in content, are still used.
            return cached_nodes
        final_nodes = []
        toolbar = getattr(self.request, 'toolbar', None)
        for menu_class_name in self.menus:
            menu = self.get_menu(menu_class_name)
            try:
                nodes = menu.get_nodes(self.request)
            except NoReverseMatch:
                # Apps might raise NoReverseMatch if an apphook does not yet
                # exist, skip them instead of crashing
                nodes = []
                # Surface the problem to staff users via the toolbar.
                if toolbar and toolbar.is_staff:
                    messages.error(self.request,
                                   _('Menu %s cannot be loaded. Please, make sure all '
                                     'its urls exist and can be resolved.') %
                                   menu_class_name)
                logger.error("Menu %s could not be loaded." %
                             menu_class_name, exc_info=True)
            # nodes is a list of navigation nodes (page tree in cms + others)
            final_nodes += _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        cache.set(key, final_nodes, get_cms_setting('CACHE_DURATIONS')['menus'])
        if not self._is_cached(site_id):
            # No need to invalidate the internal lookup cache,
            # just set the value directly.
            setattr(self, '_is_cached_{}'.format(site_id), True)
            # We need to have a list of the cache keys for languages and sites that
            # span several processes - so we follow the Django way and share through
            # the database. It's still cheaper than recomputing every time!
            # This way we can selectively invalidate per-site and per-language,
            # since the cache is shared but the keys aren't
            CacheKey.objects.create(key=key, language=self.language, site=site_id)
        return final_nodes
    def _mark_selected(self, nodes):
        """Flag each node as selected (or not) for the current request."""
        for node in nodes:
            node.selected = node.is_selected(self.request)
        return nodes
    def apply_modifiers(self, nodes, namespace=None, root_id=None,
                        post_cut=False, breadcrumb=False):
        """Run all registered modifiers over *nodes* and return them."""
        if not post_cut:
            nodes = self._mark_selected(nodes)
        # Only fetch modifiers when they're needed.
        # We can do this because unlike menu classes,
        # modifiers can't change on a request basis.
        for cls in self.pool.get_registered_modifiers():
            inst = cls(renderer=self)
            nodes = inst.modify(
                self.request, nodes, namespace, root_id, post_cut, breadcrumb)
        return nodes
    def get_nodes(self, namespace=None, root_id=None, site_id=None, breadcrumb=False):
        """Return the built and modifier-processed node list."""
        if site_id:
            warnings.warn(
                "The site_id parameter to get_nodes has been deprecated. "
                "It will be removed in django CMS 3.5",
                PendingDeprecationWarning
            )
        nodes = self._build_nodes(site_id=site_id)
        nodes = self.apply_modifiers(
            nodes=nodes,
            namespace=namespace,
            root_id=root_id,
            post_cut=False,
            breadcrumb=breadcrumb,
        )
        return nodes
    def get_menu(self, menu_name):
        """Instantiate the registered menu class bound to this renderer."""
        MenuClass = self.menus[menu_name]
        return MenuClass(renderer=self)
class MenuPool(object):
    """Process-wide registry of menu and modifier classes."""
    def __init__(self):
        # {class name (or "name:instance_pk" namespace): menu class}
        self.menus = {}
        self.modifiers = []
        self.discovered = False
    def get_renderer(self, request):
        """Return a MenuRenderer bound to *request*."""
        self.discover_menus()
        # Returns a menu pool wrapper that is bound
        # to the given request and can perform
        # operations based on the given request.
        return MenuRenderer(pool=self, request=request)
    def discover_menus(self):
        """Autodiscover menu modules from installed apps (idempotent)."""
        if self.discovered:
            return
        # FIXME: Remove in 3.5
        load('menu')
        load('cms_menus')
        from menus.modifiers import register
        register()
        self.discovered = True
    def get_registered_menus(self, for_rendering=False):
        """
        Returns all registered menu classes.
        :param for_rendering: Flag that when True forces us to include
        all CMSAttachMenu subclasses, even if they're not attached.
        """
        self.discover_menus()
        registered_menus = {}
        for menu_class_name, menu_cls in self.menus.items():
            if isinstance(menu_cls, Menu):
                # A Menu **instance** was registered,
                # this is non-standard, but acceptable.
                menu_cls = menu_cls.__class__
            if hasattr(menu_cls, "get_instances"):
                # It quacks like a CMSAttachMenu.
                # Expand the one CMSAttachMenu into multiple classes.
                # Each class is bound to the instance the menu is attached to.
                _get_menu_class = partial(_get_menu_class_for_instance, menu_cls)
                instances = menu_cls.get_instances() or []
                for instance in instances:
                    # For each instance, we create a unique class
                    # that is bound to that instance.
                    # Doing this allows us to delay the instantiation
                    # of the menu class until it's needed.
                    # Plus we keep the menus consistent by always
                    # pointing to a class instead of an instance.
                    namespace = "{0}:{1}".format(
                        menu_class_name, instance.pk)
                    registered_menus[namespace] = _get_menu_class(instance)
                if not instances and not for_rendering:
                    # The menu is a CMSAttachMenu but has no instances,
                    # normally we'd just ignore it but it's been
                    # explicitly set that we are not rendering these menus
                    # via the (for_rendering) flag.
                    registered_menus[menu_class_name] = menu_cls
            elif hasattr(menu_cls, "get_nodes"):
                # This is another type of Menu, cannot be expanded, but must be
                # instantiated, none-the-less.
                registered_menus[menu_class_name] = menu_cls
            else:
                raise ValidationError(
                    "Something was registered as a menu, but isn't.")
        return registered_menus
    def get_registered_modifiers(self):
        """Return the list of registered modifier classes."""
        return self.modifiers
    def clear(self, site_id=None, language=None, all=False):
        '''
        This invalidates the cache for a given menu (site_id and language)
        '''
        if all:
            cache_keys = CacheKey.objects.get_keys()
        else:
            cache_keys = CacheKey.objects.get_keys(site_id, language)
        to_be_deleted = cache_keys.distinct().values_list('key', flat=True)
        # Drop the shared cache entries first, then the DB markers.
        if to_be_deleted:
            cache.delete_many(to_be_deleted)
        cache_keys.delete()
    def register_menu(self, menu_cls):
        """Register a Menu subclass by its class name."""
        import warnings
        if menu_cls.__module__.split('.')[-1] == 'menu':
            warnings.warn('menu.py filename is deprecated, '
                          'and it will be removed in version 3.5; '
                          'please rename it to cms_menus.py', DeprecationWarning)
        from menus.base import Menu
        assert issubclass(menu_cls, Menu)
        if menu_cls.__name__ in self.menus:
            raise NamespaceAlreadyRegistered(
                "[{0}] a menu with this name is already registered".format(
                    menu_cls.__name__))
        # Note: menu_cls should still be the menu CLASS at this point.
        self.menus[menu_cls.__name__] = menu_cls
    def register_modifier(self, modifier_class):
        """Register a Modifier subclass (deduplicated)."""
        import os
        import inspect
        import warnings
        # Inspect the caller's file name to detect the deprecated
        # menu.py module location.
        source_file = os.path.basename(inspect.stack()[1][1])
        if source_file == 'menu.py':
            warnings.warn('menu.py filename is deprecated, '
                          'and it will be removed in version 3.5; '
                          'please rename it to cms_menus.py', DeprecationWarning)
        from menus.base import Modifier
        assert issubclass(modifier_class, Modifier)
        if modifier_class not in self.modifiers:
            self.modifiers.append(modifier_class)
    def get_menus_by_attribute(self, name, value):
        """
        Returns the list of menus that match the name/value criteria provided.
        """
        # Note that we are limiting the output to only single instances of any
        # specific menu class. This is to address issue (#4041) which has
        # cropped-up in 3.0.13/3.0.0.
        # By setting for_rendering to False
        # we're limiting the output to menus
        # that are registered and have instances
        # (in case of attached menus).
        menus = self.get_registered_menus(for_rendering=False)
        return sorted(list(set([(menu.__name__, menu.name)
                                for menu_class_name, menu in menus.items()
                                if getattr(menu, name, None) == value])))
    def get_nodes_by_attribute(self, nodes, name, value):
        """Filter *nodes* by a value in their ``attr`` dict."""
        return [node for node in nodes if node.attr.get(name, None) == value]
    def apply_modifiers(self, nodes, request, namespace=None, root_id=None,
                        post_cut=False, breadcrumb=False):
        """Deprecated: delegate to a request-bound MenuRenderer."""
        warnings.warn('menu_pool.apply_modifiers is deprecated '
                      'and it will be removed in version 3.5; '
                      'please use the menu renderer instead.', DeprecationWarning)
        renderer = self.get_renderer(request)
        nodes = renderer.apply_modifiers(
            nodes=nodes,
            namespace=namespace,
            root_id=root_id,
            post_cut=post_cut,
            breadcrumb=breadcrumb,
        )
        return nodes
    def get_nodes(self, request, namespace=None, root_id=None, site_id=None,
                  breadcrumb=False):
        """Deprecated: delegate to a request-bound MenuRenderer."""
        warnings.warn('menu_pool.get_nodes is deprecated '
                      'and it will be removed in version 3.5; '
                      'please use the menu renderer instead.', DeprecationWarning)
        renderer = self.get_renderer(request)
        nodes = renderer.get_nodes(
            namespace=namespace,
            root_id=root_id,
            site_id=site_id,
            breadcrumb=breadcrumb,
        )
        return nodes
# Module-level singleton shared across the process; always access menus
# through this instance rather than instantiating MenuPool directly.
menu_pool = MenuPool()
|
{
"content_hash": "6fbde69b89f1863bba7fff72a26ff826",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 86,
"avg_line_length": 39.74764150943396,
"alnum_prop": 0.5914080579125378,
"repo_name": "jproffitt/django-cms",
"id": "d5f73eeac90ac07b424f61465d62c0c383a0563a",
"size": "16877",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "menus/menu_pool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132607"
},
{
"name": "HTML",
"bytes": "197520"
},
{
"name": "JavaScript",
"bytes": "1227622"
},
{
"name": "Python",
"bytes": "2300833"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
import contextlib
import mock
from neutron.common import constants as l3_const
from neutron import context
from neutron.db import l3_dvr_db
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
class L3DvrTestCase(testlib_api.SqlTestCase):
    def setUp(self):
        """Create an admin context and the DVR-aware L3 mixin under test."""
        super(L3DvrTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.mixin = l3_dvr_db.L3_NAT_with_dvr_db_mixin()
    def _create_router(self, router):
        """Persist *router* in the DB under a fixed test tenant."""
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin._create_router_db(self.ctx, router, 'foo_tenant')
    def _test__create_router_db(self, expected=False, distributed=None):
        """Create a router and check its 'distributed' extra attribute."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        if distributed is not None:
            router['distributed'] = distributed
        result = self._create_router(router)
        self.assertEqual(expected, result.extra_attributes['distributed'])
    def test_create_router_db_default(self):
        """Routers default to non-distributed when the flag is omitted."""
        self._test__create_router_db(expected=False)
    def test_create_router_db_centralized(self):
        """An explicit distributed=False yields a centralized router."""
        self._test__create_router_db(expected=False, distributed=False)
    def test_create_router_db_distributed(self):
        """An explicit distributed=True is persisted on the router."""
        self._test__create_router_db(expected=True, distributed=True)
    def test__validate_router_migration_on_router_update(self):
        """Updates that do not touch 'distributed' pass validation."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertIsNone(self.mixin._validate_router_migration(
            self.ctx, router_db, {'name': 'foo_router_2'}))
    def test__validate_router_migration_raise_error(self):
        """Migrating distributed -> centralized is rejected."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertRaises(NotImplementedError,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, {'distributed': False})
    def test_update_router_db_centralized_to_distributed(self):
        """Migrating centralized -> distributed flips the DB flag."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        agent = {'id': _uuid()}
        distributed = {'distributed': True}
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertFalse(router_db.extra_attributes.distributed)
        # Stub out collaborators so only _update_router_db's own logic runs.
        self.mixin._get_router = mock.Mock(return_value=router_db)
        self.mixin._validate_router_migration = mock.Mock()
        self.mixin._update_distributed_attr = mock.Mock()
        self.mixin.list_l3_agents_hosting_router = mock.Mock(
            return_value={'agents': [agent]})
        self.mixin._unbind_router = mock.Mock()
        router_db = self.mixin._update_router_db(
            self.ctx, router_id, distributed, mock.ANY)
        # Assert that the DB value has changed
        self.assertTrue(router_db.extra_attributes.distributed)
        self.assertEqual(1,
                         self.mixin._update_distributed_attr.call_count)
    def _test_get_device_owner(self, is_distributed=False,
                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        """Check _get_device_owner for a router passed by id or db object."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)
    def test_get_device_owner_by_router_id(self):
        """Default (centralized, by-id) yields the router-interface owner."""
        self._test_get_device_owner()
    def test__get_device_owner_centralized(self):
        """Centralized router passed as db object -> router-interface owner."""
        self._test_get_device_owner(pass_router_id=False)
    def test__get_device_owner_distributed(self):
        """Distributed router -> DVR-interface device owner."""
        self._test_get_device_owner(
            is_distributed=True,
            expected=l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)
    def _test__is_distributed_router(self, router, expected):
        """Check is_distributed_router() against the *expected* flag."""
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)
    def test__is_distributed_router_by_db_object(self):
        """Passing a db object to _get_device_owner does not raise.

        NOTE(review): this test makes no assertion — it only verifies
        the call accepts a db object without raising; consider asserting
        the returned device owner.
        """
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        self.mixin._get_device_owner(mock.ANY, router_db)
    def test__is_distributed_router_default(self):
        """A router dict without the flag is treated as centralized."""
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_centralized(self):
        """distributed=False is reported as centralized."""
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_distributed(self):
        """distributed=True is reported as distributed."""
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)
    def test_get_agent_gw_ports_exist_for_network(self):
        """The agent-gateway port lookup queries with the right filters."""
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = []
            self.mixin.get_agent_gw_ports_exist_for_network(
                self.ctx, 'network_id', 'host', 'agent_id')
        plugin.get_ports.assert_called_with(self.ctx, {
            'network_id': ['network_id'],
            'device_id': ['agent_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
        """Deleting a DVR-internal *port* must raise L3PortInUse."""
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin._get_port.return_value = port
            self.assertRaises(l3.L3PortInUse,
                              self.mixin.prevent_l3_port_deletion,
                              self.ctx,
                              port['id'])
    def test_prevent_delete_floatingip_agent_gateway_port(self):
        """Agent-gateway ports are protected from direct deletion."""
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)
    def test_prevent_delete_csnat_port(self):
        """Centralized-SNAT ports are protected from direct deletion."""
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)
    def test__create_gw_port_with_no_gateway(self):
        """No SNAT interface ports are created when there is no gateway."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_create_gw_port'),
            mock.patch.object(self.mixin,
                              'create_snat_intf_ports_if_not_exists')
        ) as (cw, cs):
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY,
                mock.ANY, mock.ANY)
            self.assertFalse(cs.call_count)
    def test_build_routers_list_with_gw_port_mismatch(self):
        """A router whose gw_port_id has no matching port gets no gw_port."""
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
        self.assertIsNone(routers[0].get('gw_port'))
    def test_clear_unused_fip_agent_gw_port(self):
        """An unused FIP agent gateway port is deleted when no FIPs remain."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': _uuid(),
            'floating_network_id': _uuid()
        }
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_get_floatingip'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid'),
            mock.patch.object(self.mixin,
                              'check_fips_availability_on_host_ext_net'),
            mock.patch.object(self.mixin,
                              'delete_floatingip_agent_gateway_port')
        ) as (gfips, gvm, cfips, dfips):
            gfips.return_value = floatingip
            gvm.return_value = 'my-host'
            cfips.return_value = True
            self.mixin.clear_unused_fip_agent_gw_port(
                self.ctx, floatingip)
            self.assertTrue(dfips.called)
            self.assertTrue(cfips.called)
            self.assertTrue(gvm.called)
    def test_delete_floatingip_agent_gateway_port(self):
        """The agent gateway port matching host+network gets deleted."""
        port = {
            'id': 'my_port_id',
            'binding:host_id': 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        with contextlib.nested(
            mock.patch.object(manager.NeutronManager, 'get_plugin'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid')) as (gp, vm_host):
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = [port]
            vm_host.return_value = 'foo_host'
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, 'foo_host', 'network_id')
        plugin.get_ports.assert_called_with(self.ctx, filters={
            'network_id': ['network_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
        plugin._delete_port.assert_called_with(self.ctx, 'my_port_id')
    def _delete_floatingip_test_setup(self, floatingip):
        """Run delete_floatingip under mocks; return the clear-gw-port mock."""
        fip_id = floatingip['id']
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_get_floatingip'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              'delete_floatingip')) as (gf, vf, df):
            gf.return_value = floatingip
            self.mixin.delete_floatingip(self.ctx, fip_id)
            # vf is the clear_unused_fip_agent_gw_port mock the callers assert on.
            return vf
    def _disassociate_floatingip_setup(self, port_id=None, floatingip=None):
        """Run disassociate_floatingips under mocks; return the clear-gw mock."""
        with contextlib.nested(
            mock.patch.object(self.mixin, '_get_floatingip_on_port'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
        ) as (gf, vf):
            gf.return_value = floatingip
            self.mixin.disassociate_floatingips(
                self.ctx, port_id, do_notify=False)
            return vf
    def test_disassociate_floatingip_with_vm_port(self):
        """Disassociating a FIP bound to a VM port clears the agent gw port."""
        port_id = '1234'
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'floating_network_id': _uuid()
        }
        mock_disassociate_fip = self._disassociate_floatingip_setup(
            port_id=port_id, floatingip=floatingip)
        self.assertTrue(mock_disassociate_fip.called)
    def test_disassociate_floatingip_with_no_vm_port(self):
        """With no FIP on the port, the agent gw port is left untouched."""
        mock_disassociate_fip = self._disassociate_floatingip_setup()
        self.assertFalse(mock_disassociate_fip.called)
    def test_delete_floatingip_without_internal_port(self):
        """Deleting an unassociated FIP does not touch the agent gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': None,
            'floating_network_id': _uuid()
        }
        mock_fip_clear = self._delete_floatingip_test_setup(floatingip)
        self.assertFalse(mock_fip_clear.call_count)
    def test_delete_floatingip_with_internal_port(self):
        """Deleting an associated FIP clears the now-unused agent gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': _uuid(),
            'floating_network_id': _uuid()
        }
        mock_fip_clear = self._delete_floatingip_test_setup(floatingip)
        self.assertTrue(mock_fip_clear.called)
    def _floatingip_on_port_test_setup(self, hostid):
        """Drive _process_floating_ips with a stubbed host id; return fixtures."""
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id'
        }
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }
        # NOTE: mock.patch is not needed here since self.mixin is created fresh
        # for each test. It doesn't work with some methods since the mixin is
        # tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin.get_vm_port_hostid = mock.Mock(return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin.get_fip_sync_interfaces = mock.Mock(
            return_value='fip_interface')
        self.mixin._process_floating_ips(self.ctx, routers, [floatingip])
        return (router, floatingip)
    def test_floatingip_on_port_no_host(self):
        """No host for the VM port -> router gets no FIP/agent-intf keys."""
        router, fip = self._floatingip_on_port_test_setup(None)
        self.assertTrue(self.mixin.get_vm_port_hostid.called)
        self.assertFalse(self.mixin._get_agent_by_type_and_host.called)
        self.assertFalse(self.mixin.get_fip_sync_interfaces.called)
        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
        self.assertNotIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
    def test_floatingip_on_port_with_host(self):
        """With a host, the FIP and agent interfaces land on the router dict."""
        router, fip = self._floatingip_on_port_test_setup(_uuid())
        self.assertTrue(self.mixin.get_vm_port_hostid.called)
        self.assertTrue(self.mixin._get_agent_by_type_and_host.called)
        self.assertTrue(self.mixin.get_fip_sync_interfaces.called)
        self.assertIn(l3_const.FLOATINGIP_KEY, router)
        self.assertIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
        self.assertIn('fip_interface',
                      router[l3_const.FLOATINGIP_AGENT_INTF_KEY])
    def test_delete_disassociated_floatingip_agent_port(self):
        """Disassociating a FIP (port_id None) on a DVR router clears gw port."""
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        with contextlib.nested(
            mock.patch.object(self.mixin,
                              'get_router'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_update_fip_assoc'),
        ) as (grtr, vf, cf):
            grtr.return_value = router
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip, mock.ANY)
            self.assertTrue(vf.called)
    def _setup_test_create_delete_floatingip(
        self, fip, floatingip_db, router_db):
        """Drive _update_fip_assoc under mocks; return (delete, create) mocks."""
        port = {
            'id': '1234',
            'binding:host_id': 'myhost',
            'network_id': 'external_net'
        }
        with contextlib.nested(
            mock.patch.object(self.mixin,
                              'get_router'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(self.mixin,
                              'create_fip_agent_gw_port_if_not_exists'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_update_fip_assoc'),
        ) as (grtr, vmp, d_fip, c_fip, up_fip):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip_db, port)
            return d_fip, c_fip
    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
        """Associating a FIP on a DVR router creates the agent gw port."""
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertTrue(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
        """On a legacy (non-DVR) router no agent gw port is created or deleted."""
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test_delete_floatingip_agent_gw_port_with_dvr_router(self):
        """Dissociating (port_id None) on a DVR router deletes the gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertTrue(delete_fip.called)
        self.assertFalse(create_fip.called)
    def test_delete_floatingip_agent_gw_port_with_non_dvr_router(self):
        """Dissociating on a legacy router touches no agent gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test__validate_router_migration_prevent_check_advanced_svc(self):
        """Migration to DVR must consult the firewall and VPN checks."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        # make sure the check are invoked, whether they pass or
        # raise, it does not matter in the context of this test
        with contextlib.nested(
            mock.patch.object(self.mixin, 'check_router_has_no_firewall'),
            mock.patch.object(self.mixin, 'check_router_has_no_vpnaas')
        ) as (check_fw, check_vpn):
            self.mixin._validate_router_migration(
                self.ctx, router_db, {'distributed': True})
            check_fw.assert_called_once_with(self.ctx, router_db)
            check_vpn.assert_called_once_with(self.ctx, router_db)
    def test_check_router_has_no_firewall_raises(self):
        """A router with firewalls attached raises RouterInUse."""
        with mock.patch.object(
            manager.NeutronManager, 'get_service_plugins') as sp:
            fw_plugin = mock.Mock()
            sp.return_value = {'FIREWALL': fw_plugin}
            fw_plugin.get_firewalls.return_value = [mock.ANY]
            self.assertRaises(
                l3.RouterInUse,
                self.mixin.check_router_has_no_firewall,
                self.ctx, {'id': 'foo_id', 'tenant_id': 'foo_tenant'})
    def test_check_router_has_no_firewall_passes(self):
        """Without a FWaaS plugin loaded the firewall check trivially passes."""
        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins',
                               return_value={}):
            self.assertTrue(
                self.mixin.check_router_has_no_firewall(mock.ANY, mock.ANY))
    def test_check_router_has_no_vpn(self):
        """The VPN check is delegated to the VPN plugin's check_router_in_use."""
        with mock.patch.object(
            manager.NeutronManager, 'get_service_plugins') as sp:
            vpn_plugin = mock.Mock()
            sp.return_value = {'VPN': vpn_plugin}
            self.mixin.check_router_has_no_vpnaas(mock.ANY, {'id': 'foo_id'})
            vpn_plugin.check_router_in_use.assert_called_once_with(
                mock.ANY, 'foo_id')
|
{
"content_hash": "16ee133db502220ee4e89d4f4953c0c4",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 79,
"avg_line_length": 40.60039761431412,
"alnum_prop": 0.5633630398589756,
"repo_name": "rdo-management/neutron",
"id": "2b7fab9355375b32abc20f05feee9ccbf71c8329",
"size": "21034",
"binary": false,
"copies": "1",
"ref": "refs/heads/mgt-master",
"path": "neutron/tests/unit/db/test_l3_dvr_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24047"
},
{
"name": "Gettext Catalog",
"bytes": "575107"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "6918375"
},
{
"name": "Shell",
"bytes": "12287"
}
],
"symlink_target": ""
}
|
import unittest
class SanityTest(unittest.TestCase):
    """Minimal smoke test proving the test harness itself runs."""

    def setUp(self):
        # No fixtures are needed for this check.
        pass

    def test_true_eq_true(self):
        """A tautology: if this fails, the runner is broken, not the code."""
        self.assertEqual(True, True)
# Allow running this module directly: `python sanity_test.py`.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "1c3fdd547b4bc480bb78710402951857",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 36,
"avg_line_length": 20.6,
"alnum_prop": 0.616504854368932,
"repo_name": "timBrockman/fsnd-p2-mublog",
"id": "661269962384425c6679b37a456a0224a38e275f",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanity_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47521"
},
{
"name": "Python",
"bytes": "12114"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from grapher import errors
from grapher.settings import Testing
class GrapherErrorTest(TestCase):
    """Checks the base GrapherError's status code and API payload."""

    def test_internal_error(self):
        """A bare GrapherError is a 500 with a single INTERNAL_ERROR entry."""
        e = errors.GrapherError()
        self.assertEqual(e.status_code, 500)
        response = e.as_api_response()
        self.assertIn('INTERNAL_ERROR', response)
        self.assertEqual(len(response), 1)
class BadRequestErrorTest(TestCase):
    """Checks BadRequestError accepts all supported error-spec shapes."""

    def test_valid_error_codes(self):
        """Error specs may be a bare code or a tuple of code + args."""
        valid_errors = (
            'DATA_CANNOT_BE_EMPTY',
            ('DATA_CANNOT_BE_EMPTY',),
            ('NOT_FOUND', (10,),),
            ('INVALID_FIELDS', ('name, age',), ('_id, job',))
        )
        e = errors.BadRequestError(*valid_errors)
        self.assertEqual(e.status_code, 400)
        response = e.as_api_response()
        self.assertIsInstance(response, dict)
        # Duplicate DATA_CANNOT_BE_EMPTY entries collapse: 3 unique codes.
        self.assertEqual(len(response), 3)
        # NOTE(review): `error` is unused; only the codes are validated here.
        for code, error in response.items():
            self.assertIn(code, Testing.ERRORS)
|
{
"content_hash": "341aee5c2811b6bd692c7649dc7dc505",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 61,
"avg_line_length": 28.852941176470587,
"alnum_prop": 0.6126401630988787,
"repo_name": "lucasdavid/grapher",
"id": "75258512ec53ade35226f8284abdbc15974f48e5",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/errors_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99199"
}
],
"symlink_target": ""
}
|
from twisted.conch.ssh import transport
from twisted.python import log
class HonsshServer(transport.SSHServerTransport):
    """SSH server transport that starts with null ciphers and works around
    the "bad packet length" disconnect (Python 2 / Twisted Conch code:
    packets are byte strings, hence ord(packet[0]) below)."""

    def connectionMade(self):
        """
        Called when the connection is made to the other side. We sent our
        version and the MSG_KEXINIT packet.
        """
        self.transport.write('%s\r\n' % (self.ourVersionString,))
        # Start with the 'none' cipher suite; real keys arrive after KEX.
        self.currentEncryptions = transport.SSHCiphers('none', 'none', 'none', 'none')
        self.currentEncryptions.setKeys('', '', '', '', '', '')

    def dataReceived(self, data):
        """
        First, check for the version string (SSH-2.0-*). After that has been
        received, this method adds data to the buffer, and pulls out any
        packets.

        @type data: C{str}
        """
        self.buf = self.buf + data
        if not self.gotVersion:
            # Wait until a full line follows the 'SSH-' banner.
            if self.buf.find('\n', self.buf.find('SSH-')) == -1:
                return
            lines = self.buf.split('\n')
            for p in lines:
                if p.startswith('SSH-'):
                    self.gotVersion = True
                    self.otherVersionString = p.strip()
                    remoteVersion = p.split('-')[1]
                    if remoteVersion not in self.supportedVersions:
                        self._unsupportedVersionReceived(remoteVersion)
                        return
                    # Keep only the bytes after the banner line.
                    i = lines.index(p)
                    self.buf = '\n'.join(lines[i + 1:])
                    self.sendKexInit()
        packet = self.getPacket()
        while packet:
            # First byte of an SSH packet payload is the message number.
            messageNum = ord(packet[0])
            self.dispatchMessage(messageNum, packet[1:])
            packet = self.getPacket()

    def sendDisconnect(self, reason, desc):
        """
        http://kbyte.snowpenguin.org/portal/2013/04/30/kippo-protocol-mismatch-workaround/
        Workaround for the "bad packet length" error message.

        @param reason: the reason for the disconnect. Should be one of the
                       DISCONNECT_* values.
        @type reason: C{int}
        @param desc: a descrption of the reason for the disconnection.
        @type desc: C{str}
        """
        if not 'bad packet length' in desc:
            # With python >= 3 we can use super?
            transport.SSHServerTransport.sendDisconnect(self, reason, desc)
        else:
            # Mimic OpenSSH's banner-mismatch reply instead of an SSH
            # disconnect packet, then drop the connection.
            self.transport.write('Protocol mismatch.\n')
            log.msg('[SERVER] - Disconnecting with error, code %s\nreason: %s' % (reason, desc))
            self.transport.loseConnection()
|
{
"content_hash": "f728830421fb65cb28341d2bb681a7af",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 96,
"avg_line_length": 41.9344262295082,
"alnum_prop": 0.5527756059421423,
"repo_name": "coolhacks/docker-hacks",
"id": "cd574b13e43de0f2db6cbc23cd965d096a9fe43d",
"size": "4038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dockpot/honssh/honsshServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "23973"
},
{
"name": "CSS",
"bytes": "1227"
},
{
"name": "Groff",
"bytes": "37179"
},
{
"name": "HTML",
"bytes": "50500"
},
{
"name": "Nginx",
"bytes": "3121"
},
{
"name": "Python",
"bytes": "10073"
},
{
"name": "Shell",
"bytes": "35683"
}
],
"symlink_target": ""
}
|
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG
INTERNAL_IPS = ('127.0.0.1',)

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Database file lives under the directory named by $GRIMA_DB.
container = os.environ['GRIMA_DB'] + os.sep + 'grima.db'

DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = container # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'u9qb9d!w-b=*k-quc7cdl346&ubb1@hhmm6rzo!2&du*%px9+&'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.load_template_source',
        'django.template.loaders.app_directories.load_template_source',
)

MIDDLEWARE_CLASSES = (
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'debug_toolbar.middleware.DebugToolbarMiddleware',
)

ROOT_URLCONF = 'grima.www.urls'

templates = os.environ['GRIMA_WWW'] + os.sep + 'templates'

# BUG FIX: without the trailing comma, `(templates)` is just the string
# itself, so Django would iterate it character by character. It must be a
# one-element tuple.
TEMPLATE_DIRS = (
        templates,
)

INSTALLED_APPS = (
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'debug_toolbar',
        'south',
)

DEBUG_TOOLBAR_CONFIG = {
        'INTERCEPT_REDIRECTS': False,
}

# $Id:$
#
# Local Variables:
# indent-tabs-mode: nil
# python-continuation-offset: 2
# python-indent: 8
# End:
# vim: ai et si sw=8 ts=8
|
{
"content_hash": "fc25cc93ef752c11e91bfa4f14660d87",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 101,
"avg_line_length": 30.489795918367346,
"alnum_prop": 0.6877510040160643,
"repo_name": "cdsi/grima",
"id": "0ace6d1f98d401a55729f343bc27667e030eb5b0",
"size": "3026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grima/www/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "656"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "PHP",
"bytes": "8038"
},
{
"name": "Python",
"bytes": "54848"
},
{
"name": "Shell",
"bytes": "11378"
}
],
"symlink_target": ""
}
|
"""Manages Google Cloud Key Management resources.
See https://cloud.google.com/kms/docs/
"""
import base64
from typing import Optional, List
from googleapiclient import discovery
from googleapiclient import errors
from google.auth import credentials
class CloudKmsError(Exception):
    """Raised when a Cloud KMS API request cannot be completed."""
class CloudKmsClient(object):
    """A class for managing Google Cloud Key Management resources."""

    def __init__(self, cloudkms_service: discovery.Resource):
        self._cloudkms_service = cloudkms_service

    @classmethod
    def from_credentials(cls, credentials: credentials.Credentials):
        """Build a client from Google auth credentials.

        Discovery caching is disabled; it is unnecessary for a one-off
        client build and avoids noisy cache warnings.
        """
        return cls(
            discovery.build('cloudkms',
                            'v1',
                            credentials=credentials,
                            cache_discovery=False))

    def create_keyring(self,
                       project_id: str,
                       name: str,
                       location: str = 'global'):
        """Create a cryptographic key ring.

        A key ring is a toplevel logical grouping of cryptographic keys.

        Args:
            project_id: The id of the project where you want to create the key
                ring.
            name: Name of the key ring.
            location: Where you want to make the key ring available.
        """
        parent = 'projects/{}/locations/{}'.format(project_id, location)
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request = keyring_rsrc.create(parent=parent, keyRingId=name, body={})
        request.execute(num_retries=5)

    def create_key(self,
                   project_id: str,
                   keyring_name: str,
                   key_name: str,
                   location: str = 'global'):
        """Create a cryptographic key.

        Args:
            project_id: The id of the project where you want to create the key
                ring.
            keyring_name: Name of the cryptographic key ring. A key ring is a
                toplevel logical grouping of cryptographic keys.
            key_name: Name of the cryptographic key.
            location: Where the key ring is available.
        """
        parent = 'projects/{}/locations/{}/keyRings/{}'.format(
            project_id, location, keyring_name)
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request_body = {
            # Symmetric encrypt/decrypt is the only purpose used here.
            'purpose': 'ENCRYPT_DECRYPT',
        }
        request = keyring_rsrc.cryptoKeys().create(parent=parent,
                                                   cryptoKeyId=key_name,
                                                   body=request_body)
        request.execute(num_retries=5)

    def list_keyrings(self, project_id: str,
                      location: str = 'global') -> Optional[List[str]]:
        """List cryptographic key rings in the given project and location.

        A key ring is a toplevel logical grouping of cryptographic keys.

        Args:
            project_id: The id of the project where you want to create the key
                ring.
            location: The location where the key rings are available.

        Returns:
            The keyrings in the given project that are available in the given
            location. An empty list if the project or location does not exist.

        Raises:
            CloudKmsError: When the api call does not return the expected
                response.
            errors.HttpError: On any HTTP failure other than 404.
        """
        parent = 'projects/{}/locations/{}'.format(project_id, location)
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request = keyring_rsrc.list(parent=parent)
        try:
            response = request.execute(num_retries=5)
            if 'keyRings' not in response:
                raise CloudKmsError(
                    'Unexpected response when listing keyrings [{}]'.format(
                        response))
        except errors.HttpError as e:
            if e.resp.status == 404:
                # The given project or location does not exist
                return []
            # BUG FIX: previously a non-404 HttpError was swallowed and
            # control fell through to reference the unbound `response`
            # (NameError). Propagate the original error instead.
            raise
        keyring_names = [
            keyring['name'].split('/')[-1] for keyring in response['keyRings']
        ]
        return keyring_names

    def list_keys(self,
                  project_id: str,
                  keyring_name: str,
                  location: str = 'global') -> Optional[List[str]]:
        """List cryptographic keys in the given key ring.

        Args:
            project_id: The id of the project where you want to create the key
                ring.
            keyring_name: Name of the cryptographic key ring. A key ring is a
                toplevel logical grouping of cryptographic keys.
            location: The location where the given key ring is available.

        Returns:
            The keys in the given key ring. An empty list if the keyring,
            project or location does not exist.

        Raises:
            CloudKmsError: When the api call does not return the expected
                response.
            errors.HttpError: On any HTTP failure other than 404.
        """
        parent = 'projects/{}/locations/{}/keyRings/{}'.format(
            project_id, location, keyring_name)
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request = keyring_rsrc.cryptoKeys().list(parent=parent)
        try:
            response = request.execute(num_retries=5)
            if 'cryptoKeys' not in response:
                raise CloudKmsError(
                    'Unexpected response when listing keys [{}]'.format(
                        response))
        except errors.HttpError as e:
            if e.resp.status == 404:
                # The given keyring or project or location does not exist
                return []
            # BUG FIX: same as list_keyrings — re-raise non-404 errors
            # instead of falling through to an unbound `response`.
            raise
        key_names = [
            key['name'].split('/')[-1] for key in response['cryptoKeys']
        ]
        return key_names

    def encrypt(self,
                plaintext: str,
                project_id: str,
                key_name: str,
                keyring_name: str,
                location: str = 'global') -> Optional[str]:
        """Encrypt the given plain text using the key provided.

        Args:
            plaintext: The data to encrypt.
            project_id: The id of the project where the given key is in.
            key_name: Name of the cryptographic key.
            keyring_name: Name of the cryptographic key ring. A key ring is a
                toplevel logical grouping of cryptographic keys.
            location: The location that the keyring is available.

        Returns:
            The cipher text generated with the given plain text.

        Raises:
            CloudKmsError: When the encryption api call does not return the
                expected response.
        """
        name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
            project_id, location, keyring_name, key_name)
        # Cloud KMS api requires the plain text to be a base64 encoded string.
        b64_encoded_plaintext = base64.urlsafe_b64encode(
            plaintext.encode('utf-8')).decode('utf-8')
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request = keyring_rsrc.cryptoKeys().encrypt(
            name=name, body={'plaintext': b64_encoded_plaintext})
        response = request.execute(num_retries=5)
        if 'ciphertext' not in response:
            raise CloudKmsError(
                'Unexpected response when encrypting text "{}" [{}]'.format(
                    plaintext, response))
        return response['ciphertext']

    def decrypt(self,
                ciphertext: str,
                project_id: str,
                key_name: str,
                keyring_name: str,
                location: str = 'global') -> Optional[str]:
        """Decrypt the data that was previously encrypted with the key provided.

        Args:
            ciphertext: The encrypted data originally encrypted with the given
                key.
            project_id: The id of the project where the given key is in.
            key_name: Name of the cryptographic key.
            keyring_name: Name of the cryptographic key ring. A key ring is a
                toplevel logical grouping of cryptographic keys.
            location: The location that the keyring is available.

        Returns:
            The decrypted data generated from the given cipher text.

        Raises:
            CloudKmsError: When the decryption api call does not return the
                expected response.
        """
        name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
            project_id, location, keyring_name, key_name)
        keyring_rsrc = self._cloudkms_service.projects().locations().keyRings()
        request = keyring_rsrc.cryptoKeys().decrypt(
            name=name, body={'ciphertext': ciphertext})
        response = request.execute(num_retries=5)
        if 'plaintext' not in response:
            raise CloudKmsError(
                'Unexpected response when decrypting text "{}" [{}]'.format(
                    ciphertext, response))
        # Return value Cloud KMS api call is a base64 encoded string
        b64_encoded_ciphertext = response['plaintext']
        return base64.urlsafe_b64decode(b64_encoded_ciphertext).decode('utf-8')
|
{
"content_hash": "62d6fdd161548d4c1c14bca04567174d",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 80,
"avg_line_length": 40.004329004329,
"alnum_prop": 0.5730981495509144,
"repo_name": "GoogleCloudPlatform/django-cloud-deploy",
"id": "2c42fff7643f076d82605ff9312df06ad8344ee7",
"size": "9816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cloud_deploy/cloudlib/cloudkms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "Python",
"bytes": "583745"
},
{
"name": "Shell",
"bytes": "997"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
    # Adds Volume.device: the guest-visible device name (e.g. /dev/vdb),
    # validated against the "/dev/<alphanumeric>" pattern.

    dependencies = [
        ('openstack', '0021_volume_instance'),
    ]

    operations = [
        migrations.AddField(
            model_name='volume',
            name='device',
            field=models.CharField(blank=True, max_length=50, help_text='Name of volume as instance device e.g. /dev/vdb.', validators=[django.core.validators.RegexValidator('^/dev/[a-zA-Z0-9]+$', message='Device should match pattern "/dev/alphanumeric+"')]),
        ),
    ]
|
{
"content_hash": "e64aab699fec0150df670588a997f8cd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 259,
"avg_line_length": 32.31578947368421,
"alnum_prop": 0.6498371335504886,
"repo_name": "opennode/nodeconductor-openstack",
"id": "a9ad019f58201c52ef9814185af314ad8aa26b98",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/waldur_openstack/openstack/migrations/0022_volume_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "792426"
}
],
"symlink_target": ""
}
|
import asyncio
import datetime
import json
import re
from unittest import mock
import pytest
from multidict import CIMultiDict
from aiohttp import HttpVersion, HttpVersion10, HttpVersion11, hdrs, signals
from aiohttp.test_utils import make_mocked_request
from aiohttp.web import ContentCoding, Response, StreamResponse, json_response
def make_request(method, path, headers=None,
                 version=HttpVersion11, **kwargs):
    """Build a mocked aiohttp request with a stub app and protocol.

    BUG FIX: the default was the mutable `headers=CIMultiDict()`, a single
    dict instance shared across every call; build a fresh one per call.
    """
    if headers is None:
        headers = CIMultiDict()
    app = kwargs.pop('app', None) or mock.Mock()
    app._debug = False
    app.on_response_prepare = signals.Signal(app)
    protocol = kwargs.pop('protocol', None) or mock.Mock()
    return make_mocked_request(method, path, headers,
                               version=version, protocol=protocol,
                               app=app, **kwargs)
# `pytest.yield_fixture` is a deprecated alias meant for yield-style
# fixtures; this fixture returns, so plain `pytest.fixture` is correct.
@pytest.fixture
def buf():
    """Byte buffer the `writer` fixture appends all written data to."""
    return bytearray()
# `pytest.yield_fixture` is a deprecated alias meant for yield-style
# fixtures; this fixture returns, so plain `pytest.fixture` is correct.
@pytest.fixture
def writer(buf):
    """Mock payload writer whose write/EOF paths all append into `buf`."""
    writer = mock.Mock()

    def acquire(cb):
        cb(writer.transport)

    def buffer_data(chunk):
        buf.extend(chunk)

    def write(chunk):
        buf.extend(chunk)

    def write_headers(status_line, headers):
        # Serialize headers the way the real writer would: status line,
        # "Key: value" lines, then a blank line.
        headers = status_line + ''.join(
            [k + ': ' + v + '\r\n' for k, v in headers.items()])
        headers = headers.encode('utf-8') + b'\r\n'
        buf.extend(headers)

    @asyncio.coroutine
    def write_eof(chunk=b''):
        buf.extend(chunk)

    writer.acquire.side_effect = acquire
    writer.transport.write.side_effect = write
    writer.write.side_effect = write
    writer.write_eof.side_effect = write_eof
    writer.write_headers.side_effect = write_headers
    writer.buffer_data.side_effect = buffer_data
    writer.drain.return_value = ()
    return writer
def test_stream_response_ctor():
    """A fresh StreamResponse is 200, keep-alive undecided, and mirrors
    the request's task once a request is attached."""
    response = StreamResponse()
    assert response.status == 200
    assert response.keep_alive is None
    assert response.task is None

    request = mock.Mock()
    response._req = request
    assert response.task is request.task
def test_content_length():
    """A fresh StreamResponse reports no Content-Length."""
    response = StreamResponse()
    assert response.content_length is None
def test_content_length_setter():
    """Setting content_length is reflected by the property."""
    response = StreamResponse()
    response.content_length = 234
    assert response.content_length == 234
def test_drop_content_length_header_on_setting_len_to_None():
    """Assigning None removes a previously set Content-Length header."""
    response = StreamResponse()

    response.content_length = 1
    assert response.headers['Content-Length'] == "1"

    response.content_length = None
    assert 'Content-Length' not in response.headers
def test_set_content_length_to_None_on_non_set():
    """Assigning None when no header exists is a no-op, even repeatedly."""
    response = StreamResponse()
    for _ in range(2):
        response.content_length = None
        assert 'Content-Length' not in response.headers
def test_setting_content_type():
    """content_type lands in the (case-insensitive) content-type header."""
    response = StreamResponse()
    response.content_type = 'text/html'
    assert response.headers['content-type'] == 'text/html'
def test_setting_charset():
    """Setting charset appends a charset= parameter to content-type."""
    response = StreamResponse()
    response.content_type = 'text/html'
    response.charset = 'koi8-r'
    assert response.headers['content-type'] == 'text/html; charset=koi8-r'
def test_default_charset():
    """No charset is set by default."""
    response = StreamResponse()
    assert response.charset is None
def test_reset_charset():
    """Assigning charset = None on a bare content-type leaves it None."""
    response = StreamResponse()
    response.content_type = 'text/html'
    response.charset = None
    assert response.charset is None
def test_reset_charset_after_setting():
    """charset = None clears a charset that was previously set."""
    response = StreamResponse()
    response.content_type = 'text/html'
    response.charset = 'koi8-r'
    response.charset = None
    assert response.charset is None
def test_charset_without_content_type():
    """Setting a charset before any content-type is a RuntimeError."""
    response = StreamResponse()
    with pytest.raises(RuntimeError):
        response.charset = 'koi8-r'
def test_last_modified_initial():
    """last_modified starts unset."""
    response = StreamResponse()
    assert response.last_modified is None
def test_last_modified_string():
    """An HTTP-date string is parsed into an aware UTC datetime."""
    response = StreamResponse()
    expected = datetime.datetime(1990, 1, 2, 3, 4, 5, 0,
                                 datetime.timezone.utc)
    response.last_modified = 'Mon, 2 Jan 1990 03:04:05 GMT'
    assert response.last_modified == expected
def test_last_modified_timestamp():
    """Int and float POSIX timestamps both map to the epoch datetime."""
    response = StreamResponse()
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, datetime.timezone.utc)
    for stamp in (0, 0.0):
        response.last_modified = stamp
        assert response.last_modified == epoch
def test_last_modified_datetime():
    """An aware datetime round-trips unchanged."""
    response = StreamResponse()
    moment = datetime.datetime(2001, 2, 3, 4, 5, 6, 0, datetime.timezone.utc)
    response.last_modified = moment
    assert response.last_modified == moment
def test_last_modified_reset():
    """Assigning None clears a previously set last_modified."""
    response = StreamResponse()
    response.last_modified = 0
    response.last_modified = None
    assert response.last_modified is None
@asyncio.coroutine
def test_start():
    """prepare() is idempotent: repeated calls (even with a new request)
    return the same payload writer, and keep_alive becomes True."""
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert resp.keep_alive is None

    msg = yield from resp.prepare(req)

    assert msg.write_headers.called
    msg2 = yield from resp.prepare(req)
    assert msg is msg2

    assert resp.keep_alive

    req2 = make_request('GET', '/')
    # with pytest.raises(RuntimeError):
    msg3 = yield from resp.prepare(req2)
    assert msg is msg3
@asyncio.coroutine
def test_chunked_encoding():
    """enable_chunked_encoding() marks both the response and the writer."""
    req = make_request('GET', '/')
    resp = StreamResponse()
    assert not resp.chunked

    resp.enable_chunked_encoding()
    assert resp.chunked

    msg = yield from resp.prepare(req)
    assert msg.chunked
@asyncio.coroutine
def test_chunk_size():
    """A chunk_size enables chunking plus a size-limiting filter."""
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    resp.enable_chunked_encoding(chunk_size=8192)
    assert resp.chunked

    msg = yield from resp.prepare(req)
    assert msg.chunked
    assert msg.enable_chunking.called
    assert msg.filter is not None
@asyncio.coroutine
def test_chunked_encoding_forbidden_for_http_10():
    """HTTP/1.0 has no chunked transfer coding; prepare() must refuse."""
    req = make_request('GET', '/', version=HttpVersion10)
    resp = StreamResponse()
    resp.enable_chunked_encoding()

    with pytest.raises(RuntimeError) as ctx:
        yield from resp.prepare(req)
    assert re.match("Using chunked encoding is forbidden for HTTP/1.0",
                    str(ctx.value))
@asyncio.coroutine
def test_compression_no_accept():
    """Without Accept-Encoding, opt-in compression stays inactive."""
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression()
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert not msg.enable_compression.called
@asyncio.coroutine
def test_force_compression_no_accept_backwards_compat():
    """Legacy force=True compresses even without Accept-Encoding."""
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression(force=True)
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert msg.enable_compression.called
    assert msg.filter is not None
@asyncio.coroutine
def test_force_compression_false_backwards_compat():
    """Legacy force=False behaves like plain opt-in compression."""
    req = make_request('GET', '/', payload_writer=mock.Mock())
    resp = StreamResponse()

    assert not resp.compression
    resp.enable_compression(force=False)
    assert resp.compression

    msg = yield from resp.prepare(req)
    assert not msg.enable_compression.called
@asyncio.coroutine
def test_compression_default_coding():
    """With 'gzip, deflate' accepted, deflate is the default coding chosen."""
    req = make_request(
        'GET', '/',
        headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
    resp = StreamResponse()
    assert not resp.chunked

    assert not resp.compression
    resp.enable_compression()
    assert resp.compression

    msg = yield from resp.prepare(req)

    msg.enable_compression.assert_called_with('deflate')
    assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)
    assert msg.filter is not None
@asyncio.coroutine
def test_force_compression_deflate():
req = make_request(
'GET', '/',
headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
resp = StreamResponse()
resp.enable_compression(ContentCoding.deflate)
assert resp.compression
msg = yield from resp.prepare(req)
msg.enable_compression.assert_called_with('deflate')
assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)
@asyncio.coroutine
def test_force_compression_no_accept_deflate():
req = make_request('GET', '/')
resp = StreamResponse()
resp.enable_compression(ContentCoding.deflate)
assert resp.compression
msg = yield from resp.prepare(req)
msg.enable_compression.assert_called_with('deflate')
assert 'deflate' == resp.headers.get(hdrs.CONTENT_ENCODING)
@asyncio.coroutine
def test_force_compression_gzip():
req = make_request(
'GET', '/',
headers=CIMultiDict({hdrs.ACCEPT_ENCODING: 'gzip, deflate'}))
resp = StreamResponse()
resp.enable_compression(ContentCoding.gzip)
assert resp.compression
msg = yield from resp.prepare(req)
msg.enable_compression.assert_called_with('gzip')
assert 'gzip' == resp.headers.get(hdrs.CONTENT_ENCODING)
@asyncio.coroutine
def test_force_compression_no_accept_gzip():
req = make_request('GET', '/')
resp = StreamResponse()
resp.enable_compression(ContentCoding.gzip)
assert resp.compression
msg = yield from resp.prepare(req)
msg.enable_compression.assert_called_with('gzip')
assert 'gzip' == resp.headers.get(hdrs.CONTENT_ENCODING)
@asyncio.coroutine
def test_delete_content_length_if_compression_enabled():
req = make_request('GET', '/')
resp = Response(body=b'answer')
resp.enable_compression(ContentCoding.gzip)
yield from resp.prepare(req)
assert resp.content_length is None
# ---- write()/write_eof() lifecycle tests ----
@asyncio.coroutine
def test_write_non_byteish():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with pytest.raises(AssertionError):
        resp.write(123)
def test_write_before_start():
    # Writing before prepare() is a hard error.
    resp = StreamResponse()
    with pytest.raises(RuntimeError):
        resp.write(b'data')
@asyncio.coroutine
def test_cannot_write_after_eof():
    resp = StreamResponse()
    writer = mock.Mock()
    resp_impl = yield from resp.prepare(
        make_request('GET', '/', writer=writer))
    resp_impl.write_eof = mock.Mock()
    resp_impl.write_eof.return_value = ()
    resp.write(b'data')
    yield from resp.write_eof()
    writer.write.reset_mock()
    with pytest.raises(RuntimeError):
        resp.write(b'next data')
    assert not writer.write.called
@asyncio.coroutine
def test___repr___after_eof():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    assert resp.prepared
    resp.write(b'data')
    yield from resp.write_eof()
    # After EOF the response is no longer considered "prepared".
    assert not resp.prepared
    resp_repr = repr(resp)
    assert resp_repr == '<StreamResponse OK eof>'
@asyncio.coroutine
def test_cannot_write_eof_before_headers():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        yield from resp.write_eof()
@asyncio.coroutine
def test_cannot_write_eof_twice():
    resp = StreamResponse()
    # NOTE(review): `writer` is never wired into the request below, so the
    # final `writer.write.called` check is trivially False — confirm intent.
    writer = mock.Mock()
    resp_impl = yield from resp.prepare(make_request('GET', '/'))
    resp_impl.write = mock.Mock()
    resp_impl.write_eof = mock.Mock()
    resp_impl.write_eof.return_value = ()
    resp.write(b'data')
    assert resp_impl.write.called
    yield from resp.write_eof()
    resp_impl.write.reset_mock()
    # Second write_eof() must be a no-op, not an error.
    yield from resp.write_eof()
    assert not writer.write.called
# The leading underscore keeps the next two tests out of pytest collection.
@asyncio.coroutine
def _test_write_returns_drain():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with mock.patch('aiohttp.http_writer.noop') as noop:
        assert noop == resp.write(b'data')
@asyncio.coroutine
def _test_write_returns_empty_tuple_on_empty_data():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with mock.patch('aiohttp.http_writer.noop') as noop:
        assert noop.return_value == resp.write(b'')
def test_force_close():
    resp = StreamResponse()
    assert resp.keep_alive is None
    resp.force_close()
    assert resp.keep_alive is False
@asyncio.coroutine
def test_response_output_length():
    # output_length is deprecated and must warn on access.
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with pytest.warns(DeprecationWarning):
        assert resp.output_length
# ---- Set-Cookie handling tests ----
def test_response_cookies():
    resp = StreamResponse()
    assert resp.cookies == {}
    assert str(resp.cookies) == ''
    resp.set_cookie('name', 'value')
    assert str(resp.cookies) == 'Set-Cookie: name=value; Path=/'
    resp.set_cookie('name', 'other_value')
    assert str(resp.cookies) == 'Set-Cookie: name=other_value; Path=/'
    resp.cookies['name'] = 'another_other_value'
    resp.cookies['name']['max-age'] = 10
    assert (str(resp.cookies) ==
            'Set-Cookie: name=another_other_value; Max-Age=10; Path=/')
    resp.del_cookie('name')
    # The deleted value may render as "" or empty depending on http.cookies,
    # hence the regex with an optional ("") group.
    expected = ('Set-Cookie: name=("")?; '
                'expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/')
    assert re.match(expected, str(resp.cookies))
    resp.set_cookie('name', 'value', domain='local.host')
    expected = 'Set-Cookie: name=value; Domain=local.host; Path=/'
    assert str(resp.cookies) == expected
def test_response_cookie_path():
    resp = StreamResponse()
    assert resp.cookies == {}
    resp.set_cookie('name', 'value', path='/some/path')
    assert str(resp.cookies) == 'Set-Cookie: name=value; Path=/some/path'
    resp.set_cookie('name', 'value', expires='123')
    assert (str(resp.cookies) ==
            'Set-Cookie: name=value; expires=123; Path=/')
    resp.set_cookie('name', 'value', domain='example.com',
                    path='/home', expires='123', max_age='10',
                    secure=True, httponly=True, version='2.0')
    assert (str(resp.cookies).lower() == 'set-cookie: name=value; '
            'domain=example.com; '
            'expires=123; '
            'httponly; '
            'max-age=10; '
            'path=/home; '
            'secure; '
            'version=2.0')
def test_response_cookie__issue_del_cookie():
    # Deleting a cookie that was never set must still emit an expiry header.
    resp = StreamResponse()
    assert resp.cookies == {}
    assert str(resp.cookies) == ''
    resp.del_cookie('name')
    expected = ('Set-Cookie: name=("")?; '
                'expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/')
    assert re.match(expected, str(resp.cookies))
def test_cookie_set_after_del():
    resp = StreamResponse()
    resp.del_cookie('name')
    resp.set_cookie('name', 'val')
    # check for Max-Age dropped
    expected = 'Set-Cookie: name=val; Path=/'
    assert str(resp.cookies) == expected
# ---- status/reason, repr and keep-alive tests ----
def test_set_status_with_reason():
    resp = StreamResponse()
    # NOTE(review): "Everithing" is a typo but self-consistent test data; the
    # reason phrase is free-form, so the assertion still checks round-tripping.
    resp.set_status(200, "Everithing is fine!")
    assert 200 == resp.status
    assert "Everithing is fine!" == resp.reason
@asyncio.coroutine
def test_start_force_close():
    req = make_request('GET', '/')
    resp = StreamResponse()
    resp.force_close()
    assert not resp.keep_alive
    yield from resp.prepare(req)
    assert not resp.keep_alive
@asyncio.coroutine
def test___repr__():
    req = make_request('GET', '/path/to')
    resp = StreamResponse(reason=301)
    yield from resp.prepare(req)
    assert "<StreamResponse 301 GET /path/to >" == repr(resp)
def test___repr___not_prepared():
    resp = StreamResponse(reason=301)
    assert "<StreamResponse 301 not prepared>" == repr(resp)
@asyncio.coroutine
def test_keep_alive_http10_default():
    # HTTP/1.0 defaults to close.
    req = make_request('GET', '/', version=HttpVersion10)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert not resp.keep_alive
@asyncio.coroutine
def test_keep_alive_http10_switched_on():
    # Connection: keep-alive opts an HTTP/1.0 client in.
    headers = CIMultiDict(Connection='keep-alive')
    req = make_request('GET', '/', version=HttpVersion10, headers=headers)
    req._message = req._message._replace(should_close=False)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert resp.keep_alive
@asyncio.coroutine
def test_keep_alive_http09():
    # HTTP/0.9 can never keep the connection alive.
    headers = CIMultiDict(Connection='keep-alive')
    req = make_request('GET', '/', version=HttpVersion(0, 9), headers=headers)
    resp = StreamResponse()
    yield from resp.prepare(req)
    assert not resp.keep_alive
@asyncio.coroutine
def test_prepare_twice():
    """Preparing a response twice must return the same impl object.

    Bug fix: the @asyncio.coroutine decorator was missing, so pytest
    collected a bare generator function and the body (which uses
    ``yield from``) never actually executed.
    """
    req = make_request('GET', '/')
    resp = StreamResponse()
    impl1 = yield from resp.prepare(req)
    impl2 = yield from resp.prepare(req)
    assert impl1 is impl2
@asyncio.coroutine
def test_prepare_calls_signal():
    # prepare() must fire the app's on_response_prepare signal with (req, resp).
    app = mock.Mock()
    req = make_request('GET', '/', app=app)
    resp = StreamResponse()
    sig = mock.Mock()
    app.on_response_prepare.append(sig)
    yield from resp.prepare(req)
    sig.assert_called_with(req, resp)
# ---- TCP_NODELAY accessor tests ----
def test_get_nodelay_unprepared():
    # Accessing the socket option before prepare() asserts.
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.tcp_nodelay
def test_set_nodelay_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.set_tcp_nodelay(True)
@asyncio.coroutine
def test_get_nodelay_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    writer.tcp_nodelay = False
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    assert not resp.tcp_nodelay
@asyncio.coroutine
def test_set_nodelay_prepared():
    """set_tcp_nodelay on a prepared response must be forwarded to the writer.

    Bug fix: the @asyncio.coroutine decorator was missing, so this
    ``yield from``-based body was collected as a bare generator and never ran.
    """
    resp = StreamResponse()
    writer = mock.Mock()
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    resp.set_tcp_nodelay(True)
    writer.set_tcp_nodelay.assert_called_with(True)
# ---- TCP_CORK accessor tests (mirror of the nodelay tests above) ----
def test_get_cork_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.tcp_cork
def test_set_cork_unprepared():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        resp.set_tcp_cork(True)
@asyncio.coroutine
def test_get_cork_prepared():
    resp = StreamResponse()
    writer = mock.Mock()
    writer.tcp_cork = False
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    assert not resp.tcp_cork
@asyncio.coroutine
def test_set_cork_prepared():
    """set_tcp_cork on a prepared response must be forwarded to the writer.

    Bug fix: the @asyncio.coroutine decorator was missing, so this
    ``yield from``-based body was collected as a bare generator and never ran.
    """
    resp = StreamResponse()
    writer = mock.Mock()
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    resp.set_tcp_cork(True)
    writer.set_tcp_cork.assert_called_with(True)
# Response class
# ---- constructor argument validation tests ----
def test_response_ctor():
    resp = Response()
    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert resp.body is None
    assert resp.content_length == 0
    assert 'CONTENT-LENGTH' not in resp.headers
def test_ctor_with_headers_and_status():
    resp = Response(body=b'body', status=201,
                    headers={'Age': '12', 'DATE': 'date'})
    assert 201 == resp.status
    assert b'body' == resp.body
    assert resp.headers['AGE'] == '12'
    resp._start(mock.Mock(version=HttpVersion11))
    assert 4 == resp.content_length
    assert resp.headers['CONTENT-LENGTH'] == '4'
def test_ctor_content_type():
    resp = Response(content_type='application/json')
    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert 0 == resp.content_length
    assert (CIMultiDict([('CONTENT-TYPE', 'application/json')]) ==
            resp.headers)
def test_ctor_text_body_combined():
    # body and text are mutually exclusive.
    with pytest.raises(ValueError):
        Response(body=b'123', text='test text')
def test_ctor_text():
    resp = Response(text='test text')
    assert 200 == resp.status
    assert 'OK' == resp.reason
    assert 9 == resp.content_length
    assert (CIMultiDict(
        [('CONTENT-TYPE', 'text/plain; charset=utf-8')]) == resp.headers)
    assert resp.body == b'test text'
    assert resp.text == 'test text'
    resp.headers['DATE'] = 'date'
    resp._start(mock.Mock(version=HttpVersion11))
    assert resp.headers['CONTENT-LENGTH'] == '9'
def test_ctor_charset():
    resp = Response(text='текст', charset='koi8-r')
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'koi8-r' == resp.charset
def test_ctor_charset_default_utf8():
    resp = Response(text='test test', charset=None)
    assert 'utf-8' == resp.charset
def test_ctor_charset_in_content_type():
    # A charset must be passed via the charset argument, not inline.
    with pytest.raises(ValueError):
        Response(text='test test', content_type='text/plain; charset=utf-8')
def test_ctor_charset_without_text():
    resp = Response(content_type='text/plain', charset='koi8-r')
    assert 'koi8-r' == resp.charset
# The next four tests check that content_type/charset keyword arguments
# conflict with an explicit Content-Type header.
def test_ctor_both_content_type_param_and_header_with_text():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 content_type='text/html', text='text')
def test_ctor_both_charset_param_and_header_with_text():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 charset='koi8-r', text='text')
def test_ctor_both_content_type_param_and_header():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 content_type='text/html')
def test_ctor_both_charset_param_and_header():
    with pytest.raises(ValueError):
        Response(headers={'Content-Type': 'application/json'},
                 charset='koi8-r')
# ---- body/text assignment and wire-format rendering tests ----
def test_assign_nonbyteish_body():
    resp = Response(body=b'data')
    with pytest.raises(ValueError):
        resp.body = 123
    # A failed assignment must leave the previous body untouched.
    assert b'data' == resp.body
    assert 4 == resp.content_length
    resp.headers['DATE'] = 'date'
    resp._start(mock.Mock(version=HttpVersion11))
    assert resp.headers['CONTENT-LENGTH'] == '4'
    assert 4 == resp.content_length
def test_assign_nonstr_text():
    resp = Response(text='test')
    with pytest.raises(AssertionError):
        resp.text = b'123'
    assert b'test' == resp.body
    assert 4 == resp.content_length
def test_response_set_content_length():
    # Content-Length is derived from the body; direct assignment is rejected.
    resp = Response()
    with pytest.raises(RuntimeError):
        resp.content_length = 1
# `buf` and `writer` below are pytest fixtures capturing the raw bytes
# written to the transport.
@asyncio.coroutine
def test_send_headers_for_empty_body(buf, writer):
    req = make_request('GET', '/', payload_writer=writer)
    resp = Response()
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 0\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n', txt)
@asyncio.coroutine
def test_render_with_body(buf, writer):
    req = make_request('GET', '/', payload_writer=writer)
    resp = Response(body=b'data')
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 4\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n'
                    'data', txt)
@asyncio.coroutine
def test_send_set_cookie_header(buf, writer):
    resp = Response()
    resp.cookies['name'] = 'value'
    req = make_request('GET', '/', payload_writer=writer)
    yield from resp.prepare(req)
    yield from resp.write_eof()
    txt = buf.decode('utf8')
    assert re.match('HTTP/1.1 200 OK\r\n'
                    'Content-Length: 0\r\n'
                    'Set-Cookie: name=value\r\n'
                    'Content-Type: application/octet-stream\r\n'
                    'Date: .+\r\n'
                    'Server: .+\r\n\r\n', txt)
# ---- content-type / charset interaction tests ----
def test_set_text_with_content_type():
    resp = Response()
    resp.content_type = "text/html"
    resp.text = "text"
    # Assigning text must not clobber a previously set content type.
    assert "text" == resp.text
    assert b"text" == resp.body
    assert "text/html" == resp.content_type
def test_set_text_with_charset():
    resp = Response()
    resp.content_type = 'text/plain'
    resp.charset = "KOI8-R"
    resp.text = "текст"
    assert "текст" == resp.text
    assert "текст".encode('koi8-r') == resp.body
    # The charset is normalised to lower case.
    assert "koi8-r" == resp.charset
def test_default_content_type_in_stream_response():
    resp = StreamResponse()
    assert resp.content_type == 'application/octet-stream'
def test_default_content_type_in_response():
    resp = Response()
    assert resp.content_type == 'application/octet-stream'
def test_content_type_with_set_text():
    resp = Response(text='text')
    assert resp.content_type == 'text/plain'
def test_content_type_with_set_body():
    resp = Response(body=b'body')
    assert resp.content_type == 'application/octet-stream'
def test_started_when_not_started():
    resp = StreamResponse()
    assert not resp.prepared
@asyncio.coroutine
def test_started_when_started():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    assert resp.prepared
@asyncio.coroutine
def test_drain_before_start():
    resp = StreamResponse()
    with pytest.raises(AssertionError):
        yield from resp.drain()
@asyncio.coroutine
def test_changing_status_after_prepare_raises():
    resp = StreamResponse()
    yield from resp.prepare(make_request('GET', '/'))
    with pytest.raises(AssertionError):
        resp.set_status(400)
def test_nonstr_text_in_ctor():
    with pytest.raises(TypeError):
        Response(text=b'data')
def test_text_in_ctor_with_content_type():
    resp = Response(text='data', content_type='text/html')
    assert 'data' == resp.text
    assert 'text/html' == resp.content_type
def test_text_in_ctor_with_content_type_header():
    # The header charset is honoured when encoding the text body.
    resp = Response(text='текст',
                    headers={'Content-Type': 'text/html; charset=koi8-r'})
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_text_in_ctor_with_content_type_header_multidict():
    headers = CIMultiDict({'Content-Type': 'text/html; charset=koi8-r'})
    resp = Response(text='текст',
                    headers=headers)
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_body_in_ctor_with_content_type_header_multidict():
    headers = CIMultiDict({'Content-Type': 'text/html; charset=koi8-r'})
    resp = Response(body='текст'.encode('koi8-r'),
                    headers=headers)
    assert 'текст'.encode('koi8-r') == resp.body
    assert 'text/html' == resp.content_type
    assert 'koi8-r' == resp.charset
def test_text_with_empty_payload():
    resp = Response(status=200)
    assert resp.body is None
    assert resp.text is None
def test_response_with_content_length_header_without_body():
    resp = Response(headers={'Content-Length': 123})
    assert resp.content_length == 123
class TestJSONResponse:
    """Tests for the json_response() helper."""

    def test_content_type_is_application_json_by_default(self):
        # No explicit content_type -> application/json.
        assert json_response('').content_type == 'application/json'

    def test_passing_text_only(self):
        payload = json.dumps('jaysawn')
        resp = json_response(text=payload)
        assert payload == resp.text

    def test_data_and_text_raises_value_error(self):
        # data is mutually exclusive with text.
        with pytest.raises(ValueError) as excinfo:
            json_response(data='foo', text='bar')
        assert excinfo.value.args[0] == (
            'only one of data, text, or body should be specified')

    def test_data_and_body_raises_value_error(self):
        # data is mutually exclusive with body as well.
        with pytest.raises(ValueError) as excinfo:
            json_response(data='foo', body=b'bar')
        assert excinfo.value.args[0] == (
            'only one of data, text, or body should be specified')

    def test_text_is_json_encoded(self):
        resp = json_response({'foo': 42})
        assert resp.text == json.dumps({'foo': 42})

    def test_content_type_is_overrideable(self):
        resp = json_response({'foo': 42},
                             content_type='application/vnd.json+api')
        assert resp.content_type == 'application/vnd.json+api'
|
{
"content_hash": "9fca9f3b8cd87e69b71777d3dfd3dbf3",
"timestamp": "",
"source": "github",
"line_count": 1041,
"max_line_length": 78,
"avg_line_length": 26.6926032660903,
"alnum_prop": 0.6487926008565157,
"repo_name": "AraHaanOrg/aiohttp",
"id": "6a407cbdcd110831b051a61ffe14e48fa1efc0f1",
"size": "27842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_web_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Makefile",
"bytes": "2817"
},
{
"name": "Python",
"bytes": "1233163"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
}
|
import time
from queue import Queue
from . import Peer
from threading import Thread
from pubsub import pub
class PeerSeeker(Thread):
    """Background thread that turns raw (host, port) pairs into connected peers.

    Consumes candidate addresses from ``new_peers_queue``, attempts a
    connection via ``Peer.Peer.connect``, and announces successes on the
    ``'PeerManager.new_peer'`` pubsub topic.  Failed addresses are remembered
    and only retried after a 10-second cool-down.
    """
    def __init__(self, new_peers_queue, torrent, peer_manager):
        # new_peers_queue: Queue of (host, port) candidates — presumably
        # filled by the tracker/announce code; verify against the producer.
        Thread.__init__(self)
        self.stop_requested = False  # polled by run(); set via request_stop()
        self.peer_manager = peer_manager
        self.new_peers_queue = new_peers_queue
        self.torrent = torrent
        self.reset_time = time.time()  # last time failed_peers was cleared
    def request_stop(self):
        # Cooperative shutdown: run() notices within ~1s (the queue timeout).
        self.stop_requested = True
    def run(self):
        failed_peers = []
        while not self.stop_requested:
            # reset failed peers so we can try again
            if (time.time() - self.reset_time) > 10:
                failed_peers = []
                self.reset_time = time.time()
            try:
                # 1s timeout keeps the loop responsive to request_stop().
                peer = self.new_peers_queue.get(timeout=1)
            except Exception:
                continue
            peer = Peer.Peer(self.torrent, peer[0], peer[1])
            # Note: the comprehension's loop variable does not clobber the
            # outer `peer` — comprehensions have their own scope in Python 3.
            extant_peers = [(peer.ip, peer.port) for peer in self.peer_manager.peers]
            if (peer.ip, peer.port) in failed_peers or (peer.ip, peer.port) in extant_peers:
                continue
            if peer.connect(5):
                pub.sendMessage('PeerManager.new_peer', peer=peer)
            else:
                failed_peers.append((peer.ip, peer.port))
|
{
"content_hash": "3ffff7b4c2d4f5c3ff487221152eb72b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 92,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.5680963130173062,
"repo_name": "AcademicTorrents/python-r-api",
"id": "cd3bc3712b9932ae4c15c9f7d325a79af14e5d8b",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "academictorrents/PeerSeeker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "56024"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import re as _re
from pyzdde.utils.pyzddeutils import _prettifyCodeDesc, _boldifyText, _prettifyText, _print_mod
class _Buttons(object):
    """Mapping of ZEMAX 3-letter button codes to their analysis/menu names.

    Compiled from ZEMAX version 13.0404 (2013); ZPL macro codes are not
    included.  Insertion order is preserved (it determines the print order
    of findZButtonCode()), and the description strings are kept verbatim —
    including upstream quirks such as "Cdw": "Coating, DIa. vs. Angle" and
    "Stf": "Though Focus Spot" — so they match ZEMAX's own labels.
    """
    button_code = {
        "Off": "None",
        "ABg": "ABg Data Catalog",
        "Agm": "Athermal Glass Map",
        "Bac": "Backup To Archive File",
        "Bfv": "Beam File Viewer",
        "C31": "Src color chart 1931",
        "C76": "Src color chart 1976",
        "Caa": "Coating, Abs. vs. Angle",
        "Car": "Cardinal Points",
        "Cas": "Coat All Surfaces",
        "Caw": "Coating, Abs. vs. Wavelength",
        "Cca": "Convert to Circular Apertures",
        "Cda": "Coating, Dia. vs. Angle",
        "Cdw": "Coating, DIa. vs. Angle",
        "Cfa": "Convert to Floating Apertures",
        "Cfm": "Convert to Maximum Apertures",
        "Cfo": "Convert Format",
        "Cfs": "Chromatic Focal Shift",
        "Cgl": "Convert Global To Local",
        "Chk": "System Check",
        "Clg": "Convert Local to Global",
        "Cls": "Coating List",
        "Cna": "Coating, Ret. vs. Angle",
        "Cng": "Convert to NSC Group",
        "Cnw": "Coating, Ret. vs. Wavelength",
        "Coa": "Convert Asphere Type",
        "Con": "Conjugate Surface Analysis",
        "Cpa": "Coating, Phase vs. Angle",
        "Cpw": "Coating, Phase vs. Wavelength",
        "Cra": "Coating, Refl vs. Angle",
        "Crw": "Coating, Refl. vs. Wavelength",
        "Csd": "NSC concatenate spectral source files",
        "Csf": "NSC convert to spectral source file",
        "Cta": "Coating, Tran. vs. Angle",
        "Ctw": "Coating, Tran. vs. Wavelength",
        "Dbl": "Make Double Pass",
        "Dim": "Partially Coherent Image Analysis",
        "Dip": "Biocular Dipvergence/Convergence",
        "Dis": "Dispersion Plot",
        "Drs": "NSC download radiant source",
        "Dvl": "Dispersion vs. Wavelength Plot",
        "Dvr": "Detector Viewer",
        "Eca": "Explode CAD assembly",
        "Ect": "Edit Coating",
        "Ecp": "Explode Creo parametric assembly",
        "EDE": "Extra Data Editor",
        "Eec": "Export Encrypted Coating",
        "Eia": "Explode inventor assembly",
        "Ele": "ZEMAX Element Drawing",
        "Enc": "Diff Encircled Energy",
        "Esa": "Explode solidworks assembly",
        "Ext": "Exit",
        "Fba": "Find Best Asphere",
        "Fcd": "Field Curv/Distorion",
        "Fcl": "Fiber Coupling",
        "Fie": "Field Data",
        "Fld": "Add Fold Mirror",
        "Flx": "Delete Fold Mirror",
        "Fmm": "FFT MTF Map",
        "Foa": "Foucault Analysis",
        "Foo": "Footprint Analysis",
        "Fov": "Biocular Field of View Analysis",
        "Fps": "FFT PSF",
        "Fvw": "Flux vs. Wavelength",
        "Gbp": "Parax Gaussian Beam",
        "Gbs": "Skew Gaussian Beam",
        "Gcp": "Glass Compare",
        "Gee": "Geometric Encircled Energy",
        "Gen": "General Lens Data",
        "Gft": "Glass Fit",
        "Gho": "Ghost Focus",
        "Gip": "Grin Profile",
        "Gla": "Glass Catalog",
        "Glb": "Global Optimization",
        "Gmf": "Generate MAT file",
        "Gmm": "Geometric MTF Map",
        "Gmp": "Glass Map",
        "Gpr": "Gradium Profile",
        "Grd": "Grid Distortion",
        "Grs": "NSC generate radiant source",
        "Gst": "Glass Substitution Template",
        "Gtf": "Geometric MTF",
        "Gvf": "Geometric MTF vs. Field",
        "Ham": "Hammer Optimization",
        "Hcs": "Huygens PSF Cross Section",
        "Hlp": "Help",
        "Hmf": "Huygens MTF",
        "Hmh": "Huygens MTF vs Field",
        "Hps": "Huygens PSF",
        "Hsm": "Huygens Surface MTF",
        "Htf": "Huygens Through Focus MTF",
        "ISO": "ISO Element Drawing",
        "Ibm": "Geometric Bitmap Image Analysis",
        "Igs": "Export IGES File",
        "Iht": "Incident Angle vs. Image Height",
        "Ilf": "Illumination Surface",
        "Ils": "Illumination Scan",
        "Ima": "Geometric Image Analysis",
        "Imv": "IMA/BIM File Viewer",
        "Ins": "Insert Lens",
        "Int": "Interferogram",
        "Jbv": "Bitmap File Viewer",
        "L3d": "3D Layout",
        "L3n": "NSC 3D Layout",
        "LDE": "Lens Data Editor",
        "LSn": "NSC Shaded Model Layout",
        "Lac": "Last Configuration",
        "Lat": "Lateral Color",
        "Lay": "2D Layout",
        "Len": "Lens Search",
        "Lin": "Line/Edge Response",
        "Lok": "Lock All Windows",
        "Lon": "Longitudinal Aberration",
        "Lsa": "Light Source Analysis",
        "Lsf": "FFT Line/Edge Spread",
        "Lsh": "Shaded Model Layout",
        "Lsm": "Solid Model Layout",
        "Ltr": "NSC lighting trace",
        "Lwf": "Wireframe Layout",
        "MCE": "Multi-Config Editor",
        "MFE": "Merit Function Editor",
        "Mfl": "Merit Function List",
        "Mfo": "Make Focal",
        "Mtf": "Modulation Transfer Function (FFT MTF)",
        "Mth": "MTF vs. Field",
        "NCE": "Non-Sequential Editor",
        "New": "New File",
        "Nxc": "Next Configuration",
        "Obv": "NSC Object Viewer",
        "Opd": "Opd Fan",
        "Ope": "Open File",
        "Opt": "Optimization",
        "Pab": "Pupil Aberration Fan",
        "Pal": "Power Field Map",
        "Pat": "ZRD Path Analysis",
        "Pci": "Partially Coherent Image Analysis",
        "Pcs": "PSF Cross Section",
        "Per": "Performance Test",
        "Pha": "Pol. Phase Aberration",
        "Pmp": "Pol. Pupil Map",
        "Pol": "Pol. Ray Trace",
        "Pop": "Physical Optics Propagation",
        "Ppm": "Power Pupil Map",
        "Pre": "Prescription Data",
        "Prf": "Preferences",
        "Ptf": "Pol. Transmission Fan",
        "Pvr": "CAD part viewer",
        "Pzd": "Playback ZRD on Detectors",
        "Qad": "Quick Adjust",
        "Qfo": "Quick Focus",
        "Raa": "Remove All Apertures",
        "Ray": "Ray Fan",
        "Rcf": "Reload Coating File",
        "Rda": "NSC reverse radiance analysis",
        "Rdb": "Ray Database",
        "Rdw": "NSC roadway lighting analysis",
        "Red": "Redo",
        "Rel": "Relative Illumination",
        "Res": "Restore From Archive File",
        "Rev": "Reverse Elements",
        "Rfm": "RMS Field Map",
        "Rg4": "New Report Graphic 4",
        "Rg6": "New Report Graphic 6",
        "Rmf": "RMS vs. Focus",
        "Rml": "Refresh Macro List",
        "Rms": "RMS vs. Field",
        "Rmw": "RMS vs. Wavelength",
        "Rtc": "Ray Trace Control",
        "Rtr": "Ray Trace",
        "Rva": "Remove Variables",
        "Rxl": "Refresh Extensions List",
        "Sag": "Sag Table",
        "Sas": "Save As",
        "Sav": "Save File",
        "Sca": "Scale Lens",
        "Scc": "Surface Curvature Cross Section",
        "Scv": "Surface Curvature",
        "Sdi": "Seidel Diagram",
        "Sdv": "Src directivity",
        "Sei": "Seidel Coefficients",
        "Sff": "Full Field Spot",
        "Sfv": "Scatter Function Viewer",
        "Sim": "Image Simulation",
        "Sld": "Slider",
        "Slm": "Stock lens matching",
        "Sma": "Spot Matrix",
        "Smc": "Spot Matrix Config",
        "Smf": "Surface MTF",
        "Spc": "Surface Phase Cross Section",
        "Spj": "Src projection",
        "Spo": "Src polar",
        "Spt": "Spot Diagram",
        "Spv": "Scatter Polar Plot",
        "Srp": "Surface Phase",
        "Srs": "Surface Sag",
        "Ssc": "Surface Sag Cross Section",
        "Ssg": "System Summary Graphic",
        "Srv": "Src rms viewer",
        "Ssp": "src spectrum",
        "Stf": "Though Focus Spot",
        "Sti": "NSC convert SDF to IES",
        "Sur": "Surface Data",
        "Sys": "System Data",
        "TDE": "Tolerance Data Editor",
        "Tde": "Tilt/Decenter Elements",
        "Tfg": "Through Focus GTF",
        "Tfm": "Through Focus MTF",
        "Tls": "Tolerance List",
        "Tol": "Tolerancing",
        "Tpf": "Test Plate Fit",
        "Tpl": "Test Plate Lists",
        "Tra": "Pol. Transmission",
        "Trw": "Transmission vs. Wavelength",
        "Tsm": "Tolerance Summary",
        "Un2": "Universal Plot 2D",
        "Und": "Undo",
        "Uni": "Universal Plot",
        "Unl": "Unlock All Windows",
        "Upa": "Update All",
        "Upd": "Update",
        "Vig": "Vignetting Plot",
        "Vop": "Visual Optimization",
        "Vra": "Make All Radii Variable",
        "Vth": "Make All Thickness Variable",
        "Wav": "Wavelength Data",
        "Wfm": "Wavefront Map",
        "Xdi": "Extended Diffraction Image Ana",
        "Xis": "Export IGES/STEP/SAT FIle",
        "Xse": "Extended Source Encircled",
        "Yni": "YNI Contributions",
        "Yyb": "Y-Ybar",
        "Zat": "Zernike Annular Terms",
        "Zbb": "Export Zemax Black Box Data",
        "Zex": "ZEMAX Extensions",
        "Zfr": "Zernike Fringe Terms",
        "Zpd": "Zemax Part Designer",
        "Zpl": "Edit/Run ZPL Macros",
        "Zst": "Zernike Standard Terms",
        "Zvf": "Zernike Coefficients vs. Field"
    }
def showZButtonList():
    """Print every ZEMAX 3-letter button code with its description.

    showZButtonList() -> None (the button codes are printed on screen)
    """
    print("Listing all ZEMAX Button codes:")
    codes = _Buttons.button_code
    for code in sorted(codes):
        _print_mod(_prettifyCodeDesc(code, codes[code]))
    _print_mod(_boldifyText("Total number of buttons = ", str(len(codes))))
def getZButtonCount():
    """Return the total number of known ZEMAX button codes.

    getZButtonCount() -> count
    """
    return len(_Buttons.button_code)
def isZButtonCode(buttonCode):
    """Return whether *buttonCode* is a valid ZEMAX button code.

    isZButtonCode(buttonCode) -> bool

    Parameters
    ----------
    buttonCode : string
        the 3-letter case-sensitive button code to validate

    Returns
    -------
    bool : True if valid button code, False otherwise
    """
    # Membership test on the dict itself; no need to materialise .keys().
    return str(buttonCode) in _Buttons.button_code
def showZButtonDescription(buttonCode):
    """Print a short description of a ZEMAX button code.

    showZButtonDescription(buttonCode) -> description

    Parameters
    ----------
    buttonCode : (string) a 3-letter button code

    Returns
    -------
    None; a short description of the button code's function/analysis
    type is printed on screen (or a notice if the code is invalid).
    """
    code = str(buttonCode)
    if isZButtonCode(code):
        _print_mod(_prettifyText(code, " is a ZEMAX button code",
                                 color0='magenta', color1='black'))
        _print_mod(_prettifyText("Description: ", _Buttons.button_code[code],
                                 color0='blue', color1='black'))
    else:
        print("{} is NOT a valid ZEMAX button code.".format(code))
def findZButtonCode(keywords):
    """Find ZEMAX button codes using specific keywords of interest.

    findZButtonCode("keyword#1 [, keyword#2, keyword#3, ...]") -> searchResult

    Parameters
    ----------
    keywords : a string containing a list of comma separated keywords.

    Example
    -------
    >>> zb.findZButtonCode("Zernike")
    [Zst] Zernike Standard Terms
    [Zvf] Zernike Coefficients vs. Field
    [Zfr] Zernike Fringe Terms
    [Zat] Zernike Annular Terms
    Found 4 Button codes.
    """
    terms = [word.strip() for word in keywords.split(",")]
    matches = []
    for code, description in _Buttons.button_code.items():
        # any() short-circuits on the first matching keyword, mirroring
        # the classic inner-loop-with-break search.
        if any(__find(term, description) for term in terms):
            _print_mod(_prettifyCodeDesc(code, description))
            matches.append(code)
    if matches:
        _print_mod(_boldifyText("Found ", str(len(matches)),
                                " Button codes", 'blue', 'red', 'blue'))
def __find(word2find, instring):
    """Case-insensitively match *word2find* (optionally pluralised with a
    trailing 's') as a whole word inside *instring*."""
    pattern = _re.compile(r'\b({0})s?\b'.format(word2find),
                          flags=_re.IGNORECASE)
    return pattern.search(instring) is not None
if __name__ == '__main__':
    # Manual smoke test: exercises each public helper in this module.
    #Test showZButtonList()
    showZButtonList()
    #Test getZButtonCount()
    print("Total number of buttons:",getZButtonCount())
    #Test isZButtonCode()
    # NOTE(review): "is an button code" below is a typo in the printed
    # string; left untouched here because it is runtime output.
    print("'Pre' is a button code (True/False):", isZButtonCode('Pre'))
    print("'Wav' is an button code (True/False):", isZButtonCode('Wav'))
    print("'RANDOM' is a button code (True/False):", isZButtonCode('RANDOM'))
    #Test showZButtonDescription()
    showZButtonDescription('RANDOM')
    showZButtonDescription('Vth')
|
{
"content_hash": "13eabf9d3e11f0a97584e19d94faaf28",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 95,
"avg_line_length": 32.59836065573771,
"alnum_prop": 0.5993630039393177,
"repo_name": "ng110/PyZDDE",
"id": "2d6f62d7e9de14100f05eaa149b2fd398414588b",
"size": "12422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyzdde/zcodes/zemaxbuttons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32698"
},
{
"name": "Python",
"bytes": "810549"
},
{
"name": "Zimpl",
"bytes": "1145"
}
],
"symlink_target": ""
}
|
import sys
import unittest
import glob
import os
import shutil
import pandas as pd
import numpy as np
from subprocess import call
from margin.utils import getFastaDictionary
SIGNALALIGN_ROOT = "../"
ZYMO_C_READS = SIGNALALIGN_ROOT + "tests/minion_test_reads/C/"
ZYMO_REFERENCE = SIGNALALIGN_ROOT + "tests/test_sequences/zymo_sequence.fasta"
def parse_alignment_full(alignment_file):
    """Parse a signalAlign event-alignment TSV into a DataFrame.

    Parameters
    ----------
    alignment_file : str or file-like
        Path (or buffer) of a headerless, tab-separated alignment file
        produced by signalAlign.

    Returns
    -------
    pandas.DataFrame
        Columns ref_pos, ref_kmer, strand, event_index, kmer,
        posterior_prob and event_mean, taken from raw columns
        1, 2, 4, 5, 9, 12 and 13 respectively.
    """
    # np.str was a deprecated alias for the builtin `str` and was removed in
    # NumPy 1.24, and pd.read_table is deprecated in favour of read_csv with
    # an explicit separator — both replaced here with the supported forms.
    return pd.read_csv(
        alignment_file, sep='\t',
        usecols=(1, 2, 4, 5, 9, 12, 13),
        dtype={'ref_pos': np.int64,
               'ref_kmer': str,
               'strand': str,
               'event_index': np.int64,
               'kmer': str,
               'posterior_prob': np.float64,
               'event_mean': np.float64},
        header=None,
        names=['ref_pos', 'ref_kmer', 'strand', 'event_index', 'kmer',
               'posterior_prob', 'event_mean'])
class LibTest(unittest.TestCase):
    """Drives the compiled signalAlign C library test binary."""

    def test_signalAlign_library(self):
        # A non-zero exit status means at least one C-level test failed.
        exit_code = call("./signalAlignLibTests", shell=True, bufsize=-1,
                         stdout=sys.stdout, stderr=sys.stderr)
        self.assertTrue(exit_code == 0, "signalAlign Library Tests Fail")
class signalAlignLibTests(unittest.TestCase):
    """Fixture base: gives each test a throw-away working directory."""
    def setUp(self):
        # os.makedirs raises if the directory survived a previous run,
        # surfacing leftover state instead of silently reusing it.
        self.work_dir = "./signalAlign_pylibTest/"
        os.makedirs(self.work_dir)
    def tearDown(self):
        shutil.rmtree(self.work_dir)
class SignalAlignAlignmentTest(unittest.TestCase):
    """End-to-end tests: run ./runSignalAlign on MinION reads and compare the
    produced .tsv alignments against checked-in expected alignments."""

    def setUp(self):
        # Fresh scratch directory per test; makedirs raises if it already exists.
        os.makedirs("./signalAlign_unittest/")

    def tearDown(self):
        shutil.rmtree("./signalAlign_unittest/")

    def check_alignments(self, true_alignments, reads, reference, kmer_length, contig_name, extra_args=None):
        """Run the aligner over *reads* against *reference* and verify output.

        Args:
            true_alignments: directory holding the expected .tsv alignment files.
            reads: directory of input .fast5 MinION reads.
            reference: path to the FASTA reference sequence.
            kmer_length: k-mer size used to slice the reference when checking.
            contig_name: contig key to pull from the reference FASTA dictionary.
            extra_args: optional string appended verbatim to the command line.
        """
        assert len(glob.glob(reads + "*.fast5")) > 0, "Didn't find zymo test MinION reads"
        assert os.path.isfile(reference), "Didn't find zymo reference sequence"

        alignment_command = "./runSignalAlign -d={reads} -r={ref} -smt=threeState -o={testDir} " \
                            "".format(reads=reads, ref=reference, testDir="./signalAlign_unittest/")
        if extra_args is not None:
            alignment_command += extra_args

        # 'with' guarantees the devnull handle is closed; the original code
        # opened it and never closed it.
        with open(os.devnull, 'w') as null_output:
            result = call(alignment_command, shell=True, bufsize=-1,
                          stdout=null_output, stderr=null_output)
        self.assertTrue(result == 0, "error running signalAlign alignments command was {}"
                                     "".format(alignment_command))

        test_alignments = glob.glob("./signalAlign_unittest/tempFiles_alignment/*.tsv")
        # (typo fix: was 'referece_sequence')
        reference_sequence = getFastaDictionary(reference)[contig_name]

        def get_kmer(start):
            # k-mer of the reference beginning at position `start`.
            return reference_sequence[start:start + kmer_length]

        self.assertTrue(len(test_alignments) == len(glob.glob(true_alignments + "*.tsv")),
                        "Didn't make all alignments got {got} should be {should}".format(got=len(test_alignments),
                                                                                        should=len(glob.glob(true_alignments + "*.tsv"))))
        for alignment in test_alignments:
            alignment_file = alignment.split("/")[-1]
            expected = parse_alignment_full(true_alignments + alignment_file)
            obs = parse_alignment_full(alignment)
            self.assertTrue(len(obs) == len(expected))
            # Every observed reference k-mer must match the reference sequence
            # sliced at the reported position.
            for row in obs.itertuples():
                ref_pos = row[1]
                obs_kmer = row[2]
                strand = row[3]
                exp_kmer = get_kmer(ref_pos)
                self.assertTrue(obs_kmer == exp_kmer, msg="kmer at index {idx} on strand {strand} is {obs} should be "
                                                          "{exp}, file {f}".format(idx=ref_pos,
                                                                                   strand=strand,
                                                                                   obs=obs_kmer,
                                                                                   exp=exp_kmer,
                                                                                   f=alignment))

    def test_zymo_reads(self):
        zymo_true_alignments = SIGNALALIGN_ROOT + "tests/test_alignments/zymo_C_test_alignments_sm3/" \
                                                  "tempFiles_alignment/"
        self.check_alignments(true_alignments=zymo_true_alignments,
                              reads=ZYMO_C_READS,
                              reference=ZYMO_REFERENCE,
                              kmer_length=6,
                              contig_name="ZYMO",
                              extra_args="--2d ")

    def test_pUC_r9_reads_5mer(self):
        pUC_true_alignments = SIGNALALIGN_ROOT + "tests/test_alignments/pUC_5mer_tempFiles_alignment/"
        pUC_reads = SIGNALALIGN_ROOT + "tests/minion_test_reads/pUC/"
        pUC_reference = SIGNALALIGN_ROOT + "tests/test_sequences/pUC19_SspI.fa"
        self.check_alignments(true_alignments=pUC_true_alignments,
                              reads=pUC_reads,
                              reference=pUC_reference,
                              kmer_length=5,
                              contig_name="pUC19",
                              extra_args="-T=../models/testModelR9_5mer_acegot_template.model "
                                         "-C=../models/testModelR9_5mer_acegot_complement.model "
                                         "--2d ")

    def test_pUC_r9_reads_6mer(self):
        pUC_true_alignments = SIGNALALIGN_ROOT + "tests/test_alignments/pUC_6mer_tempFiles_alignment/"
        pUC_reads = SIGNALALIGN_ROOT + "tests/minion_test_reads/pUC/"
        pUC_reference = SIGNALALIGN_ROOT + "tests/test_sequences/pUC19_SspI.fa"
        self.check_alignments(true_alignments=pUC_true_alignments,
                              reads=pUC_reads,
                              reference=pUC_reference,
                              kmer_length=6,
                              contig_name="pUC19",
                              extra_args="--2d ")

    def test_Ecoli1D_reads_5mer(self):
        pUC_true_alignments = SIGNALALIGN_ROOT + "tests/test_alignments/ecoli1D_test_alignments_sm3/"
        pUC_reads = SIGNALALIGN_ROOT + "tests/minion_test_reads/1D/"
        pUC_reference = SIGNALALIGN_ROOT + "tests/test_sequences/E.coli_K12.fasta"
        self.check_alignments(true_alignments=pUC_true_alignments,
                              reads=pUC_reads,
                              reference=pUC_reference,
                              kmer_length=5,
                              contig_name="gi_ecoli",
                              extra_args="-T=../models/testModelR9p4_5mer_acegt_template.model ")
class signalAlign_EM_test(unittest.TestCase):
    """Smoke test for the trainModels EM pipeline: it must exit cleanly."""

    def setUp(self):
        os.makedirs("./signalAlign_unittest/")

    def tearDown(self):
        shutil.rmtree("./signalAlign_unittest/")

    def test_EM(self):
        em_command = "./trainModels run --config=../tests/trainModels-config.yaml"
        # 'with' guarantees the devnull handle is closed; the original code
        # opened it and never closed it.
        with open(os.devnull, 'w') as null_output:
            result = call(em_command, shell=True, bufsize=-1,
                          stdout=null_output, stderr=null_output)
        self.assertTrue(result == 0, "error running signalAlign alignments command was {}"
                                     "".format(em_command))
def main():
    """Assemble the full test suite in a fixed order and run it verbosely."""
    cases = [
        (LibTest, 'test_signalAlign_library'),
        (SignalAlignAlignmentTest, 'test_zymo_reads'),
        (SignalAlignAlignmentTest, 'test_pUC_r9_reads_5mer'),
        (SignalAlignAlignmentTest, 'test_pUC_r9_reads_6mer'),
        (SignalAlignAlignmentTest, 'test_Ecoli1D_reads_5mer'),
        (signalAlign_EM_test, 'test_EM'),
    ]
    suite = unittest.TestSuite()
    for case_class, method_name in cases:
        suite.addTest(case_class(method_name))
    unittest.TextTestRunner(verbosity=2).run(suite)
# Script entry point: build and run the whole suite.
if __name__ == '__main__':
    main()
|
{
"content_hash": "91f9c0f55d99c6755bc1bb193ab43001",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 139,
"avg_line_length": 46.0919540229885,
"alnum_prop": 0.5463840399002494,
"repo_name": "mitenjain/signalAlign",
"id": "7519d350768c60977ec4dc53a41c0b8a5c0be13d",
"size": "8043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/testSignalAlign.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2766301"
},
{
"name": "C++",
"bytes": "39236"
},
{
"name": "Dockerfile",
"bytes": "1246"
},
{
"name": "HTML",
"bytes": "48463"
},
{
"name": "Jupyter Notebook",
"bytes": "88628"
},
{
"name": "Makefile",
"bytes": "11826"
},
{
"name": "Python",
"bytes": "455126"
},
{
"name": "Shell",
"bytes": "622"
}
],
"symlink_target": ""
}
|
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB
from ArmInterrupts import ArmInterrupts
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
class BaseCPU(MemObject):
    """Abstract base SimObject shared by all gem5 CPU models.

    Declares the common parameters, the per-ISA TLB/interrupt-controller
    children, and the instruction/data cache ports, plus helpers for wiring
    those ports to buses and building private cache hierarchies.
    """
    type = 'BaseCPU'
    abstract = True
    system = Param.System(Parent.any, "system object")
    cpu_id = Param.Int(-1, "CPU identifier")
    numThreads = Param.Unsigned(1, "number of HW thread contexts")
    function_trace = Param.Bool(False, "Enable function trace")
    function_trace_start = Param.Tick(0, "Tick to start function trace")
    checker = Param.BaseCPU(NULL, "checker CPU")
    do_checkpoint_insts = Param.Bool(True,
        "enable checkpoint pseudo instructions")
    do_statistics_insts = Param.Bool(True,
        "enable statistics pseudo instructions")
    profile = Param.Latency('0ns', "trace the kernel stack")
    do_quiesce = Param.Bool(True, "enable quiesce instructions")
    workload = VectorParam.Process([], "processes to run")
    # Per-ISA TLB and interrupt-controller parameters, chosen at class
    # definition time from the build configuration.
    if buildEnv['TARGET_ISA'] == 'sparc':
        dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
        itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
        interrupts = Param.SparcInterrupts(
                NULL, "Interrupt Controller")
    elif buildEnv['TARGET_ISA'] == 'alpha':
        dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
        itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
        interrupts = Param.AlphaInterrupts(
                NULL, "Interrupt Controller")
    elif buildEnv['TARGET_ISA'] == 'x86':
        dtb = Param.X86TLB(X86TLB(), "Data TLB")
        itb = Param.X86TLB(X86TLB(), "Instruction TLB")
        interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
    elif buildEnv['TARGET_ISA'] == 'mips':
        dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
        itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
        interrupts = Param.MipsInterrupts(
                NULL, "Interrupt Controller")
    elif buildEnv['TARGET_ISA'] == 'arm':
        dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
        itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
        interrupts = Param.ArmInterrupts(
                NULL, "Interrupt Controller")
    elif buildEnv['TARGET_ISA'] == 'power':
        UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
        dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
        itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
        interrupts = Param.PowerInterrupts(
                NULL, "Interrupt Controller")
    else:
        print "Don't know what TLB to use for ISA %s" % \
            buildEnv['TARGET_ISA']
        sys.exit(1)
    # Simulation exit conditions based on instruction/load counts.
    max_insts_all_threads = Param.Counter(0,
        "terminate when all threads have reached this inst count")
    max_insts_any_thread = Param.Counter(0,
        "terminate when any thread reaches this inst count")
    max_loads_all_threads = Param.Counter(0,
        "terminate when all threads have reached this load count")
    max_loads_any_thread = Param.Counter(0,
        "terminate when any thread reaches this load count")
    progress_interval = Param.Frequency('0Hz',
        "frequency to print out the progress message")
    defer_registration = Param.Bool(False,
        "defer registration with system (for sampling)")
    tracer = Param.InstTracer(default_tracer, "Instruction tracer")
    icache_port = MasterPort("Instruction Port")
    dcache_port = MasterPort("Data Port")
    # Port-name lists consumed by the connect* helpers below. x86 and ARM
    # have hardware page-table walkers whose ports also go through the caches.
    _cached_ports = ['icache_port', 'dcache_port']
    if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
        _cached_ports += ["itb.walker.port", "dtb.walker.port"]
    _uncached_slave_ports = []
    _uncached_master_ports = []
    if buildEnv['TARGET_ISA'] == 'x86':
        _uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
        _uncached_master_ports += ["interrupts.int_master"]
    def createInterruptController(self):
        # Instantiate the ISA-appropriate interrupt controller child object.
        if buildEnv['TARGET_ISA'] == 'sparc':
            self.interrupts = SparcInterrupts()
        elif buildEnv['TARGET_ISA'] == 'alpha':
            self.interrupts = AlphaInterrupts()
        elif buildEnv['TARGET_ISA'] == 'x86':
            _localApic = X86LocalApic(pio_addr=0x2000000000000000)
            self.interrupts = _localApic
        elif buildEnv['TARGET_ISA'] == 'mips':
            self.interrupts = MipsInterrupts()
        elif buildEnv['TARGET_ISA'] == 'arm':
            self.interrupts = ArmInterrupts()
        elif buildEnv['TARGET_ISA'] == 'power':
            self.interrupts = PowerInterrupts()
        else:
            print "Don't know what Interrupt Controller to use for ISA %s" % \
                buildEnv['TARGET_ISA']
            sys.exit(1)
    def connectCachedPorts(self, bus):
        # exec() is used because port names may be dotted attribute paths
        # (e.g. "itb.walker.port") that plain setattr cannot address.
        for p in self._cached_ports:
            exec('self.%s = bus.slave' % p)
    def connectUncachedPorts(self, bus):
        for p in self._uncached_slave_ports:
            exec('self.%s = bus.master' % p)
        for p in self._uncached_master_ports:
            exec('self.%s = bus.slave' % p)
    def connectAllPorts(self, cached_bus, uncached_bus = None):
        # With a single bus argument, uncached traffic shares the cached bus.
        self.connectCachedPorts(cached_bus)
        if not uncached_bus:
            uncached_bus = cached_bus
        self.connectUncachedPorts(uncached_bus)
    def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None):
        # Attach split L1 I/D caches; after this, the CPU's memory-side
        # connection points are the caches' mem_side ports.
        self.icache = ic
        self.dcache = dc
        self.icache_port = ic.cpu_side
        self.dcache_port = dc.cpu_side
        self._cached_ports = ['icache.mem_side', 'dcache.mem_side']
        if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
            if iwc and dwc:
                self.itb_walker_cache = iwc
                self.dtb_walker_cache = dwc
                self.itb.walker.port = iwc.cpu_side
                self.dtb.walker.port = dwc.cpu_side
                self._cached_ports += ["itb_walker_cache.mem_side", \
                                       "dtb_walker_cache.mem_side"]
            else:
                self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
            # Checker doesn't need its own tlb caches because it does
            # functional accesses only
            if self.checker != NULL:
                self._cached_ports += ["checker.itb.walker.port", \
                                       "checker.dtb.walker.port"]
    def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
        # L1s feed a private L2 through a dedicated bus; the CPU's external
        # connection point becomes the L2's mem_side.
        self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
        self.toL2Bus = CoherentBus()
        self.connectCachedPorts(self.toL2Bus)
        self.l2cache = l2c
        self.toL2Bus.master = self.l2cache.cpu_side
        self._cached_ports = ['l2cache.mem_side']
    def addCheckerCpu(self):
        # No-op here; CPU models that support a checker override this.
        pass
|
{
"content_hash": "218cdc4f1f7123618cb1a76b90e5d317",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 78,
"avg_line_length": 39.663101604278076,
"alnum_prop": 0.6224888769044088,
"repo_name": "xiaoyaozi5566/DynamicCache",
"id": "c27fd1c27db4812728a65715f536c4a52e3212db",
"size": "9719",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/cpu/BaseCPU.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232026"
},
{
"name": "C",
"bytes": "853406"
},
{
"name": "C++",
"bytes": "10403775"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "269390"
},
{
"name": "Hack",
"bytes": "126"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "17355"
},
{
"name": "Perl",
"bytes": "47105700"
},
{
"name": "Perl6",
"bytes": "3549787"
},
{
"name": "Prolog",
"bytes": "44271145"
},
{
"name": "Python",
"bytes": "3283067"
},
{
"name": "R",
"bytes": "2646"
},
{
"name": "Ruby",
"bytes": "217413"
},
{
"name": "Scilab",
"bytes": "56104"
},
{
"name": "Shell",
"bytes": "96876"
},
{
"name": "TeX",
"bytes": "19361"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "16048"
}
],
"symlink_target": ""
}
|
"""Utilities that are distro specific for use on SUSE 12."""
import os
import subprocess
from google_compute_engine import constants
from google_compute_engine.distro_lib import helpers
from google_compute_engine.distro_lib import ip_forwarding_utils
from google_compute_engine.distro_lib import utils
class Utils(utils.Utils):
  """Utilities used by Linux guest services on SUSE 12."""

  network_path = constants.LOCALBASE + '/etc/sysconfig/network'

  def EnableIpv6(self, interfaces, logger, dhclient_script=None):
    """Configure the network interfaces for IPv6 using dhclient.

    Args:
      interfaces: list of string, the output device names for enabling IPv6.
      logger: logger object, used to write to SysLog and serial port.
      dhclient_script: string, the path to a dhclient script used by dhclient.
    """
    # Not implemented for SUSE 12.
    pass

  def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None):
    """Enable the list of network interfaces.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
      dhclient_script: string, the path to a dhclient script used by dhclient.
    """
    # eth0 is already configured; only secondary NICs are brought up.
    secondary_nics = [nic for nic in interfaces if nic != 'eth0']
    if not secondary_nics:
      return
    logger.info('Enabling the Ethernet interfaces %s.', secondary_nics)
    self._WriteIfcfg(secondary_nics, logger)
    self._Ifup(secondary_nics, logger)

  def _WriteIfcfg(self, interfaces, logger):
    """Write ifcfg files for multi-NIC support.

    Existing files are overwritten, which allows ifcfg-* updates in the
    future. Disable the network setup to override this behavior and keep
    customized configurations.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
    """
    for nic in interfaces:
      ifcfg_path = os.path.join(self.network_path, 'ifcfg-%s' % nic)
      contents = '\n'.join([
          '# Added by Google.',
          'STARTMODE=hotplug',
          'BOOTPROTO=dhcp',
          'DHCLIENT_SET_DEFAULT_ROUTE=yes',
          'DHCLIENT_ROUTE_PRIORITY=10%s00' % nic,
          '',
      ])
      with open(ifcfg_path, 'w') as ifcfg_file:
        ifcfg_file.write(contents)
      logger.info('Created ifcfg file for interface %s.', nic)

  def _Ifup(self, interfaces, logger):
    """Activate network interfaces.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
    """
    # A short timeout keeps activation from blocking on dead interfaces.
    command = ['/usr/sbin/wicked', 'ifup', '--timeout', '1'] + interfaces
    try:
      subprocess.check_call(command)
    except subprocess.CalledProcessError:
      logger.warning('Could not activate interfaces %s.', interfaces)

  def HandleClockSync(self, logger):
    """Sync the software clock with the hypervisor clock.

    Args:
      logger: logger object, used to write to SysLog and serial port.
    """
    helpers.CallHwclock(logger)

  def IpForwardingUtils(self, logger, proto_id=None):
    """Get system IP address configuration utilities.

    Args:
      logger: logger object, used to write to SysLog and serial port.
      proto_id: string, the routing protocol identifier for Google IP changes.
    """
    return ip_forwarding_utils.IpForwardingUtilsIproute(logger, proto_id)

  def RestartNetworking(self, logger):
    """Restart the networking service to force a DHCP refresh.

    Args:
      logger: logger object, used to write to SysLog and serial port.
    """
    helpers.SystemctlRestart('wickedd-nanny', logger)
|
{
"content_hash": "0279ba900fef6a70390ffa4909d9e125",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 35.90291262135922,
"alnum_prop": 0.6898323418063819,
"repo_name": "rjschwei/compute-image-packages",
"id": "a8bd48648a40587dc12d7175fa9aaa5226374399",
"size": "4309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "393882"
},
{
"name": "Shell",
"bytes": "68210"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator as H2OPCA
def pca_scoring():
    """Train a 4-component PCA (DEMEAN transform) on USArrests and project
    the training frame into the eigenvector subspace."""
    print("Importing arrests.csv data...")
    arrests_frame = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))

    print("Run PCA with transform = 'DEMEAN'")
    pca_model = H2OPCA(k=4, transform="DEMEAN")
    pca_model.train(x=list(range(4)), training_frame=arrests_frame)
    # TODO: pca_model.show()

    print("Project training data into eigenvector subspace")
    projection = pca_model.predict(arrests_frame)
    print("H2O Projection:")
    projection.head()
# Standalone runs go through the h2o harness; when imported by the test
# runner the test is executed immediately at import time.
if __name__ == "__main__":
    pyunit_utils.standalone_test(pca_scoring)
else:
    pca_scoring()
|
{
"content_hash": "7ec360cc91b8eec4b92187af0d11d3bd",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 87,
"avg_line_length": 23.529411764705884,
"alnum_prop": 0.7175,
"repo_name": "michalkurka/h2o-3",
"id": "2c94395f6681c3949bd25cdf409e7c2bba0c1a8f",
"size": "800",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/pca/pyunit_scoringPCA.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
}
|
"""
Panflute: pandoc filters made simple
====================================
Panflute is a Python package that makes `Pandoc <http://pandoc.org/>`_
filters fun to write. (`Installation <install.html>`_)
"""
from .utils import debug
from .containers import ListContainer, DictContainer
from .base import Element, Block, Inline, MetaValue
# These elements are not part of pandoc-types
from .elements import (
Doc, Citation, ListItem,
DefinitionItem, Definition, LineItem)
from .elements import (
Null, HorizontalRule, Space, SoftBreak, LineBreak, Str,
Code, BlockQuote, Note, Div, Plain, Para, Emph, Strong, Underline,
Strikeout, Superscript, Subscript, SmallCaps, Span, RawBlock, RawInline,
Math, CodeBlock, Link, Image, BulletList, OrderedList, DefinitionList,
LineBlock, Header, Quoted, Cite)
from .table_elements import (
Table, TableHead, TableFoot, TableBody,
TableRow, TableCell, Caption)
from .elements import (
MetaList, MetaMap, MetaString, MetaBool, MetaInlines, MetaBlocks)
from .io import load, dump, run_filter, run_filters
from .io import toJSONFilter, toJSONFilters # Wrappers
from .tools import (
stringify, yaml_filter, shell, run_pandoc, convert_text, get_option)
from .autofilter import main, panfl, get_filter_dirs, stdio
from .version import __version__
|
{
"content_hash": "0041b6ac3fc06905fb19d27f826f406d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.7214714714714715,
"repo_name": "sergiocorreia/panflute",
"id": "f3de3a6dbfb295c5c4fdffcd8f86ede8e65eb456",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panflute/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "158636"
},
{
"name": "TeX",
"bytes": "211"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ray.rllib.policy.policy import Policy
def _sample(probs):
return [np.random.choice(len(pr), p=pr) for pr in probs]
class KerasPolicy(Policy):
    """A Policy backed by separate Keras actor and critic models.

    Built for a specific actor-critic usage pattern and therefore less
    general than TFPolicy or TorchPolicy.

    Args:
        observation_space (gym.Space): Observation space of the policy.
        action_space (gym.Space): Action space of the policy.
        config (dict): Policy-specific configuration data.
        actor (Model): Keras model holding the policy network.
        critic (Model): Keras model holding the value function.
    """

    def __init__(self,
                 observation_space,
                 action_space,
                 config,
                 actor=None,
                 critic=None):
        Policy.__init__(self, observation_space, action_space, config)
        self.actor = actor
        self.critic = critic
        self.models = [self.actor, self.critic]

    def compute_actions(self, obs, *args, **kwargs):
        # Batch the observations once and query both networks on it.
        batched_obs = np.array(obs)
        action_probs = self.actor.predict(batched_obs)
        state_values = self.critic.predict(batched_obs)
        return _sample(action_probs), [], {"vf_preds": state_values.flatten()}

    def learn_on_batch(self, batch, *args):
        # Both networks are fitted with identical training settings.
        fit_options = dict(epochs=1, verbose=0, steps_per_epoch=20)
        self.actor.fit(batch["obs"], batch["adv_targets"], **fit_options)
        self.critic.fit(batch["obs"], batch["value_targets"], **fit_options)
        return {}

    def get_weights(self):
        weights = []
        for model in self.models:
            weights.append(model.get_weights())
        return weights

    def set_weights(self, weights):
        return [
            model.set_weights(model_weights)
            for model, model_weights in zip(self.models, weights)
        ]
|
{
"content_hash": "027397923b40494d8e0aa86a790671b2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 31.246153846153845,
"alnum_prop": 0.5947808961102905,
"repo_name": "atumanov/ray",
"id": "3008e133c1c6dfff8f427a18a4273d817efbf020",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/rllib/keras_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
}
|
"""
Description: Bounding box utilities for localization and detection
Author = "Olalekan Ogunmolu"
Adapted from http://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
Date: May 04, 2017
"""
def bb_intersection_over_union(boxA, boxB):
    """Compute intersection over union (IoU) of two axis-aligned boxes.

    Boxes are (x1, y1, x2, y2) with inclusive pixel coordinates, hence the
    +1 correction on every side length.

    Args:
        boxA: first bounding box as (x1, y1, x2, y2).
        boxB: second bounding box as (x1, y1, x2, y2).

    Returns:
        float IoU in [0, 1]; 0.0 when the boxes do not overlap.
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # Clamp each side length at zero: for disjoint boxes xB < xA (or yB < yA)
    # and the unclamped product would yield a bogus non-zero "area".
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    # compute the area of both the prediction and ground-truth rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # intersection over union = intersection area / union area
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou
|
{
"content_hash": "daa1a9f39d2fd8326ede98a451926f76",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 102,
"avg_line_length": 31.90625,
"alnum_prop": 0.693437806072478,
"repo_name": "lakehanne/ensenso",
"id": "de3f7b6539cdc4ea2145959980a5f5f376bf029b",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ensenso_detect/src/utils/box_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "232"
},
{
"name": "C++",
"bytes": "113554"
},
{
"name": "CMake",
"bytes": "8920"
},
{
"name": "Jupyter Notebook",
"bytes": "47422"
},
{
"name": "Lua",
"bytes": "28639"
},
{
"name": "Python",
"bytes": "163264"
},
{
"name": "Shell",
"bytes": "1352"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a uniqueness constraint to
    # Tag.name (CharField, max_length 80). Do not edit the operations.
    dependencies = [
        ('recipe_book', '0014_auto_20161110_0115'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tag',
            name='name',
            field=models.CharField(max_length=80, unique=True),
        ),
    ]
|
{
"content_hash": "d42db99b5b96fafebef1593ccc0c4fc2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.5923076923076923,
"repo_name": "jneuendorf/what-should-i-eat",
"id": "5e64abc05104e9f2facef2e1828050675d2c6474",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipe_book/migrations/0015_auto_20161124_2214.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10386"
},
{
"name": "CoffeeScript",
"bytes": "6067"
},
{
"name": "HTML",
"bytes": "78432"
},
{
"name": "JavaScript",
"bytes": "89533"
},
{
"name": "Python",
"bytes": "51656"
},
{
"name": "Shell",
"bytes": "204"
}
],
"symlink_target": ""
}
|
'''
------------------------------------------------------------------------------
Copyright 2016 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
==================================================
FindLocalPeaks.py
--------------------------------------------------
requirements: ArcGIS X.X, Python 2.7 or Python 3.4
author: ArcGIS Solutions
contact: support@esri.com
company: Esri
==================================================
description:
Uses an inverted sinks method to find several local peaks on a surface
==================================================
history:
11/29/2016 - mf - Convert from model to script tool
==================================================
'''
# IMPORTS ==========================================
import os
import sys
import traceback
import arcpy
from arcpy import env
import VisibilityUtilities
# Script-tool parameters; GetParameterAsText always returns strings.
inputAreaFeature = arcpy.GetParameterAsText(0) # Input Area
inputNumberOfPeaks = arcpy.GetParameterAsText(1) # Number of Highest Points
inputSurfaceRaster = arcpy.GetParameterAsText(2) # Input Surface
outputPeakFeatures = arcpy.GetParameterAsText(3) # Output Peak Points
# LOCALS ===========================================
deleteme = [] # intermediate datasets to be deleted
debug = True # extra messaging during development
# FUNCTIONS ========================================
def main():
    """Run findLocalPeaks with the script-tool parameters and set the output."""
    try:
        # get/set environment
        env.overwriteOutput = True
        VisibilityUtilities.findLocalPeaks(inputAreaFeature,
                                           inputNumberOfPeaks,
                                           inputSurfaceRaster,
                                           outputPeakFeatures)
        # Set output
        arcpy.SetParameter(3, outputPeakFeatures)
    except arcpy.ExecuteError:
        # Geoprocessing-tool errors: forward the tool messages.
        tool_messages = arcpy.GetMessages()
        arcpy.AddError(tool_messages)
        print(tool_messages)
    except:
        # Any other Python error: report the traceback plus the tool messages.
        exc_traceback = sys.exc_info()[2]
        traceback_text = traceback.format_tb(exc_traceback)[0]
        python_message = ("PYTHON ERRORS:\nTraceback info:\n" + traceback_text +
                          "\nError Info:\n" + str(sys.exc_info()[1]))
        arcpy_message = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
        # Return error messages for use in script tool or Python Window
        arcpy.AddError(python_message)
        arcpy.AddError(arcpy_message)
        # Print error messages for use in Python / Python Window
        print(python_message + "\n")
        print(arcpy_message)
    finally:
        # cleanup intermediate datasets
        if deleteme:
            if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
            for dataset in deleteme:
                if debug == True: arcpy.AddMessage("Removing: " + str(dataset))
                arcpy.Delete_management(dataset)
            if debug == True: arcpy.AddMessage("Done")
# MAIN =============================================
# Script entry point.
if __name__ == "__main__":
    main()
|
{
"content_hash": "f2bd813bbd240870afe53392e554bc23",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 105,
"avg_line_length": 35.386138613861384,
"alnum_prop": 0.5506435366536094,
"repo_name": "Esri/military-tools-geoprocessing-toolbox",
"id": "5b086171a86361977f4f2f66384f68ccec335dbc",
"size": "3590",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tools/militarytools/esri/toolboxes/scripts/FindLocalPeaks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5466"
},
{
"name": "Python",
"bytes": "708673"
}
],
"symlink_target": ""
}
|
import logging
import ailment
from .atoms import Register, Tmp, MemoryLocation
from .constants import OP_BEFORE, OP_AFTER
from .dataset import DataSet
from .external_codeloc import ExternalCodeLocation
from .undefined import Undefined
from ...engines.light import SimEngineLightAIL, RegisterOffset, SpOffset
from ...errors import SimEngineError
l = logging.getLogger('angr.analyses.reaching_definitions.engine_ail')
class SimEngineRDAIL(SimEngineLightAIL): # pylint:disable=abstract-method
    def __init__(self, current_local_call_depth, maximum_local_call_depth, function_handler=None):
        """
        :param current_local_call_depth:  Current depth of local function calls (stored for call handling).
        :param maximum_local_call_depth:  Maximum depth of local function calls (stored).
        :param function_handler:          Optional handler object for function calls.
        """
        super(SimEngineRDAIL, self).__init__()
        self._current_local_call_depth = current_local_call_depth
        self._maximum_local_call_depth = maximum_local_call_depth
        self._function_handler = function_handler
    def process(self, state, *args, **kwargs):
        """Process `state` through this engine and return the resulting state.

        :param state:  The reaching-definitions state to process.
        :return:       The engine's state after processing (self.state).
        """
        # we are using a completely different state. Therefore, we directly call our _process() method before
        # SimEngine becomes flexible enough.
        try:
            self._process(state, None, block=kwargs.pop('block', None))
        except SimEngineError as e:
            # Engine errors are swallowed unless the caller passed fail_fast=True.
            if kwargs.pop('fail_fast', False) is True:
                raise e
        return self.state
#
# Private methods
#
    @staticmethod
    def _external_codeloc():
        # Code location used for definitions originating outside the analyzed code.
        return ExternalCodeLocation()
#
# AIL statement handlers
#
    def _ail_handle_Stmt(self, stmt):
        # Notify the analysis observer (if any) before and after the base
        # class dispatches the statement to its specific handler.
        if self.state.analysis:
            self.state.analysis.observe(self.ins_addr, stmt, self.block, self.state, OP_BEFORE)
        super(SimEngineRDAIL, self)._ail_handle_Stmt(stmt)
        if self.state.analysis:
            self.state.analysis.observe(self.ins_addr, stmt, self.block, self.state, OP_AFTER)
def _ail_handle_Assignment(self, stmt):
"""
:param ailment.Assignment stmt:
:return:
"""
src = self._expr(stmt.src)
dst = stmt.dst
if src is None:
src = DataSet(Undefined(), dst.bits)
if type(dst) is ailment.Tmp:
self.state.kill_and_add_definition(Tmp(dst.tmp_idx), self._codeloc(), src)
self.tmps[dst.tmp_idx] = src
elif type(dst) is ailment.Register:
reg = Register(dst.reg_offset, dst.bits / 8)
self.state.kill_and_add_definition(reg, self._codeloc(), src)
else:
l.warning('Unsupported type of Assignment dst %s.', type(dst).__name__)
    def _ail_handle_Store(self, stmt):
        # Evaluate both sub-expressions for their side effects only; this
        # handler records no memory definition.
        data = self._expr(stmt.data) # pylint:disable=unused-variable
        addr = self._expr(stmt.addr) # pylint:disable=unused-variable
def _ail_handle_Jump(self, stmt):
target = self._expr(stmt.target) # pylint:disable=unused-variable
def _ail_handle_ConditionalJump(self, stmt):
cond = self._expr(stmt.condition) # pylint:disable=unused-variable
true_target = self._expr(stmt.true_target) # pylint:disable=unused-variable
false_target = self._expr(stmt.false_target) # pylint:disable=unused-variable
ip = Register(self.arch.ip_offset, self.arch.bits / 8)
self.state.kill_definitions(ip, self._codeloc(), )
# kill all cc_ops
# TODO: make it architecture agnostic
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), self._codeloc())
def _ail_handle_Call(self, stmt):
target = self._expr(stmt.target) # pylint:disable=unused-variable
ip = Register(self.arch.ip_offset, self.arch.bits / 8)
self.state.kill_definitions(ip, self._codeloc())
# if arguments exist, use them
if stmt.args:
for arg in stmt.args:
self._expr(arg)
# kill all caller-saved registers
if stmt.calling_convention is not None and stmt.calling_convention.CALLER_SAVED_REGS:
for reg_name in stmt.calling_convention.CALLER_SAVED_REGS:
offset, size = self.arch.registers[reg_name]
reg = Register(offset, size)
self.state.kill_definitions(reg, self._codeloc())
# kill all cc_ops
# TODO: make it architecture agnostic
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), self._codeloc())
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), self._codeloc())
#
# AIL expression handlers
#
def _ail_handle_Tmp(self, expr):
if self.state._track_tmps:
self.state.add_use(Tmp(expr.tmp_idx), self._codeloc())
return super(SimEngineRDAIL, self)._ail_handle_Tmp(expr)
def _ail_handle_Register(self, expr):
reg_offset = expr.reg_offset
bits = expr.bits
self.state.add_use(Register(reg_offset, bits / 8), self._codeloc())
if reg_offset == self.arch.sp_offset:
return SpOffset(bits, 0)
elif reg_offset == self.arch.bp_offset:
return SpOffset(bits, 0, is_base=True)
try:
data = DataSet(set(), bits)
defs = self.state.register_definitions.get_objects_by_offset(reg_offset)
if not defs:
# define it right away as an external dependency
self.state.kill_and_add_definition(Register(reg_offset, bits / 8), self._external_codeloc(),
data=expr
)
defs = self.state.register_definitions.get_objects_by_offset(reg_offset)
assert defs
for def_ in defs:
if def_.data is not None:
data.update(def_.data)
else:
l.warning('Data in register <%s> is undefined at %#x.',
self.arch.register_names[reg_offset], self.ins_addr
)
return data
except KeyError:
return RegisterOffset(bits, reg_offset, 0)
def _ail_handle_Load(self, expr):
addr = self._expr(expr.addr)
size = expr.size
# TODO: Load from memory
return MemoryLocation(addr, size)
def _ail_handle_Convert(self, expr):
return ailment.Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed,
self._expr(expr.operand))
def _ail_handle_CmpEQ(self, expr):
op0 = self._expr(expr.operands[0])
op1 = self._expr(expr.operands[1])
return ailment.Expr.BinaryOp(expr.idx, expr.op, [op0, op1], **expr.tags)
def _ail_handle_CmpLE(self, expr):
op0 = self._expr(expr.operands[0])
op1 = self._expr(expr.operands[1])
return ailment.Expr.BinaryOp(expr.idx, expr.op, [op0, op1], **expr.tags)
def _ail_handle_Xor(self, expr):
op0 = self._expr(expr.operands[0])
op1 = self._expr(expr.operands[1])
return ailment.Expr.BinaryOp(expr.idx, expr.op, [op0, op1], **expr.tags)
def _ail_handle_Const(self, expr):
return expr
#
# User defined high level statement handlers
#
def _handle_function(self):
if self._current_local_call_depth > self._maximum_local_call_depth:
l.warning('The analysis reached its maximum recursion depth.')
return None
defs_ip = self.state.register_definitions.get_objects_by_offset(self.arch.ip_offset)
if len(defs_ip) != 1:
l.error('Invalid definition(s) for IP.')
return None
ip_data = next(iter(defs_ip)).data
if len(ip_data) != 1:
l.error('Invalid number of values for IP.')
return None
ip_addr = ip_data.get_first_element()
if not isinstance(ip_addr, (int, long)):
l.error('Invalid type %s for IP.', type(ip_addr).__name__)
return None
is_internal = False
ext_func_name = None
if self.state.loader.main_object.contains_addr(ip_addr) is True:
ext_func_name = self.state.loader.find_plt_stub_name(ip_addr)
if ext_func_name is None:
is_internal = True
else:
symbol = self.state.loader.find_symbol(ip_addr)
if symbol is not None:
ext_func_name = symbol.name
if ext_func_name is not None:
handler_name = 'handle_%s' % ext_func_name
if hasattr(self._function_handler, handler_name):
getattr(self._function_handler, handler_name)(self.state, self._codeloc())
else:
l.warning('Please implement the external function handler for %s() with your own logic.',
ext_func_name)
elif is_internal is True:
handler_name = 'handle_local_function'
if hasattr(self._function_handler, handler_name):
is_updated, state = getattr(self._function_handler, handler_name)(self.state, ip_addr,
self._current_local_call_depth + 1,
self._maximum_local_call_depth)
if is_updated is True:
self.state = state
else:
l.warning('Please implement the local function handler with your own logic.')
else:
l.warning('Could not find function name for external function at address %#x.', ip_addr)
return None
|
{
"content_hash": "f3e16b3b3c76ee4ed381f910923c0cd5",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 117,
"avg_line_length": 38.544401544401545,
"alnum_prop": 0.590904537714114,
"repo_name": "tyb0807/angr",
"id": "6cba3a894f42a4267af99e724972ed29de01500c",
"size": "9983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/reaching_definitions/engine_ail.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39875"
},
{
"name": "Makefile",
"bytes": "610"
},
{
"name": "Python",
"bytes": "3884780"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``week_day_id`` column (default ``-1``) to the ``Day`` model."""
    dependencies = [
        ('Bot', '0002_auto_20170421_1449'),
    ]
    operations = [
        migrations.AddField(
            model_name='day',
            name='week_day_id',
            field=models.IntegerField(default=-1, verbose_name='week_day_id'),
        ),
    ]
|
{
"content_hash": "e35775e5e33f02de215590150415c758",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5895522388059702,
"repo_name": "leoniknik/PartyBot",
"id": "53e8accab648bc40fb88fc5b72af4a0f4a9d2122",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bot/migrations/0003_day_week_day_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "361"
},
{
"name": "HTML",
"bytes": "14417"
},
{
"name": "JavaScript",
"bytes": "42"
},
{
"name": "Python",
"bytes": "41484"
}
],
"symlink_target": ""
}
|
import datetime
import pickle
import re
import pandas as pd
import pytest
import ibis
import ibis.common.exceptions as com
import ibis.config as config
import ibis.expr.analysis as L
import ibis.expr.api as api
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.common.exceptions import ExpressionError, RelationError
from ibis.expr.types import ColumnExpr, TableExpr
from ibis.tests.util import assert_equal, assert_pickle_roundtrip
@pytest.fixture
def set_ops_schema_top():
    """Schema shared by the set-operation tables 'foo' and 'bar'."""
    return [('key', 'string'), ('value', 'double')]
@pytest.fixture
def set_ops_schema_bottom():
    """Schema with an extra 'key2' column — incompatible with the top schema."""
    return [('key', 'string'), ('key2', 'string'), ('value', 'double')]
@pytest.fixture
def setops_table_foo(set_ops_schema_top):
    """Unbound table 'foo' using the shared set-operation schema."""
    return ibis.table(set_ops_schema_top, 'foo')
@pytest.fixture
def setops_table_bar(set_ops_schema_top):
    """Unbound table 'bar' using the shared set-operation schema."""
    return ibis.table(set_ops_schema_top, 'bar')
@pytest.fixture
def setops_table_baz(set_ops_schema_bottom):
    """Unbound table 'baz' with a schema that differs from 'foo'/'bar'."""
    return ibis.table(set_ops_schema_bottom, 'baz')
@pytest.fixture
def setops_relation_error_message():
    """Expected error text when set operations see mismatched schemas."""
    return 'Table schemas must be equal for set operations'
def test_empty_schema():
    """A table created with no columns has a falsy (empty) schema."""
    empty = api.table([], 'foo')
    assert not empty.schema()
def test_columns(con):
    """`columns` lists the schema's column names in declaration order."""
    t = con.table('alltypes')
    result = t.columns
    expected = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
    assert result == expected
def test_view_new_relation(table):
    """`view()` yields a semantically distinct relation rooted at itself."""
    # For assisting with self-joins and other self-referential operations
    # where we need to be able to treat instances of the same TableExpr as
    # semantically distinct
    #
    # This thing is not exactly a projection, since it has no semantic
    # meaning when it comes to execution
    tview = table.view()
    roots = tview.op().root_tables()
    assert len(roots) == 1
    assert roots[0] is tview.op()
def test_get_type(table, schema_dict):
    """`_get_type` returns the declared dtype for every schema column."""
    for k, v in schema_dict.items():
        assert table._get_type(k) == dt.dtype(v)
def test_getitem_column_select(table, schema_dict):
    """`table[name]` yields a column expression back-referencing the table."""
    for k, v in schema_dict.items():
        col = table[k]
        # Make sure it's the right type
        assert isinstance(col, ColumnExpr)
        # Ensure we have a field selection with back-reference to the table
        parent = col.parent()
        assert isinstance(parent, ops.TableColumn)
        assert parent.parent() is table
def test_getitem_attribute(table):
    """Attribute access selects columns, but built-ins win on name clashes."""
    result = table.a
    assert_equal(result, table['a'])
    assert 'a' in dir(table)
    # Project and add a name that conflicts with a TableExpr built-in
    # attribute
    view = table[[table, table['a'].name('schema')]]
    assert not isinstance(view.schema, ColumnExpr)
def test_projection(table):
    """Projecting by name list preserves order and per-column types."""
    cols = ['f', 'a', 'h']
    proj = table[cols]
    assert isinstance(proj, TableExpr)
    assert isinstance(proj.op(), ops.Selection)
    assert proj.schema().names == cols
    for c in cols:
        expr = proj[c]
        assert isinstance(expr, type(table[c]))
def test_projection_no_list(table):
    """`select()` accepts a bare expression as well as a list."""
    expr = (table.f * 2).name('bar')
    result = table.select(expr)
    expected = table.projection([expr])
    assert_equal(result, expected)
def test_projection_with_exprs(table):
    """Named expressions project fine; unnamed ones raise ExpressionError."""
    # unnamed expr to test
    mean_diff = (table['a'] - table['c']).mean()
    col_exprs = [table['b'].log().name('log_b'), mean_diff.name('mean_diff')]
    proj = table[col_exprs + ['g']]
    schema = proj.schema()
    assert schema.names == ['log_b', 'mean_diff', 'g']
    assert schema.types == [dt.double, dt.double, dt.string]
    # Test with unnamed expr
    with pytest.raises(ExpressionError):
        table.projection(['g', table['a'] - table['c']])
def test_projection_duplicate_names(table):
    """Selecting the same column twice is an integrity error."""
    with pytest.raises(com.IntegrityError):
        table.projection([table.c, table.c])
def test_projection_invalid_root(table):
    """Projecting expressions rooted at a different table is a RelationError."""
    schema1 = {'foo': 'double', 'bar': 'int32'}
    left = api.table(schema1, name='foo')
    right = api.table(schema1, name='bar')
    exprs = [right['foo'], right['bar']]
    with pytest.raises(RelationError):
        left.projection(exprs)
def test_projection_unnamed_literal_interactive_blowup(con):
    """Selecting an unnamed literal in interactive mode mentions 'named'."""
    # #147 and #153 alike
    # NOTE(review): if no exception is raised this test silently passes;
    # consider pytest.raises if the error is guaranteed — TODO confirm.
    table = con.table('functional_alltypes')
    exprs = [table.bigint_col, ibis.literal(5)]
    with config.option_context('interactive', True):
        try:
            table.select(exprs)
        except Exception as e:
            assert 'named' in e.args[0]
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_projection_of_aggregated(table):
    """Placeholder (not yet tested): aggregations should block projections."""
    # Fully-formed aggregations "block"; in a projection, column
    # expressions referencing table expressions below the aggregation are
    # invalid.
    assert False
def test_projection_with_star_expr(table):
    """`t[t, extra]` appends a column to the full schema; foreign roots fail."""
    new_expr = (table['a'] * 5).name('bigger_a')
    t = table
    # it lives!
    proj = t[t, new_expr]
    repr(proj)
    ex_names = table.schema().names + ['bigger_a']
    assert proj.schema().names == ex_names
    # cannot pass an invalid table expression
    t2 = t.aggregate([t['a'].sum().name('sum(a)')], by=['g'])
    with pytest.raises(RelationError):
        t[[t2]]
    # TODO: there may be some ways this can be invalid
def test_projection_convenient_syntax(table):
    """Varargs-style and list-style star projections are equivalent."""
    proj = table[table, table['a'].name('foo')]
    proj2 = table[[table, table['a'].name('foo')]]
    assert_equal(proj, proj2)
def test_projection_mutate_analysis_bug(con):
    """Regression for GH #549: project after filter + mutate without error."""
    # GH #549
    t = con.table('airlines')
    filtered = t[t.depdelay.notnull()]
    leg = ibis.literal('-').join([t.origin, t.dest])
    mutated = filtered.mutate(leg=leg)
    # it works!
    mutated['year', 'month', 'day', 'depdelay', 'leg']
def test_projection_self(table):
    """Indexing a table with itself equals an explicit self-projection."""
    via_getitem = table[table]
    via_method = table.projection(table)
    assert_equal(via_getitem, via_method)
def test_projection_array_expr(table):
    """Indexing with one column is shorthand for a one-column projection."""
    shorthand = table[table.a]
    explicit = table[[table.a]]
    assert_equal(shorthand, explicit)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_add_column_aggregate_crossjoin(table):
    """Placeholder (not yet tested): columns derived from scalar aggregates."""
    # A new column that depends on a scalar value produced by this or some
    # other table.
    #
    # For example:
    # SELECT *, b - VAL
    # FROM table1
    #
    # Here, VAL could be something produced by aggregating table1 or any
    # other table for that matter.
    assert False
def test_mutate(table):
    """mutate() with positional, keyword, and literal args == star projection."""
    one = table.f * 2
    foo = (table.a + table.b).name('foo')
    expr = table.mutate(foo, one=one, two=2)
    expected = table[table, foo, one.name('one'), ibis.literal(2).name('two')]
    assert_equal(expr, expected)
def test_mutate_alter_existing_columns(table):
    """mutate() replaces an existing column in place and appends new ones."""
    new_f = table.f * 2
    foo = table.d * 2
    expr = table.mutate(f=new_f, foo=foo)
    expected = table[
        'a',
        'b',
        'c',
        'd',
        'e',
        new_f.name('f'),
        'g',
        'h',
        'i',
        'j',
        'k',
        foo.name('foo'),
    ]
    assert_equal(expr, expected)
def test_replace_column(table):
    """set_column() swaps one column for a new expression of the same name."""
    tb = api.table([('a', 'int32'), ('b', 'double'), ('c', 'string')])
    expr = tb.b.cast('int32')
    tb2 = tb.set_column('b', expr)
    expected = tb[tb.a, expr.name('b'), tb.c]
    assert_equal(tb2, expected)
def test_filter_no_list(table):
    """filter() accepts a bare predicate as well as a list of predicates."""
    predicate = table.a > 5
    assert_equal(table.filter(predicate), table[predicate])
def test_add_predicate(table):
    """Filtering with a boolean column yields a Selection op."""
    pred = table['a'] > 5
    result = table[pred]
    assert isinstance(result.op(), ops.Selection)
def test_invalid_predicate(table, schema):
    """A predicate rooted at a lookalike table raises RelationError."""
    # a lookalike
    table2 = api.table(schema, name='bar')
    predicate = table2.a > 5
    with pytest.raises(RelationError):
        table.filter(predicate)
def test_add_predicate_coalesce(table):
    """Chained filters fuse into one Selection rather than nesting."""
    # Successive predicates get combined into one rather than nesting. This
    # is mainly to enhance readability since we could handle this during
    # expression evaluation anyway.
    pred1 = table['a'] > 5
    pred2 = table['b'] > 0
    result = table[pred1][pred2]
    expected = table.filter([pred1, pred2])
    assert_equal(result, expected)
    # 59, if we are not careful, we can obtain broken refs
    interm = table[pred1]
    result = interm.filter([interm['b'] > 0])
    assert_equal(result, expected)
def test_repr_same_but_distinct_objects(con):
    """Two handles to the same table are deduplicated in the repr."""
    t = con.table('test1')
    t_copy = con.table('test1')
    table2 = t[t_copy['f'] > 0]
    result = repr(table2)
    assert result.count('DatabaseTable') == 1
def test_filter_fusion_distinct_table_objects(con):
    """Filter fusion treats distinct handles to one table as equivalent."""
    t = con.table('test1')
    tt = con.table('test1')
    expr = t[t.f > 0][t.c > 0]
    expr2 = t[t.f > 0][tt.c > 0]
    expr3 = t[tt.f > 0][tt.c > 0]
    expr4 = t[tt.f > 0][t.c > 0]
    assert_equal(expr, expr2)
    assert repr(expr) == repr(expr2)
    assert_equal(expr, expr3)
    assert_equal(expr, expr4)
def test_column_relabel(table):
    """relabel() renames mapped columns and leaves the rest untouched."""
    # GH #551. Keeping the test case very high level to not presume that
    # the relabel is necessarily implemented using a projection
    types = ['int32', 'string', 'double']
    table = api.table(zip(['foo', 'bar', 'baz'], types))
    result = table.relabel({'foo': 'one', 'baz': 'three'})
    schema = result.schema()
    ex_schema = api.schema(zip(['one', 'bar', 'three'], types))
    assert_equal(schema, ex_schema)
def test_limit(table):
    """limit() records both the row count and the offset on the op."""
    op = table.limit(10, offset=5).op()
    assert op.n == 10
    assert op.offset == 5
def test_sort_by(table):
    """sort_by() accepts names, bare strings, and (key, order) tuples."""
    # Commit to some API for ascending and descending
    #
    # table.sort_by(['g', expr1, desc(expr2), desc(expr3)])
    #
    # Default is ascending for anything coercable to an expression,
    # and we'll have ascending/descending wrappers to help.
    result = table.sort_by(['f'])
    sort_key = result.op().sort_keys[0].op()
    assert_equal(sort_key.expr, table.f)
    assert sort_key.ascending
    # non-list input. per #150
    result2 = table.sort_by('f')
    assert_equal(result, result2)
    # False / 'descending' / 0 all mean descending order
    result2 = table.sort_by([('f', False)])
    result3 = table.sort_by([('f', 'descending')])
    result4 = table.sort_by([('f', 0)])
    key2 = result2.op().sort_keys[0].op()
    key3 = result3.op().sort_keys[0].op()
    key4 = result4.op().sort_keys[0].op()
    assert not key2.ascending
    assert not key3.ascending
    assert not key4.ascending
    assert_equal(result2, result3)
def test_sort_by_desc_deferred_sort_key(table):
    """ibis.desc() on a yet-to-exist column resolves against the result."""
    result = table.group_by('g').size().sort_by(ibis.desc('count'))
    tmp = table.group_by('g').size()
    expected = tmp.sort_by((tmp['count'], False))
    expected2 = tmp.sort_by(ibis.desc(tmp['count']))
    assert_equal(result, expected)
    assert_equal(result, expected2)
def test_slice_convenience(table):
    """Slicing maps to limit(); steps != 1 and open/negative bounds raise."""
    expr = table[:5]
    expr2 = table[:5:1]
    assert_equal(expr, table.limit(5))
    assert_equal(expr, expr2)
    expr = table[2:7]
    expr2 = table[2:7:1]
    assert_equal(expr, table.limit(5, offset=2))
    assert_equal(expr, expr2)
    with pytest.raises(ValueError):
        table[2:15:2]
    with pytest.raises(ValueError):
        table[5:]
    with pytest.raises(ValueError):
        table[:-5]
    with pytest.raises(ValueError):
        table[-10:-5]
def test_table_count(table):
    """count() yields an integer scalar named 'count' backed by ops.Count."""
    result = table.count()
    assert isinstance(result, ir.IntegerScalar)
    assert isinstance(result.op(), ops.Count)
    assert result.get_name() == 'count'
def test_len_raises_expression_error(table):
    """len() is intentionally unsupported on table expressions."""
    with pytest.raises(com.ExpressionError):
        len(table)
def test_sum_expr_basics(table, int_col):
    """Integer column sums produce an IntegerScalar."""
    # Impala gives bigint for all integer types
    ex_class = ir.IntegerScalar
    result = table[int_col].sum()
    assert isinstance(result, ex_class)
    assert isinstance(result.op(), ops.Sum)
def test_sum_expr_basics_floats(table, float_col):
    """Floating-point column sums produce a FloatingScalar."""
    # Impala gives double for all floating point types
    ex_class = ir.FloatingScalar
    result = table[float_col].sum()
    assert isinstance(result, ex_class)
    assert isinstance(result.op(), ops.Sum)
def test_mean_expr_basics(table, numeric_col):
    """mean() of any numeric column is a FloatingScalar backed by ops.Mean."""
    result = table[numeric_col].mean()
    assert isinstance(result, ir.FloatingScalar)
    assert isinstance(result.op(), ops.Mean)
def test_aggregate_no_keys(table):
    """aggregate() without grouping keys still yields a TableExpr."""
    metrics = [
        table['a'].sum().name('sum(a)'),
        table['c'].mean().name('mean(c)'),
    ]
    # A TableExpr, which in SQL at least will yield a table with a single
    # row
    result = table.aggregate(metrics)
    assert isinstance(result, TableExpr)
def test_aggregate_keys_basic(table):
    """aggregate() with a grouping key yields a repr-able TableExpr."""
    metrics = [
        table['a'].sum().name('sum(a)'),
        table['c'].mean().name('mean(c)'),
    ]
    # A TableExpr, which in SQL at least will yield a table with a single
    # row
    result = table.aggregate(metrics, by=['g'])
    assert isinstance(result, TableExpr)
    # it works!
    repr(result)
def test_aggregate_non_list_inputs(table):
    """metric/by/having accept bare values as well as lists (per #150)."""
    # per #150
    metric = table.f.sum().name('total')
    by = 'g'
    having = table.c.sum() > 10
    result = table.aggregate(metric, by=by, having=having)
    expected = table.aggregate([metric], by=[by], having=[having])
    assert_equal(result, expected)
def test_aggregate_keywords(table):
    """Keyword metrics (values or lambdas) name the aggregates."""
    t = table
    expr = t.aggregate(foo=t.f.sum(), bar=lambda x: x.f.mean(), by='g')
    expr2 = t.group_by('g').aggregate(foo=t.f.sum(), bar=lambda x: x.f.mean())
    expected = t.aggregate(
        [t.f.mean().name('bar'), t.f.sum().name('foo')], by='g'
    )
    assert_equal(expr, expected)
    assert_equal(expr2, expected)
def test_groupby_alias(table):
    """groupby() is an alias for group_by()."""
    assert_equal(table.groupby('g').size(), table.group_by('g').size())
def test_summary_expand_list(table):
    """A summary() bundle expands to its member metrics inside aggregate()."""
    summ = table.f.summary()
    metric = table.g.group_concat().name('bar')
    result = table.aggregate([metric, summ])
    expected = table.aggregate([metric] + summ)
    assert_equal(result, expected)
def test_summary_prefix_suffix(table):
    """summary() applies prefix/suffix to every generated metric name."""
    def get_names(exprs):
        # Helper: collect the assigned name of each metric expression.
        return [e.get_name() for e in exprs]
    assert get_names(table.g.summary(prefix="string_")) == [
        'string_count',
        'string_nulls',
        'string_uniques',
    ]
    assert get_names(table.g.summary(suffix="_string")) == [
        'count_string',
        'nulls_string',
        'uniques_string',
    ]
    assert get_names(table.g.summary(prefix="pre_", suffix="_post")) == [
        'pre_count_post',
        'pre_nulls_post',
        'pre_uniques_post',
    ]
    assert get_names(table.f.summary(prefix="float_")) == [
        "float_count",
        "float_nulls",
        "float_min",
        "float_max",
        "float_sum",
        "float_mean",
        "float_approx_nunique",
    ]
    assert get_names(table.f.summary(suffix="_numeric")) == [
        "count_numeric",
        "nulls_numeric",
        "min_numeric",
        "max_numeric",
        "sum_numeric",
        "mean_numeric",
        "approx_nunique_numeric",
    ]
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_aggregate_invalid(table):
    """Placeholder (not yet tested): reject non-aggregate metrics."""
    # Pass a non-aggregation or non-scalar expr
    assert False
def test_filter_aggregate_pushdown_predicate(table):
    """A root-table predicate on an aggregate pushes below the aggregation."""
    # In the case where we want to add a predicate to an aggregate
    # expression after the fact, rather than having to backpedal and add it
    # before calling aggregate.
    #
    # TODO (design decision): This could happen automatically when adding a
    # predicate originating from the same root table; if an expression is
    # created from field references from the aggregated table then it
    # becomes a filter predicate applied on top of a view
    pred = table.f > 0
    metrics = [table.a.sum().name('total')]
    agged = table.aggregate(metrics, by=['g'])
    filtered = agged.filter([pred])
    expected = table[pred].aggregate(metrics, by=['g'])
    assert_equal(filtered, expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_filter_aggregate_partial_pushdown(table):
    """Placeholder (not yet tested): partial predicate pushdown."""
    assert False
@pytest.mark.parametrize(
    "case_fn",
    [
        pytest.param(lambda t: t.f.sum(), id="non_boolean"),
        pytest.param(lambda t: t.f > 2, id="non_scalar"),
    ],
)
def test_aggregate_post_predicate(table, case_fn):
    """Non-boolean or non-scalar having clauses raise IbisTypeError."""
    # Test invalid having clause
    metrics = [table.f.sum().name('total')]
    by = ['g']
    having = [case_fn(table)]
    with pytest.raises(com.IbisTypeError):
        table.aggregate(metrics, by=by, having=having)
def test_group_by_having_api(table):
    """group_by().having() composes like aggregate(..., having=...)."""
    # #154, add a HAVING post-predicate in a composable way
    metric = table.f.sum().name('foo')
    postp = table.d.mean() > 1
    expr = table.group_by('g').having(postp).aggregate(metric)
    expected = table.aggregate(metric, by='g', having=postp)
    assert_equal(expr, expected)
def test_group_by_kwargs(table):
    """Keyword group keys are equivalent to explicitly named expressions."""
    t = table
    expr = t.group_by(['f', t.h], z='g', z2=t.d).aggregate(
        t.d.mean().name('foo')
    )
    expected = t.group_by(['f', t.h, t.g.name('z'), t.d.name('z2')]).aggregate(
        t.d.mean().name('foo')
    )
    assert_equal(expr, expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_aggregate_root_table_internal(table):
    """Placeholder (not yet tested)."""
    assert False
def test_compound_aggregate_expr(table):
    """A ratio of two reductions is itself recognized as a reduction."""
    # See ibis #24
    compound_expr = (table['a'].sum() / table['a'].mean()).name('foo')
    assert L.is_reduction(compound_expr)
    # Validates internally
    table.aggregate([compound_expr])
def test_groupby_convenience(table):
    """group_by().aggregate() equals aggregate(by=...) for names and exprs."""
    metrics = [table.f.sum().name('total')]
    expr = table.group_by('g').aggregate(metrics)
    expected = table.aggregate(metrics, by=['g'])
    assert_equal(expr, expected)
    group_expr = table.g.cast('double').name('g')
    expr = table.group_by(group_expr).aggregate(metrics)
    expected = table.aggregate(metrics, by=[group_expr])
    assert_equal(expr, expected)
def test_group_by_count_size(table):
    """Grouped size()/count() are sugar for a count aggregate (per #148)."""
    # #148, convenience for interactive use, and so forth
    result1 = table.group_by('g').size()
    result2 = table.group_by('g').count()
    expected = table.group_by('g').aggregate([table.count().name('count')])
    assert_equal(result1, expected)
    assert_equal(result2, expected)
    result = table.group_by('g').count('foo')
    expected = table.group_by('g').aggregate([table.count().name('foo')])
    assert_equal(result, expected)
def test_group_by_column_select_api(table):
    """grouped.<col>.<fn>() is sugar for the equivalent named aggregate."""
    grouped = table.group_by('g')
    result = grouped.f.sum()
    expected = grouped.aggregate(table.f.sum().name('sum(f)'))
    assert_equal(result, expected)
    supported_functions = ['sum', 'mean', 'count', 'size', 'max', 'min']
    # make sure they all work
    for fn in supported_functions:
        getattr(grouped.f, fn)()
def test_value_counts_convenience(table):
    """value_counts() is sugar for group_by + count (per #152)."""
    # #152
    result = table.g.value_counts()
    expected = table.group_by('g').aggregate(table.count().name('count'))
    assert_equal(result, expected)
def test_isin_value_counts(table):
    """value_counts() works on a named boolean notin() expression (per #157)."""
    # #157, this code path was untested before
    bool_clause = table.g.notin(['1', '4', '7'])
    # it works!
    bool_clause.name('notin').value_counts()
def test_value_counts_unnamed_expr(con):
    """value_counts() on an unnamed expression defaults to name 'unnamed'."""
    nation = con.table('tpch_nation')
    expr = nation.n_name.lower().value_counts()
    expected = nation.n_name.lower().name('unnamed').value_counts()
    assert_equal(expr, expected)
def test_aggregate_unnamed_expr(con):
    """Grouping by an unnamed derived expression raises ExpressionError."""
    nation = con.table('tpch_nation')
    expr = nation.n_name.lower().left(1)
    with pytest.raises(com.ExpressionError):
        nation.group_by(expr).aggregate(nation.count().name('metric'))
def test_default_reduction_names(table):
    """Each reduction auto-names itself after its operation."""
    d = table.f
    cases = [
        (d.count(), 'count'),
        (d.sum(), 'sum'),
        (d.mean(), 'mean'),
        (d.approx_nunique(), 'approx_nunique'),
        (d.approx_median(), 'approx_median'),
        (d.min(), 'min'),
        (d.max(), 'max'),
    ]
    for expr, ex_name in cases:
        assert expr.get_name() == ex_name
def test_join_no_predicate_list(con):
    """inner_join() accepts a bare predicate as well as a list."""
    region = con.table('tpch_region')
    nation = con.table('tpch_nation')
    pred = region.r_regionkey == nation.n_regionkey
    joined = region.inner_join(nation, pred)
    expected = region.inner_join(nation, [pred])
    assert_equal(joined, expected)
def test_asof_join():
    """asof_join on a shared column name builds an equality predicate on it."""
    left = ibis.table([('time', 'int32'), ('value', 'double')])
    right = ibis.table([('time', 'int32'), ('value2', 'double')])
    joined = api.asof_join(left, right, 'time')
    pred = joined.op().predicates[0].op()
    assert pred.left.op().name == pred.right.op().name == 'time'
def test_asof_join_with_by():
    """asof_join's `by` argument matches equally-named columns on both sides."""
    left = ibis.table(
        [('time', 'int32'), ('key', 'int32'), ('value', 'double')]
    )
    right = ibis.table(
        [('time', 'int32'), ('key', 'int32'), ('value2', 'double')]
    )
    joined = api.asof_join(left, right, 'time', by='key')
    by = joined.op().by[0].op()
    assert by.left.op().name == by.right.op().name == 'key'
@pytest.mark.parametrize(
    ('ibis_interval', 'timedelta_interval'),
    [
        [ibis.interval(days=2), pd.Timedelta('2 days')],
        [ibis.interval(days=2), datetime.timedelta(days=2)],
        [ibis.interval(hours=5), pd.Timedelta('5 hours')],
        [ibis.interval(hours=5), datetime.timedelta(hours=5)],
        [ibis.interval(minutes=7), pd.Timedelta('7 minutes')],
        [ibis.interval(minutes=7), datetime.timedelta(minutes=7)],
        [ibis.interval(seconds=9), pd.Timedelta('9 seconds')],
        [ibis.interval(seconds=9), datetime.timedelta(seconds=9)],
        [ibis.interval(milliseconds=11), pd.Timedelta('11 milliseconds')],
        [ibis.interval(milliseconds=11), datetime.timedelta(milliseconds=11)],
        [ibis.interval(microseconds=15), pd.Timedelta('15 microseconds')],
        [ibis.interval(microseconds=15), datetime.timedelta(microseconds=15)],
        [ibis.interval(nanoseconds=17), pd.Timedelta('17 nanoseconds')],
    ],
)
def test_asof_join_with_tolerance(ibis_interval, timedelta_interval):
    """Tolerance accepts ibis intervals and coerces timedelta-likes to one."""
    left = ibis.table(
        [('time', 'int32'), ('key', 'int32'), ('value', 'double')]
    )
    right = ibis.table(
        [('time', 'int32'), ('key', 'int32'), ('value2', 'double')]
    )
    joined = api.asof_join(left, right, 'time', tolerance=ibis_interval)
    tolerance = joined.op().tolerance
    assert_equal(tolerance, ibis_interval)
    joined = api.asof_join(left, right, 'time', tolerance=timedelta_interval)
    tolerance = joined.op().tolerance
    assert isinstance(tolerance, ir.IntervalScalar)
    assert isinstance(tolerance.op(), ops.Literal)
def test_equijoin_schema_merge():
    """Materialized equijoins concatenate both tables' schemas."""
    table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
    table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
    pred = table1['key1'] == table2['key2']
    join_types = ['inner_join', 'left_join', 'outer_join']
    ex_schema = api.Schema(
        ['key1', 'value1', 'key2', 'stuff'],
        ['string', 'double', 'string', 'int32'],
    )
    for fname in join_types:
        f = getattr(table1, fname)
        joined = f(table2, [pred]).materialize()
        assert_equal(joined.schema(), ex_schema)
def test_join_combo_with_projection(table):
    """Overlapping column names are fine when the projection disambiguates."""
    # Test a case where there is column name overlap, but the projection
    # passed makes it a non-issue. Highly relevant with self-joins
    #
    # For example, where left/right have some field names in common:
    # SELECT left.*, right.a, right.b
    # FROM left join right on left.key = right.key
    t = table
    t2 = t.mutate(foo=t.f * 2, bar=t.f * 4)
    # this works
    joined = t.left_join(t2, [t['g'] == t2['g']])
    proj = joined.projection([t, t2['foo'], t2['bar']])
    repr(proj)
def test_join_getitem_projection(con):
    """Indexing a join with one side's table projects that side."""
    region = con.table('tpch_region')
    nation = con.table('tpch_nation')
    pred = region.r_regionkey == nation.n_regionkey
    joined = region.inner_join(nation, pred)
    result = joined[nation]
    expected = joined.projection(nation)
    assert_equal(result, expected)
def test_self_join(table):
    """Self-join via view(): repr works, materialize fails, project/agg work."""
    # Self-joins are problematic with this design because column
    # expressions may reference either the left or right For example:
    #
    # SELECT left.key, sum(left.value - right.value) as total_deltas
    # FROM table left
    # INNER JOIN table right
    # ON left.current_period = right.previous_period + 1
    # GROUP BY 1
    #
    # One way around the self-join issue is to force the user to add
    # prefixes to the joined fields, then project using those. Not that
    # satisfying, though.
    left = table
    right = table.view()
    metric = (left['a'] - right['b']).mean().name('metric')
    joined = left.inner_join(right, [right['g'] == left['g']])
    # basic check there's no referential problems
    result_repr = repr(joined)
    assert 'ref_0' in result_repr
    assert 'ref_1' in result_repr
    # Cannot be immediately materialized because of the schema overlap
    with pytest.raises(RelationError):
        joined.materialize()
    # Project out left table schema
    proj = joined[[left]]
    assert_equal(proj.schema(), left.schema())
    # Try aggregating on top of joined
    aggregated = joined.aggregate([metric], by=[left['g']])
    ex_schema = api.Schema(['g', 'metric'], ['string', 'double'])
    assert_equal(aggregated.schema(), ex_schema)
def test_self_join_no_view_convenience(table):
    """Self-join on column-name pairs implicitly views the right side (#165)."""
    # #165, self joins ought to be possible when the user specifies the
    # column names to join on rather than referentially-valid expressions
    result = table.join(table, [('g', 'g')])
    t2 = table.view()
    expected = table.join(t2, table.g == t2.g)
    assert_equal(result, expected)
def test_materialized_join_reference_bug(con):
    """Regression for GH #403: column refs survive a chained materialize."""
    # GH#403
    orders = con.table('tpch_orders')
    customer = con.table('tpch_customer')
    lineitem = con.table('tpch_lineitem')
    items = (
        orders.join(lineitem, orders.o_orderkey == lineitem.l_orderkey)[
            lineitem, orders.o_custkey, orders.o_orderpriority
        ]
        .join(customer, [('o_custkey', 'c_custkey')])
        .materialize()
    )
    items['o_orderpriority'].value_counts()
def test_join_project_after(table):
    """Post-join projections mix star selections with individual columns."""
    # e.g.
    #
    # SELECT L.foo, L.bar, R.baz, R.qux
    # FROM table1 L
    # INNER JOIN table2 R
    # ON L.key = R.key
    #
    # or
    #
    # SELECT L.*, R.baz
    # ...
    #
    # The default for a join is selecting all fields if possible
    table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
    table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
    pred = table1['key1'] == table2['key2']
    joined = table1.left_join(table2, [pred])
    projected = joined.projection([table1, table2['stuff']])
    assert projected.schema().names == ['key1', 'value1', 'stuff']
    projected = joined.projection([table2, table1['key1']])
    assert projected.schema().names == ['key2', 'stuff', 'key1']
def test_semi_join_schema(table):
    """A left semi join keeps only the left table's schema."""
    # A left semi join discards the schema of the right table
    table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
    table2 = ibis.table([('key2', 'string'), ('stuff', 'double')])
    pred = table1['key1'] == table2['key2']
    semi_joined = table1.semi_join(table2, [pred]).materialize()
    result_schema = semi_joined.schema()
    assert_equal(result_schema, table1.schema())
def test_cross_join(table):
    """Cross join with a scalar-aggregate table appends the metric columns."""
    metrics = [
        table['a'].sum().name('sum_a'),
        table['b'].mean().name('mean_b'),
    ]
    scalar_aggs = table.aggregate(metrics)
    joined = table.cross_join(scalar_aggs).materialize()
    agg_schema = api.Schema(['sum_a', 'mean_b'], ['int64', 'double'])
    ex_schema = table.schema().append(agg_schema)
    assert_equal(joined.schema(), ex_schema)
def test_cross_join_multiple(table):
    """ibis.cross_join(a, b, c) nests as a.cross_join(b.cross_join(c))."""
    a = table['a', 'b', 'c']
    b = table['d', 'e']
    c = table['f', 'h']
    joined = ibis.cross_join(a, b, c)
    expected = a.cross_join(b.cross_join(c))
    assert joined.equals(expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_join_compound_boolean_predicate(table):
# The user might have composed predicates through logical operations
assert False
def test_filter_join_unmaterialized(table):
    # Filtering an unmaterialized join by a column of one side is allowed.
    table1 = ibis.table(
        {'key1': 'string', 'key2': 'string', 'value1': 'double'}
    )
    table2 = ibis.table({'key3': 'string', 'value2': 'double'})
    # It works!
    joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
    filtered = joined.filter([table1.value1 > 0])
    repr(filtered)
def test_join_overlapping_column_names(table):
    # String join keys refer to the same-named column on both sides, even
    # when other column names overlap between the tables.
    t1 = ibis.table(
        [('foo', 'string'), ('bar', 'string'), ('value1', 'double')]
    )
    t2 = ibis.table(
        [('foo', 'string'), ('bar', 'string'), ('value2', 'double')]
    )
    joined = t1.join(t2, 'foo')
    expected = t1.join(t2, t1.foo == t2.foo)
    assert_equal(joined, expected)
    joined = t1.join(t2, ['foo', 'bar'])
    expected = t1.join(t2, [t1.foo == t2.foo, t1.bar == t2.bar])
    assert_equal(joined, expected)
def test_join_key_alternatives(con):
    # Join keys may be given as name tuples, expression tuples, or a single
    # boolean expression; all normalize to the same join.
    t1 = con.table('star1')
    t2 = con.table('star2')
    # Join with tuples
    joined = t1.inner_join(t2, [('foo_id', 'foo_id')])
    joined2 = t1.inner_join(t2, [(t1.foo_id, t2.foo_id)])
    # Join with single expr
    joined3 = t1.inner_join(t2, t1.foo_id == t2.foo_id)
    expected = t1.inner_join(t2, [t1.foo_id == t2.foo_id])
    assert_equal(joined, expected)
    assert_equal(joined2, expected)
    assert_equal(joined3, expected)
    # a key tuple must have exactly two elements
    with pytest.raises(com.ExpressionError):
        t1.inner_join(t2, [('foo_id', 'foo_id', 'foo_id')])
def test_join_invalid_refs(con):
    # A predicate referencing a table that is not part of the join fails.
    t1 = con.table('star1')
    t2 = con.table('star2')
    t3 = con.table('star3')
    predicate = t1.bar_id == t3.bar_id
    with pytest.raises(com.RelationError):
        t1.inner_join(t2, [predicate])
def test_join_invalid_expr_type(con):
    # The right-hand side of a join must be a table expression, not a column.
    left = con.table('star1')
    invalid_right = left.foo_id
    join_key = ['bar_id']
    with pytest.raises(
        NotImplementedError,
        match=r'string __getitem__\[str\]',
    ):
        left.inner_join(invalid_right, join_key)
def test_join_non_boolean_expr(con):
    # A join predicate must be boolean-valued.
    t1 = con.table('star1')
    t2 = con.table('star2')
    # oops
    predicate = t1.f * t2.value1
    with pytest.raises(com.ExpressionError):
        t1.inner_join(t2, [predicate])
def test_unravel_compound_equijoin(table):
    # A conjunction of equality predicates is unraveled into the equivalent
    # list of individual equijoin predicates.
    t1 = ibis.table(
        [
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value1', 'double'),
        ],
        'foo_table',
    )
    t2 = ibis.table(
        [
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value2', 'double'),
        ],
        'bar_table',
    )
    p1 = t1.key1 == t2.key1
    p2 = t1.key2 == t2.key2
    p3 = t1.key3 == t2.key3
    joined = t1.inner_join(t2, [p1 & p2 & p3])
    expected = t1.inner_join(t2, [p1, p2, p3])
    assert_equal(joined, expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_join_add_prefixes(table):
    assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_join_nontrivial_exprs(table):
    assert False
def test_union(
    setops_table_foo,
    setops_table_bar,
    setops_table_baz,
    setops_relation_error_message,
):
    # union defaults to non-distinct; distinct=True requests distinct rows
    result = setops_table_foo.union(setops_table_bar)
    assert isinstance(result.op(), ops.Union)
    assert not result.op().distinct
    result = setops_table_foo.union(setops_table_bar, distinct=True)
    assert result.op().distinct
    # schema-incompatible operands are rejected
    with pytest.raises(RelationError, match=setops_relation_error_message):
        setops_table_foo.union(setops_table_baz)
def test_intersection(
    setops_table_foo,
    setops_table_bar,
    setops_table_baz,
    setops_relation_error_message,
):
    result = setops_table_foo.intersect(setops_table_bar)
    assert isinstance(result.op(), ops.Intersection)
    with pytest.raises(RelationError, match=setops_relation_error_message):
        setops_table_foo.intersect(setops_table_baz)
def test_difference(
    setops_table_foo,
    setops_table_bar,
    setops_table_baz,
    setops_relation_error_message,
):
    result = setops_table_foo.difference(setops_table_bar)
    assert isinstance(result.op(), ops.Difference)
    with pytest.raises(RelationError, match=setops_relation_error_message):
        setops_table_foo.difference(setops_table_baz)
def test_column_ref_on_projection_rename(con):
    # Aggregating by a renamed join column after a projection works.
    region = con.table('tpch_region')
    nation = con.table('tpch_nation')
    customer = con.table('tpch_customer')
    joined = region.inner_join(
        nation, [region.r_regionkey == nation.n_regionkey]
    ).inner_join(customer, [customer.c_nationkey == nation.n_nationkey])
    proj_exprs = [
        customer,
        nation.n_name.name('nation'),
        region.r_name.name('region'),
    ]
    joined = joined.projection(proj_exprs)
    metrics = [joined.c_acctbal.sum().name('metric')]
    # it works!
    joined.aggregate(metrics, by=['region'])
@pytest.fixture
def t1():
    # left table for the existence-predicate tests below
    return ibis.table(
        [('key1', 'string'), ('key2', 'string'), ('value1', 'double')], 'foo'
    )
@pytest.fixture
def t2():
    # right table for the existence-predicate tests below
    return ibis.table([('key1', 'string'), ('key2', 'string')], 'bar')
def test_simple_existence_predicate(t1, t2):
    cond = (t1.key1 == t2.key1).any()
    assert isinstance(cond, ir.BooleanColumn)
    op = cond.op()
    assert isinstance(op, ops.Any)
    # it works!
    expr = t1[cond]
    assert isinstance(expr.op(), ops.Selection)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_cannot_use_existence_expression_in_join(table):
    # Join predicates must consist only of comparisons
    assert False
def test_not_exists_predicate(t1, t2):
    # negating an Any produces a NotAny node
    cond = -((t1.key1 == t2.key1).any())
    assert isinstance(cond.op(), ops.NotAny)
def test_aggregate_metrics(table):
    # Metrics may be given as callables taking the table; they normalize to
    # the equivalent explicit expressions.
    functions = [
        lambda x: x.e.sum().name('esum'),
        lambda x: x.f.sum().name('fsum'),
    ]
    exprs = [table.e.sum().name('esum'), table.f.sum().name('fsum')]
    result = table.aggregate(functions[0])
    expected = table.aggregate(exprs[0])
    assert_equal(result, expected)
    result = table.aggregate(functions)
    expected = table.aggregate(exprs)
    assert_equal(result, expected)
def test_group_by_keys(table):
    # group_by keys may be callables as well
    m = table.mutate(foo=table.f * 2, bar=table.e / 2)
    expr = m.group_by(lambda x: x.foo).size()
    expected = m.group_by('foo').size()
    assert_equal(expr, expected)
    expr = m.group_by([lambda x: x.foo, lambda x: x.bar]).size()
    expected = m.group_by(['foo', 'bar']).size()
    assert_equal(expr, expected)
def test_having(table):
    # having predicates may be callables
    m = table.mutate(foo=table.f * 2, bar=table.e / 2)
    expr = m.group_by('foo').having(lambda x: x.foo.sum() > 10).size()
    expected = m.group_by('foo').having(m.foo.sum() > 10).size()
    assert_equal(expr, expected)
def test_filter(table):
    # filter predicates may be callables, singly or in lists
    m = table.mutate(foo=table.f * 2, bar=table.e / 2)
    result = m.filter(lambda x: x.foo > 10)
    result2 = m[lambda x: x.foo > 10]
    expected = m[m.foo > 10]
    assert_equal(result, expected)
    assert_equal(result2, expected)
    result = m.filter([lambda x: x.foo > 10, lambda x: x.bar < 0])
    expected = m.filter([m.foo > 10, m.bar < 0])
    assert_equal(result, expected)
def test_sort_by2(table):
    # sort keys may be callables, including inside ibis.desc
    m = table.mutate(foo=table.e + table.f)
    result = m.sort_by(lambda x: -x.foo)
    expected = m.sort_by(-m.foo)
    assert_equal(result, expected)
    result = m.sort_by(lambda x: ibis.desc(x.foo))
    expected = m.sort_by(ibis.desc('foo'))
    assert_equal(result, expected)
    result = m.sort_by(ibis.desc(lambda x: x.foo))
    expected = m.sort_by(ibis.desc('foo'))
    assert_equal(result, expected)
def test_projection2(table):
    # projections accept callables alongside column names
    m = table.mutate(foo=table.f * 2)
    def f(x):
        return (x.foo * 2).name('bar')
    result = m.projection([f, 'f'])
    result2 = m[f, 'f']
    expected = m.projection([f(m), 'f'])
    assert_equal(result, expected)
    assert_equal(result2, expected)
def test_mutate2(table):
    # chained mutates with callables resolve against the running table
    m = table.mutate(foo=table.f * 2)
    def g(x):
        return x.foo * 2
    def h(x):
        return x.bar * 2
    result = m.mutate(bar=g).mutate(baz=h)
    m2 = m.mutate(bar=g(m))
    expected = m2.mutate(baz=h(m2))
    assert_equal(result, expected)
def test_groupby_mutate(table):
    # callables in grouped mutate resolve against the parent table
    t = table
    g = t.group_by('g').order_by('f')
    expr = g.mutate(foo=lambda x: x.f.lag(), bar=lambda x: x.f.rank())
    expected = g.mutate(foo=t.f.lag(), bar=t.f.rank())
    assert_equal(expr, expected)
def test_groupby_projection(table):
    # callables in grouped projection resolve against the parent table
    t = table
    g = t.group_by('g').order_by('f')
    expr = g.projection(
        [lambda x: x.f.lag().name('foo'), lambda x: x.f.rank().name('bar')]
    )
    expected = g.projection([t.f.lag().name('foo'), t.f.rank().name('bar')])
    assert_equal(expr, expected)
def test_set_column(table):
    # set_column accepts a callable for the replacement value
    def g(x):
        return x.f * 2
    result = table.set_column('f', g)
    expected = table.set_column('f', table.f * 2)
    assert_equal(result, expected)
def test_pickle_table_expr():
    schema = [('time', 'timestamp'), ('key', 'string'), ('value', 'double')]
    t0 = ibis.table(schema, name='t0')
    # protocol=2 keeps the payload loadable by older pickle readers
    raw = pickle.dumps(t0, protocol=2)
    t1 = pickle.loads(raw)
    assert t1.equals(t0)
def test_pickle_table_node(table):
    n0 = table.op()
    assert_pickle_roundtrip(n0)
def test_pickle_projection_node(table):
    m = table.mutate(foo=table.f * 2)
    def f(x):
        return (x.foo * 2).name('bar')
    node = m.projection([f, 'f']).op()
    assert_pickle_roundtrip(node)
def test_pickle_group_by(table):
    m = table.mutate(foo=table.f * 2, bar=table.e / 2)
    expr = m.group_by(lambda x: x.foo).size()
    node = expr.op()
    assert_pickle_roundtrip(node)
def test_pickle_asof_join():
    left = ibis.table([('time', 'int32'), ('value', 'double')])
    right = ibis.table([('time', 'int32'), ('value2', 'double')])
    joined = api.asof_join(left, right, 'time')
    node = joined.op()
    assert_pickle_roundtrip(node)
def test_group_by_key_function():
    # a callable group key is named via the keyword argument
    t = ibis.table([('a', 'timestamp'), ('b', 'string'), ('c', 'double')])
    expr = t.groupby(new_key=lambda t: t.b.length()).aggregate(foo=t.c.mean())
    assert expr.columns == ['new_key', 'foo']
def test_unbound_table_name():
    # unnamed tables get an auto-generated unbound_table_<n> name
    t = ibis.table([('a', 'timestamp')])
    name = t.op().name
    match = re.match(r'^unbound_table_\d+$', name)
    assert match is not None
def test_mutate_chain():
    one = ibis.table([('a', 'string'), ('b', 'string')], name='t')
    two = one.mutate(b=lambda t: t.b.fillna('Short Term'))
    three = two.mutate(a=lambda t: t.a.fillna('Short Term'))
    a, b = three.op().selections
    # we can't fuse these correctly yet
    assert isinstance(a.op(), ops.IfNull)
    assert isinstance(b.op(), ops.TableColumn)
    assert isinstance(b.op().table.op().selections[1].op(), ops.IfNull)
|
{
"content_hash": "2a8a33c359b5b37e7b5684eac6277d1e",
"timestamp": "",
"source": "github",
"line_count": 1406,
"max_line_length": 79,
"avg_line_length": 27.77524893314367,
"alnum_prop": 0.6227593977261088,
"repo_name": "cloudera/ibis",
"id": "8d452b2262b972e94365c564ce6f3042a8291fad",
"size": "39052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/tests/expr/test_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
}
|
from pylons import Response, c, g, cache, request, session
from pylons.controllers import WSGIController, XMLRPCController
from pylons.controllers.util import abort, redirect_to, etag_cache
from pylons.decorators import jsonify, validate
from pylons.templating import render, render_response
from pylons.i18n import N_, _, ungettext
import projectname.model as model
import projectname.lib.helpers as h
class BaseController(WSGIController):
    """Project-wide WSGI controller base class; per-request hooks go in
    __call__."""
    def __call__(self, environ, start_response):
        # Insert any code to be run per request here. The Routes match
        # is under environ['pylons.routes_dict'] should you want to check
        # the action or route vars here
        return WSGIController.__call__(self, environ, start_response)
# Include the '_' function in the public names
__all__ = [__name for __name in locals().keys() if not __name.startswith('_') \
or __name == '_']
|
{
"content_hash": "36498009d92ab1c84ba99bd8b415ed4e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 47.526315789473685,
"alnum_prop": 0.7220376522702104,
"repo_name": "santisiri/popego",
"id": "2df549eee7079d4da5da289b90249ad59ea80aff",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/Pylons-0.9.6.1-py2.5.egg/tests/test_webapps/filestotest/base_with_xmlrpc.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
}
|
"""
FILE: sample_translation_with_glossaries_async.py
DESCRIPTION:
This sample demonstrates how to translate documents and apply custom glossaries to the translation.
To set up your containers for translation and generate SAS tokens to your containers (or files)
with the appropriate permissions, see the README.
USAGE:
python sample_translation_with_glossaries_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_DOCUMENT_TRANSLATION_ENDPOINT - the endpoint to your Document Translation resource.
2) AZURE_DOCUMENT_TRANSLATION_KEY - your Document Translation API key.
3) AZURE_SOURCE_CONTAINER_URL - the container SAS URL to your source container which has the documents
to be translated.
4) AZURE_TARGET_CONTAINER_URL - the container SAS URL to your target container where the translated documents
will be written.
5) AZURE_TRANSLATION_GLOSSARY_URL - the SAS URL to your glossary file
"""
import asyncio
async def sample_translation_with_glossaries_async():
    """Translate every document in the source container to Spanish ("es"),
    applying a TSV glossary, then print the overall and per-document status.

    All configuration comes from environment variables (see the module
    docstring). Imports are function-local so the sample module can be
    imported without the azure packages installed.
    """
    import os
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.translation.document.aio import DocumentTranslationClient
    from azure.ai.translation.document import (
        TranslationGlossary
    )
    endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
    key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
    source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
    target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
    glossary_url = os.environ["AZURE_TRANSLATION_GLOSSARY_URL"]
    client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
    async with client:
        # long-running operation: poller completes when the whole batch is done
        poller = await client.begin_translation(
            source_container_url,
            target_container_url,
            "es",
            glossaries=[TranslationGlossary(glossary_url=glossary_url, file_format="TSV")]
        )
        result = await poller.result()
        print(f"Status: {poller.status()}")
        print(f"Created on: {poller.details.created_on}")
        print(f"Last updated on: {poller.details.last_updated_on}")
        print(f"Total number of translations on documents: {poller.details.documents_total_count}")
        print("\nOf total documents...")
        print(f"{poller.details.documents_failed_count} failed")
        print(f"{poller.details.documents_succeeded_count} succeeded")
        async for document in result:
            print(f"Document ID: {document.id}")
            print(f"Document status: {document.status}")
            if document.status == "Succeeded":
                print(f"Source document location: {document.source_document_url}")
                print(f"Translated document location: {document.translated_document_url}")
                print(f"Translated to language: {document.translated_to}\n")
            else:
                print(f"Error Code: {document.error.code}, Message: {document.error.message}\n")
async def main():
    # entry-point wrapper for asyncio.run
    await sample_translation_with_glossaries_async()
if __name__ == '__main__':
    asyncio.run(main())
|
{
"content_hash": "8e5bb9ddf2364d1a3bd9b163397ec40a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 113,
"avg_line_length": 41.11842105263158,
"alnum_prop": 0.69408,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d22828cd63510d7cdd9a00e0d3cc4b59408bce63",
"size": "3277",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/translation/azure-ai-translation-document/samples/async_samples/sample_translation_with_glossaries_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
"""
import primes
# primes needed for the 1..20 problem range (project-local helper module);
# presumably create_primes(20) includes 20's prime factors — TODO confirm bound
primeNumbers = primes.create_primes(20)
# global accumulator: prime -> maximal exponent seen so far
primeFactors = {}
def get_prime_factors(number, prime_numbers=None):
    """Return a dict mapping each prime in *prime_numbers* to its exponent
    in the factorization of *number* (0 when the prime does not divide it).

    *prime_numbers* defaults to the module-level ``primeNumbers`` list, so
    existing callers are unaffected; passing it explicitly makes the
    function usable (and testable) without the module globals.
    """
    if prime_numbers is None:
        prime_numbers = primeNumbers
    prime_factors = {}
    for p in prime_numbers:
        remaining = number
        factor = 0
        # floor division keeps the value an integer on Python 3 as well
        # (the original used /=, which yields a float under Python 3)
        while remaining % p == 0:
            remaining //= p
            factor += 1
        prime_factors[p] = factor
    return prime_factors
def merge_prime_factors(new_prime_factors, target=None):
    """Merge *new_prime_factors* into *target*, keeping the maximum
    exponent per prime.

    *target* defaults to the module-level ``primeFactors`` accumulator, so
    existing callers are unaffected; passing it explicitly avoids the
    hidden global and makes the function testable in isolation.
    """
    if target is None:
        target = primeFactors
    for p in new_prime_factors:
        if p not in target:
            target[p] = new_prime_factors[p]
        else:
            target[p] = max(target[p], new_prime_factors[p])
for i in xrange(2, 20):
merge_prime_factors(get_prime_factors(i))
product = 1
for x in primeFactors:
while primeFactors[x] > 0:
product *= x
primeFactors[x] -= 1
print str(product)
|
{
"content_hash": "78d83426bbc8f5ec73af7ba57d1054cd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 106,
"avg_line_length": 22.382978723404257,
"alnum_prop": 0.6349809885931559,
"repo_name": "pgrm/project-euler",
"id": "062217e57b17407209834c8d5dd68d816b984039",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0001-0050/05-Smallest_multiple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56689"
}
],
"symlink_target": ""
}
|
import os, sys, time, platform, random
import re, json, cookielib
# requirements
import requests, termcolor
# Use one shared Session for all requests in this module; rebinding the
# module name `requests` to the session is intentional (all later calls go
# through it).
requests = requests.Session()
# persist cookies across runs with an LWP-format jar file named 'cookies'
requests.cookies = cookielib.LWPCookieJar('cookies')
try:
    requests.cookies.load(ignore_discard=True)
except:
    # no saved cookie file yet; start with an empty jar
    pass
class Logging:
    """Minimal colored console logger (termcolor); all output is gated by
    the class-level ``flag`` switch."""
    # global on/off switch for every log level
    flag = True
    @staticmethod
    def error(msg):
        if Logging.flag == True:
            print "".join( [ termcolor.colored("ERROR", "red"), ": ", termcolor.colored(msg, "white") ] )
    @staticmethod
    def warn(msg):
        if Logging.flag == True:
            print "".join( [ termcolor.colored("WARN", "yellow"), ": ", termcolor.colored(msg, "white") ] )
    @staticmethod
    def info(msg):
        # attrs=['reverse', 'blink']
        if Logging.flag == True:
            print "".join( [ termcolor.colored("INFO", "magenta"), ": ", termcolor.colored(msg, "white") ] )
    @staticmethod
    def debug(msg):
        if Logging.flag == True:
            print "".join( [ termcolor.colored("DEBUG", "magenta"), ": ", termcolor.colored(msg, "white") ] )
    @staticmethod
    def success(msg):
        # NOTE(review): the label reads "SUCCES" (one S); looks like a typo,
        # but it is runtime output text, so it is left untouched here.
        if Logging.flag == True:
            print "".join( [ termcolor.colored("SUCCES", "green"), ": ", termcolor.colored(msg, "white") ] )
# Setting Logging
Logging.flag = True
class LoginPasswordError(Exception):
    """Raised on wrong account/password; logs the message on construction."""
    def __init__(self, message):
        # default message: "wrong account or password"
        if type(message) != type("") or message == "": self.message = u"帐号密码错误"
        else: self.message = message
        Logging.error(self.message)
class NetworkError(Exception):
    """Raised on network failures; logs the message on construction."""
    def __init__(self, message):
        # default message: "network error"
        if type(message) != type("") or message == "": self.message = u"网络异常"
        else: self.message = message
        Logging.error(self.message)
class AccountError(Exception):
    """Raised for unrecognized account formats; logs on construction."""
    def __init__(self, message):
        # default message: "wrong account type"
        if type(message) != type("") or message == "": self.message = u"帐号类型错误"
        else: self.message = message
        Logging.error(self.message)
def download_captcha():
    """Fetch the zhihu captcha image, save it locally, and return the code
    recognized by the external ``yunsu`` OCR service.

    Raises NetworkError when the image request does not return HTTP 200.
    """
    url = "http://www.zhihu.com/captcha.gif"
    # random query parameter busts any intermediate caching
    r = requests.get(url, params={"r": random.random()} )
    if int(r.status_code) != 200:
        raise NetworkError(u"验证码请求失败")
    image_name = u"captcha." + r.headers['content-type'].split("/")[1]
    # open( image_name, "wb").write(r.content)
    with open(image_name, 'wb') as fd:
        fd.write(r.content)
        # NOTE(review): fd.close() inside the with-block is redundant
        fd.close()
    from yunsu import get_pincode
    results = get_pincode()
    print results
    # the OCR service answers with XML; extract the <Result> payload
    p = re.compile(r'<Result>(.*?)</Result>', re.M)
    captcha_code = p.findall(results)[0]
    captcha_code = str(captcha_code)
    print captcha_code
    return captcha_code
def search_xsrf():
    """Scrape the hidden _xsrf form token from the zhihu home page.

    Returns the token string, or None when it cannot be found; raises
    NetworkError when the page request does not return HTTP 200.
    """
    url = "http://www.zhihu.com/"
    r = requests.get(url)
    if int(r.status_code) != 200:
        print r.content
        # NOTE(review): reuses the captcha wording ("captcha request failed")
        raise NetworkError(u"验证码请求失败")
    results = re.compile(r"\<input\stype=\"hidden\"\sname=\"_xsrf\"\svalue=\"(\S+)\"", re.DOTALL).findall(r.text)
    if len(results) < 1:
        # "failed to extract the XSRF token"
        Logging.info(u"提取XSRF 代码失败" )
        return None
    return results[0]
def build_form(account, password):
    """Build the login POST form for *account*.

    The account is classified by regex as a Chinese mobile number
    ("phone_num") or an e-mail address ("email"); anything else raises
    AccountError. The form is completed with the scraped _xsrf token and a
    freshly solved captcha.
    """
    if re.match(r"^1\d{10}$", account): account_type = "phone_num"
    elif re.match(r"^\S+\@\S+\.\S+$", account): account_type = "email"
    else: raise AccountError(u"帐号类型错误")
    form = {account_type: account, "password": password, "remember_me": True }
    form['_xsrf'] = search_xsrf()
    form['captcha'] = download_captcha()
    print form
    return form
def upload_form(form):
    """POST the login *form* to the endpoint matching its account type and
    translate the JSON response into a result dict.

    Returns {"result": True} on success, or {"error": {...}} with a code,
    message, and (when available) server-supplied data on failure. Raises
    NetworkError when the POST does not return HTTP 200.
    """
    if "email" in form: url = "http://www.zhihu.com/login/email"
    elif "phone_num" in form: url = "http://www.zhihu.com/login/phone_num"
    else: raise ValueError(u"账号类型错误")
    headers = {
        'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36",
        'Host': "www.zhihu.com",
        'Origin': "http://www.zhihu.com",
        'Pragma': "no-cache",
        'Referer': "http://www.zhihu.com/",
        'X-Requested-With': "XMLHttpRequest"
    }
    r = requests.post(url, data=form, headers=headers)
    if int(r.status_code) != 200:
        raise NetworkError(u"表单上传失败!")
    if r.headers['content-type'].lower() == "application/json":
        try:
            # Fix for the issue reported by justkg: https://github.com/egrcc/zhihu-python/issues/30
            result = json.loads(r.content)
        except Exception as e:
            Logging.error(u"JSON解析失败!")
            Logging.debug(e)
            Logging.debug(r.content)
            result = {}
        # server convention: r == 0 means success, r == 1 means failure
        if result["r"] == 0:
            Logging.success(u"登录成功!" )
            return {"result": True}
        elif result["r"] == 1:
            Logging.success(u"登录失败!" )
            return {"error": {"code": int(result['errcode']), "message": result['msg'], "data": result['data'] } }
        else:
            Logging.warn(u"表单上传出现未知错误: \n \t %s )" % ( str(result) ) )
            return {"error": {"code": -1, "message": u"unknow error"} }
    else:
        Logging.warn(u"无法解析服务器的响应内容: \n \t %s " % r.text )
        return {"error": {"code": -2, "message": u"parse error"} }
def islogin():
    """Return True when the saved session is still logged in, False when
    not, and None on any other (unexpected) HTTP status."""
    # check session
    url = "http://www.zhihu.com/settings/profile"
    r = requests.get(url, allow_redirects=False)
    status_code = int(r.status_code)
    if status_code == 301 or status_code == 302:
        # not logged in: the settings page redirects away
        return False
    elif status_code == 200:
        return True
    else:
        Logging.warn(u"网络故障")
        return None
def read_account_from_config_file(config_file="config.ini"):
    """Read (email, password) from the [info] section of *config_file*.

    Returns (None, None) when the file is missing or either field is empty.
    """
    # NOTE: The ConfigParser module has been renamed to configparser in Python 3.
    # The 2to3 tool will automatically adapt imports when converting your sources to Python 3.
    # https://docs.python.org/2/library/configparser.html
    from ConfigParser import ConfigParser
    cf = ConfigParser()
    if os.path.exists(config_file) and os.path.isfile(config_file):
        Logging.info(u"正在加载配置文件 ...")
        cf.read(config_file)
        email = cf.get("info", "email")
        password = cf.get("info", "password")
        if email == "" or password == "":
            Logging.warn(u"帐号信息无效")
            return (None, None)
        else: return (email, password)
    else:
        Logging.error(u"配置文件加载失败!")
        return (None, None)
def login(account=None, password=None):
    """Interactive login: reuse a saved session when possible, otherwise
    read credentials (config file, then prompt) and submit the login form.

    Returns True on success, False on an unrecoverable error; retries
    recursively when the captcha was entered incorrectly.
    """
    if islogin() == True:
        Logging.success(u"你已经登录过咯")
        return True
    if account == None:
        (account, password) = read_account_from_config_file()
        if account == None:
            account = raw_input("请输入登录帐号: ")
            password = raw_input("请输入登录密码: ")
    form_data = build_form(account, password)
    """
    result:
        {"result": True}
        {"error": {"code": 19855555, "message": "unknow.", "data": "data" } }
        {"error": {"code": -1, "message": u"unknow error"} }
    """
    result = upload_form(form_data)
    print result
    if "error" in result:
        if result["error"]['code'] == 1991829:
            # captcha was entered incorrectly; retry the whole login
            Logging.error(u"验证码输入错误,请准备重新输入。" )
            return login()
        else:
            Logging.warn(u"unknow error." )
            return False
    elif "result" in result and result['result'] == True:
        # login succeeded; persist the session cookies for next time
        Logging.success(u"登录成功!" )
        requests.cookies.save()
        return True
if __name__ == "__main__":
    # The original guard contained only commented-out example calls, which
    # leaves the suite empty — an IndentationError at import time. Keep the
    # examples as comments but make the suite syntactically valid.
    # login()
    # print requests.cookies
    pass
|
{
"content_hash": "51d1baf260213a45f7c4b23bd203a2b2",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 130,
"avg_line_length": 32.36725663716814,
"alnum_prop": 0.5797676008202324,
"repo_name": "richardGaoPy/NetSpider",
"id": "705ade66936f5182e59c10c304e8abbfe2206de2",
"size": "7735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zhihuspyder/zh_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "150816"
},
{
"name": "Python",
"bytes": "78976"
}
],
"symlink_target": ""
}
|
"""Interfaces for LPDDR5 memory devices
These memory "interfaces" contain the timing,energy,etc parameters for each
memory type and are usually based on datasheets for the memory devices.
You can use these interfaces in the MemCtrl object as the `dram` timing
interface.
"""
from m5.objects import DRAMInterface
class LPDDR5_5500_1x16_BG_BL32(DRAMInterface):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on
    initial JEDEC specification
    Starting with 5.5Gbps data rates and 8Gbit die
    Configuring for 16-bank mode with bank-group architecture
    burst of 32, which means bursts can be interleaved
    """
    # Increase buffer size to account for more bank resources
    read_buffer_size = 64
    # Set page policy to better suit DMC Huxley
    page_policy = "close_adaptive"
    # 16-bit channel interface
    device_bus_width = 16
    # LPDDR5 is a BL16 or BL32 device
    # With BG mode, BL16 and BL32 are supported
    # Use BL32 for higher command bandwidth
    burst_length = 32
    # size of device in bytes
    device_size = "1GiB"
    # 2KiB page with BG mode
    device_rowbuffer_size = "2KiB"
    # Use a 1x16 configuration
    devices_per_rank = 1
    # Use a single rank
    ranks_per_channel = 1
    # LPDDR5 supports configurable bank options
    # 8B : BL32, all frequencies
    # 16B : BL32 or BL16, <=3.2Gbps
    # 16B with Bank Group Arch (4B/BG): BL32 or BL16, >3.2Gbps
    # Initial configuration will have 16 banks with Bank Group Arch
    # to maximize resources and enable higher data rates
    banks_per_rank = 16
    bank_groups_per_rank = 4
    # 5.5Gb/s DDR with 4:1 WCK:CK ratio for 687.5 MHz CK
    tCK = "1.455ns"
    # Greater of 2 CK or 18ns
    tRCD = "18ns"
    # Base RL is 16 CK @ 687.5 MHz = 23.28ns
    tCL = "23.280ns"
    # Greater of 2 CK or 18ns
    tRP = "18ns"
    # Greater of 3 CK or 42ns
    tRAS = "42ns"
    # Greater of 3 CK or 34ns
    tWR = "34ns"
    # active powerdown and precharge powerdown exit time
    # Greater of 3 CK or 7ns
    tXP = "7ns"
    # self refresh exit time (tRFCab + 7.5ns)
    tXS = "217.5ns"
    # Greater of 2 CK or 7.5 ns minus 2 CK
    tRTP = "4.59ns"
    # With BG architecture, burst of 32 transferred in two 16-beat
    # sub-bursts, with a 16-beat gap in between.
    # Each 16-beat sub-burst is 8 WCK @2.75 GHz or 2 CK @ 687.5 MHz
    # tBURST is the delay to transfer the Bstof32 = 6 CK @ 687.5 MHz
    tBURST = "8.73ns"
    # can interleave a Bstof32 from another bank group at tBURST_MIN
    # 16-beats is 8 WCK @2.75 GHz or 2 CK @ 687.5 MHz
    tBURST_MIN = "2.91ns"
    # tBURST_MAX is the maximum burst delay for same bank group timing
    # this is 8 CK @ 687.5 MHz
    tBURST_MAX = "11.64ns"
    # 8 CK @ 687.5 MHz
    tCCD_L = "11.64ns"
    # LPDDR5, 8 Gbit/channel for 280ns tRFCab
    # NOTE(review): the comment above says 280ns but the value is 210ns;
    # tXS above (tRFCab + 7.5ns = 217.5ns) is consistent with 210ns —
    # confirm which tRFCab the datasheet specifies.
    tRFC = "210ns"
    tREFI = "3.9us"
    # Greater of 4 CK or 6.25 ns
    tWTR = "6.25ns"
    # Greater of 4 CK or 12 ns
    tWTR_L = "12ns"
    # Required RD-to-WR timing is RL+ BL/n + tWCKDQ0/tCK - WL
    # tWCKDQ0/tCK will be 1 CK for most cases
    # For gem5 RL = WL and BL/n is already accounted for with tBURST
    # Result is an additional 1 CK is required
    tRTW = "1.455ns"
    # Default different rank bus delay to 2 CK, @687.5 MHz = 2.91 ns
    tCS = "2.91ns"
    # 2 CK
    tPPD = "2.91ns"
    # Greater of 2 CK or 5 ns
    tRRD = "5ns"
    tRRD_L = "5ns"
    # With Bank Group Arch mode tFAW is 20 ns
    tXAW = "20ns"
    activation_limit = 4
    # at 5Gbps, 4:1 WCK to CK ratio required
    # 2 data beats per WCK (DDR) -> 8 per CK
    beats_per_clock = 8
    # 2 cycles required to send activate command
    # 2 command phases can be sent back-to-back or
    # with a gap up to tAAD = 8 CK
    two_cycle_activate = True
    tAAD = "11.640ns"
    data_clock_sync = True
class LPDDR5_5500_1x16_BG_BL16(LPDDR5_5500_1x16_BG_BL32):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on
    initial JEDEC specification
    Starting with 5.5Gbps data rates and 8Gbit die
    Configuring for 16-bank mode with bank-group architecture, burst of 16
    """
    # LPDDR5 is a BL16 or BL32 device
    # With BG mode, BL16 and BL32 are supported
    # Use BL16 for smaller access granularity
    burst_length = 16
    # For Bstof16 with BG arch, 2 CK @ 687.5 MHz with 4:1 clock ratio
    tBURST = "2.91ns"
    tBURST_MIN = "2.91ns"
    # For Bstof16 with BG arch, 4 CK @ 687.5 MHz with 4:1 clock ratio
    tBURST_MAX = "5.82ns"
    # 4 CK @ 687.5 MHz
    tCCD_L = "5.82ns"
class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on
    initial JEDEC specification
    Starting with 5.5Gbps data rates and 8Gbit die
    Configuring for 8-bank mode, burst of 32
    """
    # 4KiB page with 8B mode
    device_rowbuffer_size = "4KiB"
    # LPDDR5 supports configurable bank options
    # 8B : BL32, all frequencies
    # 16B : BL32 or BL16, <=3.2Gbps
    # 16B with Bank Group Arch (4B/BG): BL32 or BL16, >3.2Gbps
    # Select 8B
    banks_per_rank = 8
    bank_groups_per_rank = 0
    # For Bstof32 with 8B mode, 4 CK @ 687.5 MHz with 4:1 clock ratio
    tBURST = "5.82ns"
    tBURST_MIN = "5.82ns"
    tBURST_MAX = "5.82ns"
    # Greater of 4 CK or 12 ns
    tWTR = "12ns"
    # Greater of 2 CK or 10 ns
    tRRD = "10ns"
    # With 8B mode tFAW is 40 ns
    tXAW = "40ns"
    activation_limit = 4
    # Reset BG arch timing for 8B mode
    tCCD_L = "0ns"
    tRRD_L = "0ns"
    tWTR_L = "0ns"
class LPDDR5_6400_1x16_BG_BL32(LPDDR5_5500_1x16_BG_BL32):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on
    initial JEDEC specification
    6.4Gbps data rates and 8Gbit die
    Configuring for 16-bank mode with bank-group architecture
    burst of 32, which means bursts can be interleaved
    """
    # 6.4Gb/s DDR with 4:1 WCK:CK ratio for 800 MHz CK
    # (the original comment said 5.5Gb/s / 687.5 MHz, copied from the base
    # class; tCK = 1.25ns corresponds to 800 MHz)
    tCK = "1.25ns"
    # Base RL is 17 CK @ 800 MHz = 21.25ns
    tCL = "21.25ns"
    # With BG architecture, burst of 32 transferred in two 16-beat
    # sub-bursts, with a 16-beat gap in between.
    # Each 16-beat sub-burst is 8 WCK @3.2 GHz or 2 CK @ 800 MHz
    # tBURST is the delay to transfer the Bstof32 = 6 CK @ 800 MHz
    tBURST = "7.5ns"
    # can interleave a Bstof32 from another bank group at tBURST_MIN
    # 16-beats is 8 WCK @3.2 GHz or 2 CK @ 800 MHz
    tBURST_MIN = "2.5ns"
    # tBURST_MAX is the maximum burst delay for same bank group timing
    # this is 8 CK @ 800 MHz
    tBURST_MAX = "10ns"
    # 8 CK @ 800 MHz
    tCCD_L = "10ns"
    # Required RD-to-WR timing is RL+ BL/n + tWCKDQ0/tCK - WL
    # tWCKDQ0/tCK will be 1 CK for most cases
    # For gem5 RL = WL and BL/n is already accounted for with tBURST
    # Result is an additional 1 CK is required
    tRTW = "1.25ns"
    # Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
    tCS = "2.5ns"
    # 2 CK
    tPPD = "2.5ns"
    # 2 command phases can be sent back-to-back or
    # with a gap up to tAAD = 8 CK
    tAAD = "10ns"
class LPDDR5_6400_1x16_BG_BL16(LPDDR5_6400_1x16_BG_BL32):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on initial
    JEDEC specification
    6.4Gbps data rates and 8Gbit die
    Configuring for 16-bank mode with bank-group architecture, burst of 16
    """
    # LPDDR5 is a BL16 or BL32 device
    # With BG mode, BL16 and BL32 are supported
    # Use BL16 for smaller access granularity
    burst_length = 16
    # For Bstof16 with BG arch, 2 CK @ 800 MHz with 4:1 clock ratio
    tBURST = "2.5ns"
    tBURST_MIN = "2.5ns"
    # For Bstof16 with BG arch, 4 CK @ 800 MHz with 4:1 clock ratio
    tBURST_MAX = "5ns"
    # 4 CK @ 800 MHz
    tCCD_L = "5ns"
class LPDDR5_6400_1x16_8B_BL32(LPDDR5_6400_1x16_BG_BL32):
    """
    A single LPDDR5 x16 interface (one command/address bus)
    for a single x16 channel with default timings based on
    initial JEDEC specification
    6.4Gbps data rates and 8Gbit die
    Configuring for 8-bank mode, burst of 32
    """

    # 4KiB page with 8B mode
    device_rowbuffer_size = "4KiB"
    # LPDDR5 supports configurable bank options
    # 8B  : BL32, all frequencies
    # 16B : BL32 or BL16, <=3.2Gbps
    # 16B with Bank Group Arch (4B/BG): BL32 or BL16, >3.2Gbps
    # Select 8B (no bank groups)
    banks_per_rank = 8
    bank_groups_per_rank = 0
    # For Bstof32 with 8B mode, 4 CK @ 800 MHz with 4:1 clock ratio
    # Without bank groups min/max interleave windows collapse to tBURST
    tBURST = "5ns"
    tBURST_MIN = "5ns"
    tBURST_MAX = "5ns"
    # Greater of 4 CK or 12 ns
    tWTR = "12ns"
    # Greater of 2 CK or 10 ns
    tRRD = "10ns"
    # With 8B mode tFAW is 40 ns
    tXAW = "40ns"
    activation_limit = 4
    # Reset BG arch timing for 8B mode (no same-bank-group constraints)
    tCCD_L = "0ns"
    tRRD_L = "0ns"
    tWTR_L = "0ns"
|
{
"content_hash": "b28bdf1893b61ca059a423e0d7fbd53a",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 75,
"avg_line_length": 28.50314465408805,
"alnum_prop": 0.6407766990291263,
"repo_name": "gem5/gem5",
"id": "f35bef15aeba463ed15de2abaf0c14e944aac5cf",
"size": "11306",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "src/python/gem5/components/memory/dram_interfaces/lpddr5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
}
|
import submodules
import pretty
class User:
    """A wiki user whose contribution history can be collected and
    rendered.

    Call :meth:`load` before :meth:`raw` or :meth:`pretty`; both read
    the ``userdata``/``timeline`` attributes that ``load`` populates.
    """

    def __init__(self, username):
        self.username = username

    def load(self, depth='all'):
        """Call the necessary submodules to generate the user's
        contribution history.

        ``depth`` specifies how detailed the history should be:
        'all' (default), 'key' (only key events), or 'minimal'
        (only user statistics).
        """
        # NOTE: the original built an unused local dict here; the
        # collected output is stored directly on the instance.
        collector = submodules.Collector(user=self.username, depth=depth)
        collector.collect()
        self.userdata = collector.output("userdata")
        self.timeline = collector.output("timeline")

    def raw(self):
        """Return a raw dict of data points from the user's
        contribution history."""
        return self.userdata

    def pretty(self):
        """Return a natural language summary of the user's
        contribution history."""
        return pretty.prettify(user=self.username,
                               userdata=self.userdata,
                               timeline=self.timeline)
if __name__ == '__main__':
    # CLI entry point: wikitimeline.py <username> <depth>
    import sys
    data = User(username=sys.argv[1])
    data.load(depth=sys.argv[2])
    # Use the print() call form: with a single argument it behaves the
    # same under Python 2 and 3, whereas the original Python-2-only
    # `print x` statements are a SyntaxError on Python 3.
    print(data.raw())
    print(data.pretty())
|
{
"content_hash": "8f77758fdf059da312b1e7b20b042897",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 27.62162162162162,
"alnum_prop": 0.7172211350293543,
"repo_name": "theopolisme/wikitimeline",
"id": "cef6fbf7f2e33f412f50d360dc5b0cc1e504206d",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wikitimeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110996"
}
],
"symlink_target": ""
}
|
from jupyter_packaging import (
wrap_installers,
npm_builder,
get_data_files
)
from pathlib import Path
from setuptools import setup
# Resolve key locations relative to this setup.py.
HERE = Path(__file__).parent.resolve()
IS_REPO = (HERE.parent / '.git').exists()  # NOTE(review): not referenced below
JS_DIR = HERE / 'src'

# (install target, source directory, file glob) triples describing the
# Jupyter data files shipped with the package.
data_files_spec = [
    ('share/jupyter/nbextensions/jupyter-js-widgets',
     'widgetsnbextension/static',
     'extension.js*'),
    ('etc/jupyter/nbconfig/notebook.d', '.', 'widgetsnbextension.json'),
]

# Rebuild the JS bundle with yarn after a develop ("editable") install.
post_develop = npm_builder(build_cmd="yarn", source_dir="src",
                           build_dir=JS_DIR)
cmdclass = wrap_installers(post_develop=post_develop)

if __name__ == '__main__':
    setup(
        cmdclass=cmdclass,
        data_files=get_data_files(data_files_spec),
    )
|
{
"content_hash": "67341053e6a32bc446face824e68f6f5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 99,
"avg_line_length": 25.464285714285715,
"alnum_prop": 0.664796633941094,
"repo_name": "ipython/ipywidgets",
"id": "05b6c335a404cc1f398d5966415a9b215354ac2f",
"size": "880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/widgetsnbextension/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45417"
},
{
"name": "HTML",
"bytes": "7937"
},
{
"name": "JavaScript",
"bytes": "54373"
},
{
"name": "Python",
"bytes": "155416"
},
{
"name": "Shell",
"bytes": "2917"
},
{
"name": "TypeScript",
"bytes": "259970"
}
],
"symlink_target": ""
}
|
from dry_rest_permissions.generics import DRYPermissionsField
from rest_framework_json_api import serializers
# Local
from .models import Assignment
from .models import Contest
from .models import Entry
from .models import Session
class AssignmentSerializer(serializers.ModelSerializer):
    """Serializer for Assignment rows.

    Exposes the assignment's identifying and contact columns plus
    DRY per-object permissions.
    """

    permissions = DRYPermissionsField()

    class Meta:
        model = Assignment
        # Declared as a list (not a tuple), and without the previous
        # empty ``read_only_fields`` declaration, for consistency with
        # the other serializers in this module.
        fields = [
            'id',
            'kind',
            'category',
            'person_id',
            'name',
            'first_name',
            'last_name',
            'display_district',
            'district',
            'area',
            'email',
            'cell_phone',
            'airports',
            'bhs_id',
            'image_id',
            'session',
            'permissions',
        ]
class ContestSerializer(serializers.ModelSerializer):
    """Serializer for Contest rows, including DRY per-object
    permissions and the related ``entries``."""

    permissions = DRYPermissionsField()

    class Meta:
        model = Contest
        fields = [
            'id',
            'award_id',
            'name',
            'kind',
            'gender',
            'level',
            'season',
            'description',
            'district',
            'division',
            'age',
            'is_novice',
            'is_single',
            'size',
            'size_range',
            'scope',
            'scope_range',
            'tree_sort',
            'session',
            'entries',
            'permissions',
        ]
class EntrySerializer(serializers.ModelSerializer):
    """Serializer for Entry rows.

    State logs are exposed as primary keys only; related contests can
    be included as JSON:API compound documents via
    ``included_serializers``.
    """

    permissions = DRYPermissionsField()
    # Related state-log records, by primary key only (read-only).
    statelogs = serializers.PrimaryKeyRelatedField(
        many=True,
        read_only=True,
    )
    # rest_framework_json_api: serializer used when ?include=contests
    # is requested.
    included_serializers = {
        'contests': 'apps.registration.serializers.ContestSerializer',
    }

    class Meta:
        model = Entry
        fields = [
            'id',
            'status',
            'is_evaluation',
            'is_private',
            'notes',
            'is_mt',
            'draw',
            'prelim',
            'base',
            'participants',
            'chapters',
            'pos',
            'area',
            'group_id',
            'name',
            'kind',
            'gender',
            'district',
            'division',
            'bhs_id',
            'code',
            'is_senior',
            'is_youth',
            'image_id',
            'description',
            'owners',
            'contests',
            'session',
            'statelogs',
            'permissions',
        ]
        # NOTE(review): 'nomen' is marked read-only but does not appear
        # in ``fields`` above -- confirm whether it should be exposed.
        read_only_fields = [
            'nomen',
        ]
class SessionSerializer(serializers.ModelSerializer):
    """Serializer for Session rows.

    State logs are exposed as primary keys only; contests, entries and
    assignments can be included as JSON:API compound documents via
    ``included_serializers``.
    """

    permissions = DRYPermissionsField()
    # Related state-log records, by primary key only (read-only).
    statelogs = serializers.PrimaryKeyRelatedField(
        many=True,
        read_only=True,
    )
    # rest_framework_json_api: serializers used for ?include= requests.
    included_serializers = {
        'contests': 'apps.registration.serializers.ContestSerializer',
        'entries': 'apps.registration.serializers.EntrySerializer',
        'assignments': 'apps.registration.serializers.AssignmentSerializer',
    }

    class Meta:
        model = Session
        fields = [
            'id',
            'url',
            'nomen',
            'status',
            'kind',
            'num_rounds',
            'is_invitational',
            'description',
            'notes',
            'footnotes',
            'legacy_report',
            'drcj_report',
            'convention_id',
            'name',
            'district',
            'season',
            'panel',
            'year',
            'open_date',
            'close_date',
            'start_date',
            'end_date',
            'venue_name',
            'location',
            # 'timezone',
            'divisions',
            'image_id',
            'rounds_published',
            'owners',
            'contests',
            'entries',
            'assignments',
            'statelogs',
            'permissions',
        ]
        read_only_fields = [
            'image_id',
            'nomen',
        ]
|
{
"content_hash": "719cda6c7b70f8bb9cc8ecdf69930350",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 76,
"avg_line_length": 21.145833333333332,
"alnum_prop": 0.43842364532019706,
"repo_name": "barberscore/barberscore-api",
"id": "f2527d2c041808f92839094fbf566d91c0d39343",
"size": "4075",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/apps/registration/serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "54125"
},
{
"name": "JavaScript",
"bytes": "5861"
},
{
"name": "Procfile",
"bytes": "114"
},
{
"name": "Python",
"bytes": "766540"
},
{
"name": "Ruby",
"bytes": "456"
}
],
"symlink_target": ""
}
|
""" CLI to process imagery products to tiles and index in database
"""
from collections import defaultdict
import concurrent.futures
import logging
import os
import click
import six
# TODO: hide many of these imports to improve CLI startup speed
from . import cliutils, options
from .. import multiprocess, products
from .._util import decompress_to, include_bands, mkdir_p
from ..errors import FillValueException
from ..geoutils import reproject_as_needed, reproject_bounds
from ..stores import destination_path, STORAGE_TYPES
def ingest_source(config, source, overwrite, log_name):
    """ Ingest (tile and index) a source

    Decompresses ``source``, determines its product type, reprojects and
    tiles each desired band into the configured store, and collects the
    database rows describing what was tiled.

    Table entries for indexing are created and returned by this function so
    that database writes can be performed in parent process/context.

    Args:
        config: parsed configuration mapping (reads 'products' and 'store')
        source: path to a (compressed) source product
        overwrite: if True, re-tile bands that are already indexed
        log_name: per-worker log filename, or a falsy value for stdout

    Returns:
        tuple: ``(indexed_products, indexed_bands)`` where
        ``indexed_products`` maps tile id -> product row and
        ``indexed_bands`` maps tile id -> list of band rows, all pending
        insertion/merge by the caller
    """
    mlogger = multiprocess.get_logger_multiproc(name=os.path.basename(source),
                                                filename=log_name)
    echoer = cliutils.Echoer(logger=mlogger)
    spec, storage_name, database, cube, dataset = (
        cliutils.config_to_resources(config))
    echoer.info('Decompressing: {}'.format(os.path.basename(source)))
    with decompress_to(source) as tmpdir:
        # Find product and get dataset database resource
        product = products.registry.sniff_product_type(tmpdir)
        collection_name = product.description
        # Subset bands according to the per-product include filter
        product_config = config.get('products', {}).get(collection_name, {})
        if not product_config:
            echoer.warning('No inclusion filter specified for product. '
                           'Ingesting all bands in product.')
            desired_bands = product.bands
        else:
            # Copy before pop so the shared config is not mutated
            band_filter = product_config.copy().get('include_filter', {})
            band_filter_regex = band_filter.pop('regex', False)
            desired_bands = include_bands(product.bands, band_filter,
                                          regex=band_filter_regex)
        # Reprojection option
        resampling = product_config.get('resampling', 'nearest')
        # Retrieve bounding box in tilespec's CRS
        bbox = reproject_bounds(product.bounds, 'EPSG:4326', spec.crs)
        # Find tiles for product & IDs of these tiles in database
        tiles = list(spec.bounds_to_tiles(bbox))
        tiles_id = [
            cube.ensure_tile(
                collection_name, tile.horizontal, tile.vertical)
            for tile in tiles
        ]
        # Existing product row per tile, or None if not yet indexed
        tiles_product = {
            tile_id: database.get_product_by_name(
                tile_id, product.timeseries_id)
            for tile_id in tiles_id
        }
        indexed_products, indexed_bands = {}, defaultdict(list)
        for band in desired_bands:
            echoer.info('Reprojecting band: {}'.format(band))
            with reproject_as_needed(band.src, spec, resampling) as src:
                band.src = src
                echoer.process('Tiling: {}'.format(band.long_name))
                for tile, tile_id in zip(tiles, tiles_id):
                    db_product = tiles_product[tile_id]
                    if db_product:
                        # If product is in DB, check if we have bands to add
                        _band_names = [b.standard_name for b
                                       in db_product.bands]
                        if band.standard_name in _band_names and not overwrite:
                            echoer.item('Already tiled -- skipping')
                            continue
                    else:
                        # Product not in DB -- need to create
                        db_product = database.create_product(product)
                        db_product.tile_id = tile_id
                        tiles_product[tile_id] = db_product
                    # Setup dataset store
                    path = destination_path(config, tile, product)
                    store_cls = STORAGE_TYPES[config['store']['name']]
                    store = store_cls(path, tile,
                                      meta_options=config['store']['co'])
                    # Save and record path
                    try:
                        dst_path = store.store_variable(
                            product, band,
                            img_pattern=config['store']['tile_imgpattern'],
                            overwrite=overwrite)
                    except FillValueException:
                        # TODO: skip tile but complain
                        continue
                    band.path = dst_path
                    # Copy over metadata files alongside the tiled data
                    for md_name, md_file in six.iteritems(
                            product.metadata_files):
                        if md_file:
                            dst_path = store.store_file(product, md_file)
                            product.metadata_files[md_name] = dst_path
                    # Update index with new product/band entry; reuse an
                    # existing band row when one already exists
                    if db_product.id:
                        db_band = (
                            database.get_band_by_name(db_product.id,
                                                      band.standard_name)
                            or database.create_band(band)
                        )
                    else:
                        db_product = database.create_product(product)
                        db_product.tile_id = tile_id
                        db_band = database.create_band(band)
                    indexed_products[tile_id] = db_product
                    indexed_bands[tile_id].append(db_band)
                    # TODO: delete file if index went bad
                    echoer.item('Tiled band for tile {}'.format(
                        tile.str_format(config['store']['tile_dirpattern'])
                    ))
    # Make sure to close database connection
    database.session.close()
    return indexed_products, indexed_bands
@click.command(short_help='Ingest known products into tile dataset format')
@options.opt_multiprocess_method
@options.opt_multiprocess_njob
@click.option('--log_dir', 'log_dir',
              type=click.Path(exists=False, dir_okay=True, writable=True,
                              resolve_path=True),
              help='Log ingests to this directory (otherwise to stdout)')
@click.option('--overwrite', is_flag=True,
              help='Overwriting existing tiled data')
@options.arg_sources
@click.pass_context
def ingest(ctx, sources, overwrite, log_dir, njob, executor):
    """Ingest ``sources`` in parallel and index the results.

    Fans ``ingest_source`` out across ``executor``; all database writes
    happen here in the parent process as the futures complete.

    NOTE(review): ``njob`` is injected by the multiprocess option
    decorators but never referenced in this body -- presumably it is
    consumed when ``executor`` is constructed; confirm.
    """
    config = options.fetch_config(ctx)
    logger = logging.getLogger('tilez')
    echoer = cliutils.Echoer(logger)
    # Only ``database`` is used below; the rest of the unpacked
    # resources are unused here.
    spec, storage_name, database, cube, dataset = (
        cliutils.config_to_resources(config))
    echoer.info('Ingesting {} products'.format(len(sources)))
    if log_dir:
        mkdir_p(log_dir)
    product_ids, band_ids = [], []
    # One future per source; each worker logs to its own file when
    # --log_dir is given (log path is None/falsy otherwise).
    futures = {
        executor.submit(ingest_source, config, src, overwrite,
                        log_dir and os.path.join(
                            log_dir, os.path.basename(src) + '.log')): src
        for src in sources
    }
    sources_indexed = 0
    for future in concurrent.futures.as_completed(futures):
        src = futures[future]
        try:
            indexed_products, indexed_bands = future.result()
            # Persist the rows collected by the worker: merge rows that
            # already have ids, add new ones, and link bands to their
            # (possibly freshly-assigned) product id.
            for k in indexed_products:
                prod = indexed_products[k]
                with database.scope() as txn:
                    txn.merge(prod) if prod.id else txn.add(prod)
                    txn.flush()
                    for b in indexed_bands[k]:
                        b.product_id = prod.id
                        txn.merge(b) if b.id else txn.add(b)
                product_ids.append(prod.id)
                band_ids.extend([b.id for b in indexed_bands[k]])
        except Exception as exc:
            # Best-effort: a failed source is reported, not fatal
            echoer.warning('Ingest of {} produced exception: {}'
                           .format(src, exc))
        else:
            echoer.item('Ingested: {} (product IDs: {})'
                        .format(src, [p.id for p in indexed_products.values()])
                        )
            sources_indexed += 1
    echoer.process('Indexed {nprod} products to {ntile} tiles of {nband} bands'
                   .format(nprod=sources_indexed,
                           ntile=len(set(product_ids)),
                           nband=len(band_ids)))
|
{
"content_hash": "c8d226327df05914fe5f55eede2751e6",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 42.07070707070707,
"alnum_prop": 0.5368547418967587,
"repo_name": "ceholden/landsat_tile",
"id": "6d14dcd9c8e4e5b3304fc6e93a1ae3311d817534",
"size": "8354",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tilezilla/cli/ingest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86315"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
}
|
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
# Expose the generated `lrn` op under its long-form name.
local_response_normalization = gen_nn_ops.lrn

# pylint: disable=protected-access
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
if value is None:
value = [1]
elif not isinstance(value, collections_abc.Sized):
value = [value]
current_n = len(value)
if current_n == n + 2:
return value
elif current_n == 1:
value = list((value[0],) * n)
elif current_n == n:
value = list(value)
else:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, current_n))
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,  # pylint: disable=redefined-builtin
    strides=None,
    name=None):
  """Computes sums of N-D convolutions (actually cross correlation).

  It is required that 1 <= N <= 3.

  This is used to implement the more generic `convolution` function, which
  extends the interface of this function with a `dilation_rate` parameter.

  Args:
    input: Rank N+2 tensor of type T of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
      does not start with `"NC"`, or
      `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
      with `"NC"`.
    filter: Rank N+2 tensor of type T of shape
      `filter_spatial_shape + [in_channels, out_channels]`.  Rank of either
      `input` or `filter` must be known.
    padding: Padding method to use, must be either "VALID" or "SAME".
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    strides: Sequence of N positive integers, defaults to `[1] * N`.
    name: Name prefix to use.

  Returns:
    Rank N+2 tensor of type T of shape
    `[batch_size] + output_spatial_shape + [out_channels]`, where
    if padding == "SAME":
      output_spatial_shape = input_spatial_shape
    if padding == "VALID":
      output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.

  Raises:
    ValueError: if ranks are incompatible.

  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    filter_shape = filter.get_shape()
    # Validation and op selection are done once in the helper; the
    # returned callable performs the actual convolution.
    op = _NonAtrousConvolution(
        input_shape,
        filter_shape=filter_shape,
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return op(input, filter)
class _NonAtrousConvolution(object):
  """Helper class for _non_atrous_convolution.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments:
    input_shape: static input shape, i.e. input.get_shape().
    filter_shape: static filter shape, i.e. filter.get_shape().
    padding: see _non_atrous_convolution.
    data_format: see _non_atrous_convolution.
    strides: see _non_atrous_convolution.
    name: see _non_atrous_convolution.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,  # pylint: disable=redefined-builtin
      padding,
      data_format=None,
      strides=None,
      name=None):
    # Merge rank information both ways: input and filter must agree on
    # the N+2 rank of the convolution.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    # Select the N-specific op and convert strides into the form that
    # op expects (scalar for conv1d, length-(N+2) list otherwise).
    if conv_dims == 1:
      # conv1d uses the 2-d data format names
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      # NOTE(review): unlike the 2-D branch, data_format is left as None
      # here when unspecified and passed through to gen_nn_ops.conv3d --
      # presumably that op defaults it; confirm.
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Note that we need this adapter since argument names for conv1d don't match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    """Adapter mapping the common keyword names onto conv1d's names."""
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # Dispatch to the op chosen in __init__ with the precomputed
    # strides/data_format.
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the
  default "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum
  correlation (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + rates[1] * dy,
                            strides[2] * x + rates[2] * dx,
                            c] +
                      filters[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filters` is equal to the
  negation of the erosion of `-input` by the reflected `filters`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  # Map the v2 argument names (filters/dilations) onto the generated
  # op's legacy names (filter/rates).
  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export(v1=["nn.dilation2d"])
def dilation2d_v1(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    filter=None,  # pylint: disable=redefined-builtin
    strides=None,
    rates=None,
    padding=None,
    name=None,
    filters=None,
    dilations=None):
  # Accept both the legacy (filter/rates) and the renamed
  # (filters/dilations) argument spellings, resolving each pair via
  # deprecated_argument_lookup before calling the generated op.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
  return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)


# Reuse the generated op's docstring since the wrapper declares none.
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  This has the effect of transforming sliding window operations into the
  corresponding "atrous" operation in which the input is sampled at the
  specified `dilation_rate`.

  In the special case that `dilation_rate` is uniformly 1, this simply returns:

    op(input, num_spatial_dims, padding)

  Otherwise, it returns:

    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID")
      adjusted_dilation_rate,
      adjusted_crops),

  where:

    adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
    adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]

  defined as follows:

  We first define two int64 tensors `paddings` and `crops` of shape
  `[num_spatial_dims, 2]` based on the value of `padding` and the spatial
  dimensions of the `input`:

  If `padding = "VALID"`, then:

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate)

  If `padding = "SAME"`, then:

    dilated_filter_shape =
      filter_shape + (filter_shape - 1) * (dilation_rate - 1)

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate,
      [(dilated_filter_shape - 1) // 2,
       dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])

  Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
  dimensions are contiguous starting at the second dimension, but the specified
  `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
  `crops` in order to be usable with these operations.  For a given dimension,
  if the block size is 1, and both the starting and ending padding and crop
  amounts are 0, then space_to_batch_nd effectively leaves that dimension
  alone, which is what is needed for dimensions not part of `spatial_dims`.
  Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
  efficiently for any number of leading and trailing dimensions.

  For 0 <= i < len(spatial_dims), we assign:

    adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
    adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
    adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]

  All unassigned values of `adjusted_dilation_rate` default to 1, while all
  unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.

  Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
  padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
  `[1]*N`.

  Advanced usage. Note the following optimization: A sequence of
  `with_space_to_batch` operations with identical (not uniformly 1)
  `dilation_rate` parameters and "VALID" padding

    net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "VALID", op_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "VALID")
      ...
      result = op_k(result, num_spatial_dims, "VALID")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
  `batch_to_space_nd`.

  Similarly, a sequence of `with_space_to_batch` operations with identical (not
  uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
  dimensions

    net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "SAME")
      ...
      result = op_k(result, num_spatial_dims, "SAME")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  Args:
    input: Tensor of rank > max(spatial_dims).
    dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
    padding: str constant equal to "VALID" or "SAME"
    op: Function that maps (input, num_spatial_dims, padding) -> output
    filter_shape: If padding = "SAME", specifies the shape of the convolution
      kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
      If padding = "VALID", filter_shape is ignored and need not be specified.
    spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
      integers (which are >= 1) specifying the spatial dimensions of `input`
      and output.  Defaults to: `range(1, num_spatial_dims+1)`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    The output Tensor as described above, dimensions will vary based on the op
    provided.

  Raises:
    ValueError: if `padding` is invalid or the arguments are incompatible.
    ValueError: if `spatial_dims` are invalid.

  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
  input_shape = input.get_shape()

  def build_op(num_spatial_dims, padding):
    # Adapter: _WithSpaceToBatch invokes (input, filter) callables, while
    # the user-supplied `op` takes (input, num_spatial_dims, padding).
    return lambda inp, _: op(inp, num_spatial_dims, padding)

  # The helper precomputes the space-to-batch bookkeeping from the static
  # shape; when dilation_rate is uniformly 1 it calls `op` directly.
  new_op = _WithSpaceToBatch(
      input_shape,
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)
  return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Wraps an op so that it is evaluated on a space-to-batch transformed input,
  which turns a dilated (atrous) operation into an equivalent non-dilated one.

  Note that this class assumes that shapes of input and filter passed to
  `__call__` are compatible with `input_shape` and `filter_shape` passed to
  the constructor.

  Arguments:
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see `with_space_to_batch`.
    padding: see `with_space_to_batch`.
    build_op: Function that maps (num_spatial_dims, paddings) -> (function
      that maps (input, filter) -> output).
    filter_shape: see `with_space_to_batch`.
    spatial_dims: see `with_space_to_batch`.
    data_format: see `with_space_to_batch`.
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    # The (static) rank-1 shape of `dilation_rate` determines the number of
    # spatial dimensions, so it must be fully defined at graph construction.
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")
    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")
    num_spatial_dims = rate_shape.dims[0].value
    # Spatial dims start at index 2 for "NC*" layouts (channels second),
    # otherwise at index 1 (channels last).
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1
    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    # Sorting/deduplicating must be a no-op, i.e. the caller already supplied
    # a strictly increasing sequence of dimensions >= 1.
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a montonically increasing sequence of positive "
          "integers")
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1
    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))
    # Prefer the statically-known rate when available so paddings can be
    # computed at graph-construction time.
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Fast path: no dilation, so no space-to-batch transform is needed;
        # delegate directly to the wrapped op.
        self.call = build_op(num_spatial_dims, padding)
        return
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape is only known dynamically; defer the base-paddings
        # computation to call time (see _with_space_to_batch_call).
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)
    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    # The wrapped op always runs with "VALID" padding; "SAME" is emulated via
    # the explicit paddings computed above.
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape when any spatial dim is unknown.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops so their entries line up with the requested
    # spatial_dims positions; fill value 1 leaves non-spatial dims unblocked,
    # fill value 0 leaves them unpadded/uncropped.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)
    result = self.op(input_converted, filter)
    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)
    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)
    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # `self.call` is either the wrapped op itself (dilation_rate == 1 fast
    # path) or `_with_space_to_batch_call`.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes the per-dimension base paddings for with_space_to_batch.

  The effective (dilated) filter has `(rate - 1)` zeros inserted between
  consecutive taps, so each spatial dimension needs `dilated_size - 1` total
  padding to turn "SAME" into "VALID"; when that total is odd, the extra
  element goes at the end, matching conv2d's convention.
  """
  spatial = filter_shape[:num_spatial_dims]
  dilated = spatial + (spatial - 1) * (rate_or_const_rate - 1)
  total_pad = dilated - 1
  pad_start = total_pad // 2
  pad_end = total_pad - pad_start
  return array_ops.stack(
      [[pad_start[d], pad_end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Tensor of the same type as `orig` and with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and

    adjusted[j, ...] = fill_value

  for j != spatial_dims[i] - 1 for some i.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  # Operate on the constant value when available so the result stays a
  # numpy constant instead of a graph op.
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    # Copy the whole contiguous run from `orig` with one slice (avoids a
    # concat part per element).
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    # Symbolic input: concatenate along the leading dimension in the graph.
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
filters=None,
dilations=None):
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
[comment here](https://tensorflow.org/api_guides/python/nn#Convolution).
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
filters: Alias of filter.
dilations: Alias of dilation_rate.
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
return convolution_internal(
input,
filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation_rate,
name=name)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
def convolution_internal(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None,
    call_from_convolution=True):
  """Internal function which performs rank agnostic convolution.

  Infers the number of spatial dimensions N from the static rank of `input`
  (preferred) or `filters`, then either dispatches directly to the fused
  conv{1,2,3}d ops (TPU, or no dilation) or falls back to the `Convolution`
  helper, which implements dilation via space-to-batch.

  `call_from_convolution` distinguishes calls made through the public
  `convolution` wrappers from the re-entrant call in `Convolution.__call__`;
  it only affects the name scope chosen on TPU.
  """
  # Infer N from whichever of input/filters has a statically-known rank.
  # Both TensorShape and other shape-like objects (with `is not None`
  # semantics) are supported.
  if isinstance(input.shape, tensor_shape.TensorShape) and \
      input.shape.rank is not None:
    n = len(input.shape) - 2
  elif not isinstance(input.shape, tensor_shape.TensorShape) and \
      input.shape is not None:
    n = len(input.shape) - 2
  elif isinstance(filters.shape, tensor_shape.TensorShape) and \
      filters.shape.rank is not None:
    n = len(filters.shape) - 2
  elif not isinstance(filters.shape, tensor_shape.TensorShape) and \
      filters.shape is not None:
    n = len(filters.shape) - 2
  else:
    raise ValueError("rank of input or filter must be known")
  if not 1 <= n <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
  if data_format is None:
    channel_index = n + 1
  else:
    channel_index = 1 if data_format.startswith("NC") else n + 1
  # Normalize strides/dilations to full-rank sequences with 1s in the batch
  # and channel positions.
  strides = _get_sequence(strides, n, channel_index, "strides")
  dilations = _get_sequence(dilations, n, channel_index, "dilations")
  scopes = {1: "conv1d", 2: "Conv2D", 3: "Conv3D"}
  if not call_from_convolution and device_context.enclosing_tpu_context(
  ) is not None:
    scope = scopes[n]
  else:
    scope = "convolution"
  with ops.name_scope(name, scope, [input, filters]) as name:
    conv_ops = {1: conv1d, 2: gen_nn_ops.conv2d, 3: gen_nn_ops.conv3d}
    if device_context.enclosing_tpu_context() is not None or all(
        i == 1 for i in dilations):
      # fast path for TPU or if no dilation as gradient only supported on GPU
      # for dilations
      op = conv_ops[n]
      return op(
          input,
          filters,
          strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)
    else:
      # Dilated path: strip the batch/channel entries back out of
      # strides/dilations and let the Convolution helper apply the
      # space-to-batch transform.
      if channel_index == 1:
        strides = strides[2:]
        dilations = dilations[2:]
      else:
        strides = strides[1:-1]
        dilations = dilations[1:-1]
      op = Convolution(
          tensor_shape.as_shape(input.shape),
          tensor_shape.as_shape(filters.shape),
          padding,
          strides=strides,
          dilation_rate=dilations,
          name=name,
          data_format=data_format)
      return op(input, filters)
class Convolution(object):
  """Helper class for convolution.

  Note that this class assumes that shapes of input and filter passed to
  `__call__` are compatible with `input_shape` and `filter_shape` passed to
  the constructor.

  Arguments:
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # Determine the total rank (spatial dims + batch + channel) from the
    # filter shape if known, otherwise from the input shape.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")
    num_spatial_dims = num_total_dims - 2
    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))
    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))
    # Locate the channel dimension: last for channels-last layouts, second
    # for "NC*" layouts.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)
    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)
    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.padding = padding
    self.name = name
    self.dilation_rate = dilation_rate
    # Wrap the non-dilated convolution in the space-to-batch helper, which
    # handles dilation_rate > 1 and "SAME" padding.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Builds the underlying non-atrous convolution; the space-to-batch
    # wrapper supplies the effective padding.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # TPU convolution supports dilations greater than 1.
    if device_context.enclosing_tpu_context() is not None:
      return convolution_internal(
          inp,
          filter,
          strides=self.strides,
          padding=self.padding,
          data_format=self.data_format,
          dilations=self.dilation_rate,
          name=self.name,
          call_from_convolution=False)
    else:
      return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None,
dilations=None):
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Alias for dilation_rate
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: (Chen et al., 2015). The same operation is
investigated further in (Yu et al., 2016). Previous works that effectively
use atrous convolution in different ways are, among others,
(Sermanet et al., 2014) and (Giusti et al., 2013).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
with odd heights/ widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
[batch, height - 2 * (filter_width - 1),
width - 2 * (filter_height - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Multi-Scale Context Aggregation by Dilated Convolutions:
[Yu et al., 2016](https://arxiv.org/abs/1511.07122)
([pdf](https://arxiv.org/pdf/1511.07122.pdf))
Semantic Image Segmentation with Deep Convolutional Nets and Fully
Connected CRFs:
[Chen et al., 2015](http://arxiv.org/abs/1412.7062)
([pdf](https://arxiv.org/pdf/1412.7062))
OverFeat - Integrated Recognition, Localization and Detection using
Convolutional Networks:
[Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
([pdf](https://arxiv.org/pdf/1312.6229.pdf))
Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
[Giusti et al., 2013]
(https://ieeexplore.ieee.org/abstract/document/6738831)
([pdf](https://arxiv.org/pdf/1302.1700.pdf))
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def _convert_padding(padding):
  """Converts Python padding to C++ padding for ops which take EXPLICIT padding.

  A string padding ("SAME"/"VALID") passes through unchanged with an empty
  explicit-paddings list; a list/tuple padding is flattened into the
  `explicit_paddings` attribute and the string "EXPLICIT" is returned in its
  place.

  Args:
    padding: the `padding` argument for a Python op which supports EXPLICIT
      padding.

  Returns:
    (padding, explicit_paddings) pair, which should be passed as attributes to
    a C++ op.

  Raises:
    ValueError: If padding is invalid.
  """
  if padding == "EXPLICIT":
    # Give a better error message if EXPLICIT is passed.
    raise ValueError('"EXPLICIT" is not a valid value for the padding '
                     "parameter. To use explicit padding, the padding "
                     "parameter must be a list.")
  if not isinstance(padding, (list, tuple)):
    # String padding ("SAME"/"VALID"): nothing to flatten.
    return padding, []
  explicit_paddings = []
  for i, pair in enumerate(padding):
    if not isinstance(pair, (list, tuple)):
      raise ValueError("When padding is a list, each element of padding must "
                       "be a list/tuple of size 2. Element with index %d of "
                       "padding is not a list/tuple" % i)
    if len(pair) != 2:
      raise ValueError("When padding is a list, each element of padding must "
                       "be a list/tuple of size 2. Element with index %d of "
                       "padding has size %d" % (i, len(pair)))
    explicit_paddings.extend(pair)
  # Per-element validation happens first (as in the original control flow);
  # only afterwards is the overall length checked.
  if len(padding) != 4:
    raise ValueError("When padding is a list, it must be of size 4. Got "
                     "padding of size: %d" % len(padding))
  return "EXPLICIT", explicit_paddings
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(
    value=None,
    filters=None,
    stride=None,
    padding=None,
    use_cudnn_on_gpu=None,
    data_format=None,
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    dilations=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape
    [batch, in_width, in_channels]
  if data_format is "NWC", or
    [batch, in_channels, in_width]
  if data_format is "NCW",
  and a filter / kernel tensor of shape
  [filter_width, in_channels, out_channels], this op reshapes
  the arguments to pass them to conv2d to perform the equivalent
  convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    [batch, in_width, in_channels]
  is reshaped to
    [batch, 1, in_width, in_channels],
  and the filter is reshaped to
    [1, filter_width, in_channels, out_channels].
  The result is then reshaped back to
    [batch, out_width, out_channels]
  \(where out_width is a function of the stride and padding as in conv2d\) and
  returned to the caller.

  Args:
    value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
    filters: A 3D `Tensor`. Must have the same type as `value`.
    stride: An int or list of `ints` that has length `1` or `3`. The number of
      entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
      the data is stored in the order of [batch, in_width, in_channels]. The
      `"NCW"` format stores data as [batch, in_channels, in_width].
    name: A name for the operation (optional).
    input: Alias for value.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Resolve the deprecated `input` alias in favor of `value`.
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    # Map the 1-D data format onto the matching 2-D format so the work can be
    # delegated to conv2d; also note which axis is the inserted spatial one.
    if data_format is None or data_format == "NHWC" or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      channel_index = 2
    elif data_format == "NCHW" or data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      channel_index = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    # Prepend a unit stride/dilation for the inserted (height) dimension.
    strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
    # Add a dummy spatial dimension of size 1 to the input and the filters.
    value = array_ops.expand_dims(value, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)
    result = gen_nn_ops.conv2d(
        value,
        filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        dilations=dilations,
        name=name)
    # Drop the dummy spatial dimension to restore a 3-D result.
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    stride,
    padding,
    data_format="NWC",
    dilations=None,
    name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape
    [batch, in_width, in_channels]
  if data_format is "NWC", or
    [batch, in_channels, in_width]
  if data_format is "NCW",
  and a filter / kernel tensor of shape
  [filter_width, in_channels, out_channels], this op reshapes
  the arguments to pass them to conv2d to perform the equivalent
  convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    [batch, in_width, in_channels]
  is reshaped to
    [batch, 1, in_width, in_channels],
  and the filter is reshaped to
    [1, filter_width, in_channels, out_channels].
  The result is then reshaped back to
    [batch, out_width, out_channels]
  \(where out_width is a function of the stride and padding as in conv2d\) and
  returned to the caller.

  Args:
    input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
    filters: A 3D `Tensor`. Must have the same type as `input`.
    stride: An int or list of `ints` that has length `1` or `3`. The number of
      entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
      the data is stored in the order of [batch, in_width, in_channels]. The
      `"NCW"` format stores data as [batch, in_channels, in_width].
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Delegate to the v1 implementation; the v2 endpoint always allows cuDNN
  # (there is no `use_cudnn_on_gpu` knob in the v2 API).
  return conv1d(
      input,  # pylint: disable=redefined-builtin
      filters,
      stride,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      name=name,
      dilations=dilations)
@tf_export("nn.conv1d_transpose")
def conv1d_transpose(
    input,  # pylint: disable=redefined-builtin
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NWC",
    dilations=None,
    name=None):
  """The transpose of `conv1d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
  rather than an actual deconvolution.

  Args:
    input: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filters: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor`, containing three elements, representing the
      output shape of the deconvolution op.
    strides: An int or list of `ints` that has length `1` or `3`.  The number of
      entries by which the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. `'NWC'` and `'NCW'` are supported.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, if
      `output_shape` is not at 3-element vector, if `padding` is other than
      `'VALID'` or `'SAME'`, or if `data_format` is invalid.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [input, filters, output_shape]) as name:
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      channel_index = 2
    elif data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      channel_index = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")

    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    # Prepend a unit stride/dilation for the inserted (height) dimension.
    strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    # Add a dummy spatial dimension of size 1 to the input and the filters.
    input = array_ops.expand_dims(input, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)
    # Insert a 1 for the dummy dimension into the requested output shape, so
    # it matches the 4-D shape expected by conv2d_backprop_input.  Python
    # sequences are sliced directly; a Tensor output_shape is concat'd.
    output_shape = list(output_shape) if not isinstance(
        output_shape, ops.Tensor) else output_shape
    output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
                                     output_shape[spatial_start_dim:]], 0)

    # conv1d_transpose is implemented as the input-gradient of conv2d.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
    # Drop the dummy spatial dimension to restore a 3-D result.
    return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NHWC",
              dilations=None,
              name=None):
  # pylint: disable=line-too-long
  r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                          filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Usage Example:

  >>> x_in = np.array([[
  ...   [[2], [1], [2], [0], [1]],
  ...   [[1], [3], [2], [2], [3]],
  ...   [[1], [1], [3], [3], [0]],
  ...   [[2], [2], [0], [1], [1]],
  ...   [[0], [0], [3], [1], [2]], ]])
  >>> kernel_in = np.array([
  ...  [ [[2, 0.1]], [[3, 0.2]] ],
  ...  [ [[0, 0.3]],[[1, 0.4]] ], ])
  >>> x = tf.constant(x_in, dtype=tf.float32)
  >>> kernel = tf.constant(kernel_in, dtype=tf.float32)
  >>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
  <tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filters: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`.  The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of`input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # pylint: enable=line-too-long
  # Delegate to the v1 implementation; the v2 endpoint always allows cuDNN
  # (there is no `use_cudnn_on_gpu` knob in the v2 API).
  return conv2d(input,  # pylint: disable=redefined-builtin
                filters,
                strides,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                dilations=dilations,
                name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
                          * filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`.  The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of`input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Resolve the deprecated `filters` alias in favor of `filter`.
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Translate a list `padding` into the "EXPLICIT" attribute form expected by
  # the C++ kernel.
  padding, explicit_paddings = _convert_padding(padding)
  if data_format is None:
    data_format = "NHWC"
  channel_index = 1 if data_format.startswith("NC") else 3
  # Expand scalar / length-2 strides and dilations to the full length-4 form.
  strides = _get_sequence(strides, 2, channel_index, "strides")
  dilations = _get_sequence(dilations, 2, channel_index, "dilations")
  return gen_nn_ops.conv2d(input,  # pylint: disable=redefined-builtin
                           filter,
                           strides,
                           padding,
                           use_cudnn_on_gpu=use_cudnn_on_gpu,
                           explicit_paddings=explicit_paddings,
                           data_format=data_format,
                           dilations=dilations,
                           name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Translate a list `padding` into the "EXPLICIT" attribute form expected by
  # the C++ kernel.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(
      input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
      explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter=None,
    out_backprop=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`,
      where `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # Resolve the deprecated `filters` alias in favor of `filter`.
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Translate a list `padding` into the "EXPLICIT" attribute form expected by
  # the C++ kernel.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(
      input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
      explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
    value=None,
    filter=None,  # pylint: disable=redefined-builtin
    output_shape=None,
    strides=None,
    padding="SAME",
    data_format="NHWC",
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    filters=None,
    dilations=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
  rather than an actual deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]` for `NHWC` data format or
      `[batch, in_channels, height, width]` for `NCHW` data format.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.  The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.
    input: Alias for value.
    filters: Alias for filter.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of`input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # Resolve the deprecated `input`/`filters` aliases, then delegate to the v2
  # implementation.
  value = deprecated_argument_lookup("input", input, "value", value)
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  with ops.name_scope(name, "conv2d_transpose",
                      [value, filter, output_shape]) as name:
    return conv2d_transpose_v2(
        value,
        filter,
        output_shape,
        strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    dilations=None,
    name=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of
  `conv2d` rather than an actual deconvolution.

  Args:
    input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
      in_channels]` for `NHWC` data format or `[batch, in_channels, height,
      width]` for `NCHW` data format.
    filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
      width, output_channels, in_channels]`.  `filter`'s `in_channels` dimension
      must match that of `input`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.  The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of`input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `input`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # BUG FIX: the name_scope values list previously referenced the Python
  # builtin `filter` (this function's parameter is named `filters`), passing
  # the builtin function object into the graph name scope.
  with ops.name_scope(name, "conv2d_transpose",
                      [input, filters, output_shape]) as name:
    if data_format is None:
      data_format = "NHWC"
    channel_index = 1 if data_format.startswith("NC") else 3

    # Expand scalar / length-2 strides and dilations to the full length-4 form.
    strides = _get_sequence(strides, 2, channel_index, "strides")
    dilations = _get_sequence(dilations, 2, channel_index, "dilations")

    # conv2d_transpose is implemented as the input-gradient of conv2d.
    return gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
                            filters,
                            output_shape,
                            rate,
                            padding,
                            name=None):
  """The transpose of `atrous_conv2d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of
  `atrous_conv2d` rather than an actual deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
      format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, out_channels, in_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution is
      equivalent to standard convolution with upsampled filters with effective
      height `filter_height + (filter_height - 1) * (rate - 1)` and effective
      width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros along consecutive elements across the
      `filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` of shape representing the output shape of the
      deconvolution op.
    rate: A positive int32. The stride with which we sample input values across
      the `height` and `width` dimensions. Equivalently, the rate by which we
      upsample the filter values by inserting zeros across the `height` and
      `width` dimensions. In the literature, the same parameter is sometimes
      called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
      than one, or if the output_shape is not a tensor with 4 elements.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  with ops.name_scope(name, "atrous_conv2d_transpose",
                      [value, filters, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    # Static-shape validation: channel counts must agree when known.
    if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value.get_shape()[3],
                            filters.get_shape()[3]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))

    # rate == 1 degenerates to an ordinary transposed convolution.
    if rate == 1:
      return conv2d_transpose(
          value,
          filters,
          output_shape,
          strides=[1, 1, 1, 1],
          padding=padding,
          data_format="NHWC")

    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(
        tensor_shape.TensorShape([4])):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))

    if isinstance(output_shape, tuple):
      output_shape = list(output_shape)

    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3],
                              filters.get_shape()[2]))

    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      # Handle filters whose shape is unknown during graph creation.
      if filters.get_shape().is_fully_defined():
        filter_shape = filters.get_shape().as_list()
      else:
        filter_shape = array_ops.shape(filters)
      filter_height, filter_width = filter_shape[0], filter_shape[1]

      # Spatial dimensions of the filters and the upsampled filters in which we
      # introduce (rate - 1) zeros between consecutive filter values.
      filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
      filter_width_up = filter_width + (filter_width - 1) * (rate - 1)

      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1

      # When pad_height (pad_width) is odd, we pad more to bottom (right),
      # following the same convention as conv2d().
      pad_top = pad_height // 2
      pad_bottom = pad_height - pad_top
      pad_left = pad_width // 2
      pad_right = pad_width - pad_left
    elif padding == "VALID":
      pad_top = 0
      pad_bottom = 0
      pad_left = 0
      pad_right = 0
    else:
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))

    # Padded size of the (virtual) input to the underlying VALID convolution.
    in_height = output_shape[1] + pad_top + pad_bottom
    in_width = output_shape[2] + pad_left + pad_right

    # More padding so that rate divides the height and width of the input.
    pad_bottom_extra = (rate - in_height % rate) % rate
    pad_right_extra = (rate - in_width % rate) % rate

    # The paddings argument to space_to_batch is just the extra
    # padding component.
    space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]

    # space_to_batch rearranges the rate x rate subsampled grids into the batch
    # dimension, so a stride-1 conv2d_backprop_input below acts atrously.
    value = array_ops.space_to_batch(
        input=value, paddings=space_to_batch_pad, block_size=rate)

    input_sizes = [
        rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
        (in_width + pad_right_extra) // rate, output_shape[3]
    ]

    value = gen_nn_ops.conv2d_backprop_input(
        input_sizes=input_sizes,
        filter=filters,
        out_backprop=value,
        strides=[1, 1, 1, 1],
        padding="VALID",
        data_format="NHWC")

    # The crops argument to batch_to_space includes both padding components.
    batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
                           [pad_left, pad_right + pad_right_extra]]

    return array_ops.batch_to_space(
        input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return gen_nn_ops.conv3d(input,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv3d"])
def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
padding=None,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name=None,
filters=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
return gen_nn_ops.conv3d(
input, filter, strides, padding, data_format, dilations, name)
# Reuse the generated op's docstring for both endpoints; for the v2 endpoint,
# rewrite the v1 argument name `filter` to the v2 name `filters`.
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
    gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
value,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NDHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
input: Alias of value.
filters: Alias of filter.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 5-d tensor
must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
value = deprecated_argument_lookup("input", input, "value", value)
return conv3d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
input: A 5-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
filters: A 5-D `Tensor` with the same type as `value` and shape `[height,
width, output_channels, in_channels]`. `filter`'s `in_channels` dimension
must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `D`, `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 0. The dimension order is
determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 5-d tensor
must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv3d_transpose",
[input, filter, output_shape]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
strides = _get_sequence(strides, 3, channel_index, "strides")
dilations = _get_sequence(dilations, 3, channel_index, "dilations")
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
# Rank-indexed dispatch table for `conv_transpose`: entry i-1 handles an
# input with i spatial dimensions (1-D, 2-D, 3-D).
CONV_TRANSPOSE_OPS = (
    conv1d_transpose,
    conv2d_transpose_v2,
    conv3d_transpose_v2,
    )
@tf_export("nn.conv_transpose")
def conv_transpose(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format=None,
dilations=None,
name=None):
"""The transpose of `convolution`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
input: An N+2 dimensional `Tensor` of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC". It must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
filters: An N+2 dimensional `Tensor` with the same type as `input` and
shape `spatial_filter_shape + [in_channels, out_channels]`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the spatial dimensions. By default
the `N` and `C` dimensions are set to 0. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the spatial dimensions. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details.
name: A name for the operation (optional). If not specified "conv_transpose"
is used.
Returns:
A `Tensor` with the same type as `value`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv_transpose",
[input, filter, output_shape]) as name:
if tensor_util.is_tensor(output_shape):
n = output_shape.shape[0] - 2
elif isinstance(output_shape, collections.Sized):
n = len(output_shape) - 2
else:
raise ValueError("output_shape must be a tensor or sized collection.")
if not 1 <= n <= 3:
raise ValueError(
"output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))
op = CONV_TRANSPOSE_OPS[n-1]
return op(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def _tf_deterministic_ops():
  """Returns True iff TF_DETERMINISTIC_OPS is set to "true" or "1".

  The environment variable is read once and the result is memoized on the
  function object itself, so later changes to the environment have no effect.
  """
  if _tf_deterministic_ops.value is None:
    env_setting = os.environ.get("TF_DETERMINISTIC_OPS")
    _tf_deterministic_ops.value = (
        env_setting is not None and env_setting.lower() in ("true", "1"))
  return _tf_deterministic_ops.value


# Memoization slot; None means "not yet read from the environment".
_tf_deterministic_ops.value = None
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the
default) is specified then 'N..C' is assumed.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError if data format is unrecognized, if `value` has less than two
dimensions when `data_format` is 'N..C'/`None` or `value` has less
then three dimensions when `data_format` is `NC..`, if `bias` does not
have exactly one dimension (is a vector), or if the size of `bias`
does not match the size of the channel dimension of `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
# TODO(duncanriach): Implement deterministic functionality at CUDA kernel
# level.
if _tf_deterministic_ops():
# Note that this code does not implement the same error checks as the
# pre-existing C++ ops.
if data_format == "NCHW":
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(
array_ops.rank(value) - 2, dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
else:
return gen_nn_ops.bias_add(
value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` (deprecated; will soon be removed).

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D. Broadcasting is supported, so `value` may have any number of
  dimensions. Unlike `tf.add`, the type of `bias` may differ from `value`
  when both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value_tensor = ops.convert_to_tensor(value, name="input")
    bias_tensor = ops.convert_to_tensor(
        bias, dtype=value_tensor.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(value_tensor, bias_tensor, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
Convolutional Deep Belief Networks on CIFAR-10:
Krizhevsky et al., 2010
([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
(https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
References:
Rectifier Nonlinearities Improve Neural Network Acoustic Models:
[Maas et al., 2013]
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.cast(features, dtypes.float32)
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
def _flatten_outer_dims(logits):
  """Collapses all but the last dimension of `logits` into a single one."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # In graph mode, propagate a static shape when every outer dim is known.
  if not context.executing_eagerly():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      outer = 1
      for d in dims[:-1]:
        if d is None:
          outer = None  # an unknown outer dim makes the product unknown
          break
        outer *= d
      if outer is not None:
        output.set_shape([outer, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then
  invokes the tf.nn._softmax or tf.nn._log_softmax function. The output would
  be transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension. May be a Python int or a scalar `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    # Permutation: [0..dim_index-1, last, dim_index+1..last-1, dim_index].
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)

  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # Fast path: the kernel already reduces over the last dimension.
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)

  # Validate `dim` statically when possible (it may be a symbolic Tensor
  # whose constant value cannot be resolved, in which case skip the check).
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))

  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.

  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    # Symbolic dim: resolve the sign at graph-execution time instead.
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))

  # Do the actual softmax on its last dimension.
  output = compute_op(logits)

  # Swap back so the reduced axis returns to its original position.
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)

  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)

  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
See: https://en.wikipedia.org/wiki/Softmax_function
Example usage:
>>> tf.nn.softmax([-1, 0., 1.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
Args:
logits: A non-empty `Tensor`, or an object whose type has a registered
`Tensor` conversion function. Must be one of the following types:
`half`,`float32`, `float64`. See also `convert_to_tensor`
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
TypeError: If no conversion function is registered for `logits` to
Tensor.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
  """Validates that a cross-entropy wrapper received only named arguments."""
  # A non-None sentinel means a positional argument slipped into the slot
  # reserved for keyword-only enforcement.
  if sentinel is not None:
    raise ValueError(
        "Only call `%s` with named arguments (labels=..., logits=..., ...)"
        % name)
  if labels is None or logits is None:
    raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
Usage:
>>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
>>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
>>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([0.16984604, 0.82474494], dtype=float32)>
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None,
    axis=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks where each
  example belongs to exactly one class (the classes are mutually exclusive).
  For instance, a CIFAR-10 image carries one and only one label: it may be a
  dog or a truck, but never both.

  **NOTE:** Although the classes are mutually exclusive, their probabilities
  need not be; the only requirement is that every row of `labels` forms a
  valid probability distribution. Otherwise the gradient computation will be
  incorrect.

  For exclusive `labels` (wherein one and only one class is true at a time),
  see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op applies a `softmax` to `logits` internally for
  efficiency, so it expects unscaled logits. Feeding it the output of
  `softmax` produces incorrect results.

  Logits and labels of shape `[batch_size, num_classes]` are the common case,
  but higher dimensions are supported via the `dim` argument, which selects
  the class dimension.

  Backpropagation happens only into `logits`. For a cross entropy loss that
  also allows backpropagation into `labels`, see
  `tf.nn.softmax_cross_entropy_with_logits_v2`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution, e.g. for labels of shape
      `[batch_size, num_classes]` every row `labels[i]` must be a valid
      probability distribution.
    logits: Per-label activations, typically a linear output. These activation
      energies are interpreted as unnormalized log probabilities.
    dim: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    axis: Alias for dim.

  Returns:
    A `Tensor` containing the softmax cross entropy loss. Its type matches
    `logits` and its shape matches `labels` except that the class dimension
    is removed.
  """
  # `axis` is a deprecated alias for `dim`; at most one may be supplied.
  dim = deprecated_argument_lookup("axis", axis, "dim", dim)
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # v1 contract: no gradient flows back into `labels`.
    frozen_labels = array_ops.stop_gradient(labels,
                                            name="labels_stop_gradient")
    return softmax_cross_entropy_with_logits_v2(
        labels=frozen_labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Per-label activations (typically a linear output) of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`. These activation energies are interpreted as unnormalized log
      probabilities.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.
  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # Compute in float32 when logits are float16; the result is cast back to
    # float16 before returning to preserve the caller-visible dtype.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    # Static (graph-construction-time) validation; dynamic checks below cover
    # the cases the static shapes cannot decide.
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    if logits.get_shape().ndims == 2:
      # Fast path: inputs are already rank-2/rank-1, call the kernel directly.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      cost = array_ops.reshape(cost, labels_shape)
      # Restore the static shape info lost by the dynamic reshapes above.
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks where the
  classes are mutually exclusive (each entry belongs to exactly one class).
  For example, each CIFAR-10 image has one and only one label: it can be a
  dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. Soft classes are not allowed: the `labels` vector must supply a
  single specific index for the true class for each row of `logits` (each
  minibatch entry). For soft softmax classification with a probability
  distribution per entry, see `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op applies a `softmax` to `logits` internally for
  efficiency, so it expects unscaled logits. Feeding it the output of
  `softmax` produces incorrect results.

  A common use case has logits of shape `[batch_size, num_classes]` and labels
  of shape `[batch_size]`; higher dimensions are supported, in which case the
  `dim`-th dimension is assumed to be of size `num_classes`. `logits` must
  have dtype `float16`, `float32`, or `float64`; `labels` must have dtype
  `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is the
      rank of `labels` and of the result) and dtype `int32` or `int64`. Each
      entry must be an index in `[0, num_classes)`. Other values raise an
      exception when this op runs on CPU, and yield `NaN` for the
      corresponding loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1},
      num_classes]` and dtype `float16`, `float32`, or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and the same type as `logits`
    holding the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (rank must be >= 1) or if the rank of
      the labels does not equal the rank of the logits minus one.
  """
  # The v2 endpoint is a keyword-only facade over the v1 implementation
  # (which performs all the shape validation).
  return sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name=name)
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None):  # pylint: disable=redefined-builtin
  """Performs the avg pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if `data_format` does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC". Pooling happens over the spatial dimensions only.
    ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
      of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. Specifies the channel dimension. For N=1 it can be
      either "NWC" (default) or "NCW", for N=2 it can be either "NHWC"
      (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The average pooled output tensor.
  """
  # Infer the number of spatial dimensions N: prefer the input's rank, fall
  # back to the length of the data-format string.
  if input.shape is not None:
    num_spatial_dims = len(input.shape) - 2
  elif data_format is not None:
    num_spatial_dims = len(data_format) - 2
  else:
    raise ValueError(
        "The input must have a rank or a data format must be given.")
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(
            num_spatial_dims + 2))

  # Channels-first ("NC...") puts the channel axis right after batch;
  # otherwise it is the last axis.
  if data_format is not None and data_format.startswith("NC"):
    channel_index = 1
  else:
    channel_index = num_spatial_dims + 1

  ksize = _get_sequence(ksize, num_spatial_dims, channel_index, "ksize")
  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")

  # Dispatch on N: 1-D pooling is emulated via the 2-D kernel by avg_pool1d.
  pool_op = {
      1: avg_pool1d,
      2: gen_nn_ops.avg_pool,
      3: gen_nn_ops.avg_pool3d,
  }[num_spatial_dims]
  return pool_op(
      input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
             name=None, input=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.
    input: Alias for value.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  # Resolve the deprecated `input` alias BEFORE opening the name scope, so the
  # scope's values list receives the actual tensor rather than `None` when the
  # caller passes only `input=`. This also matches `max_pool`, which performs
  # the lookup first.
  value = deprecation.deprecated_argument_lookup(
      "input", input, "value", value)
  with ops.name_scope(name, "AvgPool", [value]) as name:
    if data_format is None:
      data_format = "NHWC"
    channel_index = 1 if data_format.startswith("NC") else 3

    # Expand scalar/short ksize and strides to the full 4-element form with 1s
    # in the batch and channel positions.
    ksize = _get_sequence(ksize, 2, channel_index, "ksize")
    strides = _get_sequence(strides, 2, channel_index, "strides")

    return gen_nn_ops.avg_pool(
        value,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.avg_pool2d", v1=[])
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool2D", [input]) as name:
    data_format = "NHWC" if data_format is None else data_format
    # Channel axis is 1 for channels-first layouts, 3 otherwise.
    channel_axis = 1 if data_format.startswith("NC") else 3
    full_ksize = _get_sequence(ksize, 2, channel_axis, "ksize")
    full_strides = _get_sequence(strides, 2, channel_axis, "strides")
    return gen_nn_ops.avg_pool(
        input,
        ksize=full_ksize,
        strides=full_strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.avg_pool1d")
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Note internally this op reshapes and uses the underlying 2d operation.

  Args:
    input: A 3-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1` or `3`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1` or `3`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool1D", [input]) as name:
    data_format = "NWC" if data_format is None else data_format
    channel_axis = 1 if data_format.startswith("NC") else 2
    # Prepend a 1 for the synthetic height dimension added below.
    ksize = [1] + _get_sequence(ksize, 1, channel_axis, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_axis, "strides")
    # Insert a height-1 axis and delegate to the 2-D average-pooling kernel,
    # then squeeze the synthetic axis back out.
    height_axis = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"
    expanded = array_ops.expand_dims_v2(input, height_axis)
    pooled = gen_nn_ops.avg_pool(
        expanded,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    return array_ops.squeeze(pooled, height_axis)
@tf_export("nn.avg_pool3d")
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]`
      and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool3D", [input]) as name:
    if data_format is None:
      data_format = "NDHWC"
    # For a 5-D NDHWC tensor the channel axis is 4, not 3 (index 3 is a
    # spatial dimension). Using 3 here would make _get_sequence place the
    # unit entry on a spatial axis and pool over channels; cf. `max_pool3d`.
    channel_index = 1 if data_format.startswith("NC") else 4

    ksize = _get_sequence(ksize, 3, channel_index, "ksize")
    strides = _get_sequence(strides, 3, channel_index, "strides")

    return gen_nn_ops.avg_pool3d(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
  """Performs the max pooling on the input.

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if `data_format` does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC". Pooling happens over the spatial dimensions only.
    ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
      of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. Specifies the channel dimension. For N=1 it can be
      either "NWC" (default) or "NCW", for N=2 it can be either "NHWC"
      (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  # Infer the number of spatial dimensions N: prefer the input's rank, fall
  # back to the length of the data-format string.
  if input.shape is not None:
    num_spatial_dims = len(input.shape) - 2
  elif data_format is not None:
    num_spatial_dims = len(data_format) - 2
  else:
    raise ValueError(
        "The input must have a rank or a data format must be given.")
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(
            num_spatial_dims + 2))

  # Channels-first ("NC...") puts the channel axis right after batch;
  # otherwise it is the last axis.
  if data_format is not None and data_format.startswith("NC"):
    channel_index = 1
  else:
    channel_index = num_spatial_dims + 1

  ksize = _get_sequence(ksize, num_spatial_dims, channel_index, "ksize")
  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")

  # Dispatch on N: 1-D pooling is emulated via the 2-D kernel by max_pool1d.
  pool_op = {
      1: max_pool1d,
      2: gen_nn_ops.max_pool,
      3: gen_nn_ops.max_pool3d,
  }[num_spatial_dims]
  return pool_op(
      input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
def max_pool(value,
             ksize,
             strides,
             padding,
             data_format="NHWC",
             name=None,
             input=None):  # pylint: disable=redefined-builtin
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`.
      The size of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.
      The stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.
    input: Alias for value.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  # `input` is a deprecated alias for `value`; resolve it first.
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "MaxPool", [value]) as name:
    data_format = "NHWC" if data_format is None else data_format
    channel_axis = 1 if data_format.startswith("NC") else 3
    ksize = _get_sequence(ksize, 2, channel_axis, "ksize")
    strides = _get_sequence(strides, 2, channel_axis, "strides")

    # A zero anywhere in ksize would define an empty pooling window.
    zero_scalar = np.isscalar(ksize) and ksize == 0
    zero_entry = (isinstance(ksize, (list, tuple, np.ndarray)) and
                  any(v == 0 for v in ksize))
    if zero_scalar or zero_entry:
      raise ValueError("ksize cannot be zero.")

    return gen_nn_ops.max_pool(
        value,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
  """Performs the max pooling on the input.

  Note internally this op reshapes and uses the underlying 2d operation.

  Args:
    input: A 3-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1` or `3`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1` or `3`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool1d", [input]) as name:
    data_format = "NWC" if data_format is None else data_format
    channel_axis = 1 if data_format.startswith("NC") else 2
    # Prepend a 1 for the synthetic height dimension added below.
    ksize = [1] + _get_sequence(ksize, 1, channel_axis, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_axis, "strides")
    # Insert a height-1 axis and delegate to the 2-D max-pooling kernel, then
    # squeeze the synthetic axis back out.
    height_axis = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"
    expanded = array_ops.expand_dims_v2(input, height_axis)
    pooled = gen_nn_ops.max_pool(
        expanded,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    return array_ops.squeeze(pooled, height_axis)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    input: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool2d", [input]) as name:
    data_format = "NHWC" if data_format is None else data_format
    # Channel axis is 1 for channels-first layouts, 3 otherwise.
    channel_axis = 1 if data_format.startswith("NC") else 3
    full_ksize = _get_sequence(ksize, 2, channel_axis, "ksize")
    full_strides = _get_sequence(strides, 2, channel_axis, "strides")
    return gen_nn_ops.max_pool(
        input,
        ksize=full_ksize,
        strides=full_strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    input: A 5-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
      The data format of the input and output data. With the default format
      "NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
      in_width, in_channels]. Alternatively, the format could be "NCDHW", the
      data storage order is: [batch, in_channels, in_depth, in_height,
      in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool3D", [input]) as name:
    data_format = "NDHWC" if data_format is None else data_format
    # For a 5-D NDHWC tensor the channel axis is the last one (index 4).
    channel_axis = 1 if data_format.startswith("NC") else 4
    full_ksize = _get_sequence(ksize, 3, channel_axis, "ksize")
    full_strides = _get_sequence(strides, 3, channel_axis, "strides")
    return gen_nn_ops.max_pool3d(
        input,
        ksize=full_ksize,
        strides=full_strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(
    input,  # pylint: disable=redefined-builtin
    ksize,
    strides,
    padding,
    data_format="NHWC",
    output_dtype=dtypes.int64,
    include_batch_in_index=False,
    name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
  `include_batch_in_index` is False;
  `((b * height + y) * width + x) * channels + c`
  if `include_batch_in_index` is True.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct answer
  is outside (either negative or too large). This is a bug, but fixing it is
  difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`.
      The size of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`.
      Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
      Defaults to `tf.int64`.
      The dtype of the returned argmax tensor.
    include_batch_in_index: An optional `boolean`. Defaults to `False`.
      Whether to include batch dimension in flattened index of `argmax`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.
  """
  # The underlying kernel only implements channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  # Expand ksize/strides to the full NHWC 4-element form (channel axis = 3).
  ksize = _get_sequence(ksize, 2, 3, "ksize")
  strides = _get_sequence(strides, 2, 3, "strides")

  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=output_dtype,
      include_batch_in_index=include_batch_in_index,
      name=name)
@tf_export(v1=["nn.max_pool_with_argmax"])
def max_pool_with_argmax_v1(  # pylint: disable=missing-docstring,invalid-name
    input,  # pylint: disable=redefined-builtin
    ksize,
    strides,
    padding,
    data_format="NHWC",
    Targmax=None,
    name=None,
    output_dtype=None,
    include_batch_in_index=False):
  # The underlying kernel only implements channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  # `output_dtype` is the preferred spelling; `Targmax` is kept for backwards
  # compatibility. At most one of the two may be supplied.
  Targmax = deprecated_argument_lookup(
      "output_dtype", output_dtype, "Targmax", Targmax)
  Targmax = dtypes.int64 if Targmax is None else Targmax

  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=Targmax,
      include_batch_in_index=include_batch_in_index,
      name=name)
# Reuse the generated op's docstring (the wrapper adds no new semantics).
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
  """Calculates the compute resources needed for Conv3D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Filter layout is [time, height, width, in_depth, out_depth].
  filter_taps = (int(filter_shape[0]) * int(filter_shape[1]) *
                 int(filter_shape[2]) * int(filter_shape[3]))
  output_count = np.prod(output_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per filter tap per output element.
  return ops.OpStats("flops", output_count * filter_taps * 2)
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Filter layout is [height, width, in_depth, out_depth].
  filter_taps = (int(filter_shape[0]) * int(filter_shape[1]) *
                 int(filter_shape[2]))
  output_count = np.prod(output_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per filter tap per output element.
  return ops.OpStats("flops", output_count * filter_taps * 2)
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Depthwise convolution touches each input channel independently, so only
  # the spatial filter extent contributes per output element.
  filter_taps = int(filter_shape[0]) * int(filter_shape[1])
  output_count = np.prod(output_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per filter tap per output element.
  return ops.OpStats("flops", output_count * filter_taps * 2)
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd (one add per input element)."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  # Accumulate in int64, consistent with the other flops calculators in this
  # file, to avoid overflow on platforms where the default numpy int is 32-bit.
  input_count = np.prod(input_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Returns `matmul(x, weights) + biases`.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # bias_add broadcasts `biases` over the batch dimension of the product.
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Returns `matmul(x, weights) + biases` using the v1 bias add.

  This is a deprecated version that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Identical to xw_plus_b except that the deprecated bias_add_v1 is used.
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout noise shape, filling unknown dims from `x`."""
  # With no noise_shape given, every element of `x` is dropped independently.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to interpret the requested shape statically.  If that is
    # not possible, the op validates it at run time (in eager mode the
    # exception surfaces there).
    parsed = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is None or len(x_dims) != len(parsed.dims):
    return noise_shape

  # Borrow statically known sizes from `x` wherever noise_shape left a
  # dimension unspecified.
  merged = [
      x_dim.value
      if (noise_dim.value is None and x_dim.value is not None)
      else noise_dim.value
      for x_dim, noise_dim in zip(x_dims, parsed.dims)
  ]
  return tensor_shape.TensorShape(merged)
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """Computes dropout (v1 API).

  With probability `rate` each element of `x` is zeroed; surviving elements
  are scaled by `1 / (1 - rate)` so the expected sum is unchanged.

  By default every element is kept or dropped independently.  If
  `noise_shape` is given it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`; only dimensions with `noise_shape[i] == shape(x)[i]`
  make independent decisions.  For instance with `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch/channel component is kept
  independently while rows and columns are kept or dropped together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`.  The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  if keep_prob is None:
    keep = None
  else:
    try:
      # Translate the legacy keep_prob into the equivalent drop rate.
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup("rate", rate,
                                                "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  # All of the real work happens in the v2 implementation.
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
  """Computes dropout: randomly sets elements to zero to prevent overfitting.

  Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
  When converting 1.x code, please use named arguments to ensure behavior stays
  consistent.

  See also: `tf.keras.layers.Dropout` for a dropout layer.

  [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
  models. Inputs elements are randomly set to zero (and the other elements are
  rescaled). This encourages each node to be independently useful, as it cannot
  rely on the output of other nodes.

  More precisely: With probability `rate` elements of `x` are set to `0`.
  The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
  expected value is preserved.

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,5])
  >>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
  array([[2., 0., 0., 2., 2.],
       [2., 2., 2., 2., 2.],
       [2., 0., 2., 0., 2.]], dtype=float32)

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,5])
  >>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
  array([[0., 0., 0., 5., 5.],
       [0., 5., 0., 5., 0.],
       [5., 0., 5., 0., 5.]], dtype=float32)

  >>> tf.nn.dropout(x, rate = 0.0) == x
  <tf.Tensor: shape=(3, 5), dtype=bool, numpy=
  array([[ True,  True,  True,  True,  True],
         [ True,  True,  True,  True,  True],
         [ True,  True,  True,  True,  True]])>

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  This is useful for dropping whole
  channels from an image or sequence.  For example:

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,10])
  >>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
  array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
       [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
       [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
      tensor. `rate=1` is disallowed, because the output would be all zeros,
      which is likely not what was intended.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    is_rate_number = isinstance(rate, numbers.Real)
    if is_rate_number and (rate < 0 or rate >= 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)
    x = ops.convert_to_tensor(x, name="x")
    x_dtype = x.dtype
    if not x_dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going "
                       "to be scaled. Got a %s tensor instead." % x_dtype)
    is_executing_eagerly = context.executing_eagerly()
    if not tensor_util.is_tensor(rate):
      if is_rate_number:
        keep_prob = 1 - rate
        scale = 1 / keep_prob
        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
        ret = gen_math_ops.mul(x, scale)
      else:
        raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
    else:
      rate.get_shape().assert_has_rank(0)
      rate_dtype = rate.dtype
      if rate_dtype != x_dtype:
        if not rate_dtype.is_compatible_with(x_dtype):
          raise ValueError(
              "Tensor dtype %s is incompatible with Tensor dtype %s: %r" %
              (x_dtype.name, rate_dtype.name, rate))
        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
      one_tensor = constant_op.constant(1, dtype=x_dtype)
      ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))

    noise_shape = _get_noise_shape(x, noise_shape)
    # Sample a uniform distribution on [0.0, 1.0) and select values larger
    # than rate.
    #
    # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
    # and subtract 1.0.
    random_tensor = random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x_dtype)
    # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
    # hence a >= comparison is used.
    keep_mask = random_tensor >= rate
    ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
    if not is_executing_eagerly:
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  For a vector (rank-1) input, returns the `k` largest entries and their
  indices as vectors, so `values[j]` is the `j`-th largest entry of `input`
  and `indices[j]` is its position.  For higher-rank input the top `k`
  entries are computed along the last dimension, so that:

      values.shape = indices.shape = input.shape[:-1] + [k]

  Ties are broken in favor of the lower-index element.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the
      last dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values
      in descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # TopKV2 returns the (values, indices) pair directly.
  result = gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
  return result
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th smallest value for the last dimension.

  Note that n is zero-indexed.

  For a vector (rank-1) input, returns the `n`-th smallest entry as a scalar
  tensor.  For higher-rank input the `n`-th smallest entry is computed along
  the last dimension (per row for matrices), so that:

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.  0-D.  Position of sorted vector to select
      along the last dimension (along each row for matrices).  Valid range of
      n is `[0, input.shape[:-1])`.
    reverse: An optional `bool`.  Defaults to `False`.  When set to True,
      find the nth-largest value in the vector instead.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  # Thin wrapper over the generated kernel binding.
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  This is a deprecated version of `fractional_max_pool`.

  Unlike regular max pooling, where the reduction factor N along each spatial
  dimension is an integer, fractional max pooling allows a non-integer
  overall reduction ratio.  Pooling-region boundaries are generated randomly
  but fairly uniformly: for the height dimension, with
  `alpha = input_row_length / output_row_length` and `K = floor(alpha)`, the
  generated row boundary sequence `a` satisfies:

  1. `a[0] = 0` and `a[end] = input_row_length`,
  2. `K <= (a[i+1] - a[i]) <= K+1` for every interval,
  3. `length(a) = output_row_length + 1`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; currently only the row and col
      dimensions are supported and must be >= 1.0.  The first and last
      elements must be 1.0 because pooling on the batch and channels
      dimensions is not allowed, e.g. `[1.0, 1.44, 1.73, 1.0]` pools by 1.44
      and 1.73 on height and width respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      the pooling sequence is generated pseudorandomly rather than randomly;
      see (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.  For example, with values `[20, 5, 16, 3, 7]` and pooling
      sequence `[0, 2, 4]`, the value 16 at index 2 is used twice and the
      result is `[20, 16]`.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_max_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_max_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Pass every argument straight through to the generated kernel binding.
  return gen_nn_ops.fractional_max_pool(
      value,
      pooling_ratio,
      pseudo_random=pseudo_random,
      overlapping=overlapping,
      deterministic=deterministic,
      seed=seed,
      seed2=seed2,
      name=name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Unlike regular max pooling, where the reduction factor N along each spatial
  dimension is an integer, fractional max pooling allows a non-integer
  overall reduction ratio.  Pooling-region boundaries are generated randomly
  but fairly uniformly: for the height dimension, with
  `alpha = input_row_length / output_row_length` and `K = floor(alpha)`, the
  generated row boundary sequence `a` satisfies:

  1. `a[0] = 0` and `a[end] = input_row_length`,
  2. `K <= (a[i+1] - a[i]) <= K+1` for every interval,
  3. `length(a) = output_row_length + 1`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
      Pooling ratio for each dimension of `value`; currently only the row and
      col dimensions are supported and must be >= 1.0.  The first and last
      elements must be 1.0 because pooling on the batch and channels
      dimensions is not allowed, e.g. `[1.0, 1.44, 1.73, 1.0]` pools by 1.44
      and 1.73 on height and width respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      the pooling sequence is generated pseudorandomly rather than randomly;
      see the paper (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.  For example, with values `[20, 5, 16, 3, 7]` and pooling
      sequence `[0, 2, 4]`, the value 16 at index 2 is used twice and the
      result is `[20, 16]`.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Accept an int or a short sequence and expand it to the full NHWC form.
  pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    # Unseeded: let the kernel pick its own randomness.
    op_seed, op_seed2 = 0, 0
    deterministic = False
  else:
    # Seeded: split the user seed into the kernel's two-seed form.
    op_seed, op_seed2 = random_seed.get_seed(seed)
    deterministic = True
  return gen_nn_ops.fractional_max_pool(
      value, pooling_ratio, pseudo_random, overlapping,
      deterministic=deterministic, seed=op_seed, seed2=op_seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  This is a deprecated version of `fractional_avg_pool`.

  Fractional average pooling generates pooling regions exactly like
  fractional max pooling; the only difference is that each region is reduced
  with a mean instead of a max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; currently only the row and col
      dimensions are supported and must be >= 1.0.  The first and last
      elements must be 1.0 because pooling on the batch and channels
      dimensions is not allowed, e.g. `[1.0, 1.44, 1.73, 1.0]` pools by 1.44
      and 1.73 on height and width respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      the pooling sequence is generated pseudorandomly rather than randomly;
      see the paper (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.  For example, with values `[20, 5, 16, 3, 7]` and pooling
      sequence `[0, 2, 4]`, the value 16 at index 2 is used twice and the
      result is `[20, 16]`.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_avg_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_avg_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Pass every argument straight through to the generated kernel binding.
  return gen_nn_ops.fractional_avg_pool(
      value,
      pooling_ratio,
      pseudo_random=pseudo_random,
      overlapping=overlapping,
      deterministic=deterministic,
      seed=seed,
      seed2=seed2,
      name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Fractional average pooling is similar to Fractional max pooling in the
  pooling region generation step. The only difference is that after pooling
  regions are generated, a mean operation is performed instead of a max
  operation in each pooling region.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
      Pooling ratio for each dimension of `value`, currently only supports row
      and col dimension and should be >= 1.0. For example, a valid pooling
      ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width
      dimensions respectively.
    pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper (Graham, 2015) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice. The result would be [20, 16] for fractional avg pooling.
    seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
      random number generator is seeded by the given seed. Otherwise it is
      seeded by a random seed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling. Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Normalize pooling_ratio exactly as fractional_max_pool_v2 does, so the
  # two v2 APIs accept the same int / length-1/2/4 forms.  A full length-4
  # list is still accepted unchanged, keeping backward compatibility.
  pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=False,
                                          seed=0, seed2=0, name=name)
  else:
    # Split the single user-facing seed into the kernel's two-seed form.
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Estimates the flop cost of a Dilation2D node."""
  shape_of = graph_util.tensor_shape_from_node_def_name
  in_shape = shape_of(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = shape_of(graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = shape_of(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_h = int(kernel_shape[0])
  kernel_w = int(kernel_shape[1])
  num_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  # Each output element does kernel_h * kernel_w add/compare pairs.
  return ops.OpStats("flops", num_outputs * kernel_h * kernel_w * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `kernel` tensor has shape `[kernel_height, kernel_width, depth]`; each
  input channel is processed independently with its own structuring function.
  The output has shape `[batch, out_height, out_width, depth]`, where the
  spatial dimensions depend on the `padding` algorithm.  Only the default
  "NHWC" `data_format` is currently supported.

  In detail, the grayscale morphological 2-D erosion is:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  Duality: eroding `value` by `kernel` equals the negation of dilating
  `-value` by the reflected `kernel`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor`.  Must have the same type as `value`.
      3-D with shape `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4.  The stride of the sliding window for each dimension
      of the input tensor.  Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4.  The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).  If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`.  Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # Erosion is the dual of dilation:
    #   erode(value, kernel) == -dilate(-value, reflect(kernel)).
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filters_height, filters_width, depth]`; each
  input channel is processed independently with its own structuring function.
  The output has shape `[batch, out_height, out_width, depth]`, where the
  spatial dimensions depend on the `padding` algorithm.  Only the default
  "NHWC" `data_format` is currently supported.

  In detail, the grayscale morphological 2-D erosion is:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - dilations[1] * dy,
                            strides[2] * x - dilations[2] * dx,
                            c] -
                      filters[dy, dx, c]

  Duality: eroding `value` by `filters` equals the negation of dilating
  `-value` by the reflected `filters`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`.  Must have the same type as `value`.
      3-D with shape `[filters_height, filters_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4.  The stride of the sliding window for each dimension
      of the input tensor.  Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      1-D of length 4.  The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).  If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`.  Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # Erosion is the dual of dilation:
    #   erode(value, filters) == -dilate(-value, reflect(filters)).
    reflected_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  This outputs a `batch_size` bool array: `out[i]` is `true` when the
  prediction for the target class is finite (not inf, -inf, or nan) and is
  among the top `k` predictions for example `i`.  Note that `InTopK` handles
  ties differently from the `TopK` op: if multiple classes share the same
  prediction value and straddle the top-`k` boundary, all of them count as
  being in the top `k`.

  More formally, let

    \\(predictions_i\\) be the predictions for all classes for example `i`,
    \\(targets_i\\) be the target class for example `i`,
    \\(out_i\\) be the output for example `i`,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`.  Must be one of the following types: `int32`,
      `int64`.  A `batch_size` vector of class ids.
    k: An `int`.  Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.  Computed Precision at `k` as a `bool Tensor`.
  """
  with ops.name_scope(name, "in_top_k"):
    # InTopKV2 takes k as a tensor-compatible argument, unlike the original
    # kernel which baked it in as an attribute.
    result = gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
    return result
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # Identical op to the v1 symbol; only the (targets, predictions) argument
  # order differs between the two public APIs.
  return in_top_k(predictions=predictions, targets=targets, k=k, name=name)

# The v2 wrapper documents the same contract as the v1 implementation.
in_top_k_v2.__doc__ = in_top_k.__doc__
# Re-export the generated quantized-op wrappers under the v1 `tf.nn`
# namespace without adding any Python-level logic of their own.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
|
{
"content_hash": "69850ef12d085cab88415d5ff27045ba",
"timestamp": "",
"source": "github",
"line_count": 5000,
"max_line_length": 116,
"avg_line_length": 40.3668,
"alnum_prop": 0.6527740618528097,
"repo_name": "jhseu/tensorflow",
"id": "e4a477ecda4953180d91e802378e39954af56057",
"size": "202523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/nn_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import
import sys
from subprocess import check_call, check_output
import subprocess
import tempfile
import os
import re
from collections import namedtuple
from numba import config
from numba.roc.hsadrv import devices
from .common import AMDGCNModule
from .config import ROCM_BC_PATH
from . import TRIPLE
from datetime import datetime
from contextlib import contextmanager
from numba import utils
from numba.roc.hsadrv.error import HsaSupportError
# Keep a handle on the real subprocess.check_call before the wrapper defined
# below shadows the module-level name.
_real_check_call = check_call

# When True, echo a timestamp and the command line to stdout and let the
# tools' stderr through instead of discarding it.
NOISY_CMDLINE = False
@contextmanager
def error_pipe():
    """Yield the stderr target to use when invoking external tools.

    With ``NOISY_CMDLINE`` set, child stderr is merged into stdout;
    otherwise it is discarded (``DEVNULL`` on Python 3, an opened
    ``os.devnull`` handle on Python 2).
    """
    if NOISY_CMDLINE:
        yield subprocess.STDOUT
    elif utils.IS_PY3:
        yield subprocess.DEVNULL
    else:
        with open(os.devnull, 'wb') as null_sink:
            yield null_sink
def check_call(*args, **kwargs):
    """Run ``subprocess.check_call`` with optional invocation logging.

    This is so that time is stamped against invocation such that
    correlations can be looked for against messages in the sys and kernel
    logs.  The child's stderr is routed according to ``error_pipe()``.
    Raises ``subprocess.CalledProcessError`` on a non-zero exit status.
    """
    try:
        with error_pipe() as stderr:
            if NOISY_CMDLINE:
                print(datetime.now().strftime("%b %d %H:%M:%S"),
                      file=sys.stdout)
                print('CMD: ' + ';'.join(args), file=sys.stdout)
            ret = _real_check_call(*args, stderr=stderr, **kwargs)
    except subprocess.CalledProcessError as e:
        print(e)
        # Bare ``raise`` preserves the original traceback; the previous
        # ``raise(e)`` form discards it on Python 2.
        raise
    return ret
class CmdLine(object):
    """Driver for the external LLVM/ROCm tools (``opt``, ``llc``,
    ``llvm-link``, ``ld.lld``) used to lower LLVM IR to a BRIG code object.

    Tool paths are resolved eagerly in ``__init__`` (honouring the
    ``HSAILBIN`` environment variable); the command templates are built
    lazily by ``_initialize`` because they embed the target CPU name,
    which requires an active HSA device context.
    """

    def _initialize(self):
        """Build the command-line templates once, on first use.

        Needs a device context to discover the target CPU name, so this
        cannot run from ``__init__``.
        """
        if not self.initialized:
            dev_ctx = devices.get_context()
            target_cpu = dev_ctx.agent.name.decode('UTF-8')
            self.target_cpu = "-mcpu %s" % target_cpu

            # Optimize LLVM IR (IR -> IR).
            self.CMD_OPT = ' '.join([
                self.opt,
                "-O3",
                self.triple_flag,
                self.target_cpu,
                "-disable-simplify-libcalls",
                "-verify",
                "-S",
                "-o {fout}",
                "{fin}"])

            # Verify IR without optimizing.
            self.CMD_VERIFY = ' '.join([
                self.opt,
                "-verify",
                self.triple_flag,
                self.target_cpu,
                "-S",
                "-o {fout}",
                "{fin}"])

            # Compile IR to HSAIL assembly text.
            self.CMD_GEN_HSAIL = ' '.join([self.llc,
                                           "-O2",
                                           self.triple_flag,
                                           self.target_cpu,
                                           "-filetype=asm",
                                           "-o {fout}",
                                           "{fin}"])

            # Compile IR to a BRIG object.
            self.CMD_GEN_BRIG = ' '.join([self.llc,
                                          "-O2",
                                          self.triple_flag,
                                          self.target_cpu,
                                          "-filetype=obj",
                                          "-o {fout}",
                                          "{fin}"])

            # Link one builtin bitcode library into the IR.
            self.CMD_LINK_BUILTINS = ' '.join([
                self.llvm_link,
                "-S",
                "-o {fout}",
                "{fin}",
                "{lib}"])

            # Link additional IR libraries.
            self.CMD_LINK_LIBS = ' '.join([self.llvm_link,
                                           "-S",
                                           "-o {fout}",
                                           "{fin}"])

            # Final shared-object link of the BRIG object.
            self.CMD_LINK_BRIG = ' '.join([self.ld_lld,
                                           "-shared",
                                           "-o {fout}",
                                           "{fin}"])

            # BUGFIX: mark initialization as complete.  This flag was never
            # set before, so every compile step re-queried the device
            # context and rebuilt identical templates.
            self.initialized = True

    def __init__(self):
        self._binary_path = os.environ.get('HSAILBIN', None)

        def _setup_path(tool):
            # Prefer $HSAILBIN when set; otherwise look next to the
            # interpreter (sys.prefix/bin).
            if self._binary_path is not None:
                return os.path.join(self._binary_path, tool)
            else:
                binpath = os.path.join(sys.prefix, 'bin', tool)
                return binpath

        self._triple = TRIPLE
        self.opt = _setup_path("opt")
        self.llc = _setup_path("llc")
        self.llvm_link = _setup_path("llvm-link")
        self.ld_lld = _setup_path("ld.lld")
        self.triple_flag = "-mtriple %s" % self._triple
        self.initialized = False

    def check_tooling(self):
        # make sure the llc can actually target amdgcn, ideally all tooling
        # should be checked but most don't print anything useful and so
        # compilation for AMDGCN would have to be tested instead. This is a
        # smoke test like check.
        # (The previous ``try/except BaseException: raise`` wrapper was a
        # no-op and has been removed; exceptions propagate unchanged.)
        if not os.path.isfile(self.llc):
            raise HsaSupportError('llc not found')
        output = check_output([self.llc, '--version'],
                              universal_newlines=True)
        olines = [x.strip() for x in output.splitlines()]
        tgtidx = olines.index('Registered Targets:')
        targets = olines[tgtidx + 1:]
        for tgt in targets:
            if 'amdgcn' in tgt:
                break
        else:
            msg = 'Command line tooling does not support "amdgcn" target'
            raise HsaSupportError(msg)

    def verify(self, ipath, opath):
        """Run ``opt -verify`` on ``ipath``, writing the result to ``opath``."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_VERIFY.format(fout=opath, fin=ipath), shell=True)

    def optimize(self, ipath, opath):
        """Run ``opt -O3`` on ``ipath``, writing the result to ``opath``."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_OPT.format(fout=opath, fin=ipath), shell=True)

    def generate_hsail(self, ipath, opath):
        """Compile LLVM IR at ``ipath`` to HSAIL assembly at ``opath``."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_HSAIL.format(fout=opath, fin=ipath), shell=True)

    def generate_brig(self, ipath, opath):
        """Compile LLVM IR at ``ipath`` to a BRIG object at ``opath``."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_BRIG.format(fout=opath, fin=ipath), shell=True)

    def link_libs(self, ipath, libpaths, opath):
        """Link ``ipath`` with every IR library in ``libpaths`` into ``opath``."""
        if not self.initialized:
            self._initialize()
        cmdline = self.CMD_LINK_LIBS.format(fout=opath, fin=ipath)
        cmdline += ' '.join(["{0}".format(lib) for lib in libpaths])
        check_call(cmdline, shell=True)

    def link_brig(self, ipath, opath):
        """Link the BRIG object at ``ipath`` into a shared object at ``opath``."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_LINK_BRIG.format(fout=opath, fin=ipath), shell=True)
class Module(AMDGCNModule):
    """An AMDGCN module under construction.

    Accumulates LLVM IR fragments as temporary files in a private scratch
    directory, then drives :class:`CmdLine` to link, optimize and finalize
    them into HSAIL text plus a BRIG binary.
    """
    def __init__(self):
        """
        Setup
        """
        # Private scratch directory holding all intermediate files.
        self._tmpdir = tempfile.mkdtemp()
        # Every temp-file path ever created, in creation order.
        self._tempfiles = []
        # Subset of temp files participating in the link step.
        self._linkfiles = []
        self._cmd = CmdLine()
        AMDGCNModule.__init__(self)
    def __del__(self):
        # NOTE(review): the early return makes self.close() unreachable, so
        # temp files are left on disk at GC time -- presumably deliberate
        # (e.g. to keep artifacts for debugging); confirm before removing.
        return
        self.close()
    def close(self):
        # Remove all temporary files
        for afile in self._tempfiles:
            os.unlink(afile)
        #Remove directory
        os.rmdir(self._tmpdir)
    def _create_temp_file(self, name, mode='wb'):
        # Create and open a tracked temp file; returns (file object, path).
        path = self._track_temp_file(name)
        fobj = open(path, mode=mode)
        return fobj, path
    def _track_temp_file(self, name):
        # Register a new temp-file path without opening it.  The creation
        # index prefix keeps names unique within the scratch directory.
        path = os.path.join(self._tmpdir,
                            "{0}-{1}".format(len(self._tempfiles), name))
        self._tempfiles.append(path)
        return path
    def load_llvm(self, llvmir):
        """
        Load LLVM with HSAIL SPIR spec
        """
        # Preprocess LLVM IR
        llvmir = self._preprocess(llvmir)
        # Create temp file to store the input file
        tmp_llvm_ir, fin = self._create_temp_file("dump-llvm-ir")
        with tmp_llvm_ir:
            tmp_llvm_ir.write(llvmir.encode('ascii'))
        # Create temp file for optimization
        fout = self._track_temp_file("verified-llvm-ir")
        self._cmd.verify(ipath=fin, opath=fout)
        if config.DUMP_OPTIMIZED:
            with open(fout, 'rb') as fin_opt:
                print(fin_opt.read().decode('ascii'))
        # Queue the verified IR for the link step in generateGCN().
        self._linkfiles.append(fout)
    def link_builtins(self, ipath, opath):
        """Link the builtin bitcode libraries into ``ipath``, one at a
        time, leaving the fully linked IR at ``opath``."""
        # progressively link in all the bitcodes
        for bc in self.bitcodes:
            if bc != self.bitcodes[-1]:
                # Intermediate round: derive a unique output name from the
                # bitcode path; only the last round writes to ``opath``.
                tmp_opath = opath + bc.replace('/', '_').replace('.','_')
            else:
                tmp_opath = opath
            lib = os.path.join(ROCM_BC_PATH, bc)
            cmd = self._cmd.CMD_LINK_BUILTINS.format(fout=tmp_opath, fin=ipath, lib=lib)
            check_call(cmd, shell=True)
            # The output of this round is the input of the next one.
            ipath = tmp_opath
    def generateGCN(self):
        """
        Generate GCN from a module and also return the HSAIL code.
        """
        assert not self._finalized, "Module already has GCN generated"
        # Link dependencies libraries
        llvmfile = self._linkfiles[0]
        pre_builtin_path = self._track_temp_file("link-dep")
        libpaths = self._linkfiles[1:]
        self._cmd.link_libs(ipath=llvmfile, libpaths=libpaths,
                            opath=pre_builtin_path)
        # Link library with the builtin modules
        linked_path = self._track_temp_file("linked-path")
        self.link_builtins(ipath=pre_builtin_path, opath=linked_path)
        # Optimize
        opt_path = self._track_temp_file("optimized-llvm-ir")
        self._cmd.optimize(ipath=linked_path, opath=opt_path)
        if config.DUMP_OPTIMIZED:
            with open(opt_path, 'rb') as fin:
                print(fin.read().decode('ascii'))
        # Compile the llvm to HSAIL
        hsail_path = self._track_temp_file("create-hsail")
        self._cmd.generate_hsail(ipath=opt_path, opath=hsail_path)
        # Compile the llvm to BRIG
        brig_path = self._track_temp_file("create-brig")
        self._cmd.generate_brig(ipath=opt_path, opath=brig_path)
        # link
        end_brig_path = self._track_temp_file("linked-brig")
        self._cmd.link_brig(ipath = brig_path, opath=end_brig_path)
        self._finalized = True
        # Read HSAIL
        with open(hsail_path, 'rb') as fin:
            hsail = fin.read().decode('ascii')
        # Read BRIG
        with open(end_brig_path, 'rb') as fin:
            brig = fin.read()
        if config.DUMP_ASSEMBLY:
            print(hsail)
        return namedtuple('FinalizerResult', ['hsail', 'brig'])(hsail, brig)
|
{
"content_hash": "0f339fdc957f7705242e123829aeb5d1",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 88,
"avg_line_length": 32.649180327868855,
"alnum_prop": 0.5280176742317735,
"repo_name": "jriehl/numba",
"id": "cd370ede30bea7104598bff616ad00f5bceb6e4e",
"size": "10064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/roc/hlc/hlc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7023"
},
{
"name": "C",
"bytes": "657637"
},
{
"name": "C++",
"bytes": "49158"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Jupyter Notebook",
"bytes": "110326"
},
{
"name": "Python",
"bytes": "6611899"
},
{
"name": "Shell",
"bytes": "7290"
}
],
"symlink_target": ""
}
|
# Intentionally empty URLconf: the test application defines no routes.
urlpatterns = []
|
{
"content_hash": "bac8bc9121b5871dab66b4746daafe2b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 16,
"avg_line_length": 17,
"alnum_prop": 0.6470588235294118,
"repo_name": "codingjoe/django-vies",
"id": "2f424ae76b1fad5d49b5d28c58fb9d8724800261",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21557"
}
],
"symlink_target": ""
}
|
# Package metadata for Dynomite's Python libraries.
try:
    from setuptools import setup
except ImportError:
    # BUGFIX: ``setup`` lives in ``distutils.core``, not in the top-level
    # ``distutils`` package -- the previous ``from distutils import setup``
    # raised ImportError, defeating the fallback entirely.
    from distutils.core import setup

setup(
    name="Dynomite",
    version="0.1",
    packages=['dynomite'],
    tests_require=['nose>=0.11.0.dev', 'boto'],
)
|
{
"content_hash": "aab4b50ea041dea0c621acee67ce3209",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 19.818181818181817,
"alnum_prop": 0.6330275229357798,
"repo_name": "seabirdzh/dynomite",
"id": "bbda5e32472125af973789b96bd3a45fbe6973d3",
"size": "218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pylibs/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "20149"
},
{
"name": "C++",
"bytes": "77"
},
{
"name": "CSS",
"bytes": "780"
},
{
"name": "Erlang",
"bytes": "334433"
},
{
"name": "HTML",
"bytes": "10143"
},
{
"name": "JavaScript",
"bytes": "27411"
},
{
"name": "Python",
"bytes": "57734"
},
{
"name": "Ruby",
"bytes": "43177"
},
{
"name": "Thrift",
"bytes": "662"
}
],
"symlink_target": ""
}
|
__author__ = 'redwards'

# Re-export the alignment helpers at package level.
from .edit_distance import edit_distance
from .gapped_alignment import gap_alignment
from .dna_alignment import dna_gapped_alignment, dna_score_alignment
from .gapped_alignment2 import gapped_alignment, score_alignment
# BUGFIX: use an explicit relative import like the lines above.  The bare
# ``from local_alignment import ...`` relied on Python 2's implicit relative
# imports and fails under Python 3.
from .local_alignment import local_alignment
|
{
"content_hash": "90ce49af4d61f8e4f4d224447bcefb8b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 68,
"avg_line_length": 36.125,
"alnum_prop": 0.8131487889273357,
"repo_name": "linsalrob/bioinformatics",
"id": "0da3132e3a2c94dbf383527a948b97405be03f39",
"size": "289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Modules/alignment/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23310"
},
{
"name": "C++",
"bytes": "2388"
},
{
"name": "Java",
"bytes": "4571"
},
{
"name": "Makefile",
"bytes": "1983"
},
{
"name": "Perl",
"bytes": "976745"
},
{
"name": "Python",
"bytes": "41991"
},
{
"name": "Shell",
"bytes": "1292"
}
],
"symlink_target": ""
}
|
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from django.utils.functional import cached_property
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
warnings.filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
    """Parse a MySQL DATETIME value, tagging it as UTC when USE_TZ is on.

    Only naive results are (re)tagged -- the tzinfo is attached, never
    converted.
    """
    parsed = parse_datetime(value)
    if parsed is None or not settings.USE_TZ:
        return parsed
    # Confirm the value is naive before overwriting its tzinfo.
    if timezone.is_naive(parsed):
        parsed = parsed.replace(tzinfo=timezone.utc)
    return parsed
def adapt_datetime_with_timezone_support(value, conv):
    """Adapt a datetime to MySQL's literal format, normalising to naive UTC
    when time zone support is active.

    Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
    """
    if settings.USE_TZ:
        if timezone.is_naive(value):
            warnings.warn("MySQL received a naive datetime (%s)"
                          " while time zone support is active." % value,
                          RuntimeWarning)
            value = timezone.make_aware(value, timezone.get_default_timezone())
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
# Start from MySQLdb's stock converter map and override the entries
# described in the comment block above.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: util.typecast_time,
    FIELD_TYPE.DECIMAL: util.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
    FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
    datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.

    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes that really signal integrity violations (1048:
    # "Column cannot be null") but are raised as OperationalError.
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, query, args=None):
        try:
            return self.cursor.execute(query, args)
        except Database.IntegrityError as e:
            six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            # BUGFIX: use e.args[0] -- indexing the exception itself (e[0])
            # raises TypeError on Python 3.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
        except Database.DatabaseError as e:
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.IntegrityError as e:
            six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            # BUGFIX: e.args[0], not e[0] (TypeError on Python 3).
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
        except Database.DatabaseError as e:
            six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])

    def __getattr__(self, attr):
        # Delegate everything else to the underlying cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what this MySQL backend supports."""
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    has_bulk_insert = True
    has_select_for_update = True
    has_select_for_update_nowait = False
    supports_forward_references = False
    supports_long_model_names = False
    supports_microsecond_precision = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_primary_key_0 = False
    uses_savepoints = True
    def __init__(self, connection):
        super(DatabaseFeatures, self).__init__(connection)
    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        # Probe the default storage engine by creating a throwaway table
        # and inspecting its status row.
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
        # This command is MySQL specific; the second column
        # will tell you the default table type of the created
        # table. Since all Django's test tables will have the same
        # table type, that's enough to evaluate the feature.
        cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
        result = cursor.fetchone()
        cursor.execute('DROP TABLE INTROSPECT_TEST')
        return result[1]
    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # MyISAM does not enforce (or expose) foreign keys.
        return self._mysql_storage_engine != 'MyISAM'
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation helpers."""
    compiler_module = "django.db.backends.mysql.compiler"
    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the given date part from a column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a datetime column to ``lookup_type``
        precision (finer-grained parts are reset to their defaults)."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: return the column untouched.
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql
    def date_interval_sql(self, sql, connector, timedelta):
        """Return SQL combining a datetime expression with a timedelta."""
        return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
                timedelta.days, timedelta.seconds, timedelta.microseconds)
    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]
    def fulltext_search_sql(self, field_name):
        """Return SQL for a full-text MATCH ... AGAINST lookup."""
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return cursor._last_executed.decode('utf-8')
    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615
    def quote_name(self, name):
        """Backtick-quote an identifier, unless it already is quoted."""
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences):
        """Return the statements needed to empty the given tables."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # FK checks are toggled off around the TRUNCATEs since the
            # tables may reference each other.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
    def sequence_reset_by_name_sql(self, style, sequences):
        """Return statements resetting AUTO_INCREMENT counters, when the
        server is old enough to need them."""
        # Truncate already resets the AUTO_INCREMENT field from
        # MySQL version 5.0.13 onwards. Refs #16961.
        if self.connection.mysql_version < (5, 0, 13):
            return ["%s %s %s %s %s;" % \
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences]
        else:
            return []
    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value
    def value_to_db_datetime(self, value):
        """Convert a datetime to the naive, second-precision string form
        MySQL stores."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))
    def value_to_db_time(self, value):
        """Convert a time to the naive, second-precision string form MySQL
        stores."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))
    def year_lookup_bounds(self, value):
        """Return the [start, end] datetime strings bracketing a year."""
        # Again, no microseconds
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.99'
        return [first % value, second % value]
    def max_name_length(self):
        # MySQL's identifier length limit.
        return 64
    def bulk_insert_sql(self, fields, num_values):
        """Return the VALUES clause for a multi-row INSERT."""
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
    def savepoint_create_sql(self, sid):
        return "SAVEPOINT %s" % sid
    def savepoint_commit_sql(self, sid):
        return "RELEASE SAVEPOINT %s" % sid
    def savepoint_rollback_sql(self, sid):
        return "ROLLBACK TO SAVEPOINT %s" % sid
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for MySQL via MySQLdb."""
    vendor = 'mysql'
    # Lookup-operator templates.  BINARY forces case-sensitive matching;
    # plain LIKE/REGEXP are case-insensitive under MySQL's default
    # collations.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Cached by the mysql_version property; None until first queried.
        self.server_version = None
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)
    def _valid_connection(self):
        """Ping the current connection; close it and return False if the
        ping fails or there is no connection."""
        if self.connection is not None:
            try:
                self.connection.ping()
                return True
            except DatabaseError:
                self.close()
        return False
    def get_connection_params(self):
        """Build the kwargs for ``MySQLdb.connect`` from settings_dict."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
            'use_unicode': True,
        }
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        if settings_dict['HOST'].startswith('/'):
            # A leading slash denotes a Unix-domain socket path, not a host.
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a new MySQLdb connection and register encoders so Safe*
        strings are escaped like their plain counterparts."""
        conn = Database.connect(**conn_params)
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn
    def init_connection_state(self):
        """Run per-connection setup SQL."""
        cursor = self.connection.cursor()
        # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
        # on a recently-inserted row will return when the field is tested for
        # NULL. Disabling this value brings this aspect of MySQL in line with
        # SQL standards.
        cursor.execute('SET SQL_AUTO_IS_NULL = 0')
        cursor.close()
    def _cursor(self):
        """Return a wrapped cursor, (re)connecting first if needed."""
        if not self._valid_connection():
            conn_params = self.get_connection_params()
            self.connection = self.get_new_connection(conn_params)
            self.init_connection_state()
            connection_created.send(sender=self.__class__, connection=self)
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)
    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # MySQLdb raises NotSupportedError when rollback is unavailable
            # -- presumably non-transactional engines; treat as a no-op.
            pass
    @cached_property
    def mysql_version(self):
        """Return the server version as a tuple of ints, connecting
        temporarily if there is no live connection."""
        if not self.server_version:
            new_connection = False
            if not self._valid_connection():
                # Ensure we have a connection with the DB by using a temporary
                # cursor
                new_connection = True
                self.cursor().close()
            server_info = self.connection.get_server_info()
            if new_connection:
                # Make sure we close the connection
                self.close()
            m = server_version_re.match(server_info)
            if not m:
                raise Exception('Unable to determine MySQL version from version string %r' % server_info)
            self.server_version = tuple([int(x) for x in m.groups()])
        return self.server_version
    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True
    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        self.cursor().execute('SET foreign_key_checks=1')
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign key references. This method is
        intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint checks were off.

        Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
        detailed information about the invalid reference in the error message.

        Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
        ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # LEFT JOIN the referenced table and keep rows whose FK
                # value finds no match -- each one violates the constraint.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))
|
{
"content_hash": "16e13e5cc816d4435242f9664190675c",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 120,
"avg_line_length": 42.37274549098196,
"alnum_prop": 0.6339860007567159,
"repo_name": "chrisfranzen/django",
"id": "f24df93bf4f581cdfeea171e417182992c1521cf",
"size": "21144",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/db/backends/mysql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42663"
},
{
"name": "HTML",
"bytes": "95024"
},
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8216479"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
Provided to you by Emlid Ltd (c) 2014.
twitter.com/emlidtech || www.emlid.com || info@emlid.com
Example: Get pressure from MS5611 barometer onboard of Navio shield for Raspberry Pi
To run this example navigate to the directory containing it and run following commands:
sudo python Barometer_example.py
"""
import time
import navio.ms5611
import navio.util
# Abort if APM/ArduPilot is running and holding the hardware.
navio.util.check_apm()

baro = navio.ms5611.MS5611()
baro.initialize()

# Poll the MS5611 once per second: each reading needs a pressure and a
# temperature conversion, each taking ~10ms to become ready.
while True:
    baro.refreshPressure()
    time.sleep(0.01)  # Waiting for pressure data ready 10ms
    baro.readPressure()

    baro.refreshTemperature()
    time.sleep(0.01)  # Waiting for temperature data ready 10ms
    baro.readTemperature()

    baro.calculatePressureAndTemperature()

    # BUGFIX: use the print() function so the script runs on both Python 2
    # and 3; the emitted text is identical to the old two-argument
    # Python 2 print statement (single space between the two fields).
    print("Temperature(C): %.6f Pressure(millibar): %.6f" % (baro.TEMP, baro.PRES))

    time.sleep(1)
|
{
"content_hash": "28b5b5fe51f472ea713fc1d7a1cdc1c3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 87,
"avg_line_length": 23.764705882352942,
"alnum_prop": 0.7524752475247525,
"repo_name": "shawn1231/snowflakex-iii",
"id": "6e9480e0cb8295cf7cb7375b7434a362f2718bc3",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Barometer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "25398"
},
{
"name": "C++",
"bytes": "1033497"
},
{
"name": "Makefile",
"bytes": "5011"
},
{
"name": "Python",
"bytes": "164606"
},
{
"name": "Roff",
"bytes": "207370"
}
],
"symlink_target": ""
}
|
"""Ce fichier définit le contexte-éditeur 'SelectionTags'."""
from primaires.interpreteur.editeur.selection import Selection
from primaires.format.fonctions import supprimer_accents
class SelectionTags(Selection):

    """Editor context for selecting tags.

    Extends the generic Selection editor: when a tag is added to the
    selection, the tag's script events are also copied onto the tagged
    object.
    """

    nom = "editeur:tags:selection"

    def __init__(self, pere, objet=None, attribut=None, liste=None,
            tagge=None):
        Selection.__init__(self, pere, objet, attribut, liste)
        # Tagged object receiving copies of the tag's script events.
        self.tagge = tagge

    @staticmethod
    def afficher_apercu(apercu, objet, valeur, liste=None, tagge=None):
        """Render the preview (delegates to the base Selection preview)."""
        return Selection.afficher_apercu(apercu, objet, valeur, liste)

    def interpreter(self, msg):
        """Interpret editor input.

        ``msg`` names a tag: if already selected it is removed, otherwise
        it is added and the tag's script events are copied onto the tagged
        object.  Matching is accent- and case-insensitive.
        """
        nom = msg
        msg_sa = supprimer_accents(msg).lower()
        liste = getattr(self.objet, self.attribut)
        cles = list(self.liste)
        cles_sa = [supprimer_accents(c).lower() for c in cles]
        if msg_sa in cles_sa:
            cle = cles[cles_sa.index(msg_sa)]
            if cle in liste:
                while cle in liste:
                    liste.remove(cle)
            else:
                liste.append(cle)
                # Copy the tag's script events onto the tagged object.
                tag = importeur.tags.tags[cle]
                script = tag.script
                for evenement in script.evenements.values():
                    evt = self.tagge.script[evenement.nom]
                    evt.copier_depuis(evenement)
                    self.pere << "Copie de l'évènement {}.".format(
                            evenement.nom)
            # Drop falsy leftovers before refreshing the display.
            liste[:] = [e for e in liste if e]
            self.actualiser()
        else:
            self.pere << "|err|La clé {} est introuvable.|ff|".format(
                    repr(msg))
|
{
"content_hash": "a5b81b502458230bb5e9f44354119b25",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 36.4,
"alnum_prop": 0.5642857142857143,
"repo_name": "vlegoff/tsunami",
"id": "68c568577a26f19133a00cdb7bac2cb4621686fd",
"size": "3399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/tags/editeurs/selection_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
def create_transactions(node, address, amt, fees):
    """Create and wallet-sign one raw transaction per fee in *fees*.

    Selects spendable UTXOs from *node* until they cover ``amt`` plus
    the largest fee, then builds and signs a transaction sending ``amt``
    to *address* for each fee.  Returns the list of signed transactions.
    """
    needed = amt + max(fees)

    inputs = []
    ins_total = 0
    for coin in (u for u in node.listunspent(0) if u['spendable']):
        inputs.append({"txid": coin["txid"], "vout": coin["vout"]})
        ins_total += coin['amount']
        if ins_total >= needed:
            break
    # make sure there were enough utxos to fund the largest-fee variant
    assert ins_total >= needed

    signed_txs = []
    for fee in fees:
        outputs = {address: amt}
        change = ins_total - amt - fee
        # prevent a 0-value change output
        if change > 0:
            outputs[node.getrawchangeaddress()] = change
        raw_tx = node.createrawtransaction(inputs, outputs, 0)
        raw_tx = node.signrawtransactionwithwallet(raw_tx)
        assert_equal(raw_tx['complete'], True)
        signed_txs.append(raw_tx)

    return signed_txs
class WalletTest(BitcoinTestFramework):
    """Exercise getbalance/getunconfirmedbalance across confirmed,
    unconfirmed, conflicted and reorged transactions on a 2-node chain."""
    def set_test_params(self):
        """Two clean-chain nodes; node 0 gets a tight descendant limit."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Main scenario; steps are strictly order-dependent."""
        self.nodes[0].importaddress(ADDRESS_WATCHONLY)
        # Check that nodes don't own any UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        self.log.info("Mining blocks ...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(1)
        # Mature both coinbases by mining 101 watch-only blocks.
        self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 500)
        assert_equal(self.nodes[1].getbalance(), 500)
        self.log.info("Test getbalance with different arguments")
        assert_equal(self.nodes[0].getbalance("*"), 500)
        assert_equal(self.nodes[0].getbalance("*", 1), 500)
        assert_equal(self.nodes[0].getbalance("*", 1, True), 500)
        assert_equal(self.nodes[0].getbalance("*", 1, True, False), 500)
        assert_equal(self.nodes[0].getbalance(minconf=1, addlocked=True), 500)
        assert_equal(self.nodes[0].getbalance(minconf=1, avoid_reuse=False), 500)
        assert_equal(self.nodes[0].getbalance(minconf=1), 500)
        assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 1000)
        assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 500)
        # Send 490 BTC from 0 to 1 and 960 BTC from 1 to 0.
        txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 490 , [Decimal('0.01')])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 960, [Decimal('0.01'), Decimal('0.02')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        # First argument of getbalance must be excluded or set to "*"
        assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
        self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
        def test_balances(*, fee_node_1=0):
            # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
            assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
            assert_equal(self.nodes[1].getbalance(), Decimal('30') - fee_node_1) # change from node 1's send
            # Same with minconf=0
            assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
            assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('30') - fee_node_1)
            # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
            # TODO: fix getbalance tracking of coin spentness depth
            assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
            assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
            # getunconfirmedbalance
            assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('960')) # output of node 1's spend
            assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('960'))
            assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # Doesn't include output of node 0's send since it was spent
            assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('0'))
        test_balances(fee_node_1=Decimal('0.01'))
        # Node 1 bumps the transaction fee and resends
        # self.nodes[1].sendrawtransaction(txs[1]['hex']) # disabled, no RBF in Dash
        #self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation # disabled, no RBF in Dash
        self.sync_all()
        self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
        # test_balances(fee_node_1=Decimal('0.02'))
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        # balances are correct after the transactions are confirmed
        assert_equal(self.nodes[0].getbalance(), Decimal('969.99')) # node 1's send plus change from node 0's send
        assert_equal(self.nodes[1].getbalance(), Decimal('29.99')) # change from node 0's send
        # Send total balance away from node 1
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.98'), [Decimal('0.01')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
        self.sync_all()
        # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
        # TODO: fix getbalance tracking of coin spentness depth
        # getbalance with minconf=3 should still show the old balance
        assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
        # getbalance with minconf=2 will show the new balance.
        assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
        # check mempool transactions count for wallet unconfirmed balance after
        # dynamically loading the wallet.
        before = self.nodes[1].getunconfirmedbalance()
        dst = self.nodes[1].getnewaddress()
        self.nodes[1].unloadwallet('')
        self.nodes[0].sendtoaddress(dst, 0.1)
        self.sync_all()
        self.nodes[1].loadwallet('')
        after = self.nodes[1].getunconfirmedbalance()
        assert_equal(before + Decimal('0.1'), after)
        # Create 3 more wallet txs, where the last is not accepted to the
        # mempool because it is the third descendant of the tx above
        for _ in range(3):
            # Set amount high enough such that all coins are spent by each tx
            txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 999)
        self.log.info('Check that wallet txs not in the mempool are untrusted')
        assert txid not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)
        self.log.info("Test replacement and reorg of non-mempool tx")
        tx_orig = self.nodes[0].gettransaction(txid)['hex']
        # Increase fee by 1 coin (patch the 999-coin output amount down to 998)
        tx_replace = tx_orig.replace(
            struct.pack("<q", 999 * 10**8).hex(),
            struct.pack("<q", 998 * 10**8).hex(),
        )
        tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
        # Total balance is given by the sum of outputs of the tx
        total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
        self.sync_all()
        self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
        # Now confirm tx_replace
        block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
        self.log.info('Put txs back into mempool of node 1 (not node 0)')
        self.nodes[0].invalidateblock(block_reorg)
        self.nodes[1].invalidateblock(block_reorg)
        self.sync_blocks()
        self.nodes[0].syncwithvalidationinterfacequeue()
        assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
        self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
        # Now confirm tx_orig
        self.restart_node(1, ['-persistmempool=0', '-checklevel=0'])
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 0)
        self.sync_blocks()
        self.nodes[1].sendrawtransaction(tx_orig)
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    WalletTest().main()
|
{
"content_hash": "661e348795a218c7d8bbd4892011d70f",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 147,
"avg_line_length": 48.20095693779904,
"alnum_prop": 0.6475084375620409,
"repo_name": "UdjinM6/dash",
"id": "15bb7a268b104e900729500b49bb0f00afe62ae6",
"size": "10283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/wallet_balance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1866352"
},
{
"name": "C++",
"bytes": "9729795"
},
{
"name": "CMake",
"bytes": "32255"
},
{
"name": "CSS",
"bytes": "113028"
},
{
"name": "Dockerfile",
"bytes": "6344"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "235904"
},
{
"name": "Makefile",
"bytes": "128711"
},
{
"name": "Objective-C++",
"bytes": "5478"
},
{
"name": "Python",
"bytes": "1899906"
},
{
"name": "QMake",
"bytes": "1389"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "134642"
}
],
"symlink_target": ""
}
|
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class DistributedBankMgr(DistributedObject.DistributedObject):
    """Client-side singleton manager for the estate bank.

    On generate it registers itself as ``base.cr.bankManager`` (replacing
    any previous instance); it unregisters on disable/delete.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBankMgr')
    neverDisable = 1

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)

    def generate(self):
        """Register this object as the repository's bank manager."""
        # Replace any stale manager left over from a previous generate.
        # (PEP 8: identity comparison with None uses `is not`.)
        if base.cr.bankManager is not None:
            base.cr.bankManager.delete()
        base.cr.bankManager = self
        DistributedObject.DistributedObject.generate(self)

    def disable(self):
        """Unregister the bank manager when this object is disabled."""
        base.cr.bankManager = None
        DistributedObject.DistributedObject.disable(self)

    def delete(self):
        """Unregister the bank manager when this object is deleted."""
        base.cr.bankManager = None
        DistributedObject.DistributedObject.delete(self)

    def d_transferMoney(self, amount):
        """Send a 'transferMoney' distributed update to the server."""
        self.sendUpdate('transferMoney', [amount])
|
{
"content_hash": "093a54b848ba199ca24d59a37f7f1a52",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 32.2,
"alnum_prop": 0.7070393374741201,
"repo_name": "ksmit799/Toontown-Source",
"id": "7a086ebcbe1a6141ffb38cd921764a6fda75fddd",
"size": "966",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "toontown/estate/DistributedBankMgr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
import json,traceback,prop,copy,lrilog
# Property types that are system-managed: prop.create/prop.update refuse
# to touch them (create allows an override via allow_nodeprops).
immutable_props=["urn:lri:property_type:guid",
                 "urn:lri:property_type:timestamp",
                 "urn:lri:property_type:creator"]
class prop(object):
    """Wraps a single LRI property (literal value or link) and its CRUD cycle.

    A prop either loads an existing stored property (by *guid*) or stages a
    new one from *proptype*/*value* or a raw *rec* dict.  Validation errors
    accumulate in ``self.errors`` and gate ``self.is_valid``.

    NOTE(review): Python 2 module (see ``except Exception, e`` below), and
    the class name shadows the imported ``prop`` module -- confirm intended.
    """
    def __init__(self,rec={},entity_guid=None,entity=None,creator=None,proptype=None,value=None,db=None,guid=None,validate_entity=True):
        # NOTE(review): mutable default ``rec={}`` -- harmless here only
        # because it is immediately deep-copied and never mutated in place.
        self.db=db
        self.rec=copy.deepcopy(rec)
        self.errors=[]
        self.creator = creator
        self.invalid_fields={}
        self.is_valid=False
        self.link = None
        self.log = lrilog.lrilog("PROP")
        self.entity=entity
        # Entity GUID comes from the entity object when given, else the arg.
        if self.entity:
            self.entity_guid = self.entity.rec.get("urn:lri:property_type:guid")
        else:
            self.entity_guid = entity_guid
        if self.creator:
            self.rec["creator"] = self.creator
        self.log.debug("PROP INIT ENTITY GUID =",self.entity_guid,self.entity)
        # Extract our property name/value: any urn:-keyed entry in rec is
        # treated as the proptype/value pair (last one wins).
        for k,v in self.rec.items():
            if k.startswith('urn:') and v != None:
                self.rec['proptype'] = k
                self.rec['value'] = v
        self.guid=guid
        if guid:
            # prop exists, let's get it
            self.link = self.db.get_prop(guid)
            if self.link:
                # Mirror the stored link's internal fields into rec.
                for k,v in self.db.internal_properties(self.link).items():
                    if k != 'rec':
                        self.rec[k] = self.link[k]
                self.is_valid = True
            else:
                self.errors.append("Invalid property GUID. Could not find.")
        elif proptype and value != None:
            self.log.debug("INIT FOR PROPERTY CREATE",entity,proptype,value,"\n\n\n")
            # we need to generate our fields and create our property
            self.rec["proptype"] = proptype
            self.rec["value"] = value
            self.prepare_for_creation()
            self.validate_fields()
        elif 'guid' not in self.rec and 'proptype' in self.rec and ('value' in self.rec or 'to' in self.rec):
            # Raw rec dict carries enough to stage a new property.
            self.prepare_for_creation()
            self.validate_fields()
        else:
            self.errors.append("Not enough specification to find or create property.")
    def __str__(self):
        """JSON rendering of the current record."""
        return json.dumps(self.rec)
    def parse_json(self,jsondata):
        ''' Parse json for property '''
        # Returns False on parse failure or non-dict payload; otherwise
        # leaves the parsed dict in self.rec (and implicitly returns None).
        try:
            self.rec=json.loads(jsondata)
        except Exception, e:
            self.errors.append("JSON PARSE ERROR: "+traceback.format_exc())
            return False
        if not isinstance(self.rec,dict):
            self.errors.append("JSON PROPERTY INIT DATA IS NOT A DICTIONARY")
            return False
    def prepare_for_creation(self):
        """Fill in derived fields (proptype URN, from, creator) before create."""
        if not self.rec["proptype"].startswith("urn:"):
            # Resolve our proper proptype
            self.rec["proptype"] = self.db.schema.index["id"].get(self.rec["proptype"],self.rec["proptype"])
        if not self.rec.get("from") and self.entity_guid:
            self.rec["from"] = self.entity_guid
        # Use default creator if needed
        if not self.creator:
            if 'creator' in self.rec:
                self.creator = self.rec['creator']
            else:
                self.creator = self.db.creator_guid
                self.rec['creator'] = self.creator
        return True
    def validate_fields(self):
        ''' Validate myself. This is a very weak validation and should be improved.'''
        self.is_valid=False
        required_fields=['creator','proptype']
        for rf in required_fields:
            if not self.rec.get(rf):
                self.errors.append("Field '"+rf+"' is missing or empty.")
                return False # Missing field
        if self.rec.get("value") == None and self.rec.get("to") == None:
            self.errors.append("Fields 'value' and 'to' are both missing or empty.")
            return False # Missing field
        pt = self.rec.get('proptype')
        if not pt or pt not in self.db.schema.index['properties']:
            self.errors.append("Property %s does not seem to exist." % (repr(pt)))
            return False
        # get the schema for our proptype
        if not self.db.in_bootstrap:
            if not self.db.schema.index["id"].get(pt):
                self.errors.append("Property %s does not seem to exist." % (repr(pt)))
                return False
        propkind = self.db.schema.property_target_kind(pt,in_bootstrap=self.db.in_bootstrap)
        if propkind == 'link':
            if 'value' in self.rec:
                if not 'allow_bad_ids' in self.db.config: # hideous hack for js
                    id_errors = self.db.problems_with_id(self.rec["value"])
                    if id_errors:
                        self.errors.extend(id_errors)
                        return False
            if self.db.in_bootstrap:
                self.is_valid = True
                return True
            ptrec = self.db.schema.index['id'].get(self.db.schema.index['properties'][pt])
            if 'urn:lri:entity_type:enumeration_member' in ptrec.get('urn:lri:property_type:ranges'):
                # Is enumerated value. Let's make sure the value points
                # to an actual enumeratee
                if 'value' in self.rec:
                    r = self.db.property_search({"value":self.rec['value'],
                                                 "proptype":"urn:lri:property_type:id"})
                    if r:
                        target_guid = r[0]['from']
                        eid = self.rec['value']
                    else:
                        self.errors.append("Enumeration member %s does not seem to exist." % (repr(self.rec['value'])))
                        return False
                elif 'to' in self.rec:
                    target_guid = self.rec['to']
                    eid = target_guid
                props = self.db.get_entity_properties(target_guid)['props']
                if 'urn:lri:entity_type:enumeration_member' in props['urn:lri:property_type:types'] or 'urn:lri:property_type:is_member_of_enumeration' in props:
                    # Let's be generous-- either participation or types is good enough to be a valid enum member
                    self.is_valid = True
                    return True
                else:
                    self.errors.append("Entity %s is not an enumeration member." % (repr(eid)))
                    return False
        self.is_valid=True
        return True
    def create(self,is_update=False,allow_nodeprops=False):
        """Persist the staged property. Returns True/False/None (refused)."""
        if self.errors:
            return False
        # System-managed property types may only be written with the override.
        if not allow_nodeprops and self.rec.get("proptype") in immutable_props:
            self.errors.append("Creation of property type %s is not allowed." % (self.rec["proptype"]))
            return None
        if not self.rec.get("from"):
            if self.entity_guid:
                self.rec["from"] = self.entity_guid
            else:
                self.errors.append("Entity GUID missing!. Cannot create.")
                return None
        if not self.errors:
            self.log.debug("PROP CREATE REC",self.rec)
            if self.entity:
                self.link = self.db.create_property(self.rec,
                                                    node=self.entity.node,
                                                    in_bootstrap=True,
                                                    is_update=is_update)
            else:
                self.link = self.db.create_property(self.rec,
                                                    in_bootstrap=True,
                                                    is_update=is_update)
            if not self.link:
                # Surface (and clear) the db-level errors on failure.
                self.errors.extend(self.db.errors)
                self.log.debug("PROPERTY CREATION FAILURE BECAUSE:",self.db.errors)
                self.db.errors=[]
                return False
            for k,v in self.db.internal_properties(self.link).items():
                if k != 'rec':
                    self.rec[k] = v
        return True
    def update(self,newrec):
        """Replace this property's value/target with *newrec*'s.

        Returns None when refused (immutable type, already replaced, or a
        no-op update); otherwise delegates to db.update_property.
        """
        if self.rec.get("proptype") in immutable_props:
            self.errors.append("Update of property type %s is not allowed." % (self.rec["proptype"]))
            return None
        if self.rec["replaced_by"] != "":
            self.errors.append("Property "+self.rec.get("guid")+" already updated.")
            return None
        if not self.rec.get("from"):
            self.errors.append("'from' field missing in existing property! Cannot update")
            return None
        # Make sure the update is idempotent
        if "to" in newrec and "to" in self.rec:
            if newrec["to"] == self.rec["to"]:
                self.errors.append("Can't update property with same 'to' field.")
                return None
        elif "value" in newrec and "value" in self.rec:
            if newrec["value"] == self.rec["value"]:
                self.errors.append("Can't update property with same 'value' field.")
                return None
        elif "value" in newrec and "to" in self.rec:
            # Updating a link property by ID value: resolve to a GUID first.
            to_guids = self.db.get_guids(newrec["value"])
            if to_guids:
                if to_guids[0] == self.rec["to"]:
                    self.errors.append("Can't update property with same target.")
                    return None
                newrec["to"] = to_guids[0]
                del newrec["value"]
        elif "to" in newrec and "value" in self.rec:
            if self.rec["complete"] == False:
                # Could be manual property completion
                if "value" in newrec:
                    del newrec["value"]
            else:
                # Can't have 'to' in literal property
                self.errors.append("Literal property cannot be updated to link property.")
                return None
        if not self.errors:
            self.rec['internal'] = self.link
            self.log.debug("IN PROP UPDATE -- OLDREC ",self.rec)
            self.log.debug("IN PROP UPDATE -- NEWREC ",newrec)
            return self.db.update_property(oldrec=self.rec,newrec=newrec)
    def delete(self):
        # Really delete
        #self.log.debug("\n\nDELETING LINK: %s \n\n" % (str(self.rec))
        self.link.delete()
|
{
"content_hash": "2b0bd0ac629b2d3a4ff0a8c04848d099",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 157,
"avg_line_length": 38.507407407407406,
"alnum_prop": 0.5229393094161777,
"repo_name": "bollacker/lri-b",
"id": "227f37fae31f8021e747b4de7b104e3ce56ddfe7",
"size": "11017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "prop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from main import views
from main.forms import CaptchaPasswordResetForm
from main.views import ProfileView
# URL namespace for reversing, e.g. reverse('main:profile').
app_name = 'main'
# NOTE(review): uses function-based auth views (auth_views.login/logout/...)
# with template overrides -- these were removed in Django 2.1; confirm the
# project's Django version before upgrading.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Profile
    url(r'^profile/(?P<pk>[0-9]+)/$', login_required(ProfileView.as_view()), name='profile'),
    url(r'^edit-profile/(?P<pk>[0-9]+)/$', views.edit_profile, name='edit_profile'),
    url(r'^edit-profile/user/(?P<pk>[0-9]+)/$', views.edit_profile_user, name='edit_profile_user'),
    url(r'^edit-profile/password/(?P<pk>[0-9]+)/$', views.edit_profile_password, name='edit_profile_password'),
    url(r'^edit-profile/email/(?P<pk>[0-9]+)/$', views.edit_profile_email, name='edit_profile_email'),
    url(r'^edit-profile/extended/(?P<pk>[0-9]+)/$', views.edit_profile_extended, name='edit_profile_extended'),
    # Auth
    url(r'login/$', auth_views.login, {'template_name': 'auth/login.html'}, name='login'),
    url(r'logout/$', auth_views.logout, {'template_name': 'auth/logout.html'}, name='logout'),
    url(r'^locked/$', views.locked_out, name='locked_out'),
    # Password-reset flow, with a captcha-protected reset form.
    url(r'password_reset/$', auth_views.password_reset,
        {'template_name': 'auth/password_reset.html', 'email_template_name': 'main/email/password_reset_email.html',
         'subject_template_name': 'main/email/password_reset_subject.txt',
         'password_reset_form': CaptchaPasswordResetForm, 'post_reset_redirect': 'main:password_reset_done'},
        name='password_reset'),
    url(r'password_reset/done/$', auth_views.password_reset_done, {'template_name': 'auth/password_reset_done.html'},
        name='password_reset_done'),
    url(r'password_reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.password_reset_confirm, {'template_name': 'auth/password_reset_confirm.html',
                                            'post_reset_redirect': 'main:password_reset_complete'},
        name='password_reset_confirm'),
    url(r'password_reset/complete/$', auth_views.password_reset_complete,
        {'template_name': 'auth/password_reset_complete.html'}, name='password_reset_complete'),
]
|
{
"content_hash": "6af2e2c46d73b6888afbdb1a84b4c007",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 117,
"avg_line_length": 58.333333333333336,
"alnum_prop": 0.6589010989010989,
"repo_name": "NBajanca/django-non-profit",
"id": "49fc718f0ca2a445d1c78ad06d4440ee2987820e",
"size": "2275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32615"
},
{
"name": "JavaScript",
"bytes": "8365"
},
{
"name": "Python",
"bytes": "49337"
}
],
"symlink_target": ""
}
|
""" A script to build specific fasta databases """
from __future__ import print_function
import collections
import sys
Sequence = collections.namedtuple('Sequence', ['header', 'sequence'])


def FASTAReader_gen(fasta_filename):
    """Lazily yield ``Sequence(header, sequence)`` records from a FASTA file.

    Header lines keep their leading ``>``; the sequence lines belonging
    to one record are concatenated into a single string.
    """
    with open(fasta_filename) as handle:
        header = None
        parts = []
        for raw_line in handle:
            text = raw_line.rstrip()
            if raw_line.startswith('>'):
                # Flush the previous record before starting a new one.
                if header is not None:
                    yield Sequence(header, "".join(parts))
                header = text
                parts = []
            else:
                # A sequence line before any header is malformed input.
                assert header is not None, "FASTA headers must start with >"
                parts.append(text)
        if header is not None:
            yield Sequence(header, "".join(parts))
def target_match(target, search_entry):
    """Return the first ID from *target* contained in *search_entry*.

    *search_entry* is uppercased before matching (the targets are built
    uppercase by the caller), so the match is case-insensitive.  Returns
    the matching target string, or ``None`` when nothing matches.
    """
    search_entry = search_entry.upper()
    for candidate in target:
        # `in` is the idiomatic (and equivalent) form of `.find(x) > -1`.
        if candidate in search_entry:
            return candidate
    return None
def main():
    """Filter the FASTA file (argv[2]) down to the IDs listed in argv[1].

    Matching records are printed to stdout; each target ID is consumed
    after its first hit, so at most one record per ID is emitted.
    """
    # NOTE(review): 'duplicates' is never incremented and used_sequences is
    # collected but never consulted -- the summary is write-only here.
    used_sequences = set()
    work_summary = {'wanted': 0, 'found': 0, 'duplicates': 0}
    with open(sys.argv[1]) as f_target:
        # One ID per line; normalise to the '>ID' uppercase header form.
        targets = [">%s" % _.strip().upper() for _ in f_target]
    work_summary['wanted'] = len(targets)
    for entry in FASTAReader_gen(sys.argv[2]):
        target_matched_results = target_match(targets, entry.header)
        if target_matched_results:
            work_summary['found'] += 1
            # Consume the target so later records with the same ID are skipped.
            targets.remove(target_matched_results)
            sequence = entry.sequence
            used_sequences.add(sequence)
            print(entry.header)
            print(sequence)
# Entry point when run as a script (expects: targets_file fasta_file).
if __name__ == "__main__":
    main()
|
{
"content_hash": "6c86858bd37e123141ff904fed3189b0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 29.54385964912281,
"alnum_prop": 0.578978622327791,
"repo_name": "TGAC/earlham-galaxytools",
"id": "b60d6707fb9323dfb723e7e12cecdb53610a4555",
"size": "1706",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/t_coffee/filter_by_fasta_ids.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5165"
},
{
"name": "Dockerfile",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "22265"
},
{
"name": "JavaScript",
"bytes": "8604"
},
{
"name": "Mako",
"bytes": "9916"
},
{
"name": "Perl",
"bytes": "8754"
},
{
"name": "Python",
"bytes": "69875"
},
{
"name": "R",
"bytes": "2178"
}
],
"symlink_target": ""
}
|
from twisted.application.service import ServiceMaker
# Twisted plugin declaration: exposes mypkg.interp as a twistd service
# named "interp" (an extended manhole with coloring/line editing).
CustomInterpreter = ServiceMaker(
    "Twisted Manhole with custom interpreter",
    "mypkg.interp",
    ("An interactive remote debugger service accessible via telnet "
     "and ssh and providing syntax coloring and basic line editing "
     "functionality."),
    "interp")
|
{
"content_hash": "18f4ba18cbdfc1c3f709d4f63d52b84e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 33.2,
"alnum_prop": 0.7379518072289156,
"repo_name": "oubiwann/carapace",
"id": "a2d1f9636bfbf0bed2dc1d4e1c2dc702fde548b4",
"size": "332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sandbox/oubiwann/twisted/plugins/custom_interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40633"
}
],
"symlink_target": ""
}
|
"""Code for setting up the property system."""
import redis
import time
import livetech.prop.prop as prop
import sys
# Connect to the local Redis-backed property hierarchy.
r = redis.Redis(host="localhost", port=6379, db=0)
h = prop.RedisPropertyHierarchy(r)
# Team-number ranges previewed by each grid client.
ranges = {
    1: "1-25",
    2: "26-50",
    3: "51-75",
    4: "76-100",
    5: "101-105"
}
destination = "live.clients.team.team.team"
# Point each team property at its stream: odd teams on host .141, even on
# .142, port 58000+team.
for team in range(1,106):
    h.getProperty("live.teams." + str(team) + ".path").setValue("http://192.168.1.14" + ("1" if team % 2 == 1 else "2") + ":" + str(58000 + team))
# .items() instead of the Python-2-only .iteritems(): identical iteration
# behavior here, and the script also runs under Python 3.
for client, teams in ranges.items():
    h.getProperty("live.clients.grid-" + str(client) + ".video.preview.destination").setValue(destination)
    h.getProperty("live.clients.grid-" + str(client) + ".video.preview.range").setValue(teams)
h.getProperty(destination).setValue(1)
sys.exit(0)
|
{
"content_hash": "2b782de1795d590e76c7b8e3ab9a2fe9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 146,
"avg_line_length": 36.21739130434783,
"alnum_prop": 0.6398559423769508,
"repo_name": "JSund/videogrid",
"id": "ca9f011c0dd29310c9c2e4f1bff9fe33fa078270",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "set-prop.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "14670"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
import frappe.utils
from frappe.utils.xlsutils import get_xls
from frappe.utils.csvutils import to_csv
# Hard cap on enabled Auto Email Reports per user (enforced in
# AutoEmailReport.validate_report_count).
max_reports_per_user = 3
class AutoEmailReport(Document):
	"""Doctype controller: emails a saved Report to recipients on a schedule."""
	def autoname(self):
		"""Name the document after the (translated) report it wraps."""
		self.name = _(self.report)
	def validate(self):
		self.validate_report_count()
		self.validate_emails()
	def validate_emails(self):
		'''Cleanup list of emails'''
		if ',' in self.email_to:
			# BUGFIX: str.replace returns a new string; the previous code
			# discarded the result, so comma-separated lists were never split.
			self.email_to = self.email_to.replace(',', '\n')
		valid = []
		for email in self.email_to.split():
			if email:
				frappe.utils.validate_email_add(email, True)
				valid.append(email)
		self.email_to = '\n'.join(valid)
	def validate_report_count(self):
		'''check that there are only 3 enabled reports per user'''
		count = frappe.db.sql('select count(*) from `tabAuto Email Report` where user=%s and enabled=1', self.user)[0][0]
		# On insert this document is not yet counted, hence the -1 offset.
		if count > max_reports_per_user + (-1 if self.flags.in_insert else 0):
			frappe.throw(_('Only {0} emailed reports are allowed per user').format(max_reports_per_user))
	def get_report_content(self):
		'''Returns file in for the report in given format'''
		report = frappe.get_doc('Report', self.report)
		raw = report.get_data(limit=self.no_of_rows or 100, user = self.user, filters = self.filters)
		# Only a header row came back -> nothing to send when send_if_data is set.
		if len(raw)==1 and self.send_if_data:
			return None
		if self.format == 'HTML':
			return self.get_html_table(raw)
		elif self.format == 'XLS':
			return get_xls(raw)
		elif self.format == 'CSV':
			return to_csv(raw)
		else:
			frappe.throw(_('Invalid Output Format'))
	def get_html_table(self, data):
		"""Render report rows (header + body) through the print-table template."""
		return frappe.render_template('frappe/templates/includes/print_table.html', {
			'headings': data[0],
			'data': data[1:]
		})
	def get_file_name(self):
		"""Attachment file name derived from the report name and format."""
		return "{0}.{1}".format(self.report.replace(" ", "-").replace("/", "-"), self.format.lower())
	def send(self):
		"""Generate the report and email it (inline for HTML, else attached)."""
		if self.filter_meta and not self.filters:
			frappe.throw(_("Please set filters value in Report Filter table."))
		data = self.get_report_content()
		if not data:
			return
		attachments = None
		message = '<p>{0}</p>'.format(_('{0} generated on {1}')\
			.format(frappe.bold(self.name),
				frappe.utils.format_datetime(frappe.utils.now_datetime())))
		if self.description:
			message += '<hr>' + self.description
		if self.format=='HTML':
			message += '<hr>' + data
		else:
			attachments = [{
				'fname': self.get_file_name(),
				'fcontent': data
			}]
		message += '<hr><p style="font-size: 10px;"> Edit Auto Email Report Settings: {0}</p>'.format(frappe.utils.get_link_to_form('Auto Email Report', self.name))
		frappe.sendmail(
			recipients = self.email_to.split(),
			subject = self.name,
			message = message,
			attachments = attachments
		)
@frappe.whitelist()
def download(name):
	"""Stream the generated report file to the requesting client."""
	report_doc = frappe.get_doc('Auto Email Report', name)
	report_doc.check_permission()
	content = report_doc.get_report_content()
	if not content:
		frappe.msgprint(_('No Data'))
		return
	response = frappe.local.response
	response.filecontent = content
	response.type = "download"
	response.filename = report_doc.get_file_name()
@frappe.whitelist()
def send_now(name):
	"""Immediately send the named Auto Email Report (permission-checked)."""
	report_doc = frappe.get_doc('Auto Email Report', name)
	report_doc.check_permission()
	report_doc.send()
def send_daily():
	"""Send every enabled Daily report, plus Weekly reports on their day."""
	weekday_index = {'Monday': 0, 'Tuesday': 1, 'Wednesday': 2, 'Thursday': 3,
		'Friday': 4, 'Saturday': 5, 'Sunday': 6}
	today = frappe.utils.now_datetime().weekday()
	enabled = frappe.get_all('Auto Email Report',
		{'enabled': 1, 'frequency': ('in', ('Daily', 'Weekly'))})
	for entry in enabled:
		doc = frappe.get_doc('Auto Email Report', entry.name)
		# Weekly reports only go out on their configured weekday.
		if doc.frequency == 'Weekly' and today != weekday_index[doc.day_of_week]:
			continue
		doc.send()
def send_monthly():
	"""Send every enabled report whose frequency is Monthly."""
	monthly = frappe.get_all('Auto Email Report', {'enabled': 1, 'frequency': 'Monthly'})
	for entry in monthly:
		frappe.get_doc('Auto Email Report', entry.name).send()
|
{
"content_hash": "7e5ec35c6af02c6eef3f0b3b5cb8d96a",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 158,
"avg_line_length": 29.43262411347518,
"alnum_prop": 0.6780722891566265,
"repo_name": "elba7r/builder",
"id": "a99c90d2d66c4acbc5fa62ed88e3f4e4f3ac7c4b",
"size": "4284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frappe/email/doctype/auto_email_report/auto_email_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "334596"
},
{
"name": "HTML",
"bytes": "194919"
},
{
"name": "JavaScript",
"bytes": "1259553"
},
{
"name": "Python",
"bytes": "1604630"
},
{
"name": "Shell",
"bytes": "517"
},
{
"name": "TSQL",
"bytes": "9068"
}
],
"symlink_target": ""
}
|
"""Generators for classes of graphs used in studying social networks."""
import itertools
import math
import random
import networkx as nx
# Copyright(C) 2011, 2015 by
# Ben Edwards <bedwards@cs.unm.edu>
# Aric Hagberg <hagberg@lanl.gov>
# Konstantinos Karakatsanis <dinoskarakas@gmail.com>
# All rights reserved.
# BSD license.
__author__ = """\n""".join(['Ben Edwards (bedwards@cs.unm.edu)',
'Aric Hagberg (hagberg@lanl.gov)',
'Konstantinos Karakatsanis '
'<dinoskarakas@gmail.com>'])
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph',
'ring_of_cliques']
def caveman_graph(l, k):
    """Returns a caveman graph of ``l`` cliques of size ``k``.

    Parameters
    ----------
    l : int
      Number of cliques
    k : int
      Size of cliques

    Returns
    -------
    G : NetworkX Graph
      caveman graph

    Notes
    -----
    This returns an undirected graph, it can be converted to a directed
    graph using :func:`nx.to_directed`, or a multigraph using
    ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
    described in [1]_ and it is unclear which of the directed
    generalizations is most useful.

    Examples
    --------
    >>> G = nx.caveman_graph(3, 3)

    See also
    --------
    connected_caveman_graph

    References
    ----------
    .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
       Amer. J. Soc. 105, 493-527, 1999.
    """
    # l disjoint cliques of size k
    G = nx.empty_graph(l*k)
    # BUG FIX: the name previously interpolated l*k as the first argument,
    # misreporting the clique count; use (l, k) for consistency with
    # connected_caveman_graph.
    G.name = "caveman_graph(%s,%s)" % (l, k)
    if k > 1:
        # make each consecutive run of k nodes a complete subgraph
        for start in range(0, l*k, k):
            edges = itertools.combinations(range(start, start+k), 2)
            G.add_edges_from(edges)
    return G
def connected_caveman_graph(l, k):
    """Returns a connected caveman graph of ``l`` cliques of size ``k``.

    The connected caveman graph is formed by creating ``n`` cliques of size
    ``k``, then a single edge in each clique is rewired to a node in an
    adjacent clique.

    Parameters
    ----------
    l : int
      number of cliques
    k : int
      size of cliques (must be at least 2)

    Returns
    -------
    G : NetworkX Graph
      connected caveman graph

    Raises
    ------
    NetworkXError
        If the size of cliques ``k`` is smaller than 2.

    Notes
    -----
    This returns an undirected graph, it can be converted to a directed
    graph using :func:`nx.to_directed`, or a multigraph using
    ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
    described in [1]_ and it is unclear which of the directed
    generalizations is most useful.

    Examples
    --------
    >>> G = nx.connected_caveman_graph(3, 3)

    References
    ----------
    .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
       Amer. J. Soc. 105, 493-527, 1999.
    """
    # ROBUSTNESS FIX: with k < 2 a clique has no internal edge (start, start+1)
    # to rewire, and remove_edge below raised a confusing "edge not in graph"
    # error; fail early with a clear message instead.
    if k < 2:
        raise nx.NetworkXError('The size of cliques in a connected caveman '
                               'graph must be at least 2.')
    G = nx.caveman_graph(l, k)
    G.name = "connected_caveman_graph(%s,%s)" % (l, k)
    # rewire one edge per clique to the last node of the previous clique,
    # wrapping around so the cliques form a ring
    for start in range(0, l*k, k):
        G.remove_edge(start, start+1)
        G.add_edge(start, (start-1) % (l*k))
    return G
def relaxed_caveman_graph(l, k, p, seed=None):
    """Return a relaxed caveman graph.

    A relaxed caveman graph starts with ``l`` cliques of size ``k``. Edges are
    then randomly rewired with probability ``p`` to link different cliques.

    Parameters
    ----------
    l : int
      Number of groups
    k : int
      Size of cliques
    p : float
      Probability of rewiring each edge.
    seed : int,optional
      Seed for random number generator(default=None)

    Returns
    -------
    G : NetworkX Graph
      Relaxed Caveman Graph

    Raises
    ------
    NetworkXError:
      If p is not in [0,1]

    Examples
    --------
    >>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)

    References
    ----------
    .. [1] Santo Fortunato, Community Detection in Graphs,
       Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
       http://arxiv.org/abs/0906.0612
    """
    # idiom: "seed is not None" instead of "not seed is None"
    if seed is not None:
        random.seed(seed)
    G = nx.caveman_graph(l, k)
    nodes = list(G)
    G.name = "relaxed_caveman_graph (%s,%s,%s)" % (l, k, p)
    # BUG FIX: iterate over a snapshot of the edges -- the loop mutates the
    # graph, and mutating while iterating a live edge view is an error in
    # NetworkX >= 2 (edges() returned a list in 1.x, so this is a no-op there).
    for (u, v) in list(G.edges()):
        if random.random() < p:  # rewire the edge
            x = random.choice(nodes)
            if G.has_edge(u, x):
                continue
            G.remove_edge(u, v)
            G.add_edge(u, x)
    return G
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
    """Return the random partition graph with a partition of sizes.

    A partition graph is a graph of communities with sizes defined by
    s in sizes. Nodes in the same group are connected with probability
    p_in and nodes of different groups are connected with probability
    p_out.

    Parameters
    ----------
    sizes : list of ints
      Sizes of groups
    p_in : float
      probability of edges with in groups
    p_out : float
      probability of edges between groups
    directed : boolean optional, default=False
      Whether to create a directed graph
    seed : int optional, default None
      A seed for the random number generator

    Returns
    -------
    G : NetworkX Graph or DiGraph
      random partition graph of size sum(gs)

    Raises
    ------
    NetworkXError
      If p_in or p_out is not in [0,1]

    Examples
    --------
    >>> G = nx.random_partition_graph([10,10,10],.25,.01)
    >>> len(G)
    30
    >>> partition = G.graph['partition']
    >>> len(partition)
    3

    Notes
    -----
    This is a generalization of the planted-l-partition described in
    [1]_. It allows for the creation of groups of any size.

    The partition is store as a graph attribute 'partition'.

    References
    ----------
    .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
       Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
       http://arxiv.org/abs/0906.0612
    """
    # Use geometric method for O(n+m) complexity algorithm
    # partition=nx.community_sets(nx.get_node_attributes(G,'affiliation'))
    if not seed is None:
        random.seed(seed)
    # validate probabilities before doing any work
    if not 0.0 <= p_in <= 1.0:
        raise nx.NetworkXError("p_in must be in [0,1]")
    if not 0.0 <= p_out <= 1.0:
        raise nx.NetworkXError("p_out must be in [0,1]")
    if directed:
        G = nx.DiGraph()
    else:
        G = nx.Graph()
    G.graph['partition'] = []
    n = sum(sizes)
    G.add_nodes_from(range(n))
    # start with len(sizes) groups of gnp random graphs with parameter p_in
    # graphs are unioned together with node labels starting at
    # 0, sizes[0], sizes[0]+sizes[1], ...
    next_group = {}  # maps node key (int) to first node in next group
    start = 0
    group = 0
    # NOTE: the loop variable `n` below shadows the total node count computed
    # above; `n` is rebound again to len(G) after this loop.
    for n in sizes:
        # build one intra-group G(n, p_in) component, shifted to this
        # group's label range [start, start+n)
        edges = ((u+start, v+start)
                 for u, v in
                 nx.fast_gnp_random_graph(n, p_in, directed=directed).edges())
        G.add_edges_from(edges)
        next_group.update(dict.fromkeys(range(start, start+n), start+n))
        G.graph['partition'].append(set(range(start, start+n)))
        group += 1
        start += n
    # handle edge cases
    if p_out == 0:
        # no inter-group edges at all
        return G
    if p_out == 1:
        # connect every node to every node of all later groups
        # (and the reverse direction too, if directed)
        for n in next_group:
            targets = range(next_group[n], len(G))
            G.add_edges_from(zip([n]*len(targets), targets))
            if directed:
                G.add_edges_from(zip(targets, [n]*len(targets)))
        return G
    # connect each node in group randomly with the nodes not in group
    # use geometric method like fast_gnp_random_graph()
    lp = math.log(1.0 - p_out)
    n = len(G)
    if directed:
        for u in range(n):
            v = 0
            while v < n:
                # geometric skip: jump ahead by a p_out-distributed gap
                lr = math.log(1.0 - random.random())
                v += int(lr/lp)
                # skip over nodes in the same group as v, including self loops
                if next_group.get(v, n) == next_group[u]:
                    v = next_group[u]
                if v < n:
                    G.add_edge(u, v)
                    v += 1
    else:
        for u in range(n-1):
            v = next_group[u]  # start with next node not in this group
            while v < n:
                lr = math.log(1.0 - random.random())
                v += int(lr/lp)
                if v < n:
                    G.add_edge(u, v)
                    v += 1
    return G
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
    """Return the planted l-partition graph.

    The graph has ``n = l*k`` vertices split into ``l`` groups of ``k``
    vertices each. Two vertices of the same group are linked with
    probability ``p_in``; vertices of different groups are linked with
    probability ``p_out``.

    Parameters
    ----------
    l : int
      Number of groups
    k : int
      Number of vertices in each group
    p_in : float
      probability of connecting vertices within a group
    p_out : float
      probability of connected vertices between groups
    seed : int,optional
      Seed for random number generator(default=None)
    directed : bool,optional (default=False)
      If True return a directed graph

    Returns
    -------
    G : NetworkX Graph or DiGraph
      planted l-partition graph

    Raises
    ------
    NetworkXError:
      If p_in,p_out are not in [0,1]

    Examples
    --------
    >>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1,seed=42)

    See Also
    --------
    random_partition_graph

    References
    ----------
    .. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
        on the planted partition model,
        Random Struct. Algor. 18 (2001) 116-140.

    .. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
       Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
    """
    # A planted l-partition is just a random partition graph whose groups
    # all have the same size k.
    sizes = [k] * l
    return random_partition_graph(sizes, p_in, p_out, seed, directed)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
                                    seed=None):
    """Generate a Gaussian random partition graph.

    A Gaussian random partition graph is created by creating k partitions
    each with a size drawn from a normal distribution with mean s and variance
    s/v. Nodes are connected within clusters with probability p_in and
    between clusters with probability p_out[1]

    Parameters
    ----------
    n : int
      Number of nodes in the graph
    s : float
      Mean cluster size
    v : float
      Shape parameter. The variance of cluster size distribution is s/v.
    p_in : float
      Probability of intra cluster connection.
    p_out : float
      Probability of inter cluster connection.
    directed : boolean, optional default=False
      Whether to create a directed graph or not
    seed : int
      Seed value for random number generator

    Returns
    -------
    G : NetworkX Graph or DiGraph
      gaussian random partition graph

    Raises
    ------
    NetworkXError
      If s is > n
      If p_in or p_out is not in [0,1]

    Notes
    -----
    Note the number of partitions is dependent on s,v and n, and that the
    last partition may be considerably smaller, as it is sized to simply
    fill out the nodes [1]

    See Also
    --------
    random_partition_graph

    Examples
    --------
    >>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
    >>> len(G)
    100

    References
    ----------
    .. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
       Experiments on Graph Clustering Algorithms,
       In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
    """
    if s > n:
        raise nx.NetworkXError("s must be <= n")
    assigned = 0
    sizes = []
    # draw cluster sizes until the node budget n is exhausted; the final
    # cluster absorbs whatever remains
    # NOTE(review): normalvariate's second argument is the standard
    # deviation, while the docstring promises variance s/v -- confirm
    # which was intended.
    while True:
        size = int(random.normalvariate(s, float(s) / v + 0.5))
        if size < 1:  # how to handle 0 or negative sizes?
            continue
        if assigned + size >= n:
            sizes.append(n-assigned)
            break
        assigned += size
        sizes.append(size)
    # BUG FIX: the positional arguments were passed as (..., directed, seed)
    # but random_partition_graph's signature is
    # (sizes, p_in, p_out, seed=None, directed=False), so the seed was
    # silently used as the directed flag and vice versa.
    return random_partition_graph(sizes, p_in, p_out, seed, directed)
def ring_of_cliques(num_cliques, clique_size):
    """Defines a "ring of cliques" graph.

    The graph consists of ``num_cliques`` complete graphs (cliques) of
    ``clique_size`` nodes each, joined into a ring by one single edge
    between consecutive cliques.

    Parameters
    ----------
    num_cliques : int
        Number of cliques
    clique_size : int
        Size of cliques

    Returns
    -------
    G : NetworkX Graph
        ring of cliques graph

    Raises
    ------
    NetworkXError
        If the number of cliques is lower than 2 or
        if the size of cliques is smaller than 2.

    Examples
    --------
    >>> G = nx.ring_of_cliques(8, 4)

    See Also
    --------
    connected_caveman_graph

    Notes
    -----
    Unlike `connected_caveman_graph`, which rewires an existing clique edge
    to reach the next clique, `ring_of_cliques` keeps every clique complete
    and adds the connecting edge on top.
    """
    if num_cliques < 2:
        raise nx.NetworkXError('A ring of cliques must have at least '
                               'two cliques')
    if clique_size < 2:
        raise nx.NetworkXError('The cliques must have at least two nodes')
    G = nx.Graph()
    total_nodes = num_cliques * clique_size
    for idx in range(num_cliques):
        first = idx * clique_size
        members = range(first, first + clique_size)
        # make this clique complete
        G.add_edges_from(itertools.combinations(members, 2))
        # bridge the clique's second node to the first node of the next
        # clique, wrapping around at the end of the ring
        G.add_edge(first + 1, (first + clique_size) % total_nodes)
    return G
|
{
"content_hash": "5e9cca13769332d46824847b9d45b539",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 79,
"avg_line_length": 29.324034334763947,
"alnum_prop": 0.5830954994511526,
"repo_name": "andnovar/networkx",
"id": "a54cc8e48bf24f74de930cb84ade6f5c21dbb122",
"size": "13665",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "networkx/generators/community.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3218696"
}
],
"symlink_target": ""
}
|
import os as os
from enum import Enum
from inspect import getframeinfo, currentframe
import cv2
import numpy as _np
from bot.providers.predefined import Predefined
from bot.shared import nox_current_version, tupletodict
# Screen coordinates (x, y) of the duel button for each known layout
# variant of the duel screen; keys are variant identifiers returned by
# detection (see SteamPredefined.duel_variant_version).
duel_variant_v = {
    'v1': (800, 800),
    'v2-duel': (640, 800),
    'v2-autoduel': (970, 800)
}
class SteamAreas(Enum):
    """Identifiers for the three fixed regions of the game window
    (used as dispatch keys by SteamPredefined.get_area)."""
    MAINAREA = 1
    CARDINFO = 2
    LOG = 3
class SteamPredefined(Predefined):
    """Predefined assets, screen regions and click coordinates for the
    Steam (desktop) client of Yu-Gi-Oh! Duel Links.

    Coordinates below are absolute window pixels for the 1600x900
    resolution reported by :attr:`resolution`.
    """
    # Image assets that must exist before stats generation can run.
    files_need = [
        os.path.join("steam", "auto_duel_on.png"),
        os.path.join("steam", "auto_duel_off.png"),
        os.path.join("steam", "new_duel_variant.png")
    ]
    # NOTE(review): attribute name keeps the original spelling
    # ("comparision"); renaming would break external references.
    files_needed_for_comparision = [
    ]
    # Name of the HDF5 dataset the generated stats are written to.
    dataset = 'steam'

    def __init__(self, *args, **kwargs):
        super(SteamPredefined, self).__init__(*args, **kwargs)

    def run_prechecks(self):
        # Fail fast if any required asset image is missing from self.assets.
        for file in self.files_need:
            assert (os.path.exists(os.path.join(self.assets,
                                                file))), "Missing File for stats generations: if you git cloned this repo you probably have a miss configured home!!!"

    def generate(self):
        """Run prechecks, compute image statistics for all assets and
        persist them (plus the bot version) to the HDF5 dataset."""
        self.run_prechecks()
        save = {}
        temp_dict = self.generate_autoduel_stats()
        save = {**save, **temp_dict}
        temp_dict = self.generate_duel_button_stats()
        save = {**save, **temp_dict}
        save['version'] = nox_current_version
        self.write_hdf5(save, self.dataset)

    def generate_autoduel_stats(self):
        """Compute image stats for the auto-duel on/off button screenshots,
        cropped to the region given by :attr:`autoduel`."""
        location = self.assets
        autoduelon = os.path.join(location, "steam", "auto_duel_on.png")
        autodueloff = os.path.join(location, "steam", "auto_duel_off.png")
        a = self.get_image_stats(cv2.imread(autodueloff), **self.autoduel)
        b = self.get_image_stats(cv2.imread(autoduelon), **self.autoduel)
        save = {
            'auto_duel_off': a,
            'auto_duel_on': b
        }
        return save

    def generate_duel_button_stats(self):
        """Compute edge-image stats for the duel-button screenshot,
        cropped to the region given by :attr:`duel_variant`."""
        location = self.assets
        new_duel_variant = os.path.join(location, "steam", "new_duel_variant.png")
        # NOTE(review): imread's second argument is an imread flag, but
        # cv2.COLOR_BGR2GRAY is a cvtColor conversion code -- possibly
        # cv2.IMREAD_GRAYSCALE was intended; confirm before changing, since
        # the stored stats depend on the current behavior.
        im = cv2.imread(new_duel_variant, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(im, 240, 255)
        a = self.get_image_stats(_np.array(edges), **self.duel_variant)
        save = {
            'duel_variant': a
        }
        return save

    def get_area(self, area):
        # Dispatch a SteamAreas member to its screen-region dict.
        return {
            SteamAreas.MAINAREA: self.main_area,
            SteamAreas.CARDINFO: self.card_info_area,
            SteamAreas.LOG: self.log_area
        }[area]

    def relative(self, x, y, area):
        """Translate an (x, y) point from area-local to window coordinates."""
        area = self.get_area(area)
        xrel, yrel = area.get('left'), area.get('top')
        return x + xrel, y + yrel

    def relative_area(self, x, y, height, width, area):
        """Translate an area-local rectangle into a window-coordinate dict.

        NOTE(review): unlike relative(), x is offset by 'top' and y by
        'left' here -- x/y appear to act as row/column; confirm intended.
        """
        area = self.get_area(area)
        xrel, yrel = area.get('top'), area.get('left')
        return tupletodict(x + xrel, y + yrel, height, width)

    @staticmethod
    def duel_variant_version(value):
        # Look up the click coordinates for a detected duel-screen variant;
        # None for unknown variants.
        return duel_variant_v.get(value, None)

    @property
    def window_name(self):
        # Title of the game window to attach to.
        return "Yu-Gi-Oh! DUEL LINKS"

    @property
    def window_exe_name(self):
        return "dlpc.exe"

    @property
    def steam_url_start(self):
        # steam:// protocol URL that launches the game by app id.
        return "steam://rungameid/601510"

    @property
    def yugioh_initiate_link(self):
        # Click point that starts the game from its landing screen.
        return 800, 650

    @property
    def autoduel(self):
        # Region of the auto-duel on/off indicator.
        return tupletodict(107, 945, 115, 40)

    @property
    def duel_variant(self):
        # Region scanned to detect which duel-screen variant is shown.
        return tupletodict(780, 460, 60, 680)

    @property
    def auto_duel_location_pre(self):
        """This location points to the autoduel button before the battle starts"""
        return tupletodict(790, 840, 40, 260)

    @property
    def duel_location_pre(self):
        """This location points to the duel button before the battle starts"""
        return tupletodict(790, 480, 40, 260)

    @property
    def dialog_ok(self):
        # Click point of the generic dialog OK button.
        return 150, 800

    @property
    def ok_button_duel(self):
        """This specifies the location of the ok button for duels"""
        return tupletodict(855, 720, 50, 180)

    @property
    def button_duel(self):
        """Specifies the location of the button to click"""
        return 800, 870

    @property
    def resolution(self):
        # Expected game window size (width, height) for all coordinates above.
        return 1600, 900

    @property
    def street_replay(self):
        # Not available on the Steam client; reports the property name
        # via frame introspection.
        raise NotImplementedError("Function {} has not been implemented".format(getframeinfo(currentframe())[2]))

    @property
    def street_replay_location(self):
        """Indicates what page the street replay icon is located"""
        return 2

    @property
    def auto_duel_button(self):
        """Autoduel button during battle"""
        return self.relative(600, 100, area=SteamAreas.MAINAREA)

    @property
    def page_area(self):
        # Strip at the bottom of the main area showing the page indicator.
        return {
            'left': 464,
            'top': 866,
            'width': 678,
            'height': 50
        }

    # cache placeholder (unused; the property below recomputes each call)
    _main_area = {}

    @property
    def main_area(self):
        # Central game region; height None = extend to window bottom.
        return {
            'left': 464,
            'top': 25,
            'width': 678,
            'height': None
        }

    # cache placeholder (unused)
    _card_info_area = {}

    @property
    def card_info_area(self):
        # Right-hand panel showing card details.
        return {
            'left': 1142,
            'top': 25,
            'width': 1604 - 1142,
            'height': None
        }

    # cache placeholder (unused)
    _log_area = {}

    @property
    def log_area(self):
        # Left-hand panel showing the log.
        return {
            'left': 0,
            'top': 25,
            'width': 462,
            'height': None
        }

    @property
    def backlist(self):
        """Backlisted areas"""
        return [
            {'left': 450,
             'top': 415,
             'width': 62,
             'height': 110},
            {'left': 1090,
             'top': 425,
             'width': 62,
             'height': 110}
        ]

    @property
    def move_right_button(self):
        """Steam has button to move to right page"""
        return self.relative(655, 430, SteamAreas.MAINAREA)

    @property
    def move_left_button(self):
        """Steam has button to move to left page"""
        return self.relative(0, 430, SteamAreas.MAINAREA)

    @property
    def duelist_name_area(self):
        # Rectangle containing the opponent's name inside the main area.
        return self.relative_area(660, 5, 35, 150, SteamAreas.MAINAREA)
|
{
"content_hash": "d805d3f23e9de576401057d8af315f34",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 166,
"avg_line_length": 27.07792207792208,
"alnum_prop": 0.5566746602717826,
"repo_name": "will7200/Yugioh-bot",
"id": "be29306c404c225b26c66a75a4aaedf809279fab",
"size": "6255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/providers/steam/predefined.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "147"
},
{
"name": "Python",
"bytes": "258596"
}
],
"symlink_target": ""
}
|
__author__ = 'srio'
import sys
import numpy
import scipy.constants as codata
from oasys.widgets import widget
from orangewidget import gui
from PyQt4 import QtGui
from crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch
from crystalpy.util.PolarizedPhoton import PolarizedPhoton
from crystalpy.util.Vector import Vector
from crystalpy.util.StokesVector import StokesVector
from orangecontrib.shadow.util.shadow_objects import ShadowBeam, EmittingStream, TTYGrabber
from orangecontrib.shadow.util.shadow_util import ShadowCongruence, ShadowPlot
import Shadow
from orangecontrib.shadow.util.shadow_objects import ShadowBeam, ShadowOpticalElement, ShadowOEHistoryItem
class ShadowConverter(widget.OWWidget):
    """OASYS widget converting between crystalpy ``PolarizedPhotonBunch``
    objects and ShadowOui beams, in both directions.

    Whatever arrives on either input is re-emitted unchanged on its own
    output AND translated and emitted on the other output.
    """

    name = "ShadowConverter"
    description = "Converts PolarizedPhotonBunch to Shadow Beam and back"
    icon = "icons/converter.png"
    maintainer = "Manuel Sanchez del Rio"
    maintainer_email = "srio(@at@)esrf.eu"
    priority = 45
    category = "crystalpy"
    keywords = ["PhotonViewer", "crystalpy", "viewer", "oasyscrystalpy", "shadowOui"]

    # the widget takes in a collection of Photon objects and
    # sends out an object of the same type made up of scattered photons.
    inputs = [{"name": "photon bunch",
               "type": PolarizedPhotonBunch,
               "handler": "_set_input_photon_bunch",
               "doc": ""},
              {"name": "Input Beam",
               "type": ShadowBeam,
               "handler": "_set_input_shadow_beam",
               "doc": ""},
              ]

    outputs = [{"name": "photon bunch",
                "type": PolarizedPhotonBunch,
                "doc": "transfer diffraction results"},
               {"name": "Beam",
                "type": ShadowBeam,
                "doc": "Shadow Beam",
                "id": "beam"},
               ]

    # no plot area, only the small control strip
    want_main_area = 0
    want_control_area = 1

    def __init__(self):
        # NOTE(review): widget.OWWidget.__init__ is never called here;
        # presumably the Orange widget machinery initializes the base class
        # before this runs -- confirm before refactoring.
        self.setFixedWidth(600)
        self.setFixedHeight(100)
        gui.separator(self.controlArea, height=20)
        gui.label(self.controlArea, self, " CONVERSION POINT: PolarizedPhotonBunch <-> ShadowOuiBeam", orientation="horizontal")
        gui.rubber(self.controlArea)

    def _set_input_photon_bunch(self, photon_bunch):
        """Input handler: pass the bunch through, then emit its Shadow
        translation as well."""
        if photon_bunch is not None:
            print("<><> CONVERTER has received PolarizedPhotonBunch)")
            self._input_available = True
            self.incoming_bunch = photon_bunch
            self.send_photon_bunch(photon_bunch)
            #
            # translate
            #
            shadow_beam = self.from_photon_bunch_to_shadow()
            self.send_shadow_beam(shadow_beam)

    def _set_input_shadow_beam(self, beam):
        """Input handler: pass good beams through, then emit their photon
        bunch translation; show an error dialog for bad content."""
        if ShadowCongruence.checkEmptyBeam(beam):
            if ShadowCongruence.checkGoodBeam(beam):
                print("<><> CONVERTER has received GOOD Shadow BEAM)")
                self._input_available = True
                self.incoming_shadow_beam = beam
                self.send_shadow_beam(beam)
                #
                # translate
                #
                photon_bunch = self.from_shadow_beam_to_photon_bunch()
                self.send_photon_bunch(photon_bunch)
            else:
                QtGui.QMessageBox.critical(self, "Error",
                                           "Data not displayable: No good rays or bad content",
                                           QtGui.QMessageBox.Ok)

    def send_photon_bunch(self, photon_bunch):
        # Emit on the "photon bunch" output channel.
        self.send("photon bunch", photon_bunch)

    def send_shadow_beam(self, shadow_beam):
        # Emit on the "Beam" output channel.
        self.send("Beam", shadow_beam)

    def from_shadow_beam_to_photon_bunch(self):
        """Build a PolarizedPhotonBunch from the stored Shadow beam.

        Reads, for good rays only (nolost=1), the columns holding the
        direction components (4-6), the Stokes parameters (30-33) and the
        photon energy (11), and wraps each ray in a PolarizedPhoton.
        """
        vx = self.incoming_shadow_beam._beam.getshcol(4, nolost=1)
        vy = self.incoming_shadow_beam._beam.getshcol(5, nolost=1)
        vz = self.incoming_shadow_beam._beam.getshcol(6, nolost=1)
        s0 = self.incoming_shadow_beam._beam.getshcol(30, nolost=1)
        s1 = self.incoming_shadow_beam._beam.getshcol(31, nolost=1)
        s2 = self.incoming_shadow_beam._beam.getshcol(32, nolost=1)
        s3 = self.incoming_shadow_beam._beam.getshcol(33, nolost=1)
        energies = self.incoming_shadow_beam._beam.getshcol(11, nolost=1)
        photon_bunch = PolarizedPhotonBunch([])
        photons_list = list()
        for i, energy in enumerate(energies):
            photon = PolarizedPhoton(energy_in_ev=energy,
                                     direction_vector=Vector(vx[i], vy[i], vz[i]),
                                     stokes_vector=StokesVector([s0[i], s1[i], s2[i], s3[i]]))
            #photon_bunch.add(photon)
            # print("<><> appending photon",i)
            photons_list.append(photon)
        # bulk insert is used instead of per-photon add()
        photon_bunch.addPhotonsFromList(photons_list)
        return photon_bunch

    def create_dummy_oe(self):
        """Build a pass-through (empty) optical element used only to give
        the converted beam a valid Shadow tracing history."""
        empty_element = ShadowOpticalElement.create_empty_oe()
        # TODO: check this
        empty_element._oe.DUMMY = 100.0  # self.workspace_units_to_cm
        empty_element._oe.T_SOURCE = 0.0
        empty_element._oe.T_IMAGE = 0.0
        empty_element._oe.T_INCIDENCE = 0.0
        empty_element._oe.T_REFLECTION = 180.0
        empty_element._oe.ALPHA = 0.0
        empty_element._oe.FWRITE = 3
        empty_element._oe.F_ANGLE = 0
        return empty_element

    def from_photon_bunch_to_shadow(self):
        """Build a ShadowBeam from the stored photon bunch.

        Per-ray electric field amplitudes (Es, Ep) and the p-phase are
        reconstructed from the Stokes parameters; positions are set to the
        origin and a dummy tracing history is attached.
        """
        photon_beam = self.incoming_bunch
        N = photon_beam.getArrayByKey("number of photons")
        energies = photon_beam.getArrayByKey("energies")
        S0 = photon_beam.getArrayByKey("s0")
        S1 = photon_beam.getArrayByKey("s1")
        S2 = photon_beam.getArrayByKey("s2")
        S3 = photon_beam.getArrayByKey("s3")
        vx = photon_beam.getArrayByKey("vx")
        vy = photon_beam.getArrayByKey("vy")
        vz = photon_beam.getArrayByKey("vz")
        beam = Shadow.Beam(N)
        # NOTE(review): conversion factor between Shadow's wavenumber column
        # and photon energy in eV (the 1e2 suggests cm units) -- confirm.
        A2EV = 2.0 * numpy.pi / (codata.h*codata.c/codata.e*1e2)
        for i in range(N):
            s0 = S0[i]
            s1 = S1[i]
            s2 = S2[i]
            s3 = S3[i]
            energy = energies[i]
            # renormalize s0 if the Stokes vector is not fully polarized
            if (numpy.abs(s1**2 + s2**2 + s3**2 - s0**2) > 1e-4):
                s0 = numpy.sqrt(s1**2 + s2**2 + s3**2)
                print("Warning: Beam is not fully polarized.")
            # field intensities along the two polarization directions
            Ex2 = 0.5 * (s0 + s1)
            Ez2 = 0.5 * (s0 - s1)
            Ex = numpy.sqrt(Ex2)
            Ez = numpy.sqrt(Ez2)
            # phase difference between the two field components
            # (guard against division by zero when Ez2 == 0)
            if s0 == s1:
                sin2delta = 0.0
            else:
                sin2delta = -0.5 * ((s2**2 - s3**2) / (4 * Ex2 * Ez2) - 1)
            delta = numpy.arcsin(numpy.sign(s3) * numpy.sqrt(sin2delta))
            beam.rays[i, 0] = 0.0  # x
            beam.rays[i, 1] = 0.0  # x
            beam.rays[i, 2] = 0.0  # x
            beam.rays[i, 3] = vx[i]  # v
            beam.rays[i, 4] = vy[i]  # v
            beam.rays[i, 5] = vz[i]  # v
            beam.rays[i, 6] = Ex  # Es
            beam.rays[i, 7] = 0.0  # Es
            beam.rays[i, 8] = 0.0  # Es
            beam.rays[i, 9] = 1.0  # lost ray flag
            beam.rays[i, 10] = A2EV * energy  # k
            beam.rays[i, 11] = i  # ray index
            beam.rays[i, 12] = 0.0  # path length
            beam.rays[i, 13] = 0.0  # phase-s
            beam.rays[i, 14] = delta  # phase-ps
            beam.rays[i, 15] = 0.0  # Ep
            beam.rays[i, 16] = 0.0  # Ep
            beam.rays[i, 17] = Ez  # Ep
        beam_out = ShadowBeam(beam=beam)
        beam_out.history.append(ShadowOEHistoryItem())  # fake Source
        beam_out._oe_number = 0
        # just to create a safe history for possible re-tracing
        beam_out.traceFromOE(beam_out, self.create_dummy_oe(), history=True)
        #self.send("Beam", beam_out)
        return beam_out
if __name__ == "__main__":
    # Standalone launch for manually exercising the widget outside OASYS.
    a = QtGui.QApplication(sys.argv)
    ow = ShadowConverter()
    ow.show()
    a.exec_()
    ow.saveSettings()
|
{
"content_hash": "ae21fc6ccf2186d8a9bf927d5c5862e6",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 137,
"avg_line_length": 34.19396551724138,
"alnum_prop": 0.5584268246564982,
"repo_name": "edocappelli/oasys-crystalpy",
"id": "6733c105af28ff9e5ac3b4934293702d66695e34",
"size": "7933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orangecontrib/oasyscrystalpy/widgets/elements/ShadowConverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151726"
}
],
"symlink_target": ""
}
|
import six
from agate.data_types.base import DataType
class Text(DataType):
    """
    Data representing text.

    :param cast_nulls:
        If :code:`True`, values in :data:`.DEFAULT_NULL_VALUES` will be
        converted to `None`. Disable to retain them as strings.
    """
    def __init__(self, cast_nulls=True, **kwargs):
        super(Text, self).__init__(**kwargs)

        self.cast_nulls = cast_nulls

    def cast(self, d):
        """
        Cast a single value to :func:`unicode` (:func:`str` in Python 3).

        :param d:
            A value to cast.
        :returns:
            :func:`unicode` (:func:`str` in Python 3) or :code:`None`
        """
        if d is None:
            return None

        # Strings matching a configured null value become None (unless
        # cast_nulls is disabled); everything else is stringified.
        if self.cast_nulls and isinstance(d, six.string_types):
            if d.strip().lower() in self.null_values:
                return None

        return six.text_type(d)
|
{
"content_hash": "bc138d4c733a4cfaf8be505eb6a7295b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 26.441176470588236,
"alnum_prop": 0.5517241379310345,
"repo_name": "wireservice/agate",
"id": "6bd210eac27ee4bfc4586872c3ee8be6f26143bf",
"size": "922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "agate/data_types/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "133848"
},
{
"name": "Python",
"bytes": "475674"
}
],
"symlink_target": ""
}
|
import random
import sys
# Deterministic module import: the simulation always starts from the
# same RNG state (run() re-seeds again before sampling).
random.seed(42)
# sys.setrecursionlimit(2000)
# Python 2 script; routing recursion over large networks is slow on
# CPython, hence the reminder below.
print "use pypy"
# Verbose routing diagnostics toggle used throughout Node/Simulation.
DEBUG = False
class Node(object):
    """
    a node with `num_channels` actively opens half of them with other nodes
    and has `deposit`.
    The other half is reserved for other nodes connecting.
    """

    def __init__(self, sim, id, num_channels, capacity_per_channel):
        self.sim = sim
        self.id = id
        # channels are split half outgoing / half reserved for inbound
        assert num_channels % 2 == 0
        self.num_channels = num_channels
        self.num_initiated_channels = num_channels // 2
        self.capacity_per_channel = capacity_per_channel
        self.channels = dict()  # nodeid => capacity

    def __repr__(self):
        return '<Node:%d>' % self.id

    @property
    def targets(self):
        """
        connect to closest node larger than self!
        """
        # Kademlia-style spacing: ids at max_id/2, max_id/4, ... ahead of self
        distances = [self.sim.max_id / 2**i for i in range(self.num_initiated_channels)]
        return [(self.id + d) % self.sim.max_id for d in distances]

    def initiate_channels(self):
        # Open the outgoing half of this node's channels, one per target id.
        for target_id in self.targets:
            # NOTE(review): comparing an int to the `targets` list is always
            # True -- this assert can never fire; possibly `!= self.id` was
            # intended.
            assert target_id != self.targets
            node_id = self.sim.get_closest_node_id(target_id)
            self.connect(node_id)
            # connect other
            self.sim.node_by_id[node_id].connect(self.id)

    def connect(self, other_id):
        """Open (or keep) a channel to other_id funded with this node's
        per-channel capacity."""
        assert other_id
        assert other_id in self.sim.node_by_id
        if other_id in self.channels:
            return
        node = self.sim.node_by_id[other_id]
        self.channels[node.id] = self.capacity_per_channel

    def randomize_capacity(self):
        # Rescale every channel capacity by a uniform factor in [0, 2).
        for n, c in self.channels.items():
            c *= 2 * random.random()
            self.channels[n] = c

    def _channels_by_distance(self, target_id, amount):
        """Return this node's channel peers sorted by (wrap-around) distance
        to target_id, keeping only channels that can carry `amount`."""
        def _distance(node_id):
            # forward ring distance from node_id to target_id
            d = target_id - node_id
            if d < 0:
                d += self.sim.max_id
            return d
        # Python 2 cmp-style sort
        nodeids = sorted(self.channels.keys(), lambda a, b: cmp(_distance(a), _distance(b)))
        assert len(nodeids) < 2 or _distance(nodeids[0]) <= _distance(nodeids[1])
        return [nid for nid in nodeids if self.channels[nid] >= amount]

    def _channels_by_distance_plus(self, target_id, amount):
        """
        sort nodes by their shortest distance to target_id
        """
        def _distance(node_id):
            d = target_id - node_id
            if d < 0:
                d += self.sim.max_id
            return d
        distance_channel = []
        for nodeid, capacity in self.channels.items():
            if capacity < amount:
                continue
            # one-hop lookahead: rank a peer by the best distance reachable
            # through that peer's own channels, if better than the peer itself
            res = self.sim.node_by_id[nodeid]._channels_by_distance(target_id, amount)
            assert len(res) < 2 or _distance(res[0]) <= _distance(res[1])
            if res and _distance(res[0]) < _distance(nodeid):
                distance_channel.append((_distance(res[0]), nodeid))
            else:
                distance_channel.append((_distance(nodeid), nodeid))
        distance_channel.sort()
        if DEBUG:
            print 'closest d,nodeid', distance_channel[0]
        return [n for d, n in distance_channel]

    def transfer(self, transfer, proactive_routing=False):
        """
        try to transfer along a channel with a node that has a lower id than target.
        closest node first
        """
        # print 'in transfer', self, transfer.receiver
        # depth-first search with backtracking; `tried` prevents revisits
        if self in transfer.tried:
            return False
        transfer.tried.append(self)
        transfer.path.append(self)
        # sort connections by distance to target
        target_id = transfer.receiver
        if target_id == self.id:
            return True
        # give up once the search has touched too many nodes
        if len(transfer.tried) > self.sim.max_tried:
            if DEBUG:
                print target_id
                print [n.id for n in transfer.path]
                print [n.id for n in transfer.tried]
            return False
        if not proactive_routing:
            candidates = self._channels_by_distance(target_id, transfer.amount)
        else:
            candidates = self._channels_by_distance_plus(target_id, transfer.amount)
        if DEBUG:
            candidates2 = self._channels_by_distance(target_id, transfer.amount)
            print
            print "node:", self.id, "target", target_id
            print candidates
            print candidates2
            assert len(candidates2) >= len(candidates)
            # candidates = candidates2
        res = False
        for node_id in candidates:
            # NOTE(review): `tried` holds Node objects while node_id is an
            # int, so this assert is always True; likely meant to compare
            # against [n.id for n in transfer.tried].
            assert node_id not in transfer.tried
            # don't overshoot the target except on the first hop
            if node_id > target_id:
                if len(transfer.path) > 1:  # not first
                    break
            assert self.channels[node_id] >= transfer.amount
            node = self.sim.node_by_id[node_id]
            res = node.transfer(transfer, proactive_routing)
            if res:
                break
        if not res:
            # dead end: backtrack by removing self from the path
            transfer.path.pop()
            return False
        return True
class Transfer(object):
    """A single payment attempt of `amount` from `sender` to `receiver`.

    `tried` and `path` accumulate routing state while Node.transfer()
    searches for a route; `success` is set by the caller afterwards.
    """

    def __init__(self, sender, receiver, amount):
        self.sender = sender
        self.receiver = receiver
        self.amount = amount
        # routing bookkeeping, filled in during Node.transfer()
        self.tried = []
        self.path = []
        # outcome flag, set once routing has finished
        self.success = False
        # print self

    def __repr__(self):
        return '<Transfer v=%d t=%s>' % (self.amount, self.receiver)
class Simulation(object):
    """Payment-channel network simulation: builds a ring-structured network
    of Nodes and measures routing success for random transfers."""

    # Defaults; override on the instance before calling setup_network().
    num_nodes = 100000
    max_id = 2**256  # size of the circular id space
    # max_id = num_nodes * 10
    num_channels = 32
    max_capacity = 128
    min_capacity = 1
    randomize_capacity = False
    proactive_routing = True
    max_tried = 1000  # search budget per transfer

    def __init__(self):
        self.node_by_id = dict()
        self.nodeids = []  # sorted node ids, filled by setup_network()

    def _capacity_distribution(self, x):
        """Map x in [0, 1] to a per-channel capacity (linear interpolation
        between min_capacity and max_capacity)."""
        assert isinstance(x, float)
        assert 0 <= x <= 1
        # p = self.max_capacity * pareto(x, self.max_capacity / float(self.min_capacity))
        p = self.min_capacity + (self.max_capacity - self.min_capacity) * x
        return p

    def _get_closest_node_id(self, target_id):
        # this is very slow
        # linear-scan reference implementation of get_closest_node_id()
        for node_id in self.nodeids:
            if node_id > target_id:
                return node_id
        return self.nodeids[0]

    def get_closest_node_id(self, target_id):
        """Binary search for the node id closest to target_id."""
        start, end = 0, len(self.nodeids) - 1
        while end - start > 1:
            idx = start + (end - start) / 2
            if self.nodeids[idx] > target_id:
                end = idx
            else:
                start = idx
        assert end - start <= 1, (end, start)
        # pick whichever of the two bracketing ids is nearer
        ds = abs(self.nodeids[start] - target_id)
        de = abs(self.nodeids[end] - target_id)
        idx = min((ds, start), (de, end))[1]
        # assert abs(self.nodeids[idx] -
        # target_id) <= abs(self._get_closest_node_id(target_id) - target_id)
        return self.nodeids[idx]

    def _iteredges(self):
        # Yield every (peer_id, capacity) channel entry across all nodes.
        for node in self.node_by_id.values():
            for nid, c in node.channels.items():
                yield nid, c

    def setup_network(self):
        """Create nodes with random ids, open their channels and print
        summary statistics of the resulting topology."""
        print 'setting up nodes'
        for i in range(self.num_nodes):
            node_id = random.randrange(self.max_id)
            if node_id in self.node_by_id:
                continue  # id space collision
            capacity = self._capacity_distribution(i / float(self.num_nodes))
            node = Node(self, node_id, self.num_channels, capacity)
            self.node_by_id[node.id] = node
        self.nodeids = sorted(self.node_by_id.keys())
        print 'num_nodes', len(self.nodeids)
        print 'setting up channels'
        for node in self.node_by_id.values():
            node.initiate_channels()
            if self.randomize_capacity:
                node.randomize_capacity()
        capacities = [c for nid, c in self._iteredges()]
        num_edges = len(capacities)
        median_num_edges_per_node = sorted(len(n.channels)
                                           for n in self.node_by_id.values())[self.num_nodes / 2]
        avg_capacity = sum(capacities) / float(len(capacities))
        median_capacity = sorted(capacities)[len(capacities) / 2]
        fmt = 'num_edges:{} per node:{:.1f} median per node:{:.1f}'
        print fmt.format(num_edges, num_edges / float(len(self.nodeids)),
                         median_num_edges_per_node)
        print 'avg_capacity:{} median capacity:{}'.format(avg_capacity, median_capacity)

    def dump_nodes(self, num=4):
        # dump some nodes and their channels
        for nodeid in self.nodeids[:num]:
            print
            node = self.node_by_id[nodeid]
            assert len(node.targets) < len(node.channels)
            # print node, zip(sorted(node.channels.keys()), sorted(node.targets))
            print 'targets:', sorted(node.targets)
            print 'edges:', sorted(node.channels.keys())
            print 'capacities:', sorted(node.channels.values())

    def rand_transfer(self, amount):
        """Attempt one transfer of `amount` between two distinct random
        nodes whose channels can carry it; returns the Transfer."""
        candidates = [n for n in self.node_by_id.values() if n.capacity_per_channel >= amount]
        sender = random.choice(candidates)
        receiver = sender
        while receiver is sender:
            receiver = random.choice(candidates)
        if DEBUG:
            print
            print "new transfer", receiver
        t = Transfer(sender.id, receiver.id, amount)
        res = sender.transfer(t, self.proactive_routing)
        t.success = res
        return t

    def run(self, steps=4, samples=100):
        """For `steps` transfer values spread over the capacity range, route
        `samples` random transfers each and print success/path statistics."""
        random.seed(42)
        # NOTE: the inner sampling loop reuses loop variable `i`,
        # shadowing the step index.
        for i in range(steps):
            value = self._capacity_distribution(i / float(steps))
            # fraction of nodes whose channels could carry this value
            usable_nodes = len([n for n in self.node_by_id.values()
                                if n.capacity_per_channel >= value]) / float(len(self.nodeids))
            transfers = []
            for i in range(samples):
                _ = self.rand_transfer(value)
                transfers.append(_)
            successful = [t for t in transfers if t.success]
            num_successful = len(successful)
            pct_successful = num_successful / float(len(transfers))
            if num_successful > 0:
                avg_path_len = sum([len(t.path) for t in successful]) / float(num_successful)
                median_path_len = sorted([len(t.path) for t in successful])[num_successful // 2]
                max_path_len = max([len(t.path) for t in successful])
            else:
                avg_path_len = median_path_len = max_path_len = 0
            avg_tried_len = sum([len(t.tried) for t in transfers]) / float(len(transfers))
            median_tried_len = sorted([len(t.tried) for t in transfers])[len(transfers) // 2]
            max_tried_len = max([len(t.tried) for t in transfers])
            fmt = 'value:{:-5.2f}{:-5.2f} success:{:.2f} p_len:{:-4.0f} {:-4.0f} {:-4.0f} ' + \
                  'tried:{:-4.0f} {:-4.0f} {:-4.0f}'
            print fmt.format(value, usable_nodes, pct_successful, avg_path_len,
                             median_path_len, max_path_len, avg_tried_len,
                             median_tried_len, max_tried_len)
if __name__ == '__main__':
    sim = Simulation()
    # config -- network size and channel/capacity parameters
    sim.num_nodes = 1000 * 1000
    sim.num_channels = 32
    sim.max_capacity = 128
    sim.min_capacity = 1
    sim.randomize_capacity = False
    sim.proactive_routing = True
    sim.max_tried = 100
    # setup
    sim.setup_network()
    sim.dump_nodes(4)
    # run sim
    # First sweep uses proactive routing; the second re-runs the same
    # sweep with it disabled, for comparison.
    print "running simulation w/proactive routing"
    sim.run(steps=10, samples=100)
    sim.proactive_routing = False
    print "running simulation"
    sim.run(steps=10, samples=100)
    # sim.rand_transfer(1.5)
"""
Todo Next: Model that nodes preferably connect nodes of similar capacity
Thoughts: The less value one has, the less valuable he is as a mediator
therefore the less channels he should have open.
The other way round for nodes with high values.
Q: Does it make sense to have different capacity based on the distance?
Being able to transfer to neighbours is essential
    so transfers can be facilitated at all
While having a short path is secondary.
"""
|
{
"content_hash": "dce3d49b6fca0cdecc8b8e3a2f8778ca",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 97,
"avg_line_length": 35.955621301775146,
"alnum_prop": 0.5610960256726734,
"repo_name": "tomashaber/raiden",
"id": "860028451da12da916103a85c63764597c893e6f",
"size": "12187",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/kadsim2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4536"
},
{
"name": "HTML",
"bytes": "21998"
},
{
"name": "JavaScript",
"bytes": "1996"
},
{
"name": "Makefile",
"bytes": "5184"
},
{
"name": "Python",
"bytes": "1222610"
},
{
"name": "Shell",
"bytes": "4570"
},
{
"name": "TypeScript",
"bytes": "75150"
}
],
"symlink_target": ""
}
|
from .boostaroota import *
|
{
"content_hash": "9811540b30cc083f852132496b95d8e8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.7777777777777778,
"repo_name": "ZRiddle/BoostARoota",
"id": "7f821f75cbb530d1df1f9259e75b850fd36d55a9",
"size": "28",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "boostaroota/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36987"
},
{
"name": "R",
"bytes": "503"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015 Christian Uhsat <christian@uhsat.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import base64
import binascii
import os
import re
import sys
try:
from urllib.error import URLError
from urllib.request import urlopen
except:
from urllib2 import URLError, urlopen # Python 2 fallback
try:
from Crypto.Cipher import AES
from Crypto.Hash import SHA512
except ImportError:
sys.exit("Requires PyCrypto (https://github.com/dlitz/pycrypto)")
__all__, __version__ = ["TinyCrypt"], "0.2.2"
class TinyCrypt(object):
    """
    Key/value store for encrypted messages, abusing TinyURL aliases.

    The alias is derived from a salted SHA-512 hash of the user key;
    the payload is AES-CFB encrypted, URL-safe base64 encoded, and
    hidden in the query string of a decoy URL.
    """

    # NOTE(review): a static salt plus a key-derived IV means identical
    # key/message pairs always yield identical ciphertext -- inherent
    # to this protocol, not fixed here.
    SALT = b"Use Your Own Salt"

    def __init__(self, decoy="http://test.com"):
        """
        Store the decoy URL template the payload is appended to.
        """
        self.decoy = decoy + "%%3Fdata=%s"

    def __repr__(self):
        """
        Return the protocol name and version.
        """
        return "TinyCrypt " + __version__

    def __hash(self, key):
        """
        Derive (digest, alias) from *key*: the salted SHA-512 digest
        and its first 40 hex characters (the TinyURL alias).
        """
        digest = SHA512.new(TinyCrypt.SALT + key.encode("utf-8")).digest()
        alias = binascii.hexlify(digest)[:40].decode("ascii")
        return (digest, alias)

    def __encrypt(self, key, data):
        """
        AES-CFB encrypt *data*; the digest doubles as cipher key and IV.
        """
        cipher = AES.new(key[:32], AES.MODE_CFB, key[-16:])
        return cipher.encrypt(data)

    def __decrypt(self, key, data):
        """
        AES-CFB decrypt *data*; inverse of __encrypt.
        """
        cipher = AES.new(key[:32], AES.MODE_CFB, key[-16:])
        return cipher.decrypt(data)

    def push(self, key, message):
        """
        Encrypt *message* and store it under the alias derived from *key*.
        """
        digest, alias = self.__hash(key)
        payload = self.__encrypt(digest, message)
        payload = base64.urlsafe_b64encode(payload).decode("ascii")
        url = "http://tinyurl.com/create.php?alias=%s&url=" + self.decoy
        urlopen(url % (alias, payload))

    def pull(self, key):
        """
        Return the message previously pushed under *key*, or None when
        the alias does not resolve (HTTP 404).
        """
        try:
            digest, alias = self.__hash(key)
            url = urlopen("http://tinyurl.com/" + alias).geturl()
            payload = re.split("^.+data=", url, 1)[1]
            payload = base64.urlsafe_b64decode(payload)
            return self.__decrypt(digest, payload).decode("utf-8")
        # 404 means "no message stored"; anything else is a real error
        except URLError as ex:
            if getattr(ex, "code", None) != 404:
                raise ex
def usage(text, *args):
    """
    Print a usage text, ANSI-colorized on POSIX terminals.

    *text* is a raw multi-line template: its first and last lines are
    dropped, a 4-column base indent is stripped, and *args* are
    %-interpolated into it before printing line by line.
    """
    colorize = os.name in ["posix"]  # hoisted: invariant across lines
    for raw in (text % args).split("\n")[1:-1]:
        line = raw[4:]
        if colorize:
            # Color description
            if re.match("^.* Version \d+\.\d+\.\d+$", line):
                line = line.replace("Version", "Version\x1B[34;1m")
                line = "\x1B[39;1m%s\x1B[0m" % line
            # Color list titles
            elif re.match("^[A-Za-z ]+:$", line):
                line = "\x1B[34m%s\x1B[0m" % line
            # Color list points
            elif re.match("^  (-.|[a-z]+)", line):
                line = line.replace("  ", "  \x1B[37;0m")
                line = "\x1B[34;1m%s\x1B[0m" % line
        print(line)
def main(script, arg="--help", *args):
    """
    _________ __ _______ ___
    /__ ___/__/_______ ___ ___/ ____/________ ___________/ /__
    / / / / ___ / / / / / / ___/ / / / ___ / ___/
    / / / / / / / /__/ / /___/ / / /__/ / /__/ / /__
    /__/ /__/__/ /__/\____ /______/__/ \____ / _____/\____/
    /_______/ /_______/__/
    Version %s
    Usage:
    %s [option|key] [message...]
    Options:
    -h --help Shows this text
    -l --license Shows license
    -v --version Shows version
    Report bugs to <christian@uhsat.de>
    """
    # CLI entry point. NOTE: the docstring above doubles as the runtime
    # usage text (fed to usage() via main.__doc__), so it must not be
    # reworded. Returns an error string on failure, None on success.
    try:
        script = os.path.basename(script)
        # Dispatch on the first argument: help/license/version flags,
        # otherwise treat it as the message key.
        if arg in ("/?", "-h", "--help"):
            usage(main.__doc__, __version__, script)
        elif arg in ("-l", "--license"):
            print(__doc__.strip())
        elif arg in ("-v", "--version"):
            print("TinyCrypt " + __version__)
        else:
            tinycrypt = TinyCrypt()
            # Pull first: print a stored message if one exists;
            # otherwise, if extra args were given, push them as a
            # space-joined message under key *arg*.
            message = tinycrypt.pull(arg)
            if message:
                print(message)
            elif args:
                tinycrypt.push(arg, " ".join(args))
    except Exception as ex:
        # Error string is handed to sys.exit by the __main__ guard.
        return "%s error: %s" % (script, ex)
if __name__ == "__main__":
    # main() returns None on success or an error string; sys.exit
    # prints a string argument and exits with a non-zero status.
    sys.exit(main(*sys.argv))
|
{
"content_hash": "dcd89bc35b5af8670b2edf787e2653c1",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 78,
"avg_line_length": 29.365079365079364,
"alnum_prop": 0.5293693693693694,
"repo_name": "cuhsat/tinycrypt",
"id": "f5ddb801438d28f078f2a2bdcb41fe70563b1739",
"size": "5572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7678"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.