text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = ["build_template"]
import numpy as np
from tempfile import mktemp
from .reflect_image import reflect_image
from .interface import registration
from .apply_transforms import apply_transforms
from .resample_image import resample_image_to_target
from ..core import ants_image_io as iio
from .. import utils
def build_template(
    initial_template=None,
    image_list=None,
    iterations=3,
    gradient_step=0.2,
    blending_weight=0.75,
    weights=None,
    **kwargs
):
    """
    Estimate an optimal template from an input image_list

    ANTsR function: N/A

    Arguments
    ---------
    initial_template : ANTsImage
        initialization for the template building

    image_list : ANTsImages
        images from which to estimate template

    iterations : integer
        number of template building iterations

    gradient_step : scalar
        for shape update gradient

    blending_weight : scalar
        weight for image blending

    weights : vector
        weight for each input image

    kwargs : keyword args
        extra arguments passed to ants registration

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read( ants.get_ants_data('r16') )
    >>> image2 = ants.image_read( ants.get_ants_data('r27') )
    >>> image3 = ants.image_read( ants.get_ants_data('r85') )
    >>> timage = ants.build_template( image_list = ( image, image2, image3 ) ).resample_image( (45,45))
    >>> timagew = ants.build_template( image_list = ( image, image2, image3 ), weights = (5,1,1) )
    """
    import os
    from tempfile import mkstemp

    if "type_of_transform" not in kwargs:
        type_of_transform = "SyN"
    else:
        type_of_transform = kwargs.pop("type_of_transform")

    if weights is None:
        weights = np.repeat(1.0 / len(image_list), len(image_list))
    # Normalize so the per-image contributions form a convex combination.
    weights = [x / sum(weights) for x in weights]

    if initial_template is None:
        # Bootstrap the template as the weighted average of the inputs,
        # resampled into the space of the first image.
        initial_template = image_list[0] * 0
        for i in range(len(image_list)):
            temp = image_list[i] * weights[i]
            temp = resample_image_to_target(temp, initial_template)
            initial_template = initial_template + temp

    xavg = initial_template.clone()
    for i in range(iterations):
        # Register every image to the current template and accumulate the
        # weighted average warp field and warped-intensity average.
        for k in range(len(image_list)):
            w1 = registration(
                xavg, image_list[k], type_of_transform=type_of_transform, **kwargs
            )
            if k == 0:
                wavg = iio.image_read(w1["fwdtransforms"][0]) * weights[k]
                xavgNew = w1["warpedmovout"] * weights[k]
            else:
                wavg = wavg + iio.image_read(w1["fwdtransforms"][0]) * weights[k]
                xavgNew = xavgNew + w1["warpedmovout"] * weights[k]
        # Progress indicator: mean magnitude of the average warp.
        print(wavg.abs().mean())
        # Shape update: move the template along the (negated, scaled)
        # average warp so it drifts toward the population mean shape.
        wscl = (-1.0) * gradient_step
        wavg = wavg * wscl
        # mkstemp (unlike the deprecated, race-prone mktemp) creates the
        # file securely; close the fd since image_write reopens by name.
        fd, wavgfn = mkstemp(suffix=".nii.gz")
        os.close(fd)
        iio.image_write(wavg, wavgfn)
        xavg = apply_transforms(xavgNew, xavgNew, wavgfn)
        if blending_weight is not None:
            # Blend with a sharpened copy to counteract averaging blur.
            xavg = xavg * blending_weight + utils.iMath(xavg, "Sharpen") * (
                1.0 - blending_weight
            )
    return xavg
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/registration/build_template.py",
"copies": "1",
"size": "3143",
"license": "apache-2.0",
"hash": 8487119511016370000,
"line_mean": 29.5145631068,
"line_max": 103,
"alpha_frac": 0.6019726376,
"autogenerated": false,
"ratio": 3.5715909090909093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4673563546690909,
"avg_score": null,
"num_lines": null
} |
__all__ = ('BulkWhoisCymru')
from bulkwhois import BulkWhois
class BulkWhoisCymru(BulkWhois):
    """
    An interface to the Team Cymru IP-to-ASN bulk whois service
    (whois.cymru.com).

    Usage:
        from bulkwhois.cymru import BulkWhoisCymru

        bw = BulkWhoisCymru()
        bw.lookup_ips(["69.12.38.242", "212.58.241.131"])

    returns a dict of dicts keyed by IP address:
        {
            '212.58.241.131': {
                'allocated': '1999-10-08',
                'as_name': 'TELIANET TeliaNet Global Network',
                'asn': '1299',
                'bgp_prefix': '212.58.224.0/19',
                'cc': 'GB',
                'ip': '212.58.241.131',
                'registry': 'ripencc'
            },
            '69.12.38.242': {
                'allocated': '2002-12-04',
                'as_name': 'LEVEL3 Level 3 Communications',
                'asn': '3356',
                'bgp_prefix': '69.12.0.0/17',
                'cc': 'US',
                'ip': '69.12.38.242',
                'registry': 'arin'
            }
        }
    """

    def __init__(self, **kwargs):
        super(BulkWhoisCymru, self).__init__(**kwargs)
        # Cymru-specific settings override the BulkWhois defaults:
        # "verbose" makes the server emit all fields listed below, and the
        # first line of the reply is a column header we must skip.
        self.server = "whois.cymru.com"
        self.leader = "begin\nverbose"
        self.has_result_header = True
        self.field_names = ["asn", "ip", "bgp_prefix", "cc",
                            "registry", "allocated", "as_name"]
# Manual smoke test: queries the live Team Cymru whois service.
# NOTE(review): Python 2 syntax (print statements); requires network access.
if __name__ == "__main__":
    # Mix of routable, private (192.168.0.10) and unassigned addresses.
    lookups = ["201.21.203.254", "203.21.203.254", "130.102.6.192", "192.168.0.10", "203.20.1.2", "200.200.200.200", "8.8.8.8"]
    bw = BulkWhoisCymru()
    print "Server: " + bw.server
    print "Port: " + bw.port
    print "Leader: " + bw.leader
    print "Footer: " + bw.footer
    print bw.lookup_ips_raw(lookups)
    print bw.lookup_ips(lookups)
| {
"repo_name": "csirtfoundry/BulkWhois",
"path": "deb_dist/bulkwhois-0.2.1/build/lib.linux-x86_64-2.6/bulkwhois/cymru.py",
"copies": "5",
"size": "1946",
"license": "mit",
"hash": -8133520806268474000,
"line_mean": 32.5517241379,
"line_max": 127,
"alpha_frac": 0.4398766701,
"autogenerated": false,
"ratio": 3.4874551971326166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02048008389009631,
"num_lines": 58
} |
__all__ = ('BulkWhois')
import telnetlib
import socket
import logging
class BulkWhois(object):
    """
    Query a list of IP addresses from a bulk whois server. This is an
    efficient way to query a large number of IP addresses: it sends all
    the IP addresses at once and receives all whois results together.

    This module takes the approach that you know what you're doing: if
    you have non-IP data in there (such as hostnames), the whois server
    will ignore them and they won't be included in results.

    This class is not designed to be called directly: rather, use one of
    the subclass interfaces to specific bulk whois servers such as
    bulkwhois.cymru or bulkwhois.shadowserver, which are set to
    appropriate default settings for those services.

    Usage:
        import BulkWhois from bulkwhois

        bw = BulkWhois()
        records = bw.lookup_ips(["192.168.0.1", "10.1.1.1"])

    Args:
        leader: Any text that needs to appear before the bulk whois query
        footer: Any text that needs to appear after the bulk whois query
        server: the hostname of the whois server to use
        port: the whois server port number to connect to
        record_delim: the char to split records received from the server
        field_delim: the char to split individual fields in each record
        has_result_header: set to True if the whois server sends a header
            line in the results which has no whois data
        field_names: a list defining the order of the names of the fields
            returned by the server. Used to populate the dict returned.
    """

    # Class-level defaults; instances override these in __init__.
    leader = ""
    footer = ""
    server = ""
    port = -1
    record_delim = ""
    field_delim = ""
    has_result_header = False
    field_names = []

    def __init__(self,
                 leader="begin",
                 footer="end",
                 server="asn.shadowserver.org",
                 port="43",
                 record_delim="\n",
                 field_delim="|",
                 has_result_header=False):
        self.leader = leader
        self.footer = footer
        self.server = server
        self.port = port
        self.record_delim = record_delim
        self.field_delim = field_delim
        self.has_result_header = has_result_header

    def _lookup(self, ip_list):
        """
        Take a list of IP addresses, format them according to the
        whois server spec, connect on the specified port, send the
        formatted data, return the data received.

        Raises:
            IOError on any connection problems
        """
        result = ""
        ip_list = self._filter_ipv4(ip_list)
        query = self._format_list(ip_list)
        try:
            tn = telnetlib.Telnet(self.server, self.port)
            tn.write(query)
            result = tn.read_all()
            tn.close()
        except socket.gaierror:
            raise IOError("Couldn't connect to %s:%s" % (self.server,
                                                         self.port))
        except EOFError:
            raise IOError("Server dropped connection")
        return result

    def lookup_ips_raw(self, ip_list):
        """
        Get the raw output returned by the whois server as a string.
        """
        return self._lookup(ip_list)

    def lookup_ips(self, ip_list):
        """
        Return a dict of dicts indexed by IP address with whois results.

        Ensure that the "ip" field exists in the field_names array in the
        position of the IP address.

        Args:
            ip_list: an array of IP addresses. We don't check that
                the IP addresses are valid: the whois server will not
                return a result for invalid addresses.

        Returns:
            A dict mapping records by IP address. Dict fields are named
            according to the field_names array.

        Raises:
            ValueError if "ip" field is not set in field_names.
        """
        # Validate field_names *before* using it (and before hitting the
        # network): the original called .index("ip") first, so a missing
        # "ip" raised a bare ValueError and the friendly check below it
        # was unreachable.
        if "ip" not in self.field_names:
            raise ValueError("You need to include an 'ip' field in the field_names array.")
        ip_index = self.field_names.index("ip")
        raw = self._lookup(ip_list)
        records = {}
        for line_num, line in enumerate(raw.split(self.record_delim)):
            # Some whois results have a header we'll throw away. (The
            # original used a bare `next` here -- a no-op expression, so
            # the header line was never actually skipped.)
            if line_num == 0 and self.has_result_header:
                continue
            # Lots of fields are space padded, so strip each one.
            fields = [field.strip() for field in line.split(self.field_delim)]
            if len(fields) < len(self.field_names):
                # Skip this line: malformed, or doesn't match our template.
                continue
            # setdefault keeps the first record seen for a duplicate IP.
            records.setdefault(fields[ip_index],
                               dict(zip(self.field_names, fields)))
        return records

    def _filter_ipv4(self, ip_list):
        """Return only the entries of ip_list that parse as IPv4 addresses."""
        clean_ips = []
        for ip in ip_list:
            try:
                socket.inet_pton(socket.AF_INET, ip)
            except socket.error:
                logging.info("'%s' isn't an IPv4 address: ignoring" % str(ip))
            else:
                clean_ips.append(ip)
        return clean_ips

    def _format_list(self, ip_list):
        """Wrap the IP list in the server's leader/footer protocol framing."""
        return self.record_delim.join([self.leader,
                                       self.record_delim.join(ip_list),
                                       self.footer]) + self.record_delim
# Manual smoke test: exercises both the Shadowserver and Cymru endpoints.
# NOTE(review): Python 2 syntax (print statements); requires network access.
if __name__ == "__main__":
    lookups = ["201.21.203.254", "203.21.203.254", "130.102.6.192", "192.168.0.10", "203.20.1.2", "200.200.200.200", "8.8.8.8"]
    bw = BulkWhois(leader="begin origin")
    bw.field_names=["ip", "asn", "bgp_prefix", "as_name", "cc", "register", "org_name"]
    print bw.lookup_ips_raw(lookups)
    print bw.lookup_ips(lookups)
    # Same lookups against Team Cymru, which uses a different field order.
    bw2 = BulkWhois(leader="begin\nverbose", server="asn.cymru.com")
    bw2.field_names=["asn", "ip", "bgp_prefix", "cc", "registry", "allocated", "as_name"]
    print bw2.lookup_ips_raw(lookups)
    print bw2.lookup_ips(lookups)
| {
"repo_name": "csirtfoundry/BulkWhois",
"path": "bulkwhois/__init__.py",
"copies": "1",
"size": "6227",
"license": "mit",
"hash": 7188524273878808000,
"line_mean": 33.9831460674,
"line_max": 127,
"alpha_frac": 0.5651196403,
"autogenerated": false,
"ratio": 4.187626092804304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252745733104304,
"avg_score": null,
"num_lines": null
} |
__all__ = ('BulkWhoisShadowserver')
import socket
from bulkwhois import BulkWhois
class BulkWhoisShadowserver(BulkWhois):
    """
    An interface to the Shadowserver bulk whois service
    (asn.shadowserver.org).

    Usage:
        from bulkwhois.shadowserver import BulkWhoisShadowserver

        bw = BulkWhoisShadowserver()
        bw.lookup_ips(["69.12.38.242", "212.58.241.131"])

    returns a dict of dicts keyed by IP address, e.g.:
        {
            '10.1.1.1': {'as_name': '',
                         'asn': '',
                         'bgp_prefix': '',
                         'cc': '-',
                         'ip': '10.1.1.1',
                         'org_name': 'PRIVATE IP ADDRESS LAN',
                         'register': '-'},
            '192.168.0.1': {'as_name': '',
                            'asn': '',
                            'bgp_prefix': '',
                            'cc': '-',
                            'ip': '192.168.0.1',
                            'org_name': 'PRIVATE IP ADDRESS LAN',
                            'register': '-'}
        }
    """

    # TODO: shadowserver whois returns nothing if there's one invalid IP
    # addy, unlike Cymru. Probably need to add check function - that's
    # annoying.
    def __init__(self, **kwargs):
        super(BulkWhoisShadowserver, self).__init__(**kwargs)
        # Shadowserver-specific defaults: endpoint, query leader and the
        # order in which the server emits its result columns.
        self.server = "asn.shadowserver.org"
        self.leader = "begin origin"
        self.field_names = ["ip", "asn", "bgp_prefix", "as_name",
                            "cc", "register", "org_name"]
# Manual smoke test: queries the live Shadowserver whois service.
# NOTE(review): Python 2 syntax (print statements); requires network access.
if __name__ == "__main__":
    lookups = ["201.21.203.254", "203.21.203.254", "130.102.6.192", "192.168.0.10", "203.20.1.2", "200.200.200.200", "8.8.8.8"]
    bw = BulkWhoisShadowserver()
    print "Server: " + bw.server
    print "Port: " + bw.port
    print "Leader: " + bw.leader
    print "Footer: " + bw.footer
    print bw.lookup_ips_raw(lookups)
    print bw.lookup_ips(lookups)
| {
"repo_name": "csirtfoundry/BulkWhois",
"path": "deb_dist/bulkwhois-0.2.1/bulkwhois/shadowserver.py",
"copies": "5",
"size": "1802",
"license": "mit",
"hash": 4705747407618506000,
"line_mean": 30.0689655172,
"line_max": 127,
"alpha_frac": 0.4961154273,
"autogenerated": false,
"ratio": 3.3808630393996246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6376978466699624,
"avg_score": null,
"num_lines": null
} |
__all__ = ['CacheHandler', 'CacheFileHandler', 'DjangoSessionCacheHandler', 'MemoryCacheHandler']
import errno
import json
import logging
import os
from spotipy.util import CLIENT_CREDS_ENV_VARS
logger = logging.getLogger(__name__)
class CacheHandler():
    """
    An abstraction layer for handling the caching and retrieval of
    authorization tokens.

    Custom extensions of this class must implement get_cached_token
    and save_token_to_cache methods with the same input and output
    structure as the CacheHandler class.
    """

    def get_cached_token(self):
        """
        Get and return a token_info dictionary object.
        """
        raise NotImplementedError()

    def save_token_to_cache(self, token_info):
        """
        Save a token_info dictionary object to the cache and return None.
        """
        # The original had an unreachable `return None` after this raise;
        # subclasses implicitly return None when they override this method.
        raise NotImplementedError()
class CacheFileHandler(CacheHandler):
    """
    Handles reading and writing cached Spotify authorization tokens
    as json files on disk.
    """

    def __init__(self,
                 cache_path=None,
                 username=None):
        """
        Parameters:
             * cache_path: May be supplied, will otherwise be generated
                           (takes precedence over `username`)
             * username: May be supplied or set as environment variable
                         (will set `cache_path` to `.cache-{username}`)
        """
        if cache_path:
            self.cache_path = cache_path
        else:
            cache_path = ".cache"
            username = (username
                        or os.getenv(CLIENT_CREDS_ENV_VARS["client_username"]))
            if username:
                cache_path += "-" + str(username)
            self.cache_path = cache_path

    def get_cached_token(self):
        """Return the cached token_info dict, or None if absent/unreadable."""
        token_info = None
        try:
            # `with` guarantees the handle is closed even if the read or
            # the JSON parse raises (the original leaked it on error).
            with open(self.cache_path) as f:
                token_info = json.loads(f.read())
        except IOError as error:
            if error.errno == errno.ENOENT:
                logger.debug("cache does not exist at: %s", self.cache_path)
            else:
                logger.warning("Couldn't read cache at: %s", self.cache_path)
        return token_info

    def save_token_to_cache(self, token_info):
        """Write token_info as JSON; failures are logged, not raised."""
        try:
            with open(self.cache_path, "w") as f:
                f.write(json.dumps(token_info))
        except IOError:
            logger.warning('Couldn\'t write token to cache at: %s',
                           self.cache_path)
class MemoryCacheHandler(CacheHandler):
    """
    Keeps the token info in memory as an instance attribute; the cached
    token disappears when this handler instance is garbage collected.
    """

    def __init__(self, token_info=None):
        """
        Parameters:
            * token_info: The token info to store in memory. Can be None.
        """
        self.token_info = token_info

    def get_cached_token(self):
        """Return the in-memory token info (None if never saved)."""
        return self.token_info

    def save_token_to_cache(self, token_info):
        """Replace the in-memory token info."""
        self.token_info = token_info
class DjangoSessionCacheHandler(CacheHandler):
    """
    A cache handler that stores the token info in the session framework
    provided by Django.

    Read more at https://docs.djangoproject.com/en/3.2/topics/http/sessions/
    """

    def __init__(self, request):
        """
        Parameters:
            * request: HttpRequest object provided by Django for every
                       incoming request
        """
        self.request = request

    def get_cached_token(self):
        """Return the token info stored in the session, or None."""
        token_info = None
        try:
            token_info = self.request.session['token_info']
        except KeyError:
            logger.debug("Token not found in the session")
        return token_info

    def save_token_to_cache(self, token_info):
        """Store token_info in the Django session; never raises."""
        try:
            self.request.session['token_info'] = token_info
        except Exception as e:
            # Broad catch is deliberate: failing to persist the token must
            # not break the request cycle. Lazy %-formatting defers building
            # the message (the original concatenated eagerly).
            logger.warning("Error saving token to cache: %s", e)
| {
"repo_name": "plamere/spotipy",
"path": "spotipy/cache_handler.py",
"copies": "1",
"size": "4102",
"license": "mit",
"hash": 7064776875447933000,
"line_mean": 28.3,
"line_max": 97,
"alpha_frac": 0.5862993662,
"autogenerated": false,
"ratio": 4.39186295503212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001531431192059488,
"num_lines": 140
} |
__all__ = ['Callback', 'ModeScriptCallback']
class Callback:
    """Wraps a dedicated-server callback, exposing the positional
    parameters of well-known ManiaPlanet/TrackMania callbacks as named
    attributes. Unknown callbacks only get `name` and `parameter`."""

    # Attribute names for each known callback, in parameter order.
    _PARAM_NAMES = {
        'ManiaPlanet.PlayerChat':
            ('playerid', 'login', 'text', 'isCommand'),
        'TrackMania.PlayerCheckpoint':
            ('playerid', 'login', 'time_score', 'current_lap',
             'checkpoint_index'),
        'TrackMania.PlayerFinish':
            ('playerid', 'login', 'time_score'),
        'ManiaPlanet.PlayerManialinkPageAnswer':
            ('playerid', 'login', 'answer', 'entries'),
        'ManiaPlanet.PlayerConnect':
            ('login', 'isSpectator'),
        'ManiaPlanet.PlayerDisconnect':
            ('login', 'DisconnectionReason'),
        'ManiaPlanet.MapListModified':
            ('CurrentMapIndex', 'NextMapIndex', 'IsListModified'),
    }

    def __init__(self, name, parameter):
        self.name = name
        self.parameter = parameter
        attr_names = self._PARAM_NAMES.get(name)
        if attr_names is not None:
            # Indexing (not zip) keeps the original behavior of raising
            # IndexError when the parameter list is shorter than expected.
            for position, attr in enumerate(attr_names):
                setattr(self, attr, parameter[position])
class ModeScriptCallback:
    """Wraps a mode-script callback.

    NOTE(review): the `name` argument is ignored here -- script callbacks
    carry their actual name in parameter[0] and their payload in
    parameter[1], so `self.name` is taken from the parameter list.
    """
    def __init__(self, name, parameter):
        self.name = parameter[0]
        self.parameter = parameter[1] | {
"repo_name": "juergenz/pie",
"path": "src/pie/callbacks.py",
"copies": "1",
"size": "1762",
"license": "mit",
"hash": 2428973012078145500,
"line_mean": 27.9016393443,
"line_max": 59,
"alpha_frac": 0.5550510783,
"autogenerated": false,
"ratio": 4.2457831325301205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.530083421083012,
"avg_score": null,
"num_lines": null
} |
__all__ = ['camera']
import os
import pickle
from paraview.simple import GetActiveCamera, RenderAllViews, WriteImage
class camera:
    """An object to store a single camera location/view. You can make a
    list/dict of these objects to save interesting views for your project.
    This object saves just a few parameters about the camera so that it can
    easily be reconstructed."""
    __displayname__ = 'Camera'
    __category__ = 'macro'

    def __init__(self, cam=None):
        """
        Args:
            cam (vtkRenderingOpenGL2Python.vtkOpenGLCamera, optional):
                The camera to copy state from. Defaults to the currently
                active camera.
        """
        # __init__ and update() perform the same copy; share one code path.
        self.update(cam=cam)

    # Variable access for internal use
    def _getOrientation(self):
        return self.orientation

    def _getPosition(self):
        return self.position

    def _getFocalPoint(self):
        return self.focus

    def _getViewUp(self):
        return self.viewup

    def update(self, cam=None):
        """Updates this object to the camera in the currently activated view
        unless a vtkOpenGLCamera is specified.

        Args:
            cam (vtkRenderingOpenGL2Python.vtkOpenGLCamera) : The camera you
                wish to update this object to. Totally optional.
        """
        if cam is None:
            # This allows us to dynamically select cameras
            cam = GetActiveCamera()
        self.orientation = cam.GetOrientation()
        self.position = cam.GetPosition()
        self.focus = cam.GetFocalPoint()
        self.viewup = cam.GetViewUp()

    def view(self, cam=None):
        """Update the given camera (or the active one) to this saved location.

        Args:
            cam (vtkRenderingOpenGL2Python.vtkOpenGLCamera) : The camera you
                wish to view/update in the current render view.
        """
        if cam is None:
            cam = GetActiveCamera()
        # Set the camera position and orientation. (The saved orientation
        # is not applied: position/view-up/focal-point fully determine the
        # view, and the original fetched orientation without using it.)
        cam.SetPosition(self._getPosition())
        cam.SetViewUp(self._getViewUp())
        cam.SetFocalPoint(self._getFocalPoint())
        RenderAllViews()

    def screenShot(self, cam=None, path=os.path.expanduser('~'), basenm='view'):
        """Save a screenshot of a single camera view.

        Args:
            cam (vtkRenderingOpenGL2Python.vtkOpenGLCamera) : The camera you
                wish to view then save a screenshot of.
            path (str) : Directory to save the screenshot in (default: home).
            basenm (str) : The file basename for the screenshot.
        """
        if cam is None:
            cam = GetActiveCamera()
        os.chdir(path)
        self.view(cam=cam)
        WriteImage("%s.png" % (basenm))

    # Static methods for structures containing cameras
    @staticmethod
    def saveViews(lib, filename='views', path=os.path.expanduser('~')):
        """Serialize a dict/list of views out to `<path>/<filename>.camera`.

        Args:
            lib (dict or list) : iterable containing multiple `camera` objects.
            filename (str) : The file basename for the serialized file.
            path (str) : Directory to save in (default: user home directory).
        """
        ext = '.camera'
        os.chdir(path)
        # `with` closes the file even if pickling fails (the original
        # leaked the handle on error).
        with open(filename + ext, 'wb') as f:
            pickle.dump(lib, f, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def loadViews(filename='views.camera', path=os.path.expanduser('~')):
        """Load a file containing serialized camera objects.

        Args:
            filename (str) : Basename of the serialized file.
            path (str) : Directory to load from; relative paths resolve
                against the user home directory by default.
        """
        os.chdir(path)
        with open(filename, 'rb') as f:
            return pickle.load(f)

    @staticmethod
    def screenShotViews(views, cam=None, path=os.path.expanduser('~'), basenm='view'):
        """Save screenshots of many views/cameras.

        Args:
            views (dict or list) : iterable containing multiple `camera` objects.
            cam (vtkRenderingOpenGL2Python.vtkOpenGLCamera) : camera to drive.
            path (str) : Directory to save screenshots in (default: home).
            basenm (str) : The file basename for the screenshots.
        """
        if cam is None:
            cam = GetActiveCamera()

        def _iter(obj):
            # Keys for a dict, indices for a list. `range` replaces the
            # Python-2-only `xrange` the original used.
            return obj if isinstance(obj, dict) else range(len(obj))

        os.chdir(path)
        for v in _iter(views):
            views[v].view(cam=cam)
            WriteImage("%s_%s.png" % (basenm, v))
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "pvmacros/vis/objs.py",
"copies": "1",
"size": "5418",
"license": "bsd-3-clause",
"hash": 8970695320851323000,
"line_mean": 37.1549295775,
"line_max": 242,
"alpha_frac": 0.6321520856,
"autogenerated": false,
"ratio": 4.249411764705882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011515075621182345,
"num_lines": 142
} |
__all__ = ["Capabilities", \
"Switches"]
from browser.status import *
from base.bind import Bind
from base.log import VLOG
class Switches(object):
    """Container for browser command-line switches (name -> value map)."""

    def __init__(self):
        self.switch_map = {}

    def SetSwitch(self, name, value=""):
        self.switch_map[name] = value

    # In case of same key, |switches| will override.
    def SetFromSwitches(self, switches):
        # .items() works on both Python 2 and 3 (the original's
        # iteritems() is Python-2-only).
        for key, value in switches.switch_map.items():
            self.switch_map[key] = value

    # Sets a switch from the capabilities, of the form [--]name[=value].
    def SetUnparsedSwitch(self, unparsed_switch):
        equals_index = unparsed_switch.find('=')
        if equals_index != -1:
            value = unparsed_switch[equals_index + 1:]
            name_end = equals_index
        else:
            # No '=': the whole token (minus any leading --) is the name
            # and the value is empty; the original mishandled this case.
            value = ""
            name_end = len(unparsed_switch)
        start_index = 0
        if unparsed_switch[:2] == "--":
            start_index = 2
        name = unparsed_switch[start_index:name_end]
        self.SetSwitch(name, value)

    def RemoveSwitch(self, name):
        del self.switch_map[name]

    def HasSwitch(self, name):
        # The original `self.switch_map.has_key[name]` subscripted the
        # has_key method (TypeError on every call); membership test is the
        # intended, version-portable check.
        return name in self.switch_map

    def GetSwitchValue(self, name):
        if name not in self.switch_map:
            return ""
        return self.switch_map[name]

    def GetSwitchValueNative(self, name):
        if name not in self.switch_map:
            return ""
        return self.switch_map[name]

    def GetSize(self):
        return len(self.switch_map)

    def ToString(self):
        """Render as '--name[=value] ' pairs for a command line."""
        string = ""
        for key, value in self.switch_map.items():
            string += "--" + key
            if len(value):
                # Values containing spaces are replaced with "true"
                # (preserved quirk of the original implementation).
                if value.find(" ") != -1:
                    value = "true"
                string += "=" + value + " "
        return string
def ParseBoolean(member, option, capabilities):
    """Validate a boolean capability and store it on `capabilities.<member>`."""
    if type(option) != bool:
        return Status(kUnknownError, "must be a boolean")
    if not hasattr(capabilities, member):
        return Status(kUnknownError, "has no such member variety")
    setattr(capabilities, member, option)
    return Status(kOk)
def ParseString(member, option, capabilities):
    """Validate a non-empty string capability and store it on
    `capabilities.<member>`."""
    if type(option) != str and type(option) != unicode:
        return Status(kUnknownError, "must be a string")
    if not option:
        return Status(kUnknownError, "cannot be empty")
    if not hasattr(capabilities, member):
        return Status(kUnknownError, "has no such member variety")
    setattr(capabilities, member, option)
    return Status(kOk)
# Parser for capabilities that are recognized but deliberately ignored;
# always reports success.
def IgnoreCapability(option, capabilities):
    return Status(kOk)
def ParseTizenXwalk(option, capabilities):
    """Parse the 'tizenDebuggerAddress' capability, a 'host:port' string."""
    if type(option) != str and type(option) != unicode:
        return Status(kUnknownError, "must be 'host:port'")
    values = option.split(":")
    if len(values) != 2:
        return Status(kUnknownError, "must be 'host:port'")
    try:
        port = int(values[1])
    except ValueError:
        # A non-numeric port used to escape as an uncaught ValueError;
        # report it as a parse error like the other validations.
        return Status(kUnknownError, "port must be a number")
    if port <= 0:
        return Status(kUnknownError, "port must be > 0")
    # TODO: I make debugger_address equal to "host:port" in string type
    capabilities.tizen_debugger_address = option
    return Status(kOk)
def ParseUseExistingBrowser(option, capabilities):
    """Parse the 'debuggerAddress' capability, a 'host:port' string."""
    if type(option) != str and type(option) != unicode:
        return Status(kUnknownError, "must be 'host:port'")
    values = option.split(":")
    if len(values) != 2:
        return Status(kUnknownError, "must be 'host:port'")
    try:
        port = int(values[1])
    except ValueError:
        # A non-numeric port used to escape as an uncaught ValueError;
        # report it as a parse error like the other validations.
        return Status(kUnknownError, "port must be a number")
    if port <= 0:
        return Status(kUnknownError, "port must be > 0")
    # TODO: I make debugger_address equal to "host:port" in string type
    capabilities.debugger_address = option
    return Status(kOk)
def ParseFilePath(member, option, capabilities):
    """Validate a file-path (string) capability and store it on
    `capabilities.<member>`."""
    if type(option) != str and type(option) != unicode:
        return Status(kUnknownError, "must be a string")
    if not hasattr(capabilities, member):
        return Status(kUnknownError, "has no such member variety")
    setattr(capabilities, member, option)
    return Status(kOk)
def ParseDict(member, option, capabilities):
    """Validate a dictionary capability and store it on
    `capabilities.<member>`."""
    if type(option) != dict:
        return Status(kUnknownError, "must be a dictionary")
    if not hasattr(capabilities, member):
        return Status(kUnknownError, "has no such member variety")
    setattr(capabilities, member, option)
    return Status(kOk)
def ParseLogPath(option, capabilities):
    """Set capabilities.log_path from the 'logPath' capability string."""
    if type(option) != str and type(option) != unicode:
        return Status(kUnknownError, "must be a string")
    capabilities.log_path = option
    return Status(kOk)
def ParseExtensions(option, capabilities):
    """Append base64-encoded extension strings to capabilities.extensions."""
    if type(option) != list:
        return Status(kUnknownError, "must be a list")
    for extension in option:
        if type(extension) != str and type(extension) != unicode:
            # The original passed StatusCode.kUnknownError here, but
            # StatusCode is not defined anywhere in this module (every
            # sibling parser uses the bare kUnknownError constant), so this
            # error path raised NameError instead of reporting the problem.
            return Status(kUnknownError, "each extension must be a base64 encoded string")
        capabilities.extensions.append(extension)
    return Status(kOk)
def IgnoreDeprecatedOption(option_name, option, capabilities):
    """Log and ignore a deprecated xwalk option; always succeeds."""
    # Log the option *name*: the value may be any JSON type (concatenating
    # a non-string raised TypeError in the original, which passed `option`),
    # and the name is what identifies the deprecated capability.
    VLOG(2, "Deprecated xwalk option is ignored: " + option_name)
    return Status(kOk)
def ParseExcludeSwitches(option, capabilities):
    """Add each switch name in `option` to capabilities.exclude_switches."""
    if type(option) != list:
        return Status(kUnknownError, "must be a list")
    for name in option:
        if type(name) != str and type(name) != unicode:
            return Status(kUnknownError, "each switch to be removed must be a string")
        capabilities.exclude_switches.add(name)
    return Status(kOk)
def ParseSwitches(option, capabilities):
    """Parse a list of '[--]name[=value]' strings into capabilities.switches."""
    if type(option) != list:
        return Status(kUnknownError, "must be a list")
    for arg in option:
        if type(arg) != str and type(arg) != unicode:
            return Status(kUnknownError, "each argument must be a string")
        capabilities.switches.SetUnparsedSwitch(arg)
    return Status(kOk)
def ParseXwalkOptions(capability, capabilities):
    """Parse the xwalkOptions capability dictionary into `capabilities`.

    The set of recognized sub-options depends on the target: Android,
    Tizen, attaching to an existing browser, or launching a desktop binary.
    Returns Status(kOk) on success, an error Status otherwise.
    """
    if type(capability) != dict:
        return Status(kUnknownError, "must be a dictionary")
    # `in` replaces the Python-2-only dict.has_key().
    is_android = "androidPackage" in capability
    is_existing = "debuggerAddress" in capability
    is_tizen = "tizenDebuggerAddress" in capability
    parser_map = {}
    # Ignore 'args', 'binary' and 'extensions' capabilities by default, since
    # the Java client always passes them.
    parser_map["args"] = Bind(IgnoreCapability)
    parser_map["binary"] = Bind(IgnoreCapability)
    parser_map["extensions"] = Bind(IgnoreCapability)
    if is_android:
        parser_map["androidActivity"] = Bind(ParseString, ["android_activity", capability.get("androidActivity"), capabilities])
        parser_map["androidDeviceSerial"] = Bind(ParseString, ["android_device_serial", capability.get("androidDeviceSerial"), capabilities])
        parser_map["androidPackage"] = Bind(ParseString, ["android_package", capability.get("androidPackage"), capabilities])
        parser_map["androidProcess"] = Bind(ParseString, ["android_process", capability.get("androidProcess"), capabilities])
        parser_map["androidUseRunningApp"] = Bind(ParseBoolean, ["android_use_running_app", capability.get("androidUseRunningApp"), capabilities])
        parser_map["args"] = Bind(ParseSwitches, [capability.get("args"), capabilities])
        parser_map["loadAsync"] = Bind(IgnoreDeprecatedOption, ["loadAsync", capability.get("loadAsync"), capabilities])
    elif is_tizen:
        parser_map["tizenDebuggerAddress"] = Bind(ParseTizenXwalk, [capability.get("tizenDebuggerAddress"), capabilities])
        parser_map["tizenAppId"] = Bind(ParseString, ["tizen_app_id", capability.get("tizenAppId"), capabilities])
        parser_map["tizenAppName"] = Bind(ParseString, ["tizen_app_name", capability.get("tizenAppName"), capabilities])
        parser_map["tizenDeviceSerial"] = Bind(ParseString, ["tizen_device_serial", capability.get("tizenDeviceSerial"), capabilities])
        parser_map["tizenUseRunningApp"] = Bind(ParseBoolean, ["tizen_use_running_app", capability.get("tizenUseRunningApp"), capabilities])
    elif is_existing:
        parser_map["debuggerAddress"] = Bind(ParseUseExistingBrowser, [capability.get("debuggerAddress"), capabilities])
    else:
        parser_map["args"] = Bind(ParseSwitches, [capability.get("args"), capabilities])
        parser_map["binary"] = Bind(ParseFilePath, ["binary", capability.get("binary"), capabilities])
        parser_map["detach"] = Bind(ParseBoolean, ["detach", capability.get("detach"), capabilities])
    parser_map["excludeSwitches"] = Bind(ParseExcludeSwitches, [capability.get("excludeSwitches"), capabilities])
    parser_map["extensions"] = Bind(ParseExtensions, [capability.get("extensions"), capabilities])
    parser_map["forceDevToolsScreenshot"] = Bind(ParseBoolean, ["force_devtools_screenshot", capability.get("forceDevToolsScreenshot"), capabilities])
    parser_map["loadAsync"] = Bind(IgnoreDeprecatedOption, ["loadAsync", capability.get("loadAsync"), capabilities])
    parser_map["localState"] = Bind(ParseDict, ["local_state", capability.get("localState"), capabilities])
    parser_map["logPath"] = Bind(ParseLogPath, [capability.get("logPath"), capabilities])
    parser_map["minidumpPath"] = Bind(ParseString, ["minidump_path", capability.get("minidumpPath"), capabilities])
    parser_map["prefs"] = Bind(ParseDict, ["prefs", capability.get("prefs"), capabilities])
    for key, value in capability.items():
        if value is None:
            continue
        # The original indexed parser_map[key] unconditionally, so an
        # unrecognized option key raised KeyError out of the driver; report
        # it as a parse error Status instead, like the other failures.
        if key not in parser_map:
            VLOG(0, "unrecognized xwalk option: " + key)
            return Status(kUnknownError, "cannot parse " + key)
        status = parser_map[key].Run()
        if status.IsError():
            VLOG(0, "error parse xwalk option: " + key)
            return Status(kUnknownError, "cannot parse " + key)
    return Status(kOk)
def ParseProxy(option, capabilities):
    """Translate a WebDriver 'proxy' capability dict into Xwalk switches."""
    proxy_dict = option
    if type(proxy_dict) != dict:
        return Status(kUnknownError, "must be a dictionary")
    proxy_type = proxy_dict.get("proxyType")
    #if type(proxy_type) != str and type(proxy_type) != unicode:
    if type(proxy_type) != str:
        return Status(kUnknownError, "'proxyType' must be a string")
    # str.lower() returns a new string; the original discarded the result,
    # so mixed-case values like "Direct" were never recognized.
    proxy_type = proxy_type.lower()
    if proxy_type == "direct":
        capabilities.switches.SetSwitch("no-proxy-server")
    elif proxy_type == "system":
        # Xwalk default.
        pass
    elif proxy_type == "pac":
        proxy_pac_url = proxy_dict.get("proxyAutoconfigUrl")
        #if type(proxy_pac_url) != str and type(proxy_pac_url) != unicode:
        if type(proxy_pac_url) != str:
            return Status(kUnknownError, "'proxyAutoconfigUrl' must be a string")
        capabilities.switches.SetSwitch("proxy-pac-url", proxy_pac_url)
    elif proxy_type == "autodetect":
        capabilities.switches.SetSwitch("proxy-auto-detect")
    elif proxy_type == "manual":
        proxy_servers_options = [
            ["ftpProxy", "ftp"], ["httpProxy", "http"], ["sslProxy", "https"]]
        proxy_servers = ""
        for item in proxy_servers_options:
            option_value = proxy_dict.get(item[0], None)
            if option_value is None:
                continue
            value = option_value
            if type(value) != str and type(value) != unicode:
                return Status(kUnknownError, item[0] + " must be a string")
            # Converts into Xwalk proxy scheme.
            # Example: "http=localhost:9000;ftp=localhost:8000".
            if proxy_servers:
                proxy_servers += ";"
            proxy_servers += item[1] + "=" + value
        proxy_bypass_list = ""
        option_value = proxy_dict.get("noProxy", None)
        if option_value is not None:
            proxy_bypass_list = option_value
            #if type(proxy_bypass_list) != str and type(proxy_bypass_list) != unicode:
            if type(proxy_bypass_list) != str:
                return Status(kUnknownError, "'noProxy' must be a string")
        if not proxy_servers and not proxy_bypass_list:
            return Status(kUnknownError, "proxyType is 'manual' but no manual proxy capabilities were found")
        if proxy_servers:
            capabilities.switches.SetSwitch("proxy-server", proxy_servers)
        if proxy_bypass_list:
            capabilities.switches.SetSwitch("proxy-bypass-list", proxy_bypass_list)
    else:
        return Status(kUnknownError, "unrecognized proxy type:" + proxy_type)
    return Status(kOk)
class Capabilities(object):
    """Holds every capability used to launch or attach to an Xwalk browser."""

    def __init__(self):
        self.android_activity = ""
        self.android_device_serial = ""
        self.android_package = ""
        self.android_process = ""
        self.android_use_running_app = False
        self.tizen_debugger_address = None
        self.tizen_app_id = ""
        self.tizen_app_name = ""
        self.tizen_device_serial = ""
        self.tizen_use_running_app = False
        self.binary = ""
        # If provided, the remote debugging address to connect to.
        self.debugger_address = None
        # Whether the lifetime of the started Xwalk browser process should be
        # bound to XwalkDriver's process. If true, Xwalk will not quit if
        # XwalkDriver dies.
        self.detach = False
        # Set of switches which should be removed from default list when
        # launching Xwalk.
        self.exclude_switches = set()
        self.extensions = []
        # True if should always use DevTools for taking screenshots.
        # This is experimental and may be removed at a later point.
        self.force_devtools_screenshot = False
        self.local_state = {}
        self.log_path = ""
        self.logging_prefs = {}
        # If set, enable minidump for xwalk crashes and save to this directory.
        self.minidump_path = ""
        self.prefs = {}
        self.switches = Switches()

    def IsExistingBrowser(self):
        """Return True if an existing host:port session is to be used.

        NOTE(review): this compares debugger_address like a port number --
        confirm the value is numeric, not a "host:port" string.
        """
        # Guard against the default None, which is not orderable on Python 3.
        return (self.debugger_address is not None
                and 0 < self.debugger_address < 65536)

    def IsAndroid(self):
        """Return True if an android package is specified."""
        return self.android_package != ""

    def IsTizen(self):
        """Return True if a tizen debugger address (port) is specified."""
        return (self.tizen_debugger_address is not None
                and 0 < self.tizen_debugger_address < 65536)

    def Parse(self, desired_caps):
        """Parse the desired capabilities dict into this object.

        Returns Status(kOk) on success.
        """
        parser_map = {}
        if desired_caps.get("xwalkOptions") is not None:
            parser_map["xwalkOptions"] = Bind(ParseXwalkOptions, [desired_caps["xwalkOptions"], self])
        if desired_caps.get("loggingPrefs") is not None:
            parser_map["loggingPrefs"] = Bind(ParseLoggingPrefs, [desired_caps["loggingPrefs"], self])
        if desired_caps.get("proxy") is not None:
            # BUG FIX: the original replaced the whole parser_map dict here,
            # silently dropping any previously registered parsers.
            parser_map["proxy"] = Bind(ParseProxy, [desired_caps["proxy"], self])
        # iteritems() was Python-2-only; items() works on both.
        for label, cmd in parser_map.items():
            status = cmd.Run()
            if status.IsError():
                return Status(kUnknownError, "cannot parse capability: " + label)
        return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "misc/capabilities.py",
"copies": "1",
"size": "13867",
"license": "bsd-3-clause",
"hash": -4661816422217241000,
"line_mean": 40.7680722892,
"line_max": 150,
"alpha_frac": 0.6923631643,
"autogenerated": false,
"ratio": 3.709737827715356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797712594170294,
"avg_score": 0.020877679569012323,
"num_lines": 332
} |
# All card-related events take (self, card) / (self, card, index) as arguments
# so we can call them as follows:
#
# raise_strategy_card_events(self.board, 'construct_placed', card_name)
#
#
# All deck-related events take (self) / (self, index) as arguments,
# so we can call them as follows:
#
# raise_strategy_deck_events(self.board, 'deck_finished')
#
def raise_strategy_card_events(board, event_key, card_name):
    """Fire a card event on the current player and on every opponent."""
    card = board.card_dictionary.find_card(card_name)
    me_func, others_func = events[event_key]
    me_func(board.current_player().strategy, card)
    actor_index = board.current_player_index
    for opponent in board.other_players():
        others_func(opponent.strategy, actor_index, card)
def raise_strategy_card_events_for_player(board, me_player_index, event_key, card_name):
    """Fire a card event on the given player and on all other players.

    Like raise_strategy_card_events, but the acting player is chosen by
    index rather than taken from board.current_player().
    """
    card = board.card_dictionary.find_card(card_name)
    (me_func, others_func) = events[event_key]
    me_player = board.players[me_player_index]
    # range() (not the Python-2-only xrange) keeps this working on Python 3.
    other_players = [board.players[i] for i in range(len(board.players)) if i != me_player_index]
    me_func(me_player.strategy, card)
    for p in other_players:
        others_func(p.strategy, me_player_index, card)
def raise_strategy_deck_events(board, event_key):
    """Fire a deck event on the current player and on every opponent."""
    me_func, others_func = events[event_key]
    me_func(board.current_player().strategy)
    actor_index = board.current_player_index
    for opponent in board.other_players():
        others_func(opponent.strategy, actor_index)
def raise_end_round_events(board):
    """Tell every registered strategy that the round has finished."""
    for strategy in board.strategies:
        strategy.round_finished(board)
# Maps an event key to a pair of dispatch callbacks:
#   [0] called on the acting player's strategy (with the card, if any),
#   [1] called on each opponent's strategy with the actor's index.
# Deck events take no card argument.
events = {
    'acquired_card':
        (lambda strategy, card: strategy.me_acquired_card(card),
         lambda strategy, index, card: strategy.opponent_acquired_card(index, card)),
    'defeated_card':
        (lambda strategy, card: strategy.me_defeated_card(card),
         lambda strategy, index, card: strategy.opponent_defeated_card(index, card)),
    'banished_from_deck':
        (lambda strategy, card: strategy.me_banished_from_deck(card),
         lambda strategy, index, card: strategy.opponent_banished_from_deck(index, card)),
    'banished_from_center':
        (lambda strategy, card: strategy.me_banished_from_center(card),
         lambda strategy, index, card: strategy.opponent_banished_from_center(index, card)),
    'construct_placed':
        (lambda strategy, card: strategy.me_construct_placed(card),
         lambda strategy, index, card: strategy.opponent_construct_placed(index, card)),
    'construct_removed':
        (lambda strategy, card: strategy.me_construct_removed(card),
         lambda strategy, index, card: strategy.opponent_construct_removed(index, card)),
    'deck_finished':
        (lambda strategy: strategy.me_deck_finished(),
         lambda strategy, index: strategy.opponent_deck_finished(index)),
}
| {
"repo_name": "obi1kenobi/ascension-bot",
"path": "src/events.py",
"copies": "1",
"size": "2653",
"license": "mit",
"hash": 7742021171424770000,
"line_mean": 42.4918032787,
"line_max": 96,
"alpha_frac": 0.7075009423,
"autogenerated": false,
"ratio": 3.188701923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4396202865376923,
"avg_score": null,
"num_lines": null
} |
__all__ = ['cd', 'ad', 'ld', 'lookback', 'age']
from math import sqrt,sinh,sin
DH = 2997.92458 #in Mpc/h
OM = 0.3
NZ = 10000
TH = 9.77813106 #in Gyr/h
def E(z, om, ol, ok):
    """Dimensionless Hubble parameter H(z)/H0 for the given density parameters."""
    a_inv = float(z) + 1.0
    return sqrt((om * a_inv + ok) * a_inv * a_inv + ol)
def chi(z, om, ol, ok, Nz=NZ):
    """Dimensionless comoving distance: int_0^z dz'/E(z'), left Riemann sum with Nz steps."""
    dz = z / float(Nz)
    # range() (not the Python-2-only xrange) keeps this working on Python 3;
    # the generator expression avoids building an intermediate list.
    return sum(1.0 / E(dz * i, om, ol, ok) for i in range(Nz)) * dz
def dm(z, om, ol, ok, h):
    """Transverse comoving distance in Mpc, for open, closed or flat curvature."""
    dc_dh = chi(z, om, ol, ok)
    if ok == 0.0:
        # Flat universe: no curvature correction.
        return DH / h * dc_dh
    sq = sqrt(abs(ok))
    if ok > 0.0:
        return DH / h / sq * sinh(sq * dc_dh)
    return DH / h / sq * sin(sq * dc_dh)
def get_ok_ol(om, ol):
    """Return (ok, ol): assume a flat universe when ol is None, else derive curvature."""
    if ol is None:
        return 0.0, 1.0 - om
    return 1.0 - ol - om, ol
def lookback(z, om=OM, ol=None, h=1.0, Na=NZ):
    """Lookback time to redshift z, in Gyr (times 1/h).

    The integral is performed in the scale factor a so it converges for
    very large z.
    """
    ok, ol = get_ok_ol(om, ol)
    a = 1.0 / (1.0 + z)
    da = (1.0 - a) / float(Na)
    # range() (not the Python-2-only xrange) keeps this working on Python 3.
    return sum(1.0 / E(1.0 / ai - 1.0, om, ol, ok) / ai
               for ai in (da * i + a for i in range(Na))) * da * TH / h
def age(z, om=OM, ol=None, h=1.0):
    """Age of the universe at redshift z, in Gyr (times 1/h)."""
    # Total age is approximated as the lookback time to z = 1e12.
    total = lookback(1e12, om=om, ol=ol, h=h)
    return total - lookback(z, om=om, ol=ol, h=h)
def cd(z, om=OM, ol=None, h=1.0):
    """Line-of-sight comoving distance to redshift z, in Mpc (times 1/h)."""
    ok, ol = get_ok_ol(om, ol)
    return DH / h * chi(z, om, ol, ok)
def ad(z, om=OM, ol=None, h=1.0):
    """Angular diameter distance to redshift z, in Mpc (times 1/h)."""
    ok, ol = get_ok_ol(om, ol)
    return dm(z, om, ol, ok, h) / (1.0 + z)
def ld(z, om=OM, ol=None, h=1.0):
    """Luminosity distance to redshift z, in Mpc (times 1/h)."""
    ok, ol = get_ok_ol(om, ol)
    return dm(z, om, ol, ok, h) * (1.0 + z)
| {
"repo_name": "yymao/slackbots",
"path": "cosmo.py",
"copies": "1",
"size": "1456",
"license": "mit",
"hash": -8030441340861647000,
"line_mean": 23.2666666667,
"line_max": 66,
"alpha_frac": 0.532967033,
"autogenerated": false,
"ratio": 2.044943820224719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.30779108532247196,
"avg_score": null,
"num_lines": null
} |
__all__ = [ 'Cell' ]
import copy, itertools
from .molecules import Atom, Molecule, Cluster
import numpy as np
from matplotlib import pyplot as plt
try:
from applequist import particles
except ImportError:
pass
a0 = 0.52917721092
class Cell( np.ndarray ):
    """3-D spatial hash grid, subclassing ndarray with dtype=object.

    Each grid element holds a plain Python list of the items (Atoms,
    point dipoles, ...) whose coordinates fall inside that cubic bin of
    side ``my_cutoff``, so neighbour searches only visit adjacent bins.
    """

    def __new__(cls,
            my_min = [0.0, 0.0, 0.0],
            my_max = [10.0, 10.0, 10.0],
            my_cutoff = 1.5,
            AA = False,
            ):
        xdim = int( np.ceil ( (my_max[0] - my_min[0])/my_cutoff ))
        ydim = int( np.ceil ( (my_max[1] - my_min[1])/my_cutoff ))
        zdim = int( np.ceil ( (my_max[2] - my_min[2])/my_cutoff ))
        # Degenerate (zero-extent) axes still get one bin.
        if xdim == 0:
            xdim = 1
        if ydim == 0:
            ydim = 1
        if zdim == 0:
            zdim = 1
        shape = (xdim, ydim, zdim)
        obj = np.zeros(shape, dtype = object ).view(cls)
        return obj

    def __init__(self,
            my_min = [0.0, 0.0, 0.0],
            my_max = [10.0, 10.0, 10.0],
            my_cutoff = 1.5,
            AA = False):
        """Store bounds/cutoff bookkeeping and fill every bin with its own empty list."""
        self.AA = AA
        self.my_xmin = my_min[0]
        self.my_ymin = my_min[1]
        self.my_zmin = my_min[2]
        self.my_xmax = my_max[0]
        self.my_ymax = my_max[1]
        self.my_zmax = my_max[2]
        self.my_cutoff = my_cutoff
        self.xdim = int( np.ceil ( (self.my_xmax - self.my_xmin)/my_cutoff ))
        self.ydim = int( np.ceil ( (self.my_ymax - self.my_ymin)/my_cutoff ))
        self.zdim = int( np.ceil ( (self.my_zmax - self.my_zmin)/my_cutoff ))
        if self.xdim == 0:
            self.xdim = 1
        if self.ydim == 0:
            self.ydim = 1
        if self.zdim == 0:
            self.zdim = 1
        # Assign a distinct empty list to every bin (np.zeros gave ints).
        tmp = self.ravel()
        tmp[:] = [[] for i in range(self.xdim)
                for j in range(self.ydim) for k in range(self.zdim)]
        self[:] = tmp.reshape( ( self.xdim, self.ydim, self.zdim ))

    def __array_finalize__(self, obj):
        # Nothing to propagate for views/slices of a Cell.
        if obj is None:
            return

    @staticmethod
    def from_PointDipoleList( pdl, co = 25 ):
        """By default, the cutoff box is 25 Angstroms"""
        co /= a0
        x, y, z = [], [], []
        for p in pdl:
            x.append( p._r[0] )
            y.append( p._r[1] )
            z.append( p._r[2] )
        cell = Cell( my_min = [ np.min(x), np.min(y), np.min(z )],
                my_max = [ np.max(x), np.max(y), np.max(z )],
                my_cutoff = co,
                )
        for pd in pdl:
            cell.add(pd)
        return cell

    @staticmethod
    def from_xyz( fil, co = 2.0, in_AA = False, out_AA = False ):
        """Read an .xyz file and return a populated Cell with molecules built."""
        ats = []
        if not in_AA:
            co /= a0
        # First two lines of an .xyz file are atom count and a comment.
        for f_ in open(fil ).readlines()[2:]:
            el, x, y, z = f_.split()
            x, y, z = map(float, [x, y, z] )
            if in_AA and not out_AA:
                # Convert Angstrom input to atomic units.
                x, y, z = map( lambda v: v/a0, [x, y, z] )
            ats.append( Atom( element = el, x = x, y = y, z = z ))
        cell = Cell( my_min = [ min( ats, key = lambda a: a.x ).x,
                                min( ats, key = lambda a: a.y ).y,
                                min( ats, key = lambda a: a.z ).z],
                my_max = [ max( ats, key = lambda a: a.x ).x,
                           max( ats, key = lambda a: a.y ).y,
                           max( ats, key = lambda a: a.z ).z],
                my_cutoff = co,
                AA = out_AA,
                )
        for at in ats:
            cell.add(at)
        for at in cell:
            if at.Molecule is None:
                m = Molecule()
                m.add( at )
                cell.build_molecules( current = at, closeby = cell.get_closest(at) )
        return cell

    def build_molecules(self, current = None, visited = [],
            closeby = [], max_dist = 1.46, AA = False):
        """Recursively grow current.Molecule with atoms within bonding distance.

        NOTE(review): 'visited' defaults to a shared mutable list, so the
        pruning state persists across top-level calls (and across Cell
        instances).  The traversal from from_xyz may rely on that between
        successive calls -- confirm before changing to a per-call default.
        """
        visited.append( current )
        if not self.AA:
            # Interpret max_dist as Angstroms and convert to atomic units.
            max_dist /= a0
        if closeby == []:
            return
        for at in closeby:
            if at in current.Molecule:
                continue
            if max_dist < current.dist_to_atom( at ):
                continue
            current.Molecule.append( at )
            at.Molecule = current.Molecule
            close = [a for a in self.get_closest( at ) if a not in visited ]
            self.build_molecules( current = at, closeby = close, visited = visited )

    def add_atom( self, atom ):
        assert type( atom ) == Atom
        self.add( atom )

    def add_molecule( self, mol ):
        assert isinstance( mol, Molecule )
        for at in mol:
            self.add( at )

    def add_cluster( self, clus ):
        assert isinstance( clus, Cluster )
        for item in [at for mol in clus for at in mol ]:
            self.add( item )

    def add(self, item ):
        """Insert item into the bin matching its coordinates, skipping duplicates."""
        x_ind, y_ind, z_ind = self.get_index( item )
        if item not in self[ x_ind, y_ind, z_ind ]:
            self[ x_ind, y_ind, z_ind ].append( item )

    def __iter__(self):
        """Yield every stored item, bin by bin."""
        for i in range(len(self)):
            for j in range(len(self[i])):
                for k in range(len(self[i][j])):
                    for at in self[i][j][k]:
                        yield at

    def get_closest( self, item ):
        """
        Return the closest items,
        to iterate not over whole cell box but closest

        >>> c = Cell( my_cutoff = 1.5 )
        >>> a1 = Atom( element = 'H', x = 1.4 ) #in the x index 0
        >>> a2 = Atom( element = 'O', x = 1.6 ) #in the x index 1
        >>> c.add( a1 )
        >>> c.add( a2 )
        >>> c.get_closest( a1 ) #Will return list where only the Oxygen exists
        [<Atom at 0x0xah5ah3h5]
        """
        x_ind, y_ind, z_ind = self.get_index( item )
        new = []
        # For each axis, clamp the 3-bin neighbourhood window to the grid edge.
        if x_ind == 0:
            if (self.shape[0] - 1 ) == x_ind:
                xmin, xmax = 0, 1
            else:
                xmin, xmax = 0, 2
        else:
            if (self.shape[0] - 1) == x_ind:
                xmin, xmax = x_ind - 1, x_ind + 1
            else:
                xmin, xmax = x_ind - 1, x_ind + 2
        if y_ind == 0:
            if (self.shape[1] - 1 ) == y_ind:
                ymin, ymax = 0, 1
            else:
                ymin, ymax = 0, 2
        else:
            if (self.shape[1] - 1) == y_ind:
                ymin, ymax = y_ind - 1, y_ind + 1
            else:
                ymin, ymax = y_ind - 1, y_ind + 2
        if z_ind == 0:
            if (self.shape[2] - 1 ) == z_ind:
                zmin, zmax = 0, 1
            else:
                zmin, zmax = 0, 2
        else:
            if (self.shape[2] - 1) == z_ind:
                zmin, zmax = z_ind - 1, z_ind + 1
            else:
                zmin, zmax = z_ind - 1, z_ind + 2
        for i, j, k in itertools.product( range( xmin, xmax ), range( ymin, ymax ), range( zmin, zmax )):
            new += self[i, j, k]
        # The item itself is in its own bin; exclude it from the result.
        new.remove( item )
        return new

    def update(self):
        """Re-bin every item by its current position and return the new Cell."""
        ats = []
        for x in range(len(self)):
            for y in range(len(self[x])):
                for z in range(len(self[x, y])):
                    # BUG FIX: iterate over a copy -- removing from a list
                    # while iterating it used to skip every other item.
                    for item in list(self[x, y, z]):
                        ats.append( item )
                        self[x, y, z].remove( item )
        if ats == []:
            return
        cell = Cell( my_min = [ min( ats, key = lambda a: a.x ).x,
                                min( ats, key = lambda a: a.y ).y,
                                min( ats, key = lambda a: a.z ).z],
                my_max = [ max( ats, key = lambda a: a.x ).x,
                           max( ats, key = lambda a: a.y ).y,
                           max( ats, key = lambda a: a.z ).z],
                my_cutoff = self.my_cutoff,
                AA = self.AA,
                )
        for item in ats:
            if type(item) == Atom:
                cell.add_atom( item )
            if type(item) == Molecule:
                cell.add_molecule( item )
            if type(item) == Cluster:
                cell.add_cluster( item )
        return cell

    def plot(self):
        """
        Plot all Atoms in the cell

        .. code:: python

            >>> cell = Cell()
            >>> cell.add( Atom(element = 'H' ))
            >>> cell.plot()
        """
        #Plot water molecule in green and nice xyz axis
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d' )
        ax.plot( [0, 1, 0, 0, 0, 0], [0,0 ,0,1,0,0], [0,0,0,0,0,1] )
        ax.text( 1.1, 0, 0, "X", color = 'red' )
        ax.text( 0, 1.1, 0, "Y", color = 'red' )
        ax.text( 0, 0, 1.1, "Z", color = 'red' )
        for at in self:
            ax.plot( [at.x], [at.y], [at.z], at.Molecule.style[at.element], linewidth= at.Molecule.linewidth[at.element] )
        ax.set_zlim3d( -5,5)
        plt.xlim(-5,5)
        plt.ylim(-5,5)
        plt.show()

    def get_index( self, item ):
        """
        Return the x, y, and z index for cell for this item,

        >>> c = Cell( my_cutoff = 1.5 )
        >>> a1 = Atom( element = 'H', x = 1.4 ) #in the x index 0
        >>> print c.get_index( a1 )
        (0, 0, 0,)
        """
        if isinstance( item, Atom ):
            x, y, z = item.r
            assert self.my_xmin <= x <= self.my_xmax
            assert self.my_ymin <= y <= self.my_ymax
            assert self.my_zmin <= z <= self.my_zmax
        if isinstance( item, particles.PointDipole ):
            x, y, z = item._r
            assert self.my_xmin <= x <= self.my_xmax
            assert self.my_ymin <= y <= self.my_ymax
            assert self.my_zmin <= z <= self.my_zmax
        tmp_xmin = x - self.my_xmin
        tmp_ymin = y - self.my_ymin
        tmp_zmin = z - self.my_zmin
        x_ind = int( np.floor( tmp_xmin / self.my_cutoff))
        y_ind = int( np.floor( tmp_ymin / self.my_cutoff))
        z_ind = int( np.floor( tmp_zmin / self.my_cutoff))
        return (x_ind, y_ind, z_ind)
| {
"repo_name": "fishstamp82/moltools",
"path": "moltools/dstruct.py",
"copies": "1",
"size": "9798",
"license": "mit",
"hash": 4659473820646595000,
"line_mean": 29.7147335423,
"line_max": 122,
"alpha_frac": 0.4456011431,
"autogenerated": false,
"ratio": 3.2124590163934426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41580601594934424,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ChapterParser"]
from .story import Chapter, Story
from html.parser import HTMLParser
from urllib.request import urlopen
from urllib.error import HTTPError
import re
CHARSET_REGEX = re.compile(r"charset=([^ ]+)")
CHAPTER_ID_REGEX = re.compile(r"https://chyoa.com/[^.]+\.([0-9]+)")
CHYOA_CHAPTER_REGEX = re.compile(r"https://chyoa.com/chapter/[A-Za-z0-9\-_]+.[0-9]+")
CHYOA_USER_REGEX = re.compile(r"https://chyoa.com/user/([A-Za-z0-9\-_]+)")
class ChapterParser(HTMLParser):
    """HTML parser extracting one CHYOA chapter's metadata, body and choices."""

    def __init__(self):
        HTMLParser.__init__(self)

    def _reset(self):
        """Clear all per-chapter state before parsing a new page."""
        self.name = None
        self.title = None
        self.description = None
        self.author = None
        self.in_body = False
        self.body = []
        self.in_question = False
        self.question = []
        self.in_choices = False
        self.current_choice = None
        self.choices = set()

    def get_chapter_fields(self, url):
        """Download and parse the chapter at url.

        Returns a dict of chapter fields, or None if the chapter was
        deleted (HTTP 404).  Other HTTP errors propagate.
        """
        self._reset()
        print("Reading %s..." % url)
        try:
            response = urlopen(url)
        except HTTPError as err:
            if err.code == 404:
                print("Chapter deleted, skipping...")
                return None
            else:
                raise
        charset = self.get_charset(response.getheader("Content-Type"))
        html = response.read().decode(charset)
        self.feed(html)
        return {
            "url": url,
            "name": self.name,
            "description": self.description,
            "id": self.get_id(url),
            "author": self.author,
            "text": "".join(self.body),
            "question": " ".join(self.question).strip(),
            "choices": self.choices,
        }

    def handle_starttag(self, tag, attrs):
        # <meta> tags carry the title/description (OpenGraph or plain).
        if tag == "meta":
            for key, value in attrs:
                if key == "property":
                    if value == "og:title":
                        self.name = dict(attrs)["content"]
                    elif value == "og:description":
                        self.description = dict(attrs)["content"]
                if key == "name" and value == "description":
                    self.description = dict(attrs)["content"]
        elif tag == "div":
            # Marker divs switch the parser into body/choice collection mode.
            for key, value in attrs:
                if key == "class":
                    if value == "chapter-content":
                        self.in_body = True
                    elif value == "question-content":
                        self.in_choices = True
        elif tag == "header":
            for key, value in attrs:
                if key == "class" and value == "question-header":
                    self.in_question = True
        elif tag == "a":
            for key, value in attrs:
                if key == "href":
                    if self.in_choices and not value.endswith("login"):
                        # A choice link; its id is resolved in handle_data.
                        self.current_choice = value
                    else:
                        match = CHYOA_USER_REGEX.fullmatch(value)
                        if match:
                            self.author = match.group(1)
        elif self.in_body:
            # Any other tag inside the body is reproduced verbatim.
            self.body.append(self.create_tag(tag, attrs))

    def handle_data(self, data):
        if self.in_body:
            self.body.append(data)
        elif self.in_question:
            self.question.append(data.strip())
        elif self.in_choices:
            if self.current_choice:
                # Record (chapter id, url); the link text itself is unused.
                # (Removed an unused local that captured data.strip().)
                self.choices.add((self.get_id(self.current_choice), self.current_choice))
                self.current_choice = None

    def handle_endtag(self, tag):
        if self.in_body:
            if tag == "div":
                self.in_body = False
            else:
                self.body.append("</%s>" % tag)
        elif self.in_question and tag == "header":
            self.in_question = False
        elif self.in_choices and tag == "div":
            self.in_choices = False

    @staticmethod
    def create_tag(tag, attrs):
        """Rebuild an HTML start tag string from a parsed (tag, attrs) pair."""
        if attrs:
            parts = [""]
        else:
            parts = []
        for key, value in attrs:
            parts.append("%s=\"%s\"" % (key, value))
        return "<%s%s>" % (tag, " ".join(parts))

    @staticmethod
    def get_charset(header):
        """Extract the charset from a Content-Type header, defaulting to UTF-8."""
        if not header.startswith("text/html"):
            raise ValueError("Document type is not HTML")
        match = CHARSET_REGEX.findall(header)
        if match:
            return match[0]
        else:
            # Can't detect the charset, take a guess
            return "UTF-8"

    @staticmethod
    def get_id(url):
        """Extract the numeric chapter id from a chapter URL."""
        match = CHAPTER_ID_REGEX.fullmatch(url)
        if match is None:
            raise ValueError("Unable to extract chapter ID from URL (%s)" % url)
        return int(match.group(1))
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/parser.py",
"copies": "1",
"size": "4758",
"license": "mit",
"hash": 1817854312785977000,
"line_mean": 31.8137931034,
"line_max": 89,
"alpha_frac": 0.501261034,
"autogenerated": false,
"ratio": 4.108808290155441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007462444097033779,
"num_lines": 145
} |
__all__ = ["Chapter", "Story"]
from .util import abridge
class Chapter(object):
    """A single CHYOA chapter: its text, question and outgoing choices."""

    def __init__(self, url, name, description, id, author, text, question, choices):
        # '%' is doubled so the values survive later %-style formatting.
        def _escape(value):
            return value.replace('%', '%%')
        # NOTE(review): 'description' is accepted but not stored here;
        # Story reads it from kwargs instead.
        self.url = url
        self.name = _escape(name)
        self.id = id
        self.author = _escape(author)
        self.text = _escape(text)
        self.question = _escape(question)
        self.choices = choices  # set of (id, chapter_url) pairs

    def __repr__(self):
        return """Chapter(url=%r, name=%r, author=%r, text=%r, question=%r, choices=%s\n)""" % (
            self.url, self.name, self.author, abridge(self.text), self.question, self.choices)
class Story(Chapter):
    """The root chapter of a story; also tracks its title and all chapters."""

    def __init__(self, **kwargs):
        Chapter.__init__(self, **kwargs)
        # The scraped name becomes the story title; the root chapter itself
        # is always displayed as "Introduction".
        self.title = kwargs["name"]
        self.name = "Introduction"
        self.description = kwargs["description"]
        self.chapters = {}  # maps chapter id -> chapter object

    def __repr__(self):
        return """Story(title=%r, description=%r, root=%s\n)""" % (
            self.title, self.description, Chapter.__repr__(self))
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/story.py",
"copies": "1",
"size": "1130",
"license": "mit",
"hash": 2314980385330058000,
"line_mean": 35.4516129032,
"line_max": 98,
"alpha_frac": 0.5495575221,
"autogenerated": false,
"ratio": 3.5987261146496814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9464678930303175,
"avg_score": 0.0367209412893011,
"num_lines": 31
} |
"""All character-related routes.
If the user wants to browse/search characters, or look at how a character has
improved over time, they'll likely want something from here.
"""
from datetime import datetime, timedelta
from bottle import route, request, template, HTTPResponse
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from db import Character, DataRequest, getDataTable
@route('/character')
def listCharacters(dbSession):
    """List all the characters, possibly providing search functionality."""
    # This query only matters for people without JavaScript.
    cutoff = datetime.now() - timedelta(days=33)
    recentCharacters = (dbSession.query(Character)
                        .filter(Character.lastSeen > cutoff)
                        .order_by(Character.lastSeen.desc())
                        .limit(25)
                        .all())
    return template('characterList', characters=recentCharacters)
@route('/character/<charName>')
def characterOverview(charName, dbSession):
    """Provide an overview of how a character has been doing.

    Responds 404 for invalid or unknown names, 500 if the name is ambiguous.
    """
    # Removed a leftover debug print of charName.
    # Reject anything but alphanumeric names before touching the database.
    if not charName.isalnum():
        raise HTTPResponse(status=404)
    try:
        character = dbSession.query(Character).filter(
            Character.name == charName).one()
    except NoResultFound:
        raise HTTPResponse(status=404)
    except MultipleResultsFound:
        raise HTTPResponse(status=500)
    else:
        return template('characterOverview', character=character)
@route('/data/character')
@route('/data/character', method="POST")
def queryCharacterTable(dbSession):
    """Return JSON describing the results of an arbitrary query/search."""
    dataReq = DataRequest(Character, request.json)
    result = getDataTable(dataReq, dbSession)
    return result
| {
"repo_name": "proegssilb/tsw-stats",
"path": "pages/characterRoutes.py",
"copies": "1",
"size": "1696",
"license": "apache-2.0",
"hash": -7715193322439927000,
"line_mean": 35.8695652174,
"line_max": 77,
"alpha_frac": 0.7287735849,
"autogenerated": false,
"ratio": 4.315521628498728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5544295213398728,
"avg_score": null,
"num_lines": null
} |
__all__ = ['chatcommand', 'execute_chat_command', 'save_matchsettings', '_register_chat_command']
import functools
import inspect
from .events import eventhandler, send_event
from .log import logger
from .asyncio_loop import loop
_registered_chat_commands = {} # dict of all registered chat commands
async def execute_chat_command(server, player, cmd):
    """Look up and run the chat command in cmd, reporting errors to player."""
    args = cmd.split(' ')
    # Drop a single trailing empty token (e.g. from "/cmd ").
    # BUG FIX: the original compared with 'is', which tests identity --
    # implementation-dependent for strings -- not equality.
    if args and args[-1] == '':
        args.pop()
    if args[0] in _registered_chat_commands:
        try:
            if len(args) == 1:
                server.run_task(_registered_chat_commands[args[0]](server, player))
            else:
                server.run_task(_registered_chat_commands[args[0]](server, player, *args[1:]))
        except Exception as exp:
            server.chat_send_error('fault use of chat command: ' + args[0], player)
            server.chat_send_error(str(exp), player)
            server.chat_send('use /help to see available chat commands', player)
            raise
    else:
        server.chat_send_error('unknown chat command: ' + args[0], player)
        server.chat_send('use /help to see available chat commands', player)
def _register_chat_command(chat_command, function):
    """Register function under chat_command; return False if the name is taken."""
    if chat_command in _registered_chat_commands:
        logger.error('chatcommand ' + "'" + chat_command + "'" + ' already registered to ' + str(function))
        return False
    _registered_chat_commands[chat_command] = function
def _unregister_chat_command(chat_command):
    """Remove a previously registered chat command.

    Raises KeyError for an unknown command.  (The original raised a bare
    string, which is itself a TypeError on Python 3.)
    """
    if chat_command not in _registered_chat_commands:
        raise KeyError('chat command not registered: ' + chat_command)
    del _registered_chat_commands[chat_command]
# @chatcommand decorator
def chatcommand(cmd):
    """Decorator registering the wrapped coroutine as the handler for cmd."""
    def decorator(handler):
        # Registration fails on duplicates; then nothing is returned.
        if _register_chat_command(cmd, handler) is False:
            return
        module = inspect.getmodule(handler)
        logger.debug('chatcommand ' + "'" + cmd + "' connected to " + str(handler) + ' in module ' + str(module))
        @functools.wraps(handler)
        def wrapper(*args, **kwargs):
            return handler(*args, **kwargs)
        return wrapper
    return decorator
@eventhandler('ManiaPlanet.PlayerChat')
async def _on_player_chat(server, callback):
    """Dispatch chat callbacks: plain chat raises an event, commands run."""
    speaker = server.player_from_login(callback.login)
    if not callback.isCommand:
        # ignore normal chat
        if speaker is not None:
            send_event(server, 'pie.PlayerChat', speaker)
        return
    server.run_task(execute_chat_command(server, speaker, callback.text))
@chatcommand('/help')
async def cmd_help(server, player):
    """list all chat commands"""
    server.chat_send('help:', player)
    # Each handler's docstring doubles as its user-facing description.
    for name, handler in _registered_chat_commands.items():
        description = handler.__doc__
        if description is None:
            description = 'no description set'
        server.chat_send(name + ' - ' + description, player)
async def save_matchsettings(server, filename = None):
    """Save the current match settings.

    BUG FIX: the 'filename' argument was previously accepted but ignored;
    it now overrides the configured matchsettings filename when given.
    """
    if filename is None:
        filename = server.config.matchsettings
    await server.rpc.SaveMatchSettings('MatchSettings\\' + filename)
@chatcommand('/savematchsettings')
async def cmd_savematchsettings(server, player):
    # Persist the current matchsettings file and announce it in chat.
    # NOTE: /help displays each handler's docstring, so adding one here
    # would change the /help output; comments are used instead.
    await save_matchsettings(server)
    server.chat_send('matchsettings saved: ' + server.config.matchsettings)
@chatcommand('/shutdown')
async def cmd_shutdown(server, player):
    # Announce the shutdown (waiting until the message is delivered),
    # then stop the shared asyncio event loop.
    await server.chat_send_wait('pie shutdown')
    loop.stop()
@chatcommand('/players')
async def cmd_players(server, player):
    # Send every connected player's nickname to chat.
    # FIX: the loop variable used to shadow the 'player' parameter.
    for login in server.players:
        server.chat_send(server.players[login].nickname)
"repo_name": "juergenz/pie",
"path": "src/pie/chat_commands.py",
"copies": "1",
"size": "3649",
"license": "mit",
"hash": 291605473629515000,
"line_mean": 26.4436090226,
"line_max": 110,
"alpha_frac": 0.6464784873,
"autogenerated": false,
"ratio": 3.7931392931392933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9929468602496507,
"avg_score": 0.0020298355885570844,
"num_lines": 133
} |
__all__ = ["CherryAdmin", "CherryAdminView", "CherryAdminRawView"]
import os
import cherrypy
from nxtools import *
from .handler import CherryAdminHandler
from .context import CherryAdminContext
from .sessions import CherryAdminSessions
from .view import CherryAdminView, CherryAdminRawView
script_name = os.path.basename(os.path.splitext(__file__)[0])
def default_context_helper():
    # Default site/page context provider: contributes an empty context.
    return {}
def default_user_context_helper(data):
    # Default user context provider: passes the data through unchanged.
    return data
# Default configuration values; CherryAdmin.__init__ overlays its keyword
# arguments on top of these.
default_settings = {
    #
    # Environment
    #
    "templates_dir" : "templates",
    "static_dir" : "static",
    "sessions_dir" : "/tmp/" + script_name + "_sessions",
    "sessions_timeout" : 60*24*7,   # minutes (one week)
    "hash_salt" : "4u5457825749",
    "minify_html" : True,
    "log_screen" : False,
    #
    # Server configuration
    #
    "host" : "0.0.0.0",
    "port" : 8080,
    "blocking" : False,
    #
    # Application
    #
    "views" : {"index" : CherryAdminView},
    "api_methods" : {},
    "login_helper" : lambda x, y: False,
    "site_context_helper" : default_context_helper,
    "page_context_helper" : default_context_helper,
    "user_context_helper" : default_user_context_helper,
}
class CherryAdmin():
    """CherryPy-based admin web application: mounts the handler and serves."""

    def __init__(self, **kwargs):
        """
        host: IP Address the server will listen for HTTP connections
        port: Port the server will listen for HTTP connection
        blocking:
        templates_dir:
        static_dir:
        sessions_dir:
        sessions_timeout: Number of minutes after which inactive session session expires
        hash_salt:
        minify_html:
        log_screen:
        """
        # BUG FIX: copy the defaults. Binding the module-level dict and then
        # update()-ing it mutated the shared default_settings, leaking one
        # instance's kwargs into every CherryAdmin created afterwards.
        self.settings = dict(default_settings)
        self.settings.update(kwargs)
        self.is_running = False
        self.handler = CherryAdminHandler(self)
        self.sessions = CherryAdminSessions(
            self["sessions_dir"],
            self["sessions_timeout"] * 60,  # minutes -> seconds
            self["hash_salt"]
        )
        static_root, static_dir = os.path.split(os.path.abspath(self["static_dir"]))
        self.config = {
            '/': {
                'tools.proxy.on': True,
                'tools.proxy.local': 'Referer',
                'tools.staticdir.root': static_root,
                'tools.trailing_slash.on' : False,
                'error_page.default': self.handler.cherrypy_error,
            },
            '/static': {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': static_dir
            },
            '/favicon.ico': {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': os.path.join(static_root, static_dir, "img", "favicon.ico")
            },
        }
        cherrypy.config.update({
            "server.socket_host" : str(self["host"]),
            "server.socket_port" : int(self["port"]),
            "log.screen" : self["log_screen"]
        })
        cherrypy.tree.mount(self.handler, "/", self.config)
        if kwargs.get("start_engine", True):
            cherrypy.engine.subscribe('start', self.start)
            cherrypy.engine.subscribe('stop', self.stop)
            cherrypy.engine.start()
            logging.goodnews("Web service started")
            if self["blocking"]:
                cherrypy.engine.block()

    def __getitem__(self, key):
        # Convenience access to the merged settings.
        return self.settings[key]

    def start(self):
        self.is_running = True

    def stop(self):
        logging.warning("Web service stopped")
        self.is_running = False

    def shutdown(self):
        # Terminate the CherryPy engine (and with it the service).
        cherrypy.engine.exit()
| {
"repo_name": "martastain/cherryadmin",
"path": "cherryadmin/__init__.py",
"copies": "1",
"size": "3696",
"license": "mit",
"hash": 607150420648549400,
"line_mean": 26.1764705882,
"line_max": 104,
"alpha_frac": 0.5446428571,
"autogenerated": false,
"ratio": 4.026143790849673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070786647949673,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Chunk']
import os
import string
import subprocess
from tempfile import NamedTemporaryFile
from miles import Configuration
class Chunk:
    """Chunk of a short trajectory.

    We refer to a trajectory started at a certain milestone and
    stopped at a neighboring milestone as a short trajectory. Short
    trajectories are comprised of chunks, which are pieces of
    trajectories obtained by running the external MD engine (and
    stored in DCD and DVD files).

    Attributes
    ----------
    input_name : str
        Input name to use in the MD engine. XXX Give examples.
    output_name : str
        Output name to use in the MD engine. XXX Give examples.
    template : str
        File containing the template file for the MD program.
    configuration : Configuration
        General configuration settings for the simulation.
    random_seed : int
        Seed for the random number generator used by the MD engine.
    content : str
        Evaluated template passed to the MD code.
    command_line : List[str]
        Evaluated template containing the command line to invoke the
        MD code.
    status : int
        Exit status of the MD code.
    stdin, stdout, stderr
        Standard input, output, and error for the MD code.

    """
    def __init__(self, configuration: Configuration, input_name: str,
                 output_name: str, template: str, random_seed: int) -> None:
        self.input_name = input_name
        self.output_name = output_name
        self.simulation_dir = configuration.simulation_dir
        # The template path is resolved relative to the simulation directory.
        self.template = os.path.join(self.simulation_dir, template)
        self.configuration = configuration
        self.random_seed = random_seed
        # These are populated by run().
        self.content, self.command_line, self.status = None, None, None
        self.stdin, self.stdout, self.stderr = None, None, None

    def __repr__(self) -> str:
        return ('{}({!r}, {!r}, {!r}, {!r}, {!r})'
                .format(self.__class__.__name__,
                        self.configuration,
                        self.input_name,
                        self.output_name,
                        self.template,
                        self.random_seed))

    def run(self) -> int:
        """Run command to obtain chunk of timesteps.

        Returns
        -------
        status : int
            Exit status of the MD command.

        """
        def interpolate_md_template(keywords):
            # Substitute $-placeholders in the MD input template; unknown
            # placeholders are left untouched (safe_substitute).
            with open(self.template, 'r') as tf:
                tmp = string.Template(tf.read())
                return bytes(tmp.safe_substitute(keywords), 'utf-8')

        def interpolate_command_line_template(keywords):
            # Build the shell invocation: the configured command, with the
            # same keyword substitution, run through the configured shell.
            tmp = string.Template(self.configuration.command)
            command_line = str(tmp.safe_substitute(keywords))
            return [self.configuration.shell, '-c', command_line]

        # We must use simulation_dir due to NAMD2.
        with NamedTemporaryFile(dir=self.simulation_dir) as f:
            # Merge instance attributes, configuration attributes, and the
            # temporary file name into one substitution namespace.
            keywords = {**self.__dict__,
                        **self.configuration.__dict__,
                        'simulation_file': f.name}  # PEP 448
            self.content = interpolate_md_template(keywords)
            f.write(self.content)
            f.flush()
            self.command_line = interpolate_command_line_template(keywords)
            proc = subprocess.Popen(self.command_line,
                                    executable=self.configuration.shell,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            self.stdout, self.stderr = proc.communicate(self.stdin)
            self.status = proc.returncode
        return self.status
| {
"repo_name": "clsb/miles",
"path": "miles/chunk.py",
"copies": "1",
"size": "3758",
"license": "mit",
"hash": 2833687872560316000,
"line_mean": 33.4770642202,
"line_max": 76,
"alpha_frac": 0.5830228845,
"autogenerated": false,
"ratio": 4.650990099009901,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 109
} |
__all__ = ['ChunksBD']
import os
from typing import Optional
from miles import (BaseChunks, Configuration, delete_files, get_random_name)
class ChunksBD(BaseChunks):
    """Sequence of simple-brownian-dynamics trajectory chunks.

    Each chunk's starting point is derived from the last line of the
    previous chunk's output file.
    """
    def __init__(self, configuration: Configuration,
                 initial_file: str, reset_velocities: bool,
                 random_seed: Optional[int] = None) -> None:
        super().__init__(configuration, initial_file,
                         reset_velocities, random_seed)
        # BD output has no file-name suffix.
        self.suffixes = ('',)
        # Temporary input files created by prepare(); removed in __del__.
        self.intermediate_files = []  # type: List[str]
    def prepare(self):
        """Write a new input file seeded from the last state of the
        previous chunk and return its name."""
        assert self.prev_chunk is not None
        out = self.prev_chunk.output_name
        inp = get_random_name(self.configuration.temp_dir)
        self.intermediate_files.append(inp)
        with open(out, 'r') as src, open(inp, 'w') as dst:
            lines = src.readlines()
            last_line = lines[-1].strip()
            # Columns 1 and 2 of the last line hold the final state.
            fields = last_line.split()
            dst.write('{} {}'.format(fields[1], fields[2]))
        return inp
    def cleanup(self):
        """Remove the previous chunk's output files."""
        delete_files(self.suffixes, self.prev_chunk.output_name)
    def __del__(self):
        # Best-effort removal: a file may already be gone, and __del__
        # must never raise (exceptions during finalization are ignored
        # but printed to stderr).
        for f in self.intermediate_files:
            try:
                os.remove(f)
            except OSError:
                pass
        super().__del__()
| {
"repo_name": "clsb/miles",
"path": "miles/chunks_bd.py",
"copies": "1",
"size": "1290",
"license": "mit",
"hash": -7040483465384221000,
"line_mean": 28.3181818182,
"line_max": 76,
"alpha_frac": 0.5821705426,
"autogenerated": false,
"ratio": 3.897280966767372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4979451509367372,
"avg_score": null,
"num_lines": null
} |
# __all__ = ['ChunksMOIL']
#
# from typing import Optional
#
# from miles import (BaseChunks, Configuration, DCDReader, DVDReader, delete_files, get_random_name, path_ext_join) # noqa: E501
#
#
# class ChunksMOIL(BaseChunks):
# """Sequence of MOIL trajectory chunks.
#
# """
# def __init__(self, configuration: Configuration,
# initial_file: str, reset_velocities: bool,
# random_seed: Optional[int] = None) -> None:
# super().__init__(configuration, initial_file,
# reset_velocities, random_seed)
#
# self.suffixes = ('dcd', 'dvd')
#
# def prepare(self):
# assert self.prev_chunk is not None
# out = self.prev_chunk.output_name
# dcd_file_name = path_ext_join(out, 'dcd')
# dvd_file_name = path_ext_join(out, 'dvd')
#
# inp = get_random_name(self.configuration.temp_dir)
# new_dcd_file_name = path_ext_join(inp, 'dcd')
# new_dvd_file_name = path_ext_join(inp, 'dvd')
#
# with DCDReader(dcd_file_name) as dcd, DVDReader(dvd_file_name) as dvd:
# dcd.save_last_frame_to(new_dcd_file_name)
# dvd.save_last_frame_to(new_dvd_file_name)
#
# return inp
#
# def cleanup(self):
# delete_files(self.suffixes, self.prev_chunk.output_name)
| {
"repo_name": "clsb/miles",
"path": "miles/chunks_moil.py",
"copies": "1",
"size": "1328",
"license": "mit",
"hash": 811007849493778200,
"line_mean": 34.8918918919,
"line_max": 129,
"alpha_frac": 0.5865963855,
"autogenerated": false,
"ratio": 2.9576837416481068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40442801271481066,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ChunksNAMD']
import shutil
from typing import Optional
from miles import (BaseChunks, Configuration, get_random_name, delete_files) # noqa: E501
class ChunksNAMD(BaseChunks):
    """Sequence of NAMD trajectory chunks."""

    # Restart files that NAMD writes at the end of every chunk.
    suffixes = {'coor', 'vel', 'xsc'}
    # Colvars bookkeeping files that must also be deleted on cleanup.
    extra_suffixes = {'colvars.traj', 'colvars.state',
                      'colvars.state.old'}

    def __init__(self, configuration: Configuration,
                 initial_file: str, reset_velocities: bool,
                 random_seed: Optional[int] = None) -> None:
        super().__init__(configuration, initial_file,
                         reset_velocities, random_seed)

    def prepare(self):
        """Move the previous chunk's restart files into place under a
        fresh random name and return that name."""
        assert self.prev_chunk is not None
        previous_output = self.prev_chunk.output_name
        next_input = get_random_name(self.configuration.temp_dir)
        num_steps = self.configuration.steps_per_chunk
        for suffix in self.suffixes:
            shutil.move('{}.{}.{}'.format(previous_output, num_steps, suffix),
                        '{}.{}'.format(next_input, suffix))
        return next_input

    def cleanup(self):
        """Delete all files produced by the previous chunk."""
        if self.num_chunk > 1:
            delete_files(self.suffixes, self.prev_chunk.input_name)
        previous_output = self.prev_chunk.output_name
        delete_files(self.suffixes.union(self.extra_suffixes),
                     previous_output)
        num_steps = self.configuration.steps_per_chunk
        for step in range(1, num_steps + 1):
            delete_files(self.suffixes,
                         '{}.{}'.format(previous_output, step))
| {
"repo_name": "clsb/miles",
"path": "miles/chunks_namd.py",
"copies": "1",
"size": "1521",
"license": "mit",
"hash": 6083378572946334000,
"line_mean": 30.0408163265,
"line_max": 90,
"alpha_frac": 0.5818540434,
"autogenerated": false,
"ratio": 3.81203007518797,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.989388411858797,
"avg_score": 0,
"num_lines": 49
} |
__all__ = ["ChyoaTree", "TreeCharset", "Tree", "STANDARD_TREE_CHARSET", "ASCII_TREE_CHARSET"]
import json
import os
class TreeCharset(object):
    """Set of glyphs used to draw a Tree on the terminal."""

    def __init__(self, trunk, intersection, branch, corner):
        # Store the four drawing glyphs as plain attributes.
        self.trunk, self.intersection = trunk, intersection
        self.branch, self.corner = branch, corner
# Default charset: Unicode box-drawing characters.
STANDARD_TREE_CHARSET = TreeCharset("│", "├", "─", "└")
# Fallback for terminals without Unicode support.
ASCII_TREE_CHARSET = TreeCharset("|", "|", "-", "`")
class Tree(object):
    """Printable tree: a root label plus nested children dictionaries."""

    def __init__(self, root, children):
        # ``children`` maps each child label to its own children dict.
        self.root = root
        self.children = children

    def display(self, charset=STANDARD_TREE_CHARSET):
        """Print the whole tree, starting with the root label."""
        print(self.root)
        self.display_subtree(self.children, [], charset)

    def display_subtree(self, children, level, charset):
        """Recursively print ``children`` at nesting depth ``level``."""
        ordered = sorted(children.keys())
        for index, child in enumerate(ordered):
            notlast = index < len(ordered) - 1
            # Intersection glyph for middle entries, corner for the last.
            corner = charset.intersection if notlast else charset.corner
            print("%s%s%s %s" %
                (self.get_indent(level, charset), corner, charset.branch, child))
            if children[child]:
                self.display_subtree(children[child], level + [notlast], charset)

    @staticmethod
    def get_indent(level, charset):
        """Build the indentation prefix for one row; ``level`` holds one
        flag per ancestor saying whether its trunk line continues."""
        parts = []
        for active in level:
            parts.append("%s " % charset.trunk if active else " ")
        return "".join(parts)
class ChyoaTree(Tree):
    """Tree built from a scraped CHYOA story directory.

    The directory must contain ``meta.json`` (holding the root chapter
    id) and one ``<id>.json`` file per chapter.
    """
    def __init__(self, path):
        # Chapter files are referenced relative to ``path``, so switch
        # the working directory temporarily -- and always restore it,
        # even when a file is missing or malformed (the original left
        # the process in ``path`` on any exception).
        prev_dir = os.getcwd()
        os.chdir(path)
        try:
            with open("meta.json", "r") as fh:
                meta = json.load(fh)
            name, tree = self.build_dict(meta["root"])
        finally:
            os.chdir(prev_dir)
        Tree.__init__(self, name, tree)

    @staticmethod
    def build_dict(chapter_id):
        """Return (chapter name, children dict) for ``chapter_id``,
        recursing through its choices.

        Renamed the parameter from ``id`` to avoid shadowing the
        builtin; it is only called positionally.
        """
        tree = {}
        with open("%d.json" % chapter_id, "r") as fh:
            chapter = json.load(fh)
        name = chapter["name"]
        for child in chapter["choices"]:
            child_name, grandchildren = ChyoaTree.build_dict(child)
            tree[child_name] = grandchildren
        return name, tree
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/tree.py",
"copies": "1",
"size": "2316",
"license": "mit",
"hash": 3660345426783317500,
"line_mean": 27.1463414634,
"line_max": 93,
"alpha_frac": 0.5485268631,
"autogenerated": false,
"ratio": 3.8986486486486487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874342482813234,
"avg_score": 0.014566605787082847,
"num_lines": 82
} |
# A one-element tuple needs a trailing comma: ('City') is just the
# string 'City', which makes "from ... import *" iterate its characters
# and fail looking for names C, i, t, y.
__all__ = ('City',)
from building import Building
from pocketthrone.entities.unit import Unit
from pocketthrone.entities.event import *
from random import choice, seed
from pocketthrone.managers.pipe import L
from pocketthrone.managers.eventmanager import EventManager
# city wall and building positions (N = Wall, L = Street, O = Building)
# NNNNN
# NOLON
# NLOLN
# NOLON
# NNNNN
class City:
	"""A capturable city entity on the game map (Python 2 codebase).

	Tracks owner, position, size, hit points and buildings.  Unit
	recruitment is delegated to a Production queue owned by the
	CityManager (see _recruition), and the city reacts to game events
	through on_event().
	"""
	# engine properties
	_id = -1
	_map = None
	_tag = "[City] "
	#names
	name = ""
	name_de = ""
	# resources
	image_path = None
	json_path = None
	# flags
	is_undestroyable = False
	is_owned_by_nature = False
	is_coastal = False
	water_position = None
	# changeable building vars
	player_num = -1
	city_size = 1
	hp = -1
	region = None
	pos_x = -1
	pos_y = -1
	# buildings
	# NOTE(review): class-level mutable list -- shared across City
	# instances unless rebound, and add_building_at appends to it in
	# place; confirm this sharing is intentional.
	buildings = []
	# production
	production = []
	production_time = -1
	# refactored production
	recruition = None
	construction = None
	# NOTE(review): 'buildings=[]' is a mutable default argument and
	# 'capital' is never read in this method -- confirm both are
	# intentional.
	def __init__(self, owner_num=-1, name=None, pos=None, size=1, buildings=[], capital=False):
		# set name & hp
		self.hp = 30
		self.player_num = owner_num
		self.name = name
		if name == None:
			self.get_random_name()
		self.size = size
		if pos:
			self.set_position(pos)
	def assign_id(self, _id):
		self._id = _id
		# register as listener in EventManager
		EventManager.register(self)
	def get_random_name(self):
		'''generates a random name for the city'''
		# hard-coded pre- & postifxes for city names
		prefixes = ["S", "Cap", "Fer", "Luc", "Fan", "A", "Grek", "Piac"]
		postfixes = ["andria", "ua", "opolis", "acine", "ticum", "udurum", "ula", "epinum"]
		# fraction city name generation
		print(self._tag + "playernum=" + str(self.player_num))
		fraction = L.PlayerManager.get_player_by_num(self.player_num).get_fraction()
		if fraction:
			city_name = fraction.get_random_city_name()
			self.name = city_name
		else:
			city_name = choice(prefixes) + choice(postfixes)
			self.name = city_name
	def _get_absolute_position(self, rel_pos):
		'''returns absolute position of a position relative to town center'''
		abs_x = self.get_position()[0] + rel_pos[0]
		abs_y = self.get_position()[1] + rel_pos[1]
		return (abs_x, abs_y)
	def _check_for_water(self):
		'''check if the city is near a water tile and set is_coastal'''
		# the eight neighbors of the city center
		positions = [(-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1)]
		# for each tile besides the city center
		for rel_pos in positions:
			# calculate absolute position
			abs_pos = self._get_absolute_position(rel_pos)
			# check if tile landscape is water
			landscape = L.MapManager.get_tile_at(abs_pos).get_landscape()
			if landscape == "W":
				# set is_coastal True
				self.is_coastal = True
				# set position as usable for harbor
				self.water_position = (rel_pos)
		if self.is_coastal:
			self._build_harbor()
	def _build_harbor(self):
		'''builds an harbor building on a coastal position'''
		self.add_building_at("harbor", self.water_position)
	'''get the name of this city'''
	def get_name(self):
		return self.name
	def set_name(self, name):
		'''set the name of this city'''
		self.name = name
	def get_player_num(self):
		'''get number of the city owner'''
		return self.player_num
	def get_player(self):
		return L.PlayerManager.get_player_by_num(self.player_num)
	# set city owner by player number
	def set_player_num(self, player_num):
		self.player_num = player_num
	def capture(self, player_num):
		'''capture city by player under number player_num'''
		# stop production & set new player
		self.stop_production()
		self.set_player_num(player_num)
		# fire CityCapturedEvent
		ev_city_captured = CityCapturedEvent(self, player_num)
		EventManager.fire(ev_city_captured)
	def get_image_path(self):
		'''get the image path for the town centre'''
		# NOTE(review): falls through (returns None) for any size other
		# than 1 or 2 -- confirm callers handle that.
		if self.get_size() == 1:
			return "bld_village"
		elif self.get_size() == 2:
			return "bld_city"
	def set_position(self, (pos_x, pos_y)):
		'''set the position of this city'''
		self.pos_x = pos_x
		self.pos_y = pos_y
	def get_position(self):
		'''return the position of this city'''
		return (self.pos_x, self.pos_y)
	def set_size(self, size_num):
		'''set this cities size'''
		self.size = size_num
		self.hp = size_num * 15
	def get_size(self):
		'''returns this cities size'''
		return self.size
	def name_size(self):
		'''return the name of the city type, depending on its size'''
		# (Ruins < Village < City < Capital)
		if self.city_size <= 0:
			return "Ruins"
		elif self.city_size == 1:
			return "Village"
		elif self.city_size == 2:
			return "City"
		else:
			return "Capital"
	def get_hp(self):
		'''return the health of thsi city'''
		return self.hp
	def _recruition(self):
		# the recruition queue is owned by the CityManager, keyed by id
		return L.CityManager.get_recruition_for(self._id)
	def flag_source(self):
		# image name of the owner fraction's flag
		fraction = self.get_player().get_fraction()
		if fraction:
			fraction_name = fraction.basename
			return "flag_" + fraction_name + "_big"
		return "flag_none_big"
	def recruit_unit(self, unit_blueprint):
		'''start recruiting a new unit'''
		# set production and production time
		item_dur = [unit_blueprint, unit_blueprint.cost_turns]
		recruition = self._recruition()
		recruition.append(item_dur)
		print(self._tag + "RECRUIT " + unit_blueprint.get_name() + " in " + self.get_name())
		print(self._tag + "city recrutiton=" + repr(recruition))
	def get_buildings(self):
		'''returns a list of all buildings in this city'''
		blds = []
		for bld in self.buildings:
			if bld.get_city() == self:
				blds.append(bld)
		return blds
	def get_building_at(self, (pos_x, pos_y)):
		'''returns the building at an absolute position when built'''
		building_at = None
		for building in self.get_buildings():
			if building.get_position() == (pos_x, pos_y):
				building_at = building
		return building_at
	def has_building(self, building_type):
		'''returns True when this city has built a building of type building_type'''
		for building in self.get_buildings():
			if building.get_type() == building_type:
				return True
		return False
	def add_building_at(self, building_type, (rel_x, rel_y), flatten_ground=False):
		'''add a building at city position relative position'''
		# calculate absulute building position
		abs_pos = (self.pos_x + rel_x, self.pos_y + rel_y)
		# make building obecjt
		new_bld = Building(self, building_type)
		new_bld.set_position(abs_pos)
		new_bld.rel_x = rel_x
		new_bld.rel_y = rel_y
		# append to building list
		self.buildings.append(new_bld)
		# make dirt ground
		if L.MapManager:
			self._scrape_ground()
		return new_bld
	def add_building(self, building_type):
		'''add a building at a random position inside the city'''
		# get all free building positions
		positions = [(-1,-1), (1,-1), (1,1), (-1,1)]
		print(repr(self.get_buildings()))
		for building in self.get_buildings():
			taken_rel_pos = building.get_relative_position()
			if taken_rel_pos in positions:
				print("buildign at " + str(taken_rel_pos))
				positions.remove(taken_rel_pos)
		# select a random position out of them
		seed()
		rel_pos = choice(positions)
		# build the building a this pos
		self.add_building_at(building_type, rel_pos)
	def add_city_wall(self):
		'''build a wall around this city'''
		# add side walls
		positions = [(-2,-1), (-1,-2), (2,-1), (-1,2)]
		side = 1
		while side <= 4:
			start_pos = positions[side -1]
			self._add_city_wall_line(side, start_pos)
			side += 1
		# add edges of the city wall
		self._add_city_wall_edges()
	def _add_city_wall_line(self, side, (rel_x, rel_y), length=3):
		'''add a city wall on one side of this town'''
		# 1 = left; 2 = top; 3 = right; 4 = bottom
		# set correct wall images
		image_path = None
		gate_image_path = "bld_wall_gate_hor"
		vertically = True
		if side == 1:
			image_path = "bld_wall_left"
		elif side == 2 or side == 4:
			image_path = "bld_wall_hor"
			vertically = False
		elif side == 3:
			image_path = "bld_wall_right"
		# make a city wall line
		i = 0
		while i < length:
			# get tile at wall position
			wall = self.add_building_at("wall", (rel_x, rel_y))
			# set correct image paths in image_override of the wall building
			wall.image_override = image_path
			if vertically:
				rel_y += 1
			else:
				rel_x += 1
			# make wall gates on top & bottom of an horizontal wall
			if i == 1:
				wall.image_override = gate_image_path
			i += 1
	def _add_city_wall_edges(self):
		'''add the city wall edges to this town'''
		# city wall: relative edge positions
		edge_positions = [(-2,-2), (2,-2), (-2,2), (2,2)]
		# city wall edges: image paths
		edge_img_prefix = "bld_wall_edge_"
		edge_img_postfix = ["lefttop", "righttop", "leftbottom", "rightbottom"]
		# make city wall edges
		i = 0
		while i < 4:
			# calculate absolute building position
			wall_position = edge_positions[i]
			# get full image path of the wall edge
			edge_img_path = edge_img_prefix + edge_img_postfix[i]
			# create wall building and set image_override
			wall_edge = self.add_building_at("wall", wall_position)
			wall_edge.image_override = edge_img_path
			i += 1
	def get_unit_in_production(self):
		'''returns the unit thats actually in production in this city'''
		recruition = self._recruition()
		if recruition:
			return recruition.get_item()
		return None
	def name_production(self):
		'''returns name of unit in production or nothing as string'''
		recruition = self._recruition()
		if not recruition or not recruition.get_item() or not recruition.is_running():
			return "nothing"
		else:
			return recruition.get_item().get_name()
	def is_recruiting(self):
		'''returns True, if a unit in this city is in production'''
		if self._recruition() != None:
			return self._recruition().is_running()
		return False
	def reduce_production_time(self):
		'''reduce production time by 1 turn (called by city on NextTurnEvent)'''
		recruition = self._recruition()
		if recruition:
			recruition.decrease()
	def stop_production(self):
		'''cancel city production (p.e. when its captured)'''
		recruition = self._recruition()
		if recruition:
			recruition.abort()
	def finish_recruition(self, unit):
		# fire CityRecruitmentFinishedEvent
		ev_production_finished = CityRecruitmentFinishedEvent(self, unit)
		EventManager.fire(ev_production_finished)
		# clear production unit and time
		self.stop_production()
	def has_requirement(self, req_name):
		'''check if city has unit requirement'''
		owner_num = self.get_player_num()
		owner_fraction = L.PlayerManager.get_player_by_num(owner_num).get_fraction()
		fraction_basename = owner_fraction.get_basename()
		fraction_req = "is_" + fraction_basename
		# is_<fraction>
		if req_name == fraction_req:
			return True
		# is_coastal
		if req_name == "is_coastal":
			return self.is_coastal
		# has_building
		if req_name.startswith("has_"):
			req_building = req_name.split("_")[1]
			return self.has_building(req_building)
		return False
	def _scrape_ground(self):
		# "G" = ground/dirt landscape code
		GROUND = "G"
		# scrape ground under city center
		pos = self.get_position()
		tile = L.MapManager.get_tile_at(pos)
		if tile:
			tile.set_landscape(GROUND)
		# scrape ground under buildings
		for bld in self.buildings:
			if bld.get_type() != "harbor":
				pos = bld.get_position()
				tile = L.MapManager.get_tile_at(pos)
				tile.set_landscape(GROUND)
	def __repr__(self):
		'''returns an xml-like string representation of this city'''
		return "<City:" + str(self._id) + " player=" + str(self.player_num) + " name=" + self.name + " size=" + str(self.name_size()) + " pos=" + str(self.get_position()) + " hp=" + str(self.hp) + " water="+ str(self.is_coastal) + " recruits=" + self.name_production() + ">"
	def on_event(self, event):
		# check for water access
		if isinstance(event, GameStartedEvent):
			self._check_for_water()
			self._scrape_ground()
			if not self.name:
				self.get_random_name()
		# capture city when an enemy player is moving on this city
		if isinstance(event, UnitMovedEvent):
			unit_pos = event.unit.get_position()
			attacker_unit = event.unit
			# when an enemy unit moves into city center
			if unit_pos == self.get_position() and attacker_unit.get_player_num() != self.get_player_num():
				attacker_player_num = attacker_unit.get_player_num()
				self.capture(attacker_player_num)
		# reduce production time each turn
		if isinstance(event, NextOneEvent):
			if event.actual_player.num == self.get_player_num():
				if self._recruition().is_running():
					self._recruition().decrease()
					print(self._tag + self.get_name() + " prod_id="+ str(self._recruition().parent_id) + " prod -1")
		# finish production when neccessary
		if isinstance(event, ProductionFinishedEvent):
			if event.city._id == self._id:
				if isinstance(event.item, Unit):
					print(self._tag + event.city.get_name() + " FINISHED " + repr(event.item))
					self.finish_recruition(event.item)
		# get random city name when no one is defined
		if isinstance(event, MapLoadedEvent):
			if self.name == None:
				self.get_random_name()
class Production(object):
	"""FIFO production queue attached to a city (referenced by id).

	Items are queued together with a duration in turns; decrease()
	advances the head item and fires ProductionFinishedEvent when it
	reaches zero.
	"""
	def __init__(self, parent_id, ent=None):
		self.items = []
		self.duration = []
		self.ent = ent
		self.parent_id = parent_id
		# listen on the global event bus
		EventManager.register(self)
	def append(self, item_dur):
		'''queue an [item, duration-in-turns] pair'''
		item = item_dur[0]
		dur = item_dur[1]
		self.items.append(item)
		self.duration.append(dur)
	def get_item(self):
		'''return the item currently in production, or None when idle'''
		# Fix: the original bare "except:" swallowed every exception
		# (including KeyboardInterrupt); only an empty queue is expected.
		try:
			return self.items[0]
		except IndexError:
			return None
	def get_duration(self):
		'''return the remaining turns of the head item, or None'''
		try:
			return self.duration[0]
		except IndexError:
			return None
	def is_running(self):
		'''True when at least one item is queued'''
		if len(self.items) > 0:
			return True
		return False
	def decrease(self, num=1):
		'''advance production by num turns; finish the head item at 0'''
		if self.is_running():
			self.duration[0] -= num
			if self.duration[0] < 1:
				self._item_finished(self.items[0])
	def abort(self):
		'''cancel the whole queue'''
		self.items = []
		self.duration = []
	def next(self):
		pass
	def _item_finished(self, item):
		# resolve the owning city lazily through the CityManager
		parent = L.CityManager.get_city_by_id(self.parent_id)
		ev_prod_finished = ProductionFinishedEvent(parent, item)
		EventManager.fire(ev_prod_finished)
		self.abort()
	def on_event(self, event):
		pass
	def __repr__(self):
		return "<Production city_id=" + str(self.parent_id) + " list=" + repr(self.items) + " durs=" + repr(self.duration) + ">"
| {
"repo_name": "herrschr/pocket-throne",
"path": "pocketthrone/entities/city.py",
"copies": "2",
"size": "14038",
"license": "bsd-2-clause",
"hash": 262325562302491460,
"line_mean": 27.9443298969,
"line_max": 270,
"alpha_frac": 0.6676877048,
"autogenerated": false,
"ratio": 2.890856672158155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9374062471416035,
"avg_score": 0.0368963811084241,
"num_lines": 485
} |
__all__ = ['classAddMethod', 'Category']
from _objc import selector, classAddMethods, objc_class
def classAddMethod(cls, name, method):
    """
    Add a single method to a class. 'name' is the ObjC selector
    """
    if isinstance(method, selector):
        # Rebind an existing selector object under the requested name.
        new_method = selector(method.callable,
                              selector=name,
                              signature=method.signature,
                              isClassMethod=method.isClassMethod)
    else:
        # Wrap a plain callable in a selector with a default signature.
        new_method = selector(method, selector=name)
    return classAddMethods(cls, [new_method])
#
# Syntactic support for categories
#
class _CategoryMeta(type):
    """
    Meta class for categories.
    """
    __slots__ = ()
    # Names injected into every class body by Python itself; these must
    # never be copied onto the target class.
    _IGNORENAMES = ('__module__', '__name__')
    def _newSubclass(cls, name, bases, methods):
        # Call type.__new__ directly so that building the helper class in
        # Category() does not trigger this metaclass's __new__ below.
        return type.__new__(cls, name, bases, methods)
    # Python 2 idiom: classmethod registration without decorator syntax.
    _newSubclass = classmethod(_newSubclass)
    def __new__(cls, name, bases, methods):
        if len(bases) != 1:
            raise TypeError, "Cannot have multiple inheritance with Categories"
        c = bases[0].real_class
        if c.__name__ != name:
            raise TypeError, "Category name must be same as class name"
        # Copy every user-defined method of the category body onto the
        # original Objective-C class (Python 2 dict.iteritems).
        m = [ x[1] for x in methods.iteritems() if x[0] not in cls._IGNORENAMES ]
        classAddMethods(c, m)
        # Return the original class so the surrounding class statement
        # rebinds the name to the (now extended) original.
        return c
def Category(cls):
    """
    Create a category on ``cls``.

    Usage:
        class SomeClass (Category(SomeClass)):
            def method(self):
                pass

    ``SomeClass`` is an existing class that will be rebound to the same
    value. The side-effect of this class definition is that the methods
    in the class definition will be added to the existing class.
    """
    if not isinstance(cls, objc_class):
        raise TypeError, "Category can only be used on Objective-C classes"
    # Build a throwaway base whose metaclass (_CategoryMeta) folds the
    # methods of the following class statement back into ``cls``.
    retval = _CategoryMeta._newSubclass('Category', (), dict(real_class=cls))
    return retval
| {
"repo_name": "rays/ipodderx-core",
"path": "objc/_category.py",
"copies": "1",
"size": "1883",
"license": "mit",
"hash": 8560687958601338000,
"line_mean": 28.8888888889,
"line_max": 81,
"alpha_frac": 0.610196495,
"autogenerated": false,
"ratio": 4.075757575757576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004295972232800365,
"num_lines": 63
} |
__all__ = ['ClassContextManager']
class ClassContextManager(object):
    """Context manager intended to be used as a class attribute.

    Subclasses override enter()/exit(); both receive the arguments the
    manager was constructed with.  The nested ``__metaclass__``
    descriptor records the owning instance in ``parent`` on attribute
    access (Python 2 metaclass semantics).
    """
    class __metaclass__(type):
        def __get__(self, instance, owner):
            # Remember which instance the manager was looked up on.
            self.parent = instance
            return self

    def __init__(self, *args, **kwargs):
        # Saved and replayed into enter()/exit().
        self.args = args
        self.kwargs = kwargs

    def __enter__(self):
        self.enter(*self.args, **self.kwargs)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exit(*self.args, **self.kwargs)

    def enter(self, *args, **kwargs):
        # Hook: override in subclasses.
        pass

    def exit(self, *args, **kwargs):
        # Hook: override in subclasses.
        pass
if __name__ == "__main__":
    # Demo (Python 2 print statements): a context manager defined as a
    # class attribute so it can reach its owning instance via .parent.
    class Git(object):
        def do_something(self, new_branch):
            print "Do Something with", new_branch
        class with_branch(ClassContextManager):
            def enter(self, branch_name):
                print "Create", branch_name
                # .parent is the Git instance (set by the metaclass).
                self.parent.do_something(branch_name)
            def exit(self, branch_name):
                print "Delete", branch_name
    git = Git()
    with git.with_branch('origin/master'):
        print "Do Work in Branch"
| {
"repo_name": "rocktavious/pyul",
"path": "pyul/coreUtils/classcontextmanager.py",
"copies": "1",
"size": "1089",
"license": "mit",
"hash": 4721371697524253000,
"line_mean": 24.3255813953,
"line_max": 53,
"alpha_frac": 0.5500459137,
"autogenerated": false,
"ratio": 3.9456521739130435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9988430645752578,
"avg_score": 0.0014534883720930232,
"num_lines": 43
} |
"""All classes in this module are read-only data classes and should not
have their values modified after being loaded from external files.
This module also contains various methods for retrieving data from these
classes.
"""
import pygame
from pygame.locals import Rect
def load_frame_durations(action):
    """Return a tuple of ints: the duration of each frame within the
    specified Action, in frame order.

    (The original docstring claimed a "tuple containing tuples of
    ints", but each ``frame.duration`` is a single value, so the result
    is a flat tuple.)

    Args:
        action: The Action that will be read.
    """
    # Generator expression replaces the manual append loop.
    return tuple(frame.duration for frame in action.frames)
class CharacterData(object):
    """Read-only record describing one playable character.

    Loaded from XML files created with the Character Editor via the
    CharacterLoader class.

    Attributes:
        name: The character's in-game name; due to screen space it
            should not exceed 20 characters.
        speed: Walking speed in pixels/second.
        stamina: Starting health; hitbox damage reduces it, and 0 means
            a K.O.
        stun_threshold: Amount of dizzy stun endured before becoming
            dizzy.
        mugshot_path: Face image path for the Character Selection
            screen, relative to the characters folder ('characters/' is
            omitted).
        actions: All Actions the character can perform.
        default_actions: Maps the names of universal action types
            (e.g. 'walk', 'stand') to indices into ``actions``.
    """

    def __init__(self):
        # Scalar stats.
        self.name = ""
        self.speed = 0
        self.stamina = 0
        self.stun_threshold = 0
        # Resources and move list.
        self.mugshot_path = ""
        self.actions = []
        self.default_actions = {}
class Action(object):
    """A single move the character can perform: a basic utility such
    as walking or jumping, or an attack sequence.

    Attributes:
        name: Identifier shown only in the Character Editor; the game
            engine refers to Actions by their order in the XML file.
        spritesheet_path: Sprite sheet path relative to the characters
            folder ('characters/' is omitted).
        x_offset: For the neutral pose, the horizontal shift applied
            when the character changes direction (keeps the feet in
            place for unbalanced sprites); for other Actions, the shift
            that aligns the feet with the neutral pose.
        condition: State the character must be in to execute this
            Action: Standing/Walking, Crouching, or Jumping/In-the-Air.
        is_multi_hit: When True, each Frame may land one of its
            Hitboxes; the hits are treated separately and count
            towards the combo total.  Otherwise only the first hit
            lands.
        input_priority: Tie-breaker when several Actions' input
            sequences match the latest button presses (e.g. a fireball
            ending in the same button as a light punch); the highest
            value executes.
        meter_gain: Points added to the player's Special Gauge (max
            100) when this Action executes.
        meter_needed: Gauge points required to perform this Action;
            deducted on execution.
        proximity: The opponent must have a Hurtbox within this many
            pixels of the character's closest Hurtbox; 0 allows any
            distance.  Proximity-limited Actions outrank unlimited
            ones in input priority.
        start_counter_frame: For Counter-type moves, the Frame index to
            skip to when a Hurtbox is struck during earlier Frames; if
            the character is not hit, the later Frames are ignored.
            0 means this Action is not a Counter attack.
        frames: All animation Frames within this Action.
        input_list: Required button sequence (InputStep objects).
    """

    # NOTE(review): frame_width/frame_height belong to the documented
    # data model but are not initialized here -- presumably set by the
    # character loader; confirm.
    def __init__(self):
        # Identification and artwork.
        self.name = ""
        self.spritesheet_path = ""
        self.x_offset = 0
        # Execution requirements.
        self.condition = 0
        self.proximity = 0
        self.meter_needed = 0
        # Combat behaviour.
        self.is_multi_hit = False
        self.input_priority = 1
        self.meter_gain = 0
        self.start_counter_frame = 0
        # Animation and input data.
        self.frames = []
        self.input_list = []
class InputStep(object):
    """One step of the ordered button sequence required to perform an
    Action.

    Attributes:
        inputs: Names of all buttons that must be pressed during this
            step.
    """

    def __init__(self):
        self.inputs = []
class Frame(object):
    """One animation cell within an Action or Projectile.

    On sprite sheets, frames are uniform-size 'cells' ordered
    left-to-right.

    Attributes:
        duration: Display time measured in 60ths of a second
            ('frames'); 60 shows this Frame for one full second.
        cancelable: 1 -> the player may interrupt this Frame to start
            another Action; 2 -> only after one of this Frame's
            Hitboxes strikes the opponent.  (Ignored for most Default
            Actions.)
        move_x: Horizontal distance in pixels the character moves over
            this Frame's entire duration.
        move_y: Vertical distance moved.
        hurtboxes: All Hurtboxes contained in this Frame.
        hitboxes: All Hitboxes contained in this Frame.
        projectiles: All Projectiles created from this Frame.
    """

    def __init__(self):
        # Timing and cancel rules.
        self.duration = 0
        self.cancelable = 0
        # Character displacement.
        self.move_x = 0
        self.move_y = 0
        # Collision and spawn data.
        self.hurtboxes = []
        self.hitboxes = []
        self.projectiles = []
class CollisionBox(object):
    """Abstract rectangular region positioned somewhere on a Frame.

    Hurtbox and Hitbox derive from this class.

    Attributes:
        x_offset, y_offset  The box's position relative to the character.
        width, height       The box's dimensions.
    """
    def __init__(self):
        # Position relative to the character.
        self.x_offset = self.y_offset = 0
        # Size of the box.
        self.width = self.height = 0
class Hurtbox(CollisionBox):
    """A rectangular space located on a Frame that represents the
    character's vulnerable areas. If it collides with a Hitbox from the
    opponent or a hazardous Projectile, the character will take damage.
    Adds no attributes of its own; the box's offset relative to the
    character (x_offset, y_offset) and its dimensions (width, height)
    are inherited from CollisionBox.
    """
class Hitbox(CollisionBox):
    """A rectangular space on a Frame marking the harmful areas of a
    Character or Projectile; a character whose Hurtbox overlaps it takes
    damage. Position (x_offset, y_offset) and size (width, height) are
    inherited from CollisionBox.

    Attributes:
        damage          Stamina lost by the colliding character: the full
                        amount if unblocked, a fraction if blocked.
        hitstun         Frames the victim recoils, unable to act (applied
                        only when the attack is unblocked).
        blockstun       Frames the victim is trapped in the blocking
                        animation when the attack IS blocked. (For
                        balance this should stay below hitstun.)
        knockback       Horizontal pushback distance in pixels.
        dizzy_stun      Dizzy Stun dealt when the attack is unblocked.
        effect          Extra effect on an unblocked victim: 1 = trip,
                        2 = launch into the air.
        can_block_high  False means this Hitbox cannot be blocked while
                        standing or airborne.
        can_block_low   False means it cannot be blocked while crouching.
    """
    def __init__(self):
        super(Hitbox, self).__init__()
        # Damage and stun dealt on contact.
        self.damage, self.hitstun, self.blockstun = 0, 0, 0
        self.knockback, self.dizzy_stun = 0, 0
        # Extra on-hit effect code (0 = none).
        self.effect = 0
        # By default the Hitbox is unblockable from every stance.
        self.can_block_high = self.can_block_low = False
class ProjectileData(object):
    """Data backing a thrown weapon (fireball, bullet, brick, ...).

    Projectiles are independent entities: once spawned from a character's
    Frame they travel across the stage on their own, with their own Frames
    and Hitboxes. Most of this data lives in separate Projectile XML files
    referenced by the Character's main XML file.

    Attributes:
        name                   Identifier shown only in the Character
                               Editor.
        rect                   Initial offset relative to the character,
                               plus the Projectile's Frame dimensions.
        x_speed                Horizontal travel speed across the screen.
        y_speed                Vertical travel speed.
        spritesheet_path       Sprite sheet filepath; always inside the
                               characters folder, so 'characters/' is
                               omitted from this String.
        stamina                Colliding with an opposing Projectile costs
                               stamina equal to the other's stamina value;
                               at 0 this Projectile is cancelled out.
        first_loop_frame       Frames before this index play once as
                               start-up; from here up to (but excluding)
                               first_collision_frame, Frames loop while
                               the Projectile is on-screen and has hit
                               nothing.
        first_collision_frame  Index skipped to when the Projectile hits
                               the opponent or runs out of stamina.
        frames                 The Frames of the Projectile's animation
                               (may contain Hitboxes, never Hurtboxes).
    """
    def __init__(self):
        # Editor-facing identifier and art source.
        self.name = ""
        self.spritesheet_path = ""
        # Spawn offset relative to the character plus cell dimensions.
        self.rect = Rect(0, 0, 0, 0)
        # Screen-travel velocity components.
        self.x_speed, self.y_speed = 0, 0
        # Durability against opposing Projectiles.
        self.stamina = 0
        # Animation segmentation indices.
        self.first_loop_frame = 0
        self.first_collision_frame = 0
        self.frames = []
| {
"repo_name": "MarquisLP/Sidewalk-Champion",
"path": "lib/custom_data/character_data.py",
"copies": "1",
"size": "17789",
"license": "unlicense",
"hash": 7082967290084277000,
"line_mean": 47.8708791209,
"line_max": 72,
"alpha_frac": 0.502726404,
"autogenerated": false,
"ratio": 5.663482967207895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005923763736263737,
"num_lines": 364
} |
__all__ = ['classification_report', 'find_best_thresholds', 'generate_pr_curves']
import logging
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_recall_fscore_support
from tabulate import tabulate
from uriutils import uri_open
logger = logging.getLogger(__name__)
def classification_report(Y_true, Y_proba, *, labels=None, target_names=None, thresholds=None, precision_thresholds=None, order='names'):
    """Build a tabulated per-label classification report.

    For every label the report shows support, average precision, and
    precision/recall/F1 at several operating points: the "natural"
    argmax/0.5 decision, an optional caller-supplied threshold, the
    best-F1 threshold, and (optionally) the best threshold whose
    precision meets a floor. Macro/micro averages and exact-match
    accuracy rows are appended for multi-label problems.

    :param Y_true: label-indicator matrix (1-D labels are coerced via
        ``_make_label_indicator``), shape (n_samples, n_classes).
    :param Y_proba: predicted scores, same shape as ``Y_true``.
    :param labels: optional column indices restricting the report.
    :param target_names: display name per (kept) label column.
    :param thresholds: scalar or per-label array of extra decision
        thresholds to evaluate.
    :param precision_thresholds: scalar or per-label array; when given,
        also report the best-F1 threshold with precision >= this floor.
    :param order: ``'support'`` sorts rows by support descending;
        anything else keeps ``target_names`` order.
    :returns: the formatted table plus a one-line summary, as a string.
    """
    Y_true, Y_proba = _make_label_indicator(Y_true, Y_proba)
    Y_true, Y_proba, target_names = _filter_labels(Y_true, Y_proba, labels=labels, target_names=target_names)
    n_classes = Y_true.shape[1]
    assert len(target_names) == n_classes
    # Broadcast a scalar threshold to all labels; in the binary case the two
    # columns are complementary, so the second threshold mirrors the first.
    if isinstance(thresholds, float): thresholds = np.full(n_classes, thresholds)
    if thresholds is not None and n_classes == 2: thresholds[1] = 1.0 - thresholds[0]
    if thresholds is not None:
        assert thresholds.shape[0] == n_classes
        assert ((thresholds <= 1).all() and (thresholds >= 0.0).all())
    #end if
    if isinstance(precision_thresholds, float): precision_thresholds = np.full(n_classes, precision_thresholds)
    if precision_thresholds is not None:
        assert precision_thresholds.shape[0] == n_classes
        assert ((precision_thresholds <= 1).all() and (precision_thresholds >= 0.0).all())
    #end if
    # Rows that sum to 1 indicate mutually exclusive classes: predict by
    # argmax. Otherwise treat each column as an independent 0.5 cutoff.
    is_multiclass = all(np.isclose(Y_proba[i, :].sum(), 1.0) for i in range(Y_true.shape[0]))
    if is_multiclass:
        Y_predict = np.zeros(Y_proba.shape)
        for i in range(Y_true.shape[0]):
            j = np.argmax(Y_proba[i, :])
            Y_predict[i, j] = 1
        #end for
    else:
        Y_predict = (Y_proba >= 0.5)
    #end if
    table = []
    support_total = 0.0
    thresholds_best = np.zeros(n_classes)
    thresholds_minprec = np.zeros(n_classes)
    for i, name in enumerate(target_names):
        # Results using 0.5 as threshold
        p, r, f1, _ = precision_recall_fscore_support(Y_true[:, i], Y_predict[:, i], average='binary')  # Using default
        support = Y_true[:, i].sum()
        if support == 0: continue
        ap_score = average_precision_score(Y_true[:, i], Y_proba[:, i])
        row = [name, '{:d}'.format(int(support)), '{:.3f}'.format(ap_score), '{:.3f}/{:.3f}/{:.3f}'.format(p, r, f1)]
        support_total += support
        # Results using given thresholds
        if thresholds is not None:
            p, r, f1, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds[i], average='binary')  # Using thresholds
            row.append('{:.3f}: {:.3f}/{:.3f}/{:.3f}'.format(thresholds[i], p, r, f1))
        #end if
        # Results using optimal threshold
        if n_classes == 2 and i == 1:
            thresholds_best[i] = 1.0 - thresholds_best[0]
            p, r, f1, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds_best[i], average='binary')
            row.append('{:.3f}: {:.3f}/{:.3f}/{:.3f}'.format(thresholds_best[i], p, r, f1))
        else:
            p, r, t = precision_recall_curve(Y_true[:, i], Y_proba[:, i])
            f1 = np.nan_to_num((2 * p * r) / (p + r + 1e-8))  # epsilon guards 0/0
            best_f1_i = np.argmax(f1)
            thresholds_best[i] = t[best_f1_i]
            row.append('{:.3f}: {:.3f}/{:.3f}/{:.3f}'.format(thresholds_best[i], p[best_f1_i], r[best_f1_i], f1[best_f1_i]))
        #end if
        # Results using optimal threshold for precision > precision_threshold
        if precision_thresholds is not None:
            if n_classes == 2 and i == 1:
                thresholds_minprec[i] = 1.0 - thresholds_minprec[0]
                p, r, f1, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds_minprec[i], average='binary')
                row.append('{:.3f}: {:.3f}/{:.3f}/{:.3f}'.format(thresholds_minprec[i], p, r, f1))
            else:
                # p/r/f1/t here still refer to the full curve computed in the
                # "optimal threshold" else-branch above (same i).
                try:
                    best_f1_i = max(filter(lambda k: p[k] >= precision_thresholds[i], range(p.shape[0])), key=lambda k: f1[k])
                    # The curve's final point has no matching entry in ``t``;
                    # reject it (and a zero F1) as "not found".
                    if best_f1_i == p.shape[0] - 1 or f1[best_f1_i] == 0.0: raise ValueError()
                    thresholds_minprec[i] = t[best_f1_i]
                    row.append('{:.3f}: {:.3f}/{:.3f}/{:.3f}'.format(thresholds_minprec[i], p[best_f1_i], r[best_f1_i], f1[best_f1_i]))
                except ValueError:
                    best_f1_i = np.argmax(f1)
                    logger.warning('Unable to find threshold for label "{}" where precision >= {}.'.format(target_names[i], precision_thresholds[i]))
                    row.append('-')
                #end try
        #end if
        table.append(row)
    #end for
    if order == 'support':
        table.sort(key=lambda row: int(row[1]), reverse=True)
    headers = ['Label', 'Support', 'AP', 'Natural']
    if thresholds is not None: headers.append('File T')
    headers.append('Best T')
    if precision_thresholds is not None: headers.append('Min Prec T')
    if n_classes > 1:
        # Macro/micro averages and exact-match accuracy at each operating point.
        macro_averages = ['Macro average', '-', '{:.3f}'.format(average_precision_score(Y_true, Y_proba, average='macro')), '{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_predict, average='macro'))]
        if thresholds is not None: macro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds, average='macro')))
        macro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds_best, average='macro')))
        if precision_thresholds is not None: macro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds_minprec, average='macro')))
        micro_averages = ['Micro average', '-', '{:.3f}'.format(average_precision_score(Y_true, Y_proba, average='micro')), '{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_predict, average='micro'))]
        if thresholds is not None: micro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds, average='micro')))
        micro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds_best, average='micro')))
        if precision_thresholds is not None: micro_averages.append('{:.3f}/{:.3f}/{:.3f}'.format(*precision_recall_fscore_support(Y_true, Y_proba >= thresholds_minprec, average='micro')))
        perfect_set = ['Perfect set', str(Y_true.shape[0]), '-', '{:.3f}'.format(accuracy_score(Y_true, Y_predict))]
        if thresholds is not None: perfect_set.append('{:.3f}'.format(accuracy_score(Y_true, Y_proba >= thresholds)))
        perfect_set.append('{:.3f}'.format(accuracy_score(Y_true, Y_proba >= thresholds_best)))
        if precision_thresholds is not None: perfect_set.append('{:.3f}'.format(accuracy_score(Y_true, Y_proba >= thresholds_minprec)))
        table += [perfect_set, macro_averages, micro_averages]
        # Separator row sized to the widest cell seen in each column.
        table.insert(-3, ['-' * max(len(row[i]) for row in table + [headers]) for i in range(len(macro_averages))])
    #end if
    return tabulate(table, headers=headers, tablefmt='psql') + '\n{} labels, {} instances, {} instance-labels'.format(n_classes, Y_true.shape[0], int(support_total))
#end def
def find_best_thresholds(Y_true, Y_proba, *, labels=None, target_names=None, precision_thresholds=None):
    """Return the per-label decision threshold that maximizes F1.

    When ``precision_thresholds`` (scalar or per-label array) is given,
    the search is restricted to thresholds whose precision meets that
    floor; if no such point exists, it falls back to the unconstrained
    best-F1 threshold and logs a warning.
    """
    Y_true, Y_proba = _make_label_indicator(Y_true, Y_proba)
    Y_true, Y_proba, target_names = _filter_labels(Y_true, Y_proba, labels=labels, target_names=target_names)
    n_classes = Y_true.shape[1]
    if precision_thresholds is not None and isinstance(precision_thresholds, float): precision_thresholds = np.full(n_classes, precision_thresholds)
    assert Y_true.shape[0] == Y_proba.shape[0]
    assert Y_true.shape[1] == Y_proba.shape[1]
    assert len(target_names) == n_classes
    thresholds = np.zeros(n_classes)
    for i in range(n_classes):
        # Binary case: the two columns are complements, so the second
        # threshold is fully determined by the first.
        if n_classes == 2 and i == 1:
            thresholds[i] = 1.0 - thresholds[0]
            break
        #end if
        p, r, t = precision_recall_curve(Y_true[:, i], Y_proba[:, i])
        # F1 at every candidate threshold; epsilon guards against 0/0.
        f1 = np.nan_to_num((2 * p * r) / (p + r + 1e-8))
        if precision_thresholds is None:  # use optimal threshold
            best_f1_i = np.argmax(f1)
        else:  # use optimal threshold for precision > precision_threshold
            try:
                # max() raises ValueError when no curve point meets the
                # precision floor; the curve's final point is also rejected
                # because it has no corresponding entry in ``t``.
                best_f1_i = max(filter(lambda k: p[k] >= precision_thresholds[i], range(p.shape[0])), key=lambda k: f1[k])
                if best_f1_i == p.shape[0] - 1 or f1[best_f1_i] == 0.0: raise ValueError()
            except ValueError:
                best_f1_i = np.argmax(f1)
                logger.warning('Unable to find threshold for label "{}" where precision >= {}. Defaulting to best threshold of {}.'.format(target_names[i], precision_thresholds[i], t[best_f1_i]))
            #end try
        #end if
        thresholds[i] = t[best_f1_i]
    #end for
    return thresholds
#end def
def generate_pr_curves(Y_true, Y_proba, output_prefix, *, labels=None, target_names=None, thresholds=None, precision_thresholds=None):
    """Render one precision-recall curve per label under ``output_prefix``.

    For each label this writes ``<name>.pdf`` (the plot, with 'x' markers
    for the supplied, best-F1, and precision-floored thresholds) and
    ``<name>.txt`` (the raw precision/recall/threshold triples).
    """
    Y_true, Y_proba = _make_label_indicator(Y_true, Y_proba)
    Y_true, Y_proba, target_names = _filter_labels(Y_true, Y_proba, labels=labels, target_names=target_names)
    n_classes = Y_true.shape[1]
    # Broadcast a scalar threshold; binary columns are complementary.
    if isinstance(thresholds, float): thresholds = np.full(n_classes, thresholds)
    if thresholds is not None and n_classes == 2: thresholds[1] = 1.0 - thresholds[0]
    assert thresholds is None or ((thresholds <= 1).all() and (thresholds >= 0.0).all())
    if isinstance(precision_thresholds, float): precision_thresholds = np.full(n_classes, precision_thresholds)
    assert precision_thresholds is None or ((precision_thresholds <= 1).all() and (precision_thresholds >= 0.0).all())
    thresholds_best = np.zeros(n_classes)
    thresholds_minprec = np.zeros(n_classes)
    for i, name in enumerate(target_names):
        # Skip labels with no truth and no predicted mass at all.
        if Y_true[:, i].sum() == 0 and Y_proba[:, i].sum() == 0: continue
        precision, recall, thresholds_ = precision_recall_curve(Y_true[:, i], Y_proba[:, i])
        # F1 at every candidate threshold; epsilon guards against 0/0.
        f1 = np.nan_to_num((2 * precision * recall) / (precision + recall + 1e-8))
        ap_score = average_precision_score(Y_true[:, i], Y_proba[:, i])
        fig, ax = plt.subplots()
        ax.plot(recall, precision, label='Precision-Recall (AP={:.3f})'.format(ap_score))
        ax.set_ylabel('Precision')
        ax.set_xlabel('Recall')
        ax.set_ylim([0.0, 1.0])
        ax.set_xlim([0.0, 1.0])
        ax.set_title('Precision-Recall Curve for "{}"'.format(name))
        if thresholds is not None:  # Results using given thresholds
            p, r, f1_score, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds[i], average='binary')  # Using thresholds
            ax.plot([r], [p], marker='x', label='File T={:.3f}; F1={:.3f}'.format(thresholds[i], f1_score))
        #end if
        if n_classes == 2 and i == 1:  # Results using optimal threshold
            # Binary second column: threshold is the first one's complement.
            thresholds_best[i] = 1.0 - thresholds_best[0]
            p, r, f1_score, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds_best[i], average='binary')
        else:
            best_f1_i = np.argmax(f1)
            p, r, f1_score, thresholds_best[i] = precision[best_f1_i], recall[best_f1_i], f1[best_f1_i], thresholds_[best_f1_i]
        #end if
        ax.plot([r], [p], marker='x', label='Best T={:.3f}; F1={:.3f}'.format(thresholds_best[i], f1_score))
        if precision_thresholds is not None:  # Results using optimal threshold for precision > precision_threshold
            if n_classes == 2 and i == 1:
                thresholds_minprec[i] = 1.0 - thresholds_minprec[0]
                p, r, f1_score, _ = precision_recall_fscore_support(Y_true[:, i], Y_proba[:, i] >= thresholds_minprec[i], average='binary')
            else:
                try:
                    best_f1_i = max(filter(lambda k: precision[k] >= precision_thresholds[i], range(precision.shape[0])), key=lambda k: f1[k])
                    if best_f1_i == precision.shape[0] - 1 or f1[best_f1_i] == 0.0: raise ValueError()
                    thresholds_minprec[i] = thresholds_[best_f1_i]
                except ValueError:
                    best_f1_i = np.argmax(f1)
                    logger.warning('Unable to find threshold for label "{}" where precision >= {}.'.format(target_names[i], precision_thresholds[i], thresholds_[best_f1_i]))
                #end try
                # NOTE(review): on the fallback path above, the global-best
                # point is still plotted under the "Minprec" label -- unlike
                # classification_report, which reports '-'. Confirm intended.
                p, r, f1_score, thresholds_minprec[i] = precision[best_f1_i], recall[best_f1_i], f1[best_f1_i], thresholds_[best_f1_i]
            #end if
            ax.plot([r], [p], marker='x', label='Minprec T={:.3f}; F1={:.3f}'.format(thresholds_minprec[i], f1_score))
        #end if
        ax.legend()
        plt.tight_layout()
        # Dump the raw curve points alongside the plot.
        with uri_open(os.path.join(output_prefix, _sanitize_name(name) + '.txt'), 'w') as f:
            for p, r, t in zip(precision, recall, thresholds_):
                f.write('{} {} {}\n'.format(p, r, t))
        #end with
        with uri_open(os.path.join(output_prefix, _sanitize_name(name) + '.pdf'), 'wb') as f:
            fig.savefig(f, format='pdf')
            logger.info('Precision-Recall curve for "{}" saved to <{}>.'.format(name, f.name))
        #end with
    #end for
#end def
def _sanitize_name(name):
return ''.join(c for c in name if c.isalnum() or c in ' ._-()+=&').rstrip()
def _make_label_indicator(Y_true, Y_proba):
assert Y_true.shape[0] == Y_proba.shape[0]
assert Y_proba.ndim == 2
assert Y_proba.shape[1] > 1
if Y_true.ndim == 1:
Y_true_new = np.zeros(Y_proba.shape)
Y_true_max = Y_true.max()
if Y_true.shape[1] == 2:
assert Y_true_max == 1
for i in range(Y_true.shape[0]):
Y_true_new[i, 0] = 1.0 - Y_true[i]
Y_true_new[i, 1] = Y_true[i]
#end for
else:
assert Y_true_max == Y_true.shape[1]
for i in range(Y_true.shape[0]):
Y_true_new[i, Y_true[i]] = 1
#end if
return Y_true_new, Y_proba
#end if
return Y_true, Y_proba
#end def
def _filter_labels(Y_true, Y_proba, labels=[], target_names=None):
if target_names is None: target_names = list(range(Y_true.shape[1]))
if labels:
Y_true, Y_proba = Y_true[:, labels], Y_proba[:, labels]
target_names = [target_names[j] for j in labels]
#end if
return Y_true, Y_proba, target_names
#end def
def main():
    """Smoke-test: print a classification report over random scores."""
    truth = np.zeros((10, 3))
    truth[:4] = 1
    scores = np.random.rand(10, 3)
    print(classification_report(truth, scores, target_names=['ham', 'spam', 'bam'], precision_thresholds=0.75))
#end def
if __name__ == '__main__': main()
| {
"repo_name": "skylander86/ycml",
"path": "ycml/utils/analysis.py",
"copies": "1",
"size": "15721",
"license": "apache-2.0",
"hash": 905042473299407200,
"line_mean": 46.7841945289,
"line_max": 224,
"alpha_frac": 0.5892754914,
"autogenerated": false,
"ratio": 3.2215163934426227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4310791884842623,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Client']
import inspect
from types import MethodType
from utils import get_context
import methods
class Client (object):
    """Dynamically-built BadgeKit API client.

    At class-body evaluation time, every public function from every public
    module in ``methods`` is wrapped and attached to Client as a method.

    NOTE(review): this module is Python 2 only -- it relies on the
    3-argument ``MethodType(func, None, cls)`` unbound-method form and on
    ``inspect.getargspec``, neither of which exists in modern Python 3.
    """
    def __init__ (self, remote):
        # Remote BadgeKit API endpoint this client talks to.
        self._remote = remote
    @property
    def _path (self):
        # Root of the API path hierarchy; the client itself sits at ''.
        return ''
    @property
    def _parent (self):
        # The root client has no parent in the resource hierarchy.
        return None
    def bind (method, name):
        # Runs during class-body evaluation (hence no ``self``): wraps
        # ``method`` so a request context is injected when needed, then
        # attaches the wrapper to Client under ``name``.
        requires_context = True
        if inspect.isfunction(method):
            args = inspect.getargspec(method).args
            # Only inject a context if the wrapped API method asks for one.
            requires_context = ('context' in args)
        def api_method (self, context=None, *args, **kwargs):
            # NOTE(review): the first positional argument a caller passes
            # always lands in ``context`` -- presumably callers pass context
            # positionally or use kwargs; verify before changing call sites.
            if requires_context or context is not None:
                context = get_context(context or kwargs, self)
                return method(self, context, *args, **kwargs)
            return method(self, *args, **kwargs)
        bound_method = MethodType(api_method, None, Client)
        setattr(Client, name, bound_method)
        return bound_method
    # Bind every public function of every public module in ``methods``.
    # NOTE(review): the inner loop reuses the variable ``name``, clobbering
    # the module name on each pass -- harmless here, but easy to trip over.
    for name, module in methods.__dict__.items():
        if name[0] != '_':
            for name, method in module.__dict__.items():
                if name[0] != '_':
                    bind(method, name)
| {
"repo_name": "mozilla/badgekit-api-python-client",
"path": "badgekitapiclient/client/__init__.py",
"copies": "1",
"size": "1082",
"license": "mpl-2.0",
"hash": 1205392558391357200,
"line_mean": 23.5909090909,
"line_max": 58,
"alpha_frac": 0.6025878004,
"autogenerated": false,
"ratio": 3.9779411764705883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004957815513730738,
"num_lines": 44
} |
__all__ = ['Client']
import urlparse
import requests
from optimizely_ee import error
from optimizely_ee import resource
BASE_URL = 'https://www.optimizelyapis.com/experiment/v1/'
class Client(object):
    """HTTP client for the Optimizely Experiment REST API (v1).

    Exposes one ResourceGenerator per resource type and a low-level
    :meth:`request` helper that maps HTTP failures onto typed errors.
    """
    # HTTP verbs request() will forward to the ``requests`` library.
    ALLOWED_REQUESTS = ['get', 'post', 'put', 'delete']
    def __init__(self, api_key, api_base=BASE_URL):
        """Store API credentials and build the per-resource generators."""
        self.api_key = api_key
        self.api_base = api_base
        # One generator per API resource the client knows how to reach.
        resource_classes = {
            'Projects': resource.Project,
            'Experiments': resource.Experiment,
            'Variations': resource.Variation,
            'Goals': resource.Goal,
            'Audiences': resource.Audience,
            'Dimensions': resource.Dimension,
            'Schedules': resource.Schedule,
        }
        for attr_name, resource_class in resource_classes.items():
            setattr(self, attr_name, resource.ResourceGenerator(client=self, resource=resource_class))
    def request(self, method, url_parts, headers=None, data=None):
        """ Method for making requests to the Optimizely API
        """
        if method not in self.ALLOWED_REQUESTS:
            raise error.BadRequestError('%s is not a valid request type.' % method)
        # Attach authentication; keys containing ':' are classic tokens,
        # everything else is treated as an OAuth bearer token.
        headers = headers or {}
        if ":" in self.api_key:
            headers.update({'Token': self.api_key, 'User-Agent': 'optimizely-client-python/0.1.1'})
        else:
            headers.update({'Authorization': ' Bearer ' + self.api_key, 'User-Agent': 'optimizely-client-python/0.1.1'})
        # Issue the request and hand the response off for parsing.
        url = urlparse.urljoin(self.api_base, '/'.join(str(url_part) for url_part in url_parts))
        response = getattr(requests, method)(url, headers=headers, data=data)
        return self.parse_response(response)
    @staticmethod
    def parse_response(resp):
        """ Method to parse response from the Optimizely API and return results as JSON. Errors are thrown for various
        errors that the API can throw.
        """
        status = resp.status_code
        if status in (200, 201, 202):
            return resp.json()
        if status == 204:
            return None
        # Map known failure codes onto typed exceptions; anything else is
        # a generic OptimizelyError.
        errors_by_status = {
            400: error.BadRequestError,
            401: error.UnauthorizedError,
            403: error.ForbiddenError,
            404: error.NotFoundError,
            429: error.TooManyRequestsError,
            503: error.ServiceUnavailableError,
        }
        raise errors_by_status.get(status, error.OptimizelyError)(resp.text)
| {
"repo_name": "experimentengine/optimizely-client-python",
"path": "optimizely_ee/client.py",
"copies": "1",
"size": "2793",
"license": "mit",
"hash": 4827919902081366000,
"line_mean": 39.4782608696,
"line_max": 116,
"alpha_frac": 0.7031865378,
"autogenerated": false,
"ratio": 3.7794316644113666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9924883274579239,
"avg_score": 0.01154698552642557,
"num_lines": 69
} |
__all__ = ['Client']
import urlparse
import requests
from optimizely import error
from optimizely import resource
BASE_URL = 'https://www.optimizelyapis.com/experiment/v1/'
class Client(object):
    """HTTP client for the Optimizely Experiment REST API (v1).

    Exposes one ResourceGenerator per resource type and a low-level
    :meth:`request` helper that maps HTTP failures onto typed errors.
    """
    # HTTP verbs request() will forward to the ``requests`` library.
    ALLOWED_REQUESTS = ['get', 'post', 'put', 'delete']
    def __init__(self, api_key, api_base=BASE_URL):
        # set API information
        self.api_key = api_key
        self.api_base = api_base
        # instantiate resource generators for the relevant API resources
        self.Projects = resource.ResourceGenerator(client=self, resource=resource.Project)
        self.Experiments = resource.ResourceGenerator(client=self, resource=resource.Experiment)
        self.Variations = resource.ResourceGenerator(client=self, resource=resource.Variation)
        self.Goals = resource.ResourceGenerator(client=self, resource=resource.Goal)
        self.Audiences = resource.ResourceGenerator(client=self, resource=resource.Audience)
        self.Dimensions = resource.ResourceGenerator(client=self, resource=resource.Dimension)
        self.Schedules = resource.ResourceGenerator(client=self, resource=resource.Schedule)
    def request(self, method, url_parts, headers=None, data=None):
        """ Method for making requests to the Optimizely API

        :param method: one of ALLOWED_REQUESTS ('get'/'post'/'put'/'delete').
        :param url_parts: path segments joined with '/' onto api_base.
        :param headers: optional extra headers.
            NOTE(review): a caller-supplied dict is mutated in place by the
            update() below -- confirm callers do not reuse it.
        :param data: request body forwarded to ``requests``.
        """
        if method in self.ALLOWED_REQUESTS:
            # add request token header
            headers = headers or {}
            # test if Oauth token
            # Keys containing ':' are classic tokens; otherwise treat the
            # key as an OAuth bearer token.
            if ":" in self.api_key:
                headers.update({'Token': self.api_key, 'User-Agent': 'optimizely-client-python/0.1.1'})
            else:
                headers.update({'Authorization': ' Bearer ' + self.api_key, 'User-Agent': 'optimizely-client-python/0.1.1'})
            # make request and return parsed response
            url = urlparse.urljoin(self.api_base, '/'.join([str(url_part) for url_part in url_parts]))
            return self.parse_response(getattr(requests, method)(url, headers=headers, data=data))
        else:
            raise error.BadRequestError('%s is not a valid request type.' % method)
    @staticmethod
    def parse_response(resp):
        """ Method to parse response from the Optimizely API and return results as JSON. Errors are thrown for various
        errors that the API can throw.

        2xx responses are returned as parsed JSON (204 yields None); known
        failure codes raise their matching typed error, anything else the
        generic OptimizelyError.
        """
        if resp.status_code in [200, 201, 202]:
            return resp.json()
        elif resp.status_code == 204:
            return None
        elif resp.status_code == 400:
            raise error.BadRequestError(resp.text)
        elif resp.status_code == 401:
            raise error.UnauthorizedError(resp.text)
        elif resp.status_code == 403:
            raise error.ForbiddenError(resp.text)
        elif resp.status_code == 404:
            raise error.NotFoundError(resp.text)
        elif resp.status_code == 429:
            raise error.TooManyRequestsError(resp.text)
        elif resp.status_code == 503:
            raise error.ServiceUnavailableError(resp.text)
        else:
            raise error.OptimizelyError(resp.text)
| {
"repo_name": "wlowry88/optimizely-client-python",
"path": "optimizely/client.py",
"copies": "1",
"size": "3041",
"license": "mit",
"hash": -8293665903882834000,
"line_mean": 43.0724637681,
"line_max": 124,
"alpha_frac": 0.6445248274,
"autogenerated": false,
"ratio": 4.137414965986395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003047497621706693,
"num_lines": 69
} |
"""All client package classes."""
import os
import re
import shutil
from pathlib import Path
from cli.command_runner import CommandRunner
from cli.exceptions import CliError
def get_all_client_classes():
    """Return every concrete ClientPackage subclass the CLI can operate on."""
    return [PythonPackage, GoPackage, CppPackage]
class ClientPackageMeta:
    """Decorators and the like for defining aspects of the ClientPackage."""
    @staticmethod
    def action(func):
        """Decorator to mark the method as an action that can be taken on this client. Only for instance methods.

        Prints a divider naming the action before delegating to ``func``.
        Fixes over the original: the wrapper now preserves ``func``'s
        metadata (``functools.wraps``) and propagates its return value,
        which was silently dropped before (all wrapped actions returned
        None regardless of what they computed).
        """
        # Local import keeps the module's import surface unchanged.
        from functools import wraps

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            CommandRunner.print_divider(f"{func.__name__}ing the {self.name} client.")
            return func(self, *args, **kwargs)
        return wrapper
class ClientPackage:
    """Representing a single client package to facilitate changing version number and publishing."""
    # Subfolder name of this client inside the onshape-clients repo.
    name = "generic"
    def __init__(
        self, repo=None, source=None, version_regex=r"\d*\.\d*\.\d*[\w-]", command_runner=None,
    ):
        r"""Configure paths and the command runner for one client package.

        :param String repo: root of the onshape-clients checkout; defaults
            to ``CommandRunner.get_onshape_clients_path(Path.cwd())``.
        :param String source: parent directory for generated output;
            defaults to ``get_default_source()``.
        :param String version_regex: regex a version string must satisfy,
            e.g. ``\d*\.\d*\.\d*[\w-]``.
        :param CommandRunner command_runner: shared runner; a new one
            rooted at this client's folder is created when omitted.
        """
        self.repo = (
            Path(repo) if repo else CommandRunner.get_onshape_clients_path(Path.cwd())
        )
        name = self.name
        self.root_path = self.repo / name
        if not source:
            self.source_path = self.get_default_source()
        else:
            self.source_path = Path(source) / name
        self.version_regex = version_regex
        # NOTE(review): on the base class this attribute read falls through
        # to __getattr__ below (returning a no-op function, which is truthy)
        # because ``oas_client_name`` is only declared on the subclasses in
        # this file -- confirm the base class is never instantiated directly.
        self.oas_client_name = self.oas_client_name if self.oas_client_name else name
        self.version_to_publish = None
        self.command_runner = (
            command_runner if command_runner else CommandRunner(cwd=self.root_path)
        )
        self.command_runner.cwd = self.root_path
    def __getattr__(self, name):
        # Unknown attributes resolve to a function that just announces the
        # capability is unsupported -- lets callers invoke any action on any
        # client without per-client hasattr checks.
        def skip_func(*args, **kwargs):
            print(f"'{name}' is not supported by client '{self.name}' - skipping")
        return skip_func
    @ClientPackageMeta.action
    def generate(self):
        """Generate the client with default options. Per client options should be set in
        ./<CLIENT_FOLDER>/openapi_config.json"""
        try:
            self.run(
                f"openapi-generator-cli generate -i ./openapi.json -g {self.oas_client_name} -o {self.source_path} "
                f"-c {self.root_path / 'openapi_config.json'}",
                cwd=self.root_path.parent,
            )
        except Exception:
            raise CliError(
                "Please install openapi-generator-cli by running $onshape-clients setup -tools openapi-generator-cli"
            )
    @ClientPackageMeta.action
    def set_version(self, version="0.0.0"):
        """Record the version to publish; subclasses may also write it into source."""
        self.version_to_publish = version
    def set_version_in_source(
        self,
        version="0.0.0",
        file_path_to_version_identifier=None,
        regex_for_version_number=None,
    ):
        r"""Set the version for clients that include the version number in their source files.
        :param version: String The version to update to: ex "1.1.2"
        :param file_path_to_version_identifier: Path ex "setup.py"
        :param regex_for_version_number: String ex r'version=".*"'"""
        if not all(
            [version, file_path_to_version_identifier, regex_for_version_number]
        ):
            raise CliError("Must specify all parameters.")
        if not version or not re.match(self.version_regex, version):
            raise CliError(
                f"Version specified: {version} does not match regex {self.version_regex}."
            )
        # Rewrite the file in place with the version substituted.
        f = file_path_to_version_identifier.open().read()
        result = re.sub(regex_for_version_number, version, f)
        file_path_to_version_identifier.open(mode="w").write(result)
    def run(self, command, **kwargs):
        """Run a command in the shell for this client."""
        return self.command_runner.run(command, **kwargs)
    def get_default_source(self):
        """Return the default output folder for generated code (~/onshape-clients-codegen/<name>)."""
        destination = Path.home() / 'onshape-clients-codegen' / self.name
        return destination
class CppPackage(ClientPackage):
    """C++ client package; generated via the 'cpp-qt5' OpenAPI generator."""
    name = "cpp"
    oas_client_name = "cpp-qt5"
class GoPackage(ClientPackage):
    """GO client package: generated with the 'go-experimental' OpenAPI
    generator and published as a standalone GitHub repository."""
    name = "go"
    oas_client_name = "go-experimental"
    @ClientPackageMeta.action
    def generate(self):
        """Generate the client with default options. Per client options should be set in
        ./<CLIENT_FOLDER>/openapi_config.json
        The code generation preliminary steps (some are manual (m-) at the moment):
        -- Create/Clean an output folder
        -- Init the git repository
        -- Copy static portion from the source
        -- Add and commit
        -- Run openapi-generator command
        -- m- Review all changed files (git ls-files -m) by running 'git add -p'. Don't forget to git checkout at the end
        -- m- go build -v ./...
        -- m- Run go mod tidy
        ----- The rest of git commands are in publish()
        """
        try:
            if self.command_runner.dry_run:
                print(f"============= Would remove '{self.source_path}' and copy tree from '{self.root_path}'")
            else:
                # Start from a clean output tree seeded with the static
                # (non-JSON) files plus the repository LICENSE.
                if self.source_path.exists():
                    shutil.rmtree(self.source_path)
                shutil.copytree(str(self.root_path), str(self.source_path), ignore=shutil.ignore_patterns('*.json'))
                shutil.copyfile(self.root_path.parent / 'LICENSE', self.source_path / 'LICENSE')
            # Baseline commit so generator changes are reviewable via git.
            self.command_runner.cwd = self.source_path
            self.run("git init")
            self.run("git add .")
            self.run('git commit -m "Initial_commit"')
            self.run(
                f"openapi-generator-cli generate -i ./openapi.json -g {self.oas_client_name} -o {self.source_path / 'onshape'} --type-mappings DateTime=JSONTime "
                f"-c {self.root_path / 'openapi_config.json'}",
                cwd=self.root_path.parent,
            )
        except Exception:
            raise CliError(
                "Please install openapi-generator-cli by running $onshape-clients setup -tools openapi-generator-cli"
            )
    @ClientPackageMeta.action
    def test(self, marker=None):
        """Run the GO test make target from the repository root."""
        self.command_runner.cwd = self.root_path.parent
        self.run('make go-test')
    @ClientPackageMeta.action
    def publish(self):
        """Copy the contents of the GO package to a new Github repo to get distributed to the broader GO community.
        TODO Make sure we get a correct version: probably from openapi_config.json
        """
        dot_git = self.source_path / ".git"
        if not dot_git.exists():
            # Fix: the original constructed this exception without raising it,
            # so publishing silently proceeded on an incomplete repo.
            raise CliError("Trying to publish incomplete repo ...")
        # Commit, tag and force-push the generated tree to the public repo.
        self.command_runner.cwd = self.source_path
        self.run("git add .")
        self.run(f'git commit -m "v{self.version_to_publish}"')
        self.run(f"git tag v{self.version_to_publish}")
        self.run(f"git remote add origin https://{os.environ.get('GH_TOKEN')}@github.com/onshape-public/go-client.git", print_divider=False)
        self.run("git push --set-upstream origin master -f --tags")
        return
class PythonPackage(ClientPackage):
    """Python client package: built in place at the repo root and published to PyPI."""

    name = "python"
    oas_client_name = "python-experimental"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The Python client lives directly at the repo root; no copy step needed.
        self.source_path = self.root_path

    def set_version(
        self, **kwargs,
    ):
        """Rewrite the version number declared in setup.py."""
        return self.set_version_in_source(
            file_path_to_version_identifier=self.root_path / "setup.py",
            regex_for_version_number=r'(?<=version=")' + self.version_regex + r'(?=",)',
            **kwargs,
        )

    @ClientPackageMeta.action
    def publish(self):
        """Build sdist/wheel artifacts and upload them to PyPI with twine.

        Raises CliError if the twine upload fails; the dist directory is
        removed only after a successful upload.
        """
        setup = self.root_path / "setup.py"
        dist = self.root_path / "dist"
        dist.mkdir(exist_ok=True, parents=True)
        self.run(f"python {setup} sdist bdist_wheel -d {str(dist)}", )
        result = self.run(f"twine upload {str(dist)}/*", )
        if result.returncode != 0:
            raise CliError("Error uploading client to pypi.")
        shutil.rmtree(str(dist))

    @ClientPackageMeta.action
    def test(self, marker=None):
        """Run the pytest suite (optionally limited to `marker`) on 8 workers."""
        result = self.run(f"pipenv run pytest {f'-m {marker}' if marker else ''} -n 8")
        if result.returncode != 0:
            raise CliError("Error testing client.")

    @ClientPackageMeta.action
    def install(self):
        """Install this package into the pipenv dev environment."""
        self.run(f"pipenv install {self.root_path} --dev")

    @ClientPackageMeta.action
    def lint(self, fix=False):
        """Lint files. If fix is given, write the files back."""
        # BUG FIX: black's --check flag must be used only when NOT fixing.
        # The condition was inverted: fix=True previously ran check-only and
        # fix=False rewrote files, contradicting the docstring.
        self.run(f"black {self.root_path}{'' if fix else ' --check'}")
| {
"repo_name": "onshape-public/onshape-clients",
"path": "cli/cli/clientPackage.py",
"copies": "1",
"size": "9039",
"license": "mit",
"hash": -7571063695929092000,
"line_mean": 38.3,
"line_max": 162,
"alpha_frac": 0.6079212302,
"autogenerated": false,
"ratio": 3.8694349315068495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9968512746277518,
"avg_score": 0.0017686830858663991,
"num_lines": 230
} |
''' All CLI hooks are handled through here. '''
import click
from pyfiglet import Figlet
from lib import __version__
from lib.logger import init_logger
from lib.commands.deploy import Deploy
from lib.commands.invite import Invite
from lib.commands.register import Register
REGISTER_HELP_TEXT = 'Flag to indicate if a user is to be registered.'
DEPLOY_HELP_TEXT = 'Release type [qa, uat, dev].\
This affects the audience that receives notice of this release.\
Default value of "dev" is assumed'
INVITATION_HELP_TEXT = 'Role to register the user under [qa, uat, dev].\
This affects how they receive updates regarding releases.\
Default value of "dev" is assumed.'
INVALID_ROLE = 'Role {0} is not valid. Please use one of ["qa", "uat", "dev"] '
INVALID_DEPLOY_TYPE = 'Please use "uat", "qa" or "dev" as the deploy type'
INVALID_EMAIL = '"{0}" is not a valid email.'
NOCONFIRM_HELP = 'Don\'t ask for confirmation'
# Clear the screen.
click.clear()
# Show a ASCII art on the screen.
print(Figlet().renderText('HARBOR'))
# Initalize logger.
init_logger()
@click.version_option(__version__, message='%(version)s')
@click.group()
def cli():
    """CLI for the Harbor application."""
@click.command()
@click.option('--user', is_flag=True, help=REGISTER_HELP_TEXT)
def register(user):
    """Register your project/user on the server."""
    command = Register(user)
    command.execute()
@click.command()
@click.option('--deploy-type', help=DEPLOY_HELP_TEXT)
# BUG FIX: --noconfirm is a yes/no switch ("Don't ask for confirmation"); it
# must be declared with is_flag=True, otherwise click expects a value after it.
@click.option('--noconfirm', is_flag=True, help=NOCONFIRM_HELP)
def deploy(deploy_type, noconfirm):
    ''' Deploy your project once it has been registered. '''
    # NOTE(review): `noconfirm` is accepted but never forwarded to Deploy —
    # confirm whether Deploy should receive it.
    Deploy(deploy_type).execute()
@click.command()
@click.argument('email')
@click.option('--role', help=INVITATION_HELP_TEXT)
def invite(email, role):
    """Invite someone to the project."""
    command = Invite(email, role)
    command.execute()
# Attach the subcommands to the top-level group.
cli.add_command(register)
cli.add_command(deploy)
cli.add_command(invite)
| {
"repo_name": "srishanbhattarai/Harbor-CLI",
"path": "packages/cli/lib/cli.py",
"copies": "1",
"size": "1938",
"license": "mpl-2.0",
"hash": -2728333397353033700,
"line_mean": 28.3636363636,
"line_max": 79,
"alpha_frac": 0.6991744066,
"autogenerated": false,
"ratio": 3.3763066202090593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4575481026809059,
"avg_score": null,
"num_lines": null
} |
__all__ = [ "Cmd" ]
import subprocess
import image
import tempfile
class Cmd(object):
    """
    Generic wrapper for command line applications using medical images.

    NOTE: Python 2 code (uses print statements). Image-valued arguments and
    parameters are serialized to temporary .nii files before the command is
    built.
    """
    def __init__( self, program, *args, **kwargs ):
        # program: executable name; args: positional CLI arguments;
        # kwargs: flag-style parameters emitted as "-name value".
        self.program = program
        self.arguments = list(args) # list
        self.parameters = kwargs # dict
    def _ImageToFile( self, img ):
        # Write the image to a temp .nii file and return the path.
        # NOTE(review): the mkstemp file handle is never closed — possible
        # descriptor leak; confirm whether image.imwrite reuses the path only.
        (filehandle, tmp_file) = tempfile.mkstemp(suffix=".nii",dir=image.tmp_dir)
        image.imwrite( tmp_file, img )
        return tmp_file
    def _to_list(self):
        # Build the argv list, converting any image.Image into a file path.
        cmd_list = [ self.program ]
        if self.arguments is not None:
            for arg in self.arguments:
                if isinstance( arg, image.Image):
                    arg = self._ImageToFile(arg)
                cmd_list.append( arg )
        if self.parameters is not None:
            for name, value in self.parameters.items():
                cmd_list.append( '-' + name )
                if isinstance(value, str):
                    cmd_list.append(value)
                else:
                    # Non-string values are treated as iterables of arguments.
                    for arg in value:
                        if isinstance( arg, image.Image):
                            arg = self._ImageToFile(arg)
                        cmd_list.append( arg )
        return cmd_list
    def __str__( self ):
        # Human-readable command line.
        return ' '.join(self._to_list())
    def __repr__( self ):
        return str(self)
    def __call__( self, *args, **kwargs ):
        # Extend stored arguments/parameters, then run the command.
        if args is not None:
            self.arguments.extend( args )
        if kwargs is not None:
            self.parameters.update( kwargs )
        proc = subprocess.Popen( self._to_list() )
        (out, err) = proc.communicate()
        # NOTE(review): Popen is created without stdout/stderr pipes, so
        # communicate() returns (None, None) and these prints never fire —
        # confirm whether PIPE capture was intended.
        if out is not None:
            print out
        if err is not None:
            print err
        return
| {
"repo_name": "sk1712/IRTK",
"path": "wrapping/cython/irtk/cmd.py",
"copies": "5",
"size": "1801",
"license": "bsd-3-clause",
"hash": -192358063372776220,
"line_mean": 32.9811320755,
"line_max": 82,
"alpha_frac": 0.5130483065,
"autogenerated": false,
"ratio": 4.288095238095238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7301143544595238,
"avg_score": null,
"num_lines": null
} |
__all__ = ('CMD_STATUS', 'CMD_STATUS_NAME', 'CMD_BLOCKED', 'CMD_READY',
           'CMD_ASSIGNED', 'CMD_RUNNING', 'CMD_FINISHING', 'CMD_DONE',
           'CMD_ERROR', 'CMD_CANCELED', 'CMD_TIMEOUT',
           'isFinalStatus', 'isRunningStatus')

# Numeric command states 0..8.  The chained tuple-unpacking assignment keeps
# CMD_STATUS and the individual constants in sync by construction.
CMD_STATUS = (CMD_BLOCKED,
              CMD_READY,
              CMD_ASSIGNED,
              CMD_RUNNING,
              CMD_FINISHING,
              CMD_DONE,
              CMD_TIMEOUT,
              CMD_ERROR,
              CMD_CANCELED) = range(9)

# Human-readable names, indexable by status value.
CMD_STATUS_NAME = ('BLOCKED', 'READY', 'ASSIGNED', 'RUNNING', 'FINISHING',
                   'DONE', 'TIMEOUT', 'ERROR', 'CANCELED')

# One-letter codes, indexable by status value.
CMD_STATUS_SHORT_NAMES = ("B", "I", "A", "R", "F", "D", "T", "E", "C")

# States a command can never leave.
_FINAL_STATUSES = (CMD_DONE, CMD_ERROR, CMD_CANCELED, CMD_TIMEOUT)
# States meaning the command is assigned to or executing on a worker.
_RUNNING_STATUSES = (CMD_RUNNING, CMD_FINISHING, CMD_ASSIGNED)


def isFinalStatus(status):
    """Return True if `status` is terminal (done, error, canceled, timeout)."""
    return status in _FINAL_STATUSES


def isRunningStatus(status):
    """Return True if `status` denotes an assigned or executing command."""
    return status in _RUNNING_STATUSES
| {
"repo_name": "smaragden/OpenRenderManagement",
"path": "src/octopus/core/enums/command.py",
"copies": "2",
"size": "1065",
"license": "bsd-3-clause",
"hash": -4411505830250308000,
"line_mean": 29.3235294118,
"line_max": 71,
"alpha_frac": 0.4563380282,
"autogenerated": false,
"ratio": 3.5738255033557045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030163531555704,
"avg_score": null,
"num_lines": null
} |
__all__ = ['cochain','Cochain','d','star','delta','laplace_beltrami','laplace_derham']
from scipy import sparse
from pydec.mesh import Simplex
class cochain:
    """
    Represents a cochain associated with a simplical complex

    The v member of the cochain is left uninitialized.  This allows functions like
    d(.) and star(.) to operate on single cochains, or groups of cochains together.
    The values associated with each cochain are stored in the columns of v.  This is
    especially useful when v is the identity, and thus represents a basis for all cochains.
    Applying operations to this cochain basis allows one to automatically compose the
    associated matrix operators.

    Use the get_cochain() and get_cochain_basis() members of the SimplicialComplex class to
    safely avoid issues with v.
    """
    def __init__(self,complex,dimension,is_primal):
        # complex: the simplicial complex this cochain lives on.
        self.complex = complex
        # k: the cochain's dimension; n: the complex's top dimension.
        self.k = dimension
        self.n = complex.complex_dimension()
        self.is_primal = is_primal
        # Values are deliberately left unset; see class docstring.
        self.v = None

    def __add__(self,other):
        # Cochains may only be added within the same dimension and complex.
        assert(self.k == other.k and self.complex == other.complex)
        f = cochain(self.complex,self.k,self.is_primal)
        f.v = self.v + other.v
        return f

    def __sub__(self,other):
        # Cochains may only be subtracted within the same dimension and complex.
        assert(self.k == other.k and self.complex == other.complex)
        f = cochain(self.complex,self.k,self.is_primal)
        f.v = self.v - other.v
        return f

    def __getitem__(self,key):
        # Simplex keys are looked up by index; the stored value is negated
        # when the key's orientation (parity) disagrees with the stored one.
        if isinstance(key,Simplex):
            data = self.complex[len(key) - 1]
            index = data.simplex_to_index[key]
            value = self.v.__getitem__(index)
            if key.parity == data.simplex_parity[index]:
                return value
            else:
                return -value
        else:
            # Plain integer/slice keys index v directly.
            return self.v.__getitem__(key)

    def __setitem__(self,key,value):
        # Mirror of __getitem__: store the negated value when the key's
        # orientation disagrees with the canonical parity.
        if isinstance(key,Simplex):
            data = self.complex[len(key) - 1]
            index = data.simplex_to_index[key]
            if key.parity == data.simplex_parity[index]:
                self.v.__setitem__(index,value)
            else:
                self.v.__setitem__(index,-value)
        else:
            self.v.__setitem__(key,value)

    def __str__(self):
        return 'cochain(k='+str(self.k) + ',n=' + str(self.n) + ',is_primal=' + str(self.is_primal) + '\n' + str(self.v) + ')'
def d(f):
    """
    Implements the discrete exterior derivative d(.)

    Accepts a cochain and returns the discrete d applied to the cochain.
    The result lives one dimension up, on the same (primal/dual) side.
    """
    if f.is_primal:
        df = cochain(f.complex, f.k + 1, f.is_primal)
        if f.k == -1:
            # d of a (-1)-cochain maps into 0-cochains: a zero operator of
            # the correct shape.
            df.v = sparse.csr_matrix((f.complex[0].num_simplices,1)) * f.v
        elif f.k < -1 or f.k > f.n + 1:
            # Out-of-range dimensions get a degenerate zero operator.
            df.v = sparse.csr_matrix((1,1)) * f.v
        else:
            df.v = f.complex[f.k].d * f.v
        return df
    else:
        df = cochain(f.complex,f.k + 1,f.is_primal)
        if f.k == -1:
            df.v = sparse.csr_matrix((f.complex[f.n].num_simplices,1)) * f.v
        elif f.k < -1 or f.k > f.n + 1:
            df.v = sparse.csr_matrix((1,1)) * f.v
        else:
            # On the dual side d is the (signed) primal boundary operator in
            # complementary dimension.
            df.v = f.complex[f.n - f.k].boundary * f.v
            df.v *= (-1) ** f.k
        return df
def star(f):
    """
    Implements the discrete Hodge star *(.)

    Accepts a cochain and returns the Hodge star applied to the cochain.
    The result always lives in the complementary dimension on the opposite
    (primal/dual) side.
    """
    result = cochain(f.complex, f.n - f.k, not f.is_primal)
    if f.k == -1 or f.k == f.n + 1:
        # Outside the complex's dimension range the star acts as identity.
        result.v = f.v
    elif f.is_primal:
        result.v = f.complex[f.k].star * f.v
    else:
        result.v = f.complex[f.n - f.k].star_inv * f.v
    return result
def delta(f):
    r"""
    Implements the discrete codifferential \delta(.)

    Accepts a cochain and returns the codifferential of the cochain
    """
    sdsf = star(d(star(f)))
    # Standard sign convention for the codifferential on an n-complex.
    sdsf.v *= (-1)**(f.n*(f.k-1)+1)
    return sdsf
def laplace_derham(f):
    r"""
    Implements the discrete Laplace-de Rham \Delta(.)

    Accepts a cochain and returns the Laplace-de Rham of the cochain,
    i.e. d(\delta f) + \delta(d f).
    """
    return d(delta(f)) + delta(d(f))
def laplace_beltrami(f):
    r"""
    Implements the discrete Laplace-Beltrami \Delta(.) = \delta d

    Accepts a cochain and returns the Laplace-Beltrami of the cochain

    In the case of 0-forms, the second term of the Laplace-de Rham d(\delta(.)) is 0
    so the Laplace-Beltrami and Laplace-de Rham will be the same.
    """
    return delta(d(f))
#for backwards compatibility
Cochain = cochain
| {
"repo_name": "whereisravi/pydec",
"path": "pydec/dec/cochain.py",
"copies": "7",
"size": "4875",
"license": "bsd-3-clause",
"hash": 6284509607934738000,
"line_mean": 33.5744680851,
"line_max": 126,
"alpha_frac": 0.5532307692,
"autogenerated": false,
"ratio": 3.3482142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7401445054914285,
"avg_score": null,
"num_lines": null
} |
import numpy as np
def arc_distance(lon0=None, lat0=None, lon1=None, lat1=None, R=1.,
                 input_coords='radians'):
    """
    Gets the arc distance between (lon0, lat0) and (lon1, lat1).

    Either pair can be a pair or an array. R is the radius of the
    sphere. Uses the formula from the spherical law of cosines.

    `input_coords` specifies whether the inputs are in radians (default)
    or degrees.

    Returns arc distance (in the units of R).
    """
    if input_coords == 'degrees':
        lon0, lat0 = np.radians(lon0), np.radians(lat0)
        lon1, lat1 = np.radians(lon1), np.radians(lat1)

    # spherical law of cosines
    aa = np.arccos(np.sin(lat0) * np.sin(lat1)
                   + np.cos(lat0) * np.cos(lat1)
                   * np.cos(lon1 - lon0))
    return aa * R


def azimuth(lon0=None, lat0=None, lon1=None, lat1=None,
            input_coords='radians'):
    """
    Returns the azimuth between (lon0, lat0) and (lon1, lat1). For plate
    velocity calculations, (lon1, lat1) should be the pole while (lon0, lat0)
    should be the site(s). Either pair can be an array.

    Arguments:
    lon0, lat0: Longitude and latitude of the site.
    lon1, lat1: Longitude and latitude of the pole or of the second
                set of points.

    `input_coords` specifies whether the inputs are in radians (default)
    or degrees.
    """
    if input_coords == 'degrees':
        lon0, lat0 = np.radians(lon0), np.radians(lat0)
        lon1, lat1 = np.radians(lon1), np.radians(lat1)

    aa = arc_distance(lat1=lat1, lon1=lon1, lat0=lat0, lon0=lon0)
    C = np.arcsin(np.cos(lat1) * np.sin(lon1 - lon0) / np.sin(aa))
    # Reflect the azimuth when the site lies poleward of the target point.
    if np.isscalar(C):
        if lat0 > lat1:
            C = np.pi - C
    else:
        low_lats = (lat0 > lat1)
        if np.any(low_lats):
            # BUG FIX: only the masked entries may be reflected. The previous
            # code assigned `np.pi - C` (the full-size array) into the masked
            # slots, which raises a shape mismatch whenever only some sites
            # satisfy lat0 > lat1.
            C[low_lats] = np.pi - C[low_lats]
    return C
def get_v(rotation_rate, aa, radius=6371000,
          return_mm = False):
    """Linear velocity at arc distance `aa` from an Euler pole rotating at
    `rotation_rate`, on a sphere of the given `radius` (meters by default).
    When `return_mm` is True the result is scaled by 1e3."""
    speed = rotation_rate * radius * np.sin(aa)
    if return_mm == True:
        return speed * 1e3
    return speed
def get_beta(C):
    """Velocity bearing beta: the azimuth C rotated by a quarter turn."""
    quarter_turn = np.pi / 2
    return quarter_turn + C
def get_ve_vn_from_v_beta(v, beta, return_mm = False):
    """Decompose speed `v` at bearing `beta` (radians) into east/north parts.

    v : scalar or array speed
    beta : bearing in radians
    return_mm : when True, scale the result by 1e3

    Returns (ve, vn).
    """
    # BUG FIX: the previous `v *= 1e3` mutated array arguments in place,
    # silently corrupting the caller's data; scale into a new value instead.
    if return_mm == True:
        v = v * 1e3
    vn = v * np.cos(beta)
    ve = v * np.sin(beta)
    return ve, vn
def get_v_beta_from_euler(lat1=None, lat0=None, lon1=None,
                          lon0=None, rotation_rate=None):
    """Velocity magnitude and bearing at a site (lon0, lat0) for an Euler
    pole at (lon1, lat1) rotating at `rotation_rate`."""
    arc = arc_distance(lat1=lat1, lat0=lat0, lon1=lon1, lon0=lon0)
    azi = azimuth(lat1=lat1, lon1=lon1, lon0=lon0, lat0=lat0)
    speed = get_v(rotation_rate, arc)
    bearing = get_beta(azi)
    return speed, bearing
def get_v_az(ve, vn):
    """Velocity azimuth from east (`ve`) and north (`vn`) components."""
    quarter_turn = np.pi / 2
    return quarter_turn + np.arctan2(vn, ve)
def angle_difference(angle1, angle2, return_abs = False, units = 'radians'):
    """Smallest signed difference angle2 - angle1, wrapped into [-pi, pi].

    units : 'radians' (default) or 'degrees' for the *inputs*.
    return_abs : when True, return the absolute difference.

    NOTE(review): the result is always in radians, even for degree inputs —
    confirm with callers whether degree output was intended.
    """
    if units == 'degrees':
        angle1 = np.radians(angle1)
        angle2 = np.radians(angle2)
    if np.isscalar(angle1) and np.isscalar(angle2):
        diff = angle_difference_scalar(angle1, angle2)
    else:
        diff = angle_difference_vector(angle1, angle2)
    # Dead code removed: the inputs were re-converted back to degrees here,
    # which had no effect on the value returned.
    return diff if return_abs == False else np.abs(diff)
def angle_difference_scalar(angle1, angle2):
    """Signed difference angle2 - angle1 wrapped into [-pi, pi] (scalars)."""
    two_pi = 2 * np.pi
    delta = angle2 - angle1
    while delta < -np.pi:
        delta += two_pi
    while delta > np.pi:
        delta -= two_pi
    return delta
def angle_difference_vector(angle1_vec, angle2_vec):
    """Element-wise signed difference angle2 - angle1 wrapped into [-pi, pi]."""
    a1 = np.array(angle1_vec)
    a2 = np.array(angle2_vec)
    delta = a2 - a1
    two_pi = 2 * np.pi
    # Single-pass wrap, matching the scalar version for inputs within one turn.
    delta[delta < -np.pi] += two_pi
    delta[delta > np.pi] -= two_pi
    return delta
def add_poles(lon_cb=0., lat_cb=0., rot_cb=0.,
              lon_ba=0., lat_ba=0., rot_ba=0.,
              input_units='degrees', output_units='degrees'):
    '''
    Calculates the Euler pole and rotation rate for plate C relative to
    plate A based on the CB and BA rotation information.

    Returns (lon_ca, lat_ca, rot_ca) in `output_units`.
    '''
    if input_units == 'degrees':
        lon_cb = np.radians(lon_cb)
        lat_cb = np.radians(lat_cb)
        rot_cb = np.radians(rot_cb)
        lon_ba = np.radians(lon_ba)
        lat_ba = np.radians(lat_ba)
        rot_ba = np.radians(rot_ba)
    #TODO: put check for zero rotation rates here
    # Degenerate cases: if one rotation is zero the sum is the other pole.
    if rot_cb == 0. and rot_ba != 0.:
        lon_ca, lat_ca, rot_ca = lon_ba, lat_ba, rot_ba
    elif rot_ba == 0. and rot_cb != 0.:
        lon_ca, lat_ca, rot_ca = lon_cb, lat_cb, rot_cb
    elif rot_ba == 0. and rot_cb == 0.:
        # consider raising ValueError
        lon_ca, lat_ca, rot_ca = 0., 0., 0.
    else:
        # General case: sum the two rotation vectors in Cartesian form,
        # then convert the resultant back to (lon, lat, rate).
        x_ca = (rot_cb * np.cos(lat_cb) * np.cos(lon_cb)
                +rot_ba * np.cos(lat_ba) * np.cos(lon_ba))
        y_ca = (rot_cb * np.cos(lat_cb) * np.sin(lon_cb)
                +rot_ba * np.cos(lat_ba) * np.sin(lon_ba))
        z_ca = rot_cb * np.sin(lat_cb) + rot_ba * np.sin(lat_ba)
        rot_ca = np.sqrt(x_ca**2 + y_ca**2 + z_ca**2)
        lat_ca = np.arcsin(z_ca / rot_ca)
        lon_ca = np.arctan2( y_ca , x_ca )
    if output_units == 'degrees':
        return np.degrees((lon_ca, lat_ca, rot_ca))
    else:
        return lon_ca, lat_ca, rot_ca
| {
"repo_name": "cossatot/partial_plates",
"path": "partial_plates/eulers.py",
"copies": "1",
"size": "5319",
"license": "mit",
"hash": -7673525483100078000,
"line_mean": 27.1428571429,
"line_max": 77,
"alpha_frac": 0.5775521715,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40775521714999996,
"avg_score": null,
"num_lines": null
} |
""" All code in this project follows the tutorials provided in the Simple RL series, part of which is found here:
https://medium.com/@awjuliani/super-simple-reinforcement-learning-tutorial-part-2-ded33892c724#.f0442wu0m """
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xi
# Import the OpenAi Gym environment
import gym
env = gym.make('CartPole-v0')
"""
# Trying out random episodes
env.reset()
random_episodes = 0
reward_sum = 0
# Execute episodes of random bahaviour
while random_episodes < 10:
env.render()
observation, reward, done, _ = env.step(np.random.randint(0, 2))
reward_sum += reward
# Check if episode has terminated
if done:
random_episodes += 1
print('Reward for this episode was %.4f' % reward_sum)
# Reset environment and tracking variables
reward_sum = 0
env.reset()
"""
# Declare hyperparameters for the agent network and helper functions
HIDDEN_SIZE = 8      # units in the single hidden layer
LR = 1e-2            # Adam learning rate
GAMMA = 0.99         # reward discount factor
INPUT_DIMS = 4       # CartPole observation size
NUM_ACTIONS = 2      # CartPole action space size
# Define training parameters
total_episodes = 5000
max_steps = 999
update_frequency = 5  # episodes between gradient applications
# Define helper function to calculate discounted rewards
def discount_rewards(r, gamma=None):
    """Calculates discounted on-policy rewards.

    r : 1-D array of per-step rewards.
    gamma : discount factor; defaults to the module-level GAMMA so existing
        callers are unaffected (the constant was previously hard-coded).

    Returns an array of the same shape where each entry is the discounted
    sum of that step's reward and all subsequent rewards.
    """
    if gamma is None:
        gamma = GAMMA
    discounted_r = np.zeros_like(r)
    running_add = 0
    # Walk backwards so each step accumulates its discounted future return.
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
# Define agent class
class PolicyGradAgent(object):
    """Simple RL agent trained on policy (REINFORCE-style policy gradient).

    Builds a two-layer TF1 graph mapping observations to action
    probabilities, plus ops for accumulating and applying gradients.
    """
    def __init__(self, input_dims, hidden_size, num_actions, learning_rate):
        # Initialize args
        self.input_dims = input_dims
        self.hidden_size = hidden_size
        self.num_actions = num_actions
        self.learning_rate = learning_rate
        # Placeholder for observations, actions, rewards
        self.state = tf.placeholder(shape=[None, self.input_dims], dtype=tf.float32, name='state')
        self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32, name='actions')
        self.reward_holder = tf.placeholder(shape=[None], dtype=tf.float32, name='rewards')
        # Define layers etc
        with tf.variable_scope('layer_1'):
            w_1 = tf.get_variable(shape=[self.input_dims, self.hidden_size], dtype=tf.float32, initializer=xi(),
                                  name='weight_1')
            o_1 = tf.nn.relu(tf.matmul(self.state, w_1), name='out_1')
        with tf.variable_scope('layer_2'):
            w_2 = tf.get_variable(shape=[self.hidden_size, self.num_actions], dtype=tf.float32, initializer=xi(),
                                  name='weight_2')
            self.probabilities = tf.nn.softmax(tf.matmul(o_1, w_2), name='probabilities')
        # Loss computation
        with tf.variable_scope('loss'):
            # Flatten the [batch, actions] probabilities and pick, for each
            # step, the probability of the action actually taken.
            indices = tf.range(0, tf.shape(self.probabilities)[0]) * tf.shape(self.probabilities)[1] + \
                      self.action_holder
            responsible_outputs = tf.gather(tf.reshape(self.probabilities, [-1]), indices)
            # Negative log-likelihood weighted by discounted rewards.
            self.loss = - tf.reduce_mean(tf.multiply(tf.log(responsible_outputs), self.reward_holder))
        self.t_vars = tf.trainable_variables()
        # Instantiate gradient holders (one placeholder per trainable var) so
        # gradients can be accumulated across episodes before applying.
        self.gradient_holders = list()
        for _idx, var in enumerate(self.t_vars):
            placeholder = tf.placeholder(dtype=tf.float32, name=str(_idx) + '_holder')
            self.gradient_holders.append(placeholder)
        # Obtain gradients
        self.gradients = tf.gradients(self.loss, self.t_vars)
        # Optimize
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders, self.t_vars))
# Initialize the agent
agent = PolicyGradAgent(INPUT_DIMS, HIDDEN_SIZE, NUM_ACTIONS, LR)

# Run the session: REINFORCE training loop with gradient accumulation.
with tf.Session() as sess:
    # Initialize graph variables
    sess.run(tf.global_variables_initializer())
    # Tracking variables
    e = 0
    total_reward = list()
    # Initialize gradient buffer with zeroed copies of the trainable variables
    grad_buffer = sess.run(tf.trainable_variables())
    for idx, grad in enumerate(grad_buffer):
        grad_buffer[idx] = grad * 0
    # Initiate training loop
    while e < total_episodes:
        # BUG FIX: guard the empty-history case — indexing total_reward[-1]
        # on the very first episode raised IndexError.
        if total_reward and total_reward[-1] > 180:
            env.render()
        curr_state = env.reset()
        solved = False
        running_reward = 0
        episode_history = list()
        for j in range(max_steps):
            # Choose action
            curr_state = np.reshape(curr_state, [1, INPUT_DIMS])
            action_distribution = sess.run(agent.probabilities, feed_dict={agent.state: curr_state})
            # Add some randomness to the choice for exploration
            action_value = np.random.choice(action_distribution[0], p=action_distribution[0])
            match = np.square(action_distribution - action_value)
            action = np.argmin(match)
            # Perform step and memorize transition
            next_state, reward, done, info = env.step(action)
            episode_history.append([curr_state, action, reward, next_state])
            curr_state = next_state
            running_reward += reward
            # Check if episode has completed, then update network
            if done:
                episode_history = np.array(episode_history)
                # Discount rewards
                episode_history[:, 2] = discount_rewards(episode_history[:, 2])
                feed_dict = {agent.reward_holder: episode_history[:, 2],
                             agent.action_holder: episode_history[:, 1],
                             agent.state: np.vstack(episode_history[:, 0])}
                # Get gradient for current episode
                gradients = sess.run(agent.gradients, feed_dict=feed_dict)
                # Append to grad buffer
                for idx, grad in enumerate(gradients):
                    grad_buffer[idx] = grad_buffer[idx] + grad
                if e % update_frequency == 0 and e != 0:
                    feed_dict = dict(zip(agent.gradient_holders, grad_buffer))
                    _ = sess.run(agent.update_batch, feed_dict=feed_dict)
                    for idx, grad in enumerate(grad_buffer):
                        grad_buffer[idx] = grad * 0
                # Report some results
                # BUG FIX: the modulus must be compared against zero; the bare
                # truthy remainder inverted the schedule, reporting on every
                # episode EXCEPT multiples of update_frequency * 10.
                if e % (update_frequency * 10) == 0 and e != 0:
                    avg_r = np.mean(total_reward[-update_frequency * 10:])
                    print('Average episode reward since last check: %.4f' % avg_r)
                    if avg_r >= 200:
                        print('Solution found in %d steps' % len(total_reward))
                        solved = True
                total_reward.append(running_reward)
                # Terminate current episode loop following completion or after max_steps
                break
        e += 1
        if solved:
            print('Total episodes played: %d' % e)
            break
| {
"repo_name": "demelin/learning_reinforcement_learning",
"path": "policy_gradient_agent.py",
"copies": "1",
"size": "7149",
"license": "mit",
"hash": 5935696233843110000,
"line_mean": 38.938547486,
"line_max": 113,
"alpha_frac": 0.6034410407,
"autogenerated": false,
"ratio": 3.9453642384105962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010969938531469937,
"num_lines": 179
} |
# Copyright (c) 2017 Giorgio Gonnella and CONTRIBUTORS
# Copyright (c) 2017 Center for Bioinformatics, University of Hamburg
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import argparse
import sys
import random
# Command-line interface for the random GFA1/GFA2 graph generator.
op = argparse.ArgumentParser(description=__doc__)
op.add_argument("--segments", "-s", type=int,
                help="number of segments", required=True)
op.add_argument("--slen", "-l", type=int, default=100,
                # BUG FIX: user-facing help text typo "lenght" -> "length".
                help="length of segments sequence")
op.add_argument("--with-sequence", "-w", action="store_true")
op.add_argument("--dovetails-per-segment", "-d",
                help="average number of dovetail edges per segment",
                default=2.0, type=float)
op.add_argument('--gfa-version', "-g", default="gfa1",
                help="gfa version", choices=("gfa1", "gfa2"))
op.add_argument('--version', action='version', version='%(prog)s 1.0')
opts = op.parse_args()
# Validate numeric options, exiting with a diagnostic on nonsense values.
if opts.segments < 0:
    sys.stderr.write("Error: the number of segments must be "+
                     ">= 0 ({})\n".format(opts.segments))
    exit(1)
if opts.dovetails_per_segment < 0:
    sys.stderr.write("Error: the average number of dovetails per segment must "+
                     "be >= 0 ({})\n".format(opts.dovetails_per_segment))
    exit(1)
if opts.slen <= 0:
    sys.stderr.write("Error: the length of segments sequence must be > 0"+
                     " ({})\n".format(opts.slen))
    exit(1)
# Emit the GFA header line for the requested spec version.
if opts.gfa_version == "gfa1":
    print("H\tVN:Z:1.0")
else:
    print("H\tVN:Z:2.0")
def random_sequence(slen):
    """Return a uniformly random DNA string of length `slen`."""
    return "".join(random.choice('ACGT') for _ in range(slen))
# Emit one S (segment) line per segment, in the requested GFA dialect.
for i in range(opts.segments):
    if opts.with_sequence:
        sequence = random_sequence(opts.slen)
    else:
        sequence = "*"
    if opts.gfa_version == "gfa1":
        print("S\ts{}\t{}\tLN:i:{}".format(i, sequence, opts.slen))
    else:
        print("S\ts{}\t{}\t{}".format(i, opts.slen, sequence))
# Generate unique random dovetail edges; `edges` tracks (from, from_or,
# to, to_or) combinations already emitted so duplicates are re-drawn.
n_dovetails = int(opts.segments * opts.dovetails_per_segment)
edges = {}
for i in range(n_dovetails):
    edge = False
    while not edge:
        s_from = random.randint(0, opts.segments-1)
        s_from_or = random.choice('+-')
        s_to = random.randint(0, opts.segments-1)
        s_to_or = random.choice('+-')
        if s_from not in edges:
            edges[s_from] = {'+': {}, '-': {}}
        if s_to not in edges[s_from][s_from_or]:
            edges[s_from][s_from_or][s_to] = {'+': False, '-': False}
        if not edges[s_from][s_from_or][s_to][s_to_or]:
            edges[s_from][s_from_or][s_to][s_to_or] = True
            edge = True
    # Overlap is 10% of the segment length, at least 1 base.
    ovlen = opts.slen//10
    if ovlen == 0: ovlen = 1
    cigar = "{}M".format(ovlen)
    if opts.gfa_version == "gfa1":
        print("L\ts{}\t{}\ts{}\t{}\t{}\tID:Z:e{}".format(s_from, s_from_or, s_to,
                                                         s_to_or, cigar, i))
    else:
        # GFA2 E lines need explicit begin/end coordinates; a trailing '$'
        # marks a coordinate at the segment's end.
        s_from_begin = opts.slen - ovlen if s_from_or == "+" else 0
        s_from_end = "{}$".format(opts.slen) if s_from_or == "+" else ovlen
        s_to_begin = opts.slen - ovlen if s_to_or == "-" else 0
        s_to_end = "{}$".format(opts.slen) if s_to_or == "-" else ovlen
        print("E\te{}\ts{}{}\ts{}{}\t{}\t{}\t{}\t{}\t{}".format(
            i, s_from, s_from_or, s_to, s_to_or, s_from_begin, s_from_end,
            s_to_begin, s_to_end, cigar))
| {
"repo_name": "AlgoLab/pygfa",
"path": "benchmark/randomgraph.py",
"copies": "1",
"size": "4313",
"license": "mit",
"hash": 7196575962166185000,
"line_mean": 41.2843137255,
"line_max": 81,
"alpha_frac": 0.6116392302,
"autogenerated": false,
"ratio": 3.2066914498141266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9306760531998548,
"avg_score": 0.0023140296031158012,
"num_lines": 102
} |
# All code provided from the http://gengo.com site, such as API example code
# and libraries, is provided under the New BSD license unless otherwise
# noted. Details are below.
#
# New BSD License
# Copyright (c) 2009-2012, myGengo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of myGengo, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
from distutils.core import Command
from setuptools import setup
from setuptools import find_packages
from subprocess import call
__version__ = '1.3.4'
# Command based on Libcloud setup.py:
# https://github.com/apache/libcloud/blob/trunk/setup.py
class Pep8Command(Command):
    # Custom setup.py command: `python setup.py pep8` lints the gengo package.
    # NOTE(review): distutils is deprecated/removed in modern Python — confirm
    # whether this should migrate to a setuptools-native command.
    description = "Run pep8 script"
    user_options = []

    def initialize_options(self):
        # Required by the distutils Command interface; nothing to set up.
        pass

    def finalize_options(self):
        # Required by the distutils Command interface; nothing to finalize.
        pass

    def run(self):
        # Fail early with a helpful message when pep8 is not installed.
        try:
            import pep8
            pep8  # reference the module so the import is not flagged as unused
        except ImportError:
            print ('Missing "pep8" library. You can install it using pip: '
                   'pip install pep8')
            sys.exit(1)

        # Lint the gengo/ package and propagate pep8's exit code.
        cwd = os.getcwd()
        retcode = call(('pep8 %s/gengo/' % (cwd)).split(' '))
        sys.exit(retcode)
setup(
    # Basic package information.
    name='gengo',
    version=__version__,
    packages=find_packages(),

    # Packaging options.
    include_package_data=True,

    # Package dependencies.
    install_requires=['requests'],

    # Metadata for PyPI.
    author='Gengo',
    author_email='api@gengo.com',
    # NOTE(review): license says LGPL while the file header and classifier
    # below say New BSD / BSD 3-Clause — confirm the intended license.
    license='LGPL License',
    url='https://github.com/myGengo/mygengo-python',
    keywords='gengo translation language api',
    description='Official Python library for interfacing with the Gengo API.',
    long_description=open('README.md').read(),
    # Expose the custom pep8 lint command defined above.
    cmdclass={
        'pep8': Pep8Command,
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD 3-Clause License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet'
    ]
)
| {
"repo_name": "shawnps/mygengo-python",
"path": "setup.py",
"copies": "2",
"size": "3416",
"license": "bsd-3-clause",
"hash": -7008599280766652000,
"line_mean": 33.16,
"line_max": 78,
"alpha_frac": 0.7019906323,
"autogenerated": false,
"ratio": 4.3076923076923075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6009682939992308,
"avg_score": null,
"num_lines": null
} |
#All code you gonna code in the terminal or W.ide
"""NUMBERS
The first type is integer, Example:
1, 5, 9, 10
type(1)
type(5)
The second type is float, Example:
3.14, 5.13e-2, 62.07, 1.0
type(1.0)
type(3.14)
type(5.13e-2)
The third type is complex, Example:
2 + 1j, 4.18 + 5j, 9 + 2.17j
type(2 + 3j)
type(4.18 + 5j)
Strings
The text or characters need to be enclosed in quotes ' '
Example: 'Hello World'
type('Hello World')
Booleans
This are the text True and False
Example:
type(True)
type(False)
Warning: booleans are case-sensitive — True and False are booleans, but true and false are undefined names (the same applies to False vs false and True vs true)
Lists
These are ordered collections declared with the syntax name_of_list = []
Example:
new_list = [1, 3.14, True, "Hello World"]
type(new_list)
Tuples
Similar like list but the syntax instead of [] are ()
Example:
new_tuple = (2, 5.83, False, "Bye World")
type(new_tuple)
Differences between lists and tuples:
Tuples are fixed size in nature whereas lists are dynamic.
In other words, a tuple is immutable whereas a list is mutable.
You can't add elements to a tuple. Tuples have no append or extend method as lists.
You can't remove elements from a tuple. Tuples have no remove or pop method as lists.
You can find elements in a tuple, since this doesn’t change the tuple.
Tuples are faster than lists. If you're defining a constant set of values and all you're ever going to do with it is iterate through it, use a tuple instead of a list.
It makes your code safer if you “write-protect” data that does not need to be changed. Using a tuple instead of a list is like having an implied assert statement that this data is constant, and that special thought (and a specific function) is required to override that.
Some tuples can be used as dictionary keys (specifically, tuples that contain immutable values like strings, numbers, and other tuples). Lists can never be used as dictionary keys, because lists are not immutable.
There are some interesting articles on this issue, e.g. "Python Tuples are Not Just Constant Lists" or "Understanding tuples vs. lists in Python". The official Python documentation also mentions this ("Tuples are immutable, and usually contain an heterogeneous sequence ...").
Dictionaries
Dictionaries are similar to tuples and lists, but each value needs a key, using the syntax 'dictionary = {key: value}'
Example:
new_dict = {1: 1.34, 2: 'Hello World', 3: 9.28, 4: True}
type(new_dict)
"""
| {
"repo_name": "maumg1196/PythonRandomExercices",
"path": "TypesofData.py",
"copies": "1",
"size": "2380",
"license": "mit",
"hash": 1324938788284602600,
"line_mean": 32.9142857143,
"line_max": 276,
"alpha_frac": 0.7459983151,
"autogenerated": false,
"ratio": 3.4011461318051577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46471444469051576,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Collection', 'CollectionError', 'CollectionUnexpectedError']
from utils import errorutils
from model import Model
from backends.base import BaseBackend
class CollectionError(errorutils.UnexpectedError):
    """Raised for collection-level failures (e.g. adding a duplicate id,
    or using a collection whose backend is not configured).

    NOTE(review): this subclasses the same errorutils.UnexpectedError as
    CollectionUnexpectedError below -- presumably one of the two should
    use a different errorutils base; confirm.
    """
    pass
class CollectionUnexpectedError(errorutils.UnexpectedError):
    """Raised for unexpected internal collection errors."""
    pass
class Singleton(type):
    """Metaclass that caches the first instance of each class it creates.

    The first ``Cls()`` call constructs the instance; every subsequent
    call returns that same cached object.
    """

    def __init__(cls, name, bases, namespace):
        super(Singleton, cls).__init__(name, bases, namespace)
        # no instance built yet; created lazily on first call
        cls.instance = None

    def __call__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
class Collection(object):
    '''
    Abstract base collection class. Subclasses should implement synchronizing
    the collection with a database via the _do_* backend hooks.
    '''
    __metaclass__ = Singleton

    # default class used by create(); polymorphic collections may instead
    # define a 'modelClasses' list (see getClass)
    modelClass = Model
    # key under which a model's class name is stored in its data dict
    classField = 'class'
    backend = None

    def setBackend(self, backend=None, **kwargs):
        '''
        Configure the storage backend. `backend` is a backend *class*; it is
        instantiated with kwargs plus this collection's name. Falls back to a
        plain BaseBackend when omitted.
        '''
        kwargs.update(colName=self.__class__.__name__)
        # 'is None' rather than '== None': identity is the correct idiom
        backend = BaseBackend() if backend is None else backend(**kwargs)
        self.backend = backend

    def toModel(self, modelOrId):
        '''
        If passed an id, converts to the model for that id. If passed a model
        just returns the model. Primarily used internally.
        '''
        if isinstance(modelOrId, Model):
            return modelOrId
        else:
            return self[modelOrId]

    def toId(self, modelOrId):
        '''
        If passed a model, returns its id otherwise returns the id.
        Primarily used internally.
        '''
        if isinstance(modelOrId, Model):
            return modelOrId['id']
        else:
            return modelOrId

    def makeId(self, model):
        '''
        Generates a new unique id for this collection.
        '''
        while True:
            newid = self._do_makeId(model)
            # accept the candidate only if no existing model answers to it
            try:
                exists = self[newid]
            except KeyError:
                return newid
            if not exists:
                return newid

    def createClass(self, cls, *args, **kwargs):
        '''
        Creates a new Model of the given class and adds it
        to the collection. Calls the Model's __init__ method.
        '''
        newModel = cls(*args, **kwargs)
        self.add(newModel)
        return newModel

    def create(self, *args, **kwargs):
        '''
        Creates a new Model of this collection's modelClass and adds it
        to the collection. Calls the Model's __init__ method.
        '''
        return self.createClass(self.modelClass, *args, **kwargs)

    def getClass(self, data):
        '''
        returns a model's class by selecting it from the list of modelClasses
        '''
        if not hasattr(self, 'modelClasses') or not self.modelClasses:
            return self.modelClass
        # materialized as a list (not map()) so the KeyError fallback can
        # index it -- map() is a one-shot iterator on Python 3
        classnames = [(x.__name__, x) for x in self.modelClasses]
        classname = data.get(self.classField)
        try:
            return dict(classnames)[classname]
        except KeyError:
            # unknown class name: default to the first registered class
            return classnames[0][1]

    def get(self, modelId, default=None):
        '''
        Gets a model stored in this collection by modelId
        '''
        try:
            return self[modelId]
        except KeyError:
            return default

    def add(self, model):
        '''
        Adds a model to this collection. Note: this leaves it up to the
        subclass to assign an Id.
        '''
        # check to make sure the id is unique; models without an id yet
        # (AttributeError) are allowed through
        try:
            if model.id in self:
                raise CollectionError("id %s already exists in collection" %\
                        model.id)
        except AttributeError:
            pass
        model._collection = self
        return self._do_add(model)

    def __contains__(self, modelOrId):
        modelId = self.toId(modelOrId)
        # return a proper bool instead of leaking the model object
        try:
            self[modelId]
        except KeyError:
            return False
        return True

    def _modelFromData(self, data):
        '''
        Rehydrates a model from its raw data dict without running __init__.
        '''
        modelclass = self.getClass(data)
        model = modelclass.__new__(modelclass)
        model.unpack(**data)
        model._collection = self
        return model

    def __getitem__(self, modelId):
        '''
        obviously this is quite inefficient. Later I should implement a simple
        cache to keep these accesses from hitting the db each time
        '''
        data = self._do_getItem(modelId)
        if not data:
            raise KeyError
        return self._modelFromData(data)

    def __iter__(self):
        for data in self._do_iter():
            yield self._modelFromData(data)

    def find(self, query, limit=None, **kwargs):
        '''
        Yields models matching `query`; `limit` and any extra keyword
        arguments are forwarded to the backend.
        '''
        params = {'limit':limit} if limit else {}
        params.update(**kwargs)
        for data in self._do_find(query, **params):
            yield self._modelFromData(data)

    def __delitem__(self, modelOrId):
        model = self.toModel(modelOrId)
        self._do_delete(model)

    # Backend functions to implement -----------------------

    def _check_backend(self):
        if not hasattr(self, 'backend') or self.backend is None:
            raise CollectionError("backend not configured")

    def _do_makeId(self, model):
        self._check_backend()
        return self.backend.make_Id(model)

    def _do_add(self, model):
        self._check_backend()
        return self.backend.add(model)

    def _do_saveModel(self, model):
        self._check_backend()
        return self.backend.saveModel(model)

    def _do_delete(self, model):
        self._check_backend()
        return self.backend.delete(model)

    def _do_getItem(self, modelId):
        self._check_backend()
        return self.backend.getItem(modelId)

    def _do_iter(self):
        self._check_backend()
        return self.backend.iter()

    def _do_find(self, query, **kwargs):
        # accepts the keyword options (e.g. limit) that find() forwards;
        # the old signature dropped them and raised TypeError
        self._check_backend()
        return self.backend.find(query, **kwargs)

    def __len__(self):
        self._check_backend()
        return self.backend.len()
| {
"repo_name": "colevscode/quickdata",
"path": "quickdata/collection.py",
"copies": "1",
"size": "6026",
"license": "mit",
"hash": 545373248329501950,
"line_mean": 25.6637168142,
"line_max": 81,
"alpha_frac": 0.5730169267,
"autogenerated": false,
"ratio": 4.3477633477633475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5420780274463347,
"avg_score": null,
"num_lines": null
} |
__all__ = ['colorscheme', 'icon_theme', 'notify', 'widget_style']
from os import popen
from plugins.kde import *
from core import path, kde
@path.register_path_prefix
def KDEWALLPAPER():
    """Return the KDE wallpaper search paths (trailing slashes stripped).

    Uses kf5-config on KDE 5 and kde4-config on KDE 4; any other KDE
    version yields an empty list.
    """
    version = kde.version()
    if version == 5:
        tool = "kf5-config"
    elif version == 4:
        tool = "kde4-config"
    else:
        return []
    raw = popen("%s --path wallpaper" % tool).read().replace("\n", '')
    # loop variable deliberately not called 'path' -- that would shadow
    # the imported core.path module
    return [entry.rstrip('/') for entry in raw.split(":")]
@path.register_path_prefix
def KDEDATA():
    """Return the KDE application-data search paths (trailing slashes
    stripped).

    Uses kf5-config on KDE 5 and kde4-config on KDE 4; any other KDE
    version yields an empty list.
    """
    version = kde.version()
    if version == 5:
        tool = "kf5-config"
    elif version == 4:
        tool = "kde4-config"
    else:
        return []
    raw = popen("%s --path data" % tool).read().replace("\n", '')
    # loop variable deliberately not called 'path' -- that would shadow
    # the imported core.path module
    return [entry.rstrip('/') for entry in raw.split(":")]
| {
"repo_name": "nielsvm/kde4-profiles",
"path": "plugins/kde/__init__.py",
"copies": "1",
"size": "1147",
"license": "bsd-3-clause",
"hash": -6497953124740100000,
"line_mean": 32.7352941176,
"line_max": 65,
"alpha_frac": 0.5640802092,
"autogenerated": false,
"ratio": 3.7119741100323624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47760543192323623,
"avg_score": null,
"num_lines": null
} |
__all__ = ["colors", "set_style"]
from PySide2 import QtGui, QtWidgets

from . import project_globals as g
# see http://www.google.com/design/spec/style/color.html#color-color-palette
# Shared UI palette (Material Design hex values).  The trailing comment
# below indicates heading_0 is the least important heading color.
colors = {
    "background": "#212121",
    "go": "#009688",
    "stop": "#F44336",
    "set": "#009688",
    "advanced": "#FFC107",
    "text_light": "#EEEEEE",
    "text_disabled": "#757575",
    "widget_background": "#EEEEEE",
    "heading_1": "#00BCD4",
    "heading_0": "#FFC107",
}  # least important heading
# publish the palette through the shared globals container so other
# modules can read it
g.colors_dict.write(colors)
def set_style():
    """Assemble the application-wide Qt style sheet from ``colors`` and
    install it on the QApplication held in project_globals."""
    # Style Sheet----------------------------------------------------------------
    StyleSheet = ""
    # main window
    StyleSheet += "QMainWindow{background:custom_color}".replace(
        "custom_color", colors["background"]
    )
    # push button
    # StyleSheet += 'QPushButton{background:custom_color; border-width:0px; border-radius: 0px; font: bold 14px}'.replace('custom_color', colors['go'])
    # progress bar
    StyleSheet += "QProgressBar:horizontal{border: 0px solid gray; border-radius: 0px; background: custom_color; padding: 0px; height: 30px;}".replace(
        "custom_color", colors["background"]
    )
    StyleSheet += "QProgressBar:chunk{background:custom_color}".replace(
        "custom_color", colors["go"]
    )
    # tab widget
    StyleSheet += "QTabWidget::pane{border-top: 2px solid #C2C7CB;}"
    StyleSheet += "QTabWidget::tab-bar{left: 5px;}"
    StyleSheet += "QTabBar::tab{width: 100px; background: clr1; border: 0px; border-bottom-color: black; border-top-left-radius: 4px; border-top-right-radius: 4px; min-width: 8ex; padding: 2px; font: bold 14px; color: clr2}".replace(
        "clr1", colors["background"]
    ).replace(
        "clr2", colors["heading_0"]
    )
    StyleSheet += "QTabBar::tab:selected{border-color: black; border-bottom-color: black; color: clr1}".replace(
        "clr1", colors["heading_1"]
    )
    # scroll bar
    # BUG FIX: 'backround' -> 'background'; Qt silently ignored the
    # misspelled property, so this rule never took effect before.
    StyleSheet += "QScrollArea::QWidget::QWidget{background: transparent;}"
    # group box
    StyleSheet += (
        "QGroupBox{border: 2px solid gray; font: bold 14px; margin-top: 0ex; border-radius: 0 px;}"
    )
    StyleSheet += "QGroupBox::title{subcontrol-origin: margin; padding: 0 0px}"
    app = g.app.read()
    app.setStyleSheet(StyleSheet)
    # Palette--------------------------------------------------------------------
    """
    MainWindow = g.main_window.read()
    palette = QtWidgets.QPalette(MainWindow.palette())
    palette.setColor(MainWindow.backgroundRole(), QtWidgets.QColor(colors['background']))
    #MainWindow.setPalette(palette)
    """
    # style----------------------------------------------------------------------
    # app.setStyle('windowsxp')
def set_background_role(obj):
    """Apply the shared background color to *obj* via its palette."""
    # QPalette and QColor live in QtGui, not QtWidgets: the original
    # QtWidgets.QPalette lookup raises AttributeError under PySide2.
    palette = QtGui.QPalette(obj.palette())
    palette.setColor(obj.backgroundRole(), QtGui.QColor(colors["background"]))
    obj.setPalette(palette)
| {
"repo_name": "wright-group/PyCMDS",
"path": "pycmds/project/style.py",
"copies": "1",
"size": "2917",
"license": "mit",
"hash": -1836418147168966400,
"line_mean": 31.7752808989,
"line_max": 233,
"alpha_frac": 0.603359616,
"autogenerated": false,
"ratio": 3.6012345679012348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47045941839012345,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Column']
import struct
from .expr import Expr
class Column(object):
    """One typed column of a table schema: name/type/size metadata plus
    ``struct``-based packing and unpacking of single values.

    Supported types are 'bool', 'int', 'float' and 'str'; 'size' is only
    meaningful for 'str' columns.  Rich comparisons build query ``Expr``
    nodes rather than booleans (``col == 3`` is an Expr), so instances
    must not be used where ordinary equality tests are expected.
    """

    def __init__(self, name=None, type=None, size=None):
        self.name = name
        self.type = type
        self.size = size

    def __repr__(self):
        return '<%s name: %r, type: %r, size: %s>' % (
            self.__class__.__name__,
            self.name,
            self.type,
            self.size,
        )

    # Comparison operators produce query expression nodes.
    # NOTE(review): defining __eq__ without __hash__ leaves instances
    # unhashable on Python 3 -- confirm that is intended.
    def __eq__(self, other):
        return Expr(self, '==', other)

    def __ne__(self, other):
        return Expr(self, '!=', other)

    def __lt__(self, other):
        return Expr(self, '<', other)

    def __le__(self, other):
        return Expr(self, '<=', other)

    def __gt__(self, other):
        return Expr(self, '>', other)

    def __ge__(self, other):
        return Expr(self, '>=', other)

    def __iter__(self):
        '''
        Used for conversion to dict in Schema.
        '''
        d = {
            'name': self.name,
            'type': self.type,
            'size': self.size,
        }
        for k, v in d.items():
            yield k, v

    def _get_struct_format(self, value=None):
        # Layout: 1 status byte + 1 is_null byte, then the payload.
        # For 'str', the payload is a length prefix (Q) plus the bytes;
        # when no value is given the declared column size is used.
        if self.type == 'bool':
            fmt = b'!BBB'
        elif self.type == 'int':
            fmt = b'!BBq'
        elif self.type == 'float':
            fmt = b'!BBd'
        elif self.type == 'str':
            if value is None:
                fmt = b'!BBQ%is' % self.size
            else:
                fmt = b'!BBQ%is' % len(value)
        else:
            raise Exception('unsupported column type')
        return fmt

    def _get_column_size(self, value):
        # total packed size in bytes for this column holding `value`
        fmt = self._get_struct_format(value)
        size = struct.calcsize(fmt)
        return size

    def _get_column_packed(self, value=None):
        # Serialize one value; the leading 0 is the (currently unused)
        # status byte, followed by the null flag.
        # NOTE(review): a None value for a 'str' column reaches
        # len(value) below and would raise TypeError -- confirm nulls
        # are packed elsewhere.
        fmt = self._get_struct_format(value)
        is_null = 1 if value is None else 0
        if self.type == 'str':
            # FIXME: use self.size if required
            b = struct.pack(fmt, 0, is_null, len(value), value)
        else:
            b = struct.pack(fmt, 0, is_null, value)
        return b

    def _get_column_unpacked(self, mm, pos):
        '''Read one value of this column's type from buffer `mm` at
        offset `pos`; returns (value, new_pos).'''
        status, is_null = struct.unpack_from('!BB', mm, pos)
        pos += 2
        if self.type == 'bool':
            value, = struct.unpack_from('!B', mm, pos)
            value = bool(value)
            pos += 1
        elif self.type == 'int':
            value, = struct.unpack_from('!q', mm, pos)
            pos += 8
        elif self.type == 'float':
            value, = struct.unpack_from('!d', mm, pos)
            pos += 8
        elif self.type == 'str':
            # length-prefixed: read the 8-byte count, then the raw bytes
            str_len, = struct.unpack_from('!Q', mm, pos)
            pos += 8
            value = mm[pos:pos + str_len]
            pos += str_len
        else:
            raise Exception('unsupported column type')
        return value, pos
| {
"repo_name": "yadb/yadb",
"path": "backup/store/column.py",
"copies": "1",
"size": "2842",
"license": "mit",
"hash": -8148891914683405000,
"line_mean": 25.3148148148,
"line_max": 63,
"alpha_frac": 0.4683321605,
"autogenerated": false,
"ratio": 3.729658792650919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9695886576046542,
"avg_score": 0.0004208754208754209,
"num_lines": 108
} |
__all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
from numpy.core import hstack, vstack, atleast_3d
def apply_along_axis(func1d,axis,arr,*args):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and
    `a` is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.

    Returns
    -------
    apply_along_axis : ndarray
        The output array. Its shape is identical to that of `arr` except
        along `axis`, where the length equals the size of `func1d`'s
        return value. If `func1d` returns a scalar the output has one
        fewer dimension than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(lambda a: (a[0] + a[-1]) * 0.5, 0, b)
    array([ 4.,  5.,  6.])
    """
    arr = asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis,nd))
    # 'ind' walks every index combination over the non-target axes;
    # 'i' is an object-dtype index vector holding a full slice at 'axis'
    ind = [0]*(nd-1)
    i = zeros(nd,'O')
    # list() is required on Python 3, where range() is a lazy sequence
    # with no .remove() method
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None,None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    res = func1d(arr[tuple(i.tolist())],*args)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape,asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index (odometer-style carry over the
            # non-target axes)
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist,ind)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # vector-valued func1d: the target axis length becomes len(res)
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape,asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    ``func`` is called as ``res = func(a, axis)`` for each axis in
    `axes` in turn, feeding each result into the next call.  If a call
    drops a dimension, it is re-inserted with `expand_dims` before
    continuing; any other shape change is an error.

    Parameters
    ----------
    func : function
        Must take two arguments, ``func(a, axis)``.
    a : array_like
        Input array.
    axes : array_like
        Axes over which `func` is applied; the elements must be integers.

    Returns
    -------
    apply_over_axis : ndarray
        The accumulated result; it has the same number of dimensions
        as `a`, though the shape may differ.

    See Also
    --------
    apply_along_axis :
        Apply a function to 1-D slices of an array along the given axis.

    Examples
    --------
    >>> np.apply_over_axes(np.sum, np.arange(24).reshape(2,3,4), [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    result = asarray(a)
    rank = a.ndim
    # promote a bare integer axis spec to a one-element sequence
    if array(axes).ndim == 0:
        axes = (axes,)
    for ax in axes:
        if ax < 0:
            ax = rank + ax
        res = func(result, ax)
        if res.ndim != result.ndim:
            # the call collapsed a dimension; restore it at 'ax'
            res = expand_dims(res, ax)
            if res.ndim != result.ndim:
                raise ValueError("function is not returning "
                        "an array of the correct shape")
        result = res
    return result
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a length-one axis at position `axis` of the result's shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where the new axis is to be inserted.

    Returns
    -------
    res : ndarray
        Output array with one more dimension than the input.

    See Also
    --------
    doc.indexing, atleast_1d, atleast_2d, atleast_3d

    Examples
    --------
    >>> np.expand_dims(np.array([1, 2]), axis=0).shape
    (1, 2)
    >>> np.expand_dims(np.array([1, 2]), axis=1).shape
    (2, 1)
    """
    a = asarray(a)
    old_shape = a.shape
    # a negative position counts from the end of the *new* shape
    if axis < 0:
        axis += len(old_shape) + 1
    new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
    return a.reshape(new_shape)
# Alias: stacking 1-D arrays as rows is exactly vstack.
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
hstack, vstack, concatenate
Notes
-----
This function is equivalent to ``np.vstack(tup).T``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v,copy=False,subok=True)
if arr.ndim < 2:
arr = array(arr,copy=False,subok=True,ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays,1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
Takes a sequence of arrays and stack them along the third axis
to make a single array. Rebuilds arrays divided by `dsplit`.
This is a simple way to stack 2D arrays (images) into a single
3D array for processing.
Parameters
----------
tup : sequence of arrays
Arrays to stack. All of them must have the same shape along all
but the third axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join arrays.
dsplit : Split array along third axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=2)``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate(map(atleast_3d,tup),2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.array([])
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)):
sub_arys[i] = _nx.array([])
return sub_arys
def array_split(ary,indices_or_sections,axis = 0):
    """
    Split an array into multiple sub-arrays.

    Identical to `split`, except that an integer `indices_or_sections`
    need not evenly divide the axis: the first ``total % n`` pieces are
    then one element longer than the rest.

    See Also
    --------
    split : Split array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> np.array_split(np.arange(8.0), 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]
    """
    if hasattr(ary, 'shape'):
        total = ary.shape[axis]
    else:
        total = len(ary)
    try:
        # explicit split points were given
        bounds = [0] + list(indices_or_sections) + [total]
        nsec = len(indices_or_sections) + 1
    except TypeError:
        # indices_or_sections is a scalar: number of (nearly) equal parts
        nsec = int(indices_or_sections)
        if nsec <= 0:
            raise ValueError('number sections must be larger than 0.')
        each, extras = divmod(total, nsec)
        sizes = [0] + extras * [each + 1] + (nsec - extras) * [each]
        bounds = _nx.array(sizes).cumsum()
    # slice along axis 0 of the swapped view, then swap back
    swapped = _nx.swapaxes(ary, axis, 0)
    pieces = [_nx.swapaxes(swapped[bounds[i]:bounds[i + 1]], axis, 0)
              for i in range(nsec)]
    # normalize the odd zero-size pieces slicing can produce
    return _replace_zero_by_x_arrays(pieces)
def split(ary,indices_or_sections,axis=0):
    """
    Split an array into multiple sub-arrays.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D array
        An integer N divides the array into N equal pieces along `axis`
        (a ValueError is raised if the axis length is not divisible).
        A sorted 1-D array of integers gives explicit split points;
        indices beyond the end yield empty sub-arrays.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is given as an integer, but a split
        does not result in equal division.

    See Also
    --------
    array_split : Like `split`, but never raises on uneven division.
    hsplit, vsplit, dsplit : Axis-specific variants.

    Examples
    --------
    >>> np.split(np.arange(9.0), 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.,  8.])]
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # a scalar section count must divide the axis length exactly
        n_sections = indices_or_sections
        if ary.shape[axis] % n_sections:
            raise ValueError('array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
def hsplit(ary,indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[ 12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[ 10., 11.],
[ 14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[ 12., 13., 14.]]),
array([[ 3.],
[ 7.],
[ 11.],
[ 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if len(ary.shape) > 1:
return split(ary,indices_or_sections,1)
else:
return split(ary,indices_or_sections,0)
def vsplit(ary,indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary,indices_or_sections,0)
def dsplit(ary,indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if len(_nx.shape(ary)) < 3:
raise ValueError('vsplit only works on arrays of 3 or more dimensions')
return split(ary,indices_or_sections,2)
def get_array_prepare(*args):
    """Find the ``__array_prepare__`` of the argument with the highest
    ``__array_priority__``; ties go to the leftmost argument.  Returns
    None when no argument defines ``__array_prepare__``.
    """
    best = None
    best_key = None
    for pos, candidate in enumerate(args):
        if not hasattr(candidate, '__array_prepare__'):
            continue
        # -pos makes earlier arguments win equal-priority ties
        key = (getattr(candidate, '__array_priority__', 0), -pos)
        if best_key is None or key > best_key:
            best_key = key
            best = candidate.__array_prepare__
    return best
def get_array_wrap(*args):
    """Find the ``__array_wrap__`` of the argument with the highest
    ``__array_priority__``; ties go to the leftmost argument.  Returns
    None when no argument defines ``__array_wrap__``.
    """
    best = None
    best_key = None
    for pos, candidate in enumerate(args):
        if not hasattr(candidate, '__array_wrap__'):
            continue
        # -pos makes earlier arguments win equal-priority ties
        key = (getattr(candidate, '__array_priority__', 0), -pos)
        if best_key is None or key > best_key:
            best_key = key
            best = candidate.__array_wrap__
    return best
def kron(a,b):
    """
    Kronecker product of two arrays.

    Computes the composite array made of blocks of `b` scaled by the
    elements of `a`.  If ``a.shape = (r0,..,rN)`` and
    ``b.shape = (s0,..,sN)`` the result has shape ``(r0*s0,..,rN*sN)``;
    the lower-rank operand is implicitly prepended with length-1 axes.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron(np.eye(2), np.ones((2,2)))
    array([[ 1.,  1.,  0.,  0.],
           [ 1.,  1.,  0.,  0.],
           [ 0.,  0.,  1.,  1.],
           [ 0.,  0.,  1.,  1.]])
    """
    b = asanyarray(b)
    a = array(a,copy=False,subok=True,ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if (nda == 0 or ndb == 0):
        # a scalar operand reduces to a plain elementwise product
        return _nx.multiply(a,b)
    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    nd = ndb
    if (ndb != nda):
        # pad the lower-rank operand's shape with leading ones
        if (ndb > nda):
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
            nd = nda
    result = outer(a,b).reshape(as_+bs)
    axis = nd-1
    # interleave the outer-product axes pairwise into the block layout;
    # range() replaces Python 2's xrange, removed in Python 3
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A,copy=False,subok=True,ndmin=d)
shape = list(c.shape)
n = max(c.size,1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep!=1:
c = c.reshape(-1,n).repeat(nrep,0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n /= max(dim_in,1)
return c.reshape(shape)
| {
"repo_name": "mbalasso/mynumpy",
"path": "numpy/lib/shape_base.py",
"copies": "11",
"size": "24366",
"license": "bsd-3-clause",
"hash": -8705056836733563000,
"line_mean": 28.041716329,
"line_max": 80,
"alpha_frac": 0.5083312813,
"autogenerated": false,
"ratio": 3.3511208912116626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002790634242185748,
"num_lines": 839
} |
# all combinations except those that do not produce some sort of
# chimeric protein are commented out
import itertools
import os
# location of the bundled agfusion annotation database (shipped in ./data)
agfusion_db = os.path.join(
    os.path.split(__file__)[0],
    'data',
    'agfusion.db'
)
# base URL from which versioned agfusion databases are downloaded
AGFUSION_DB_URL = "https://s3.amazonaws.com/agfusion/agfusion."
# this is mostly contingent on the maximum ensembl release supported
# by pyensembl
MAX_ENSEMBL_RELEASE = 95
# common genome-assembly shortcuts -> [species, ensembl release]
GENOME_SHORTCUTS = {
    'GRCm38':['mus_musculus',MAX_ENSEMBL_RELEASE],
    'mm10':['mus_musculus',MAX_ENSEMBL_RELEASE],
    'mm9':['mus_musculus',67],
    'GRCh38':['homo_sapiens',MAX_ENSEMBL_RELEASE],
    'hg38':['homo_sapiens',MAX_ENSEMBL_RELEASE],
    'hg19':['homo_sapiens',75]
}
# ensembl releases usable for each supported species
AVAILABLE_ENSEMBL_SPECIES = {
    'homo_sapiens':range(69,MAX_ENSEMBL_RELEASE+1),
    'mus_musculus':range(67,MAX_ENSEMBL_RELEASE+1)
}
# species -> {release: mysql core database name}; filled in by the loops below
ENSEMBL_MYSQL_TABLES = {
    'homo_sapiens':{},
    'mus_musculus':{}
}
# Pre-release-63 human core databases kept for reference only (pyensembl
# does not support these releases).
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][48] = 'homo_sapiens_core_48_36j'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][49] = 'homo_sapiens_core_49_36k'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][50] = 'homo_sapiens_core_50_36l'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][51] = 'homo_sapiens_core_51_36m'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][52] = 'homo_sapiens_core_52_36n'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][53] = 'homo_sapiens_core_53_36o'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][54] = 'homo_sapiens_core_54_36p'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][55] = 'homo_sapiens_core_55_37'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][56] = 'homo_sapiens_core_56_37a'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][57] = 'homo_sapiens_core_57_37b'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][58] = 'homo_sapiens_core_58_37c'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][59] = 'homo_sapiens_core_59_37d'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][60] = 'homo_sapiens_core_60_37e'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][61] = 'homo_sapiens_core_61_37f'
#ENSEMBL_MYSQL_TABLES['homo_sapiens'][62] = 'homo_sapiens_core_62_37g'
# Human core databases: assembly suffix flips from GRCh37 to GRCh38 at
# release 76.
for release in range(63, MAX_ENSEMBL_RELEASE + 1):
    assembly_suffix = '_37' if release < 76 else '_38'
    ENSEMBL_MYSQL_TABLES['homo_sapiens'][release] = 'homo_sapiens_core_' + str(release) + assembly_suffix
# Mouse core databases: only release 67 used the *_37 assembly suffix.
for release in range(67, MAX_ENSEMBL_RELEASE + 1):
    assembly_suffix = '_37' if release < 68 else '_38'
    ENSEMBL_MYSQL_TABLES['mus_musculus'][release] = 'mus_musculus_core_' + str(release) + assembly_suffix
# minimum length (amino acids) a protein domain must have to be plotted
MIN_DOMAIN_LENGTH = 5
# canonical chromosome names only: autosomes 1-22, sex chromosomes, mitochondrial
STANDARD_CHROMOSOMES = [str(chrom) for chrom in range(1, 23)] + ['X', 'Y', 'MT']
# the protein-domain annotation sources that are available
PROTEIN_ANNOTATIONS = [
    'pfam', 'smart', 'superfamily', 'tigrfam', 'pfscan',
    'tmhmm', 'seg', 'ncoils', 'prints',
    'pirsf', 'signalp'
]
# every genomic feature class a fusion junction can land in
JUNCTION_LOCATIONS = [
    'CDS', 'CDS (start)', 'CDS (end)', '5UTR', '5UTR (end)',
    '3UTR', '3UTR (start)', 'exon', 'intron', 'intron (cds)',
    'intron (before cds)', 'intron (after cds)'
]
# Seed every (5' location, 3' location) pair as non-coding; the override
# table later in this module flags the pairs that can produce a chimera.
CODING_COMBINATIONS = {
    pair: {'protein_coding_potential': False, 'truncated': False}
    for pair in itertools.product(JUNCTION_LOCATIONS, repeat=2)
}
# Overrides marking (5' location, 3' location) pairs that can yield a
# chimeric protein; 'truncated' flags fusions that lose the 5' gene's
# natural stop.  Commented-out entries are pairs deliberately left
# non-coding (see the note at the top of this module).
# --- 5' junction inside the CDS ---
CODING_COMBINATIONS['CDS','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS','CDS (end)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','5UTR'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','5UTR (end)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','3UTR'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','3UTR (start)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','intron'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS','intron (before cds)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS','intron (after cds)'] = {'protein_coding_potential':True,'truncated':True}
# --- 5' junction at the CDS end ---
CODING_COMBINATIONS['CDS (end)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','intron'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (end)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction at the CDS start ---
#CODING_COMBINATIONS['CDS (start)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (start)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['CDS (start)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['CDS (start)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['CDS (start)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['CDS (start)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['CDS (start)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (start)','intron'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS (start)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['CDS (start)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['CDS (start)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':True}
# --- 5' junction in the 5' UTR ---
#CODING_COMBINATIONS['5UTR','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['5UTR','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','5UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','intron'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['5UTR','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction at the 5' UTR end ---
#CODING_COMBINATIONS['5UTR (end)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['5UTR (end)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['5UTR (end)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['5UTR (end)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction in the 3' UTR ---
CODING_COMBINATIONS['3UTR','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','3UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','intron'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction at the 3' UTR start ---
CODING_COMBINATIONS['3UTR (start)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','intron'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (start)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction at the 3' UTR end ---
# NOTE(review): '3UTR (end)' is not listed in JUNCTION_LOCATIONS, so the
# active key below is *added* to the table rather than overriding a seeded
# pair -- confirm this is intentional.
#CODING_COMBINATIONS['3UTR (end)','CDS'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','intron'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['3UTR (end)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['3UTR (end)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction in a generic intron ---
#CODING_COMBINATIONS['intron','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','intron'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron','intron (after cds)'] = {'protein_coding_potential':{'protein_coding_potential':True,'truncated':False},'truncated':False}
# --- 5' junction in an intron within the CDS ---
CODING_COMBINATIONS['intron (cds)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (cds)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (cds)','CDS (end)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','5UTR'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','3UTR'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','intron'] = {'protein_coding_potential':True,'truncated':True}
CODING_COMBINATIONS['intron (cds)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (cds)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (cds)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction in an intron before the CDS ---
#CODING_COMBINATIONS['intron (before cds)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (before cds)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (before cds)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (before cds)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','intron'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (before cds)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
#CODING_COMBINATIONS['intron (before cds)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
# --- 5' junction in an intron after the CDS ---
CODING_COMBINATIONS['intron (after cds)','CDS'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','CDS (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','CDS (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','5UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','5UTR (end)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','3UTR'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','3UTR (start)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','intron'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','intron (cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','intron (before cds)'] = {'protein_coding_potential':True,'truncated':False}
CODING_COMBINATIONS['intron (after cds)','intron (after cds)'] = {'protein_coding_potential':True,'truncated':False}
| {
"repo_name": "murphycj/AGFusion",
"path": "agfusion/utils.py",
"copies": "1",
"size": "16452",
"license": "mit",
"hash": 7185319132329701000,
"line_mean": 69.3076923077,
"line_max": 152,
"alpha_frac": 0.715049842,
"autogenerated": false,
"ratio": 2.746119178768152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3961169020768152,
"avg_score": null,
"num_lines": null
} |
__all__ = ['combinations', 'permutations']
def combinations(L, n):
    """Generate combinations from a sequence of elements.

    Returns a generator that yields every combination of `n` elements
    from the sequence `L`, as lists, preserving the order of `L`.

    Parameters
    ----------
    L : list-like
        Sequence of elements
    n : integer
        Number of elements to choose at a time

    Returns
    -------
    A generator object

    Notes
    -----
    For ``n == 0`` or ``n > len(L)`` a single empty combination ``[]``
    is yielded (historical behavior, kept for compatibility).

    Fix vs. the original: the loop used the Python 2-only builtin
    ``xrange``, which raises NameError on Python 3; ``range`` is
    behavior-identical here and works on both.

    Examples
    --------
    >>> list(combinations([1, 2, 3, 4], 2))
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    >>> list(combinations([1, 2, 3, 4], 0))
    [[]]
    >>> list(combinations(['a', 'b', 'c'], 2))
    [['a', 'b'], ['a', 'c'], ['b', 'c']]
    """
    if n == 0 or n > len(L):
        yield []
    else:
        for i in range(len(L) - n + 1):
            # Prepend L[i] to every (n-1)-combination of the tail.
            for tail in combinations(L[i + 1:], n - 1):
                yield [L[i]] + tail
def permutations(L):
    """Generate permutations from a sequence of elements.

    Returns a generator that yields every permutation of the elements
    of `L`, each as a new list.

    Parameters
    ----------
    L : list-like
        Sequence of elements

    Returns
    -------
    A generator object

    Examples
    --------
    >>> list(permutations([1]))
    [[1]]
    >>> list(permutations([1, 2]))
    [[1, 2], [2, 1]]
    >>> list(permutations([1, 2, 3]))
    [[1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 3, 2], [3, 1, 2], [3, 2, 1]]
    """
    size = len(L)
    if size == 1:
        yield [L[0]]
        return
    if size < 2:
        # Empty input: nothing to yield.
        return
    head = L[0:1]
    # Insert the head element at every position of each permutation of
    # the remaining elements.
    for sub in permutations(L[1:]):
        for pos in range(len(sub) + 1):
            yield sub[:pos] + head + sub[pos:]
| {
"repo_name": "alejospina/pydec",
"path": "pydec/math/combinatorial.py",
"copies": "6",
"size": "2147",
"license": "bsd-3-clause",
"hash": -852229113043876600,
"line_mean": 24.5595238095,
"line_max": 106,
"alpha_frac": 0.4634373544,
"autogenerated": false,
"ratio": 3.233433734939759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.669687108933976,
"avg_score": null,
"num_lines": null
} |
__all__ = ['combine_subsets', ]
# Data taken from https://code.google.com/p/googlefontdirectory/source/browse/tools/subset/subset.py#184
# Typographic punctuation merged into every generated subset
# (combine_subsets always adds 'quotes').
quotes = [
    0x2013, # endash
    0x2014, # emdash
    0x2018, # quoteleft
    0x2019, # quoteright
    0x201A, # quotesinglbase
    0x201C, # quotedblleft
    0x201D, # quotedblright
    0x201E, # quotedblbase
    0x2022, # bullet
    0x2039, # guilsinglleft
    0x203A, # guilsinglright
]
# Minimal subset (a bare space); also always merged in by combine_subsets.
empty_set = [ 0x0020, # space
            ]
# Codepoint tables per script subset.
# Fix vs. the original: `range(...) + range(...)` is Python 2-only (range
# objects do not support `+` on Python 3) -- wrap each range in list() so
# the module imports on both Python 2 and 3 with identical values.
latin = (list(range(0x20, 0x7f)) +   # Basic Latin (A-Z, a-z, numbers)
         list(range(0xa0, 0x100)) +  # Western European symbols and diacritics
         [0x20ac,  # Euro
          0x0152,  # OE
          0x0153,  # oe
          0x003b,  # semicolon
          0x00b7,  # periodcentered
          0x0131,  # dotlessi
          0x02c6,  # circumflex
          0x02da,  # ring
          0x02dc,  # tilde
          0x2074,  # foursuperior
          0x2215,  # divison slash
          0x2044,  # fraction slash
          0xe0ff,  # PUA: Font logo
          0xeffd,  # PUA: Font version number
          0xf000,  # PUA: font ppem size indicator: run `ftview -f 1255 10 Ubuntu-Regular.ttf` to see it in action!
          ])
# These ranges include Extended A, B, C, D, and Additional with the
# exception of Vietnamese, which is a separate range
latin_ext = (list(range(0x100, 0x370)) +
             list(range(0x1d00, 0x1ea0)) +
             list(range(0x1ef2, 0x1f00)) +
             list(range(0x2070, 0x20d0)) +
             list(range(0x2c60, 0x2c80)) +
             list(range(0xa700, 0xa800)))
# 2011-07-16 DC: Charset from http://vietunicode.sourceforge.net/charset/ + U+1ef9 from Fontaine
vietnamese = [0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00C8, 0x00C9,
              0x00CA, 0x00CC, 0x00CD, 0x00D2, 0x00D3, 0x00D4,
              0x00D5, 0x00D9, 0x00DA, 0x00DD, 0x00E0, 0x00E1,
              0x00E2, 0x00E3, 0x00E8, 0x00E9, 0x00EA, 0x00EC,
              0x00ED, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F9,
              0x00FA, 0x00FD, 0x0102, 0x0103, 0x0110, 0x0111,
              0x0128, 0x0129, 0x0168, 0x0169, 0x01A0, 0x01A1,
              0x01AF, 0x01B0, 0x20AB] + list(range(0x1EA0, 0x1EFA))
# Could probably be more aggressive here and exclude archaic characters,
# but lack data
greek = list(range(0x370, 0x400)) + list(range(0x1f00, 0x2000))
# Based on character frequency analysis
cyrillic = list(range(0x400, 0x460)) + [0x490, 0x491, 0x4b0, 0x4b1, 0x2116]
cyrillic_ext = (list(range(0x400, 0x530)) +
                [0x20b4, 0x2116] +  # 0x2116 is the russian No, a number abbreviation similar to the latin #
                list(range(0x2de0, 0x2e00)) +
                list(range(0xa640, 0xa6a0)))
# Based on Droid Arabic Kufi 1.0
# Explicit codepoint list for the Arabic subset (no contiguous ranges).
arabic = [0x000D, 0x0020, 0x0621, 0x0627, 0x062D,
          0x062F, 0x0631, 0x0633, 0x0635, 0x0637, 0x0639,
          0x0643, 0x0644, 0x0645, 0x0647, 0x0648, 0x0649,
          0x0640, 0x066E, 0x066F, 0x0660, 0x0661, 0x0662,
          0x0663, 0x0664, 0x0665, 0x0666, 0x0667, 0x0668,
          0x0669, 0x06F4, 0x06F5, 0x06F6, 0x06BE, 0x06D2,
          0x06A9, 0x06AF, 0x06BA, 0x066A, 0x061F, 0x060C,
          0x061B, 0x066B, 0x066C, 0x066D, 0x064B, 0x064D,
          0x064E, 0x064F, 0x064C, 0x0650, 0x0651, 0x0652,
          0x0653, 0x0654, 0x0655, 0x0670, 0x0656, 0x0615,
          0x0686, 0x0623, 0x0625, 0x0622, 0x0671, 0x0628,
          0x067E, 0x062A, 0x062B, 0x0679, 0x0629, 0x062C,
          0x062E, 0x0630, 0x0688, 0x0632, 0x0691, 0x0698,
          0x0634, 0x0636, 0x0638, 0x063A, 0x0641, 0x0642,
          0x0646, 0x06D5, 0x06C0, 0x0624, 0x064A, 0x06CC,
          0x06D3, 0x0626, 0x06C2, 0x06C1, 0x06C3, 0x06F0,
          0x06F1, 0x06F2, 0x06F3, 0x06F9, 0x06F7, 0x06F8,
          0xFC63, 0x0672, 0x0673, 0x0675, 0x0676, 0x0677,
          0x0678, 0x067A, 0x067B, 0x067C, 0x067D, 0x067F,
          0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685,
          0x0687, 0x0689, 0x068A, 0x068B, 0x068C, 0x068D,
          0x068E, 0x068F, 0x0690, 0x0692, 0x0693, 0x0694,
          0x0695, 0x0696, 0x0697, 0x0699, 0x069A, 0x069B,
          0x069C, 0x069D, 0x069E, 0x069F, 0x06A0, 0x06A1,
          0x06A2, 0x06A3, 0x06A5, 0x06A6, 0x06A7, 0x06A8,
          0x06AA, 0x06AB, 0x06AC, 0x06AD, 0x06AE, 0x06B0,
          0x06B1, 0x06B2, 0x06B3, 0x06B4, 0x06B5, 0x06B6,
          0x06B7, 0x06B8, 0x06B9, 0x06BB, 0x06BC, 0x06BD,
          0x06BF, 0x06C4, 0x06C5, 0x06CD, 0x06D6, 0x06D7,
          0x06D8, 0x06D9, 0x06DA, 0x06DB, 0x06DC, 0x06DF,
          0x06E1, 0x06E2, 0x06E3, 0x06E4, 0x06E5, 0x06E6,
          0x06E7, 0x06E8, 0x06EA, 0x06EB, 0x06ED, 0x06FB,
          0x06FC, 0x06FD, 0x06FE, 0x0600, 0x0601, 0x0602,
          0x0603, 0x060E, 0x060F, 0x0610, 0x0611, 0x0612,
          0x0613, 0x0614, 0x0657, 0x0658, 0x06EE, 0x06EF,
          0x06FF, 0x060B, 0x061E, 0x0659, 0x065A, 0x065B,
          0x065C, 0x065D, 0x065E, 0x0750, 0x0751, 0x0752,
          0x0753, 0x0754, 0x0755, 0x0756, 0x0757, 0x0758,
          0x0759, 0x075A, 0x075B, 0x075C, 0x075D, 0x075E,
          0x075F, 0x0760, 0x0761, 0x0762, 0x0763, 0x0764,
          0x0765, 0x0766, 0x0767, 0x0768, 0x0769, 0x076A,
          0x076B, 0x076C, 0x076D, 0x06A4, 0x06C6, 0x06C7,
          0x06C8, 0x06C9, 0x06CA, 0x06CB, 0x06CF, 0x06CE,
          0x06D0, 0x06D1, 0x06D4, 0x06FA, 0x06DD, 0x06DE,
          0x06E0, 0x06E9, 0x060D, 0xFD3E, 0xFD3F, 0x25CC,
          # Added from https://groups.google.com/d/topic/googlefontdirectory-discuss/MwlMWMPNCXs/discussion
          0x063b, 0x063c, 0x063d, 0x063e, 0x063f, 0x0620,
          0x0674, 0x0674, 0x06EC]
# Subset name -> codepoint table; looked up by combine_subsets().
# NOTE(review): there is no 'menu' entry here although combine_subsets
# special-cases a 'menu' subset -- see that function.
subsets_dict = {
    'quotes': quotes,
    'empty_set': empty_set,
    'latin': latin,
    'latin_ext': latin_ext,
    'vietnamese': vietnamese,
    'greek': greek,
    'cyrillic': cyrillic,
    'cyrillic_ext': cyrillic_ext,
    'arabic':arabic
}
def combine_subsets(subsets, font=None):
    """Return the combined codepoint list for the requested subset names.

    'empty_set' and 'quotes' are always included.  The pseudo-subset
    'menu' has no codepoint table; when it is requested and *font* is
    given, the codepoints of the font's family name are appended instead.

    Fixes vs. the original: the caller's *subsets* list is no longer
    mutated (it was appended to in place), and requesting 'menu' no
    longer raises KeyError — the old lookup loop crashed on
    ``subsets_dict['menu']`` before the menu branch could ever run.
    """
    wanted = set(subsets) | {'empty_set', 'quotes'}
    result = []
    for name in wanted:
        if name == 'menu':
            continue  # no table in subsets_dict; handled via the font name below
        result.extend(subsets_dict[name])
    if 'menu' in wanted and font:
        result.extend(set(map(ord, font.familyname)))
    return result
| {
"repo_name": "vitalyvolkov/fontbakery",
"path": "checker/tools.py",
"copies": "1",
"size": "5477",
"license": "apache-2.0",
"hash": 7033846329131263000,
"line_mean": 36.5136986301,
"line_max": 108,
"alpha_frac": 0.6751871462,
"autogenerated": false,
"ratio": 2.0076979472140764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3182885093414076,
"avg_score": null,
"num_lines": null
} |
__all__ = ["combomethod", "ComboMethodError"]
import functools
class ComboMethodError(Exception):
    """Raised when the combo method descriptor is initialized or used incorrectly."""
class ComboMethod(object):
    """Combo method descriptor.

    The descriptor is initialized either with a static method or a class
    method; an instance-level override can then be registered with the
    :meth:`instance` decorator.  Attribute access on the class dispatches
    to the static/class method, access on an instance dispatches to the
    registered instance method.

    Do not use this class directly; use the ``combomethod`` function.
    """

    def __init__(self, staticmethod=None, classmethod=None):
        if staticmethod is None and classmethod is None:
            raise ComboMethodError(
                "Either static method or class method has to be provided"
            )
        self.staticmethod = staticmethod
        self.classmethod = classmethod
        self.instancemethod = None

    def instance(self, instancemethod):
        """Register the instance-level override; returns the descriptor."""
        self.instancemethod = instancemethod
        return self

    def __get__(self, obj, type=None):
        # Instance access: dispatch to the instance-level override.
        if obj is not None:
            if self.instancemethod is None:
                raise ComboMethodError("Instance method is not provided")

            @functools.wraps(self.instancemethod)
            def call_on_instance(*args, **kwargs):
                return self.instancemethod(obj, *args, **kwargs)

            return call_on_instance

        # Class access: a static method is returned untouched ...
        if self.staticmethod is not None:
            return self.staticmethod

        # ... otherwise bind the owning class as the first argument.
        @functools.wraps(self.classmethod)
        def call_on_class(*args, **kwargs):
            return self.classmethod(type, *args, **kwargs)

        return call_on_class
def combomethod(method=None, static=False):
    """Create a method usable as a class/static method on the class that an
    instance method of the same name can override for instance calls.

    Decorate the class-level implementation with ``@combomethod`` (it
    receives ``cls``) or ``@combomethod(static=True)`` (it receives no
    implicit first argument), then register the per-instance override
    with ``@<name>.instance``::

        class Foo(object):
            class_variable = 2

            def __init__(self):
                self.instance_variable = 3

            @combomethod
            def class_and_instance(cls, x):
                return x + cls.class_variable

            @class_and_instance.instance
            def class_and_instance(self, x):
                return x + self.instance_variable

        Foo.class_and_instance(100)    # -> 102 (class path)
        Foo().class_and_instance(100)  # -> 103 (instance path)
    """
    if static:
        # The bare class is returned and then applied as the decorator,
        # so ComboMethod(<function>) stores it as the static method.
        return ComboMethod
    return ComboMethod(None, method)
| {
"repo_name": "alefnula/tea",
"path": "tea/decorators/__init__.py",
"copies": "1",
"size": "2970",
"license": "bsd-3-clause",
"hash": -8762230824345428000,
"line_mean": 28.7,
"line_max": 79,
"alpha_frac": 0.5885521886,
"autogenerated": false,
"ratio": 4.729299363057325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 100
} |
__all__ = ["command_center", "crystalite_farm", "adamantite_mine", "crystalite_silo",
"adamantite_storage", "vault", "laboratory", "craft_pad"]
from .tools import create_building
def _levels(basis, hit_points_per_level):
    """Map level (1-based) -> a copy of *basis* plus that level's 'hit_points'."""
    return {
        level: dict(basis, hit_points=points)
        for level, points in enumerate(hit_points_per_level, start=1)
    }


# Shared field templates, one per building type; the per-level tables are
# derived from these by _levels() and differ only in 'hit_points'.
COMMAND_CENTER_BASIS = {
    'alias': 'commandCenter-main',
    'role': 'center',
    'type': 'commandCenter',
    'size': 4,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
COMMAND_CENTERS = _levels(COMMAND_CENTER_BASIS, (2500, 3000, 3500, 4000, 6000))

CRYSTALITE_FARM_BASIS = {
    'alias': 'crystalite-farm',
    'role': 'building',
    'type': 'crystaliteFarm',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
CRYSTALITE_FARMS = _levels(CRYSTALITE_FARM_BASIS, (1000, 1200, 1400, 1700, 2000))

ADAMANTITE_MINE_BASIS = {
    'alias': 'adamantite-mine',
    'role': 'building',
    'type': 'adamantiteMine',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
ADAMANTITE_MINES = _levels(ADAMANTITE_MINE_BASIS, (1500, 1800, 2200, 2600, 3100))

CRYSTALITE_SILO_BASIS = {
    'alias': 'crystalite-silo',
    'role': 'building',
    'type': 'crystaliteSilo',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
CRYSTALITE_SILOS = _levels(CRYSTALITE_SILO_BASIS, (1000, 1200, 1400, 1700, 2000))

ADAMANTITE_STORAGE_BASIS = {
    # NOTE(review): underscore here, unlike the hyphenated aliases of the
    # other buildings -- confirm this inconsistency is intentional.
    'alias': 'adamantite_storage',
    'role': 'building',
    'type': 'adamantiteStorage',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
# Only four storage levels exist (vs. five for most buildings).
ADAMANTITE_STORAGES = _levels(ADAMANTITE_STORAGE_BASIS, (1000, 1200, 1400, 1700))

VAULT_BASIS = {
    'alias': 'vault',
    'role': 'building',
    'type': 'vault',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
VAULTS = _levels(VAULT_BASIS, (1000, 1100, 1200, 1300))

LABORATORY_BASIS = {
    'alias': 'lab',
    'role': 'building',
    'type': 'laboratory',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
LABORATORIES = _levels(LABORATORY_BASIS, (1200, 1300))

CRAFT_PAD_BASIS = {
    'alias': 'craft-pad',
    'role': 'building',
    'type': 'craftPad',
    'size': 3,
    'status': 'idle',
    'tile_position': None,
    'level': None,
    'player_id': None,
}
CRAFT_PADS = _levels(CRAFT_PAD_BASIS, (1000, 1200, 1400, 1700, 2000))
# Factory helpers: each returns a plain dict describing one building of the
# given level, placed at tile_position (a two-element [x, y] list) and owned
# by player_id.  All delegate to create_building with their level table.
def command_center(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a command-center dict (levels defined in COMMAND_CENTERS)."""
    return create_building(COMMAND_CENTERS, level, tile_position, player_id)
def crystalite_farm(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a crystalite-farm dict (levels defined in CRYSTALITE_FARMS)."""
    return create_building(CRYSTALITE_FARMS, level, tile_position, player_id)
def adamantite_mine(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create an adamantite-mine dict (levels defined in ADAMANTITE_MINES)."""
    return create_building(ADAMANTITE_MINES, level, tile_position, player_id)
def crystalite_silo(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a crystalite-silo dict (levels defined in CRYSTALITE_SILOS)."""
    return create_building(CRYSTALITE_SILOS, level, tile_position, player_id)
def adamantite_storage(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create an adamantite-storage dict (levels defined in ADAMANTITE_STORAGES)."""
    return create_building(ADAMANTITE_STORAGES, level, tile_position, player_id)
def vault(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a vault dict (levels defined in VAULTS)."""
    return create_building(VAULTS, level, tile_position, player_id)
def laboratory(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a laboratory dict (levels defined in LABORATORIES)."""
    return create_building(LABORATORIES, level, tile_position, player_id)
def craft_pad(level: int, tile_position: 'list[int]', player_id: int) -> dict:
    """Create a craft-pad dict (levels defined in CRAFT_PADS)."""
    return create_building(CRAFT_PADS, level, tile_position, player_id)
| {
"repo_name": "CheckiO/EoC-battle-mocks",
"path": "battle_mocks/buildings.py",
"copies": "1",
"size": "5196",
"license": "mit",
"hash": 5375180759368006000,
"line_mean": 27.5494505495,
"line_max": 86,
"alpha_frac": 0.630100077,
"autogenerated": false,
"ratio": 2.67421513124035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8802028661060459,
"avg_score": 0.0004573094359781491,
"num_lines": 182
} |
__all__ = ['CommandKey', 'CommandGroup', 'CommandPoolKey']
@classmethod
def _create_flyweight_instance_if_needed(cls, *args, **kargs):
    """
    Flyweight __new__: return the cached instance registered under the
    interned name, creating and caching it on first use.
    """
    # Injected as __new__ by @registry. Because __new__ receives the class
    # as its first positional argument and the classmethod binding prepends
    # the class again, args[0] is the class and args[1] is the name.
    assert len(args) > 1
    # NOTE(review): `intern` is a Python 2 builtin; Python 3 moved it to
    # sys.intern — confirm the target interpreter version.
    name = intern(args[1])
    # NOTE: the candidate instance is constructed eagerly even when a cached
    # one already exists; setdefault then discards the new one.
    # `cls.__instances` is not name-mangled here (module-level function), so
    # it matches the attribute assigned by registry() below.
    return cls.__instances.setdefault(name, super(type(cls), cls).__new__(*args, **kargs))
@classmethod
def _get_flyweights(cls):
    """Return all registered flyweight instances (injected as `all`)."""
    return cls.__instances.values()
@classmethod
def _get_flyweights_name(cls):
    """Return the names of all registered flyweights (injected as `names`)."""
    return cls.__instances.keys()
@classmethod
def _clear_flyweights(cls):
    """
    Call clear() on every registered flyweight that supports it, then
    empty the registry itself (injected as `clear_all`).
    """
    for instance in cls.all():
        clear = getattr(instance, "clear", None)
        if callable(clear):
            clear()
    cls.__instances.clear()
def registry(decoree):
    """
    Class decorator turning `decoree` into a flyweight registry.

    Installs a per-class instance cache, routes construction through a
    caching __new__ (one shared instance per name), and adds the
    all()/names()/clear_all() accessors defined above.
    """
    # NOTE: the helper functions live at module level, so `cls.__instances`
    # inside them is NOT name-mangled and matches the attribute set here.
    decoree.__instances = dict()
    decoree.__new__ = _create_flyweight_instance_if_needed
    decoree.all = _get_flyweights
    decoree.names = _get_flyweights_name
    decoree.clear_all = _clear_flyweights
    return decoree
class Key(object):
    """
    Base class for named keys whose names are interned, enabling fast
    identity-based comparison and dictionary lookup.
    """
    def __init__(self, name=None):
        # name is mandatory; the None default only exists to yield a clearer
        # assertion failure instead of a TypeError.
        assert name is not None
        assert isinstance(name, str)
        # NOTE(review): `intern` is a Python 2 builtin; Python 3 moved it to
        # sys.intern — confirm the target interpreter version.
        self.name = intern(name)
    def __repr__(self):
        return "<%s:'%s'>" % (self.__class__.__name__, self.name)
# Flyweight key classes: one shared instance per interned name (see the
# @registry decorator above).
@registry
class CommandKey(Key): pass
@registry
class CommandPoolKey(Key): pass
@registry
class CommandGroup(Key): pass
| {
"repo_name": "sboisson/thickskin",
"path": "src/thickskin/registry.py",
"copies": "1",
"size": "1265",
"license": "mit",
"hash": -808220304685587600,
"line_mean": 22.8679245283,
"line_max": 90,
"alpha_frac": 0.6545454545,
"autogenerated": false,
"ratio": 3.3733333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527878787833334,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Command', 'Parameter', 'CommandOutcome']
from typing import List, Dict, Any, Optional, Type, Generic, \
TypeVar, Iterator
import random
import logging
import attr
from .connection import Message
from .specification import Specification
from .configuration import Configuration
from .state import State
from .environment import Environment
from .valueRange import ValueRange
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
T = TypeVar('T')
# contains all command types, indexed by their unique identifiers
_UID_TO_COMMAND_TYPE = {} # type: Dict[str, Type[Command]]
@attr.s(frozen=True)
class Parameter(Generic[T]):
    """
    An immutable description of a single command parameter: its name and
    the range of values it may take.
    """
    name = attr.ib(type=str)
    values = attr.ib(type=ValueRange)
    def generate(self, rng: random.Random) -> T:
        """
        Returns a randomly-generated value for this parameter.
        """
        return self.values.sample(rng)
    @property
    def type(self) -> Type[T]:
        """
        The underlying type of this parameter.
        """
        return self.values.type
    @property
    def _field(self) -> str:
        """
        The name of the field that stores the value of this parameter.
        """
        # The leading double underscore is literal here: the attribute is
        # always read/written via getattr/setattr with this string, so no
        # name mangling is involved.
        return "__{}".format(self.name)
class CommandMeta(type):
    """
    Metaclass for Command definitions.

    Validates the class namespace ('name', 'to_message', 'parameters',
    'specifications'), freezes the parameter set, synthesizes a read-only
    property per parameter, and registers each concrete command class under
    a unique identifier in _UID_TO_COMMAND_TYPE.
    """
    def __new__(mcl,
                cls_name: str,
                bases,  # FIXME
                ns: Dict[str, Any]
                ):
        # the abstract base (Command itself) is exempt from validation
        if bases == (object,):
            return super().__new__(mcl, cls_name, bases, ns)
        # build an exception message template
        tpl_err = "failed to build definition for command [{}]: "
        tpl_err = tpl_err.format(cls_name) + "{}"
        # obtain name
        logger.debug("obtaining command name")
        try:
            name_command = ns['name']  # type: str
        except KeyError:
            msg = "missing 'name' field in Command definition"
            raise TypeError(tpl_err.format(msg))
        if not isinstance(name_command, str):
            t = type(name_command)
            msg = "expected 'name' field to be str but was {}".format(t)
            raise TypeError(tpl_err.format(msg))
        if name_command == '':
            msg = "'name' field must not be an empty string"
            raise TypeError(tpl_err.format(msg))
        logger.debug("obtained command name: %s", name_command)
        # ensure "to_message" is implemented
        if 'to_message' not in ns:
            msg = "missing 'to_message' method in Command definition"
            raise TypeError(tpl_err.format(msg))
        # build parameters
        logger.debug("building command parameters")
        try:
            params = ns['parameters']  # type: List[Parameter]
        except KeyError:
            msg = "missing 'parameters' field in Command definition"
            raise TypeError(tpl_err.format(msg))
        # FIXME build a FrozenDict
        ns['parameters'] = frozenset(params)
        logger.debug("built command parameters")
        # build next_allowed (defaults to a wildcard allowing any command)
        logger.debug("building command next_allowed")
        try:
            na = ns['next_allowed']  # type: List[str]
        except KeyError:
            na = ['*']
        # FIXME
        ns['next_allowed'] = na
        # build specifications
        logger.debug("building specifications")
        try:
            specs = ns['specifications']  # type: List[Specification]
        except KeyError:
            msg = "missing 'specifications' field in Command definition"
            raise TypeError(tpl_err.format(msg))
        if not (isinstance(specs, list) and all(isinstance(s, Specification) for s in specs)):  # noqa: pycodestyle
            msg = "expected 'specifications' field to be List[Specification]"
            raise TypeError(tpl_err.format(msg))
        # BUG FIX: the original used `specs is []`, an identity comparison
        # against a fresh list, which is always False; check emptiness.
        if not specs:
            msg = "all commands must provide at least one Specification"
            raise TypeError(tpl_err.format(msg))
        if len(set(s.name for s in specs)) != len(specs):
            msg = "each specification must be given a unique name"
            raise TypeError(tpl_err.format(msg))
        # TODO type check specs
        # FIXME build a FrozenDict
        ns['specifications'] = list(specs)
        logger.debug("built specifications")
        # expose each parameter as a read-only property; the field name is
        # bound as a default argument to avoid the late-binding closure trap
        logger.debug("constructing properties")
        for param in params:
            field = param._field
            getter = lambda self, f=field: getattr(self, f)
            ns[param.name] = property(getter)
        logger.debug("constructed properties")
        return super().__new__(mcl, cls_name, bases, ns)

    def __init__(cls, cls_name: str, bases, ns: Dict[str, Any]):
        # the abstract base (Command itself) is exempt from registration
        if bases == (object,):
            return super().__init__(cls_name, bases, ns)
        # build an exception message template
        tpl_err = "failed to build definition for command [{}]: "
        tpl_err = tpl_err.format(cls_name) + "{}"
        # obtain or generate a unique identifier
        if 'uid' in ns:
            uid = ns['uid']
            if not isinstance(uid, str):
                t = type(uid)
                msg = "expected 'uid' field to be str but was {}".format(t)
                raise TypeError(tpl_err.format(msg))
            logger.debug("using provided UID: %s", uid)
        else:
            uid = '{}.{}'.format(cls.__module__, cls.__qualname__)
        # ensure uid is not an empty string
        if uid == '':
            msg = "'uid' field must not be an empty string"
            raise TypeError(tpl_err.format(msg))
        # ensure that uid isn't already in use
        if uid in _UID_TO_COMMAND_TYPE:
            # BUG FIX: the original used a '%s' placeholder with str.format
            # and raised via the undefined name `tpl_error` (NameError).
            msg = "'uid' already in use [{}]".format(uid)
            raise TypeError(tpl_err.format(msg))
        # BUG FIX: the original wrote the uid into `ns`, which has no effect
        # here (the class object already exists), and wrapped it in a
        # property whose getter lacked a `self` parameter. Store the uid
        # directly on the class; Command.uid resolves it via the class.
        cls.uid = uid
        logger.debug("registering command type [%s] with UID [%s]", cls, uid)
        _UID_TO_COMMAND_TYPE[uid] = cls
        logger.debug("registered command type [%s] with UID [%s]", cls, uid)
        return super().__init__(cls_name, bases, ns)
class Command(object, metaclass=CommandMeta):
    """
    Base class for concrete command definitions.

    Subclasses declare 'name', 'parameters' and 'specifications' (and
    implement 'to_message'); CommandMeta validates the declaration and
    registers the class by its UID. Instances are constructed with one
    keyword argument per declared parameter.
    """
    def __init__(self, *args, **kwargs) -> None:
        """
        Builds a command instance from keyword arguments, one per declared
        parameter.

        Raises:
            TypeError: if positional arguments are supplied, a declared
                parameter is missing, or unexpected keyword arguments are
                given.
        """
        cls_name = self.__class__.__name__
        params = self.__class__.parameters  # type: FrozenSet[Parameter]
        # were any positional arguments passed to the constructor?
        if args:
            msg = "constructor [{}] accepts no positional arguments but {} {} given"  # noqa: pycodestyle
            msg = msg.format(cls_name,
                             "was" if len(args) == 1 else "were",
                             len(args))
            raise TypeError(msg)
        # set values for each variable
        for p in params:
            try:
                val = kwargs[p.name]
            except KeyError:
                msg = "missing keyword argument [{}] to constructor [{}]"
                msg = msg.format(p.name, cls_name)
                raise TypeError(msg)
            # TODO perform run-time type checking?
            setattr(self, p._field, val)
        # did we pass any unexpected keyword arguments?
        if len(kwargs) > len(params):
            actual_args = set(n for n in kwargs)
            expected_args = set(p.name for p in params)
            unexpected_arguments = list(actual_args - expected_args)
            msg = "unexpected keyword arguments [{}] supplied to constructor [{}]"  # noqa: pycodestyle
            msg = msg.format('; '.join(unexpected_arguments), cls_name)
            raise TypeError(msg)

    @classmethod
    def get_next_allowed(cls, system: Type['System']) -> List[Type['Command']]:
        """
        Returns the command types that may follow this one, resolved against
        the given system's command table. A leading '*' in next_allowed
        means "all commands except the listed exclusions".
        """
        if not cls.next_allowed:
            return []
        if cls.next_allowed[0] == '*':  # all allowed
            return [v for k, v in system.commands.items()
                    if k not in cls.next_allowed[1:]]
        allowed = [system.commands[n] for n in cls.next_allowed]
        return allowed

    def __eq__(self, other: 'Command') -> bool:
        """
        Two commands are equal iff they are of the same type and agree on
        every parameter value. Comparing different command types raises.
        """
        if type(self) != type(other):
            msg = "illegal comparison of commands: [{}] vs. [{}]"
            msg = msg.format(self.uid, other.uid)
            raise Exception(msg)  # FIXME use HoustonException
        for param in self.__class__.parameters:
            if self[param.name] != other[param.name]:
                return False
        return True

    def __getitem__(self, name: str) -> Any:
        """
        Returns the value of the parameter with the given name.

        Raises:
            KeyError: if no parameter with that name is declared.
        """
        # FIXME use frozendict
        try:
            params = self.__class__.parameters
            param = next(p for p in params if p.name == name)
        except StopIteration:
            msg = "no parameter [{}] in command [{}]"
            # BUG FIX: the original discarded the result of str.format and
            # raised the unformatted template string.
            msg = msg.format(name, self.__class__.__name__)
            raise KeyError(msg)
        return getattr(self, param._field)

    def __hash__(self) -> int:
        # hash on the command type's UID plus all parameter values, so
        # equal commands hash equally
        params = (self.uid,)
        params += tuple(self[p.name] for p in self.__class__.parameters)
        return hash(params)

    @property
    def uid(self) -> str:
        """
        The UID of the type of this command.
        """
        return self.__class__.uid

    @property
    def name(self) -> str:
        """
        The name of the type of this command.
        """
        return self.__class__.__name__

    @staticmethod
    def from_dict(d: Dict[str, Any]) -> 'Command':
        """
        Reconstructs a command from its to_dict() form, dispatching on the
        'type' UID.
        """
        name_typ = d['type']
        typ = _UID_TO_COMMAND_TYPE[name_typ]  # type: Type[Command]
        params = d['parameters']
        return typ(**params)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this command to a dict of its type UID and parameter
        values (the inverse of from_dict).
        """
        fields = {
            'type': self.uid,
            'parameters': {}
        }  # type: Dict[str, Any]
        for param in self.__class__.parameters:
            fields['parameters'][param.name] = getattr(self, param._field)
        return fields

    def __repr__(self) -> str:
        fields = self.to_dict()['parameters']
        for (name, val) in fields.items():
            if isinstance(val, float):
                s = "{:.3f}".format(val)
            else:
                s = str(val)
            # BUG FIX: the original stored `val` back into the dict,
            # silently discarding the formatted string.
            fields[name] = s
        s = '; '.join(["{}: {}".format(k, v) for (k, v) in fields.items()])
        s = "{}({})".format(self.__class__.__name__, s)
        return s

    def dispatch(self,
                 sandbox: 'Sandbox',
                 state: State,
                 environment: Environment,
                 configuration: Configuration
                 ) -> None:
        """
        Responsible for invoking this command.
        Parameters:
            sandbox: the sandbox for the system under test.
            state: the state of the system immediately prior to the
                call to this method
            environment: a description of the environment in which the
                command is being performed
            configuration: the configuration of the system under test.
        """
        raise NotImplementedError

    def timeout(self,
                state: State,
                environment: Environment,
                config: Configuration
                ) -> float:
        """
        Responsible for calculating the maximum time that this command
        should take to finish its execution.
        Parameters:
            state: the state of the system prior to the execution of the
                command.
            environment: the state of the environment prior to the execution
                of the command.
            config: the configuration of the system under test.
        Returns:
            Maximum length of time (in seconds) that the command may take to
            complete its execution.
        """
        spec = self.resolve(state, environment, config)
        return spec.timeout(self, state, environment, config)

    def resolve(self,
                state: State,
                environment: Environment,
                config: Configuration
                ) -> Specification:
        """
        Returns the specification that the system is expected to satisfy when
        completing this command in a given state, environment, and
        configuration.

        Raises:
            Exception: if no specification's precondition is satisfied.
        """
        for spec in self.__class__.specifications:
            if spec.precondition.is_satisfied(self,
                                              state,
                                              None,
                                              environment,
                                              config):
                return spec
        raise Exception("failed to resolve specification")

    def to_message(self) -> Message:
        """
        Transforms this command into a message that can be sent to the system
        under test.
        """
        raise NotImplementedError

    def __iter__(self) -> Iterator[Parameter]:
        """Iterates over the parameters declared for this command type."""
        yield from self.__class__.parameters

    @classmethod
    def generate(cls, rng: random.Random) -> 'Command':
        """
        Builds a command instance with randomly-generated values for every
        declared parameter.
        """
        params = {p.name: p.generate(rng) for p in cls.parameters}
        command = cls(**params)
        return command
@attr.s(frozen=True)
class CommandOutcome(object):
    """
    Describes the outcome of a command execution in terms of the state of the
    system before and after the execution.
    """
    command = attr.ib(type=Command)
    successful = attr.ib(type=bool)
    start_state = attr.ib(type=State)
    end_state = attr.ib(type=State)
    time_elapsed = attr.ib(type=float)  # FIXME use time delta

    @staticmethod
    def from_json(jsn: Dict[str, Any]) -> 'CommandOutcome':
        """
        Reconstructs an outcome from its to_json() form.
        """
        # BUG FIX: Command serializes via from_dict/to_dict (there is no
        # Command.from_json); State keeps its from_json/to_json naming.
        return CommandOutcome(Command.from_dict(jsn['command']),
                              jsn['successful'],
                              State.from_json(jsn['start_state']),
                              State.from_json(jsn['end_state']),
                              jsn['time_elapsed'])

    def to_json(self) -> Dict[str, Any]:
        """
        Serializes this outcome to a JSON-compatible dict (the inverse of
        from_json).
        """
        # BUG FIX: attrs stores the fields under their plain names; the
        # original read name-mangled attributes (self.__command etc.) that
        # never exist, raising AttributeError, and called the nonexistent
        # command.to_json() instead of to_dict().
        return {'command': self.command.to_dict(),
                'successful': self.successful,
                'start_state': self.start_state.to_json(),
                'end_state': self.end_state.to_json(),
                'time_elapsed': self.time_elapsed}
| {
"repo_name": "squaresLab/Houston",
"path": "houston/command.py",
"copies": "1",
"size": "14054",
"license": "mit",
"hash": 1978678848537620500,
"line_mean": 34.0473815461,
"line_max": 115,
"alpha_frac": 0.5518001992,
"autogenerated": false,
"ratio": 4.3987480438184665,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000041562759767248545,
"num_lines": 401
} |
__all__ = ['COMMON_MIME_MAPS', 'BaseStaticSiteRenderer']
# Since mimetypes.get_extension() gets the "first known" (alphabetically),
# we get supid behavior like "text/plain" mapping to ".bat". This list
# overrides some file types we will surely use, to eliminate a call to
# mimetypes.get_extension() except in unusual cases.
# Content-Type -> output file extension overrides (see comment above).
COMMON_MIME_MAPS = {
    "text/plain": ".txt",
    "text/html": ".html",
    "text/javascript": ".js",
    "application/javascript": ".js",
    "text/json": ".json",  # nonstandard but seen in the wild
    "application/json": ".json",
    "text/css": ".css",
}
class BaseStaticSiteRenderer(object):
    """
    Default renderer: writes the URLs returned by get_paths() out to static
    files on the filesystem, obtaining each view's response through the
    Django test client.
    """

    @classmethod
    def initialize_output(cls):
        """
        One-time setup of the output directory BEFORE any rendering happens
        (write a config file, create directories, provision an external
        resource, begin an atomic deploy, etc.).

        The management command invokes this once, before iterating over the
        renderer instances.
        """
        pass

    @classmethod
    def finalize_output(cls):
        """
        One-time teardown of the output directory AFTER all rendering is
        done (finish the config file, fix permissions, trigger an external
        "deploy" step, finalize an atomic deploy, etc.).

        The management command invokes this once, after iterating over the
        renderer instances.
        """
        pass

    def get_paths(self):
        """Subclasses must return the iterable of URL paths to render."""
        raise NotImplementedError

    @property
    def paths(self):
        """Memoizing wrapper around get_paths()."""
        cached = getattr(self, "_paths", None)
        if not cached:
            cached = self.get_paths()
            self._paths = cached
        return cached

    def render_path(self, path=None, view=None):
        """Subclasses must render a single path to its output file."""
        raise NotImplementedError

    def generate(self):
        """Render every path reported by get_paths()."""
        for url_path in self.paths:
            self.render_path(url_path)
| {
"repo_name": "mtigas/django-medusa",
"path": "django_medusa/renderers/base.py",
"copies": "4",
"size": "2156",
"license": "mit",
"hash": 6490160948408028000,
"line_mean": 30.2463768116,
"line_max": 75,
"alpha_frac": 0.6321892393,
"autogenerated": false,
"ratio": 4.346774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6978963432848388,
"avg_score": null,
"num_lines": null
} |
#All community-made commands should go here. Write your command
#and make a pull request, and I'll try to implement it. I'll
#provide some examples here. Otherwise check out atbCommands.py
import requests
import time
import sys
import datetime
import csv
import random
import re
import json
import traceback
import os
import psutil
import telegram
from .. import atbSendFunctions as atbSendFunctions
from .. import atbMiscFunctions as atbMiscFunctions
from pydblite import Base #The PyDbLite stuff
import builtins
#If you make your own python files for processing data, put them
#In the community folder and import them here:
####
# Per-chat runtime state keyed by chat_id (admin mute flag, spam
# timestamps, etc.); populated lazily inside process().
chatInstanceArray = {}
def process(bot, chat_id, parsedCommand, messageText, currentMessage, update, instanceAge):
    """
    Dispatch a community-contributed command for one incoming message.

    Returns True when `parsedCommand` matched a known command (even if the
    spam check suppressed the reply), False otherwise or when handling
    raised an exception (which is printed, not re-raised).
    """
    # NOTE(review): the mutable default `keyboardLayout=[]` is shared across
    # calls; safe only if atbSendFunctions.sendText never mutates it — confirm.
    def sendText(givenText, replyingMessageID=0, keyboardLayout=[]):
        if not chatInstanceArray[chat_id]['adminDisable']:
            atbSendFunctions.sendText(bot, chat_id, givenText, replyingMessageID, keyboardLayout)
    def sendPhoto(imageName):
        atbSendFunctions.sendPhoto(bot, chat_id, "images/" + imageName)
    def sendSticker(stickerName):
        atbSendFunctions.sendSticker(bot, chat_id, "stickers/" + stickerName)
    def passSpamCheck():
        return atbMiscFunctions.spamCheck(chat_id, currentMessage.date)
    # Lazily initialise the per-chat state dict on first contact.
    try:
        chatInstanceArray[chat_id]['checking'] = True
    except Exception:
        chatInstanceArray[chat_id] = {'checking': True, 'adminDisable': False, 'spamTimestamp': 0, 'shottyTimestamp': 0, 'shottyWinner': "", 'checkingVehicles': False, 'whoArray': []}
    try:
        #commands go here, in this if-elif block. Python doesn't have switch statements.
        if parsedCommand == "/mom": #sends "MOM GET THE CAMERA"
            sendText("MOM GET THE CAMERA")
        elif atbMiscFunctions.isMoom(parsedCommand): #sends M {random number of Os} M
            if passSpamCheck(): #use this to prevent spamming of a command
                response = "M"
                for i in range(0, random.randint(3, 75)):
                    response += "O"
                sendText(response + "M")
        elif parsedCommand == "/swag":
            sendText("swiggity swag, what\'s in the bag?")
        elif parsedCommand == "/worms":
            if passSpamCheck():
                response = "hey man can I borrow your "
                if len(messageText) > len("/worms "):
                    response += messageText[len("/worms "):]
                else:
                    response += "worms"
                sendText(response)
        elif parsedCommand == "/shh" or parsedCommand == "/shhh":
            if passSpamCheck():
                sendPhoto("shhh.jpg")
        elif parsedCommand == "/father":
            if (random.randint(0, 1)):
                sendText("You ARE the father!")
            else:
                sendText("You are NOT the father!")
        elif parsedCommand == "/rip": #sends "I can't believe that [name (defaults to sender's name)] is fucking dead."
            if passSpamCheck():
                response = "I can't believe that "
                if len(messageText) > len("/rip "):
                    if (messageText[len("/rip "):] == "me"):
                        response += currentMessage.from_user.first_name
                    else:
                        response += messageText[len("/rip "):]
                else:
                    response += currentMessage.from_user.first_name
                response += " is fucking dead."
                sendText(response)
        elif parsedCommand == "/scrub":
            checkingStats = False
            try:
                if currentMessage.text.lower().split()[1] == "stats":
                    db = Base('chatStorage/scrub.pdl') #The path to the DB
                    db.create('username', 'name', 'counter', mode="open")
                    K = list()
                    for user in db:
                        K.append(user)
                    sortedK = sorted(K, key=lambda x: int(x['counter']), reverse=True)
                    outputString = "SCRUBBIEST LEADERBOARD:\n"
                    for user in sortedK:
                        pluralString = " SCRUB POINT"
                        if not(int(user['counter']) == 1):
                            pluralString += "S"
                        pluralString += "\n"
                        outputString += user['name'].upper() + ": " + str(user['counter']) + pluralString
                    sendText(outputString)
                    checkingStats = True
            except IndexError:
                pass
            # Only these two user ids (Matt Gomez / the bot owner) may award points.
            if not checkingStats and (currentMessage.from_user.id == 169883788 or currentMessage.from_user.id == 44961843):
                db = Base('chatStorage/scrub.pdl')
                db.create('username', 'name', 'counter', mode="open")
                userWasFound = False
                valueSuccessfullyChanged = False
                for user in db:
                    if int(user['username']) == currentMessage.reply_to_message.from_user.id:
                        db.update(user, counter=int(user['counter']) + 1)
                        valueSuccessfullyChanged = True
                        userWasFound = True
                        db.commit()
                if not userWasFound:
                    db.insert(currentMessage.reply_to_message.from_user.id, currentMessage.reply_to_message.from_user.first_name, 1)
                    db.commit()
                if valueSuccessfullyChanged or not userWasFound:
                    sendText("Matt Gomez awarded a scrub point to " + currentMessage.reply_to_message.from_user.first_name + ".")
            elif not checkingStats:
                sendText("AdamTestBot, powered by ScrubSoft (C)")
        elif parsedCommand == "/hiss":
            checkingStats = False
            try:
                if currentMessage.text.lower().split()[1] == "stats":
                    db = Base('chatStorage/hiss.pdl')
                    db.create('username', 'name', 'counter', mode="open")
                    K = list()
                    for user in db:
                        K.append(user)
                    sortedK = sorted(K, key=lambda x: int(x['counter']), reverse=True)
                    outputString = "Hiss Leaderboard:\n"
                    for user in sortedK:
                        pluralString = " hiss"
                        if not(int(user['counter']) == 1):
                            pluralString += "es"
                        pluralString += "\n"
                        outputString += user['name'] + ": " + str(user['counter']) + pluralString
                    sendText(outputString)
                    checkingStats = True
            except IndexError:
                pass
            # Only these two user ids (Robyn / the bot owner) may record a hiss.
            if not checkingStats and (currentMessage.from_user.id == 122526873 or currentMessage.from_user.id == 44961843):
                db = Base('chatStorage/hiss.pdl')
                db.create('username', 'name', 'counter', mode="open")
                userWasFound = False
                valueSuccessfullyChanged = False
                for user in db:
                    if int(user['username']) == currentMessage.reply_to_message.from_user.id:
                        db.update(user, counter=int(user['counter']) + 1)
                        valueSuccessfullyChanged = True
                        userWasFound = True
                        db.commit()
                if not userWasFound:
                    db.insert(currentMessage.reply_to_message.from_user.id, currentMessage.reply_to_message.from_user.first_name, 1)
                    db.commit()
                if valueSuccessfullyChanged or not userWasFound:
                    sendText("Robyn hissed at " + currentMessage.reply_to_message.from_user.first_name + ".")
        elif parsedCommand == "/water":
            if passSpamCheck():
                if (random.randint(0, 1) == 0):
                    sendSticker("water.webp")
                else:
                    sendSticker("hoboken_water.webp")
        elif parsedCommand == "/sysinfo":
            if passSpamCheck():
                cpu = []
                for x in range(3):
                    cpu.append(psutil.cpu_percent(interval=1))
                cpuavg = round(sum(cpu) / float(len(cpu)), 1)
                memuse = psutil.virtual_memory()[2]
                diskuse = psutil.disk_usage('/')[3]
                # BUG FIX: user-facing typo "uasge" -> "usage".
                sendText("The CPU usage is " + str(cpuavg) + "%, the memory usage is " + str(memuse) + "%, and " + str(diskuse) + "% of the disk has been used.")
        #this command should go last:
        elif parsedCommand == "/community": #add your command to this list
            response = "/mom - get the camera\n"
            response += "/mooom (any number of \'o\'s) - call for help\n"
            response += "/swag - more memes\n"
            response += "/worms - can I borrow them?\n"
            response += "/shh(h) - here, be relaxed\n"
            response += "/father - are you the father?\n"
            response += "/rip (something) - I can't believe they're dead!\n"
            # BUG FIX: user-facing typo "how many time" -> "how many times".
            response += "/hiss stats - see how many times Robyn has hissed at people\n"
            response += "/scrub or /scrub stats - see who sponsors me or how many times Matt Gomez has called you a scrub\n"
            response += "/water - does this water look brown to you?\n"
            response += "/sysinfo - Gets server performance info."
            sendText(response)
        else:
            return False
        return True
    except Exception:
        # Best-effort dispatch: report the traceback and treat the message
        # as unhandled rather than crashing the bot loop.
        print(traceback.format_exc())
        return False
| {
"repo_name": "magomez96/AdamTestBot",
"path": "src/Community/atbCommunity.py",
"copies": "1",
"size": "9617",
"license": "mit",
"hash": -7709301319690334000,
"line_mean": 42.3198198198,
"line_max": 183,
"alpha_frac": 0.5388374753,
"autogenerated": false,
"ratio": 4.295221080839661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.533405855613966,
"avg_score": null,
"num_lines": null
} |
#All community-made commands should go here. Write your command
#and make a pull request, and I'll try to implement it. I'll
#provide some examples here. Otherwise check out atbCommands.py
import requests
import time
import sys
import datetime
import csv
import random
import re
import json
import traceback
import os
import telegram
from threading import Thread
from urllib.request import urlopen
try:
import psutil
except:
pass
from .. import atbSendFunctions as atbSendFunctions
from .. import atbMiscFunctions as atbMiscFunctions
from . import atbQuote
from pydblite import Base #The PyDbLite stuff
import builtins
#If you make your own python files for processing data, put them
#In the community folder and import them here:
####
# Per-chat runtime state keyed by chat_id (admin mute flag, spam
# timestamps, etc.); populated lazily inside process().
chatInstanceArray = {}
def process(bot, chat_id, parsedCommand, messageText, currentMessage, update, instanceAge):
def sendText(givenText, replyingMessageID=0, keyboardLayout=[]):
if not chatInstanceArray[chat_id]['adminDisable']:
atbSendFunctions.sendText(bot, chat_id, givenText, replyingMessageID, keyboardLayout)
def sendPhoto(imageName):
atbSendFunctions.sendPhoto(bot, chat_id, "images/" + imageName)
def sendSticker(stickerName):
atbSendFunctions.sendSticker(bot, chat_id, "stickers/" + stickerName)
def sendAudio(audioName):
atbSendFunctions.sendAudio(bot, chat_id, "audio/" + audioName)
def sendVideo(videoName):
atbSendFunctions.sendVideo(bot, chat_id, "videos/" + videoName)
def passSpamCheck(timeDelay=15):
return atbMiscFunctions.spamCheck(chat_id, currentMessage.date, timeDelay)
try:
chatInstanceArray[chat_id]['checking'] = True
except Exception:
chatInstanceArray[chat_id] = {'checking': True, 'adminDisable': False, 'spamTimestamp': 0, 'shottyTimestamp': 0, 'shottyWinner': "", 'checkingVehicles': False, 'whoArray': []}
try:
#commands go here, in this if-elif block. Python doesn't have switch statements.
if parsedCommand == "/mom": #sends "MOM GET THE CAMERA"
sendText("MOM GET THE CAMERA")
elif atbMiscFunctions.isMoom(parsedCommand): #sends M {random number of Os} M
if passSpamCheck(): #use this to prevent spamming of a command
response = "M"
for i in range(0, random.randint(3, 75)):
response += "O"
sendText(response + "M")
elif parsedCommand == "/swag":
sendText("swiggity swag, what\'s in the bag?")
elif parsedCommand == "/worms":
if passSpamCheck():
response = "hey man can I borrow your "
if len(messageText) > len("/worms "):
response += messageText[len("/worms "):]
else:
response += "worms"
sendText(response)
elif parsedCommand == "/shh" or parsedCommand == "/shhh":
if passSpamCheck():
sendPhoto("shhh.jpg")
elif parsedCommand == "/father":
if (random.randint(0, 1)):
sendText("You ARE the father!")
else:
sendText("You are NOT the father!")
elif parsedCommand == "/rip": #sends "I can't believe that [name (defaults to sender's name)] is fucking dead."
if passSpamCheck():
response = "I can't believe that "
while "my " in messageText:
messageText = messageText.replace("my ", currentMessage.from_user.first_name + "\'s ", 1)
if len(messageText) > len("/rip "):
if messageText[len("/rip "):] == "me":
response += currentMessage.from_user.first_name
else:
response += messageText[len("/rip "):]
else:
response += currentMessage.from_user.first_name
response += " is fucking dead."
sendText(response)
elif parsedCommand == "/rips": #sends "I can't believe that [name (defaults to sender's name)] is fucking dead."
if passSpamCheck():
response = "I can't believe that "
while "my " in messageText:
messageText = messageText.replace("my ", currentMessage.from_user.first_name + "\'s ", 1)
if len(messageText) > len("/rip "):
if messageText[len("/rip "):] == "me":
response += currentMessage.from_user.first_name
else:
response += messageText[len("/rip "):]
else:
response += currentMessage.from_user.first_name
response += " are fucking dead."
sendText(response)
elif parsedCommand == "/scrub":
checkingStats = False
try:
if currentMessage.text.lower().split()[1] == "stats":
db = Base('chatStorage/scrub.pdl') #The path to the DB
db.create('username', 'name', 'counter', mode="open")
K = list()
for user in db:
K.append(user)
sortedK = sorted(K, key=lambda x: int(x['counter']), reverse=True)
outputString = "SCRUBBIEST LEADERBOARD:\n"
for user in sortedK:
pluralString = " SCRUB POINT"
if not(int(user['counter']) == 1):
pluralString += "S"
pluralString += "\n"
outputString += user['name'].upper() + ": " + str(user['counter']) + pluralString
sendText(outputString)
checkingStats = True
except IndexError:
pass
if not checkingStats and (currentMessage.from_user.id == 169883788 or currentMessage.from_user.id == 44961843):
db = Base('chatStorage/scrub.pdl')
db.create('username', 'name', 'counter', mode="open")
userWasFound = False
valueSuccessfullyChanged = False
try:
pointsAdded = float(currentMessage.text.lower().split()[1])
except (IndexError, ValueError):
pointsAdded = 1
for user in db:
if int(user['username']) == currentMessage.reply_to_message.from_user.id:
db.update(user, counter=int(user['counter']) + pointsAdded)
valueSuccessfullyChanged = True
userWasFound = True
db.commit()
if not userWasFound:
db.insert(currentMessage.reply_to_message.from_user.id, currentMessage.reply_to_message.from_user.first_name, pointsAdded)
db.commit()
if valueSuccessfullyChanged or not userWasFound:
sendText("Matt Gomez awarded " + str(pointsAdded) + " scrub point(s) to " + currentMessage.reply_to_message.from_user.first_name + ".")
elif not checkingStats:
sendText("AdamTestBot, powered by ScrubSoft (C)")
elif parsedCommand == "/hiss":
checkingStats = False
try:
if currentMessage.text.lower().split()[1] == "stats":
db = Base('chatStorage/hiss.pdl')
db.create('username', 'name', 'counter', mode="open")
K = list()
for user in db:
K.append(user)
sortedK = sorted(K, key=lambda x: int(x['counter']), reverse=True)
outputString = "Hiss Leaderboard:\n"
for user in sortedK:
pluralString = " hiss"
if not(int(user['counter']) == 1):
pluralString += "es"
pluralString += "\n"
outputString += user['name'] + ": " + str(user['counter']) + pluralString
sendText(outputString)
checkingStats = True
except IndexError:
pass
if not checkingStats and (currentMessage.from_user.id == 122526873 or currentMessage.from_user.id == 44961843):
db = Base('chatStorage/hiss.pdl')
db.create('username', 'name', 'counter', mode="open")
userWasFound = False
valueSuccessfullyChanged = False
for user in db:
if int(user['username']) == currentMessage.reply_to_message.from_user.id:
db.update(user, counter=int(user['counter']) + 1)
valueSuccessfullyChanged = True
userWasFound = True
db.commit()
if not userWasFound:
db.insert(currentMessage.reply_to_message.from_user.id, currentMessage.reply_to_message.from_user.first_name, 1)
db.commit()
if valueSuccessfullyChanged or not userWasFound:
sendText("Robyn hissed at " + currentMessage.reply_to_message.from_user.first_name + ".")
elif parsedCommand == "/water":
if passSpamCheck():
if (random.randint(0, 1) == 0):
sendSticker("water.webp")
else:
sendSticker("hoboken_water.webp")
elif parsedCommand == "/sysinfo":
if passSpamCheck():
cpu = []
for x in range(3):
cpu.append(psutil.cpu_percent(interval=1))
cpuavg = round(sum(cpu) / float(len(cpu)), 1)
memuse = psutil.virtual_memory()[2]
diskuse = psutil.disk_usage('/')[3]
sendText("The CPU uasge is " + str(cpuavg) + "%, the memory usage is " + str(memuse) + "%, and " + str(diskuse) + "% of the disk has been used.")
elif parsedCommand == "/grill":
if passSpamCheck():
sendPhoto("grill.jpg")
elif parsedCommand == "/pants":
if passSpamCheck():
sendText("Shit! I can't find my pants.")
elif parsedCommand == "/broken":
if passSpamCheck():
if len(messageText) > len("/broken "):
message = str(currentMessage.from_user.username) + " says: @magomez96 my " + messageText[len("/broken "):] + " is broken!"
else:
message = str(currentMessage.from_user.username) + " says: @magomez96 my shit is broken!"
sendText(message)
elif parsedCommand == "/quote":
if passSpamCheck(5):
try:
sendText(atbQuote.getQuoteAt(chat_id, int(messageText.split()[1])))
except:
sendText(atbQuote.getQuote(chat_id))
elif parsedCommand == "/quotefrom":
print("\n" + messageText[len("/quotefrom "):])
if passSpamCheck(5):
sendText(atbQuote.getQuoteFrom(chat_id, messageText[len("/quotefrom "):]))
elif parsedCommand == "/quoteremove":
if currentMessage.from_user.username == "AdamZG" or currentMessage.from_user.username == "magomez96" or currentMessage.from_user.username == "Peribot":
if atbQuote.quoteRemove(chat_id, int(messageText.split()[1])):
sendText("Quote " + messageText.split()[1] + " removed")
else:
sendText("That quote doesn't exist or you never added any quotes")
elif parsedCommand == "/quoteadd":
if currentMessage.reply_to_message == None and messageText == "/quoteadd":
sendText("Try replying to a message with this command to add it to the quote list")
else:
userLastName = ""
try:
userLastName = " " + currentMessage.from_user.last_name
except:
pass
try:
replyUserLastName = ""
try:
replyUserLastName = " " + currentMessage.reply_to_message.from_user.last_name
except:
replyUserLastName = ""
quote_resp = atbQuote.quoteAdd(chat_id, '"' + currentMessage.reply_to_message.text + '"', (currentMessage.reply_to_message.from_user.first_name + replyUserLastName).strip())
sendText(quote_resp)
except(Exception):
quoteParse = currentMessage.text.split("-")
quote = "-".join(quoteParse[:-1])
quote = quote[len("/quoteadd "):].strip()
quote_resp = atbQuote.quoteAdd(chat_id, quote, quoteParse[-1].strip(), (currentMessage.from_user.first_name + userLastName).strip())
sendText(quote_resp)
elif parsedCommand == "/quotelegacy":
if passSpamCheck(5):
sendText(atbQuote.getQuoteLegacy(chat_id))
elif parsedCommand == "/pogo":
def getPokeInfo():
start = time.time()
try:
nf = urlopen("https://pgorelease.nianticlabs.com/plfe/", timeout = 3)
page = nf.read()
end = time.time()
nf.close()
except TimeoutError:
end=time.time()
rTime = round((end - start) * 1000)
if (rTime < 800):
sendText("Pokémon GO is UP\n{}ms Response Time".format(rTime))
elif (rTime >= 800 and rTime < 3000):
sendText("Pokémon GO's servers are struggling\n{}ms Response Time".format(rTime))
elif (rTime >= 3000):
sendText("Pokémon GO is DOWN\n{}ms Response Time".format(rTime))
except Exception as e:
sendText("Pokémon GO's servers are really not doing well\nHere's what I got back\n" + e.__str__())
myThread = Thread(target=getPokeInfo)
myThread.start()
elif parsedCommand == "/discourse":
if passSpamCheck():
if (random.randint(0, 1) == 0):
sendPhoto("discourse.jpg")
else:
sendText("http://thediscour.se")
#this command should go last:
elif parsedCommand == "/community": #add your command to this list
response = "/mom - get the camera\n"
response += "/mooom (any number of \'o\'s) - call for help\n"
response += "/swag - more memes\n"
response += "/worms - can I borrow them?\n"
response += "/shh(h) - here, be relaxed\n"
response += "/father - are you the father?\n"
response += "/rip(s) (something) - I can't believe they're dead! (The s is for plural dead things)\n"
response += "/hiss stats - see how many time Robyn has hissed at people\n"
response += "/scrub or /scrub stats - see who sponsors me or how many times Matt Gomez has called you a scrub\n"
response += "/water - does this water look brown to you?\n"
response += "/sysinfo - Gets server performance info.\n"
response += "/grill - I'm a George Foreman grill!\n"
response += "/pants - Pants?\n"
response += "/broken - Tell Matt Gomez your stuff is broken.\n"
response += "/quote - Pulls a random quote from a list. Reply to a message with /quoteadd to add one!\n"
response += "/pogo - Want to know if Pokémon GO's servers are up?\n"
response += "/discourse - Break in case of spicy discourse"
sendText(response)
else:
return False
return True
except Exception:
print(traceback.format_exc())
return False
| {
"repo_name": "noisemaster/AdamTestBot",
"path": "src/Community/atbCommunity.py",
"copies": "1",
"size": "16512",
"license": "mit",
"hash": 6134199270723779000,
"line_mean": 43.7257617729,
"line_max": 193,
"alpha_frac": 0.5209910947,
"autogenerated": false,
"ratio": 4.368086795448531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030443557495928165,
"num_lines": 361
} |
__all__ = ['compare_datasets']
import pandas as pd
import numpy as np
SUFFIX_DF1 = '_df1'
SUFFIX_DF2 = '_df2'
def equals_condition(df1_columns_to_compare, df2_columns_to_compare):
    """Build a pandas ``eval`` expression asserting pairwise column equality.

    E.g. ``['a', 'b']`` and ``['x', 'y']`` -> ``'a == x and b == y'``.
    Returns an empty string when the column lists are empty.
    """
    pairs = zip(df1_columns_to_compare, df2_columns_to_compare)
    return ' and '.join('%s == %s' % (left, right) for left, right in pairs)
def not_exists_condition(df_columns_to_compare):
    """Build the expression ``c1 + c2 + ... != c1 + c2 + ...``.

    NOTE(review): this helper is unused in this module; presumably the
    self-inequality is meant to flag rows whose sum is NaN (missing
    values) -- confirm intended use before relying on it.
    """
    summed = ' + '.join(df_columns_to_compare)
    return summed + ' != ' + summed
def compare_datasets(df1, df2, df1_keys, df2_keys,
                     df1_columns_to_compare=None,
                     df2_columns_to_compare=None):
    """Outer-join *df1* and *df2* on their keys and classify each row.

    The returned frame carries a ``result`` column with one of:
    ``equals`` (keys matched and every compared column pair is equal),
    ``both`` (keys matched but some compared value differs),
    ``left_only`` / ``right_only`` (row exists in only one frame).

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
    df1_keys, df2_keys : list of str
        Join key columns for each frame.
    df1_columns_to_compare, df2_columns_to_compare : list of str, optional
        Columns compared pairwise; default is every non-key column.

    Fix over the original: caller-supplied column lists are copied before
    the suffix rewrite below, so the caller's lists are no longer mutated
    in place.
    """
    if not df1_columns_to_compare:
        df1_columns_to_compare = list(df1.columns.difference(df1_keys))
    else:
        # Work on a copy: entries are rewritten in place below.
        df1_columns_to_compare = list(df1_columns_to_compare)
    if not df2_columns_to_compare:
        df2_columns_to_compare = list(df2.columns.difference(df2_keys))
    else:
        df2_columns_to_compare = list(df2_columns_to_compare)
    # Columns present in both frames receive pandas' merge suffixes, so the
    # compare lists must reference the suffixed names.
    for column in df1_columns_to_compare:
        if column in df2_columns_to_compare:
            df1_columns_to_compare[df1_columns_to_compare.index(column)] = column + SUFFIX_DF1
            df2_columns_to_compare[df2_columns_to_compare.index(column)] = column + SUFFIX_DF2
    df_result = pd.merge(df1, df2, how='outer', indicator=True,
                         suffixes=(SUFFIX_DF1, SUFFIX_DF2),
                         left_on=df1_keys, right_on=df2_keys)
    # `equals` is True where every compared pair matches; NaN comparisons
    # (rows present on only one side) evaluate False.
    df_result.eval('equals = ' + equals_condition(df1_columns_to_compare,
                                                  df2_columns_to_compare),
                   inplace=True)
    df_result['_merge'] = np.where(df_result['equals'],
                                   'equals',
                                   df_result['_merge'])
    df_result.drop(labels='equals', axis=1, inplace=True)
    df_result.rename(columns={'_merge': 'result'}, inplace=True)
    return df_result
| {
"repo_name": "marcusrehm/data-compare",
"path": "datacompare/__init__.py",
"copies": "1",
"size": "1929",
"license": "mit",
"hash": 1651145317202563300,
"line_mean": 36.8235294118,
"line_max": 94,
"alpha_frac": 0.5914981856,
"autogenerated": false,
"ratio": 3.3664921465968587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4457990332196859,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Composer', 'ComposerError']
from .error import MarkedError
from .events import * # NOQA
from .nodes import * # NOQA
class ComposerError(MarkedError):
    """Raised when the event stream cannot be composed into a node tree."""
    pass
class Composer:
    """Mixin that composes a parser event stream into a node tree.

    Relies on the consuming class for the event interface
    (``check_event``/``get_event``) and the resolver interface
    (``descend_resolver``/``ascend_resolver``/``resolve``); this class on
    its own holds no state.
    """
    def __init__(self):
        pass
    def check_node(self):
        """Return True if another document is available on the stream."""
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)
    def get_node(self):
        """Compose and return the next document's root node (None at stream end)."""
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()
    def get_single_node(self):
        """Compose the stream's only document; raise if a second one follows."""
        # Drop the STREAM-START event.
        self.get_event()
        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)
        # Drop the STREAM-END event.
        self.get_event()
        return document
    def compose_document(self):
        """Compose one document: DOCUMENT-START, root node, DOCUMENT-END."""
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        return node
    def compose_node(self, parent, index):
        """Dispatch on the pending event type to build a scalar/sequence/mapping node."""
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node()
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node()
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node()
        # NOTE(review): if none of the three event types is pending, `node`
        # is unbound and the return below raises NameError -- presumably the
        # parser guarantees one of them here; confirm.
        self.ascend_resolver()
        return node
    def compose_scalar_node(self):
        """Consume a ScalarEvent and return the corresponding ScalarNode."""
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == '!':
            # Non-specific tag: let the resolver pick one from the value.
            tag = self.resolve(ScalarNode, event.value, event.implicit, event.start_mark)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        return node
    def compose_sequence_node(self):
        """Consume SequenceStart..SequenceEnd events, composing each item."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
    def compose_mapping_node(self):
        """Consume MappingStart..MappingEnd events, composing key/value pairs."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            # Pairs are kept as a list, so duplicate keys are preserved.
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
| {
"repo_name": "wezhang/vim-setup",
"path": "bundle/powerline/powerline/lint/markedjson/composer.py",
"copies": "3",
"size": "3315",
"license": "apache-2.0",
"hash": -3205551130425678300,
"line_mean": 27.3333333333,
"line_max": 80,
"alpha_frac": 0.6995475113,
"autogenerated": false,
"ratio": 3.0779944289693595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.527754194026936,
"avg_score": null,
"num_lines": null
} |
__all__ = ["compute_postscript", "format_dt"]
import datetime
import random
import typing as T
MatcherValue = T.Optional[T.Union[range, int]]
Matcher = T.Tuple[MatcherValue, MatcherValue, MatcherValue, MatcherValue]
POSTSCRIPTS: T.Dict[Matcher, T.Union[str, T.List[str]]] = {
# New Year's Day
(1, 1, 0, range(31)): "\N{face with party horn and party hat} \N{party popper}",
# First day of the month
(None, 1, None, None): "\N{spiral calendar pad}\N{variation selector-16}",
# Halloween
(10, 31, None, None): "\N{jack-o-lantern}",
# Valentine's Day
(2, 14, None, None): "\N{two hearts}",
# Earth Day
(4, 22, None, None): [
"\N{earth globe americas}",
"\N{earth globe europe-africa}",
"\N{earth globe asia-australia}",
],
}
def compute_postscript(dt: datetime.datetime) -> T.Optional[str]:
    """Return the seasonal postscript for *dt*, or ``None`` if nothing matches.

    ``POSTSCRIPTS`` is scanned in insertion order and the first entry whose
    ``(month, day, hour, minute)`` matcher accepts *dt* wins. A matcher field
    of ``None`` accepts any value, a ``range`` tests membership, and an
    ``int`` requires equality. A list-valued postscript yields a random pick.
    """
    def _accepts(matcher, value):
        # None is a wildcard; a range tests membership; an int tests equality.
        if matcher is None:
            return True
        if isinstance(matcher, range):
            return value in matcher
        return matcher == value

    for (month, day, hour, minute), candidate in POSTSCRIPTS.items():
        if (
            _accepts(month, dt.month)
            and _accepts(day, dt.day)
            and _accepts(hour, dt.hour)
            and _accepts(minute, dt.minute)
        ):
            if isinstance(candidate, list):
                return random.choice(candidate)
            return candidate
    return None
def format_dt(
    dt: datetime.datetime,
    *,
    shorten: bool = True,
    time_only: bool = False,
    include_postscript: bool = True,
) -> str:
    """Render *dt* as a human-friendly string.

    Before noon the 12-hour reading matches the 24-hour one, so when
    *shorten* is true the redundant ``(%I:%M %p)`` suffix is dropped. The
    date part (omitted when *time_only* is true) shows the day without a
    leading zero and appends the year only on January 1st. When
    *include_postscript* is true, a seasonal postscript (if any) is
    appended after the formatted string.
    """
    clock = "%H:%M" if shorten and dt.hour < 12 else "%H:%M (%I:%M %p)"
    if time_only:
        pattern = clock
    else:
        # Interpolate the day manually to avoid a leading zero.
        day_part = f"%B {dt.day}"
        if (dt.month, dt.day) == (1, 1):
            # Show the year on the first day of the year.
            day_part += ", %Y"
        pattern = f"{day_part} {clock}"
    rendered = dt.strftime(pattern)
    if not include_postscript:
        return rendered
    postscript = compute_postscript(dt)
    return rendered if postscript is None else f"{rendered} {postscript}"
| {
"repo_name": "slice/dogbot",
"path": "dog/ext/time/formatting.py",
"copies": "2",
"size": "2611",
"license": "mit",
"hash": -569603805019495940,
"line_mean": 28.0111111111,
"line_max": 84,
"alpha_frac": 0.5901953275,
"autogenerated": false,
"ratio": 3.5094086021505375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00026622030926191616,
"num_lines": 90
} |
__all__ = ('config', 'parse', 'ENGINES', 'data_dir')
import os
import re
import json
try:
from urllib.parse import urlparse # Python 3
except ImportError:
from urlparse import urlparse # Python 2
from dj_paas_env import provider
ENGINES = {
'postgres': 'django.db.backends.postgresql_psycopg2',
'postgresql': 'django.db.backends.postgresql_psycopg2',
'pgsql': 'django.db.backends.postgresql_psycopg2',
'mysql': 'django.db.backends.mysql',
'sqlite': 'django.db.backends.sqlite3',
}
re_keys = [r'.*DATABASE_URL', r'HEROKU_POSTGRESQL_.+_URL',
r'OPENSHIFT_.+_DB_URL', 'DOTCLOUD_.+_.*SQL_URL']
re_keys = list(map(re.compile, re_keys))
def config(default=None, engine=None):
    """Build a Django DATABASES entry from the detected PaaS environment.

    Scans ``os.environ`` for a database URL matching the known provider
    key patterns (``re_keys``); falls back to parsing *default* when none
    is found. *engine* optionally overrides the engine derived from the
    URL scheme.

    NOTE(review): the inner ``break`` only exits the ``re_keys`` loop, so
    the environment scan continues and the *last* matching variable wins
    when several database URLs are set -- confirm this is intended.
    """
    provider_detected = provider.detect()
    url = None
    if provider_detected == provider.DOTCLOUD:
        # dotCloud publishes its settings in a JSON file rather than real
        # environment variables; merge them in before scanning.
        with open('/home/dotcloud/environment.json', 'r') as f:
            os.environ.update(json.load(f))
    for key in os.environ:
        for re_key in re_keys:
            if re_key.match(key):
                url = os.environ[key]
                break
    if not url:
        return parse(default, engine)
    conf = parse(url, engine)
    if provider_detected == provider.OPENSHIFT:
        # On OpenShift, override NAME from provider-specific variables.
        if 'OPENSHIFT_POSTGRESQL_DB_URL' in os.environ:
            conf['NAME'] = os.environ['PGDATABASE']
        elif 'OPENSHIFT_MYSQL_DB_URL' in os.environ:
            conf['NAME'] = os.environ['OPENSHIFT_APP_NAME']
    return conf
def parse(url, engine=None):
    """Translate a database URL into a Django DATABASES dict.

    The special URLs ``sqlite://`` and ``sqlite://:memory:`` map to an
    in-memory SQLite database; any other URL is decomposed into scheme,
    credentials, host, port and database name. *engine*, when given,
    overrides the engine derived from the URL scheme.
    """
    if url in ('sqlite://:memory:', 'sqlite://'):
        return {
            'ENGINE': ENGINES['sqlite'],
            'NAME': ':memory:'
        }
    parts = urlparse(url)
    # Drop the leading '/' and anything after a '?' from the path.
    db_name = parts.path[1:].split('?', 2)[0]
    return {
        'ENGINE': engine if engine else ENGINES[parts.scheme],
        'NAME': db_name,
        'USER': parts.username or '',
        'PASSWORD': parts.password or '',
        'HOST': parts.hostname,
        'PORT': parts.port or ''
    }
def sqlite_dev():
    """Development helper: sqlite URL pointing inside the persistent data dir."""
    return 'sqlite:///' + os.path.join(data_dir(), 'database.sqlite3')
def data_dir(default='data'):
    """
    Return persistent data directory or ``default`` if not found

    Checks the OpenShift and Gondor environment variables (in that order),
    then falls back to dotCloud's conventional ``~/data`` location.

    Warning: Do not use this directory to store sqlite databases in producction
    """
    for env_var in ('OPENSHIFT_DATA_DIR', 'GONDOR_DATA_DIR'):
        if env_var in os.environ:
            return os.environ[env_var]
    if provider.detect() == provider.DOTCLOUD:
        return os.path.expanduser('~/data')
    return default
| {
"repo_name": "pbacterio/dj-paas-env",
"path": "dj_paas_env/database.py",
"copies": "1",
"size": "2488",
"license": "mit",
"hash": 5426331667121059000,
"line_mean": 29.7160493827,
"line_max": 79,
"alpha_frac": 0.606511254,
"autogenerated": false,
"ratio": 3.445983379501385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4552494633501385,
"avg_score": null,
"num_lines": null
} |
# All configuration for the CT monitor is done from this file!
# interval (in seconds) between updates
INTERVAL = 60
# Directories for various output files
OUTPUT_DIR = "output/"

# Output file for certificate data.
# Set to None to disable textfile writing
DEFAULT_CERT_FILE = None
# DEFAULT_CERT_FILE = OUTPUT_DIR + "cert_data.json"

# Set to None to disable database writing
# DOMAINS_FILE = OUTPUT_DIR + "domains.json"
DOMAINS_FILE = None

# Output file for issuer observations.
# Fix: the original chained assignment (ISSUERS_FILE = DOMAINS_FILE = ...)
# also rebound DOMAINS_FILE, silently undoing the explicit
# `DOMAINS_FILE = None` above and pointing domain output at the issuer log.
ISSUERS_FILE = OUTPUT_DIR + "issuers.log"

# Set to None to disable database output
# DB_PATH = './tmpdb/'
DB_PATH = None
MONITORED_DOMAINS = [
"*.liu.se",
"*.kth.se",
"*.nordu.net",
"*.sunet.se",
"*.dfri.se",
"*.iis.se",
]
MONITORED_ISSUERS = [
"Let's Encrypt",
]
# Some strings
ERROR_STR = "ERROR: "
# CT logs and associated keys
CTLOGS = [
{"name" : "pilot",
"url" : "https://ct.googleapis.com/pilot/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEfahLEimAoz2t01p3uMziiLOl/fHTDM0YDOhBRuiBARsV4UvxG2LdNgoIGLrtCzWE0J5APC2em4JlvR8EEEFMoA==",
"id" : "pLkJkLQYWBSHuxOizGdwCjw1mAT5G9+443fNDsgN3BA=",
"build" : False},
{"name" : "plausible",
"url" : "https://plausible.ct.nordu.net/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9UV9+jO2MCTzkabodO2F7LM03MUBc8MrdAtkcW6v6GA9taTTw9QJqofm0BbdAsbtJL/unyEf0zIkRgXjjzaYqQ==",
"id" : "qucLfzy41WbIbC8Wl5yfRF9pqw60U1WJsvd6AwEE880=",
"build" : True},
{"name" : "digicert",
"url" : "https://ct1.digicert-ct.com/log/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAkbFvhu7gkAW6MHSrBlpE1n4+HCFRkC5OLAjgqhkTH+/uzSfSl8ois8ZxAD2NgaTZe1M9akhYlrYkes4JECs6A==",
"id" : "VhQGmi/XwuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0=",
"build" : False},
{"name" : "izenpe",
"url" : "https://ct.izenpe.com/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEJ2Q5DC3cUBj4IQCiDu0s6j51up+TZAkAEcQRF6tczw90rLWXkJMAW7jr9yc92bIKgV8vDXU4lDeZHvYHduDuvg==",
"id" : "dGG0oJz7PUHXUVlXWy52SaRFqNJ3CbDMVkpkgrfrQaM=",
"build" : False},
{"name" : "certly",
"url" : "https://log.certly.io/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAECyPLhWKYYUgEc+tUXfPQB4wtGS2MNvXrjwFCCnyYJifBtd2Sk7Cu+Js9DNhMTh35FftHaHu6ZrclnNBKwmbbSA==",
"id" : "zbUXm3/BwEb+6jETaj+PAC5hgvr4iW/syLL1tatgSQA=",
"build" : False},
{"name" : "aviator",
"url" : "https://ct.googleapis.com/aviator/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1/TMabLkDpCjiupacAlP7xNi0I1JYP8bQFAHDG1xhtolSY1l4QgNRzRrvSe8liE+NPWHdjGxfx3JhTsN9x8/6Q==",
"id" : "aPaY+B9kgr46jO65KB1M/HFRXWeT1ETRCmesu09P+8Q=",
"build" : False},
{"name" : "rocketeer",
"url" : "https://ct.googleapis.com/rocketeer/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIFsYyDzBi7MxCAC/oJBXK7dHjG+1aLCOkHjpoHPqTyghLpzA9BYbqvnV16mAw04vUjyYASVGJCUoI3ctBcJAeg==",
"id": "7ku9t3XOYLrhQmkfq+GeZqMPfl+wctiDAMR7iXqo/cs=",
"build" : False},
{"name" : "symantec",
"url" : "https://ct.ws.symantec.com/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEluqsHEYMG1XcDfy1lCdGV0JwOmkY4r87xNuroPS2bMBTP01CEDPwWJePa75y9CrsHEKqAy8afig1dpkIPSEUhg==",
"id" : "3esdK3oNT6Ygi4GtgWhwfi6OnQHVXIiNPRHEzbbsvsw=",
"build" : False},
{"name" : "venafi",
"url" : "https://ctlog.api.venafi.com/",
"key" : "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAolpIHxdSlTXLo1s6H1OCdpSj/4DyHDc8wLG9wVmLqy1lk9fz4ATVmm+/1iN2Nk8jmctUKK2MFUtlWXZBSpym97M7frGlSaQXUWyA3CqQUEuIJOmlEjKTBEiQAvpfDjCHjlV2Be4qTM6jamkJbiWtgnYPhJL6ONaGTiSPm7Byy57iaz/hbckldSOIoRhYBiMzeNoA0DiRZ9KmfSeXZ1rB8y8X5urSW+iBzf2SaOfzBvDpcoTuAaWx2DPazoOl28fP1hZ+kHUYvxbcMjttjauCFx+JII0dmuZNIwjfeG/GBb9frpSX219k1O4Wi6OEbHEr8at/XQ0y7gTikOxBn/s5wQIDAQAB",
"id" : "rDua7X+pZ0dXFZ5tfVdWcvnZgQCUHpve/+yhMTt1eC0=",
"build" : False},
{"name" : "wosign",
"url" : "https://ct.wosign.com/",
"key" : "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1+wvK3VPN7yjQ7qLZWY8fWrlDCqmwuUm/gx9TnzwOrzi0yLcAdAfbkOcXG6DrZwV9sSNYLUdu6NiaX7rp6oBmw==",
"id" : "nk/3PcPOIgtpIXyJnkaAdqv414Y21cz8haMadWKLqIs=",
"build" : False},
]
| {
"repo_name": "gizmachi/ct_tools",
"path": "monitor_conf.py",
"copies": "1",
"size": "4038",
"license": "bsd-3-clause",
"hash": -5014394728623999000,
"line_mean": 37.4571428571,
"line_max": 407,
"alpha_frac": 0.7288261516,
"autogenerated": false,
"ratio": 1.8522935779816514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3081119729581651,
"avg_score": null,
"num_lines": null
} |
# all configuration settings come from config.py
try:
import config
except ImportError:
print("Please copy template-config.py to config.py and configure appropriately !"); exit();
# this can be used to activate debugging
# debug_communication=1
debug_communication=0
try:
# for Python2
from Tkinter import *
except ImportError:
# for Python3
from tkinter import *
import json
import time
import urllib3
def send_to_hcp():
    """POST the current slider value to the HCP IoT MMS endpoint as one async message."""
    global debug_communication
    global http
    global url
    global headers
    global s1
    timestamp=int(time.time())
    # print(timestamp)
    # Build the JSON payload by hand; message type and device come from config.
    body='{"mode":"async", "messageType":"' + str(config.message_type_id_From_device) + '", "messages":[{"sensor":"slider_desktop", "value":"' + str(s1.get()) + '", "timestamp":' + str(timestamp) + '}]}'
    # print(body)
    r = http.urlopen('POST', url, body=body, headers=headers)
    if (debug_communication == 1):
        print("send_to_hcp():" + str(r.status))
        print(r.data)
def poll_from_hcp():
    """GET queued messages from HCP and apply them to the UI.

    Supported opcodes: "display" (replace the text widget content) and
    "led" (set the LED checkbox from operand "0"/"1"). Decoding problems
    are reported on stdout and the poller keeps running.
    """
    global debug_communication
    global http
    global url
    global headers
    global t1
    global f4_cb1
    r = http.urlopen('GET', url, headers=headers)
    if (debug_communication == 1):
        print("poll_from_hcp():" + str(r.status))
        print(r.data)
    # Wrap the raw array response in an object so json.loads accepts it uniformly.
    json_string='{"all_messages":'+(r.data).decode("utf-8")+'}'
    # print(json_string)
    try:
        json_string_parsed=json.loads(json_string)
        # print(json_string_parsed)
        # take care: if multiple messages arrive in 1 payload - their order is last in / first out - so we need to traverse in reverese order
        try:
            messages_reversed=reversed(json_string_parsed["all_messages"])
            for single_message in messages_reversed:
                # print(single_message)
                payload=single_message["messages"][0]
                opcode=payload["opcode"]
                operand=payload["operand"]
                # print(opcode)
                # print(operand)
                # now do things depending on the opcode
                if (opcode == "display"):
                    # Text widget is kept read-only; enable it just for the update.
                    t1.config(state=NORMAL)
                    t1.delete(1.0, END)
                    t1.insert(END, operand)
                    t1.config(state=DISABLED)
                if (opcode == "led"):
                    f4_cb1.config(state=NORMAL)
                    if (operand == "0"):
                        f4_cb1.deselect()
                    if (operand == "1"):
                        f4_cb1.select()
                    f4_cb1.config(state=DISABLED)
        except TypeError:
            print("Problem decoding the message " + (r.data).decode("utf-8") + " retrieved with poll_from_hcp()! Can and will continue though.")
    except ValueError:
        print("Problem decoding the message " + (r.data).decode("utf-8") + " retrieved with poll_from_hcp()! Can and will continue though.")
def handle_slider(event):
    """Scale callback: slider at 100 enables the periodic send, at 0 disables it."""
    global do_send
    global s1
    global cb1
    value=s1.get()
    if (value == 100):
        do_send=1
        # The "Sending now" checkbox is display-only; unlock it to update.
        cb1.config(state=NORMAL)
        cb1.select()
        cb1.config(state=DISABLED)
        # print("Start sending now !")
    if (value == 0):
        do_send=0
        # print("Stop sending now !")
        cb1.config(state=NORMAL)
        cb1.deselect()
        cb1.config(state=DISABLED)
    # print("slider value: ", value)
def handle_exit_button():
    """Terminate the process when the Exit button is pressed."""
    exit()
def my_send_timer():
    """Tk timer: push the slider value to HCP once per second while sending is enabled."""
    global root
    global do_send
    # print("my_send_timer")
    if (do_send == 1):
        send_to_hcp()
    # Re-arm the timer for the next second.
    root.after(1000, my_send_timer)
def my_poll_timer():
    """Tk timer: poll HCP for inbound messages once per second."""
    global root
    # print("my_poll_timer")
    poll_from_hcp()
    # Re-arm the timer for the next second.
    root.after(1000, my_poll_timer)
def build_and_start_ui_with_timers():
    """Build the Tk window (slider, status widgets, text area), start both
    one-second timers and enter the Tk main loop (blocks until exit)."""
    global root
    global s1
    global cb1
    global f4_cb1
    global t1
    root=Tk()
    root.resizable(FALSE,FALSE)
    root.title("IoT Starterkit - Device Simulator")
    root.geometry('+50+50')
    # --- outbound section: slider plus "Sending now" indicator ---
    l1=Label(root, text="Data that the device sends", font = "TkDefaultFont 14 bold")
    l1.pack()
    l2=Label(root, text="Slide to 100 to start sending values once per second, slide to 0 to stop sending")
    l2.pack()
    s1=Scale(root, from_=0, to=100, orient=HORIZONTAL, command = handle_slider)
    s1.configure(length=500)
    s1.pack()
    cb1=Checkbutton(root, text="Sending now", state=DISABLED)
    cb1.pack()
    # Thin horizontal separator between the send and receive sections.
    f1=Frame(root, height=3, width=500)
    f1.pack()
    f2=Frame(root, height=1, width=500, bg="black")
    f2.pack()
    f3=Frame(root, height=3, width=500)
    f3.pack()
    # --- inbound section: remote-controlled LED and message display ---
    l3=Label(root, text="Data that the device receives", font = "TkDefaultFont 14 bold")
    l3.pack()
    f4=Frame(root, width=500)
    f4.pack()
    f4.l1=Label(f4, text="Remote controlled LED (on/off)")
    f4.l1.pack(side=LEFT)
    f4_cb1=Checkbutton(f4, state=DISABLED)
    f4_cb1.pack(side=LEFT)
    l4=Label(root, text="Messages sent to the device")
    l4.pack()
    t1=Text(root, height=10, width=70, borderwidth=2, relief=SUNKEN, state=DISABLED)
    # t1=Text(root, height=10, width=50, borderwidth=2)
    t1.pack()
    t1.config(state=NORMAL)
    t1.insert(END, "Nothing received yet")
    t1.config(state=DISABLED)
    b1=Button(root, text="Exit", command=handle_exit_button)
    b1.pack()
    my_send_timer()
    my_poll_timer()
    root.mainloop()
# === main starts here ===============================================
# disable InsecureRequestWarning if your are working without certificate verification
# see https://urllib3.readthedocs.org/en/latest/security.html
# be sure to use a recent enough urllib3 version if this fails
try:
    urllib3.disable_warnings()
# NOTE(review): bare except deliberately keeps the app running on old
# urllib3 versions, at the cost of hiding unrelated errors.
except:
    print("urllib3.disable_warnings() failed - get a recent enough urllib3 version to avoid potential InsecureRequestWarning warnings! Can and will continue though.")
# use with or without proxy
if (config.proxy_url == ''):
    http = urllib3.PoolManager()
else:
    http = urllib3.proxy_from_url(config.proxy_url)
# Device-specific MMS endpoint, built from the configured account/landscape.
url='https://iotmms' + config.hcp_account_id + config.hcp_landscape_host + '/com.sap.iotservices.mms/v1/api/http/data/' + str(config.device_id)
headers = urllib3.util.make_headers(user_agent=None)
# use with authentication
headers['Authorization'] = 'Bearer ' + config.oauth_credentials_for_device
headers['Content-Type'] = 'application/json;charset=utf-8'
do_send=0
build_and_start_ui_with_timers()
| {
"repo_name": "iwonahahn/iot-starterkit",
"path": "neo/examples/python/iot-starterkit-for-desktop/iot_starterkit_desktop.py",
"copies": "3",
"size": "5718",
"license": "apache-2.0",
"hash": -1024619214055302700,
"line_mean": 26.0995260664,
"line_max": 200,
"alpha_frac": 0.6843301854,
"autogenerated": false,
"ratio": 2.90253807106599,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086868256465991,
"avg_score": null,
"num_lines": null
} |
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
project = u'pygorithm'
version = release = u'1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
latex_documents = [
('index', 'pygorithm.tex', u"Pygorithm",
u'Omkar Pathak', 'manual'),
]
# Auto-Doc options
autodoc_member_order = 'bysource' # alternatively 'alphabetical' (default) or 'groupwise'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pygorithm', u"Pygorithm",
[u'Omkar Pathak'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pygorithm', u'Pygorithm documentation',
u'Omkar Pathak', 'pygorithm documentation', 'One line description of project.',
'Miscellaneous'),
]
| {
"repo_name": "OmkarPathak/pygorithm",
"path": "docs/conf.py",
"copies": "1",
"size": "2070",
"license": "mit",
"hash": 4564602931861495300,
"line_mean": 30.3636363636,
"line_max": 89,
"alpha_frac": 0.6637681159,
"autogenerated": false,
"ratio": 3.8121546961325965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4975922812032596,
"avg_score": null,
"num_lines": null
} |
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PCL'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = { 'rightsidebar' : 'true' }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Home'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
html_sidebars = {
'**': [],
'using/windows': [],
}
html_show_copyright = False
html_show_sphinx = False
html_add_permalinks = None

# Sphinx's `needs_sphinx` option must be a version *string* (e.g. '1.0');
# the original float 1.0 is compared against Sphinx's version string and
# raises TypeError under Python 3.
needs_sphinx = '1.0'

file_insertion_enabled = True
raw_enabled = True
| {
"repo_name": "otherlab/pcl",
"path": "doc/overview/content/conf.py",
"copies": "19",
"size": "4367",
"license": "bsd-3-clause",
"hash": -7510533759598691000,
"line_mean": 31.3481481481,
"line_max": 80,
"alpha_frac": 0.7137623082,
"autogenerated": false,
"ratio": 3.777681660899654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011048088052019638,
"num_lines": 135
} |
__all__ = ['ConsoleReactor']
import sys
from netrepr import NetRepr, RemoteObjectPool, RemoteObjectReference
from Foundation import *
class ConsoleReactor(NSObject):
    def init(self):
        """ObjC-style designated initializer; state is populated once a
        connection is established."""
        self = super(ConsoleReactor, self).init()
        self.pool = None
        self.netReprCenter = None
        self.connection = None
        # Pending callbacks keyed by sequence number (see deferCallback...).
        self.commands = {}
        return self
    def connectionEstablished_(self, connection):
        """Set up the remote-object pool and netrepr codec for a new connection."""
        #NSLog(u'connectionEstablished_')
        self.connection = connection
        self.pool = RemoteObjectPool(self.writeCode_)
        self.netReprCenter = NetRepr(self.pool)
    def connectionClosed_(self, connection):
        """Drop all per-connection state when the peer goes away."""
        #NSLog(u'connectionClosed_')
        self.connection = None
        self.pool = None
        self.netReprCenter = None
    def writeCode_(self, code):
        """Ship a code string to the peer; repr() makes it one line-delimited literal."""
        #NSLog(u'writeCode_')
        self.connection.writeBytes_(repr(code) + '\n')
    def netEval_(self, s):
        """Evaluate an incoming netrepr expression in the pool namespace.

        NOTE(review): eval of wire data -- only safe with a trusted peer.
        """
        #NSLog(u'netEval_')
        return eval(s, self.pool.namespace, self.pool.namespace)
    def lineReceived_fromConnection_(self, lineReceived, connection):
        """Decode one wire line into a command tuple and dispatch it."""
        #NSLog(u'lineReceived_fromConnection_')
        code = lineReceived.rstrip()
        if not code:
            return
        # Push a pool scope so remote refs created while handling this
        # command are released when we pop, even on error.
        self.pool.push()
        command = map(self.netEval_, eval(code))
        try:
            self.handleCommand_(command)
        finally:
            self.pool.pop()
    def handleCommand_(self, command):
        """Dispatch command[0] to the matching handle<Name>Command: method."""
        #NSLog(u'handleCommand_')
        basic = command[0]
        sel = 'handle%sCommand:' % (basic.capitalize())
        cmd = command[1:]
        if not self.respondsToSelector_(sel):
            NSLog(u'%r does not respond to %s', self, command)
        else:
            # XXX - this crashes PyObjC??
            # self.performSelector_withObject_(sel, cmd)
            getattr(self, sel.replace(':', '_'))(cmd)
def handleRespondCommand_(self, command):
self.doCallback_sequence_args_(
self.commands.pop(command[0]),
command[0],
map(self.netEval_, command[1:]),
)
def sendResult_sequence_(self, rval, seq):
nr = self.netReprCenter
code = '__result__[%r] = %s' % (seq, nr.netrepr(rval))
self.writeCode_(code)
def sendException_sequence_(self, e):
nr = self.netReprCenter
code = 'raise ' + nr.netrepr_exception(e)
print "forwarding:", code
self.writeCode_(code)
def doCallback_sequence_args_(self, callback, seq, args):
nr = self.netReprCenter
try:
rval = callback(*args)
except Exception, e:
self.sendException_sequence_(e, seq)
else:
self.sendResult_sequence_(rval, seq)
def deferCallback_sequence_value_(self, callback, seq, value):
self.commands[seq] = callback
self.writeCode_('pipe.respond(%r, netrepr(%s))' % (seq, value))
def handleExpectCommand_(self, command):
#NSLog(u'handleExpectCommand_')
seq = command[0]
name = command[1]
args = command[2:]
netrepr = self.netReprCenter.netrepr
rval = None
code = None
if name == 'RemoteConsole.raw_input':
self.doCallback_sequence_args_(raw_input, seq, args)
elif name == 'RemoteConsole.write':
self.doCallback_sequence_args_(sys.stdout.write, seq, args)
elif name == 'RemoteConsole.displayhook':
obj = args[0]
def displayhook_respond(reprobject):
print reprobject
def displayhook_local(obj):
if obj is not None:
displayhook_respond(repr(obj))
if isinstance(obj, RemoteObjectReference):
self.deferCallback_sequence_value_(displayhook_respond, seq, 'repr(%s)' % (netrepr(obj),))
else:
self.doCallback_sequence_args_(displayhook_local, seq, args)
elif name.startswith('RemoteFileLike.'):
fh = getattr(sys, args[0])
meth = getattr(fh, name[len('RemoteFileLike.'):])
self.doCallback_sequence_args_(meth, seq, args[1:])
elif name == 'RemoteConsole.initialize':
self.doCallback_sequence_args_(lambda *args:None, seq, args)
else:
self.doCallback_sequence_args_(NSLog, seq, [u'%r does not respond to expect %r', self, command,])
def close(self):
if self.connection is not None:
self.writeCode_('raise SystemExit')
self.pool = None
self.netReprCenter = None
self.connection = None
self.commands = None
| {
"repo_name": "ariabuckles/pyobjc-core",
"path": "Examples/NonFunctional/RemotePyInterpreter/ConsoleReactor.py",
"copies": "2",
"size": "4588",
"license": "mit",
"hash": -2420811932974356000,
"line_mean": 34.2923076923,
"line_max": 109,
"alpha_frac": 0.5876198779,
"autogenerated": false,
"ratio": 4.006986899563318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5594606777463318,
"avg_score": null,
"num_lines": null
} |
"""All constants/enumerations are available directly from `mal_scraper.x`"""
from collections import namedtuple
from enum import Enum, unique
Retrieved = namedtuple('Retrieved', 'meta data')
"""Successful result of retrieving a web page.

.. py:attribute:: meta

    A dict of metadata::

        {
            'id_ref': (object) ID of the media depending on the context,
            'when': (datetime) Our best guess on the date of this information,
        }

.. py:attribute:: data

    A dict of data; its contents vary with the media.
"""
@unique
class ConsumptionStatus(Enum):
    """Where a media item sits in a person's list, e.g. currently watching."""

    consuming = 'CONSUMING'
    completed = 'COMPLETED'
    on_hold = 'ONHOLD'
    dropped = 'DROPPED'
    backlog = 'BACKLOG'

    @classmethod
    def mal_code_to_enum(cls, code):
        """Translate MAL's numeric status code into a member, or None."""
        # MAL skips 5; unknown codes fall through to None.
        by_code = {
            1: cls.consuming,
            2: cls.completed,
            3: cls.on_hold,
            4: cls.dropped,
            6: cls.backlog,
        }
        return by_code.get(code)
@unique
class AiringStatus(Enum):
    """Broadcast state of a media item."""

    pre_air = 'PREAIR'  # e.g. https://myanimelist.net/anime/3786
    ongoing = 'ONGOING'
    finished = 'FINISHED'

    @classmethod
    def mal_to_enum(cls, text):
        """Map MAL's human-readable airing text onto a member, or None."""
        key = text.strip().lower()
        by_text = {
            'not yet aired': cls.pre_air,
            'finished airing': cls.finished,
            'currently airing': cls.ongoing,
        }
        return by_text.get(key)
class Season(Enum):
    """Season of the year, ordered Winter, Spring, Summer, Autumn."""

    # No @unique here: `fall` is a deliberate alias of `autumn`.
    # _order_ = 'WINTER SPRING SUMMER AUTUMN'  # py3.6? The order in a year
    winter = 'WINTER'
    spring = 'SPRING'
    summer = 'SUMMER'
    autumn = fall = 'AUTUMN'

    @classmethod
    def mal_to_enum(cls, text):
        """Return the member for a MAL season string, or None."""
        key = text.strip().lower()
        # MAL uses the US term "fall" for autumn.
        by_text = {
            'winter': cls.winter,
            'spring': cls.spring,
            'summer': cls.summer,
            'fall': cls.autumn,
        }
        return by_text.get(key)
class Format(Enum):
    """Media format of a media item (TV series, film, OVA, ...)."""

    # No @unique here: `movie` is a deliberate alias of `film`.
    tv = 'TV'
    film = movie = 'FILM'  # https://myanimelist.net/anime/5
    ova = 'OVA'  # https://myanimelist.net/anime/44
    special = 'SPECIAL'
    ona = 'ONA'  # (Original Net Animation) https://myanimelist.net/anime/574
    music = 'MUSIC'  # Seriously? https://myanimelist.net/anime/731
    unknown = 'UNKNOWN'  # https://myanimelist.net/anime/33352

    @classmethod
    def mal_to_enum(cls, text):
        """Return the member for a MAL format string, or None."""
        key = text.strip().lower()
        by_text = {
            'tv': cls.tv,
            'movie': cls.film,
            'ova': cls.ova,
            'special': cls.special,
            'ona': cls.ona,
            'music': cls.music,
            'unknown': cls.unknown,
        }
        return by_text.get(key)
@unique
class AgeRating(Enum):
    """Age rating of a media item.

    MAL Ratings are dubious; ``None`` means unknown.

    Reference: https://myanimelist.net/forum/?topicid=16816
    """

    mal_none = 'NONE'
    mal_g = 'ALL'
    mal_pg = 'CHILDREN'
    mal_t = 'TEEN'
    mal_r1 = 'RESTRICTEDONE'
    mal_r2 = 'RESTRICTEDTWO'
    mal_r3 = 'RESTRICTEDTHREE'

    @classmethod
    def mal_to_enum(cls, text):
        """Return the member for a MAL rating string, or None."""
        key = text.strip().lower()
        by_text = {
            'none': cls.mal_none,
            'g': cls.mal_g,
            'pg': cls.mal_pg,
            'pg-13': cls.mal_t,
            'r - 17+': cls.mal_r1,
            'r+': cls.mal_r2,
            'rx': cls.mal_r3,
        }
        return by_text.get(key)
| {
"repo_name": "QasimK/mal-scraper",
"path": "src/mal_scraper/consts.py",
"copies": "1",
"size": "3731",
"license": "mit",
"hash": -4276475426759896600,
"line_mean": 27.0526315789,
"line_max": 81,
"alpha_frac": 0.5719646207,
"autogenerated": false,
"ratio": 3.2359063313096272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4307870952009627,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
from __future__ import annotations
import enum
import logging
import bellows.zigbee.application
import voluptuous as vol
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
import zigpy_znp.zigbee.application
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.number import DOMAIN as NUMBER
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
import homeassistant.helpers.config_validation as cv
from .typing import CALLABLE_T
# --- ATTR_*: dictionary keys used in ZHA device-info payloads, events and
# --- service calls.
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_NAMES = "endpoint_names"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NEIGHBORS = "neighbors"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
# Keys for the IAS warning-device (siren/strobe) service parameters.
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
# Serial baud rates offered when configuring the radio.
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
# --- CHANNEL_*: names of the ZHA channels that wrap Zigbee clusters ---
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ANALOG_OUTPUT = "analog_output"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_ACE = "ias_ace"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL  # level channel shares the "level" key above
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"  # ZONE kept as a short alias
# --- Cluster command direction / type labels ---
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
# Home Assistant entity platforms that ZHA can create entities on.
PLATFORMS = (
    ALARM,
    BINARY_SENSOR,
    CLIMATE,
    COVER,
    DEVICE_TRACKER,
    FAN,
    LIGHT,
    LOCK,
    NUMBER,
    SENSOR,
    SWITCH,
)
# --- CONF_*: configuration option keys ---
CONF_ALARM_MASTER_CODE = "alarm_master_code"
CONF_ALARM_FAILED_TRIES = "alarm_failed_tries"
CONF_ALARM_ARM_REQUIRES_CODE = "alarm_arm_requires_code"
CONF_BAUDRATE = "baudrate"
CONF_CUSTOM_QUIRKS_PATH = "custom_quirks_path"
CONF_DATABASE = "database_path"
CONF_DEFAULT_LIGHT_TRANSITION = "default_light_transition"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_IDENTIFY_ON_JOIN = "enable_identify_on_join"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
# Voluptuous schema for general ZHA options.
CONF_ZHA_OPTIONS_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_DEFAULT_LIGHT_TRANSITION): cv.positive_int,
        vol.Required(CONF_ENABLE_IDENTIFY_ON_JOIN, default=True): cv.boolean,
    }
)
# Voluptuous schema for the alarm-control-panel options.
CONF_ZHA_ALARM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ALARM_MASTER_CODE, default="1234"): cv.string,
        vol.Required(CONF_ALARM_FAILED_TRIES, default=3): cv.positive_int,
        vol.Required(CONF_ALARM_ARM_REQUIRES_CODE, default=False): cv.boolean,
    }
)
CUSTOM_CONFIGURATION = "custom_configuration"
# --- DATA_*: keys into hass.data for ZHA runtime state ---
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DATA_ZHA_SHUTDOWN_TASK = "zha_shutdown_task"
# --- Logger names whose level the debug service toggles ---
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_ZNP = "zigpy_znp"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
# Level applied to every component logger while debug mode is on.
DEBUG_LEVELS = {
    DEBUG_COMP_BELLOWS: logging.DEBUG,
    DEBUG_COMP_ZHA: logging.DEBUG,
    DEBUG_COMP_ZIGPY: logging.DEBUG,
    DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
    DEBUG_COMP_ZIGPY_ZNP: logging.DEBUG,
    DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
    DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
    DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
# --- Defaults and miscellaneous identifiers ---
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DEVICE_PAIRING_STATUS = "pairing_status"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00  # manufacturer-specific cluster IDs start here
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
PRESET_SCHEDULE = "schedule"
PRESET_COMPLEX = "complex"
ZHA_ALARM_OPTIONS = "zha_alarm_options"
ZHA_OPTIONS = "zha_options"
# Maps an options section name to the schema that validates it.
ZHA_CONFIG_SCHEMAS = {
    ZHA_OPTIONS: CONF_ZHA_OPTIONS_SCHEMA,
    ZHA_ALARM_OPTIONS: CONF_ZHA_ALARM_SCHEMA,
}
class RadioType(enum.Enum):
    """Supported Zigbee radio types.

    Each member's value is a ``(description, controller_class)`` pair;
    ``__init__`` unpacks it into the ``description`` / ``controller``
    properties.
    """

    znp = (
        "ZNP = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
        zigpy_znp.zigbee.application.ControllerApplication,
    )
    ezsp = (
        "EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
        bellows.zigbee.application.ControllerApplication,
    )
    deconz = (
        "deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
        zigpy_deconz.zigbee.application.ControllerApplication,
    )
    ti_cc = (
        "Legacy TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
        zigpy_cc.zigbee.application.ControllerApplication,
    )
    zigate = (
        "ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
        zigpy_zigate.zigbee.application.ControllerApplication,
    )
    xbee = (
        "XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
        zigpy_xbee.zigbee.application.ControllerApplication,
    )

    @classmethod
    def list(cls) -> list[str]:
        """Return every member's description, in definition order."""
        return [member.description for member in cls]

    @classmethod
    def get_by_description(cls, description: str) -> str:
        """Return the name of the radio matching *description*.

        Raises ValueError when no member's description matches.
        """
        for member in cls:
            if member.description == description:
                return member.name
        raise ValueError

    def __init__(self, description: str, controller_cls: CALLABLE_T) -> None:
        """Unpack the member value into description and controller class."""
        self._desc = description
        self._ctrl_cls = controller_cls

    @property
    def controller(self) -> CALLABLE_T:
        """Controller application class for this radio."""
        return self._ctrl_cls

    @property
    def description(self) -> str:
        """Human-readable description shown in the config flow."""
        return self._desc
# --- Attribute-reporting intervals (seconds) and change thresholds ---
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
# Each tuple is (min_interval, max_interval, reportable_change).
REPORT_CONFIG_DEFAULT = (
    REPORT_CONFIG_MIN_INT,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
    REPORT_CONFIG_MIN_INT_ASAP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
    REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
    REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
    REPORT_CONFIG_MIN_INT_IMMEDIATE,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
    REPORT_CONFIG_MIN_INT_OP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
# --- SENSOR_*: sensor subtype identifiers ---
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
# --- SIGNAL_*: dispatcher signal names used within the integration ---
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"  # formatted with a device id
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
# --- IAS warning-device (siren/strobe) command values ---
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
# --- ZHA_*: event/message identifiers published by the gateway ---
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"  # formatted with a platform name
ZHA_CHANNEL_MSG = "zha_channel_message"
ZHA_CHANNEL_MSG_BIND = "zha_channel_bind"
ZHA_CHANNEL_MSG_CFG_RPT = "zha_channel_configure_reporting"
ZHA_CHANNEL_MSG_DATA = "zha_channel_msg_data"
ZHA_CHANNEL_CFG_DONE = "zha_channel_cfg_done"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
# --- Identify-effect command values ---
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/zha/core/const.py",
"copies": "1",
"size": "11888",
"license": "apache-2.0",
"hash": 2492736485050557400,
"line_mean": 29.7183462532,
"line_max": 88,
"alpha_frac": 0.7293068641,
"autogenerated": false,
"ratio": 2.824423853646947,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40537307177469467,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
import enum
import logging
from typing import List
import bellows.zigbee.application
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
import zigpy_znp.zigbee.application
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
    """Supported Zigbee radio types.

    Each member's value is a ``(description, controller_class)`` pair;
    ``__init__`` unpacks it into the ``description`` / ``controller``
    properties.
    """

    znp = (
        "ZNP = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
        zigpy_znp.zigbee.application.ControllerApplication,
    )
    ezsp = (
        "EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
        bellows.zigbee.application.ControllerApplication,
    )
    deconz = (
        "deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
        zigpy_deconz.zigbee.application.ControllerApplication,
    )
    ti_cc = (
        "Legacy TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
        zigpy_cc.zigbee.application.ControllerApplication,
    )
    zigate = (
        "ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
        zigpy_zigate.zigbee.application.ControllerApplication,
    )
    xbee = (
        "XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
        zigpy_xbee.zigbee.application.ControllerApplication,
    )

    @classmethod
    def list(cls) -> List[str]:
        """Return every member's description, in definition order."""
        return [member.description for member in cls]

    @classmethod
    def get_by_description(cls, description: str) -> str:
        """Return the name of the radio matching *description*.

        Raises ValueError when no member's description matches.
        """
        for member in cls:
            if member.description == description:
                return member.name
        raise ValueError

    def __init__(self, description: str, controller_cls: CALLABLE_T):
        """Unpack the member value into description and controller class."""
        self._desc = description
        self._ctrl_cls = controller_cls

    @property
    def controller(self) -> CALLABLE_T:
        """Controller application class for this radio."""
        return self._ctrl_cls

    @property
    def description(self) -> str:
        """Human-readable description shown in the config flow."""
        return self._desc
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
| {
"repo_name": "tchellomello/home-assistant",
"path": "homeassistant/components/zha/core/const.py",
"copies": "1",
"size": "10065",
"license": "apache-2.0",
"hash": 8801216952669594000,
"line_mean": 29.3162650602,
"line_max": 88,
"alpha_frac": 0.7281669151,
"autogenerated": false,
"ratio": 2.821698906644239,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40498658217442385,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NWK = "nwk"
ATTR_POWER_SOURCE = "power_source"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (BINARY_SENSOR, COVER, DEVICE_TRACKER, FAN, LIGHT, LOCK, SENSOR, SWITCH)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONTROLLER = "controller"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
    """Supported Zigbee radio types (value == config-flow string)."""

    ezsp = "ezsp"
    xbee = "xbee"
    deconz = "deconz"
    zigate = "zigate"

    @classmethod
    def list(cls):
        """Return every member's value, in definition order."""
        return [member.value for member in cls]
# Attribute reporting configuration values (intervals in seconds).
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
# Each config is a (min interval, max interval, reportable change) triple.
REPORT_CONFIG_DEFAULT = (
    REPORT_CONFIG_MIN_INT,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
    REPORT_CONFIG_MIN_INT_ASAP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
    REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
    REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
    REPORT_CONFIG_MIN_INT_IMMEDIATE,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
    REPORT_CONFIG_MIN_INT_OP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
# Sensor type identifiers; several alias the matching channel names above.
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
# Dispatcher signal names used between channels and entities.
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
# IAS warning device (siren) mode / strobe / sound-level codes.
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
# Gateway event/message names published on the ZHA_GW_MSG channel.
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
ZHA_GW_RADIO = "radio"
ZHA_GW_RADIO_DESCRIPTION = "radio_description"
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/zha/core/const.py",
"copies": "1",
"size": "7209",
"license": "apache-2.0",
"hash": 8451634478293702000,
"line_mean": 28.7892561983,
"line_max": 85,
"alpha_frac": 0.7439311971,
"autogenerated": false,
"ratio": 2.7101503759398495,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39540815730398493,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
DOMAIN = 'zha'
# Serial baud rates the config flow offers for the radio.
BAUD_RATES = [
    2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000
]
# Keys under which ZHA stores runtime state in hass.data.
DATA_ZHA = 'zha'
DATA_ZHA_CONFIG = 'config'
DATA_ZHA_BRIDGE_ID = 'zha_bridge_id'
DATA_ZHA_DISPATCHERS = 'zha_dispatchers'
DATA_ZHA_CORE_EVENTS = 'zha_core_events'
DATA_ZHA_GATEWAY = 'zha_gateway'
ZHA_DISCOVERY_NEW = 'zha_discovery_new_{}'
# Home Assistant platforms that ZHA can create entities for.
COMPONENTS = (
    BINARY_SENSOR,
    DEVICE_TRACKER,
    FAN,
    LIGHT,
    LOCK,
    SENSOR,
    SWITCH,
)
# Configuration keys accepted by the ZHA integration.
CONF_BAUDRATE = 'baudrate'
CONF_DATABASE = 'database_path'
CONF_DEVICE_CONFIG = 'device_config'
CONF_RADIO_TYPE = 'radio_type'
CONF_USB_PATH = 'usb_path'
DATA_DEVICE_CONFIG = 'zha_device_config'
ENABLE_QUIRKS = 'enable_quirks'
RADIO = 'radio'
RADIO_DESCRIPTION = 'radio_description'
CONTROLLER = 'controller'
DEFAULT_RADIO_TYPE = 'ezsp'
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = 'zigbee.db'
# Service-call / event attribute names.
ATTR_CLUSTER_ID = 'cluster_id'
ATTR_CLUSTER_TYPE = 'cluster_type'
ATTR_ATTRIBUTE = 'attribute'
ATTR_VALUE = 'value'
ATTR_MANUFACTURER = 'manufacturer'
ATTR_COMMAND = 'command'
ATTR_COMMAND_TYPE = 'command_type'
ATTR_ARGS = 'args'
ATTR_ENDPOINT_ID = 'endpoint_id'
IN = 'in'
OUT = 'out'
CLIENT_COMMANDS = 'client_commands'
SERVER_COMMANDS = 'server_commands'
SERVER = 'server'
# Device-info keys.
IEEE = 'ieee'
MODEL = 'model'
NAME = 'name'
LQI = 'lqi'
RSSI = 'rssi'
LAST_SEEN = 'last_seen'
SENSOR_TYPE = 'sensor_type'
# Sensor type identifiers.
HUMIDITY = 'humidity'
TEMPERATURE = 'temperature'
ILLUMINANCE = 'illuminance'
PRESSURE = 'pressure'
METERING = 'metering'
ELECTRICAL_MEASUREMENT = 'electrical_measurement'
GENERIC = 'generic'
BATTERY = 'battery'
UNKNOWN = 'unknown'
UNKNOWN_MANUFACTURER = 'unk_manufacturer'
UNKNOWN_MODEL = 'unk_model'
OPENING = 'opening'
OCCUPANCY = 'occupancy'
ACCELERATION = 'acceleration'
ATTR_LEVEL = 'level'
# Channel identifiers.
ZDO_CHANNEL = 'zdo'
ON_OFF_CHANNEL = 'on_off'
ATTRIBUTE_CHANNEL = 'attribute'
BASIC_CHANNEL = 'basic'
COLOR_CHANNEL = 'light_color'
FAN_CHANNEL = 'fan'
LEVEL_CHANNEL = ATTR_LEVEL
ZONE_CHANNEL = ZONE = 'ias_zone'
ELECTRICAL_MEASUREMENT_CHANNEL = 'electrical_measurement'
POWER_CONFIGURATION_CHANNEL = 'power'
EVENT_RELAY_CHANNEL = 'event_relay'
DOORLOCK_CHANNEL = 'door_lock'
# Dispatcher signal names.
SIGNAL_ATTR_UPDATED = 'attribute_updated'
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_AVAILABLE = 'available'
SIGNAL_REMOVE = 'remove'
QUIRK_APPLIED = 'quirk_applied'
QUIRK_CLASS = 'quirk_class'
MANUFACTURER_CODE = 'manufacturer_code'
POWER_SOURCE = 'power_source'
MAINS_POWERED = 'Mains'
BATTERY_OR_UNKNOWN = 'Battery or Unknown'
# Logger names whose level is raised to DEBUG when debug logging is enabled.
BELLOWS = 'bellows'
ZHA = 'homeassistant.components.zha'
ZIGPY = 'zigpy'
ZIGPY_XBEE = 'zigpy_xbee'
ZIGPY_DECONZ = 'zigpy_deconz'
ORIGINAL = 'original'
CURRENT = 'current'
DEBUG_LEVELS = {
    BELLOWS: logging.DEBUG,
    ZHA: logging.DEBUG,
    ZIGPY: logging.DEBUG,
    ZIGPY_XBEE: logging.DEBUG,
    ZIGPY_DECONZ: logging.DEBUG,
}
ADD_DEVICE_RELAY_LOGGERS = [ZHA, ZIGPY]
# Gateway event/message names.
TYPE = 'type'
NWK = 'nwk'
SIGNATURE = 'signature'
RAW_INIT = 'raw_device_initialized'
ZHA_GW_MSG = 'zha_gateway_message'
DEVICE_REMOVED = 'device_removed'
DEVICE_INFO = 'device_info'
DEVICE_FULL_INIT = 'device_fully_initialized'
DEVICE_JOINED = 'device_joined'
LOG_OUTPUT = 'log_output'
LOG_ENTRY = 'log_entry'
# First cluster id in the manufacturer-specific range.
MFG_CLUSTER_ID_START = 0xfc00
class RadioType(enum.Enum):
    """Possible options for radio type."""

    ezsp = 'ezsp'
    xbee = 'xbee'
    deconz = 'deconz'

    @classmethod
    def list(cls):
        """Return list of enum's values."""
        return [radio.value for radio in cls]
DISCOVERY_KEY = 'zha_discovery_info'
# Attribute reporting configuration values (intervals in seconds).
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
# Each config is a (min interval, max interval, reportable change) triple.
REPORT_CONFIG_DEFAULT = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT,
                         REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_ASAP = (REPORT_CONFIG_MIN_INT_ASAP, REPORT_CONFIG_MAX_INT,
                      REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_BATTERY_SAVE = (REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
                              REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
                              REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_IMMEDIATE = (REPORT_CONFIG_MIN_INT_IMMEDIATE,
                           REPORT_CONFIG_MAX_INT,
                           REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_OP = (REPORT_CONFIG_MIN_INT_OP, REPORT_CONFIG_MAX_INT,
                    REPORT_CONFIG_RPT_CHANGE)
| {
"repo_name": "jabesq/home-assistant",
"path": "homeassistant/components/zha/core/const.py",
"copies": "1",
"size": "5046",
"license": "apache-2.0",
"hash": 6951462692944911000,
"line_mean": 26.5737704918,
"line_max": 76,
"alpha_frac": 0.7063020214,
"autogenerated": false,
"ratio": 2.8834285714285715,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4089730592828571,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
import enum
DOMAIN = 'zha'
# Serial baud rates supported for the radio.
BAUD_RATES = [
    2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000
]
# Keys under which ZHA stores runtime state in hass.data.
DATA_ZHA = 'zha'
DATA_ZHA_CONFIG = 'config'
DATA_ZHA_BRIDGE_ID = 'zha_bridge_id'
DATA_ZHA_RADIO = 'zha_radio'
DATA_ZHA_DISPATCHERS = 'zha_dispatchers'
DATA_ZHA_CORE_COMPONENT = 'zha_core_component'
DATA_ZHA_CORE_EVENTS = 'zha_core_events'
DATA_ZHA_GATEWAY = 'zha_gateway'
ZHA_DISCOVERY_NEW = 'zha_discovery_new_{}'
# Home Assistant platforms that ZHA can create entities for.
COMPONENTS = [
    'binary_sensor',
    'fan',
    'light',
    'sensor',
    'switch',
]
# Configuration keys accepted by the ZHA integration.
CONF_BAUDRATE = 'baudrate'
CONF_DATABASE = 'database_path'
CONF_DEVICE_CONFIG = 'device_config'
CONF_RADIO_TYPE = 'radio_type'
CONF_USB_PATH = 'usb_path'
DATA_DEVICE_CONFIG = 'zha_device_config'
ENABLE_QUIRKS = 'enable_quirks'
DEFAULT_RADIO_TYPE = 'ezsp'
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = 'zigbee.db'
# Service-call / event attribute names.
ATTR_CLUSTER_ID = 'cluster_id'
ATTR_CLUSTER_TYPE = 'cluster_type'
ATTR_ATTRIBUTE = 'attribute'
ATTR_VALUE = 'value'
ATTR_MANUFACTURER = 'manufacturer'
ATTR_COMMAND = 'command'
ATTR_COMMAND_TYPE = 'command_type'
ATTR_ARGS = 'args'
ATTR_ENDPOINT_ID = 'endpoint_id'
IN = 'in'
OUT = 'out'
CLIENT_COMMANDS = 'client_commands'
SERVER_COMMANDS = 'server_commands'
SERVER = 'server'
IEEE = 'ieee'
MODEL = 'model'
NAME = 'name'
SENSOR_TYPE = 'sensor_type'
# Sensor type identifiers.
HUMIDITY = 'humidity'
TEMPERATURE = 'temperature'
ILLUMINANCE = 'illuminance'
PRESSURE = 'pressure'
METERING = 'metering'
ELECTRICAL_MEASUREMENT = 'electrical_measurement'
GENERIC = 'generic'
UNKNOWN = 'unknown'
OPENING = 'opening'
ZONE = 'zone'
OCCUPANCY = 'occupancy'
ACCELERATION = 'acceleration'
ATTR_LEVEL = 'level'
# Channel identifiers.
ZDO_CHANNEL = 'zdo'
ON_OFF_CHANNEL = 'on_off'
ATTRIBUTE_CHANNEL = 'attribute'
BASIC_CHANNEL = 'basic'
COLOR_CHANNEL = 'color'
FAN_CHANNEL = 'fan'
LEVEL_CHANNEL = ATTR_LEVEL
ZONE_CHANNEL = 'zone'
ELECTRICAL_MEASUREMENT_CHANNEL = 'active_power'
POWER_CONFIGURATION_CHANNEL = 'battery'
EVENT_RELAY_CHANNEL = 'event_relay'
# Dispatcher signal names.
SIGNAL_ATTR_UPDATED = 'attribute_updated'
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_AVAILABLE = 'available'
SIGNAL_REMOVE = 'remove'
QUIRK_APPLIED = 'quirk_applied'
QUIRK_CLASS = 'quirk_class'
MANUFACTURER_CODE = 'manufacturer_code'
POWER_SOURCE = 'power_source'
class RadioType(enum.Enum):
    """Possible options for radio type."""

    ezsp = 'ezsp'
    xbee = 'xbee'
    deconz = 'deconz'

    @classmethod
    def list(cls):
        """Return list of enum's values."""
        return list(entry.value for entry in cls)
DISCOVERY_KEY = 'zha_discovery_info'
# Registries populated elsewhere at runtime (start out empty here).
DEVICE_CLASS = {}
SINGLE_INPUT_CLUSTER_DEVICE_CLASS = {}
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS = {}
CLUSTER_REPORT_CONFIGS = {}
CUSTOM_CLUSTER_MAPPINGS = {}
COMPONENT_CLUSTERS = {}
EVENT_RELAY_CLUSTERS = []
NO_SENSOR_CLUSTERS = []
BINDABLE_CLUSTERS = []
# Attribute reporting configuration values (intervals in seconds).
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
# Each config is a (min interval, max interval, reportable change) triple.
REPORT_CONFIG_DEFAULT = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT,
                         REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_ASAP = (REPORT_CONFIG_MIN_INT_ASAP, REPORT_CONFIG_MAX_INT,
                      REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_BATTERY_SAVE = (REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
                              REPORT_CONFIG_MAX_INT,
                              REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_IMMEDIATE = (REPORT_CONFIG_MIN_INT_IMMEDIATE,
                           REPORT_CONFIG_MAX_INT,
                           REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_OP = (REPORT_CONFIG_MIN_INT_OP, REPORT_CONFIG_MAX_INT,
                    REPORT_CONFIG_RPT_CHANGE)
| {
"repo_name": "nugget/home-assistant",
"path": "homeassistant/components/zha/core/const.py",
"copies": "1",
"size": "3834",
"license": "apache-2.0",
"hash": 2523232175872065000,
"line_mean": 25.8111888112,
"line_max": 72,
"alpha_frac": 0.6841418884,
"autogenerated": false,
"ratio": 2.861194029850746,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9045335918250745,
"avg_score": 0,
"num_lines": 143
} |
"""All constants related to the ZHA component."""
# Registries filled in lazily by populate_data() below.
DEVICE_CLASS = {}
SINGLE_CLUSTER_DEVICE_CLASS = {}
COMPONENT_CLUSTERS = {}
def populate_data():
    """Populate data using constants from bellows.

    These cannot be module level, as importing bellows must be done
    inside a function.
    """
    from bellows.zigbee import zcl
    from bellows.zigbee.profiles import PROFILES, zha, zll
    # ZHA profile: map each device type to the hass component that serves it.
    DEVICE_CLASS[zha.PROFILE_ID] = {
        zha.DeviceType.ON_OFF_SWITCH: 'switch',
        zha.DeviceType.SMART_PLUG: 'switch',
        zha.DeviceType.ON_OFF_LIGHT: 'light',
        zha.DeviceType.DIMMABLE_LIGHT: 'light',
        zha.DeviceType.COLOR_DIMMABLE_LIGHT: 'light',
        zha.DeviceType.ON_OFF_LIGHT_SWITCH: 'light',
        zha.DeviceType.DIMMER_SWITCH: 'light',
        zha.DeviceType.COLOR_DIMMER_SWITCH: 'light',
    }
    # ZLL (Light Link) profile equivalents.
    DEVICE_CLASS[zll.PROFILE_ID] = {
        zll.DeviceType.ON_OFF_LIGHT: 'light',
        zll.DeviceType.ON_OFF_PLUGIN_UNIT: 'switch',
        zll.DeviceType.DIMMABLE_LIGHT: 'light',
        zll.DeviceType.DIMMABLE_PLUGIN_UNIT: 'light',
        zll.DeviceType.COLOR_LIGHT: 'light',
        zll.DeviceType.EXTENDED_COLOR_LIGHT: 'light',
        zll.DeviceType.COLOR_TEMPERATURE_LIGHT: 'light',
    }
    # Clusters that map one-to-one onto a hass component.
    SINGLE_CLUSTER_DEVICE_CLASS.update({
        zcl.clusters.general.OnOff: 'switch',
        zcl.clusters.measurement.TemperatureMeasurement: 'sensor',
        zcl.clusters.security.IasZone: 'binary_sensor',
    })
    # A map of hass components to all Zigbee clusters it could use
    for profile_id, classes in DEVICE_CLASS.items():
        profile = PROFILES[profile_id]
        for device_type, component in classes.items():
            if component not in COMPONENT_CLUSTERS:
                # (input clusters, output clusters)
                COMPONENT_CLUSTERS[component] = (set(), set())
            clusters = profile.CLUSTERS[device_type]
            COMPONENT_CLUSTERS[component][0].update(clusters[0])
            COMPONENT_CLUSTERS[component][1].update(clusters[1])
| {
"repo_name": "LinuxChristian/home-assistant",
"path": "homeassistant/components/zha/const.py",
"copies": "9",
"size": "1965",
"license": "apache-2.0",
"hash": -6817887734954338000,
"line_mean": 36.7884615385,
"line_max": 72,
"alpha_frac": 0.6524173028,
"autogenerated": false,
"ratio": 3.4055459272097055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8557963230009705,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
# Registries filled in lazily by populate_data() below.
DEVICE_CLASS = {}
SINGLE_INPUT_CLUSTER_DEVICE_CLASS = {}
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS = {}
COMPONENT_CLUSTERS = {}
def populate_data():
    """Populate data using constants from zigpy.

    These cannot be module level, as importing zigpy must be done
    inside a function.
    """
    from zigpy import zcl
    from zigpy.profiles import PROFILES, zha, zll
    # ZHA profile: map each device type to the hass component that serves it.
    DEVICE_CLASS[zha.PROFILE_ID] = {
        zha.DeviceType.ON_OFF_SWITCH: 'binary_sensor',
        zha.DeviceType.LEVEL_CONTROL_SWITCH: 'binary_sensor',
        zha.DeviceType.REMOTE_CONTROL: 'binary_sensor',
        zha.DeviceType.SMART_PLUG: 'switch',
        zha.DeviceType.ON_OFF_LIGHT: 'light',
        zha.DeviceType.DIMMABLE_LIGHT: 'light',
        zha.DeviceType.COLOR_DIMMABLE_LIGHT: 'light',
        zha.DeviceType.ON_OFF_LIGHT_SWITCH: 'binary_sensor',
        zha.DeviceType.DIMMER_SWITCH: 'binary_sensor',
        zha.DeviceType.COLOR_DIMMER_SWITCH: 'binary_sensor',
    }
    # ZLL (Light Link) profile equivalents.
    DEVICE_CLASS[zll.PROFILE_ID] = {
        zll.DeviceType.ON_OFF_LIGHT: 'light',
        zll.DeviceType.ON_OFF_PLUGIN_UNIT: 'switch',
        zll.DeviceType.DIMMABLE_LIGHT: 'light',
        zll.DeviceType.DIMMABLE_PLUGIN_UNIT: 'light',
        zll.DeviceType.COLOR_LIGHT: 'light',
        zll.DeviceType.EXTENDED_COLOR_LIGHT: 'light',
        zll.DeviceType.COLOR_TEMPERATURE_LIGHT: 'light',
        zll.DeviceType.COLOR_CONTROLLER: 'binary_sensor',
        zll.DeviceType.COLOR_SCENE_CONTROLLER: 'binary_sensor',
        zll.DeviceType.CONTROLLER: 'binary_sensor',
        zll.DeviceType.SCENE_CONTROLLER: 'binary_sensor',
        zll.DeviceType.ON_OFF_SENSOR: 'binary_sensor',
    }
    # Input clusters that map one-to-one onto a hass component.
    SINGLE_INPUT_CLUSTER_DEVICE_CLASS.update({
        zcl.clusters.general.OnOff: 'switch',
        zcl.clusters.measurement.RelativeHumidity: 'sensor',
        zcl.clusters.measurement.TemperatureMeasurement: 'sensor',
        zcl.clusters.measurement.PressureMeasurement: 'sensor',
        zcl.clusters.measurement.IlluminanceMeasurement: 'sensor',
        zcl.clusters.smartenergy.Metering: 'sensor',
        zcl.clusters.homeautomation.ElectricalMeasurement: 'sensor',
        zcl.clusters.security.IasZone: 'binary_sensor',
        zcl.clusters.hvac.Fan: 'fan',
    })
    # Output clusters that map one-to-one onto a hass component.
    SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.update({
        zcl.clusters.general.OnOff: 'binary_sensor',
    })
    # A map of hass components to all Zigbee clusters it could use
    for profile_id, classes in DEVICE_CLASS.items():
        profile = PROFILES[profile_id]
        for device_type, component in classes.items():
            if component not in COMPONENT_CLUSTERS:
                # (input clusters, output clusters)
                COMPONENT_CLUSTERS[component] = (set(), set())
            clusters = profile.CLUSTERS[device_type]
            COMPONENT_CLUSTERS[component][0].update(clusters[0])
            COMPONENT_CLUSTERS[component][1].update(clusters[1])
| {
"repo_name": "persandstrom/home-assistant",
"path": "homeassistant/components/zha/const.py",
"copies": "2",
"size": "2895",
"license": "apache-2.0",
"hash": -3537158673797231600,
"line_mean": 40.9565217391,
"line_max": 72,
"alpha_frac": 0.6670120898,
"autogenerated": false,
"ratio": 3.4058823529411764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5072894442741176,
"avg_score": null,
"num_lines": null
} |
"""All constants related to the ZHA component."""
DEVICE_CLASS = {}
SINGLE_INPUT_CLUSTER_DEVICE_CLASS = {}
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS = {}
CUSTOM_CLUSTER_MAPPINGS = {}
COMPONENT_CLUSTERS = {}
def populate_data():
"""Populate data using constants from bellows.
These cannot be module level, as importing bellows must be done in a
in a function.
"""
from zigpy import zcl, quirks
from zigpy.profiles import PROFILES, zha, zll
from homeassistant.components.sensor import zha as sensor_zha
DEVICE_CLASS[zha.PROFILE_ID] = {
zha.DeviceType.ON_OFF_SWITCH: 'binary_sensor',
zha.DeviceType.LEVEL_CONTROL_SWITCH: 'binary_sensor',
zha.DeviceType.REMOTE_CONTROL: 'binary_sensor',
zha.DeviceType.SMART_PLUG: 'switch',
zha.DeviceType.ON_OFF_LIGHT: 'light',
zha.DeviceType.DIMMABLE_LIGHT: 'light',
zha.DeviceType.COLOR_DIMMABLE_LIGHT: 'light',
zha.DeviceType.ON_OFF_LIGHT_SWITCH: 'binary_sensor',
zha.DeviceType.DIMMER_SWITCH: 'binary_sensor',
zha.DeviceType.COLOR_DIMMER_SWITCH: 'binary_sensor',
}
DEVICE_CLASS[zll.PROFILE_ID] = {
zll.DeviceType.ON_OFF_LIGHT: 'light',
zll.DeviceType.ON_OFF_PLUGIN_UNIT: 'switch',
zll.DeviceType.DIMMABLE_LIGHT: 'light',
zll.DeviceType.DIMMABLE_PLUGIN_UNIT: 'light',
zll.DeviceType.COLOR_LIGHT: 'light',
zll.DeviceType.EXTENDED_COLOR_LIGHT: 'light',
zll.DeviceType.COLOR_TEMPERATURE_LIGHT: 'light',
zll.DeviceType.COLOR_CONTROLLER: 'binary_sensor',
zll.DeviceType.COLOR_SCENE_CONTROLLER: 'binary_sensor',
zll.DeviceType.CONTROLLER: 'binary_sensor',
zll.DeviceType.SCENE_CONTROLLER: 'binary_sensor',
zll.DeviceType.ON_OFF_SENSOR: 'binary_sensor',
}
SINGLE_INPUT_CLUSTER_DEVICE_CLASS.update({
zcl.clusters.general.OnOff: 'switch',
zcl.clusters.measurement.RelativeHumidity: 'sensor',
zcl.clusters.measurement.TemperatureMeasurement: 'sensor',
zcl.clusters.measurement.PressureMeasurement: 'sensor',
zcl.clusters.measurement.IlluminanceMeasurement: 'sensor',
zcl.clusters.smartenergy.Metering: 'sensor',
zcl.clusters.homeautomation.ElectricalMeasurement: 'sensor',
zcl.clusters.security.IasZone: 'binary_sensor',
zcl.clusters.hvac.Fan: 'fan',
})
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.update({
zcl.clusters.general.OnOff: 'binary_sensor',
})
# A map of device/cluster to component/sub-component
CUSTOM_CLUSTER_MAPPINGS.update({
(quirks.smartthings.SmartthingsTemperatureHumiditySensor, 64581):
('sensor', sensor_zha.RelativeHumiditySensor)
})
# A map of hass components to all Zigbee clusters it could use
for profile_id, classes in DEVICE_CLASS.items():
profile = PROFILES[profile_id]
for device_type, component in classes.items():
if component not in COMPONENT_CLUSTERS:
COMPONENT_CLUSTERS[component] = (set(), set())
clusters = profile.CLUSTERS[device_type]
COMPONENT_CLUSTERS[component][0].update(clusters[0])
COMPONENT_CLUSTERS[component][1].update(clusters[1])
| {
"repo_name": "aronsky/home-assistant",
"path": "homeassistant/components/zha/const.py",
"copies": "2",
"size": "3232",
"license": "apache-2.0",
"hash": -8868122887720390000,
"line_mean": 40.974025974,
"line_max": 73,
"alpha_frac": 0.6729579208,
"autogenerated": false,
"ratio": 3.4493062966915686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122264217491569,
"avg_score": null,
"num_lines": null
} |
__all__ = ('Consts',)
class Consts(object):
    """Registry of named Const objects with id/name/choice lookups.

    Attribute access is overloaded (see get_by_name): ``consts.NAME``
    (uppercase) returns the const's id, ``consts.name`` (lowercase)
    returns the Const object itself.
    """
    # Converters between attribute-style (lowercase) and const-style
    # (uppercase) names.
    const_name_to_obj_name = staticmethod(lambda const_name: const_name.lower())
    obj_name_to_const_name = staticmethod(lambda const_name: const_name.upper())
    def __init__(self, choice=lambda x: x.label, **consts):
        """
        :param choice: optional function to be used for retrieving choice label
        :param consts: Const objects to be owned by this Consts instance
        :return:
        """
        # Validate: either every const has an explicit id, or none does
        # (then ids are auto-assigned in declaration order below).
        auto_ids = False
        no_ids = [True for c in consts.values() if c.id is None]
        if no_ids and len(no_ids) != len(consts):
            raise ValueError('All consts or none of them should have id')
        elif no_ids:
            auto_ids = True
        if len(consts) != len(tuple(key for key in consts.keys() if key.isupper())):
            raise ValueError('All consts names should be fully uppercase')
        if not auto_ids and \
                len(consts) != len(set(const_obj.id for const_obj in consts.values() if const_obj.id is not None)):
            raise ValueError('All consts ids should be unique')
        # Sort by declaration order (creation_counter is set by the Const class).
        consts_list = [(const_name, const_obj) for const_name, const_obj in consts.items()]
        consts_list.sort(key=lambda x: x[1].creation_counter)
        # Own fields init; _choices and _consts_by_id are lazily built caches.
        self._consts_list = consts_list
        self._consts_dict = consts
        self._choice_getter = choice
        self._choices = None
        self._consts_by_id = None
        # Each const init completion
        for const_auto_id, const in enumerate(consts_list, start=1):
            const_name, const_obj = const
            # Set const name
            const_obj._set_name(const_name)
            # If needed, set const id to auto value (1-based declaration order)
            if const_obj.id is None:
                const_obj.id = const_auto_id
    def get_choices(self):
        """
        :return: tuple ( (id, label), (id, label), ...) generated using owned consts,
        label is generated using choice constructor param
        """
        if self._choices is None:
            self._choices = tuple((obj.id, self._choice_getter(obj)) for name, obj in self._consts_list)
        return self._choices
    def get_consts(self):
        """
        :return: all consts obj owned by this Consts instance
        """
        return tuple(const_obj for const_name, const_obj in self._consts_list)
    def get_consts_names(self):
        """
        :return: all consts names owned by this Consts instance
        """
        return tuple(const_name for const_name, const_obj in self._consts_list)
    def get_by_id(self, const_id, default=None):
        """
        :param const_id: const integer id
        :return: whole Const object represented by this id, or *default*
        """
        if self._consts_by_id is None:
            self._consts_by_id = {obj.id: obj for obj in self._consts_dict.values()}
        try:
            return self._consts_by_id[const_id]
        except KeyError:
            # Fall back to coercing string-like ids to int before giving up.
            try:
                const_id = int(const_id)
            except (TypeError, ValueError):
                return default
            return self._consts_by_id.get(const_id, default)
    def get_by_name(self, name, default=None):
        """
        :param name: const name - if lowercase Const, obj is returned; if uppercase, id is returned
        :return: whole Const object represented by this name or Const.id
        """
        const_name = self.obj_name_to_const_name(name)
        # Lowercase lookup (name differs from its uppercased form) -> the object.
        if const_name != name and const_name in self._consts_dict:
            return self._consts_dict[const_name]
        # Uppercase lookup -> the id.
        try:
            return self._consts_dict[const_name].id
        except KeyError:
            raise AttributeError("this Consts object has not attribute '%s'" % name)
    def __getattr__(self, name):
        # NOTE(review): called for any missing attribute; if invoked before
        # __init__ has set _consts_dict (e.g. during unpickling/copying) this
        # could recurse — TODO confirm whether that path is reachable.
        return self.get_by_name(name)
    def __getitem__(self, name):
        return self.get_by_name(name)
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.get_consts_names())
    def __dir__(self):
        # Expose both uppercase const names and their lowercase object aliases.
        attrs = dir(super(Consts, self))
        consts_names = list(self.get_consts_names())
        consts_objs_names = [name.lower() for name in consts_names]
        return attrs + consts_names + consts_objs_names
| {
"repo_name": "glowka/const_choice",
"path": "const_choice/consts.py",
"copies": "1",
"size": "4271",
"license": "mit",
"hash": 8841040863426050000,
"line_mean": 37.1339285714,
"line_max": 115,
"alpha_frac": 0.5834699134,
"autogenerated": false,
"ratio": 3.896897810218978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4980367723618978,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ContactList']
import random
from contact import Contact
from print_colors import PrintColors
class ContactList(object):
    """Collection of Contact objects indexed by id and by remote address.

    Maintains three synchronized views: ``items`` (insertion order),
    ``items_id_map`` (by contact id) and ``items_raddr_map``
    (by (remote_host, remote_port)).
    """

    def __init__(self):
        self.items = []
        self.items_id_map = {}
        self.items_raddr_map = {}

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def add(self, c):
        """Register contact *c* in all three indexes and return it.

        :raises ValueError: if *c* has no id and is not a bootstrap node,
            or if a bootstrap contact with id=None is already known.
        """
        # Was a garbled message ("Contact it cannot be None, it its is...").
        if c.id is None and not c.bootstrap:
            raise ValueError('Contact id cannot be None if it is not a bootstrap node')
        if c.id is None and c.id in self.items_id_map:
            raise ValueError('Bootstrap contact with id=None is already known')
        self.items.append(c)
        self.items_id_map[c.id] = c
        self.items_raddr_map[c.remote_host, c.remote_port] = c
        return c

    def get(self, id_or_remote_address_or_idx):
        """Look up a contact by id (str/bytes), (host, port) pair/list, or index.

        Returns None when no contact matches (or the key type is unsupported).
        """
        key = id_or_remote_address_or_idx
        if isinstance(key, (str, bytes)):
            # dict.get replaces the old try/except-KeyError-pass dance.
            return self.items_id_map.get(key)
        if isinstance(key, (tuple, list)):
            remote_host, remote_port = key
            return self.items_raddr_map.get((remote_host, remote_port))
        if isinstance(key, int):
            try:
                return self.items[key]
            except IndexError:
                return None
        return None

    def remove(self, c_or_id):
        """Remove a contact given either the Contact object or its id.

        :return: the removed contact
        :raises KeyError: if an unknown id is given
        """
        if isinstance(c_or_id, Contact):
            c = c_or_id
            self.items.remove(c)
            del self.items_id_map[c.id]
        else:
            c = self.items_id_map.pop(c_or_id)
            self.items.remove(c)
        # Common to both paths (was duplicated in each branch).
        del self.items_raddr_map[c.remote_host, c.remote_port]
        return c

    def random(self, without_id=None):
        """Return a uniformly random contact, or None.

        Returns None when the list is empty, or when the randomly chosen
        contact happens to have *without_id* (the pick is NOT retried).
        """
        if not self.items:
            return None
        c = random.choice(self.items)
        if c.id == without_id:
            return None
        return c

    def all(self, version=0, max_old=None):
        """Return all contacts; bootstrap contacts are always included."""
        contacts = []
        for c in self.items:
            if c.bootstrap:
                contacts.append(c)
                continue
            # FIXME: use version and max_old
            contacts.append(c)
        return contacts
| {
"repo_name": "mtasic85/routingtable",
"path": "contact_list.py",
"copies": "1",
"size": "2632",
"license": "mit",
"hash": 3009064387989034000,
"line_mean": 25.5858585859,
"line_max": 87,
"alpha_frac": 0.5262158055,
"autogenerated": false,
"ratio": 3.6759776536312847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4702193459131285,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Contact']
class Contact(object):
    """A known peer: its id, local/remote endpoints and bootstrap flag."""

    def __init__(self, id=None, local_host=None, local_port=None, remote_host=None, remote_port=None, bootstrap=False, version=None):
        self.id = id
        self.local_host = local_host
        self.local_port = local_port
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.bootstrap = bootstrap
        self.version = version
        # Not serialized by __getstate__; reset on restore.
        self.last_seen = None

    def __repr__(self):
        template = '<{}:{} local={}:{} remote={}:{} bootstrap={}>'
        return template.format(
            self.__class__.__name__,
            self.id,
            self.local_host,
            self.local_port,
            self.remote_host,
            self.remote_port,
            self.bootstrap,
        )

    def __getstate__(self):
        # Pickle only the identifying/endpoint fields (not last_seen/version).
        fields = ('id', 'local_host', 'local_port',
                  'remote_host', 'remote_port', 'bootstrap')
        return {name: getattr(self, name) for name in fields}
| {
"repo_name": "mtasic85/routingtable",
"path": "contact.py",
"copies": "1",
"size": "1059",
"license": "mit",
"hash": 1614035871938475800,
"line_mean": 31.0909090909,
"line_max": 133,
"alpha_frac": 0.5212464589,
"autogenerated": false,
"ratio": 3.922222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.993575035575302,
"avg_score": 0.0015436650738402846,
"num_lines": 33
} |
__all__ = ["Container"]
from typing import List, Iterator, Dict, Optional, Union, Any, Tuple, Iterable
import copy
import os
import tempfile
import docker
import attr
from .bug import Bug
def _convert_tools(tools: Iterable[str]) -> Tuple[str, ...]:
return tuple(tools)
@attr.s(frozen=True)
class Container(object):
    """
    Containers provide ephemeral, mutable instances of registered bugs,
    and are used to conduct studies of software bugs. Behind the scenes,
    containers are implemented as `Docker containers <https://docker.com>`_.
    """
    # NOTE(review): the bare strings below are intended as attribute docs but
    # are plain expression statements; Python does not attach them anywhere.
    """
    A unique identifier for this container.
    """
    uid = attr.ib(type=str)
    """
    The name of the bug that was used to provision this container.
    """
    bug = attr.ib(type=str)
    """
    The names of the tools that are mounted inside this container.
    """
    tools = attr.ib(type=Tuple[str, ...], converter=_convert_tools)
    @property
    def id(self) -> str:
        # Alias for uid.
        return self.uid
    @staticmethod
    def from_dict(d: Dict[str, Any]) -> 'Container':
        # Inverse of to_dict; expects 'uid', 'bug' and 'tools' keys.
        return Container(uid=d['uid'],
                         bug=d['bug'],
                         tools=d['tools'])
    def to_dict(self) -> Dict[str, Any]:
        # JSON-friendly representation (tools tuple becomes a list).
        return {'uid': self.uid,
                'bug': self.bug,
                'tools': list(self.tools)}
| {
"repo_name": "ChrisTimperley/AutomatedRepairBenchmarks.c",
"path": "bugzoo/core/container.py",
"copies": "3",
"size": "1325",
"license": "mit",
"hash": -4440814756706547700,
"line_mean": 23.537037037,
"line_max": 78,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 3.8629737609329444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5962973760932944,
"avg_score": null,
"num_lines": null
} |
__all__ = ["container", "service", "utils", "cli"]
# Directory (relative to the working directory) holding a local environment.
LOCAL_ENV=".maestro"
# Directory holding system-wide ("global") environments.
GLOBAL_ENV="/var/lib/maestro"
# Maintain a list of environments on disk.
# By default an environment is created in .maestro unless -g is specified to make it global.
# Global environments are stored in /var/lib/maestro. Permission settings will come into play for this.
# The environment directory contains:
#   environment.yml - capturing the state of the running system
#   settings.yml - capturing the user configuration settings
#   maestro.yml (??) - the original environment description used to create the environment
# Initialize a new environment
def init_environment(name, description="maestro.yml", system=False):
    """Create a new named environment (stub — not yet implemented).

    :param name: name of the environment to create
    :param description: path to the maestro description file to build from
    :param system: if True, create a system-wide environment (GLOBAL_ENV);
        otherwise create it relative to the local directory (LOCAL_ENV)
    """
    # Verify the environment doesn't already exist
    # Check for both local and system environments that may conflict
    if (system):
        # Create a system wide environment
        pass
    else:
        # We're just creating an environment that lives relative to the local directory
        pass
# retrieve environment
def get_environment(name):
    """Return the environment registered under *name* (stub)."""
    pass
# list environments
def list_environments():
    """List all known environments, local and system-wide (stub)."""
    # Include the local environment if there is one
    # Include a list of the system environments
    pass
def destroy_environment(name):
    """Tear down and delete the environment registered under *name* (stub)."""
    pass
| {
"repo_name": "toscanini/maestro",
"path": "maestro/__init__.py",
"copies": "2",
"size": "1236",
"license": "mit",
"hash": -4921311463823477000,
"line_mean": 30.6923076923,
"line_max": 104,
"alpha_frac": 0.7402912621,
"autogenerated": false,
"ratio": 4.204081632653061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5944372894753062,
"avg_score": null,
"num_lines": null
} |
__all__ = ('ContentCoding', 'Request', 'StreamResponse', 'Response')
import asyncio
import binascii
import cgi
import collections
import datetime
import http.cookies
import io
import json
import math
import time
import warnings
import enum
from email.utils import parsedate
from types import MappingProxyType
from urllib.parse import urlsplit, parse_qsl, unquote
from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
CIMultiDict,
MultiDictProxy,
MultiDict)
from .protocol import Response as ResponseImpl, HttpVersion10
from .streams import EOF_MARKER
# Unique placeholder: distinguishes "header never parsed" from a None value.
sentinel = object()
class HeadersMixin:
    """Mixin adding lazily-parsed Content-Type / charset / Content-Length
    accessors on top of a ``self.headers`` mapping supplied by the host class.
    """
    # Cached parse results; refreshed whenever the raw header value changes.
    _content_type = None
    _content_dict = None
    _stored_content_type = sentinel  # sentinel => header not parsed yet
    def _parse_content_type(self, raw):
        # Parse and memoize the raw Content-Type header value.
        self._stored_content_type = raw
        if raw is None:
            # default value according to RFC 2616
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
        else:
            self._content_type, self._content_dict = cgi.parse_header(raw)
    @property
    def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        # Default-arg binding hoists the constant lookup out of each call.
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_type
    @property
    def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_dict.get('charset')
    @property
    def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header."""
        l = self.headers.get(_CONTENT_LENGTH)
        if l is None:
            return None
        else:
            return int(l)
# Lightweight record describing one uploaded file from a multipart POST body.
FileField = collections.namedtuple('Field', 'name filename file content_type')
class ContentCoding(enum.Enum):
    """HTTP content codings supported for response compression."""
    # The content codings that we have support for.
    #
    # Additional registered codings are listed at:
    # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
    deflate = 'deflate'
    gzip = 'gzip'
    identity = 'identity'
############################################################
# HTTP Request
############################################################
class Request(dict, HeadersMixin):
    """A single client HTTP request.

    Wraps the parsed message, payload stream and transport supplied by the
    protocol layer and exposes read-only accessors plus lazy body / POST
    parsing. Inherits ``dict`` so handlers can stash per-request state on it.
    """
    # Methods whose body is worth parsing in post().
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
                    hdrs.METH_TRACE, hdrs.METH_DELETE}
    def __init__(self, app, message, payload, transport, reader, writer, *,
                 _HOST=hdrs.HOST, secure_proxy_ssl_header=None):
        self._app = app
        self._version = message.version
        self._transport = transport
        self._reader = reader
        self._writer = writer
        self._method = message.method
        self._host = message.headers.get(_HOST)
        self._path_qs = message.path
        self._post = None
        self._post_files_cache = None
        self._headers = CIMultiDictProxy(message.headers)
        if self._version < HttpVersion10:
            # HTTP/1.0 and below close the connection by default
            self._keep_alive = False
        else:
            self._keep_alive = not message.should_close
        # matchdict, route_name, handler
        # or information about traversal lookup
        self._match_info = None  # initialized after route resolving
        self._payload = payload
        self._cookies = None
        self._read_bytes = None
        self._has_body = not payload.at_eof()
        self._secure_proxy_ssl_header = secure_proxy_ssl_header
    @property
    def scheme(self):
        """A string representing the scheme of the request.
        'http' or 'https'.
        """
        if self._transport.get_extra_info('sslcontext'):
            return 'https'
        # Trust a configured proxy header (e.g. X-Forwarded-Proto) if present.
        secure_proxy_ssl_header = self._secure_proxy_ssl_header
        if secure_proxy_ssl_header is not None:
            header, value = secure_proxy_ssl_header
            if self._headers.get(header) == value:
                return 'https'
        return 'http'
    @property
    def method(self):
        """Read only property for getting HTTP method.
        The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
        """
        return self._method
    @property
    def version(self):
        """Read only property for getting HTTP version of request.
        Returns aiohttp.protocol.HttpVersion instance.
        """
        return self._version
    @property
    def host(self):
        """Read only property for getting *HOST* header of request.
        Returns str or None if HTTP request has no HOST header.
        """
        return self._host
    @property
    def path_qs(self):
        """The URL including PATH_INFO and the query string.
        E.g, /app/blog?id=10
        """
        return self._path_qs
    @reify
    def _splitted_path(self):
        # Split path/query once and cache the result on the instance.
        return urlsplit(self._path_qs)
    @property
    def raw_path(self):
        """ The URL including raw *PATH INFO* without the host or scheme.
        Warning, the path is unquoted and may contains non valid URL characters
        E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
        """
        return self._splitted_path.path
    @reify
    def path(self):
        """The URL including *PATH INFO* without the host or scheme.
        E.g., ``/app/blog``
        """
        return unquote(self.raw_path)
    @reify
    def query_string(self):
        """The query string in the URL.
        E.g., id=10
        """
        return self._splitted_path.query
    @reify
    def GET(self):
        """A multidict with all the variables in the query string.
        Lazy property.
        """
        return MultiDictProxy(MultiDict(parse_qsl(self.query_string,
                                                  keep_blank_values=True)))
    @reify
    def POST(self):
        """A multidict with all the variables in the POST parameters.
        post() methods has to be called before using this attribute.
        """
        if self._post is None:
            raise RuntimeError("POST is not available before post()")
        return self._post
    @property
    def headers(self):
        """A case-insensitive multidict proxy with all headers."""
        return self._headers
    @property
    def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
        """The value of If-Modified-Since HTTP header, or None.
        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_IF_MODIFIED_SINCE)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None
    @property
    def keep_alive(self):
        """Is keepalive enabled by client?"""
        return self._keep_alive
    @property
    def match_info(self):
        """Result of route resolving."""
        return self._match_info
    @property
    def app(self):
        """Application instance."""
        return self._app
    @property
    def transport(self):
        """Transport used for request processing."""
        return self._transport
    @property
    def cookies(self):
        """Return request cookies.
        A read-only dictionary-like object.
        """
        if self._cookies is None:
            raw = self.headers.get(hdrs.COOKIE, '')
            parsed = http.cookies.SimpleCookie(raw)
            self._cookies = MappingProxyType(
                {key: val.value for key, val in parsed.items()})
        return self._cookies
    @property
    def payload(self):
        """Return raw payload stream."""
        warnings.warn('use Request.content instead', DeprecationWarning)
        return self._payload
    @property
    def content(self):
        """Return raw payload stream."""
        return self._payload
    @property
    def has_body(self):
        """Return True if request has HTTP BODY, False otherwise."""
        return self._has_body
    @asyncio.coroutine
    def release(self):
        """Release request.
        Eat unread part of HTTP BODY if present.
        """
        chunk = yield from self._payload.readany()
        # NOTE(review): loop relies on EOF_MARKER being falsy -- when readany()
        # returns the EOF_MARKER object both halves of the test fail; verify
        # EOF_MARKER stays falsy in .streams.
        while chunk is not EOF_MARKER or chunk:
            chunk = yield from self._payload.readany()
    @asyncio.coroutine
    def read(self):
        """Read request body if present.
        Returns bytes object with full request content.
        """
        if self._read_bytes is None:
            body = bytearray()
            while True:
                chunk = yield from self._payload.readany()
                body.extend(chunk)
                if chunk is EOF_MARKER:
                    break
            self._read_bytes = bytes(body)  # cache: repeated read() is cheap
        return self._read_bytes
    @asyncio.coroutine
    def text(self):
        """Return BODY as text using encoding from .charset."""
        bytes_body = yield from self.read()
        encoding = self.charset or 'utf-8'
        return bytes_body.decode(encoding)
    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Return BODY as JSON."""
        body = yield from self.text()
        return loader(body)
    @asyncio.coroutine
    def post(self):
        """Return POST parameters."""
        if self._post is not None:
            return self._post
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post
        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Unknown content type: expose an empty mapping, not an error.
            self._post = MultiDictProxy(MultiDict())
            return self._post
        body = yield from self.read()
        content_charset = self.charset or 'utf-8'
        # Build a minimal CGI environ so cgi.FieldStorage can parse the body.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)
        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }
        out = MultiDict()
        _count = 1
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                # File upload: keep the FieldStorage alive in a cache so the
                # underlying temp file is not garbage-collected and closed.
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                self._post_files_cache[field.name+str(_count)] = field
                _count += 1
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)
        self._post = MultiDictProxy(out)
        return self._post
    def __repr__(self):
        return "<{} {} {} >".format(self.__class__.__name__,
                                    self.method, self.path)
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
    """Streaming HTTP response.

    Headers, cookies and status are mutable until start() sends the header
    block; after that write()/write_eof() stream the body to the transport.
    """
    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._chunk_size = None
        self._compression = False
        self._compression_force = False
        self._headers = CIMultiDict()
        self._cookies = http.cookies.SimpleCookie()
        self.set_status(status, reason)
        self._req = None
        self._resp_impl = None
        self._eof_sent = False
        if headers is not None:
            self._headers.extend(headers)
    def _copy_cookies(self):
        # Serialize each cookie into its own Set-Cookie header.
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            self.headers.add(hdrs.SET_COOKIE, value)
    @property
    def started(self):
        # True once start() has created the low-level response.
        return self._resp_impl is not None
    @property
    def status(self):
        return self._status
    @property
    def chunked(self):
        return self._chunked
    @property
    def compression(self):
        return self._compression
    @property
    def reason(self):
        return self._reason
    def set_status(self, status, reason=None):
        self._status = int(status)
        if reason is None:
            # Derive the standard reason phrase from the status code.
            reason = ResponseImpl.calc_reason(status)
        self._reason = reason
    @property
    def keep_alive(self):
        return self._keep_alive
    def force_close(self):
        self._keep_alive = False
    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        self._chunk_size = chunk_size
    def enable_compression(self, force=None):
        """Enables response compression encoding."""
        # Backwards compatibility for when force was a bool <0.17.
        if type(force) == bool:
            force = ContentCoding.deflate if force else ContentCoding.identity
        self._compression = True
        self._compression_force = force
    @property
    def headers(self):
        return self._headers
    @property
    def cookies(self):
        return self._cookies
    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.
        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie
            self._cookies.pop(name, None)
        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        if path is not None:
            c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version
    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.
        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0, domain=domain, path=path)
    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length
    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self.headers[hdrs.CONTENT_LENGTH] = str(value)
        elif hdrs.CONTENT_LENGTH in self.headers:
            del self.headers[hdrs.CONTENT_LENGTH]
    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type
    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()
    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset
    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()
    @property
    def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
        """The value of Last-Modified HTTP header, or None.
        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_LAST_MODIFIED)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None
    @last_modified.setter
    def last_modified(self, value):
        # Accepts None (delete header), POSIX timestamp, datetime, or raw str.
        if value is None:
            if hdrs.LAST_MODIFIED in self.headers:
                del self.headers[hdrs.LAST_MODIFIED]
        elif isinstance(value, (int, float)):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
        elif isinstance(value, datetime.datetime):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
        elif isinstance(value, str):
            self.headers[hdrs.LAST_MODIFIED] = value
    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Rebuild the Content-Type header from cached type + params.
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype
    def _start_pre_check(self, request):
        # Guard against starting the same response for a different request.
        if self._resp_impl is not None:
            if self._req is not request:
                raise RuntimeError(
                    'Response has been started with different request.')
            else:
                return self._resp_impl
        else:
            return None
    def _start_compression(self, request):
        def start(coding):
            if coding != ContentCoding.identity:
                self.headers[hdrs.CONTENT_ENCODING] = coding.value
                self._resp_impl.add_compression_filter(coding.value)
        if self._compression_force:
            start(self._compression_force)
        else:
            # Negotiate: pick the first supported coding the client accepts.
            accept_encoding = request.headers.get(
                hdrs.ACCEPT_ENCODING, '').lower()
            for coding in ContentCoding:
                if coding.value in accept_encoding:
                    start(coding)
                    return
    def start(self, request):
        # Idempotent for the same request; sends the header block once.
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl
        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive
        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not keep_alive,
            self._reason)
        self._copy_cookies()
        if self._compression:
            self._start_compression(request)
        if self._chunked:
            resp_impl.enable_chunked_encoding()
            if self._chunk_size:
                resp_impl.add_chunking_filter(self._chunk_size)
        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)
        resp_impl.send_headers()
        return resp_impl
    def write(self, data):
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            'data argument must be byte-ish (%r)' % type(data)
        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._resp_impl is None:
            raise RuntimeError("Cannot call write() before start()")
        if data:
            return self._resp_impl.write(data)
        else:
            return ()
    @asyncio.coroutine
    def drain(self):
        # Flow control: wait until the transport buffer drains.
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.transport.drain()
    @asyncio.coroutine
    def write_eof(self):
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.write_eof()
        self._eof_sent = True
    def __repr__(self):
        if self.started:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not started"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)
class Response(StreamResponse):
    """Buffered HTTP response holding its full body in memory.

    Accepts either ``body`` (bytes) or ``text`` (str) -- never both; the
    body is flushed to the transport in write_eof().
    """
    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None):
        super().__init__(status=status, reason=reason, headers=headers)
        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together.")
        if text is not None:
            if hdrs.CONTENT_TYPE not in self.headers:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError('text argument must be str (%r)' %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                self.headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=utf-8')
                self._content_type = content_type
                self._content_dict = {'charset': 'utf-8'}
                self.body = text.encode('utf-8')
            else:
                # Content-Type already set: go through the text setter so the
                # declared charset is honored.
                self.text = text
        else:
            if content_type:
                self.content_type = content_type
            if body is not None:
                self.body = body
            else:
                self.body = None
    @property
    def body(self):
        return self._body
    @body.setter
    def body(self, body):
        if body is not None and not isinstance(body, bytes):
            raise TypeError('body argument must be bytes (%r)' % type(body))
        self._body = body
        # Keep Content-Length in sync with the buffered body.
        if body is not None:
            self.content_length = len(body)
        else:
            self.content_length = 0
    @property
    def text(self):
        if self._body is None:
            return None
        return self._body.decode(self.charset or 'utf-8')
    @text.setter
    def text(self, text):
        if text is not None and not isinstance(text, str):
            raise TypeError('text argument must be str (%r)' % type(text))
        if self.content_type == 'application/octet-stream':
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'
        self.body = text.encode(self.charset)
    @asyncio.coroutine
    def write_eof(self):
        body = self._body
        if body is not None:
            self.write(body)
        yield from super().write_eof()
| {
"repo_name": "alexsdutton/aiohttp",
"path": "aiohttp/web_reqrep.py",
"copies": "4",
"size": "23996",
"license": "apache-2.0",
"hash": 759236743468634800,
"line_mean": 30.2855280313,
"line_max": 91,
"alpha_frac": 0.5589681614,
"autogenerated": false,
"ratio": 4.394871794871795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004484662096855785,
"num_lines": 767
} |
# All content provided by the loader
# must come from canonical sources
# found in the implementation of this server interface
# (within src/interfacing/ogs/)
# This module should not create *any* original content
# other than the loader
# Specifications of the keys the loader should provide
# can be found in src/interfacing/servers.py
# All imports go inside the functions
# otherwise, there is no point in doing it this way.
def main_loader() -> dict:
    """Assemble the loader dict for the main OGS server.

    All values are sourced from the canonical OGS interface modules,
    except the auth-key list, which is declared here.
    """
    from .resources import main_server_strings as strings
    from .connect import Authentication
    from .players import OGSPlayer
    from .tournaments import OGSTournament
    return {
        'str_name': strings['context'],
        'str_shortname': strings['context_short'],
        'bool_tester': False,
        'cls_auth': Authentication,
        'list_authkeys': ['user', 'password'],  # original content, beware
        'cls_player': OGSPlayer,
        'cls_tournament': OGSTournament,
    }
def beta_loader() -> dict:
    """Assemble the loader dict for the beta (testing) OGS server.

    Identical in shape to main_loader(), but with the beta strings/classes
    and an Authentication factory pre-bound to testing mode.
    """
    from functools import partial, wraps
    from .resources import beta_server_strings as strings
    from .connect import Authentication
    from .players import OGSBetaPlayer
    from .tournaments import OGSBetaTournament
    auth_factory = wraps(Authentication)(partial(Authentication, testing=True))
    return {
        'str_name': strings['context'],
        'str_shortname': strings['context_short'],
        'bool_tester': True,
        'cls_auth': auth_factory,
        'list_authkeys': ['user', 'password'],  # original content, beware
        'cls_player': OGSBetaPlayer,
        'cls_tournament': OGSBetaTournament,
    }
| {
"repo_name": "juanchodepisa/sbtk",
"path": "SBTK_League_Helper/src/interfacing/ogs/loaders.py",
"copies": "1",
"size": "1730",
"license": "mit",
"hash": 1847776373533966600,
"line_mean": 29.9107142857,
"line_max": 87,
"alpha_frac": 0.6924855491,
"autogenerated": false,
"ratio": 4.158653846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5351139395253846,
"avg_score": null,
"num_lines": null
} |
__all__ = ['contHSRA']
import numpy as np
def contHSRA(wavelength, units='frequency'):
    """
    Return the continuum spectrum of the HSRA model, obtained as a 3rd degree polynomial fit. This gives
    a maximum error of 0.125%

    Args:
        wavelength (ndarray): wavelength in Angstrom (float array; an integer
            array would silently truncate the polynomial values)
        units (str, optional): 'frequency' or 'wavelength' units

    Returns:
        ndarray: value of the continuum in frequency units, or in the raw
        per-wavelength fit units when ``units != 'frequency'``
    """
    spec = np.zeros_like(wavelength)
    # Piecewise polynomial fit: one coefficient set per wavelength band.
    # Coefficients are stored lowest-order first, hence the [::-1] for polyval.
    bands = [
        (None, 3644.15, [-4.906054765549e13, 1.684734544039e11, 1.507254517567e7, -7561.242976546]),
        (3644.15, 3750.0, [-4.4650822755e14, 6.1319780351059e11, -9.350928003805e7]),
        (3750.0, 6250.0, [-1.025961e15, 1.3172859e12, -3.873465e8, 46486.541, -2.049]),
        (6250.0, 8300.0, [4.861821e15, -2.2589885e12, 4.3764376e8, -39279.61444, 1.34388]),
        (8300.0, 8850.0, [1.758394e15, -3.293986e11, 1.6782617e7]),
        (8850.0, 10000.0, [1.61455557e16, -6.544209e12, 1.0159316e9, -70695.58136, 1.852022]),
        (10000.0, None, [7.97805136e14, -1.16906597e11, 5.315222e6, -4.57327954, -3.473452e-3]),
    ]
    for lower, upper, c in bands:
        mask = np.ones_like(wavelength, dtype=bool)
        if lower is not None:
            mask &= wavelength >= lower
        if upper is not None:
            mask &= wavelength < upper
        spec[mask] = np.polyval(np.asarray(c)[::-1], wavelength[mask])
    if (units == 'frequency'):
        # Convert I_lambda -> I_nu: multiply by lambda^2 / c (lambda in cm,
        # c in cm/s).
        return spec * (wavelength * 1e-8)**2 / 299792458.0e2
    else:
        return spec
    # Fix: removed the unreachable trailing "return spec * (wavelength)" that
    # followed the if/else above (both branches already return).
# Quick visual check: plot the continuum over the visible range.
# (Requires matplotlib; only runs when executed as a script.)
if (__name__ == '__main__'):
    import matplotlib.pyplot as pl
    x = np.linspace(3500, 8500, 200)
    res = contHSRA(x)
    pl.plot(x, res)
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/util/contHSRA.py",
"copies": "1",
"size": "2161",
"license": "mit",
"hash": 598044820808124500,
"line_mean": 32.2615384615,
"line_max": 104,
"alpha_frac": 0.6122165664,
"autogenerated": false,
"ratio": 2.756377551020408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8774259102568212,
"avg_score": 0.018867002970439254,
"num_lines": 65
} |
__all__ = ('convert', 'default', 'round_units', 'decompose')
from .form import *
import cbpos
logger = cbpos.get_logger(__name__)
def convert(price_in_src, src, dst):
    """Convert an amount from currency *src* to currency *dst*.

    Conversion goes through the reference currency using each currency's
    current exchange-rate ratios. A zero amount short-circuits to the
    literal int 0, and identical currencies return the amount unchanged.
    """
    if price_in_src == 0:
        return 0
    if src == dst:
        return price_in_src
    logger.debug(u'Converting {} from {} to {}'.format(repr(price_in_src), src, dst))
    price_in_dst = price_in_src * src.current_rate.reference_to_currency_ratio * dst.current_rate.currency_to_reference_ratio
    logger.debug(u'Price in {}: {}'.format(dst, repr(price_in_dst)))
    return price_in_dst
# (currency_id, Currency) tuple; lazily filled by get_default().
_default_cache = None
def get_default():
    """Return the default Currency, querying the database only once.

    The configured 'mod.currency'/'default' id wins; otherwise the first
    Currency row is used. NOTE(review): the cache is never invalidated, so
    a configuration change only takes effect after a restart -- confirm
    this is intended.
    """
    global _default_cache
    if _default_cache is None:
        currency_id = cbpos.config['mod.currency', 'default']
        session = cbpos.database.session()
        if currency_id is not None:
            _default_cache = (currency_id, session.query(Currency).filter_by(id=currency_id).one())
        else:
            _default_cache = (None, session.query(Currency).first())
    return _default_cache[1]
from peak.util.proxies import ObjectProxy, CallbackProxy
#import weakref
# `default` behaves like the default Currency object but resolves it lazily
# on every access via get_default().
default = CallbackProxy(get_default)
#default = weakref.proxy(get_default)
def round_units(price, currency):
    """Round *price* up to the nearest multiple of the currency's smallest unit."""
    smallest = min(currency.units).value
    leftover = price % smallest
    if leftover == 0:
        return price
    return price + smallest - leftover
def decompose(price, currency):
    """Greedily break *price* into a list of currency units, largest first.

    Each iteration takes the biggest unit not exceeding the remainder; if
    every unit is too big, the smallest unit is used (overshooting), which
    also guarantees the loop terminates.
    """
    remainder = price
    units = []
    while remainder > 0:
        candidates = (u for u in sorted(currency.units, reverse=True) if u.value <= remainder)
        try:
            # Fix: use the next() builtin instead of generator.next(), which
            # is Python-2-only; next() works on Python 2.6+ and Python 3.
            u = next(candidates)
        except StopIteration:
            u = min(currency.units)
        remainder -= u.value
        units.append(u)
    return units
| {
"repo_name": "coinbox/coinbox-mod-currency",
"path": "cbmod/currency/controllers/__init__.py",
"copies": "1",
"size": "1717",
"license": "mit",
"hash": -2734892821208616000,
"line_mean": 29.1228070175,
"line_max": 125,
"alpha_frac": 0.632498544,
"autogenerated": false,
"ratio": 3.59958071278826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.473207925678826,
"avg_score": null,
"num_lines": null
} |
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ..measure import grid_points_in_poly
from ._convex_hull import possible_hull
from ..measure._label import label
from ..util import unique_rows
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    image = image.astype(bool)
    # Here we do an optimisation by choosing only pixels that are
    # the starting or ending pixel of a row or column. This vastly
    # limits the number of coordinates to examine for the virtual
    # hull.
    coords = possible_hull(image.astype(np.uint8))
    N = len(coords)
    # Add a vertex for the middle of each pixel edge
    coords_corners = np.empty((N * 4, 2))
    for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5),
                                                 (-0.5, 0.5, 0, 0))):
        coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset]
    # repeated coordinates can *sometimes* cause problems in
    # scipy.spatial.Delaunay, so we remove them.
    coords = unique_rows(coords_corners)
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    # Subtract offset
    # (centering improves the numerical conditioning of the triangulation)
    offset = coords.mean(axis=0)
    coords -= offset
    # Find the convex hull
    chull = Delaunay(coords).convex_hull
    v = coords[np.unique(chull)]
    # Sort vertices clock-wise
    v_centred = v - v.mean(axis=0)
    angles = np.arctan2(v_centred[:, 0], v_centred[:, 1])
    v = v[np.argsort(angles)]
    # Add back offset
    v += offset
    # For each pixel coordinate, check whether that pixel
    # lies inside the convex hull
    mask = grid_points_in_poly(image.shape[:2], v)
    return mask
def convex_hull_object(image, neighbors=8):
    """Compute the convex hull image of individual objects in a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image.
    neighbors : {4, 8}, int
        Whether to use 4- or 8-connectivity.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    Notes
    -----
    This function uses skimage.morphology.label to define unique objects,
    finds the convex hull of each using convex_hull_image, and combines
    these regions with logical OR. Be aware the convex hulls of unconnected
    objects may overlap in the result. If this is suspected, consider using
    convex_hull_image separately on each object.
    """
    if neighbors != 4 and neighbors != 8:
        raise ValueError('Neighbors must be either 4 or 8.')
    labeled_im = label(image, neighbors, background=0)
    # Fix: removed the dead pre-loop initialization of convex_obj, which was
    # unconditionally overwritten on the first loop iteration.
    convex_img = np.zeros(image.shape, dtype=bool)
    # OR together the hull of every labeled object.
    # NOTE(review): assumes labels run 0..max with background excluded by
    # ``background=0`` -- confirm against this skimage version's label().
    for i in range(0, labeled_im.max() + 1):
        convex_obj = convex_hull_image(labeled_im == i)
        convex_img = np.logical_or(convex_img, convex_obj)
    return convex_img
| {
"repo_name": "oew1v07/scikit-image",
"path": "skimage/morphology/convex_hull.py",
"copies": "18",
"size": "3680",
"license": "bsd-3-clause",
"hash": 5027189214484255000,
"line_mean": 29.9243697479,
"line_max": 96,
"alpha_frac": 0.6429347826,
"autogenerated": false,
"ratio": 3.691073219658977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ._pnpoly import grid_points_inside_poly
from ._convex_hull import possible_hull
from ..measure._label import label
from skimage.util import unique_rows
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    image = image.astype(bool)
    # Here we do an optimisation by choosing only pixels that are
    # the starting or ending pixel of a row or column. This vastly
    # limits the number of coordinates to examine for the virtual
    # hull.
    coords = possible_hull(image.astype(np.uint8))
    N = len(coords)
    # Add a vertex for the middle of each pixel edge
    coords_corners = np.empty((N * 4, 2))
    for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5),
                                                 (-0.5, 0.5, 0, 0))):
        coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset]
    # repeated coordinates can *sometimes* cause problems in
    # scipy.spatial.Delaunay, so we remove them.
    coords = unique_rows(coords_corners)
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    # Subtract offset
    # (centering improves the numerical conditioning of the triangulation)
    offset = coords.mean(axis=0)
    coords -= offset
    # Find the convex hull
    chull = Delaunay(coords).convex_hull
    v = coords[np.unique(chull)]
    # Sort vertices clock-wise
    v_centred = v - v.mean(axis=0)
    angles = np.arctan2(v_centred[:, 0], v_centred[:, 1])
    v = v[np.argsort(angles)]
    # Add back offset
    v += offset
    # For each pixel coordinate, check whether that pixel
    # lies inside the convex hull
    mask = grid_points_inside_poly(image.shape[:2], v)
    return mask
def convex_hull_object(image, neighbors=8):
    """Compute the convex hull image of individual objects in a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image.
    neighbors : {4, 8}, int
        Whether to use 4- or 8-connectivity.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    Notes
    -----
    This function uses skimage.morphology.label to define unique objects,
    finds the convex hull of each using convex_hull_image, and combines
    these regions with logical OR. Be aware the convex hulls of unconnected
    objects may overlap in the result. If this is suspected, consider using
    convex_hull_image separately on each object.
    """
    if neighbors != 4 and neighbors != 8:
        raise ValueError('Neighbors must be either 4 or 8.')
    labeled_im = label(image, neighbors, background=0)
    # Fix: removed the dead pre-loop initialization of convex_obj, which was
    # unconditionally overwritten on the first loop iteration.
    convex_img = np.zeros(image.shape, dtype=bool)
    # OR together the hull of every labeled object.
    # NOTE(review): assumes labels run 0..max with background excluded by
    # ``background=0`` -- confirm against this skimage version's label().
    for i in range(0, labeled_im.max() + 1):
        convex_obj = convex_hull_image(labeled_im == i)
        convex_img = np.logical_or(convex_img, convex_obj)
    return convex_img
| {
"repo_name": "SamHames/scikit-image",
"path": "skimage/morphology/convex_hull.py",
"copies": "1",
"size": "3693",
"license": "bsd-3-clause",
"hash": 7817687768553569000,
"line_mean": 30.0336134454,
"line_max": 96,
"alpha_frac": 0.6444624966,
"autogenerated": false,
"ratio": 3.6893106893106893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9833340022954785,
"avg_score": 0.00008663259118080222,
"num_lines": 119
} |
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ._pnpoly import grid_points_inside_poly
from ._convex_hull import possible_hull
from skimage.morphology import label
from skimage.util import unique_rows
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    image = image.astype(bool)
    # Here we do an optimisation by choosing only pixels that are
    # the starting or ending pixel of a row or column. This vastly
    # limits the number of coordinates to examine for the virtual
    # hull.
    coords = possible_hull(image.astype(np.uint8))
    N = len(coords)
    # Add a vertex for the middle of each pixel edge, so the hull polygon
    # passes through pixel borders rather than pixel centres.
    coords_corners = np.empty((N * 4, 2))
    for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5),
                                                 (-0.5, 0.5, 0, 0))):
        coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset]
    # repeated coordinates can *sometimes* cause problems in
    # scipy.spatial.Delaunay, so we remove them.
    coords = unique_rows(coords_corners)
    # Delaunay is imported lazily so the module itself works on old scipy.
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    # Subtract offset (centre the point cloud; presumably this improves the
    # numerical behaviour of Delaunay -- the offset is restored below)
    offset = coords.mean(axis=0)
    coords -= offset
    # Find the convex hull
    chull = Delaunay(coords).convex_hull
    v = coords[np.unique(chull)]
    # Sort vertices clock-wise by their angle around the vertex centroid
    v_centred = v - v.mean(axis=0)
    angles = np.arctan2(v_centred[:, 0], v_centred[:, 1])
    v = v[np.argsort(angles)]
    # Add back offset
    v += offset
    # For each pixel coordinate, check whether that pixel
    # lies inside the convex hull
    mask = grid_points_inside_poly(image.shape[:2], v)
    return mask
def convex_hull_object(image, neighbors=8):
    """Compute the convex hull image of individual objects in a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image.
    neighbors : {4, 8}, int
        Whether to use 4- or 8-connectivity.
    Returns
    -------
    hull : ndarray of bool
        Binary image with pixels in convex hull set to True.
    Notes
    -----
    Objects are separated with skimage.morphology.label, the convex hull of
    each labelled object is computed with convex_hull_image, and the hulls are
    merged with logical OR.  Hulls of unconnected objects may overlap in the
    result; if that matters, run convex_hull_image per object instead.
    """
    if neighbors not in (4, 8):
        raise ValueError('Neighbors must be either 4 or 8.')
    labeled_im = label(image, neighbors, background=0)
    combined_hull = np.zeros(image.shape, dtype=bool)
    for object_label in range(labeled_im.max() + 1):
        object_hull = convex_hull_image(labeled_im == object_label)
        combined_hull = np.logical_or(combined_hull, object_hull)
    return combined_hull
| {
"repo_name": "chintak/scikit-image",
"path": "skimage/morphology/convex_hull.py",
"copies": "1",
"size": "3695",
"license": "bsd-3-clause",
"hash": -4297090808601260500,
"line_mean": 30.0504201681,
"line_max": 96,
"alpha_frac": 0.6454668471,
"autogenerated": false,
"ratio": 3.6839481555334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48294150026334,
"avg_score": null,
"num_lines": null
} |
__all__ = ['convex_hull_image']
import numpy as np
from ._pnpoly import points_inside_poly, grid_points_inside_poly
from ._convex_hull import possible_hull
def convex_hull_image(image):
    """Compute the convex hull image of a binary image.
    The convex hull is the set of pixels included in the smallest convex
    polygon that surround all white pixels in the input image.
    Parameters
    ----------
    image : ndarray
        Binary input image. This array is cast to bool before processing.
    Returns
    -------
    hull : ndarray of uint8
        Binary image with pixels in convex hull set to 255.
        NOTE(review): a later revision of this function documents the return
        as bool/True; the actual dtype comes from grid_points_inside_poly --
        confirm which is correct here.
    References
    ----------
    .. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
    """
    image = image.astype(bool)
    # Here we do an optimisation by choosing only pixels that are
    # the starting or ending pixel of a row or column. This vastly
    # limits the number of coordinates to examine for the virtual
    # hull.
    coords = possible_hull(image.astype(np.uint8))
    N = len(coords)
    # Add a vertex for the middle of each pixel edge
    coords_corners = np.empty((N * 4, 2))
    for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5),
                                                 (-0.5, 0.5, 0, 0))):
        coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset]
    # NOTE(review): unlike the later revision, duplicate coordinates are NOT
    # removed here; scipy.spatial.Delaunay can sometimes object to repeats.
    coords = coords_corners
    # Delaunay is imported lazily so the module itself works on old scipy.
    try:
        from scipy.spatial import Delaunay
    except ImportError:
        raise ImportError('Could not import scipy.spatial, only available in '
                          'scipy >= 0.9.')
    # Find the convex hull
    chull = Delaunay(coords).convex_hull
    v = coords[np.unique(chull)]
    # Sort vertices clock-wise by their angle around the vertex centroid
    v_centred = v - v.mean(axis=0)
    angles = np.arctan2(v_centred[:, 0], v_centred[:, 1])
    v = v[np.argsort(angles)]
    # For each pixel coordinate, check whether that pixel
    # lies inside the convex hull
    mask = grid_points_inside_poly(image.shape[:2], v)
    return mask
| {
"repo_name": "emmanuelle/scikits.image",
"path": "skimage/morphology/convex_hull.py",
"copies": "2",
"size": "2032",
"license": "bsd-3-clause",
"hash": -2243648198975040800,
"line_mean": 29.7878787879,
"line_max": 96,
"alpha_frac": 0.6220472441,
"autogenerated": false,
"ratio": 3.571177504393673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193224748493673,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.  The original list repeated 'tfidf_unweighted'
# twice; the second entry was meant to be 'tfidf_weighted', which is defined
# below but was never exported.
__all__ = ['cooc_simple', 'cooc_advanced',
           'tfidf_unweighted', 'tfidf_weighted',
           'cosine', 'pearson', 'leave_top_n']
import math
import numpy as np
from scipy import sparse
# Silence numpy floating-point warnings: the similarity computations below
# routinely divide by zero / produce invalid values that are masked afterwards.
np.seterr(all='ignore',invalid='ignore')
def rated_X_and_Y(R, given_items):
    '''Dense matrix: cell [x, y] = number of users who rated both
    given_items[x] and item y, computed as a sparse dot product.
    '''
    co_rating_counts = R[:, given_items].T * R
    # make the dense nature of the result explicit
    return co_rating_counts.todense()
def rated_X(R, given_items):
    '''Dense matrix: row x holds, in every column, the number of users who
    rated given_items[x] (the per-item count broadcast across all items).
    '''
    per_item_counts = R[:, given_items].sum(0).T
    return per_item_counts * np.ones((1, R.shape[1]))
def cooc_simple(model, given_items):
    '''Builds matrix M1 given_items x num_items, where M1[X,Y] = (rated both X and Y) / rated X:
    - co-rating counts come from a sparse dot product (rated_X_and_Y)
    - per-item counts come from a column-wise sum broadcast across columns (rated_X)
    - the counts are integers, so true_divide is used to obtain float scores
    parameter 'model' is a PreferenceModel
    given_items is a list of items to compute scores for
    '''
    # here we check that this is right type of model
    R = model.P()
    scores = np.true_divide(rated_X_and_Y(R, given_items),
                            rated_X(R, given_items))
    # cooccurence algorithms assume the given items aren't scored;
    # -1 is used because if there's less than n suggestions, 0s can also be recommended
    scores[range(len(given_items)), given_items] = -1
    return scores
def cooc_advanced(model, given_items):
    '''Builds matrix M2 given_items x num_items, where
        M2[X,Y] = ( (rated both X and Y) /
                     rated X) /
                  ( (rated A not X) /
                     not rated X )
    Let's avoid some divisions:
        = ((rated both X and Y) * (not rated X)) /
          ( (rated X) * (rated Y not X) )
    Theoretically, both numerator and denominator can be computed using the same function cooc_simple
    and swapping 0s and 1s in X. However, it is not a good idea to do the swap in a sparse matrix ;-)
    Instead, let's notice that 'not rated X = total users - rated X'
    In a similar fashion, 'rated Y but not X = rated Y - rated Y and X'
        = ((rated both X and Y) * (total users - rated X)) /
          ( (rated X) * (rated Y - rated X and Y) )
    '''
    # here we check that this is right type of model
    R = model.P()
    rated_x = rated_X(R, given_items)
    rated_x_and_y = rated_X_and_Y(R, given_items)
    # rated_y: per-item rating counts broadcast down the rows
    rated_y = np.ones((len(given_items),1)) * R.sum(0)
    total_users = R.shape[0]
    # extract here to handle division by zero
    cooc_x = np.multiply( rated_x_and_y , total_users - rated_x )
    cooc_not_x = np.multiply( rated_x , rated_y - rated_x_and_y )
    # For some y, there are no users rated y but not x (at least, for x = y).
    # mask zero values in the denominator
    zero_mask = cooc_not_x == 0
    P = np.true_divide ( cooc_x,
                         np.ma.masked_array(cooc_not_x, zero_mask) )
    # cooccurence algorithms assume the given items aren't scored
    # -1 is used because if there's less than n suggestions, 0s can also be recommended
    P[range(len(given_items)),given_items] = -1
    # fill missing values (x/0 and 0/0) with 0
    return P.filled(0)
def tfidf_unweighted(model, given_users):
    '''For given user ids, return scores for all items we have.
    We assume that model has TFIDF scores for items (I) and binary user preferences (P).
    The preferences are used to compute unweighted user profiles.
    '''
    # get submatrix for given users (still sparse)
    U_given = __unweighted_user_profiles(model.P()[given_users], model.I())
    # having given user profiles in U_given and all item profiles, compute pairwise similarity (distance)
    # using cosine distance function
    scores = cosine(U_given, model.I())
    # now set to -999 scores of all items rated by user
    # as we're working from dense matrix given_users x items,
    # use the corresponding part of ratings matrix to mask all cells with ratings and fill them with -999
    # -999 is used because if there's less than n suggestions, 0s can also be recommended
    return np.ma.masked_array(scores.todense(), model.R()[given_users].todense()).filled(-999)
def tfidf_weighted(model, given_users):
    '''For given user ids, return scores for all items we have.
    We assume the model has TFIDF item profiles (I) and user ratings (R).
    First, we compute relative ratings for each user with the user median as a reference point.
    Then, proceed with computing weighted user profiles.
    NOTE(review): the profile helper (__weighted_user_profiles) actually
    centres ratings on the user *mean*, not the median stated above --
    confirm which is intended.
    '''
    # get submatrix for given users (still sparse)
    U_given = __weighted_user_profiles(model.R()[given_users], model.I())
    # having given user profiles in U_given and all item profiles, compute pairwise similarity (distance)
    # using cosine distance function
    scores = cosine(U_given, model.I())
    # now set to -999 scores of all items rated by user
    # as we're working from dense matrix given_users x items,
    # use the corresponding part of ratings matrix to mask all cells with ratings and fill them with -999
    # -999 is used because if there's less than n item user likes, we'll recommend some with score < 0
    return np.ma.masked_array(scores.todense(), model.R()[given_users].todense()).filled(-999)
def user_based_knn(model, n, given_users, given_items, distance, promote_users = True,
                   exclude_seen = False, normalize = 'none'):
    '''For each (user,item) pair, identify n nearest neighbours that rated this item,
    using distance on rating vectors (already mean-centered from model),
    then compute mean-centered average score for this item using those n neighbours.
    Parameter promote_users says if users from below n should be promoted to the nearest in case
    some of the top n similar users have rated one of the items.
    Parameter normalize reflects how we should normalize predicted scores:
        - 'none': no normalization, compute weighted sum of neighbour's scores
        - 'normalize': compute weighted sum of mean scores, add mean rating of the user
        - 'centered': using mean-centered ratings, thus no normalization in that final formula, but still add the mean
    An unknown `normalize` value raises ValueError (previously it was merely
    printed, and all-zero scores were silently returned).
    The model should provide:
        - model.R() - a sparse CSR matrix of mean-centered user ratings,
        - model.mean() - a vector of user mean ratings.
    NB: consider replacing the loops with sparse tensor operations.
    '''
    # fail fast on an unsupported normalization mode instead of silently
    # producing zero scores
    if normalize not in ('none', 'normalize', 'centered'):
        raise ValueError('No such normalization: %s' % normalize)
    # 1. for each given user, calculate similarity with all users that co-rated at least 1 item
    # (anyway we'll need them all when will calculate by-item neighbours)
    # We keep matrices sparse (e.g. only non-zero similarities are computed):
    # S is a given_users x all_users sparse matrix
    S = distance(model.R()[given_users], model.R()) # given_users x all_users (sparse)
    # prepare empty matrix to keep all the scores user x item
    scores = np.zeros((len(given_users), len(given_items))) # given_users x given_items (will be dense)
    # the de-centred rating matrix used by the 'normalize' mode does not depend
    # on the loop variable -- compute it once instead of per user
    if normalize == 'normalize':
        ratings = model.R() - (model.R() != 0).multiply(model.mean())
    for u in range(len(given_users)):
        # 2. for each given user, identify n nearest neighbours for each given item
        neighbours = S[u,:] # 1 x all_users (sparse)
        # remove the user himself from the matrix
        neighbours[0,given_users[u]] = 0
        # 2.1 put distance to neighbours to diagonal matrix
        neighbours_diag = sparse.spdiags(neighbours.todense(), 0,
                                         neighbours.shape[1], neighbours.shape[1]) # all_users x all_users (sparse)
        # 2.2 multiply neighbour similarities on a binary rating matrix
        # the result is all_users x given_items sparse matrix for user u,
        # where (i,j) = similarity(u,i) iff u and i rated item j
        # NB: here we don't check R() == 0 as rating values may be mean-centered,
        # in which case 0 doesn't mean the user hasn't rated the item.
        # Instead, we use P() binary matrix that was built based on the original ratings
        item_neighbours = neighbours_diag * model.P()[:,given_items] # all_users x given_items (sparse)
        # if we promote users, do per-item maximum and wipe the rest
        if promote_users:
            # 2.3 turn into 0 everything but the first n values in each item column (inplace)
            leave_top_n(item_neighbours, n) # all_users x given_items (sparse)
        else:
            # else identify top-n users (ids)
            top_neighbours = np.asarray(neighbours.todense()).reshape(-1).argsort()[::-1][:n]
            # then leave only their scores
            the_rest = np.setdiff1d(range(item_neighbours.shape[0]),top_neighbours)
            item_neighbours[the_rest,:] = 0
            item_neighbours.eliminate_zeros()
        if normalize == 'normalize':
            # 2.4 having all_users x given_items matrix with similarities for top-n neighbours per each item
            # use all_users x given_items matrix of their mean-centered scores
            # to compute normalized average score for each of the items
            # The normalized average score p(u,i) for user u and item i is computed as
            # p(u,i) = mu(u) + sum(n_neighbours(u,i), sim(u,v) * (r(v,i) - mu(v))) /
            #                  sum(n_neighbours(u,i), |sim(u,v)|)
            # 3. put it to the proper position in the scores (given_users x given_items) matrix
            weights_sum = np.asarray(item_neighbours.sum(0)).reshape(-1)
            scores[u,:] = (np.true_divide(item_neighbours.multiply(ratings[:,given_items]).sum(0),
                           np.ma.masked_array(weights_sum, weights_sum == 0)) + model.mean()[given_users[u]]).filled(0)
        elif normalize == 'centered':
            weights_sum = np.asarray(item_neighbours.sum(0)).reshape(-1)
            scores[u,:] = (np.true_divide(item_neighbours.multiply(model.R()[:,given_items]).sum(0),
                           np.ma.masked_array(weights_sum, weights_sum == 0)) + model.mean()[given_users[u]]).filled(0)
        else:
            # normalize == 'none' (validated above)
            weights_sum = np.asarray(item_neighbours.sum(0)).reshape(-1)
            scores[u,:] = np.true_divide(item_neighbours.multiply(model.R()[:,given_items]).sum(0),
                          np.ma.masked_array(weights_sum, weights_sum == 0)).filled(0)
    if exclude_seen:
        # now set to -999 scores of all items rated by user
        # as we're working from dense matrix given_users x items,
        # use the corresponding part of ratings matrix to mask all cells with ratings and fill them with -999
        # -999 is used because if there's less than n item user likes, we'll recommend some with score < 0
        scores = np.ma.masked_array(scores, model.R()[given_users][:,given_items].todense()).filled(-999)
    return scores
def leave_top_n(M, n):
    '''For sparse CSR matrix M with float values, zero out everything but up to
    n top elements in each column (in place), then drop the created zeros.
    Fixes of the previous version:
      - an all-zero column crashed (`zip(*[])` produced () and ()[1] raised
        IndexError); empty columns are now a no-op,
      - `zip(...)[1]` was Python-2-only; the rewrite works on both.
    # FIXME ugly cycle, consider replace with more numpythonic idiom
    '''
    # iterate through columns / items
    for col in range(M.shape[1]):
        # COO keeps .data and .row aligned, giving (value, row) pairs for the
        # stored elements of this column without touching raw CSR internals
        column = M[:, col].tocoo()
        ranked = sorted(zip(column.data, column.row),
                        key=lambda pair: pair[0], reverse=True)
        # rows of everything outside the top-n values
        tail_rows = [row for _value, row in ranked[n:]]
        if tail_rows:
            M[tail_rows, col] = 0
    # eliminate zeros from the sparse matrix
    M.eliminate_zeros()
def cosine(U, I):
    '''Calculates the cosine distance between vectors in two sparse matrices U (a x b) and I (c x b).
    The result is written to sparse a x c matrix.
    cosine(u,v) = ( u .* v ) / (|u| * |v|)
    '''
    # numerator u .* v for every row pair; assuming U is |u| x |t| and
    # I is |i| x |t|, W is |u| x |i| and stays sparse when rows don't
    # intersect on t
    W = U * I.T
    # dense column vectors of row L2 norms for each operand
    row_norms_U = np.sqrt(U.multiply(U).sum(1))
    row_norms_I = np.sqrt(I.multiply(I).sum(1))
    # divide W elementwise by |u| * |v| in two passes, so we never build the
    # huge dense outer-product matrix of norms
    __divide_csr_cols_by_vector(W, np.asarray(row_norms_U).reshape(-1))
    __divide_csr_rows_by_vector(W, np.asarray(row_norms_I).reshape(-1))
    # hooray, the u x v matrix is still sparse!
    return W
def pearson(U, V):
    '''Calculates the pearson correlation coeff. between vectors in two sparse matrices U (a x b) and V (c x b).
    The result is written to dense a x c matrix.
    This function mimics Excel CORREL function. To compute pearson(u,v), we should take elements that exist
    in both vectors, and then compute sum((u_i - u_mean)*(v_i - v_mean)) / sum(sqrt( (u_i - u_mean) * (v_i - v_mean) ))
    NB: b may be arbitrary big, avoid doing dense matrix with b as one of the dimensions
    '''
    # the thing is that mean values of u depends on elements of v and vice versa
    # as zero elements don't count
    P = np.zeros((U.shape[0],V.shape[0]))
    for i in range(U.shape[0]): # u is sparse 1 x b
        u = U[i]
        # create a copy of I and remove elements that don't match u
        u_zero = np.setdiff1d(range(u.shape[1]),u.indices)
        V_u = V.copy()
        V_u[:,u_zero] = 0
        V_u.eliminate_zeros() # c x b sparse
        Nz = V_u != 0
        # duplicate u for a number of rows in V and remove non-matched elements
        u_diag = sparse.spdiags(u.todense(), 0, u.shape[1], u.shape[1])
        u_V = Nz * u_diag
        u_V.eliminate_zeros() # c x b sparse
        # calculate mean for each of vectors in V (c x 1) and mean u for each of vectors in V (c x 1)
        V_u_mean = V_u.sum(1) / Nz.sum(1) # c x 1 dense
        u_V_mean = u_V.sum(1) / Nz.sum(1) # c x 1 dense
        # create mean-centered V_u and u_V
        V_u_centered = V_u - sparse.csr_matrix(Nz.multiply(V_u_mean)) # c x b sparse
        u_V_centered = u_V - sparse.csr_matrix(Nz.multiply(u_V_mean)) # c x b sparse
        # compute denominators using some low-level sparse magic:
        # the data slice indptr[j]:indptr[j+1] is exactly row j's stored values.
        # during centering, some values could turn into 0, thus making sparse structure different
        # between V_u_centered and u_V_centered
        denom = [math.sqrt(sum(u_V_centered.data[u_V_centered.indptr[j]:u_V_centered.indptr[j+1]]**2) *
                           sum(V_u_centered.data[V_u_centered.indptr[j]:V_u_centered.indptr[j+1]]**2))
                 for j in range(len(u_V_centered.indptr)-1)]
        # now compute the pearson coefficients for all v
        P[i,:] = u_V_centered.multiply(V_u_centered).sum(1).T / denom
    # return sparse matrix to conform with cosine similarity function
    return sparse.csr_matrix(P)
def __divide_csr_rows_by_vector(M,v):
    '''Divide each CSR row of M elementwise by vector v (len(v) == M.shape[1]);
    values represented as an array or list v.
    ! v is not a matrix !
    Every stored element at column j is divided by v[j].
    '''
    assert(M.shape[1] == len(v))
    # vectorized gather: M.indices holds the column index of every stored
    # element, so v[M.indices] lines divisors up with M.data in one C-level
    # operation (the old per-element list comprehension was O(nnz) in Python)
    M.data = np.true_divide(M.data, np.asarray(v)[M.indices])
def __divide_csr_cols_by_vector(M,v):
    '''Divide each CSR column by a value; values represented as a vector v
    (len(v) == M.shape[0]): every stored element in row i is divided by v[i].
    Consider doing M.tocsc and dividing by col
    '''
    assert(M.shape[0] == len(v))
    # expand indptr into a per-element row index: row i owns
    # indptr[i+1] - indptr[i] consecutive data elements
    row_of_element = np.repeat(np.arange(M.shape[0]), np.diff(M.indptr))
    # vectorized gather of the per-element divisor, as in the rows variant
    M.data = np.true_divide(M.data, np.asarray(v)[row_of_element])
def __csr_row_indices(indptr):
    '''Takes csr_matrix.indptr and returns row indexes for data elements.
    Row i owns indptr[i+1] - indptr[i] consecutive data elements, so each row
    index is repeated that many times.  Vectorized with np.repeat instead of
    the previous nested Python list comprehension; .tolist() keeps the
    original list-of-ints return type.
    '''
    counts = np.diff(indptr)
    return np.repeat(np.arange(len(counts)), counts).tolist()
def __unweighted_user_profiles(P,I):
    '''P - sparse CSR matrix of user preferences (user x item x {0,1})
    I - sparse CSR matrix of item profiles (item x feature x float)
    Returns CSR matrix of user profiles (users x feature x float):
    each user profile is the sum of the profiles of all items the user
    'likes', i.e. a plain sparse matrix product.
    '''
    return P.dot(I)
def __weighted_user_profiles(R,I):
    '''R - sparse CSR matrix of user preferences (user x item x float)
    I - sparse CSR matrix of item profiles (item x feature x float)
    Returns CSR matrix of user profiles (users x feature x float),
    built as a weighted sum of profiles for all items user rated,
    with a weight correlated with the rating value.
    NB: the intermediate mean vector below is dense; this could be avoided
    by operating directly on csr_matrix.data.
    '''
    # mean rating per user, counting only the rated (non-zero) entries
    rated_mask = (R != 0)
    user_means = R.sum(1) / rated_mask.sum(1)
    # subtract each user's mean from that user's non-zero ratings only
    relative_ratings = R - sparse.csr_matrix(rated_mask.multiply(user_means))
    return relative_ratings * I
| {
"repo_name": "ksavenkov/recsys-001",
"path": "recsys/score.py",
"copies": "1",
"size": "17713",
"license": "mit",
"hash": 1650863023513355500,
"line_mean": 51.8746268657,
"line_max": 122,
"alpha_frac": 0.6218596511,
"autogenerated": false,
"ratio": 3.613423092615259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4735282743715259,
"avg_score": null,
"num_lines": null
} |
__all__ = ["CookieTracker", "parse_cookies"]
from .response_objects import Cookie
class CookieTracker:
    """
    Class for holding cookies in sessions, adding statefulness to
    the otherwise stateless general http method functions.
    """
    def __init__(self):
        # maps a "scheme://host[/path-prefix]" string to a list of cookie
        # objects stored for it
        self.domain_dict = {}

    def get_additional_cookies(self, netloc, path):
        """Return a {name: value} dict of stored cookies relevant to
        netloc + path."""
        # treat "www." hosts and bare hosts as the same site
        netloc = netloc.replace("://www.", "://", 1)
        return self._check_cookies(netloc + path)

    def _store_cookies(self, response_obj):
        """File each cookie of the response under its (stripped) host key."""
        for cookie in response_obj.cookies:
            # setdefault replaces the previous try/except KeyError insert idiom
            self.domain_dict.setdefault(cookie.host.lstrip(), []).append(cookie)

    def _check_cookies(self, endpoint):
        """Collect the stored keys that are a "/"-aligned prefix of endpoint
        and gather their cookies."""
        relevant_domains = []
        domains = self.domain_dict.keys()
        if domains:
            paths = []
            for path in endpoint.split("/"):
                paths.append(path)
                check_domain = "/".join(paths)
                if check_domain in domains:
                    relevant_domains.append(check_domain)
        return self._get_cookies_to_send(relevant_domains)

    def _get_cookies_to_send(self, domain_list):
        """Flatten the matched domains' cookies into one {name: value} dict;
        later domains override earlier ones on name clashes."""
        cookies_to_go = {}
        for domain in domain_list:
            for cookie_obj in self.domain_dict[domain]:
                cookies_to_go[cookie_obj.name] = cookie_obj.value
        return cookies_to_go
def parse_cookies(response, host):
    """
    Sticks cookies to a response.
    Parses every "set-cookie" header into a dict of cookie attributes and
    attaches the resulting Cookie objects as response.cookies.  A missing
    header (KeyError) leaves the response untouched.
    """
    jars = []
    try:
        for raw_cookie in response.headers["set-cookie"]:
            name_val, *attributes = raw_cookie.split(";")
            key, val = name_val.split("=", 1)
            jar = {"name": key.strip(), "value": val}
            for attribute in attributes:
                try:
                    attr_name, attr_val = attribute.split("=")
                    # drop a leading dot from e.g. domain values
                    if attr_val.startswith("."):
                        attr_val = attr_val[1:]
                    jar[attr_name.lower().lstrip()] = attr_val
                except ValueError:
                    # valueless attribute such as "Secure" / "HttpOnly"
                    jar[attribute.lower().lstrip()] = True
            jars.append(jar)
        response.cookies = [Cookie(host, jar) for jar in jars]
    except KeyError:
        pass
| {
"repo_name": "theelous3/asks",
"path": "asks/cookie_utils.py",
"copies": "1",
"size": "2308",
"license": "mit",
"hash": -961789923268345000,
"line_mean": 31.5070422535,
"line_max": 69,
"alpha_frac": 0.5394280763,
"autogenerated": false,
"ratio": 4.234862385321101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 71
} |
__all__ = ('Coord', 'TileMap')
from collections import defaultdict, namedtuple
from filters import is_tile
class TileMapStorage(object):
    """Row-major 2D backing store of tile values, addressed by Coord
    (tiles[y][x]), initialised to zeros."""
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.tiles = [[0] * width for _ in range(height)]
    def __getitem__(self, subscript):
        assert isinstance(subscript, Coord)
        return self.tiles[subscript.y][subscript.x]
    def __setitem__(self, subscript, value):
        assert isinstance(subscript, Coord)
        self.tiles[subscript.y][subscript.x] = value
    def copy(self):
        """Return an independent storage with the same dimensions and tile
        values (fresh row lists, so later writes don't alias)."""
        duplicate = self.__class__(width=self.width, height=self.height)
        duplicate.tiles = [list(row) for row in self.tiles]
        return duplicate
class Coord(namedtuple('Coord', ['x', 'y'])):
    """Immutable 2D coordinate with vector-style arithmetic."""
    @classmethod
    def from_tuple(cls, tup):
        """Coerce any 2-sequence to a Coord; Coords pass through unchanged."""
        return tup if isinstance(tup, cls) else cls(tup[0], tup[1])
    @classmethod
    def range(cls, c1, c2):
        """Yield every Coord in the half-open rectangle [c1, c2), row by row."""
        for row in range(c1[1], c2[1]):
            for col in range(c1[0], c2[0]):
                yield Coord(col, row)
    @classmethod
    def width(cls, c1, c2):
        """Absolute horizontal distance between two coords/tuples."""
        return abs(c1[0] - c2[0])
    @classmethod
    def height(cls, c1, c2):
        """Absolute vertical distance between two coords/tuples."""
        return abs(c1[1] - c2[1])
    def __add__(self, other):
        return self.__class__(self[0] + other[0], self[1] + other[1])
    def __sub__(self, other):
        return self.__class__(self[0] - other[0], self[1] - other[1])
    def __mul__(self, scalar):
        return self.__class__(self[0] * scalar, self[1] * scalar)
    def __neg__(self):
        return self.__class__(-self[0], -self[1])
# Unit vectors along each axis, handy as step increments (e.g. for cast_until).
Coord.X = Coord(1, 0)
Coord.Y = Coord(0, 1)
class TileMap(object):
    """Subscriptable, editable view onto a TileMap."""
    def __init__(self, tl=None, br=None, width=0, height=0, storage=None):
        """Create a view over `storage` bounded by top-left `tl` (inclusive)
        and bottom-right `br` (exclusive), both in storage coordinates.
        A fresh TileMapStorage(width, height) is created when none is given.
        """
        if tl is None:
            tl = Coord(0, 0)
        else:
            tl = Coord.from_tuple(tl)
        if br is None:
            br = Coord(tl.x + width, tl.y + height)
        else:
            br = Coord.from_tuple(br)
        if storage is None:
            storage = TileMapStorage(width, height)
        assert isinstance(storage, TileMapStorage)
        assert tl.x >= 0
        assert tl.y >= 0
        assert tl.x < br.x
        assert tl.y < br.y
        assert br.x <= storage.width
        assert br.y <= storage.height
        self.storage = storage
        self.tl = tl
        self.br = br
    @property
    def width(self):
        return Coord.width(self.tl, self.br)
    @property
    def height(self):
        return Coord.height(self.tl, self.br)
    @classmethod
    def clone(cls, tile_map):
        """Return a new view over the SAME storage and bounds (not a copy)."""
        return cls(tl=tile_map.tl, br=tile_map.br, storage=tile_map.storage)
    def to_other(self, coord, other):
        """Translate a coord local to this view into `other`'s local coords."""
        return Coord(coord.x + self.tl.x - other.tl.x,
                     coord.y + self.tl.y - other.tl.y)
    def _local_to_storage(self, coord):
        # view-local -> storage coordinates
        return Coord(coord.x + self.tl.x, coord.y + self.tl.y)
    def _storage_to_local(self, coord):
        # storage -> view-local coordinates
        return Coord(coord.x - self.tl.x, coord.y - self.tl.y)
    def _parse_subscript(self, subscript):
        """Normalize the `[x, y]`, `[x1:x2, y1:y2]` and `[(x1,y1):(x2,y2)]`
        subscript forms into a local (top-left, bottom-right) Coord pair.
        Raises IndexError for empty or out-of-bounds ranges.
        """
        if isinstance(subscript, slice):
            assert isinstance(subscript.start, tuple)
            assert len(subscript.start) == 2
            assert isinstance(subscript.stop, tuple)
            assert len(subscript.stop) == 2
            subscript = (
                slice(subscript.start[0], subscript.stop[0]),
                slice(subscript.start[1], subscript.stop[1]),
            )
        assert isinstance(subscript, tuple)
        assert len(subscript) == 2
        x, y = subscript
        width, height = (1, 1)
        if isinstance(x, slice):
            start, stop, step = x.start, x.stop, x.step
            if start is None: start = 0
            if stop is None: stop = self.width
            if step is None: step = 1
            assert step == 1
            width = stop - start
            x = start
        if isinstance(y, slice):
            start, stop, step = y.start, y.stop, y.step
            if start is None: start = 0
            if stop is None: stop = self.height
            if step is None: step = 1
            assert step == 1
            height = stop - start
            y = start
        if x < 0 or x + width > self.width or \
           y < 0 or y + height > self.height or \
           width == 0 or height == 0:
            raise IndexError(subscript)
        return Coord(x, y), Coord(x + width, y + height)
    def __str__(self):
        lines = ['']
        for y in range(self.tl.y, self.br.y):
            line = []
            for x in range(self.tl.x, self.br.x):
                line.append('%3s' % repr(self.storage[Coord(x, y)]))
            lines.append(' '.join(line))
        return '\n '.join(lines)
    def __getitem__(self, subscript):
        """Return the value at (x, y), or a subview of the range (if either x or y is a slice)."""
        tl, br = self._parse_subscript(subscript)
        if Coord.width(tl, br) == 1 and Coord.height(tl, br) == 1:
            tl = self._local_to_storage(tl)
            return self.storage[tl]
        else:
            return self.subview(tl=tl, br=br)
    def __setitem__(self, subscript, value):
        """Set the value at (x, y), or fill the range (if either x or y is a slice) with the value.
        Assigning a TileMap blits its tiles into the target range.
        """
        tl, br = self._parse_subscript(subscript)
        if isinstance(value, TileMap):
            for coord in Coord.range(tl, br):
                # BUGFIX: compute the offset within the assigned range from
                # *local* coords before converting to storage coords.  The old
                # code converted first, so the source index gained a spurious
                # self.tl term and mis-read `value` whenever self.tl != (0,0).
                offset = Coord(coord.x - tl.x, coord.y - tl.y)
                source = value._local_to_storage(offset)
                target = self._local_to_storage(coord)
                self.storage[target] = value.storage[source]
        else:
            if Coord.width(tl, br) == 1 and Coord.height(tl, br) == 1:
                tl = self._local_to_storage(tl)
                self.storage[tl] = value
            else:
                self.subview(tl=tl, br=br).fill(value)
    def __contains__(self, value):
        if isinstance(value, TileMap):
            raise TypeError("__contains__ does not support TileMaps yet.")
        for coord, __ in self.find(is_tile(value)):
            return True
        return False
    def get(self, subscript):
        """Like __getitem__, but return None instead of raising IndexError."""
        try:
            return self[subscript]
        except IndexError:
            return None
    def find(self, predicate):
        """
        Return an iterable of `(coordinate, data)` for which
        `predicate(tile_map, coord)` returns a not False `data`.
        """
        for coord in Coord.range(self.tl, self.br):
            arg = self._storage_to_local(coord)
            data = predicate(self, arg)
            if data:
                yield (arg, data)
    def cast_until(self, start, increment, predicate):
        """
        Return the first coordinate from `start` in steps
        of `increment` where `predicate(tile_map, coord)` returns True.
        Raises ValueError if the predicate never returned True.
        """
        coord = start
        end = self._storage_to_local(self.br)
        def in_range(coord):
            return (coord.x < end.x and coord.y < end.y)
        while in_range(coord) and not predicate(self, coord):
            coord += increment
        if in_range(coord):
            return coord
        else:
            raise ValueError("Coordinate matching predicate not found.")
    def copy(self):
        """Return a view over an independent copy of the underlying storage."""
        subview = self.subview()
        subview.storage = self.storage.copy()
        return subview
    def fill(self, value):
        """Set every tile in this view to `value`."""
        for coord in Coord.range(self.tl, self.br):
            self.storage[coord] = value
    def subview(self, tl=None, br=None):
        """Return a subview at the given location (default top left) and size (default maximum)."""
        if tl is None:
            tl = Coord(0, 0)
        else:
            tl = Coord.from_tuple(tl)
        if br is None:
            br = Coord(self.width, self.height)
        else:
            br = Coord.from_tuple(br)
        tl = self._local_to_storage(tl)
        br = self._local_to_storage(br)
        return self.__class__(tl=tl, br=br, storage=self.storage)
    def linearize(self):
        """Return a linear iterable of all values in this tile map."""
        return (self.storage[coord] for coord in Coord.range(self.tl, self.br))
    def split_x(self, x):
        """Return a pair of views that are the halves of the tile map split vertically at `x`."""
        assert 0 <= x < self.width
        return (
            self.subview(tl=Coord(0, 0), br=Coord(x, self.height)),
            self.subview(tl=Coord(x, 0), br=Coord(self.width, self.height))
        )
    def split_y(self, y):
        """Return a pair of views that are the halves of the tile map split horizontally at `y`."""
        assert 0 <= y < self.height
        return (
            self.subview(tl=Coord(0, 0), br=Coord(self.width, y)),
            self.subview(tl=Coord(0, y), br=Coord(self.width, self.height))
        )
| {
"repo_name": "adurdin/platformgen",
"path": "tilemap.py",
"copies": "1",
"size": "9181",
"license": "mit",
"hash": -6565937783902923000,
"line_mean": 32.1444043321,
"line_max": 102,
"alpha_frac": 0.5446029844,
"autogenerated": false,
"ratio": 3.6606858054226477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698481113621523,
"avg_score": 0.0013615352402248475,
"num_lines": 277
} |
__all__ = ['copyfileobj']
import shutil
try:
    from fadvise import posix_fadvise, POSIX_FADV_DONTNEED

    def copyfileobj(fsrc, fdst, length=16*1024, advise_after=1024*1024):
        """
        Reimplementation of shutil.copyfileobj that advises the OS to remove
        parts of the source file from the OS's caches once copied to the
        destination file.
        Usage profile:
          * You have a (potentially) large file to copy.
          * You know you don't need to access the source file once copied.
          * You're quite likely to access the destination file soon after.
        """
        # If we can't access the fileno then fallback to using shutil.
        if not hasattr(fsrc, 'fileno'):
            return shutil.copyfileobj(fsrc, fdst, length)
        # Calculate the approximate number of blocks to copy before advising
        # the OS to drop pages from the cache.  Clamp to at least 1: when
        # advise_after < length the old int division yielded 0 and the
        # modulo below raised ZeroDivisionError.
        advise_after_blocks = max(1, int(advise_after / length))
        # Off we go ...
        blocks_read = 0
        while True:
            data = fsrc.read(length)
            if not data:
                break
            fdst.write(data)
            blocks_read += 1
            if not blocks_read % advise_after_blocks:
                posix_fadvise(fsrc.fileno(), 0, length * blocks_read,
                              POSIX_FADV_DONTNEED)
        # One final advise to flush the remaining blocks.
        posix_fadvise(fsrc.fileno(), 0, 0, POSIX_FADV_DONTNEED)
except ImportError:
    # fadvise isn't installed: plain shutil behaviour, no cache advice.
    copyfileobj = shutil.copyfileobj
| {
"repo_name": "ish/formish",
"path": "formish/_copyfile.py",
"copies": "1",
"size": "1533",
"license": "bsd-3-clause",
"hash": 4427966937457355000,
"line_mean": 35.5,
"line_max": 79,
"alpha_frac": 0.6040443575,
"autogenerated": false,
"ratio": 3.9612403100775193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99462370485299,
"avg_score": 0.023809523809523808,
"num_lines": 42
} |
__all__ = ["Core"]
from aioorm import AioDbFactory
class Core:
    """Sanic extension managing a collection of aioorm SQL database connections.

    Connection URLs come from ``app.config.SQLDBURLS`` (a dict mapping a
    database name to its URL); connections are opened and closed through
    Sanic server lifecycle listeners registered by :meth:`init_app`.
    """

    def __init__(self, app=None):
        # name -> database object created by AioDbFactory
        self.sqldatabases = {}
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Bind the extension to *app* and register connect/disconnect listeners.

        Raises ``ValueError`` when ``app.config.SQLDBURLS`` is absent or is
        not a dict of ``{dbname: dburl}``.
        """
        if app.config.SQLDBURLS and isinstance(app.config.SQLDBURLS, dict):
            self.SQLDBURLS = app.config.SQLDBURLS
            self.app = app
            for dbname, dburl in app.config.SQLDBURLS.items():
                self.sqldatabases[dbname] = AioDbFactory(dburl)
        else:
            raise ValueError(
                "nonstandard sanic config SQLDBURLS,SQLDBURLS must be a Dict[dbname,dburl]")

        @app.listener('before_server_start')
        async def setup_db(app, loop):
            # Open every configured connection on the server's event loop.
            for name, db in self.sqldatabases.items():
                await db.connect(loop)
                print(name, "successfully connected!")

        @app.listener('after_server_start')
        async def notify_server_started(app, loop):
            print('Databases successfully connected!')

        @app.listener('before_server_stop')
        async def notify_server_stopping(app, loop):
            print('Databases disconnecting')

        @app.listener('after_server_stop')
        async def close_db(app, loop):
            for name, db in self.sqldatabases.items():
                await db.close()
                print(name, 'disconnected')

        # Register ourselves on the app so other code can locate the extension.
        if not hasattr(app, "extensions"):
            app.extensions = {}
        app.extensions['SanicAioOrm'] = self

    def init_proxys(self, **kwargs):
        """Initialize peewee proxy database objects.

        Each keyword maps a configured database name to a peewee ``Proxy``;
        the proxy is bound to the matching connection in ``sqldatabases``.
        Unknown names are reported and skipped (previously a bare ``except``
        also hid real errors raised by ``initialize``).
        """
        for name, proxy in kwargs.items():
            try:
                proxy.initialize(self.sqldatabases[name])
            except KeyError:
                print("unknown Databases {}".format(name))
| {
"repo_name": "Sanic-Extensions/sanic-aioorm",
"path": "sanic_aioorm/core.py",
"copies": "1",
"size": "1854",
"license": "mit",
"hash": -4662702033595761000,
"line_mean": 32.3090909091,
"line_max": 92,
"alpha_frac": 0.5562227074,
"autogenerated": false,
"ratio": 3.906183368869936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4962406076269936,
"avg_score": null,
"num_lines": null
} |
__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'
import collections.abc
import functools
import inspect
import os
import sys
import traceback
import types
import warnings
from . import base_futures
from . import constants
from . import format_helpers
from .log import logger
def _is_debug_mode():
    # Debug mode makes @coroutine wrap its generator objects in a
    # CoroWrapper instance (defined below), which logs a message when the
    # generator is never iterated over -- typically because "await" or
    # "yield from" was forgotten on a coroutine call.
    # The flag is sampled when the decorator is applied, so it must be set
    # before coroutines are defined.  A downside is that tracebacks then
    # show CoroWrapper.__next__ entries.
    if sys.flags.dev_mode:
        return True
    return (not sys.flags.ignore_environment
            and bool(os.environ.get('PYTHONASYNCIODEBUG')))
_DEBUG = _is_debug_mode()
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    """Debug-mode proxy around a generator/coroutine object.

    Delegates the iterator/coroutine protocol to the wrapped object and
    records the creation traceback so __del__ can report coroutines that
    were created but never awaited / yielded from.
    """
    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # Stack of the caller's caller: where the coroutine was created.
        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)
    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += f', created at {frame[0]}:{frame[1]}'
        return f'<{self.__class__.__name__} {coro_repr}>'
    # The generator/coroutine protocol is delegated verbatim to self.gen.
    def __iter__(self):
        return self
    def __next__(self):
        return self.gen.send(None)
    def send(self, value):
        return self.gen.send(value)
    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)
    def close(self):
        return self.gen.close()
    @property
    def gi_frame(self):
        return self.gen.gi_frame
    @property
    def gi_running(self):
        return self.gen.gi_running
    @property
    def gi_code(self):
        return self.gen.gi_code
    def __await__(self):
        return self
    @property
    def gi_yieldfrom(self):
        return self.gen.gi_yieldfrom
    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        # f_lasti == -1 means the generator never started executing,
        # i.e. the coroutine was never awaited / yielded from.
        if frame is not None and frame.f_lasti == -1:
            msg = f'{self!r} was never yielded from'
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.
    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func
    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that resolves futures,
        # generators and awaitables returned by the call.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            else:
                # If 'res' is an awaitable, run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, collections.abc.Awaitable):
                        res = yield from await_meth()
            return res
    # Make the generator usable with "await".
    coro = types.coroutine(coro)
    if not _DEBUG:
        wrapper = coro
    else:
        # Debug mode: wrap each produced coroutine object in CoroWrapper
        # so never-awaited coroutines are reported.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w
    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper
# A marker for iscoroutinefunction.
_is_coroutine = object()
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    if inspect.iscoroutinefunction(func):
        return True
    # Generator-based coroutines are tagged by the @coroutine decorator.
    return getattr(func, '_is_coroutine', None) is _is_coroutine
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
                    collections.abc.Coroutine, CoroWrapper)
# Cache of exact types already confirmed to be coroutine types.
_iscoroutine_typecache = set()
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    if type(obj) in _iscoroutine_typecache:
        # Fast path: exact type was seen and confirmed before.
        return True
    if not isinstance(obj, _COROUTINE_TYPES):
        return False
    # Just in case we don't want to cache more than 100
    # positive types.  That shouldn't ever happen, unless
    # someone stressing the system on purpose.
    if len(_iscoroutine_typecache) < 100:
        _iscoroutine_typecache.add(type(obj))
    return True
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for repr() and logging."""
    assert iscoroutine(coro)
    is_corowrapper = isinstance(coro, CoroWrapper)
    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__.  While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if is_corowrapper:
            return format_helpers._format_callback(coro.func, (), {})
        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'
    def is_running(coro):
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False
    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code
    coro_name = get_name(coro)
    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name
    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame
    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'
    lineno = 0
    if (is_corowrapper and
            coro.func is not None and
            not inspect.isgeneratorfunction(coro.func)):
        source = format_helpers._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        # BUG FIX: the computed *filename* was previously ignored and the
        # literal string "(unknown)" emitted in every message below.
        if coro_frame is None:
            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
        else:
            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
    return coro_repr
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/coroutines.py",
"copies": "22",
"size": "8797",
"license": "apache-2.0",
"hash": -3938871364576076300,
"line_mean": 31.7026022305,
"line_max": 99,
"alpha_frac": 0.5917926566,
"autogenerated": false,
"ratio": 3.916740872662511,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00003717472118959108,
"num_lines": 269
} |
__all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine',
'From', 'Return']
import functools
import inspect
import opcode
import os
import sys
import textwrap
import traceback
import types
from . import compat
from . import events
from . import futures
from .log import logger
# Opcode of "yield from" instruction
# (None on interpreters without YIELD_FROM, e.g. Python 2).
_YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = bool(os.environ.get('TROLLIUSDEBUG'))
# Compatibility shims: each of these features may be missing on older
# Pythons, so probe for them once at import time.
try:
    _types_coroutine = types.coroutine
except AttributeError:
    _types_coroutine = None
try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    _inspect_iscoroutinefunction = lambda func: False
try:
    from collections.abc import Coroutine as _CoroutineABC, \
        Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None
if _YIELD_FROM is not None:
    # Check for CPython issue #21209
    # The probe is defined via exec() because the "yield from" syntax is a
    # SyntaxError on Python 2, where this branch is never taken.
    exec('''if 1:
    def has_yield_from_bug():
        class MyGen:
            def __init__(self):
                self.send_args = None
            def __iter__(self):
                return self
            def __next__(self):
                return 42
            def send(self, *what):
                self.send_args = what
                return None
        def yield_from_gen(gen):
            yield from gen
        value = (1, 2, 3)
        gen = MyGen()
        coro = yield_from_gen(gen)
        next(coro)
        coro.send(value)
        return gen.send_args != (value,)
''')
    _YIELD_FROM_BUG = has_yield_from_bug()
    del has_yield_from_bug
else:
    # No "yield from" instruction at all, so the bug cannot occur.
    _YIELD_FROM_BUG = False
# On Python >= 3.5, PEP 479 turns StopIteration raised inside a generator
# into RuntimeError, so Return must not subclass StopIteration there.
if compat.PY35:
    return_base_class = Exception
else:
    return_base_class = StopIteration
class ReturnException(return_base_class):
    """Exception used by trollius coroutines to return a value.

    Tracks whether it was actually raised so __del__ can warn about
    ``Return(...)`` being created but never raised.
    """
    def __init__(self, *args):
        return_base_class.__init__(self)
        # Mirror StopIteration semantics: no args -> None, one arg -> the
        # value itself, several args -> a tuple.
        if not args:
            self.value = None
        elif len(args) == 1:
            self.value = args[0]
        else:
            self.value = args
        self.raised = False
        if _DEBUG:
            frame = sys._getframe(1)
            self._source_traceback = traceback.extract_stack(frame)
            # explicitly clear the reference to avoid reference cycles
            frame = None
        else:
            self._source_traceback = None
    def __del__(self):
        if self.raised:
            return
        # The Return object was created but never raised: report it.
        fmt = 'Return(%r) used without raise'
        if self._source_traceback:
            fmt += '\nReturn created at (most recent call last):\n'
            tb = ''.join(traceback.format_list(self._source_traceback))
            fmt += tb.rstrip()
        logger.error(fmt, self.value)
if compat.PY33 and not compat.PY35:
    # Don't use the Return class on Python 3.3 and 3.4 to support asyncio
    # coroutines (to avoid the warning emitted in Return destructor).
    #
    # The problem is that ReturnException inherits from StopIteration.
    # "yield from trollius_coroutine". Task._step() does not receive the Return
    # exception, because "yield from" handles it internally. So it's not
    # possible to set the raised attribute to True to avoid the warning in
    # Return destructor.
    def Return(*args):
        """Build a plain StopIteration carrying the coroutine's return value."""
        if not args:
            value = None
        elif len(args) == 1:
            value = args[0]
        else:
            value = args
        return StopIteration(value)
else:
    Return = ReturnException
def debug_wrapper(gen):
    """Wrap an 'async def' coroutine object in CoroWrapper for debug mode."""
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)
def _coroutine_at_yield_from(coro):
    """Test if the last instruction of a coroutine is "yield from".
    Return False if the coroutine completed.
    """
    frame = coro.gi_frame
    if frame is None:
        # No frame left: the coroutine already finished.
        return False
    assert frame.f_lasti >= 0
    next_offset = frame.f_lasti + 1
    return coro.gi_code.co_code[next_offset] == _YIELD_FROM
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    """Debug-mode proxy around a generator/coroutine object.

    Delegates the generator protocol to the wrapped object and records
    the creation traceback so __del__ can report coroutines that were
    never yielded from.
    """
    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)
    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)
    def __iter__(self):
        return self
    def __next__(self):
        return next(self.gen)
    # Python 2 iterator protocol alias.
    next = __next__
    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)
    def throw(self, exc_type, exc_value=None, exc_tb=None):
        return self.gen.throw(exc_type, exc_value, exc_tb)
    def close(self):
        return self.gen.close()
    @property
    def gi_frame(self):
        return self.gen.gi_frame
    @property
    def gi_running(self):
        return self.gen.gi_running
    @property
    def gi_code(self):
        return self.gen.gi_code
    if compat.PY35:
        __await__ = __iter__  # make compatible with 'await' expression
        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom
        @property
        def cr_await(self):
            return self.gen.cr_await
        @property
        def cr_running(self):
            return self.gen.cr_running
        @property
        def cr_code(self):
            return self.gen.cr_code
        @property
        def cr_frame(self):
            return self.gen.cr_frame
    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        # f_lasti == -1 means the generator never started executing.
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
if not compat.PY34:
    # Backport functools.update_wrapper() from Python 3.4:
    # - Python 2.7 fails if assigned attributes don't exist
    # - Python 2.7 and 3.1 don't set the __wrapped__ attribute
    # - Python 3.2 and 3.3 set __wrapped__ before updating __dict__
    def _update_wrapper(wrapper,
                        wrapped,
                        assigned = functools.WRAPPER_ASSIGNMENTS,
                        updated = functools.WRAPPER_UPDATES):
        """Update a wrapper function to look like the wrapped function
        wrapper is the function to be updated
        wrapped is the original function
        assigned is a tuple naming the attributes assigned directly
        from the wrapped function to the wrapper function (defaults to
        functools.WRAPPER_ASSIGNMENTS)
        updated is a tuple naming the attributes of the wrapper that
        are updated with the corresponding attribute from the wrapped
        function (defaults to functools.WRAPPER_UPDATES)
        """
        for attr in assigned:
            try:
                value = getattr(wrapped, attr)
            except AttributeError:
                # Attribute missing on the wrapped callable; skip it.
                pass
            else:
                setattr(wrapper, attr, value)
        for attr in updated:
            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
        # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
        # from the wrapped function when updating __dict__
        wrapper.__wrapped__ = wrapped
        # Return the wrapper so this can be used as a decorator via partial()
        return wrapper
    def _wraps(wrapped,
               assigned = functools.WRAPPER_ASSIGNMENTS,
               updated = functools.WRAPPER_UPDATES):
        """Decorator factory to apply update_wrapper() to a wrapper function
        Returns a decorator that invokes update_wrapper() with the decorated
        function as the wrapper argument and the arguments to wraps() as the
        remaining arguments. Default arguments are as for update_wrapper().
        This is a convenience function to simplify applying partial() to
        update_wrapper().
        """
        return functools.partial(_update_wrapper, wrapped=wrapped,
                                 assigned=assigned, updated=updated)
else:
    # Modern Python: the stdlib implementation is correct.
    _wraps = functools.wraps
# PEP 479: on Python >= 3.5 a StopIteration raised inside a generator is
# turned into RuntimeError, so trollius coroutines need a driver loop that
# converts Return/StopIteration back into a plain return value.
_PEP479 = (sys.version_info >= (3, 5))
if _PEP479:
    # Need exec() because yield+return raises a SyntaxError on Python 2
    exec(textwrap.dedent('''
    def pep479_wrapper(func, coro_func):
        @_wraps(func)
        def pep479_wrapped(*args, **kw):
            coro = coro_func(*args, **kw)
            value = None
            error = None
            while True:
                try:
                    if error is not None:
                        value = coro.throw(error)
                    elif value is not None:
                        value = coro.send(value)
                    else:
                        value = next(coro)
                except RuntimeError:
                    # FIXME: special case for
                    # FIXME: "isinstance(exc.__context__, StopIteration)"?
                    raise
                except StopIteration as exc:
                    return exc.value
                except Return as exc:
                    exc.raised = True
                    return exc.value
                except BaseException as exc:
                    raise
                try:
                    value = yield value
                    error = None
                except BaseException as exc:
                    value = None
                    error = exc
        return pep479_wrapped
    '''))
def coroutine(func):
    """Decorator to mark coroutines.
    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func
    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that resolves futures,
        # generators and awaitables returned by the call.
        @_wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (isinstance(res, futures._FUTURE_CLASSES)
                    or inspect.isgenerator(res)):
                res = yield From(res)
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, _AwaitableABC):
                        res = yield From(await_meth())
            raise Return(res)
    if _PEP479:
        # FIXME: use @_wraps
        coro = pep479_wrapper(func, coro)
        coro = _wraps(func)(coro)
    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            wrapper = _types_coroutine(coro)
    else:
        # Debug mode: wrap each produced coroutine object in CoroWrapper
        # so never-yielded-from coroutines are reported.
        @_wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w
    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    marked = getattr(func, '_is_coroutine', False)
    return marked or _inspect_iscoroutinefunction(func)
# Types recognized as coroutine objects, extended below with whatever the
# running interpreter / asyncio installation provides.
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
if events.asyncio is not None:
    # Accept also asyncio CoroWrapper for interoperability
    if hasattr(events.asyncio, 'coroutines'):
        _COROUTINE_TYPES += (events.asyncio.coroutines.CoroWrapper,)
    else:
        # old asyncio/Python versions
        _COROUTINE_TYPES += (events.asyncio.tasks.CoroWrapper,)
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for repr() and logging."""
    assert iscoroutine(coro)
    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{0}()'.format(coro_name)
    else:
        func = coro
    if coro_name is None:
        coro_name = events._format_callback(func, ())
    try:
        coro_code = coro.gi_code
    except AttributeError:
        coro_code = coro.cr_code
    try:
        coro_frame = coro.gi_frame
    except AttributeError:
        coro_frame = coro.cr_frame
    filename = coro_code.co_filename
    lineno = 0
    if (isinstance(coro, CoroWrapper)
        and not inspect.isgeneratorfunction(coro.func)
        and coro.func is not None):
        # Robustness fix (matches the newer asyncio version of this code):
        # _get_function_source() may return None, in which case the previous
        # direct tuple unpacking raised TypeError.
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))
    return coro_repr
class FromWrapper(object):
    """Debug-mode marker produced by From(); never nests."""
    __slots__ = ('obj',)
    def __init__(self, obj):
        # Flatten one level of wrapping so .obj never holds a FromWrapper.
        inner = obj.obj if isinstance(obj, FromWrapper) else obj
        assert not isinstance(inner, FromWrapper)
        self.obj = inner
def From(obj):
    """Tag *obj* for "yield From(...)"; a no-op unless debug mode is active."""
    if _DEBUG:
        return FromWrapper(obj)
    return obj
| {
"repo_name": "haypo/trollius",
"path": "trollius/coroutines.py",
"copies": "1",
"size": "16296",
"license": "apache-2.0",
"hash": -6516959371329543000,
"line_mean": 31.8548387097,
"line_max": 79,
"alpha_frac": 0.5674398625,
"autogenerated": false,
"ratio": 4.1795332136445245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001098491252465718,
"num_lines": 496
} |
__all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from ActualVim.lib.asyncio_inc import compat
from . import events
from . import futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
# Honor PYTHONASYNCIODEBUG unless -E (ignore environment) was given.
_DEBUG = (not sys.flags.ignore_environment and
          bool(os.environ.get('PYTHONASYNCIODEBUG')))
# Compatibility shims: probe once at import time for features that may be
# missing on older Pythons.
try:
    _types_coroutine = types.coroutine
    _types_CoroutineType = types.CoroutineType
except AttributeError:
    # Python 3.4
    _types_coroutine = None
    _types_CoroutineType = None
try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    # Python 3.4
    _inspect_iscoroutinefunction = lambda func: False
try:
    from collections.abc import Coroutine as _CoroutineABC, \
        Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None
# Check for CPython issue #21209
def has_yield_from_bug():
class MyGen:
def __init__(self):
self.send_args = None
def __iter__(self):
return self
def __next__(self):
return 42
def send(self, *what):
self.send_args = what
return None
def yield_from_gen(gen):
yield from gen
value = (1, 2, 3)
gen = MyGen()
coro = yield_from_gen(gen)
next(coro)
coro.send(value)
return gen.send_args != (value,)
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
def debug_wrapper(gen):
    """Wrap an 'async def' coroutine object in CoroWrapper for debug mode."""
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    """Debug-mode proxy around a generator/coroutine object.

    Delegates the generator/coroutine protocol to the wrapped object and
    records the creation traceback so __del__ can report coroutines that
    were never yielded from.
    """
    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)
    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)
    def __iter__(self):
        return self
    def __next__(self):
        return self.gen.send(None)
    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)
    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)
    def close(self):
        return self.gen.close()
    @property
    def gi_frame(self):
        return self.gen.gi_frame
    @property
    def gi_running(self):
        return self.gen.gi_running
    @property
    def gi_code(self):
        return self.gen.gi_code
    if compat.PY35:
        def __await__(self):
            # Refuse to await a coroutine that is already awaiting something.
            cr_await = getattr(self.gen, 'cr_await', None)
            if cr_await is not None:
                raise RuntimeError(
                    "Cannot await on coroutine {!r} while it's "
                    "awaiting for {!r}".format(self.gen, cr_await))
            return self
        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom
        @property
        def cr_await(self):
            return self.gen.cr_await
        @property
        def cr_running(self):
            return self.gen.cr_running
        @property
        def cr_code(self):
            return self.gen.cr_code
        @property
        def cr_frame(self):
            return self.gen.cr_frame
    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        # f_lasti == -1 means the generator never started executing.
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.
    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func
    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that resolves futures,
        # generators and awaitables returned by the call.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, _AwaitableABC):
                        res = yield from await_meth()
            return res
    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            wrapper = _types_coroutine(coro)
    else:
        # Debug mode: wrap each produced coroutine object in CoroWrapper
        # so never-yielded-from coroutines are reported.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w
    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper
# A marker for iscoroutinefunction.
_is_coroutine = object()
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    marked = getattr(func, '_is_coroutine', None) is _is_coroutine
    return marked or _inspect_iscoroutinefunction(func)
# Types recognized as coroutine objects; extended with what the running
# interpreter provides.
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
if _types_CoroutineType is not None:
    # Prioritize native coroutine check to speed-up
    # asyncio.iscoroutine.
    _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for repr() and logging."""
    assert iscoroutine(coro)
    if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'):
        # Most likely a built-in type or a Cython coroutine.
        # Built-in types might not have __qualname__ or __name__.
        coro_name = getattr(
            coro, '__qualname__',
            getattr(coro, '__name__', type(coro).__name__))
        coro_name = '{}()'.format(coro_name)
        running = False
        try:
            running = coro.cr_running
        except AttributeError:
            try:
                running = coro.gi_running
            except AttributeError:
                pass
        if running:
            return '{} running'.format(coro_name)
        else:
            return coro_name
    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{}()'.format(coro_name)
    else:
        func = coro
    if coro_name is None:
        coro_name = events._format_callback(func, (), {})
    try:
        coro_code = coro.gi_code
    except AttributeError:
        coro_code = coro.cr_code
    try:
        coro_frame = coro.gi_frame
    except AttributeError:
        coro_frame = coro.cr_frame
    filename = coro_code.co_filename
    lineno = 0
    if (isinstance(coro, CoroWrapper) and
            not inspect.isgeneratorfunction(coro.func) and
            coro.func is not None):
        # Prefer the wrapped function's source location over the wrapper's.
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))
    return coro_repr
| {
"repo_name": "lunixbochs/actualvim",
"path": "lib/asyncio/coroutines.py",
"copies": "1",
"size": "10741",
"license": "mit",
"hash": 1032645758567754900,
"line_mean": 30.2238372093,
"line_max": 79,
"alpha_frac": 0.5806721907,
"autogenerated": false,
"ratio": 3.881821467293097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9959534275224644,
"avg_score": 0.0005918765536907417,
"num_lines": 344
} |
__all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import compat
from . import events
from . import base_futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']

# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below).  That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call.  Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines.  A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment and
          bool(os.environ.get('PYTHONASYNCIODEBUG')))

# Feature detection: these attributes exist only on Python >= 3.5;
# the None fallbacks select the 3.4 code paths elsewhere in this module.
try:
    _types_coroutine = types.coroutine
    _types_CoroutineType = types.CoroutineType
except AttributeError:
    # Python 3.4
    _types_coroutine = None
    _types_CoroutineType = None

try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    # Python 3.4
    _inspect_iscoroutinefunction = lambda func: False

try:
    from collections.abc import Coroutine as _CoroutineABC, \
                                Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None
# Check for CPython issue #21209
def has_yield_from_bug():
    """Detect CPython issue #21209 at runtime.

    On affected interpreters, "yield from" over a generator with a custom
    send() unpacks a tuple argument instead of forwarding it unchanged.
    Returns True when the bug is present.
    """
    class ProbeGen:
        def __init__(self):
            self.send_args = None

        def __iter__(self):
            return self

        def __next__(self):
            return 42

        def send(self, *what):
            # Record exactly what arrived so the caller can inspect it.
            self.send_args = what
            return None

    def delegate(gen):
        yield from gen

    probe = ProbeGen()
    wrapper = delegate(probe)
    next(wrapper)
    payload = (1, 2, 3)
    wrapper.send(payload)
    # A correct interpreter forwards the tuple as a single argument.
    return probe.send_args != (payload,)
# Evaluate the probe once at import time, then drop the helper.
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
def debug_wrapper(gen):
    """Wrap a native coroutine object in CoroWrapper for debug mode."""
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)
class CoroWrapper:
    """Proxy around a coroutine/generator object, used in _DEBUG mode.

    Delegates the full generator protocol to the wrapped object and, on
    finalization, logs an error if the coroutine was created but never
    iterated (a common "forgot yield from / await" mistake).
    """

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # Creation stack, shown in __del__'s diagnostic message.
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)

    def __iter__(self):
        return self

    def __next__(self):
        return self.gen.send(None)

    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    if compat.PY35:

        def __await__(self):
            cr_await = getattr(self.gen, 'cr_await', None)
            if cr_await is not None:
                raise RuntimeError(
                    "Cannot await on coroutine {!r} while it's "
                    "awaiting for {!r}".format(self.gen, cr_await))
            return self

        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom

        @property
        def cr_await(self):
            return self.gen.cr_await

        @property
        def cr_running(self):
            return self.gen.cr_running

        @property
        def cr_code(self):
            return self.gen.cr_code

        @property
        def cr_frame(self):
            return self.gen.cr_frame

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        # f_lasti == -1 means the coroutine body never started executing,
        # i.e. the object was created but never awaited/yielded from.
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that runs it and, if the
        # result is a future/generator/awaitable, delegates to it.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, _AwaitableABC):
                        res = yield from await_meth()
            return res

    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            # types.coroutine flags the generator function as a coroutine.
            wrapper = _types_coroutine(coro)
    else:
        # Debug mode: every call produces a CoroWrapper so that
        # never-awaited coroutines get reported.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper
# A marker for iscoroutinefunction.
# Identity-compared sentinel set by @coroutine on its wrappers.
_is_coroutine = object()
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    # Tagged by @coroutine, or a native "async def" per inspect.
    if getattr(func, '_is_coroutine', None) is _is_coroutine:
        return True
    return _inspect_iscoroutinefunction(func)
# Types accepted by iscoroutine(); extended based on interpreter support.
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
if _types_CoroutineType is not None:
    # Prioritize native coroutine check to speed-up
    # asyncio.iscoroutine.
    _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    recognized = _COROUTINE_TYPES
    return isinstance(obj, recognized)
def _format_coroutine(coro):
    """Return a human-readable description of *coro*.

    Used by CoroWrapper.__repr__ and task repr/log messages; the result
    looks like "name() running at file.py:12" or "name() done, defined at
    file.py:7".
    """
    assert iscoroutine(coro)

    if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'):
        # Most likely a built-in type or a Cython coroutine.
        # Built-in types might not have __qualname__ or __name__.
        coro_name = getattr(
            coro, '__qualname__',
            getattr(coro, '__name__', type(coro).__name__))
        coro_name = '{}()'.format(coro_name)

        # Probe both the native-coroutine and generator running flags;
        # objects of this kind may expose either, or neither.
        running = False
        try:
            running = coro.cr_running
        except AttributeError:
            try:
                running = coro.gi_running
            except AttributeError:
                pass

        if running:
            return '{} running'.format(coro_name)
        else:
            return coro_name

    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{}()'.format(coro_name)
    else:
        func = coro

    if coro_name is None:
        coro_name = events._format_callback(func, (), {})

    # Generators expose gi_code/gi_frame, native coroutines cr_code/cr_frame;
    # fall back from one naming scheme to the other.
    try:
        coro_code = coro.gi_code
    except AttributeError:
        coro_code = coro.cr_code

    try:
        coro_frame = coro.gi_frame
    except AttributeError:
        coro_frame = coro.cr_frame

    filename = coro_code.co_filename
    lineno = 0
    if (isinstance(coro, CoroWrapper) and
            not inspect.isgeneratorfunction(coro.func) and
            coro.func is not None):
        # Prefer the location of the original callable unwrapped by
        # @coroutine; _get_function_source may return None (e.g. builtins).
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))

    return coro_repr
| {
"repo_name": "yotchang4s/cafebabepy",
"path": "src/main/python/asyncio/coroutines.py",
"copies": "6",
"size": "10727",
"license": "bsd-3-clause",
"hash": 2107089602893681700,
"line_mean": 30.1831395349,
"line_max": 79,
"alpha_frac": 0.5801249184,
"autogenerated": false,
"ratio": 3.883779869659667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005918765536907417,
"num_lines": 344
} |
__all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import events
from . import futures
from .log import logger
# True on Python 3.5+, where 'await' and native coroutines exist.
_PY35 = sys.version_info >= (3, 5)

# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']

# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below).  That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call.  Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines.  A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment
          and bool(os.environ.get('PYTHONASYNCIODEBUG')))

# Feature detection for Python 3.5+ facilities; the fallbacks select
# the 3.4-compatible code paths elsewhere in this module.
try:
    types.coroutine
except AttributeError:
    native_coroutine_support = False
else:
    native_coroutine_support = True

try:
    _iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    _iscoroutinefunction = lambda func: False

try:
    inspect.CO_COROUTINE
except AttributeError:
    _is_native_coro_code = lambda code: False
else:
    _is_native_coro_code = lambda code: (code.co_flags &
                                         inspect.CO_COROUTINE)

try:
    from collections.abc import Coroutine as CoroutineABC, \
                                Awaitable as AwaitableABC
except ImportError:
    CoroutineABC = AwaitableABC = None
# Check for CPython issue #21209
def has_yield_from_bug():
    """Detect CPython issue #21209 at runtime.

    On affected interpreters, "yield from" over a generator with a custom
    send() unpacks a tuple argument instead of forwarding it unchanged.
    Returns True when the bug is present.
    """
    class ProbeGen:
        def __init__(self):
            self.send_args = None

        def __iter__(self):
            return self

        def __next__(self):
            return 42

        def send(self, *what):
            # Record exactly what arrived so the caller can inspect it.
            self.send_args = what
            return None

    def delegate(gen):
        yield from gen

    probe = ProbeGen()
    wrapper = delegate(probe)
    next(wrapper)
    payload = (1, 2, 3)
    wrapper.send(payload)
    # A correct interpreter forwards the tuple as a single argument.
    return probe.send_args != (payload,)
# Evaluate the probe once at import time, then drop the helper.
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
def debug_wrapper(gen):
    """Wrap native 'async def' coroutines in CoroWrapper for debug mode."""
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    if _is_native_coro_code(gen.gi_code):
        return CoroWrapper(gen, None)
    else:
        return gen
class CoroWrapper:
    """Proxy around a coroutine/generator object, used in _DEBUG mode.

    Delegates the generator protocol to the wrapped object and, on
    finalization, logs an error if the coroutine was created but never
    iterated (a common "forgot yield from / await" mistake).
    """

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # Creation stack, shown in __del__'s diagnostic message.
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)

    def __iter__(self):
        return self

    if _PY35:
        __await__ = __iter__  # make compatible with 'await' expression

    def __next__(self):
        return self.gen.send(None)

    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)

    def throw(self, exc):
        return self.gen.throw(exc)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        # f_lasti == -1 means the coroutine body never started executing,
        # i.e. the object was created but never awaited/yielded from.
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    is_coroutine = _iscoroutinefunction(func)
    if is_coroutine and _is_native_coro_code(func.__code__):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that runs it and, if the
        # result is a future/generator/awaitable, delegates to it.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if isinstance(res, futures.Future) or inspect.isgenerator(res):
                res = yield from res
            elif AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, AwaitableABC):
                        res = yield from await_meth()
            return res

    if not _DEBUG:
        if native_coroutine_support:
            # types.coroutine flags the generator function as a coroutine.
            wrapper = types.coroutine(coro)
        else:
            wrapper = coro
    else:
        # Debug mode: every call produces a CoroWrapper so that
        # never-awaited coroutines get reported.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    # Flag set by @coroutine; otherwise defer to inspect's check.
    marker = getattr(func, '_is_coroutine', False)
    return marker or _iscoroutinefunction(func)
# Types accepted by iscoroutine(); the Coroutine ABC is only available on
# sufficiently recent Python versions (see the import guard above).
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if CoroutineABC is not None:
    _COROUTINE_TYPES += (CoroutineABC,)
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    recognized = _COROUTINE_TYPES
    return isinstance(obj, recognized)
def _format_coroutine(coro):
    """Return a human-readable description of *coro*.

    Used by CoroWrapper.__repr__ and task repr/log messages; the result
    looks like "name() running at file.py:12" or "name() done, defined at
    file.py:7".
    """
    assert iscoroutine(coro)
    if isinstance(coro, CoroWrapper):
        func = coro.func
    else:
        func = coro
    coro_name = events._format_callback(func, ())
    filename = coro.gi_code.co_filename
    # Default used when no source information is available; the other copy
    # of this module in this codebase applies the same fallback.
    lineno = 0
    if (isinstance(coro, CoroWrapper)
            and not inspect.isgeneratorfunction(coro.func)
            and coro.func is not None):
        # Bug fix: _get_function_source() can return None (builtins,
        # callables without source) and coro.func can be None (wrappers made
        # by debug_wrapper); the previous code unpacked/inspected them
        # unconditionally and raised TypeError.
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro.gi_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro.gi_frame is not None:
        lineno = coro.gi_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro.gi_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))
    return coro_repr
| {
"repo_name": "munyirik/python",
"path": "cpython/Lib/asyncio/coroutines.py",
"copies": "3",
"size": "8669",
"license": "bsd-3-clause",
"hash": 4181148479544993000,
"line_mean": 30.5236363636,
"line_max": 79,
"alpha_frac": 0.5910716346,
"autogenerated": false,
"ratio": 3.8494671403197156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5940538774919716,
"avg_score": null,
"num_lines": null
} |
__all__ = ['correlate']
import os
import numpy
import math
import pycuda.driver as cuda
import pycuda.autoinit
import pycuda.gpuarray
from pycuda.compiler import SourceModule
from .utils import gpu_func
from .enums import CorrelationModes, MAX_BLOCK_SIZE
# Compile the CUDA kernel once at import time; the compiled module is
# cached on disk under CACHE_DIR to speed up subsequent imports.
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
CACHE_DIR = os.path.join(CUR_DIR, 'cache')
mod = SourceModule(open(os.path.join(CUR_DIR, 'kernel/correlate.cu')).read(), cache_dir=CACHE_DIR)
correlate_kernel = mod.get_function('correlate_kernel')
# Device-global symbols used to pass array geometry/padding to the kernel
# (get_global returns a (device_ptr, size) pair; we keep the pointer).
d_ax_size = mod.get_global('d_ax_size')[0]
d_ay_size = mod.get_global('d_ay_size')[0]
d_aout_size = mod.get_global('d_aout_size')[0]
d_padding = mod.get_global('d_padding')[0]
@gpu_func
def correlate(d_a1, d_a2, mode=CorrelationModes.FULL):
    """Correlate two 3-D GPU arrays on the device.

    Parameters
    ----------
    d_a1, d_a2 : GPU arrays with a 3-element ``.shape``.
    mode : CorrelationModes
        FULL pads so every overlap position is produced; any other value
        yields only fully-overlapping ("valid") positions.

    Returns
    -------
    pycuda.gpuarray.GPUArray of float32 with the correlation result.
    """
    x1, y1, z1 = d_a1.shape
    x2, y2, z2 = d_a2.shape
    if mode == CorrelationModes.FULL:
        # In FULL mode, cycle through minimum overlap, including those where
        # the array is out of bounds. Out of bound values are treated as 0s
        outx, outy, outz = x1+x2-1, y1+y2-1, z1+z2-1
        xpad = x2 - 1
        ypad = y2 - 1
        zpad = z2 - 1
    else:
        # "Valid" mode: no padding.
        # NOTE(review): assumes d_a2 is no larger than d_a1 along every
        # axis, otherwise the output shape goes non-positive — confirm.
        outx, outy, outz = x1-x2+1, y1-y2+1, z1-z2+1
        xpad = 0
        ypad = 0
        zpad = 0
    # Publish input/output geometry and padding to the kernel's
    # device-global symbols before launch.
    cuda.memcpy_htod(d_ax_size, numpy.array(d_a1.shape, dtype=numpy.int32))
    cuda.memcpy_htod(d_ay_size, numpy.array(d_a2.shape, dtype=numpy.int32))
    d_aout = pycuda.gpuarray.zeros((outx, outy, outz), numpy.float32)
    cuda.memcpy_htod(d_aout_size, numpy.array(d_aout.shape, dtype=numpy.int32))
    cuda.memcpy_htod(d_padding, numpy.array((xpad, ypad, zpad), dtype=numpy.int32))
    # One thread per output element, capped at MAX_BLOCK_SIZE threads per
    # block; enough blocks to cover the whole output volume.
    thread_size = min(outx*outy*outz, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(outx*outy*outz / float(thread_size))), 1)
    correlate_kernel(d_a1, d_a2, d_aout,
                     block=(thread_size,1,1), grid=(block_size,1,1))
    return d_aout
| {
"repo_name": "Captricity/sciguppy",
"path": "sciguppy/correlate.py",
"copies": "1",
"size": "1871",
"license": "mit",
"hash": 2100908676390892800,
"line_mean": 36.42,
"line_max": 98,
"alpha_frac": 0.6600748263,
"autogenerated": false,
"ratio": 2.6204481792717087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3780523005571709,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(*args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.

        '''
        # 'self' is taken positionally from *args (not named) so that a
        # keyword argument literally called "self" still works.
        if not args:
            raise TypeError("descriptor '__init__' of 'OrderedDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        return dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, _ = self.__map.pop(key)
        link_prev[1] = link_next                        # update link_prev[NEXT]
        link_next[0] = link_prev                        # update link_next[PREV]

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root[1]                                  # start at the first node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[1]                              # move to next node

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0]                                  # start at the last node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[0]                              # move to previous node

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
        dict.clear(self)

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])

    update = MutableMapping.update

    __update = update  # let subclasses override update without breaking __init__

    # Private sentinel so pop() can distinguish "no default given" from
    # a caller explicitly passing None as the default.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        key = next(reversed(self) if last else iter(self))
        value = self.pop(key)
        return key, value

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (directly or indirectly) contains itself; keyed by
        # (object id, thread id) so it is safe across threads.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.

        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(_imap(_eq, self, other))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other

    # -- the following methods support python 3.x style dictionary views --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
################################################################################
### namedtuple
################################################################################
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
__dict__ = _property(_asdict)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
pass
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """
    # NOTE: Python 2 implementation — relies on basestring, list-returning
    # map(), the print statement, and 'exec ... in namespace'.

    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split()
    field_names = map(str, field_names)
    typename = str(typename)
    if rename:
        # Replace any invalid or duplicate name with a positional '_N' name.
        seen = set()
        for index, name in enumerate(field_names):
            if (not all(c.isalnum() or c=='_' for c in name)
                or _iskeyword(name)
                or not name
                or name[0].isdigit()
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) != str:
            raise TypeError('Type names and field names must be strings')
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain '
                             'alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with '
                             'a number: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)

    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )
    if verbose:
        print class_definition

    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     OrderedDict=OrderedDict, _property=property, _tuple=tuple)
    try:
        exec class_definition in namespace
    except SyntaxError as e:
        raise SyntaxError(e.message + ':\n' + class_definition)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass

    return result
########################################################################
### Counter
########################################################################
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba') # count elements from a string
    >>> c.most_common(3) # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c) # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements())) # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values()) # total of all counts
    15
    >>> c['a'] # count of letter 'a'
    5
    >>> for elem in 'shazam': # update counts from an iterable
    ... c[elem] += 1 # by adding 1 to each element's count
    >>> c['a'] # now there are seven 'a'
    7
    >>> del c['b'] # remove all 'b'
    >>> c['b'] # now there are zero 'b'
    0
    >>> d = Counter('simsalabim') # make another counter
    >>> c.update(d) # add in the second counter
    >>> c['a'] # now there are nine 'a'
    9
    >>> c.clear() # empty the counter
    >>> c
    Counter()
    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2 # reduce the count of 'b' by two
    >>> c.most_common() # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    # http://en.wikipedia.org/wiki/Multiset
    # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    # http://code.activestate.com/recipes/259174/
    # Knuth, TAOCP Vol. II section 4.6.3
    # NOTE: this is Python 2 code (iteritems(), old-style super(...) calls);
    # keep that in mind before porting call sites.
    # __init__/update/subtract take bare *args (no explicit 'self' parameter)
    # so that an element literally named 'self' can still be counted via a
    # keyword argument, e.g. Counter(self=4).
    def __init__(*args, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        if not args:
            raise TypeError("descriptor '__init__' of 'Counter' object "
                            "needs an argument")
        self = args[0]  # the instance is the first positional argument
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        super(Counter, self).__init__()
        self.update(*args, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
        # heapq.nlargest avoids a full sort when only the top n are wanted.
        return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements(): # loop over factors
        ... product *= factor # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
    # Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
    # Same *args-for-'self' convention as __init__ (allows c.update(self=1)).
    def update(*args, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch') # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d) # add elements from another counter
        >>> c['h'] # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts. Instead, we implement straight-addition. Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if not args:
            raise TypeError("descriptor 'update' of 'Counter' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.iteritems():
                        self[elem] = self_get(elem, 0) + count
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                # plain iterable: each occurrence adds one to the count
                self_get = self.get
                for elem in iterable:
                    self[elem] = self_get(elem, 0) + 1
        if kwds:
            self.update(kwds)
    # Same *args-for-'self' convention as __init__.
    def subtract(*args, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero. Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch') # subtract elements from another iterable
        >>> c.subtract(Counter('watch')) # subtract elements from another counter
        >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if not args:
            raise TypeError("descriptor 'subtract' of 'Counter' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)
    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)
    def __reduce__(self):
        # Pickle as (class, (plain dict,)) -- reconstructed through __init__.
        return self.__class__, (dict(self),)
    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super(Counter, self).__delitem__(elem)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        # Render entries most-common-first, e.g. Counter({'a': 3, 'b': 1})
        items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
        return '%s({%s})' % (self.__class__.__name__, items)
    # Multiset-style mathematical operations discussed in:
    # Knuth TAOCP Volume II section 4.6.3 exercise 19
    # and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    # c += Counter()
    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # second pass picks up elements present only in 'other'
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # elements only in 'other' with a negative count subtract to a positive
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result
    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # only elements in self can appear; min with a missing element is 0
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
# Self-test / demonstration code for namedtuple (Python 2 print statements).
if __name__ == '__main__':
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print p
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print Point(11, 22)._replace(x=100)
    # _fields supports building new named tuples from existing ones
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
| {
"repo_name": "nzavagli/UnrealPy",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/collections.py",
"copies": "23",
"size": "27261",
"license": "mit",
"hash": -2611871443919341600,
"line_mean": 36.3438356164,
"line_max": 99,
"alpha_frac": 0.5383514911,
"autogenerated": false,
"ratio": 4.2232378001549185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001737038949694134,
"num_lines": 730
} |
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # A repeated __init__ call must not wipe existing order state,
            # so only build the sentinel/map on first initialization.
            self.__root
        except AttributeError:
            self.__root = root = [] # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        return dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, _ = self.__map.pop(key)
        link_prev[1] = link_next # update link_prev[NEXT]
        link_next[0] = link_prev # update link_next[PREV]
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root[1] # start at the first node
        while curr is not root:
            yield curr[2] # yield the curr[KEY]
            curr = curr[1] # move to next node
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0] # start at the last node
        while curr is not root:
            yield curr[2] # yield the curr[KEY]
            curr = curr[0] # move to previous node
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        # Reset the sentinel to an empty circular list, then clear both dicts.
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
        dict.clear(self)
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])
    # Reuse the ABC's update(); the private alias keeps __init__ working even
    # if a subclass overrides update().
    update = MutableMapping.update
    __update = update # let subclasses override update without breaking __init__
    # Unique sentinel distinguishing "no default given" from default=None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value. If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        key = next(reversed(self) if last else iter(self))
        value = self.pop(key)
        return key, value
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an OrderedDict
        # (directly or indirectly) contains itself; keyed per-object/per-thread.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # drop the internal __root/__map state; it is rebuilt on unpickling
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(_imap(_eq, self, other))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other
    # -- the following methods support python 3.x style dictionary views --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
################################################################################
### namedtuple
################################################################################
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
# NOTE: Python 2 implementation -- relies on basestring, list-returning map(),
# the print statement, and the 'exec ... in namespace' statement form.
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessable by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Validate the field names. At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split()
    field_names = map(str, field_names)
    if rename:
        # Replace any invalid or duplicate name with a positional placeholder
        # of the form '_<index>'.
        seen = set()
        for index, name in enumerate(field_names):
            if (not all(c.isalnum() or c=='_' for c in name)
                or _iskeyword(name)
                or not name
                or name[0].isdigit()
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain '
                             'alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with '
                             'a number: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )
    if verbose:
        print class_definition
    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     OrderedDict=OrderedDict, _property=property, _tuple=tuple)
    try:
        exec class_definition in namespace
    except SyntaxError as e:
        # re-raise with the generated source attached for easier debugging
        raise SyntaxError(e.message + ':\n' + class_definition)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
########################################################################
### Counter
########################################################################
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba') # count elements from a string
    >>> c.most_common(3) # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c) # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements())) # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values()) # total of all counts
    15
    >>> c['a'] # count of letter 'a'
    5
    >>> for elem in 'shazam': # update counts from an iterable
    ... c[elem] += 1 # by adding 1 to each element's count
    >>> c['a'] # now there are seven 'a'
    7
    >>> del c['b'] # remove all 'b'
    >>> c['b'] # now there are zero 'b'
    0
    >>> d = Counter('simsalabim') # make another counter
    >>> c.update(d) # add in the second counter
    >>> c['a'] # now there are nine 'a'
    9
    >>> c.clear() # empty the counter
    >>> c
    Counter()
    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2 # reduce the count of 'b' by two
    >>> c.most_common() # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    # http://en.wikipedia.org/wiki/Multiset
    # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    # http://code.activestate.com/recipes/259174/
    # Knuth, TAOCP Vol. II section 4.6.3
    # NOTE: Python 2 code (iteritems(), old-style super(...) calls).
    # NOTE(review): unlike newer CPython versions, the explicit 'self' and
    # 'iterable' parameter names here can collide with keyword elements of
    # the same names (e.g. Counter(self=2) raises TypeError).
    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        super(Counter, self).__init__()
        self.update(iterable, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
        # heapq.nlargest avoids a full sort when only the top n are wanted.
        return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements(): # loop over factors
        ... product *= factor # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
    # Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch') # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d) # add elements from another counter
        >>> c['h'] # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts. Instead, we implement straight-addition. Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.iteritems():
                        self[elem] = self_get(elem, 0) + count
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                # plain iterable: each occurrence adds one to the count
                self_get = self.get
                for elem in iterable:
                    self[elem] = self_get(elem, 0) + 1
        if kwds:
            self.update(kwds)
    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero. Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch') # subtract elements from another iterable
        >>> c.subtract(Counter('watch')) # subtract elements from another counter
        >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)
    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)
    def __reduce__(self):
        # Pickle as (class, (plain dict,)) -- reconstructed through __init__.
        return self.__class__, (dict(self),)
    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super(Counter, self).__delitem__(elem)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        # Render entries most-common-first, e.g. Counter({'a': 3, 'b': 1})
        items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
        return '%s({%s})' % (self.__class__.__name__, items)
    # Multiset-style mathematical operations discussed in:
    # Knuth TAOCP Volume II section 4.6.3 exercise 19
    # and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    # c += Counter()
    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # second pass picks up elements present only in 'other'
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # elements only in 'other' with a negative count subtract to a positive
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result
    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # only elements in self can appear; min with a missing element is 0
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
# Self-test / demonstration code for namedtuple (Python 2 print statements).
if __name__ == '__main__':
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print p
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print Point(11, 22)._replace(x=100)
    # _fields supports building new named tuples from existing ones
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
| {
"repo_name": "kleientertainment/ds_mod_tools",
"path": "pkg/win32/Python27/Lib/collections.py",
"copies": "2",
"size": "26576",
"license": "mit",
"hash": -4664490877256699000,
"line_mean": 36.3492063492,
"line_max": 99,
"alpha_frac": 0.5263019266,
"autogenerated": false,
"ratio": 4.299627891926873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5825929818526873,
"avg_score": null,
"num_lines": null
} |
__all__ = ['CounterHashingVectorizer', 'ListHashingVectorizer', 'ListCountVectorizer', 'ListNGramAnalyzer', 'ListTfidfVectorizer', 'SpaceTokenizerTransformer', '_list_analyzer']
import logging
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from .base import PureTransformer
logger = logging.getLogger(__name__)
def _list_analyzer(L):
for elem in L:
yield elem
#end def
class ListNGramAnalyzer(PureTransformer):
    '''Analyzer that expands a pre-tokenized document into delimiter-joined n-grams.
    ngram_range may be a (lo, hi) tuple, a single int, a numeric string, or an
    explicit list of sizes; it is normalized to a sorted list of n-gram lengths.
    '''
    def __init__(self, ngram_range=(1, 1), ngram_delimiter=' ', **kwargs):
        kwargs.setdefault('nparray', False)
        super(ListNGramAnalyzer, self).__init__(**kwargs)
        # Normalize every accepted ngram_range form to a list of sizes.
        if isinstance(ngram_range, tuple) and len(ngram_range) == 2:
            sizes = list(range(ngram_range[0], ngram_range[1] + 1))
        elif isinstance(ngram_range, str):
            sizes = [int(ngram_range)]
        elif isinstance(ngram_range, int):
            sizes = [ngram_range]
        else:
            sizes = ngram_range
        sizes.sort()
        self.ngrams = sizes
        self.ngram_delimiter = ngram_delimiter
    #end def
    def __call__(self, L):
        tokens = list(L)
        total = len(tokens)
        joiner = self.ngram_delimiter
        for start in range(total):
            for size in self.ngrams:
                end = start + size
                # sizes are sorted, so once one n-gram overruns the end,
                # every larger one at this position does too
                if end > total:
                    break
                yield joiner.join(tokens[start:end])
            #end for
        #end for
    #end def
    def transform_one(self, tokens, **kwargs):
        for gram in self.__call__(tokens):
            yield gram
    #end def
def _counter_analyzer(C):
for k, v in C.items():
for _ in range(v):
yield k
#end def
class ListCountVectorizer(CountVectorizer):
    """CountVectorizer whose documents are token lists rather than raw strings.

    Unless an ``analyzer`` is supplied explicitly, a :class:`ListNGramAnalyzer`
    built from the same keyword arguments is installed as the analyzer.
    """
    def __init__(self, **kwargs):
        # Fix: build the default analyzer lazily. dict.setdefault evaluates
        # its default eagerly, so ListNGramAnalyzer(**kwargs) was constructed
        # (and fed vectorizer-only kwargs) even when the caller already
        # supplied an analyzer.
        if 'analyzer' not in kwargs:
            kwargs['analyzer'] = ListNGramAnalyzer(**kwargs)
        super(ListCountVectorizer, self).__init__(**kwargs)
    #end def
    def fit(self, *args, **kwargs):
        # Fix: forward **kwargs (previously accepted but silently dropped).
        ret = super(ListCountVectorizer, self).fit(*args, **kwargs)
        logger.debug('There are {} vocabulary items in <{}>.'.format(len(self.vocabulary_), self))
        return ret
    #end def
    def fit_transform(self, *args, **kwargs):
        # Fix: forward **kwargs (previously accepted but silently dropped).
        transformed = super(ListCountVectorizer, self).fit_transform(*args, **kwargs)
        # Fix: log message previously read "There {} vocabulary items".
        logger.debug('There are {} vocabulary items in <{}>.'.format(len(self.vocabulary_), self))
        return transformed
    #end def
#end class
class ListTfidfVectorizer(TfidfVectorizer):
    """TfidfVectorizer whose documents are token lists rather than raw strings.

    Unless an ``analyzer`` is supplied explicitly, a :class:`ListNGramAnalyzer`
    built from the same keyword arguments is installed as the analyzer.
    """
    def __init__(self, **kwargs):
        # Fix: build the default analyzer lazily. dict.setdefault evaluates
        # its default eagerly, so ListNGramAnalyzer(**kwargs) was constructed
        # (and fed vectorizer-only kwargs) even when the caller already
        # supplied an analyzer.
        if 'analyzer' not in kwargs:
            kwargs['analyzer'] = ListNGramAnalyzer(**kwargs)
        super(ListTfidfVectorizer, self).__init__(**kwargs)
    #end def
    def fit(self, *args, **kwargs):
        # Fix: forward **kwargs (previously accepted but silently dropped).
        ret = super(ListTfidfVectorizer, self).fit(*args, **kwargs)
        logger.debug('There are {} vocabulary items in <{}>.'.format(len(self.vocabulary_), self))
        return ret
    #end def
    def fit_transform(self, X, *args, **kwargs):
        # Fix: forward *args/**kwargs (previously accepted but silently dropped).
        transformed = super(ListTfidfVectorizer, self).fit_transform(X, *args, **kwargs)
        # Fix: log message previously read "There {} vocabulary items".
        logger.debug('There are {} vocabulary items in <{}>.'.format(len(self.vocabulary_), self))
        return transformed
    #end def
#end class
class ListHashingVectorizer(HashingVectorizer):
    """HashingVectorizer whose documents are token lists rather than raw strings.

    Unless an ``analyzer`` is supplied explicitly, a :class:`ListNGramAnalyzer`
    built from the same keyword arguments is installed as the analyzer.
    """
    def __init__(self, **kwargs):
        # Fix: build the default analyzer lazily. dict.setdefault evaluates
        # its default eagerly, so ListNGramAnalyzer(**kwargs) was constructed
        # (and fed vectorizer-only kwargs) even when the caller already
        # supplied an analyzer.
        if 'analyzer' not in kwargs:
            kwargs['analyzer'] = ListNGramAnalyzer(**kwargs)
        super(ListHashingVectorizer, self).__init__(**kwargs)
    #end def
#end class
class CounterHashingVectorizer(HashingVectorizer):
    """HashingVectorizer whose documents are Counter-like element->count mappings."""
    def __init__(self, **kwargs):
        # Install the counter-expanding analyzer unless the caller provided one.
        if 'analyzer' not in kwargs:
            kwargs['analyzer'] = _counter_analyzer
        super(CounterHashingVectorizer, self).__init__(**kwargs)
    #end def
#end class
class SpaceTokenizerTransformer(PureTransformer):
    """Transformer that whitespace-tokenizes each input string."""
    def transform_one(self, text, **kwargs):
        # str.split() with no arguments splits on any run of whitespace and
        # discards leading/trailing whitespace.
        tokens = text.split()
        return tokens
    #end def
#end class
| {
"repo_name": "skylander86/ycml",
"path": "ycml/transformers/text.py",
"copies": "1",
"size": "3736",
"license": "apache-2.0",
"hash": -4803786683296372000,
"line_mean": 29.1290322581,
"line_max": 177,
"alpha_frac": 0.6445396146,
"autogenerated": false,
"ratio": 3.8083588175331293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49528984321331293,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Counter', 'OrderedDict']
# The code below is taken from the following ActiveState recipe:
#
# http://code.activestate.com/recipes/576611-counter-class/
#
# and was licensed under the MIT license.
# Fallback definition of Counter for Python versions (< 2.7) whose stdlib
# lacks it, per the ActiveState recipe referenced in the header above.
try:
    from collections import Counter
except ImportError:
    # Python 2-only APIs are used below (dict.iteritems, itertools.ifilter);
    # this branch can only execute on an old Python 2 interpreter.
    from operator import itemgetter
    from heapq import nlargest
    from itertools import repeat, ifilter
    class Counter(dict):
        '''Dict subclass for counting hashable objects. Sometimes called a bag
        or multiset. Elements are stored as dictionary keys and their counts
        are stored as dictionary values.
        >>> Counter('zyzygy')
        Counter({'y': 3, 'z': 2, 'g': 1})
        '''
        def __init__(self, iterable=None, **kwds):
            '''Create a new, empty Counter object. And if given, count elements
            from an input iterable. Or, initialize the count from another mapping
            of elements to their counts.
            >>> c = Counter() # a new, empty counter
            >>> c = Counter('gallahad') # a new counter from an iterable
            >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
            >>> c = Counter(a=4, b=2) # a new counter from keyword args
            '''
            self.update(iterable, **kwds)
        def __missing__(self, key):
            # Absent elements report a count of zero instead of raising KeyError.
            return 0
        def most_common(self, n=None):
            '''List the n most common elements and their counts from the most
            common to the least. If n is None, then list all element counts.
            >>> Counter('abracadabra').most_common(3)
            [('a', 5), ('r', 2), ('b', 2)]
            '''
            if n is None:
                # Full listing: sort every (element, count) pair by count, descending.
                return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
            # Partial listing: heapq.nlargest avoids a full sort for small n.
            return nlargest(n, self.iteritems(), key=itemgetter(1))
        def elements(self):
            '''Iterator over elements repeating each as many times as its count.
            >>> c = Counter('ABCABC')
            >>> sorted(c.elements())
            ['A', 'A', 'B', 'B', 'C', 'C']
            If an element's count has been set to zero or is a negative number,
            elements() will ignore it.
            '''
            for elem, count in self.iteritems():
                # repeat(None, count) yields nothing when count <= 0.
                for _ in repeat(None, count):
                    yield elem
        # Override dict methods where the meaning changes for Counter objects.
        @classmethod
        def fromkeys(cls, iterable, v=None):
            raise NotImplementedError(
                'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
        def update(self, iterable=None, **kwds):
            '''Like dict.update() but add counts instead of replacing them.
            Source can be an iterable, a dictionary, or another Counter instance.
            >>> c = Counter('which')
            >>> c.update('witch') # add elements from another iterable
            >>> d = Counter('watch')
            >>> c.update(d) # add elements from another counter
            >>> c['h'] # four 'h' in which, witch, and watch
            4
            '''
            if iterable is not None:
                if hasattr(iterable, 'iteritems'):
                    # Mapping input: merge counts key by key.
                    if self:
                        self_get = self.get
                        for elem, count in iterable.iteritems():
                            self[elem] = self_get(elem, 0) + count
                    else:
                        dict.update(self, iterable) # fast path when counter is empty
                else:
                    # Plain iterable input: count each occurrence.
                    self_get = self.get
                    for elem in iterable:
                        self[elem] = self_get(elem, 0) + 1
            if kwds:
                # Keyword arguments are treated as an elem=count mapping.
                self.update(kwds)
        def copy(self):
            'Like dict.copy() but returns a Counter instance instead of a dict.'
            return Counter(self)
        def __delitem__(self, elem):
            'Like dict.__delitem__() but does not raise KeyError for missing values.'
            if elem in self:
                dict.__delitem__(self, elem)
        def __repr__(self):
            if not self:
                return '%s()' % self.__class__.__name__
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        # Multiset-style mathematical operations discussed in:
        # Knuth TAOCP Volume II section 4.6.3 exercise 19
        # and at http://en.wikipedia.org/wiki/Multiset
        #
        # Outputs guaranteed to only include positive counts.
        #
        # To strip negative and zero counts, add-in an empty counter:
        # c += Counter()
        def __add__(self, other):
            '''Add counts from two counters.
            >>> Counter('abbb') + Counter('bcc')
            Counter({'b': 4, 'c': 2, 'a': 1})
            '''
            if not isinstance(other, Counter):
                return NotImplemented
            result = Counter()
            for elem in set(self) | set(other):
                newcount = self[elem] + other[elem]
                if newcount > 0:
                    result[elem] = newcount
            return result
        def __sub__(self, other):
            ''' Subtract count, but keep only results with positive counts.
            >>> Counter('abbbc') - Counter('bccd')
            Counter({'b': 2, 'a': 1})
            '''
            if not isinstance(other, Counter):
                return NotImplemented
            result = Counter()
            for elem in set(self) | set(other):
                newcount = self[elem] - other[elem]
                if newcount > 0:
                    result[elem] = newcount
            return result
        def __or__(self, other):
            '''Union is the maximum of value in either of the input counters.
            >>> Counter('abbb') | Counter('bcc')
            Counter({'b': 3, 'c': 2, 'a': 1})
            '''
            if not isinstance(other, Counter):
                return NotImplemented
            _max = max
            result = Counter()
            for elem in set(self) | set(other):
                newcount = _max(self[elem], other[elem])
                if newcount > 0:
                    result[elem] = newcount
            return result
        def __and__(self, other):
            ''' Intersection is the minimum of corresponding counts.
            >>> Counter('abbb') & Counter('bcc')
            Counter({'b': 1})
            '''
            if not isinstance(other, Counter):
                return NotImplemented
            _min = min
            result = Counter()
            # Iterate over the smaller mapping: only keys present in both can
            # contribute to the intersection.
            if len(self) < len(other):
                self, other = other, self
            for elem in ifilter(self.__contains__, other):
                newcount = _min(self[elem], other[elem])
                if newcount > 0:
                    result[elem] = newcount
            return result
# The code below is licensed as:
#
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Fallback definition of OrderedDict for Python versions (< 2.7) whose stdlib
# lacks it. Insertion order is tracked with a circular doubly linked list of
# [key, prev, next] cells anchored on a sentinel node.
try:
    from collections import OrderedDict
except ImportError:
    # UserDict.DictMixin is Python 2-only; this branch never runs on Python 3.
    from UserDict import DictMixin
    class OrderedDict(dict, DictMixin):
        def __init__(self, *args, **kwds):
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                # If the sentinel already exists this is a re-__init__ of an
                # existing instance, so keep the current linked list.
                self.__end
            except AttributeError:
                self.clear()
            self.update(*args, **kwds)
        def clear(self):
            self.__end = end = []
            end += [None, end, end] # sentinel node for doubly linked list
            self.__map = {} # key --> [key, prev, next]
            dict.clear(self)
        def __setitem__(self, key, value):
            if key not in self:
                # New key: splice its cell in just before the sentinel,
                # i.e. at the logical end of the ordering.
                end = self.__end
                curr = end[1]
                curr[2] = end[1] = self.__map[key] = [key, curr, end]
            dict.__setitem__(self, key, value)
        def __delitem__(self, key):
            dict.__delitem__(self, key)
            # Unlink the key's cell from the doubly linked list.
            key, prev, next = self.__map.pop(key)
            prev[2] = next
            next[1] = prev
        def __iter__(self):
            # Walk the list forward from the sentinel: keys in insertion order.
            end = self.__end
            curr = end[2]
            while curr is not end:
                yield curr[0]
                curr = curr[2]
        def __reversed__(self):
            # Walk the list backward from the sentinel.
            end = self.__end
            curr = end[1]
            while curr is not end:
                yield curr[0]
                curr = curr[1]
        def popitem(self, last=True):
            if not self:
                raise KeyError('dictionary is empty')
            if last:
                key = reversed(self).next() # .next() is the Python 2 iterator protocol
            else:
                key = iter(self).next()
            value = self.pop(key)
            return key, value
        def __reduce__(self):
            # Pickle support: the self-referential linked list cannot be
            # pickled, so strip it, snapshot the instance dict, then restore.
            items = [[k, self[k]] for k in self]
            tmp = self.__map, self.__end
            del self.__map, self.__end
            inst_dict = vars(self).copy()
            self.__map, self.__end = tmp
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def keys(self):
            return list(self)
        # DictMixin derives the rest of the mapping API from the primitives above.
        setdefault = DictMixin.setdefault
        update = DictMixin.update
        pop = DictMixin.pop
        values = DictMixin.values
        items = DictMixin.items
        iterkeys = DictMixin.iterkeys
        itervalues = DictMixin.itervalues
        iteritems = DictMixin.iteritems
        def __repr__(self):
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        def copy(self):
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            # Order-sensitive equality against another OrderedDict;
            # order-insensitive (plain dict) equality otherwise.
            if isinstance(other, OrderedDict):
                if len(self) != len(other):
                    return False
                for p, q in zip(self.items(), other.items()):
                    if p != q:
                        return False
                return True
            return dict.__eq__(self, other)
        def __ne__(self, other):
            return not self == other
| {
"repo_name": "JudoWill/glue",
"path": "glue/compat/collections.py",
"copies": "1",
"size": "11619",
"license": "bsd-3-clause",
"hash": 8955226114177224000,
"line_mean": 33.6835820896,
"line_max": 89,
"alpha_frac": 0.5083053619,
"autogenerated": false,
"ratio": 4.576211106734935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5584516468634936,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.