| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import egads
# import thermodynamic module and rename to simplify usage
import egads.algorithms.thermodynamics as thermo
# get list of all NetCDF files in 'data' directory
filenames = egads.input.get_file_list('data/*.nc')
f = egads.input.EgadsNetCdf() # create EgadsNetCdf instance
for name in filenames: # loop through files
f.open(name, 'a') # open NetCdf file with append permissions
T_s = f.read_variable('T_t') # read in static temperature
P_s = f.read_variable('P_s') # read in static pressure from file
rho = thermo.DensityDryAirCnrm().run(P_s, T_s) # calculate density
f.write_variable(rho, 'rho', ('Time',)) # output variable
f.close() # close file
|
{
"content_hash": "0cfcaa074a603b6a1aba310d82769b5f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 41.888888888888886,
"alnum_prop": 0.649867374005305,
"repo_name": "eufarn7sp/egads-eufar",
"id": "8b1555be93f973526ed169d807f6c8b0ee7c2a33",
"size": "800",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/source/example_files/example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "309547"
}
],
"symlink_target": ""
}
|
import json
def read_json_data(file_path):
with open(file_path) as f:
data = json.load(f)
return data
|
{
"content_hash": "3346230518b42b13e550f2b10fe6f412",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 30,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.625,
"repo_name": "edx/edx-app-android",
"id": "f69a7d7450f7184268ff9fc3e38666b1e3c7bec6",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transifex_utils/core/json_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1273"
},
{
"name": "HTML",
"bytes": "585518"
},
{
"name": "Java",
"bytes": "2665045"
},
{
"name": "JavaScript",
"bytes": "1319"
},
{
"name": "Kotlin",
"bytes": "222561"
},
{
"name": "Makefile",
"bytes": "2673"
},
{
"name": "Python",
"bytes": "35951"
},
{
"name": "Shell",
"bytes": "2137"
}
],
"symlink_target": ""
}
|
from __future__ import division
import vistrails.core.db.action
from vistrails.core.db.locator import XMLFileLocator
from vistrails.core.db.io import serialize, unserialize
from vistrails.core import debug
from vistrails.core.interpreter.default import get_default_interpreter
from vistrails.core.log.group_exec import GroupExec
from vistrails.core.log.machine import Machine
from vistrails.core.log.module_exec import ModuleExec
from vistrails.core.modules.basic_modules import Constant
import vistrails.core.modules.module_registry
import vistrails.core.modules.utils
from vistrails.core.modules.vistrails_module import Module, ModuleError, \
InvalidOutput
from vistrails.core.vistrail.annotation import Annotation
from vistrails.core.vistrail.controller import VistrailController
from vistrails.core.vistrail.group import Group
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.db.domain import IdScope
import vistrails.db.versions
import copy
import inspect
from itertools import izip
import os
import re
import sys
import tempfile
from IPython.parallel.error import CompositeError
from .api import get_client
try:
import hashlib
sha1_hash = hashlib.sha1
except ImportError:
import sha
sha1_hash = sha.new
###############################################################################
# This function is sent to the engines which execute it
#
# It receives the workflow, and the list of targeted output ports
#
# It returns the corresponding computed outputs and the execution log
#
def execute_wf(wf, output_port):
# Save the workflow in a temporary file
temp_wf_fd, temp_wf = tempfile.mkstemp()
try:
f = open(temp_wf, 'w')
f.write(wf)
f.close()
os.close(temp_wf_fd)
# Clean the cache
interpreter = get_default_interpreter()
interpreter.flush()
# Load the Pipeline from the temporary file
vistrail = Vistrail()
locator = XMLFileLocator(temp_wf)
workflow = locator.load(Pipeline)
# Build a Vistrail from this single Pipeline
action_list = []
for module in workflow.module_list:
action_list.append(('add', module))
for connection in workflow.connection_list:
action_list.append(('add', connection))
action = vistrails.core.db.action.create_action(action_list)
vistrail.add_action(action, 0L)
vistrail.update_id_scope()
tag = 'parallel flow'
vistrail.addTag(tag, action.id)
# Build a controller and execute
controller = VistrailController()
controller.set_vistrail(vistrail, None)
controller.change_selected_version(vistrail.get_version_number(tag))
execution = controller.execute_current_workflow(
custom_aliases=None,
custom_params=None,
extra_info=None,
reason='API Pipeline Execution')
# Build a list of errors
errors = []
pipeline = vistrail.getPipeline(tag)
execution_errors = execution[0][0].errors
if execution_errors:
for key in execution_errors:
module = pipeline.modules[key]
msg = '%s: %s' %(module.name, execution_errors[key])
errors.append(msg)
# Get the execution log from the controller
try:
module_log = controller.log.workflow_execs[0].item_execs[0]
except IndexError:
errors.append("Module log not found")
return dict(errors=errors)
else:
machine = controller.log.workflow_execs[0].machines[
module_log.machine_id]
xml_log = serialize(module_log)
machine_log = serialize(machine)
# Get the output value
output = None
if not execution_errors:
executed_module, = execution[0][0].executed
executed_module = execution[0][0].objects[executed_module]
try:
output = executed_module.get_output(output_port)
except ModuleError:
errors.append("Output port not found: %s" % output_port)
return dict(errors=errors)
if isinstance(output, Module):
raise TypeError("Output value is a Module instance")
# Return the dictionary, that will be sent back to the client
return dict(errors=errors,
output=output,
xml_log=xml_log,
machine_log=machine_log)
finally:
os.unlink(temp_wf)
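# Illustrative sketch (not part of the original module): the dict shape that
# execute_wf() above sends back to the client, reconstructed from its return
# statements. The placeholder strings are made up.
def _example_execute_wf_result(success=True):
    if success:
        return dict(errors=[],
                    output='<value read from the requested output port>',
                    xml_log='<serialized ModuleExec>',
                    machine_log='<serialized Machine>')
    return dict(errors=['SomeModule: description of the problem'])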
###############################################################################
_ansi_code = re.compile(r'%s(?:(?:\[[^A-Za-z]*[A-Za-z])|[^\[])' % '\x1B')
def strip_ansi_codes(s):
return _ansi_code.sub('', s)
###############################################################################
# Map Operator
#
class Map(Module):
"""The Map Module executes a map operator in parallel on IPython engines.
The FunctionPort should be connected to the 'self' output of the module you
want to execute.
The InputList is the list of values to be scattered on the engines.
"""
def __init__(self):
Module.__init__(self)
def update_upstream(self):
"""A modified version of the update_upstream method."""
# everything is the same except that we don't update anything
# upstream of FunctionPort
for port_name, connector_list in self.inputPorts.iteritems():
if port_name == 'FunctionPort':
for connector in connector_list:
connector.obj.update_upstream()
else:
for connector in connector_list:
connector.obj.update()
for port_name, connectorList in copy.copy(self.inputPorts.items()):
if port_name != 'FunctionPort':
for connector in connectorList:
if connector.obj.get_output(connector.port) is \
InvalidOutput:
self.remove_input_connector(port_name, connector)
@staticmethod
def print_compositeerror(e):
sys.stderr.write("Got %d exceptions from IPython engines:\n" %
len(e.elist))
for e_type, e_msg, formatted_tb, infos in e.elist:
sys.stderr.write("Error from engine %d (%r):\n" % (
infos['engine_id'], infos['engine_uuid']))
sys.stderr.write("%s\n" % strip_ansi_codes(formatted_tb))
@staticmethod
def list_exceptions(e):
return '\n'.join(
"% 3d: %s: %s" % (infos['engine_id'],
e_type,
e_msg)
for e_type, e_msg, tb, infos in e.elist)
def updateFunctionPort(self):
"""
        Function to be used inside the update_upstream method of the Map module. It
updates the module connected to the FunctionPort port, executing it in
parallel.
"""
nameInput = self.get_input('InputPort')
nameOutput = self.get_input('OutputPort')
rawInputList = self.get_input('InputList')
# Create inputList to always have iterable elements
# to simplify code
if len(nameInput) == 1:
element_is_iter = False
inputList = [[element] for element in rawInputList]
else:
element_is_iter = True
inputList = rawInputList
workflows = []
module = None
vtType = None
# iterating through the connectors
for connector in self.inputPorts.get('FunctionPort'):
module = connector.obj
# pipeline
original_pipeline = connector.obj.moduleInfo['pipeline']
# module
module_id = connector.obj.moduleInfo['moduleId']
vtType = original_pipeline.modules[module_id].vtType
# serialize the module for each value in the list
for i, element in enumerate(inputList):
if element_is_iter:
self.element = element
else:
self.element = element[0]
# checking type and setting input in the module
self.typeChecking(connector.obj, nameInput, inputList)
self.setInputValues(connector.obj, nameInput, element, i)
pipeline_db_module = original_pipeline.modules[module_id].do_copy()
# transforming a subworkflow in a group
# TODO: should we also transform inner subworkflows?
if pipeline_db_module.is_abstraction():
group = Group(id=pipeline_db_module.id,
cache=pipeline_db_module.cache,
location=pipeline_db_module.location,
functions=pipeline_db_module.functions,
annotations=pipeline_db_module.annotations)
source_port_specs = pipeline_db_module.sourcePorts()
dest_port_specs = pipeline_db_module.destinationPorts()
for source_port_spec in source_port_specs:
group.add_port_spec(source_port_spec)
for dest_port_spec in dest_port_specs:
group.add_port_spec(dest_port_spec)
group.pipeline = pipeline_db_module.pipeline
pipeline_db_module = group
# getting highest id between functions to guarantee unique ids
# TODO: can get current IdScope here?
if pipeline_db_module.functions:
high_id = max(function.db_id
for function in pipeline_db_module.functions)
else:
high_id = 0
# adding function and parameter to module in pipeline
# TODO: 'pos' should not be always 0 here
id_scope = IdScope(beginId=long(high_id+1))
for elementValue, inputPort in izip(element, nameInput):
p_spec = pipeline_db_module.get_port_spec(inputPort, 'input')
descrs = p_spec.descriptors()
if len(descrs) != 1:
raise ModuleError(
self,
"Tuple input ports are not supported")
if not issubclass(descrs[0].module, Constant):
raise ModuleError(
self,
"Module inputs should be Constant types")
type = p_spec.sigstring[1:-1]
mod_function = ModuleFunction(id=id_scope.getNewId(ModuleFunction.vtType),
pos=0,
name=inputPort)
mod_param = ModuleParam(id=0L,
pos=0,
type=type,
val=elementValue)
mod_function.add_parameter(mod_param)
pipeline_db_module.add_function(mod_function)
# serializing module
wf = self.serialize_module(pipeline_db_module)
workflows.append(wf)
# getting first connector, ignoring the rest
break
# IPython stuff
try:
rc = get_client()
except Exception, error:
raise ModuleError(self, "Exception while loading IPython: %s" %
debug.format_exception(error))
if rc is None:
raise ModuleError(self, "Couldn't get an IPython connection")
engines = rc.ids
if not engines:
raise ModuleError(
self,
"Exception while loading IPython: No IPython engines "
"detected!")
# initializes each engine
# importing modules and initializing the VisTrails application
# in the engines *only* in the first execution on this engine
uninitialized = []
for eng in engines:
try:
rc[eng]['init']
except Exception:
uninitialized.append(eng)
if uninitialized:
init_view = rc[uninitialized]
with init_view.sync_imports():
import tempfile
import inspect
# VisTrails API
import vistrails
import vistrails.core
import vistrails.core.db.action
import vistrails.core.application
import vistrails.core.modules.module_registry
from vistrails.core.db.io import serialize
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.db.locator import XMLFileLocator
from vistrails.core.vistrail.controller import VistrailController
from vistrails.core.interpreter.default import get_default_interpreter
# initializing a VisTrails application
try:
init_view.execute(
'app = vistrails.core.application.init('
' {"spawned": True},'
' args=[])',
block=True)
except CompositeError, e:
self.print_compositeerror(e)
raise ModuleError(self, "Error initializing application on "
"IPython engines:\n"
"%s" % self.list_exceptions(e))
init_view['init'] = True
# setting computing color
module.logging.set_computing(module)
# executing function in engines
# each map returns a dictionary
try:
ldview = rc.load_balanced_view()
map_result = ldview.map_sync(execute_wf, workflows, [nameOutput]*len(workflows))
except CompositeError, e:
self.print_compositeerror(e)
raise ModuleError(self, "Error from IPython engines:\n"
"%s" % self.list_exceptions(e))
# verifying errors
errors = []
for engine in range(len(map_result)):
if map_result[engine]['errors']:
msg = "ModuleError in engine %d: '%s'" % (
engine,
', '.join(map_result[engine]['errors']))
errors.append(msg)
if errors:
raise ModuleError(self, '\n'.join(errors))
# setting success color
module.logging.signalSuccess(module)
reg = vistrails.core.modules.module_registry.get_module_registry()
self.result = []
for map_execution in map_result:
output = map_execution['output']
self.result.append(output)
# including execution logs
for engine in range(len(map_result)):
log = map_result[engine]['xml_log']
exec_ = None
if (vtType == 'abstraction') or (vtType == 'group'):
exec_ = unserialize(log, GroupExec)
elif (vtType == 'module'):
exec_ = unserialize(log, ModuleExec)
else:
# something is wrong...
continue
# assigning new ids to existing annotations
exec_annotations = exec_.annotations
for i in range(len(exec_annotations)):
exec_annotations[i].id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
parallel_annotation = Annotation(key='parallel_execution', value=True)
parallel_annotation.id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
annotations = [parallel_annotation] + exec_annotations
exec_.annotations = annotations
# before adding the execution log, we need to get the machine information
machine = unserialize(map_result[engine]['machine_log'], Machine)
machine_id = self.logging.add_machine(machine)
# recursively add machine information to execution items
def add_machine_recursive(exec_):
for item in exec_.item_execs:
if hasattr(item, 'machine_id'):
item.machine_id = machine_id
if item.vtType in ('abstraction', 'group'):
add_machine_recursive(item)
exec_.machine_id = machine_id
if (vtType == 'abstraction') or (vtType == 'group'):
add_machine_recursive(exec_)
self.logging.add_exec(exec_)
def serialize_module(self, module):
"""
Serializes a module to be executed in parallel.
"""
def process_group(group):
group.pipeline.id = None
for module in group.pipeline.module_list:
if module.is_group():
process_group(module)
pipeline = Pipeline(version=vistrails.db.versions.currentVersion)
if module.is_group():
process_group(module)
module = module.do_copy()
pipeline.add_module(module)
return serialize(pipeline)
def compute(self):
"""The compute method for Map."""
self.result = None
self.updateFunctionPort()
self.set_output('Result', self.result)
###############################################################################
class NewConstant(Constant):
"""
A new Constant module to be used inside the Map module.
"""
def setValue(self, v):
self.set_output("value", v)
self.upToDate = True
def create_constant(value):
"""
Creates a NewConstant module, to be used for the ModuleConnector.
"""
constant = NewConstant()
constant.setValue(value)
return constant
def get_module(value, signature):
"""
Creates a module for value, in order to do the type checking.
"""
from vistrails.core.modules.basic_modules import Boolean, String, Integer, Float, List
if isinstance(value, Constant):
return type(value)
elif isinstance(value, bool):
return Boolean
elif isinstance(value, str):
return String
elif isinstance(value, int):
return Integer
elif isinstance(value, float):
return Float
elif isinstance(value, list):
return List
elif isinstance(value, tuple):
v_modules = ()
for element in xrange(len(value)):
            v_modules += (get_module(value[element], signature[element]),)
return v_modules
else:
from vistrails.core import debug
debug.warning("Could not identify the type of the list element.")
debug.warning("Type checking is not going to be done inside Map module.")
return None
|
{
"content_hash": "030df4801b6a1b69bbf385f4d1a78bfe",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 98,
"avg_line_length": 37.90998043052838,
"alnum_prop": 0.561377245508982,
"repo_name": "hjanime/VisTrails",
"id": "ee2c905a791ebedbd98e09e0c04ddcb30a5f4b32",
"size": "21286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/parallelflow/map.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from collections import deque
class Node():
def __init__(self,label=None,data=None):
self.label = label
self.data = data
self.children = dict()
        self.iscomplete = False  # set to True when an inserted word ends at this node
def addChild(self,key,data=None):
if not isinstance(key,Node):
self.children[key] = Node(key, data)
else:
self.children[key.label] = key
def __getitem__(self, key):
return self.children[key]
class Trie():
def __init__(self):
self.head = Node()
def __getitem__(self, key):
return self.head.children[key]
def add(self,word):
current_node = self.head
word_finished = True
i = 0
for i in range(len(word)):
if word[i] in current_node.children:
current_node = current_node.children[word[i]]
else:
word_finished = False
break
if not word_finished:
while i < len(word):
current_node.addChild(word[i])
current_node = current_node.children[word[i]]
i += 1
current_node.data = word
current_node.iscomplete = True
def has_word(self, word):
if word == '':
            return False, None
if word is None:
raise ValueError('Trie.has_word requires a not-Null string')
# Start at the top
current_node = self.head
exists = True
for letter in word:
if letter in current_node.children.keys():
current_node = current_node.children[letter]
else:
exists = False
break
if exists:
if current_node.data is None:
exists = False
return exists,current_node.data
def start_with_prefix(self, prefix):
""" Returns a list of all words in tree that start with prefix """
words = list()
if prefix == None:
raise ValueError('Requires not-Null prefix')
# Determine end-of-prefix node
top_node = self.head
for letter in prefix:
if letter in top_node.children:
top_node = top_node.children[letter]
else:
# Prefix not in tree, go no further
return words
# Get words under prefix
if top_node == self.head:
queue = deque([node for key, node in top_node.children.iteritems()])
else:
queue = [top_node]
# Perform a breadth first search under the prefix
# A cool effect of using BFS as opposed to DFS is that BFS will return
# a list of words ordered by increasing length
while queue:
            current_node = queue.pop()  # pop from the right; prepended children make this FIFO
if current_node.data != None:
# Isn't it nice to not have to go back up the tree?
words.append(current_node.data)
queue = [node for key, node in current_node.children.iteritems()] + queue
return words
def getData(self, word):
""" This returns the 'data' of the node identified by the given word """
        if not self.has_word(word)[0]:  # has_word returns an (exists, data) tuple
raise ValueError('{} not found in trie'.format(word))
# Race to the bottom, get data
current_node = self.head
for letter in word:
current_node = current_node[letter]
return current_node.data
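# Illustrative sketch (not part of the original file): exercising
# start_with_prefix() on words of increasing length that share a prefix.
# Children are prepended to the queue and nodes are popped from the opposite
# end, so the walk is breadth-first and shorter words come back first.
def _prefix_search_demo():
    t = Trie()
    for w in ('car', 'cart', 'carton'):
        t.add(w)
    # Expected result: ['car', 'cart', 'carton']
    return t.start_with_prefix('car')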
if __name__ == '__main__':
""" Example use """
trie = Trie()
words = 'hackerearth hackerrank'
for word in words.split():
trie.add(word)
    print "'tom' in trie: ", trie.has_word('tom')
print trie.start_with_prefix('hacker')
|
{
"content_hash": "c5946aecbb6b8dd4963d928bdc28244f",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 85,
"avg_line_length": 30.951219512195124,
"alnum_prop": 0.5282374573154714,
"repo_name": "Faraaz54/python_training_problems",
"id": "af592b85978c5cbcb6b74af048b3cea180210c0e",
"size": "3807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacker_earth/data_structures/Trie_implementation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "650319"
},
{
"name": "Python",
"bytes": "138166"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cubanoshaciamiami.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "47dad5869540a3efef009b6b90b6e172",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 81,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6963562753036437,
"repo_name": "edilio/cubanoshaciamiami.com",
"id": "96f0c97566f7b1bd91686531c55c2e7a4d19ac49",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14658"
},
{
"name": "HTML",
"bytes": "297771"
},
{
"name": "JavaScript",
"bytes": "154244"
},
{
"name": "Python",
"bytes": "44233"
}
],
"symlink_target": ""
}
|
import collections
import os
import tabulate
import yaml
from molecule import state
from molecule import util
from molecule.driver import basedriver
class Molecule(object):
def __init__(self, config, args):
"""
Initialize a new molecule class and returns None.
:param config: A molecule config object.
:param args: A dict of options, arguments and commands from the CLI.
:returns: None
"""
self.env = os.environ.copy()
self.config = config
self.args = args
self._verifier = self._get_verifier()
self._dependency = self._get_dependency()
self._disabled = self._get_disabled()
def main(self):
if not os.path.exists(self.config.config['molecule']['molecule_dir']):
os.makedirs(self.config.config['molecule']['molecule_dir'])
self.state = state.State(
state_file=self.config.config.get('molecule').get('state_file'))
try:
self.driver = self._get_driver()
except basedriver.InvalidDriverSpecified:
msg = "Invalid driver '{}'.".format(self._get_driver_name())
util.print_error(msg)
# TODO(retr0h): Print valid drivers.
util.sysexit()
except basedriver.InvalidProviderSpecified:
msg = "Invalid provider '{}'.".format(self.args['provider'])
util.print_error(msg)
self.args['provider'] = None
self.args['platform'] = None
self.driver = self._get_driver()
self.print_valid_providers()
util.sysexit()
except basedriver.InvalidPlatformSpecified:
msg = "Invalid platform '{}'.".format(self.args['platform'])
util.print_error(msg)
self.args['provider'] = None
self.args['platform'] = None
self.driver = self._get_driver()
self.print_valid_platforms()
util.sysexit()
self.config.populate_instance_names(self.driver.platform)
self._add_or_update_vars('group_vars')
self._add_or_update_vars('host_vars')
@property
def driver(self):
return self._driver
@driver.setter
def driver(self, val):
self._driver = val
@property
def verifier(self):
return self._verifier
@verifier.setter
def verifier(self, val):
self._verifier = val
@property
def dependency(self):
return self._dependency
@dependency.setter
def dependency(self, val):
self._dependency = val
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, val):
self._disabled = val
def write_ssh_config(self):
ssh_config = self._get_ssh_config()
if ssh_config is None:
return
out = self.driver.conf(ssh_config=True)
util.write_file(ssh_config, out)
def print_valid_platforms(self, porcelain=False):
if not porcelain:
util.print_info("AVAILABLE PLATFORMS")
data = []
default_platform = self.driver.default_platform
for platform in self.driver.valid_platforms:
if porcelain:
default = 'd' if platform['name'] == default_platform else ''
else:
default = ' (default)' if platform[
'name'] == default_platform else ''
data.append([platform['name'], default])
self.display_tabulate_data(data)
def print_valid_providers(self, porcelain=False):
if not porcelain:
util.print_info("AVAILABLE PROVIDERS")
data = []
default_provider = self.driver.default_provider
for provider in self.driver.valid_providers:
if porcelain:
default = 'd' if provider['name'] == default_provider else ''
else:
default = ' (default)' if provider[
'name'] == default_provider else ''
data.append([provider['name'], default])
self.display_tabulate_data(data)
def remove_templates(self):
"""
Removes the templates created by molecule and returns None.
:return: None
"""
if os.path.exists(self.config.config['molecule']['rakefile_file']):
os.remove(self.config.config['molecule']['rakefile_file'])
config = self.config.config['ansible']['config_file']
if os.path.exists(config):
with open(config, 'r') as stream:
data = stream.read().splitlines()
if '# Molecule managed' in data:
os.remove(config)
def create_templates(self):
"""
Creates the templates used by molecule and returns None.
:return: None
"""
molecule_dir = self.config.config['molecule']['molecule_dir']
role_path = os.getcwd()
extra_context = self._get_cookiecutter_context(molecule_dir)
util.process_templates('molecule', extra_context, role_path)
def write_instances_state(self):
self.state.change_state('hosts', self._instances_state())
def create_inventory_file(self):
"""
Creates the inventory file used by molecule and returns None.
:return: None
"""
inventory = ''
for instance in self.driver.instances:
inventory += self.driver.inventory_entry(instance)
groups = {}
for instance in self.driver.instances:
ansible_groups = instance.get('ansible_groups')
if ansible_groups:
for group in ansible_groups:
if isinstance(group, str):
if group not in groups:
groups[group] = []
groups[group].append(instance['name'])
elif isinstance(group, dict):
for group_name, group_list in group.iteritems():
for g in group_list:
if group_name not in groups:
groups[group_name] = []
groups[group_name].append(g)
if self.args.get('platform') == 'all':
self.driver.platform = 'all'
for group, subgroups in groups.iteritems():
inventory += '\n[{}]\n'.format(group)
for subgroup in subgroups:
instance_name = util.format_instance_name(
subgroup, self.driver.platform, self.driver.instances)
if instance_name:
inventory += '{}\n'.format(instance_name)
else:
inventory += '{}\n'.format(subgroup)
inventory_file = self.config.config['ansible']['inventory_file']
try:
util.write_file(inventory_file, inventory)
except IOError:
msg = 'WARNING: could not write inventory file {}.'.format(
inventory_file)
util.print_warn(msg)
def remove_inventory_file(self):
if os._exists(self.config.config['ansible']['inventory_file']):
os.remove(self.config.config['ansible']['inventory_file'])
def display_tabulate_data(self, data, headers=None):
"""
Shows the tabulate data on the screen and returns None.
        If no header is defined, only the data is displayed; otherwise, the
results will be shown in a table.
:param data:
:param headers:
:returns: None
.. todo:: Document this method.
"""
# Nothing to display if there is no data.
if not data:
return
# Initialize empty headers if none are provided.
if not headers:
headers = []
# Define the table format based on the headers content.
table_format = "fancy_grid" if headers else "plain"
# Print the results.
print(tabulate.tabulate(data, headers, tablefmt=table_format))
def _get_driver_name(self):
driver = self.args.get('driver')
if driver:
return driver
elif self.config.config.get('driver'):
return self.config.config['driver'].get('name')
elif 'vagrant' in self.config.config:
return 'vagrant'
elif 'docker' in self.config.config:
return 'docker'
elif 'openstack' in self.config.config:
return 'openstack'
def _get_driver(self):
"""
Return an instance of the driver as returned by `_get_driver_name()`.
.. todo:: Implement a pluggable solution vs inline imports.
"""
driver = self._get_driver_name()
if (self.state.driver is not None) and (self.state.driver != driver):
msg = ("Instance(s) were converged with the '{}' driver, "
"but the subcommand is using '{}' driver.")
util.print_error(msg.format(self.state.driver, driver))
util.sysexit()
if driver == 'vagrant':
from molecule.driver import vagrantdriver
return vagrantdriver.VagrantDriver(self)
elif driver == 'docker':
from molecule.driver import dockerdriver
return dockerdriver.DockerDriver(self)
elif driver == 'openstack':
from molecule.driver import openstackdriver
return openstackdriver.OpenstackDriver(self)
raise basedriver.InvalidDriverSpecified()
def _get_ssh_config(self):
return self.driver.ssh_config_file
def _add_or_update_vars(self, target):
"""
        Creates or updates host/group variables if needed.
:param target:
:returns:
.. todo:: Document this method.
"""
if target in self.config.config['ansible']:
vars_target = self.config.config['ansible'][target]
else:
return
molecule_dir = self.config.config['molecule']['molecule_dir']
target_vars_path = os.path.join(molecule_dir, target)
if not os.path.exists(os.path.abspath(target_vars_path)):
os.mkdir(os.path.abspath(target_vars_path))
for target in vars_target.keys():
target_var_content = vars_target[target][0]
path = os.path.join(os.path.abspath(target_vars_path), target)
util.write_file(
path,
yaml.dump(
target_var_content,
default_flow_style=False,
explicit_start=True))
def _instances_state(self):
"""
Creates a dict of formatted instances names and the group(s) they're
part of to be added to state and returns dict containing state
information about current instances.
:return: dict
"""
instances = collections.defaultdict(dict)
for instance in self.driver.instances:
instance_name = util.format_instance_name(
instance['name'], self.driver._platform, self.driver.instances)
groups = set()
ansible_groups = instance.get('ansible_groups')
if ansible_groups:
for group in ansible_groups:
if isinstance(group, str):
groups.add(group)
elif isinstance(group, dict):
for group_name, _ in group.iteritems():
groups.add(group_name.split(':')[0])
instances[instance_name]['groups'] = sorted(list(groups))
return dict(instances)
def _get_verifier(self):
return self.config.config['verifier']['name']
def _get_dependency(self):
return self.config.config['dependency']['name']
def _get_disabled(self):
# Ability to turn off features until we roll them out.
return self.config.config.get('_disabled', [])
def _get_cookiecutter_context(self, molecule_dir):
state_file = self.config.config['molecule']['state_file']
serverspec_dir = self.config.config['molecule']['serverspec_dir']
return {
'repo_name': molecule_dir,
'ansiblecfg_molecule_dir': molecule_dir,
'ansiblecfg_ansible_library_path': 'library',
'rakefile_state_file': state_file,
'rakefile_serverspec_dir': serverspec_dir,
}
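# Illustrative sketch (not part of the original class): the two table formats
# display_tabulate_data() above switches between, using the `tabulate` package
# imported at the top of this module. The platform names are made up.
def _tabulate_format_demo():
    data = [['trusty64', ' (default)'], ['centos7', '']]
    # Without headers the method falls back to the bare "plain" format.
    plain = tabulate.tabulate(data, [], tablefmt='plain')
    # With headers it renders a bordered "fancy_grid" table instead.
    grid = tabulate.tabulate(data, ['name', 'default'], tablefmt='fancy_grid')
    return plain, grid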
|
{
"content_hash": "bc65692b828ac7b8fb77642992c7eb04",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 79,
"avg_line_length": 34.07123287671233,
"alnum_prop": 0.5679478932132519,
"repo_name": "rgreinho/molecule",
"id": "2c8b41c206e130e77ed2647e567a59ba55cc582c",
"size": "13556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molecule/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315730"
},
{
"name": "Ruby",
"bytes": "1110"
},
{
"name": "Shell",
"bytes": "4029"
}
],
"symlink_target": ""
}
|
MAX_CHILDREN = 2
class BTree:
def __init__(self):
self.root = Node()
def insert(self, key=None, value=None):
mid, sib = self.root.insert(key, value)
if mid:
old_root = self.root
self.root = Node()
self.root.children = [old_root, sib]
self.root.values.append(mid)
def __unicode__(self):
return unicode(self.root)
class Node:
id = 0
def __init__(self):
# self.keys = []
self.id = Node.id
Node.id += 1
self.values = []
self.children = []
def to_string(self, depth=0):
sub = ",\n{}".format(' '*depth*3).join(
map(lambda x: x.to_string(depth+1), self.children))
nl = "\n" if self.children else ""
return "{0}({4}) V:{1}, C:[\n{0}{2}{3}{0}]".format(
' '*depth*3, self.values, sub, nl, self.id)
def __unicode__(self):
return self.to_string()
def is_leaf(self):
return len(self.children) == 0
def _find_child_for(self, value):
for i in xrange(len(self.values)):
if value < self.values[i]:
return self.children[i]
elif i == len(self.values)-1:
return self.children[-1]
elif self.values[i+1] > value:
return self.children[i+1]
def insert(self, key=None, value=None):
# -> mid, Node
mid, sib = (None, None)
if self.is_leaf():
self._insert_non_full(key, value)
if len(self.values) > MAX_CHILDREN:
print "SPLIT ({})\n{}\n".format(self.id, unicode(_T))
return self._split()
else:
mid, sib = self._find_child_for(value).insert(value=value)
if mid:
self._insert_non_full(value=mid)
self._insert_child(mid, sib)
if len(self.values) > MAX_CHILDREN:
print "SPLIT_PROPAGATE ({})\n{}\n".format(self.id, unicode(_T))
return self._split()
return None, None
def _insert_child(self, key, node):
for i in xrange(len(self.values)):
if self.values[i] > key:
self.children.insert(i, node)
return
# If we get to here it's the largest item
self.children.append(node)
def _insert_non_full(self, key=None, value=None):
for i in xrange(len(self.values)):
if self.values[i] > value:
self.values.insert(i, value)
return
if self.values[i] == value:
# TODO: add key/val here
return
# If we get to here, it's the largest item
self.values.append(value)
def _split(self):
# -> mid, Node
mid_i = len(self.values) // 2
mid_val = self.values[mid_i]
right = Node()
mid_val_i = mid_i if self.is_leaf() else mid_i+1
right.values.extend(self.values[mid_val_i:])
right.children.extend(self.children[mid_i+1:])
self.values = self.values[:mid_i]
self.children = self.children[:mid_i+1]
return mid_val, right
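# Illustrative sketch (not part of the original file): driving the tree without
# the interactive raw_input() loop below. With MAX_CHILDREN = 2 a node splits
# once it holds three values, so even this short sequence promotes a middle key
# into a fresh root node.
def _btree_demo():
    t = BTree()
    for v in (5, 3, 8, 1, 9):
        t.insert(value=v)
    return unicode(t)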
_T = BTree()
if __name__ == "__main__":
while True:
v = int(raw_input("insert> "))
_T.insert(value=v)
print unicode(_T)
|
{
"content_hash": "cc591a5569977a6dbbaabc7d102aa0fa",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 30.194444444444443,
"alnum_prop": 0.5142594296228151,
"repo_name": "asp2insp/lodestone",
"id": "2b45e72650dfe848bfb3652f3d0b8099800c0a96",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aux/btree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6887"
},
{
"name": "Rust",
"bytes": "49689"
}
],
"symlink_target": ""
}
|
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_utils import importutils
import osprofiler.notifier
from osprofiler import profiler
import osprofiler.web
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import base as objects_base
from cinder import rpc
from cinder import version
from cinder import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Interval, in seconds, between nodes reporting state '
'to datastore'),
cfg.IntOpt('periodic_interval',
default=60,
help='Interval, in seconds, between running periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range, in seconds, to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.StrOpt('osapi_volume_listen',
default="0.0.0.0",
help='IP address on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_listen_port',
default=8776,
help='Port on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_workers',
help='Number of workers for OpenStack Volume API service. '
'The default is equal to the number of CPUs available.'), ]
profiler_opts = [
cfg.BoolOpt("profiler_enabled", default=False,
help=_('If False fully disable profiling feature.')),
cfg.BoolOpt("trace_sqlalchemy", default=False,
help=_("If False doesn't trace SQL requests."))
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.register_opts(profiler_opts, group="profiler")
def setup_profiler(binary, host):
if CONF.profiler.profiler_enabled:
_notifier = osprofiler.notifier.create(
"Messaging", messaging, context.get_admin_context().to_dict(),
rpc.TRANSPORT, "cinder", binary, host)
osprofiler.notifier.set(_notifier)
LOG.warning(
_LW("OSProfiler is enabled.\nIt means that person who knows "
"any of hmac_keys that are specified in "
"/etc/cinder/api-paste.ini can trace his requests. \n"
"In real life only operator can read this file so there "
"is no security issue. Note that even if person can "
"trigger profiler, only admin user can retrieve trace "
"information.\n"
"To disable OSprofiler set in cinder.conf:\n"
"[profiler]\nenabled=false"))
else:
osprofiler.web.disable()
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, *args, **kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
manager_class = profiler.trace_cls("rpc")(manager_class)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
setup_profiler(binary, host)
self.rpcserver = None
def start(self):
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
ctxt = context.get_admin_context()
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
serializer = objects_base.CinderObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone})
self.service_id = service_ref['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
super(Service, self).stop()
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
if self.rpcserver:
self.rpcserver.wait()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def report_state(self):
"""Update the state of this service in the datastore."""
if not self.manager.is_working():
# NOTE(dulek): If manager reports a problem we're not sending
# heartbeats - to indicate that service is actually down.
LOG.error(_LE('Manager for service %(binary)s %(host)s is '
'reporting problems, not sending heartbeat. '
'Service will appear "down".'),
{'binary': self.binary,
'host': self.host})
return
ctxt = context.get_admin_context()
zone = CONF.storage_availability_zone
state_catalog = {}
try:
try:
service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
LOG.debug('The service database object disappeared, '
'recreating it.')
self._create_service_ref(ctxt)
service_ref = db.service_get(ctxt, self.service_id)
state_catalog['report_count'] = service_ref['report_count'] + 1
if zone != service_ref['availability_zone']:
state_catalog['availability_zone'] = zone
db.service_update(ctxt,
self.service_id, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
self.model_disconnected = False
LOG.error(_LE('Recovered model server connection!'))
except db_exc.DBConnectionError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('model server went away'))
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
# such errors shouldn't kill this thread, so we handle them here.
except db_exc.DBError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('DBError encountered: '))
except Exception:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('Exception encountered: '))
class WSGIService(service.ServiceBase):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % name, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)d is invalid, "
"must be greater than 0.") %
{'worker_name': worker_name,
'workers': self.workers})
raise exception.InvalidInput(msg)
setup_profiler(name, self.host)
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port)
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def process_launcher():
return service.ProcessLauncher(CONF)
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(CONF, server, workers=workers)
def wait():
LOG.debug('Full set of CONF:')
for flag in CONF:
flag_get = CONF.get(flag, None)
# hide flag contents from log if contains a password
# should use secret flag when switch over to openstack-common
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and
("mysql:" in flag_get or "postgresql:" in flag_get))):
LOG.debug('%s : FLAG SET ', flag)
else:
LOG.debug('%(flag)s : %(flag_get)s',
{'flag': flag, 'flag_get': flag_get})
try:
_launcher.wait()
except KeyboardInterrupt:
_launcher.stop()
rpc.cleanup()
class Launcher(object):
def __init__(self):
self.launch_service = serve
self.wait = wait
def get_launcher():
# Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows
# due to missing support of non-blocking I/O pipes. For this reason, the
# service must be spawned differently on Windows, using the ServiceLauncher
# class instead.
if os.name == 'nt':
return Launcher()
else:
return process_launcher()
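# Illustrative sketch (not part of this module): roughly how a cinder binary
# wires the pieces above together, assuming CONF and logging have already been
# set up by the caller. The binary name is only an example.
def _example_entry_point():
    server = Service.create(binary='cinder-scheduler')
    serve(server)
    wait()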
|
{
"content_hash": "62428e8047e2c00016da92bb63a8c5db",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 79,
"avg_line_length": 36.95771670190275,
"alnum_prop": 0.57725530576054,
"repo_name": "JioCloud/cinder",
"id": "01f929edee903460ef11b5be18666a84757a0a36",
"size": "18251",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11977630"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
import os
import yaml
import pytest
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from numpy.testing import assert_allclose
from tardis.io.atom_data.base import AtomData
from tardis.simulation import Simulation
from tardis.io.config_reader import Configuration
quantity_comparison = [
(
"/simulation/runner/last_line_interaction_in_id",
"runner.last_line_interaction_in_id",
),
(
"/simulation/runner/last_line_interaction_out_id",
"runner.last_line_interaction_out_id",
),
(
"/simulation/runner/last_line_interaction_shell_id",
"runner.last_line_interaction_shell_id",
),
("/simulation/plasma/j_blues", "plasma.j_blues"),
("/simulation/plasma/j_blue_estimator", "plasma.j_blue_estimator"),
(
"/simulation/runner/packet_luminosity",
"runner.packet_luminosity.cgs.value",
),
(
"/simulation/runner/montecarlo_virtual_luminosity",
"runner.montecarlo_virtual_luminosity.cgs.value",
),
("/simulation/runner/output_nu", "runner.output_nu.cgs.value"),
("/simulation/plasma/ion_number_density", "plasma.ion_number_density"),
("/simulation/plasma/level_number_density", "plasma.level_number_density"),
("/simulation/plasma/electron_densities", "plasma.electron_densities"),
("/simulation/plasma/tau_sobolevs", "plasma.tau_sobolevs"),
(
"/simulation/plasma/transition_probabilities",
"plasma.transition_probabilities",
),
("/simulation/model/t_radiative", "model.t_radiative.cgs.value"),
("/simulation/model/w", "model.w"),
("/simulation/runner/j_estimator", "runner.j_estimator"),
("/simulation/runner/nu_bar_estimator", "runner.nu_bar_estimator"),
(
"/simulation/plasma/j_blues_norm_factor",
"plasma.j_blues_norm_factor.cgs.value",
),
(
"/simulation/plasma/luminosity_inner",
"plasma.luminosity_inner.cgs.value",
),
]
@pytest.fixture(params=quantity_comparison)
def model_quantities(request):
return request.param
@pytest.mark.skipif(
'not config.getvalue("integration-tests")',
reason="integration tests are not included in this run",
)
@pytest.mark.integration
class TestIntegration(object):
"""Slow integration test for various setups present in subdirectories of
``tardis/tests/integration_tests``.
"""
@classmethod
@pytest.fixture(scope="class", autouse=True)
def setup(self, request, reference, data_path):
"""
This method does initial setup of creating configuration and performing
        a single run of the integration test.
"""
# Get capture manager
capmanager = request.config.pluginmanager.getplugin("capturemanager")
# The last component in dirpath can be extracted as name of setup.
self.name = data_path["setup_name"]
self.config_file = os.path.join(
data_path["config_dirpath"], "config.yml"
)
# A quick hack to use atom data per setup. Atom data is ingested from
# local HDF or downloaded and cached from a url, depending on data_path
# keys.
atom_data_name = yaml.load(open(self.config_file), Loader=yaml.CLoader)[
"atom_data"
]
# Get the path to HDF file:
atom_data_filepath = os.path.join(
data_path["atom_data_path"], atom_data_name
)
# Load atom data file separately, pass it for forming tardis config.
self.atom_data = AtomData.from_hdf(atom_data_filepath)
# Check whether the atom data file in current run and the atom data
# file used in obtaining the reference data are same.
# TODO: hard coded UUID for kurucz atom data file, generalize it later.
# kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7"
# assert self.atom_data.uuid1 == kurucz_data_file_uuid1
# Create a Configuration through yaml file and atom data.
tardis_config = Configuration.from_yaml(self.config_file)
# Check whether current run is with less packets.
if request.config.getoption("--less-packets"):
less_packets = request.config.integration_tests_config[
"less_packets"
]
tardis_config["montecarlo"]["no_of_packets"] = less_packets[
"no_of_packets"
]
tardis_config["montecarlo"]["last_no_of_packets"] = less_packets[
"last_no_of_packets"
]
# We now do a run with prepared config and get the simulation object.
self.result = Simulation.from_config(
tardis_config, atom_data=self.atom_data
)
capmanager.suspend_global_capture(True)
# If current test run is just for collecting reference data, store the
# output model to HDF file, save it at specified path. Skip all tests.
# Else simply perform the run and move further for performing
# assertions.
self.result.run()
if request.config.getoption("--generate-reference"):
ref_data_path = os.path.join(
data_path["reference_path"], "{0}.h5".format(self.name)
)
if os.path.exists(ref_data_path):
pytest.skip(
"Reference data {0} does exist and tests will not "
"proceed generating new data".format(ref_data_path)
)
self.result.to_hdf(file_path=ref_data_path)
pytest.skip(
"Reference data saved at {0}".format(
data_path["reference_path"]
)
)
capmanager.resume_global_capture()
# Get the reference data through the fixture.
self.reference = reference
def test_model_quantities(self, model_quantities):
reference_quantity_name, tardis_quantity_name = model_quantities
if reference_quantity_name not in self.reference:
pytest.skip(
"{0} not calculated in this run".format(reference_quantity_name)
)
reference_quantity = self.reference[reference_quantity_name]
tardis_quantity = eval("self.result." + tardis_quantity_name)
assert_allclose(tardis_quantity, reference_quantity)
def plot_t_rad(self):
plt.suptitle("Shell temperature for packets", fontweight="bold")
figure = plt.figure()
ax = figure.add_subplot(111)
ax.set_xlabel("Shell id")
ax.set_ylabel("t_rad")
result_line = ax.plot(
self.result.model.t_rad.cgs,
color="blue",
marker=".",
label="Result",
)
reference_line = ax.plot(
self.reference["/simulation/model/t_rad"],
color="green",
marker=".",
label="Reference",
)
error_ax = ax.twinx()
error_line = error_ax.plot(
(
1
- self.result.model.t_rad.cgs.value
/ self.reference["/simulation/model/t_rad"]
),
color="red",
marker=".",
label="Rel. Error",
)
error_ax.set_ylabel("Relative error (1 - result / reference)")
lines = result_line + reference_line + error_line
labels = [l.get_label() for l in lines]
ax.legend(lines, labels, loc="lower left")
return figure
def test_spectrum(self, plot_object):
plot_object.add(self.plot_spectrum(), "{0}_spectrum".format(self.name))
assert_allclose(
self.reference["/simulation/runner/spectrum/luminosity_density_nu"],
self.result.runner.spectrum.luminosity_density_nu.cgs.value,
)
assert_allclose(
self.reference["/simulation/runner/spectrum/wavelength"],
self.result.runner.spectrum.wavelength.cgs.value,
)
assert_allclose(
self.reference[
"/simulation/runner/spectrum/luminosity_density_lambda"
],
self.result.runner.spectrum.luminosity_density_lambda.cgs.value,
)
def plot_spectrum(self):
# `ldl_` prefixed variables associated with `luminosity_density_lambda`.
# Axes of subplot are extracted, if we wish to make multiple plots
# for different spectrum quantities all in one figure.
gs = plt.GridSpec(2, 1, height_ratios=[3, 1])
spectrum_ax = plt.subplot(gs[0])
spectrum_ax.set_ylabel("Flux [cgs]")
deviation = 1 - (
self.result.runner.spectrum.luminosity_density_lambda.cgs.value
/ self.reference[
"/simulation/runner/spectrum/luminosity_density_lambda"
]
)
spectrum_ax.plot(
self.reference["/simulation/runner/spectrum/wavelength"],
self.reference[
"/simulation/runner/spectrum/luminosity_density_lambda"
],
color="black",
)
spectrum_ax.plot(
self.reference["/simulation/runner/spectrum/wavelength"],
self.result.runner.spectrum.luminosity_density_lambda.cgs.value,
color="red",
)
spectrum_ax.set_xticks([])
deviation_ax = plt.subplot(gs[1])
deviation_ax.plot(
self.reference["/simulation/runner/spectrum/wavelength"],
deviation,
color="black",
)
deviation_ax.set_xlabel("Wavelength [Angstrom]")
return plt.gcf()
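# Illustrative sketch (not part of the test module): each quantity_comparison
# entry pairs an HDF key in the reference file with a dotted attribute path on
# the Simulation object. operator.attrgetter is shown here as an eval-free way
# to resolve such a path; the simulation argument is hypothetical.
def _resolve_quantity(simulation, dotted_path):
    from operator import attrgetter
    # e.g. dotted_path = "plasma.electron_densities"
    return attrgetter(dotted_path)(simulation)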
|
{
"content_hash": "f84c4fe6e3b636006ae785cff296871f",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 35.44814814814815,
"alnum_prop": 0.6044300491066764,
"repo_name": "kaushik94/tardis",
"id": "c27195584169d0bae07e51f45bd71a15d7956631",
"size": "9571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tardis/tests/integration_tests/test_integration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "149292"
},
{
"name": "C++",
"bytes": "12034"
},
{
"name": "Python",
"bytes": "626803"
},
{
"name": "Shell",
"bytes": "3407"
}
],
"symlink_target": ""
}
|
import hmac
from hashlib import sha1
import os
from google.cloud import secretmanager
PROJECT_NAME = os.environ.get("PROJECT_NAME")
class EventSource(object):
"""
A source of event data being delivered to the webhook
"""
def __init__(self, signature_header, verification_func):
self.signature = signature_header
self.verification = verification_func
def github_verification(signature, body):
"""
Verifies that the signature received from the github event is accurate
"""
if not signature:
raise Exception("Github signature is empty")
expected_signature = "sha1="
try:
# Get secret from Cloud Secret Manager
secret = get_secret(PROJECT_NAME, "event-handler", "latest")
# Compute the hashed signature
hashed = hmac.new(secret, body, sha1)
expected_signature += hashed.hexdigest()
except Exception as e:
print(e)
return hmac.compare_digest(signature, expected_signature)
def circleci_verification(signature, body):
"""
Verifies that the signature received from the circleci event is accurate
"""
if not signature:
raise Exception("CircleCI signature is empty")
expected_signature = "v1="
try:
# Get secret from Cloud Secret Manager
secret = get_secret(PROJECT_NAME, "event-handler", "latest")
# Compute the hashed signature
hashed = hmac.new(secret, body, 'sha256')
expected_signature += hashed.hexdigest()
except Exception as e:
print(e)
return hmac.compare_digest(signature, expected_signature)
def simple_token_verification(token, body):
"""
Verifies that the token received from the event is accurate
"""
if not token:
raise Exception("Token is empty")
secret = get_secret(PROJECT_NAME, "event-handler", "1")
return secret.decode() == token
def get_secret(project_name, secret_name, version_num):
"""
Returns secret payload from Cloud Secret Manager
"""
try:
client = secretmanager.SecretManagerServiceClient()
name = client.secret_version_path(
project_name, secret_name, version_num
)
secret = client.access_secret_version(name)
return secret.payload.data
except Exception as e:
print(e)
def get_source(headers):
"""
Gets the source from the User-Agent header
"""
if "X-Gitlab-Event" in headers:
return "gitlab"
if "tekton" in headers.get("Ce-Type", ""):
return "tekton"
if "GitHub-Hookshot" in headers.get("User-Agent", ""):
return "github"
if "Circleci-Event-Type" in headers:
return "circleci"
if "Argo-CD" in headers.get("User-Agent", ""):
return "argocd"
return headers.get("User-Agent")
AUTHORIZED_SOURCES = {
"github": EventSource(
"X-Hub-Signature", github_verification
),
"gitlab": EventSource(
"X-Gitlab-Token", simple_token_verification
),
"tekton": EventSource(
"tekton-secret", simple_token_verification
),
"circleci": EventSource(
"Circleci-Signature", circleci_verification
),
"argocd": EventSource(
"Argo-Signature", simple_token_verification
),
}
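# Illustrative sketch (not part of the original module): one way a webhook
# handler might combine get_source() and AUTHORIZED_SOURCES to verify an
# incoming request. The `headers`/`body` arguments and the `verify_event`
# helper are hypothetical and only show how the verification callables above
# are intended to be invoked.
def verify_event(headers, body):
    """Return True if the request comes from a known, verified source."""
    source = get_source(headers)
    if source not in AUTHORIZED_SOURCES:
        return False
    event_source = AUTHORIZED_SOURCES[source]
    signature = headers.get(event_source.signature, "")
    return event_source.verification(signature, body)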
|
{
"content_hash": "e07ebfe41b258b8018152e8269648323",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 76,
"avg_line_length": 26.126984126984127,
"alnum_prop": 0.6376063183475091,
"repo_name": "GoogleCloudPlatform/fourkeys",
"id": "c592ce6b3882b5256defe608b49cbc557250c645",
"size": "3868",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "terraform/modules/fourkeys-images/files/event-handler/sources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "26412"
},
{
"name": "HCL",
"bytes": "37792"
},
{
"name": "JavaScript",
"bytes": "92"
},
{
"name": "Python",
"bytes": "141681"
},
{
"name": "Shell",
"bytes": "25693"
}
],
"symlink_target": ""
}
|
import unittest
import mock
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestAveragePooling2D(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1,
(2, 3, 2, 2)).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {'eps': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {
'eps': 1e-1, 'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.average_pooling_2d(x, 3, stride=2,
pad=1, use_cudnn=use_cudnn)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
for k in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[k, c]
expect = numpy.array([
[x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
[x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]]) / 9
gradient_check.assert_allclose(
expect, y_data[k, c], **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
def check_backward(self, x_data, y_grad, use_cudnn=True):
gradient_check.check_backward(
functions.AveragePooling2D(3, 2, 1, False, use_cudnn),
x_data, y_grad, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
@testing.parameterize(*testing.product({
'use_cudnn': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestAveragePooling2DCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.arange(
2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
self.gy = cuda.cupy.random.uniform(-1, 1,
(2, 3, 2, 2)).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.average_pooling_2d(
x, 3, stride=2, pad=1, use_cudnn=self.use_cudnn)
@unittest.skipIf(cuda.cudnn_enabled and
cuda.cudnn.cudnn.getVersion() < 3000,
'Only cudnn ver>=3 supports average-pooling2d')
def test_call_cudnn_forward(self):
with mock.patch('cupy.cudnn.cudnn.poolingForward') as func:
self.forward()
self.assertEqual(func.called, self.use_cudnn)
@unittest.skipIf(cuda.cudnn_enabled and
cuda.cudnn.cudnn.getVersion() < 3000,
'Only cudnn ver>=3 supports average-pooling2d')
def test_call_cudnn_backward(self):
y = self.forward()
y.grad = self.gy
with mock.patch('cupy.cudnn.cudnn.poolingBackward') as func:
y.backward()
self.assertEqual(func.called, self.use_cudnn)
testing.run_module(__name__, __file__)
|
{
"content_hash": "eec97056933d4d9f165479c07e94b951",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 77,
"avg_line_length": 34.33057851239669,
"alnum_prop": 0.5816080885893115,
"repo_name": "benob/chainer",
"id": "a8d6639280f3841ba638e38852ed91876312a449",
"size": "4154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/pooling_tests/test_average_pooling_2d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "29678"
},
{
"name": "Cuda",
"bytes": "6634"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1690503"
}
],
"symlink_target": ""
}
|
import asyncio
import aiohttp
@asyncio.coroutine
def aiohttp_request(loop, method, url, as_text, **kwargs):
with aiohttp.ClientSession(loop=loop) as session:
response = yield from session.request(method, url, **kwargs) # NOQA: E999
if as_text:
content = yield from response.text() # NOQA: E999
else:
content = yield from response.json() # NOQA: E999
return response, content
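# Illustrative usage sketch (not part of the original helper): driving
# aiohttp_request() from a plain event loop. The URL is a placeholder and the
# snippet only demonstrates the (loop, method, url, as_text) calling convention.
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    response, content = loop.run_until_complete(
        aiohttp_request(loop, "GET", "http://httpbin.org/get", as_text=True)
    )
    print(response.status, len(content))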
|
{
"content_hash": "5af1ef5c8ddbfd617eab1619fdb50c66",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 33.84615384615385,
"alnum_prop": 0.6454545454545455,
"repo_name": "poussik/vcrpy",
"id": "5b77fae0c69fbfca696956d858c0a428adc82141",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/aiohttp_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "230968"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
}
|
"""Checks if a set of configuration(s) is version and dependency compatible."""
import re
import sys
import six
from six.moves import range
import six.moves.configparser
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/compat_checker"
def _compare_versions(v1, v2):
"""Compare two versions and return information on which is smaller vs. larger.
Args:
v1: String that is a version to be compared against `v2`.
v2: String that is a version to be compared against `v1`.
Returns:
Dict that stores larger version with key `larger` and smaller version with
key `smaller`.
e.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`}
Raises:
RuntimeError: If asked to compare `inf` to `inf`.
"""
  # Throw an error if asked to compare `inf` to `inf`.
if v1 == "inf" and v2 == "inf":
raise RuntimeError("Cannot compare `inf` to `inf`.")
rtn_dict = {"smaller": None, "larger": None}
v1_list = six.ensure_str(v1).split(".")
v2_list = six.ensure_str(v2).split(".")
# Take care of cases with infinity (arg=`inf`).
if v1_list[0] == "inf":
v1_list[0] = str(int(v2_list[0]) + 1)
if v2_list[0] == "inf":
v2_list[0] = str(int(v1_list[0]) + 1)
  # Determine which of the two lists is longer vs. shorter.
v_long = v1_list if len(v1_list) >= len(v2_list) else v2_list
v_short = v1_list if len(v1_list) < len(v2_list) else v2_list
larger, smaller = None, None
for i, ver in enumerate(v_short, start=0):
if int(ver) > int(v_long[i]):
larger = _list_to_string(v_short, ".")
smaller = _list_to_string(v_long, ".")
elif int(ver) < int(v_long[i]):
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
if i == len(v_short) - 1:
if v_long[i + 1:] == ["0"]*(len(v_long) - 1 - i):
larger = "equal"
smaller = "equal"
else:
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
# Go to next round.
pass
if larger:
break
rtn_dict["smaller"] = smaller
rtn_dict["larger"] = larger
return rtn_dict
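# Illustrative examples (not part of the original source):
#   _compare_versions("1.2.0", "1.5.0")  # -> {"smaller": "1.2.0", "larger": "1.5.0"}
#   _compare_versions("1.2.0", "1.2.0")  # -> {"smaller": "equal", "larger": "equal"}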
def _list_to_string(l, s):
"""Concatenates list items into a single string separated by `s`.
Args:
l: List with items to be concatenated into a single string.
s: String or char that will be concatenated in between each item.
Returns:
String that has all items in list `l` concatenated with `s` separator.
"""
return s.join(l)
def _get_func_name():
"""Get the name of current function.
Returns:
String that is the name of current function.
"""
return tf_inspect.stack()[1][3]
class ConfigCompatChecker(object):
"""Class that checks configuration versions and dependency compatibilities.
`ConfigCompatChecker` checks a given set of configurations and their versions
against supported versions and dependency rules defined in `.ini` config file.
For project `TensorFlow Builder`, it functions as a sub-module for the builder
service that validates requested build configurations from a client prior to
initiating a TensorFlow build.
"""
class _Reqs(object):
"""Class that stores specifications related to a single requirement.
`_Reqs` represents a single version or dependency requirement specified in
    the `.ini` config file. It is meant to be used inside `ConfigCompatChecker`
to help organize and identify version and dependency compatibility for a
given configuration (e.g. gcc version) required by the client.
"""
def __init__(self, req, config, section):
"""Initializes a version or dependency requirement object.
Args:
req: List that contains individual supported versions or a single string
that contains `range` definition.
e.g. [`range(1.0, 2.0) include(3.0) exclude(1.5)`]
e.g. [`1.0`, `3.0`, `7.1`]
config: String that is the configuration name.
e.g. `platform`
section: String that is the section name from the `.ini` config file
under which the requirement is defined.
e.g. `Required`, `Optional`, `Unsupported`, `Dependency`
"""
# Req class variables.
self.req = req
self.exclude = None
self.include = None
self.range = [None, None] # for [min, max]
self.config = config
self._req_type = "" # e.g. `range` or `no_range`
self._section = section
self._initialized = None
self._error_message = []
# Parse and store requirement specifications.
self.parse_single_req()
@property
def get_status(self):
"""Get status of `_Reqs` initialization.
Returns:
Tuple
(Boolean indicating initialization status,
List of error messages, if any)
"""
return self._initialized, self._error_message
def __str__(self):
"""Prints a requirement and its components.
Returns:
String that has concatenated information about a requirement.
"""
info = {
"section": self._section,
"config": self.config,
"req_type": self._req_type,
"req": str(self.req),
"range": str(self.range),
"exclude": str(self.exclude),
"include": str(self.include),
"init": str(self._initialized)
}
req_str = "\n >>> _Reqs Instance <<<\n"
req_str += "Section: {section}\n"
req_str += "Configuration name: {config}\n"
req_str += "Requirement type: {req_type}\n"
req_str += "Requirement: {req}\n"
req_str += "Range: {range}\n"
req_str += "Exclude: {exclude}\n"
req_str += "Include: {include}\n"
req_str += "Initialized: {init}\n\n"
return req_str.format(**info)
def parse_single_req(self):
"""Parses a requirement and stores information.
`self.req` _initialized in `__init__` is called for retrieving the
requirement.
A requirement can come in two forms:
[1] String that includes `range` indicating range syntax for defining
a requirement.
e.g. `range(1.0, 2.0) include(3.0) exclude(1.5)`
[2] List that includes individual supported versions or items.
e.g. [`1.0`, `3.0`, `7.1`]
For a list type requirement, it directly stores the list to
`self.include`.
Call `get_status` for checking the status of the parsing. This function
sets `self._initialized` to `False` and immediately returns with an error
message upon encountering a failure. It sets `self._initialized` to `True`
and returns without an error message upon success.
"""
# Regex expression for filtering requirement line. Please refer
# to docstring above for more information.
expr = r"(range\()?([\d\.\,\s]+)(\))?( )?(include\()?"
expr += r"([\d\.\,\s]+)?(\))?( )?(exclude\()?([\d\.\,\s]+)?(\))?"
# Check that arg `req` is not empty.
if not self.req:
err_msg = "[Error] Requirement is missing. "
err_msg += "(section = %s, " % str(self._section)
err_msg += "config = %s, req = %s)" % (str(self.config), str(self.req))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
# For requirement given in format with `range`. For example:
# python = [range(3.3, 3.7) include(2.7)] as opposed to
# python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7]
if "range" in self.req[0]:
self._req_type = "range"
match = re.match(expr, self.req[0])
if not match:
err_msg = "[Error] Encountered issue when parsing the requirement."
err_msg += " (req = %s, match = %s)" % (str(self.req), str(match))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
else:
match_grp = match.groups()
match_size = len(match_grp)
for i, m in enumerate(match_grp[0:match_size-1], start=0):
# Get next index. For example:
# | idx | next_idx |
# +------------+------------+
# | `range(` | `1.1, 1.5` |
# | `exclude(` | `1.1, 1.5` |
# | `include(` | `1.1, 1.5` |
next_match = match_grp[i + 1]
if m not in ["", None, " ", ")"]:
if "range" in m:
# Check that the range definition contains only one comma.
# If more than one comma, then there is format error with the
# requirement config file.
comma_count = next_match.count(",")
if comma_count > 1 or comma_count == 0:
err_msg = "[Error] Found zero or more than one comma in range"
err_msg += " definition. (req = %s, " % str(self.req)
err_msg += "match = %s)" % str(next_match)
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
# Remove empty space in range and separate min, max by
# comma. (e.g. `1.0, 2.0` => `1.0,2.0` => [`1.0`, `2.0`])
min_max = next_match.replace(" ", "").split(",")
# Explicitly define min and max values.
# If min_max = ['', ''], then `range(, )` was provided as
# req, which is equivalent to `include all versions`.
if not min_max[0]:
min_max[0] = "0"
if not min_max[1]:
min_max[1] = "inf"
self.range = min_max
if "exclude" in m:
self.exclude = next_match.replace(" ", "").split(",")
if "include" in m:
self.include = next_match.replace(" ", "").split(",")
self._initialized = True
# For requirement given in format without a `range`. For example:
# python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7] as opposed to
# python = [range(3.3, 3.7) include(2.7)]
else:
self._req_type = "no_range"
# Requirement (self.req) should be a list.
if not isinstance(self.req, list):
err_msg = "[Error] Requirement is not a list."
err_msg += "(req = %s, " % str(self.req)
err_msg += "type(req) = %s)" % str(type(self.req))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
else:
self.include = self.req
self._initialized = True
return
def __init__(self, usr_config, req_file):
"""Initializes a configuration compatibility checker.
Args:
usr_config: Dict of all configuration(s) whose version compatibilities are
to be checked against the rules defined in the `.ini` config
file.
req_file: String that is the full name of the `.ini` config file.
e.g. `config.ini`
"""
# ConfigCompatChecker class variables.
self.usr_config = usr_config
self.req_file = req_file
self.warning_msg = []
self.error_msg = []
# Get and store requirements.
reqs_all = self.get_all_reqs()
self.required = reqs_all["required"]
self.optional = reqs_all["optional"]
self.unsupported = reqs_all["unsupported"]
self.dependency = reqs_all["dependency"]
self.successes = []
self.failures = []
def get_all_reqs(self):
"""Parses all compatibility specifications listed in the `.ini` config file.
    Reads and parses all compatibility specifications from the `.ini`
    config file by sections. It then populates appropriate dicts that represent
    each section (e.g. `self.required`) and returns a dict of the populated
    dicts.
Returns:
Dict of dict
{ `required`: Dict of `Required` configs and supported versions,
`optional`: Dict of `Optional` configs and supported versions,
`unsupported`: Dict of `Unsupported` configs and supported versions,
`dependency`: Dict of `Dependency` configs and supported versions }
"""
# First check if file exists. Exit on failure.
try:
open(self.req_file, "rb")
except IOError:
msg = "[Error] Cannot read file '%s'." % self.req_file
logging.error(msg)
sys.exit(1)
# Store status of parsing requirements. For local usage only.
curr_status = True
# Initialize config parser for parsing version requirements file.
parser = six.moves.configparser.ConfigParser()
parser.read(self.req_file)
if not parser.sections():
err_msg = "[Error] Empty config file. "
err_msg += "(file = %s, " % str(self.req_file)
err_msg += "parser sectons = %s)" % str(parser.sections())
self.error_msg.append(err_msg)
logging.error(err_msg)
curr_status = False
# Each dependency dict will have the following format.
# _dict = {
# `<config_name>` : [_Reqs()],
# `<config_name>` : [_Reqs()]
# }
required_dict = {}
optional_dict = {}
unsupported_dict = {}
dependency_dict = {}
# Parse every config under each section defined in config file
# and populate requirement dict(s).
for section in parser.sections():
all_configs = parser.options(section)
for config in all_configs:
spec = parser.get(section, config)
# Separately manage each section:
# `Required`,
# `Optional`,
# `Unsupported`,
# `Dependency`
# One of the sections is required.
if section == "Dependency":
dependency_dict[config] = []
spec_split = spec.split(",\n")
          # First dependency item may or may not have `[` depending
# on the indentation style in the config (.ini) file.
# If it has `[`, then either skip or remove from string.
if spec_split[0] == "[":
spec_split = spec_split[1:]
elif "[" in spec_split[0]:
spec_split[0] = spec_split[0].replace("[", "")
else:
warn_msg = "[Warning] Config file format error: Missing `[`."
warn_msg += "(section = %s, " % str(section)
warn_msg += "config = %s)" % str(config)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
          # Last dependency item may or may not have `]` depending
          # on the indentation style in the config (.ini) file.
          # If it has `]`, then either skip or remove from string.
if spec_split[-1] == "]":
spec_split = spec_split[:-1]
elif "]" in spec_split[-1]:
spec_split[-1] = spec_split[-1].replace("]", "")
else:
warn_msg = "[Warning] Config file format error: Missing `]`."
warn_msg += "(section = %s, " % str(section)
warn_msg += "config = %s)" % str(config)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# Parse `spec_split` which is a list of all dependency rules
# retrieved from the config file.
# Create a _Reqs() instance for each rule and store it under
# appropriate class dict (e.g. dependency_dict) with a proper
# key.
#
# For dependency definition, it creates one _Reqs() instance each
# for requirement and dependency. For example, it would create
# a list in the following indexing sequence:
#
          #   [`config`, <`config` _Reqs()>, `dep`, <`dep` _Reqs()>]
#
# For example:
# [`python`, _Reqs(), `tensorflow`, _Reqs()] for
# `python 3.7 requires tensorflow 1.13`
for rule in spec_split:
# Filter out only the necessary information from `rule` string.
spec_dict = self.filter_dependency(rule)
# Create _Reqs() instance for each rule.
cfg_name = spec_dict["cfg"] # config name
dep_name = spec_dict["cfgd"] # dependency name
cfg_req = self._Reqs(
self.convert_to_list(spec_dict["cfg_spec"], " "),
config=cfg_name,
section=section
)
dep_req = self._Reqs(
self.convert_to_list(spec_dict["cfgd_spec"], " "),
config=dep_name,
section=section
)
# Check status of _Reqs() initialization. If wrong formats are
# detected from the config file, it would return `False` for
# initialization status.
# `<_Reqs>.get_status` returns [_initialized, _error_message]
cfg_req_status = cfg_req.get_status
dep_req_status = dep_req.get_status
if not cfg_req_status[0] or not dep_req_status[0]:
# `<_Reqs>.get_status()[1]` returns empty upon successful init.
msg = "[Error] Failed to create _Reqs() instance for a "
msg += "dependency item. (config = %s, " % str(cfg_name)
msg += "dep = %s)" % str(dep_name)
logging.error(msg)
self.error_msg.append(cfg_req_status[1])
self.error_msg.append(dep_req_status[1])
curr_status = False
break
else:
dependency_dict[config].append(
[cfg_name, cfg_req, dep_name, dep_req])
# Break out of `if section == 'Dependency'` block.
if not curr_status:
break
else:
if section == "Required":
add_to = required_dict
elif section == "Optional":
add_to = optional_dict
elif section == "Unsupported":
add_to = unsupported_dict
else:
msg = "[Error] Section name `%s` is not accepted." % str(section)
msg += "Accepted section names are `Required`, `Optional`, "
msg += "`Unsupported`, and `Dependency`."
logging.error(msg)
self.error_msg.append(msg)
curr_status = False
break
# Need to make sure `req` argument for _Reqs() instance is always
# a list. If not, convert to list.
req_list = self.convert_to_list(self.filter_line(spec), " ")
add_to[config] = self._Reqs(req_list, config=config, section=section)
# Break out of `for config in all_configs` loop.
if not curr_status:
break
# Break out of `for section in parser.sections()` loop.
if not curr_status:
break
return_dict = {
"required": required_dict,
"optional": optional_dict,
"unsupported": unsupported_dict,
"dependency": dependency_dict
}
return return_dict
def filter_dependency(self, line):
"""Filters dependency compatibility rules defined in the `.ini` config file.
Dependency specifications are defined as the following:
`<config> <config_version> requires <dependency> <dependency_version>`
e.g.
`python 3.7 requires tensorflow 1.13`
`tensorflow range(1.0.0, 1.13.1) requires gcc range(4.8, )`
Args:
line: String that is a dependency specification defined under `Dependency`
section in the `.ini` config file.
Returns:
Dict with configuration and its dependency information.
e.g. {`cfg`: `python`, # configuration name
`cfg_spec`: `3.7`, # configuration version
`cfgd`: `tensorflow`, # dependency name
            `cfgd_spec`: `1.13`}    # dependency version
"""
line = line.strip("\n")
expr = r"(?P<cfg>[\S]+) (?P<cfg_spec>range\([\d\.\,\s]+\)( )?"
expr += r"(include\([\d\.\,\s]+\))?( )?(exclude\([\d\.\,\s]+\))?( )?"
expr += r"|[\d\,\.\s]+) requires (?P<cfgd>[\S]+) (?P<cfgd_spec>range"
expr += r"\([\d\.\,\s]+\)( )?(include\([\d\.\,\s]+\))?( )?"
expr += r"(exclude\([\d\.\,\s]+\))?( )?|[\d\,\.\s]+)"
r = re.match(expr, line.strip("\n"))
return r.groupdict()
def convert_to_list(self, item, separator):
"""Converts a string into a list with a separator.
Args:
item: String that needs to be separated into a list by a given separator.
List item is also accepted but will take no effect.
      separator: String with which the `item` will be split.
Returns:
      List that is a split version of a given input string.
e.g. Input: `1.0, 2.0, 3.0` with `, ` separator
Output: [1.0, 2.0, 3.0]
"""
out = None
if not isinstance(item, list):
if "range" in item:
# If arg `item` is a single string, then create a list with just
# the item.
out = [item]
else:
# arg `item` can come in as the following:
# `1.0, 1.1, 1.2, 1.4`
# if requirements were defined without the `range()` format.
# In such a case, create a list separated by `separator` which is
# an empty string (' ') in this case.
out = item.split(separator)
for i in range(len(out)):
out[i] = out[i].replace(",", "")
# arg `item` is a list already.
else:
out = [item]
return out
def filter_line(self, line):
"""Removes `[` or `]` from the input line.
Args:
line: String that is a compatibility specification line from the `.ini`
config file.
Returns:
String that is a compatibility specification line without `[` and `]`.
"""
filtered = []
warn_msg = []
splited = line.split("\n")
# If arg `line` is empty, then requirement might be missing. Add
# to warning as this issue will be caught in _Reqs() initialization.
    if not line:
warn_msg = "[Warning] Empty line detected while filtering lines."
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, first line in requirement definition will include `[`
# in the config file (.ini). Remove it.
if splited[0] == "[":
filtered = splited[1:]
elif "[" in splited[0]:
      splited[0] = splited[0].replace("[", "")
filtered = splited
# If `[` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `[` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, last line in requirement definition will include `]`
# in the config file (.ini). Remove it.
if filtered[-1] == "]":
filtered = filtered[:-1]
elif "]" in filtered[-1]:
filtered[-1] = six.ensure_str(filtered[-1]).replace("]", "")
# If `]` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `]` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
return filtered
def in_range(self, ver, req):
"""Checks if a version satisfies a version and/or compatibility requirement.
Args:
ver: List whose first item is a config version that needs to be checked
for support status and version compatibility.
e.g. ver = [`1.0`]
req: `_Reqs` class instance that represents a configuration version and
compatibility specifications.
Returns:
Boolean output of checking if version `ver` meets the requirement
stored in `req` (or a `_Reqs` requirements class instance).
"""
# If `req.exclude` is not empty and `ver` is in `req.exclude`,
# no need to proceed to next set of checks as it is explicitly
# NOT supported.
if req.exclude is not None:
for v in ver:
if v in req.exclude:
return False
# If `req.include` is not empty and `ver` is in `req.include`,
# no need to proceed to next set of checks as it is supported and
# NOT unsupported (`req.exclude`).
include_checked = False
if req.include is not None:
for v in ver:
if v in req.include:
return True
include_checked = True
# If `req.range` is not empty, then `ver` is defined with a `range`
# syntax. Check whether `ver` falls under the defined supported
# range.
if req.range != [None, None]:
min_v = req.range[0] # minimum supported version
max_v = req.range[1] # maximum supported version
ver = ver[0] # version to compare
lg = _compare_versions(min_v, ver)["larger"] # `ver` should be larger
sm = _compare_versions(ver, max_v)["smaller"] # `ver` should be smaller
if lg in [ver, "equal"] and sm in [ver, "equal", "inf"]:
return True
else:
err_msg = "[Error] Version is outside of supported range. "
err_msg += "(config = %s, " % str(req.config)
err_msg += "version = %s, " % str(ver)
err_msg += "supported range = %s)" % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
else:
err_msg = ""
if include_checked:
# user config is not supported as per exclude, include, range
# specification.
err_msg = "[Error] Version is outside of supported range. "
else:
# user config is not defined in exclude, include or range. config file
# error.
err_msg = "[Error] Missing specification. "
err_msg += "(config = %s, " % str(req.config)
err_msg += "version = %s, " % str(ver)
err_msg += "supported range = %s)" % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
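  # Illustrative example (not in the original source): for a requirement parsed
  # from `range(1.0, 2.0) exclude(1.5)`, in_range(["1.4"], req) returns True,
  # while in_range(["1.5"], req) and in_range(["2.5"], req) both return False.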
def _print(self, *args):
"""Prints compatibility check status and failure or warning messages.
Prints to console without using `logging`.
Args:
*args: String(s) that is one of:
[`failures`, # all failures
`successes`, # all successes
`failure_msgs`, # failure message(s) recorded upon failure(s)
`warning_msgs`] # warning message(s) recorded upon warning(s)
Raises:
Exception: If *args not in:
          [`failures`, `successes`, `failure_msgs`, `warning_msgs`]
"""
def _format(name, arr):
"""Prints compatibility check results with a format.
Args:
name: String that is the title representing list `arr`.
arr: List of items to be printed in a certain format.
"""
title = "### All Compatibility %s ###" % str(name)
tlen = len(title)
print("-"*tlen)
print(title)
print("-"*tlen)
print(" Total # of %s: %s\n" % (str(name), str(len(arr))))
if arr:
for item in arr:
detail = ""
if isinstance(item[1], list):
for itm in item[1]:
detail += str(itm) + ", "
detail = detail[:-2]
else:
detail = str(item[1])
print(" %s ('%s')\n" % (str(item[0]), detail))
else:
print(" No %s" % name)
print("\n")
for p_item in args:
if p_item == "failures":
_format("Failures", self.failures)
elif p_item == "successes":
_format("Successes", self.successes)
elif p_item == "failure_msgs":
_format("Failure Messages", self.error_msg)
elif p_item == "warning_msgs":
_format("Warning Messages", self.warning_msg)
else:
raise Exception(
"[Error] Wrong input provided for %s." % _get_func_name())
def check_compatibility(self):
"""Checks version and dependency compatibility for a given configuration.
`check_compatibility` immediately returns with `False` (or failure status)
if any child process or checks fail. For error and warning messages, either
print `self.(error_msg|warning_msg)` or call `_print` function.
Returns:
Boolean that is a status of the compatibility check result.
"""
# Check if all `Required` configs are found in user configs.
usr_keys = list(self.usr_config.keys())
    for k in six.iterkeys(self.required):
if k not in usr_keys:
err_msg = "[Error] Required config not found in user config."
err_msg += "(required = %s, " % str(k)
err_msg += "user configs = %s)" % str(usr_keys)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([k, err_msg])
return False
# Parse each user config and validate its compatibility.
overall_status = True
for config_name, spec in six.iteritems(self.usr_config):
temp_status = True
# Check under which section the user config is defined.
in_required = config_name in list(self.required.keys())
in_optional = config_name in list(self.optional.keys())
in_unsupported = config_name in list(self.unsupported.keys())
in_dependency = config_name in list(self.dependency.keys())
# Add to warning if user config is not specified in the config file.
if not (in_required or in_optional or in_unsupported or in_dependency):
warn_msg = "[Error] User config not defined in config file."
warn_msg += "(user config = %s)" % str(config_name)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
self.failures.append([config_name, warn_msg])
temp_status = False
else:
if in_unsupported:
if self.in_range(spec, self.unsupported[config_name]):
err_msg = "[Error] User config is unsupported. It is "
err_msg += "defined under 'Unsupported' section in the config file."
err_msg += " (config = %s, spec = %s)" % (config_name, str(spec))
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_required:
if not self.in_range(spec, self.required[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Required' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_optional:
if not self.in_range(spec, self.optional[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Optional' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
# If user config and version has a dependency, check both user
# config + version and dependency config + version are supported.
if in_dependency:
# Get dependency information. The information gets retrieved in the
# following format:
# [`config`, `config _Reqs()`, `dependency`, `dependency _Reqs()`]
dep_list = self.dependency[config_name]
if dep_list:
for rule in dep_list:
cfg = rule[0] # config name
cfg_req = rule[1] # _Reqs() instance for config requirement
dep = rule[2] # dependency name
dep_req = rule[3] # _Reqs() instance for dependency requirement
# Check if user config has a dependency in the following sequence:
# [1] Check user config and the config that has dependency
# are the same. (This is defined as `cfg_status`.)
# [2] Check if dependency is supported.
try:
cfg_name = self.usr_config[cfg]
dep_name = self.usr_config[dep]
cfg_status = self.in_range(cfg_name, cfg_req)
dep_status = self.in_range(dep_name, dep_req)
# If both status's are `True`, then user config meets dependency
# spec.
if cfg_status:
if not dep_status:
# throw error
err_msg = "[Error] User config has a dependency that cannot"
err_msg += " be supported. "
err_msg += "'%s' has a dependency on " % str(config_name)
err_msg += "'%s'." % str(dep)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
except KeyError:
err_msg = "[Error] Dependency is missing from `Required`. "
err_msg += "(config = %s, ""dep = %s)" % (cfg, dep)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
# At this point, all requirement related to the user config has been
# checked and passed. Append to `successes` list.
if temp_status:
self.successes.append([config_name, spec])
else:
overall_status = False
return overall_status
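# Illustrative usage sketch (not part of the original module). The `.ini` file
# name and the user configuration values below are hypothetical; they only show
# how ConfigCompatChecker is driven: construct it from a user config dict and a
# requirements file, call check_compatibility(), then inspect the results.
if __name__ == "__main__":
  usr_config = {"python": ["3.7"], "tensorflow": ["1.13"]}
  checker = ConfigCompatChecker(usr_config, "sample_config.ini")
  if checker.check_compatibility():
    print("All configurations are compatible.")
  else:
    checker._print("failures", "failure_msgs")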
|
{
"content_hash": "e2bb63b933bf6216ea312f71160800a9",
"timestamp": "",
"source": "github",
"line_count": 887,
"max_line_length": 80,
"avg_line_length": 38.174746335963924,
"alnum_prop": 0.5690912849590974,
"repo_name": "Intel-Corporation/tensorflow",
"id": "7139118d1fcb9a0a31a5c55d30eac8e486055e4c",
"size": "34580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
"""Expose a TestCase class.
- TestCase: a basic Arrange, Act, Assert test case implementation
"""
import mock
class TestCase(object):
"""Arrange, Act, Assert test case.
Sub-classes implement test cases by *arranging* the environment in
the :meth:`.arrange` class method, perform the *action* in the
:meth:`.act` class method, and implement *assertions* as test
methods. The individual assertion methods have to be written in such
a way that the test runner in use finds them.
.. py:attribute:: allowed_exceptions
The exception or list of exceptions that the test case is
interested in capturing. An exception raised from :meth:`.act`
will be stored in :attr:`exception`.
.. py:attribute:: exception
The exception that was thrown during the action or ``None``.
"""
allowed_exceptions = ()
"""Catch this set of exception classes."""
@classmethod
def setUpClass(cls):
"""Arrange the environment and perform the action.
This method ensures that :meth:`.arrange` and :meth:`.act` are
invoked exactly once before the assertions are fired. If you do
find the need to extend this method, you should call this
implementation as the last statement in your extension method as
it will perform the action under test when it is called.
"""
cls.exception = None
cls._patches = []
cls.arrange()
try:
cls.act()
except cls.allowed_exceptions as exc:
cls.exception = exc
finally:
cls.destroy()
@classmethod
def tearDownClass(cls):
"""Stop any patches that have been created."""
for patcher in cls._patches:
patcher.stop()
@classmethod
def arrange(cls):
"""Arrange the testing environment.
Concrete test classes will probably override this method and
should invoke this implementation via ``super()``.
"""
pass
@classmethod
def destroy(cls):
"""Perform post-test cleanup.
        Concrete test classes may override this method if there are
actions that need to be performed after :meth:`.act` is called.
Subclasses should invoke this implementation via ``super()``.
This method is guaranteed to be called *after* the action under
test is invoked and before :meth:`.teardown_class`. It will be
called after any captured exception has been caught.
"""
pass
@classmethod
def patch(cls, target, **kwargs):
r"""Patch a named class or method.
:param str target: the dotted-name to patch
:returns: the result of starting the patch.
This method calls :func:`mock.patch` with *target* and
        *\*\*kwargs*, saves the patcher so it can be stopped later, and
        returns the mock produced by starting it.
"""
patcher = mock.patch(target, **kwargs)
patched = patcher.start()
cls._patches.append(patcher)
return patched
@classmethod
def patch_instance(cls, target, **kwargs):
r"""Patch a named class and return the created instance.
:param str target: the dotted-name of the class to patch
:returns: tuple of (patched class, patched instance)
This method calls :meth:`.patch` with *\*\*kwargs* to patch
*target* and returns a tuple containing the patched class as
well as the ``return_value`` attribute of the patched class.
This is useful if you want to patch a class and manipulate the
result of the code under test creating an instance of the class.
"""
patched_class = cls.patch(target, **kwargs)
return patched_class, patched_class.return_value
@classmethod
def act(cls):
"""The action to test.
**Subclasses are required to replace this method.**
"""
raise NotImplementedError
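# Illustrative sketch (not part of the original module): a minimal test case
# written against this base class. It patches `json.dumps` purely so the
# example is self-contained; the point is the arrange/act/assert split and the
# use of patch() inside arrange().
class WhenSerializingPayload(TestCase):
    @classmethod
    def arrange(cls):
        super(WhenSerializingPayload, cls).arrange()
        cls.dumps = cls.patch('json.dumps')
    @classmethod
    def act(cls):
        cls.result = cls.dumps({'key': 'value'})
    def test_dumps_called_once_with_payload(self):
        self.dumps.assert_called_once_with({'key': 'value'})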
|
{
"content_hash": "499058d26ffc3a49a2ee8475b9f5ec73",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 73,
"avg_line_length": 31.04724409448819,
"alnum_prop": 0.635049454729901,
"repo_name": "dave-shawley/fluent-test",
"id": "f36a8a08eff722d4f2ccbb27aa06af9edc4fd38b",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fluenttest/test_case.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "19548"
}
],
"symlink_target": ""
}
|
import pytest
import re
import capybara
class TestHaveNoneOfSelectors:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_is_false_if_any_of_the_given_locators_are_on_the_page(self, session):
assert session.has_none_of_selectors("xpath", "//p", "//a") is False
assert session.has_none_of_selectors("css", "p a#foo") is False
def test_is_true_if_none_of_the_given_locators_are_on_the_page(self, session):
assert session.has_none_of_selectors("xpath", "//abbr", "//td") is True
assert session.has_none_of_selectors("css", "p a#doesnotexist", "abbr") is True
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
assert session.has_none_of_selectors("p a#doesnotexist", "abbr")
assert not session.has_none_of_selectors("abbr", "p a#foo")
def test_respects_scopes_when_used_with_a_context(self, session):
with session.scope("//p[@id='first']"):
assert not session.has_none_of_selectors(".//a[@id='foo']")
assert session.has_none_of_selectors(".//a[@id='red']")
def test_respects_scopes_when_called_on_an_element(self, session):
el = session.find("//p[@id='first']")
assert not el.has_none_of_selectors(".//a[@id='foo']")
assert el.has_none_of_selectors(".//a[@id='red']")
def test_applies_the_options_to_all_locators(self, session):
assert not session.has_none_of_selectors("//p//a", text="Redirect")
assert session.has_none_of_selectors("//p", text="Doesnotexist")
def test_discards_all_matches_where_the_given_regexp_is_matched(self, session):
assert not session.has_none_of_selectors(
"//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1)
assert session.has_none_of_selectors("//p//a", text=re.compile(r"Red$"))
@pytest.mark.requires("js")
def test_does_not_find_elements_if_they_appear_after_given_wait_duration(self, session):
session.visit("/with_js")
session.click_link("Click me")
assert session.has_none_of_selectors("css", "#new_field", "a#has-been-clicked", wait=0.1)
|
{
"content_hash": "6c83337f970f19c8bb46eedebdec8975",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 97,
"avg_line_length": 45.9375,
"alnum_prop": 0.6462585034013606,
"repo_name": "elliterate/capybara.py",
"id": "b5f92ccbed2f22f04a49d4ba88ddaef5a0e4591c",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capybara/tests/session/test_has_none_of_selectors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38254"
},
{
"name": "JavaScript",
"bytes": "5225"
},
{
"name": "Python",
"bytes": "573480"
}
],
"symlink_target": ""
}
|
from django.db import models
# Classes
##########
class Artist(models.Model):
name = models.CharField(max_length=100, unique=True)
def __unicode__(self):
return u"Artist #{}: {}".format(self.id, self.name)
@staticmethod
def getArtists():
artists = []
for artist in Artist.objects.all():
artists.append({"id": artist.id,
"name": artist.name})
return artists
@staticmethod
def getArtistTracks(artist_id):
# Check that the given artist exists - if it doesn't, raise an exception
if Artist.objects.filter(id=artist_id).exists():
items = []
for item in Media.objects.filter(artist=artist_id):
items.append(item.make_dict())
return items
else:
raise Artist.DoesNotExist("Artist #{} not found.".format(artist_id))
class Album(models.Model):
name = models.CharField(max_length=100, unique=True)
coverurl = models.URLField()
def __unicode__(self):
return u"Album #{}: {}".format(self.id, self.name)
@staticmethod
def getAlbums():
albums = []
for album in Album.objects.all():
albums.append({"id": album.id,
"name": album.name,
"coverurl": album.coverurl})
return albums
@staticmethod
def getAlbumTracks(album_id):
# Check that the given album exists - if it doesn't, raise an exception
if Album.objects.filter(id=album_id).exists():
items = []
for item in Media.objects.filter(album=album_id):
items.append(item.make_dict())
return items
else:
raise Album.DoesNotExist("Album #{} not found.".format(album_id))
class Media(models.Model):
title = models.CharField(max_length=127)
artist = models.ForeignKey(Artist)
album = models.ForeignKey(Album)
length = models.FloatField(help_text="Track length in seconds, floating point")
original_source = models.FilePathField()
scan_date = models.DateTimeField(auto_now=True)
def __unicode__(self):
return u"#{}: {} ({}) - {}".format(self.id, self.album.name, self.artist.name, self.title)
def make_dict(self):
"""Create a dict with commonly used data, suitable for in (e.g.) playlists"""
# First sort the media sources, pushing transcodes to the back
sources = [s.make_dict() for s in self.mediasource_set.all()]
sources.sort(key=lambda s: ".transcode" in s["url"])
return {"id": self.id,
"title": self.title,
"artist": self.artist.name,
"album": self.album.name,
"length": self.length,
"sources": sources,
"poster": self.album.coverurl}
# Data source API helper methods
@staticmethod
def getFullLibrary():
items = []
for item in Media.objects.all():
items.append(item.make_dict())
return items
@staticmethod
def getDetails(media_id):
# Get the common data and add all the rest of the data stored
media = Media.objects.get(pk=media_id)
details = media.make_dict()
details.update({"scan_date": media.scan_date.isoformat()})
return details
class MediaSource(models.Model):
media = models.ForeignKey(Media)
url = models.URLField()
path = models.FilePathField()
mime = models.CharField(max_length=100)
transcode = models.BooleanField()
def __unicode__(self):
return "#{}: {} - {}".format(self.id, self.url, self.mime)
def make_dict(self):
return {"url": self.url,
"mime": self.mime,
"transcode": self.transcode}
class Playlist(models.Model):
items = models.ManyToManyField(Media)
name = models.CharField(max_length=63)
def __unicode__(self):
return u"Playlist #{}: {} ({} items)".format(self.id, self.name, self.items.count())
# Data source API helper methods
@staticmethod
def getPlaylist(playlist_id):
playlistObj = Playlist.objects.get(pk=playlist_id)
playlist = {"id": playlistObj.id,
"name": playlistObj.name,
"items": []}
for item in playlistObj.items.all():
playlist["items"].append(item.make_dict())
return playlist
@staticmethod
def getPlaylistList():
lists = Playlist.objects.all()
listout = []
for playlist in lists:
listout.append({"id": playlist.id,
"name": playlist.name})
return listout
@staticmethod
def savePlaylist(idList, name):
items = Media.objects.filter(pk__in=idList)
playlist = Playlist()
playlist.save()
playlist.items.add(*items)
playlist.name = name
playlist.save()
return playlist.id
|
{
"content_hash": "de5491ae00ec8615ad4d25a9130a18ee",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 98,
"avg_line_length": 33.329032258064515,
"alnum_prop": 0.5586527293844367,
"repo_name": "BitLooter/Cadence",
"id": "73a3fea5b6a7721df45652541e5b3940e5f16cad",
"size": "5166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cadence/apps/backend/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2946"
},
{
"name": "JavaScript",
"bytes": "31595"
},
{
"name": "Makefile",
"bytes": "5568"
},
{
"name": "Python",
"bytes": "59088"
},
{
"name": "Shell",
"bytes": "5904"
}
],
"symlink_target": ""
}
|
"""
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import *
from collections import defaultdict
__all__ = ['cycle_basis','simple_cycles']
__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
'Aric Hagberg <hagberg@lanl.gov>'])
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cycle_basis(G,root=None):
""" Returns a list of cycles which form a basis for cycles of G.
A basis for cycles of a network is a minimal collection of
cycles such that any cycle in the network can be written
as a sum of cycles in the basis. Here summation of cycles
is defined as "exclusive or" of the edges. Cycle bases are
useful, e.g. when deriving equations for electric circuits
using Kirchhoff's Laws.
Parameters
----------
G : NetworkX Graph
root : node, optional
Specify starting node for basis.
Returns
-------
A list of cycle lists. Each cycle list is a list of nodes
which forms a cycle (loop) in G.
Examples
--------
>>> G=nx.Graph()
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([0,3,4,5])
>>> print(nx.cycle_basis(G,0))
[[3, 4, 5, 0], [1, 2, 3, 0]]
Notes
-----
This is adapted from algorithm CACM 491 [1]_.
References
----------
.. [1] Paton, K. An algorithm for finding a fundamental set of
cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
See Also
--------
simple_cycles
"""
# if G.is_directed():
# e='cycle_basis() not implemented for directed graphs'
# raise Exception(e)
# if G.is_multigraph():
# e='cycle_basis() not implemented for multigraphs'
# raise Exception(e)
gnodes=set(G.nodes())
cycles=[]
while gnodes: # loop over connected components
if root is None:
root=gnodes.pop()
stack=[root]
pred={root:root}
used={root:set()}
while stack: # walk the spanning tree finding cycles
z=stack.pop() # use last-in so cycles easier to find
zused=used[z]
for nbr in G[z]:
if nbr not in used: # new node
pred[nbr]=z
stack.append(nbr)
used[nbr]=set([z])
elif nbr is z: # self loops
cycles.append([z])
elif nbr not in zused:# found a cycle
pn=used[nbr]
cycle=[nbr,z]
p=pred[z]
while p not in pn:
cycle.append(p)
p=pred[p]
cycle.append(p)
cycles.append(cycle)
used[nbr].add(z)
gnodes-=set(pred)
root=None
return cycles
@not_implemented_for('undirected')
def simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
    A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
A list of circuits, where each circuit is a list of nodes, with the first
and last node being the same.
    Examples
    --------
    >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
    >>> nx.simple_cycles(G)
    [[0, 0], [0, 1, 2, 0], [0, 2, 0], [1, 2, 1], [2, 2]]
See Also
--------
cycle_basis (for undirected graphs)
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
"""
# Jon Olav Vik, 2010-08-09
def _unblock(thisnode):
"""Recursively unblock and remove nodes from B[thisnode]."""
if blocked[thisnode]:
blocked[thisnode] = False
while B[thisnode]:
_unblock(B[thisnode].pop())
def circuit(thisnode, startnode, component):
closed = False # set to True if elementary path is closed
path.append(thisnode)
blocked[thisnode] = True
for nextnode in component[thisnode]: # direct successors of thisnode
if nextnode == startnode:
result.append(path + [startnode])
closed = True
elif not blocked[nextnode]:
if circuit(nextnode, startnode, component):
closed = True
if closed:
_unblock(thisnode)
else:
for nextnode in component[thisnode]:
if thisnode not in B[nextnode]: # TODO: use set for speedup?
B[nextnode].append(thisnode)
path.pop() # remove thisnode from path
return closed
# if not G.is_directed():
# raise nx.NetworkXError(\
# "simple_cycles() not implemented for undirected graphs.")
path = [] # stack of nodes in current path
blocked = defaultdict(bool) # vertex: blocked from search?
B = defaultdict(list) # graph portions that yield no elementary circuit
result = [] # list to accumulate the circuits found
# Johnson's algorithm requires some ordering of the nodes.
# They might not be sortable so we assign an arbitrary ordering.
ordering=dict(zip(G,range(len(G))))
for s in ordering:
# Build the subgraph induced by s and following nodes in the ordering
subgraph = G.subgraph(node for node in G
if ordering[node] >= ordering[s])
# Find the strongly connected component in the subgraph
# that contains the least node according to the ordering
strongcomp = nx.strongly_connected_components(subgraph)
mincomp=min(strongcomp,
key=lambda nodes: min(ordering[n] for n in nodes))
component = G.subgraph(mincomp)
if component:
# smallest node in the component according to the ordering
startnode = min(component,key=ordering.__getitem__)
for node in component:
blocked[node] = False
B[node][:] = []
dummy=circuit(startnode, startnode, component)
return result
|
{
"content_hash": "b8e97e42c970f1f27f12fd69ef1af73d",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 80,
"avg_line_length": 33.309178743961354,
"alnum_prop": 0.5628716461203771,
"repo_name": "lthurlow/Network-Grapher",
"id": "d302ba6e96be7a432f9ea69dc23579885ab561d8",
"size": "6895",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "proj/external/networkx-1.7/build/lib.linux-i686-2.7/networkx/algorithms/cycles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
}
|
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
score = 0
# group results
    results = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for die in dice:
results[die] += 1
# triple ones
    if results[1] > 2:
score += 1000
results[1] -= 3
# triples other than ones
for die in dice:
if results[die] > 2:
score += die * 100
results[die] -= 3
# ones and fives not part of a triple
score += results[1] * 100
score += results[5] * 50
return score
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
|
{
"content_hash": "41e5b33c34329f4071f24a85c007ad2a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 28.305882352941175,
"alnum_prop": 0.6122194513715711,
"repo_name": "r3dshirt/Python-Koans",
"id": "661f53afe0a377bb9d4242d42e232ba575f4c9f2",
"size": "2453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python 3/koans/about_scoring_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316252"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import pytest
from molecule.model import schema_v2
@pytest.fixture
def _model_dependency_section_data():
return {
'dependency': {
'name': 'galaxy',
'enabled': True,
'options': {
'foo': 'bar',
},
'env': {
'FOO': 'foo',
'FOO_BAR': 'foo_bar',
},
}
}
@pytest.mark.parametrize(
'_config', ['_model_dependency_section_data'], indirect=True)
def test_dependency(_config):
assert {} == schema_v2.validate(_config)
@pytest.fixture
def _model_dependency_errors_section_data():
return {
'dependency': {
'name': int(),
'command': None,
'enabled': str(),
'options': [],
'env': {
'foo': 'foo',
'foo-bar': 'foo-bar',
},
}
}
@pytest.mark.parametrize(
'_config', ['_model_dependency_errors_section_data'], indirect=True)
def test_dependency_has_errors(_config):
x = {
'dependency': [{
'name': ['must be of string type'],
'enabled': ['must be of boolean type'],
'options': ['must be of dict type'],
'env': [{
'foo': ["value does not match regex '^[A-Z0-9_-]+$'"],
'foo-bar': ["value does not match regex '^[A-Z0-9_-]+$'"],
}],
}]
}
assert x == schema_v2.validate(_config)
@pytest.fixture
def _model_dependency_allows_galaxy_section_data():
return {
'dependency': {
'name': 'galaxy',
}
}
@pytest.fixture
def _model_dependency_allows_gilt_section_data():
return {
'dependency': {
'name': 'gilt',
}
}
@pytest.fixture
def _model_dependency_allows_shell_section_data():
return {
'dependency': {
'name': 'shell',
}
}
@pytest.mark.parametrize(
'_config', [
('_model_dependency_allows_galaxy_section_data'),
('_model_dependency_allows_gilt_section_data'),
('_model_dependency_allows_shell_section_data'),
],
indirect=True)
def test_dependency_allows_shell_name(_config):
assert {} == schema_v2.validate(_config)
@pytest.fixture
def _model_dependency_shell_errors_section_data():
return {
'dependency': {
'name': 'shell',
'command': None,
}
}
@pytest.mark.parametrize(
'_config', ['_model_dependency_shell_errors_section_data'], indirect=True)
def test_dependency_shell_has_errors(_config):
x = {'dependency': [{'command': ['null value not allowed']}]}
assert x == schema_v2.validate(_config)
|
{
"content_hash": "89a87a0be6dfc27a0c84cc624c6a9343",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 23.28448275862069,
"alnum_prop": 0.5138837467604591,
"repo_name": "metacloud/molecule",
"id": "fe505e59794c57e5a6d43e1e8160ee05a4a032eb",
"size": "3821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/model/v2/test_dependency_section.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1082"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "767920"
},
{
"name": "Ruby",
"bytes": "536"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from google.cloud.dataproc_v1 import ClusterControllerClient
from google.cloud.dataproc_v1 import JobControllerClient
from google.cloud.dataproc_v1 import WorkflowTemplateServiceClient
from google.cloud.dataproc_v1 import enums
from google.cloud.dataproc_v1 import types
__all__ = (
"enums",
"types",
"ClusterControllerClient",
"JobControllerClient",
"WorkflowTemplateServiceClient",
)
|
{
"content_hash": "1691f3becb1c702f97fd94151a3160f4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 28,
"alnum_prop": 0.78125,
"repo_name": "tseaver/google-cloud-python",
"id": "772a0dbfd0de481827ea05a2c4e64022ecb8b388",
"size": "1051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataproc/google/cloud/dataproc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
import hexchat
import threading
import twitch.hook, twitch.channel, twitch.logger, twitch.settings
log = twitch.logger.get()
# Don't show the "changed topic" message (but do update the topic bar)
def topic_print_cb(word, word_eol, msgtype):
return hexchat.EAT_ALL
# work around hexchat bugs (likes to segfault when using RECV in other threads)
topic_changes = []
def topic_update_cb(userdata):
try:
while len(topic_changes) > 0:
log.debug("%d updates queued" % len(topic_changes))
topic = topic_changes.pop(0)
log.debug("topic change: %s" % str(topic))
cmd = "RECV :twitch.py!twitch@twitch.tv TOPIC #{0} :{1}"\
.format(topic["channel"], topic["topic"])
log.debug(cmd)
hexchat.command(cmd)
log.debug("Posted topic change OK")
except:
log.exception("Unhandled exception in twitch.topic_update_cb")
finally:
return True # keep timer running
# Thread callback to update a channel
def update_channel_thread(chan):
try:
if chan.update() and chan.makeTopic():
log.debug("queue update for channel %s" % chan.name)
topic_changes.append({
"channel": chan.name,
"topic" : chan.topic,
})
except:
log.exception("Unhandled exception in %s" %
threading.current_thread().name)
# Periodically update channel info
def update_channels_cb(userdata):
try:
for name in twitch.channel.channels:
chan = twitch.channel.channels[name]
if chan.isJoined():
log.debug("Update channel %s" % name)
t = threading.Thread(
target = update_channel_thread,
args = (twitch.channel.channels[name],),
name = "twitch.update_channel_thread(%s)" % name,
daemon = True)
t.start()
except:
log.exception("Unhandled exception in twitch.update_channels_cb")
finally:
return True # keep timer running
# Manually update a channel
def update_channel(chan):
log.debug("manually queue update for channel %s" % chan.name)
topic_changes.append({
"channel": chan.name,
"topic" : chan.topic,
})
def run():
# XXX if setting is changed, reset the timer
twitch.hook.prnt('Topic Change', topic_print_cb)
twitch.hook.timer(1000, topic_update_cb)
twitch.hook.timer(twitch.settings.get('topic.refreshinterval') * 1000,
update_channels_cb)
|
{
"content_hash": "ae96cf55a02ae23211472429f381484d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 26.927710843373493,
"alnum_prop": 0.6921700223713646,
"repo_name": "RenaKunisaki/hexchat-twitch",
"id": "9ead958a7da5188ea00a0712492768eef3546a67",
"size": "2235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitch/topic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59370"
}
],
"symlink_target": ""
}
|
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'text',)
|
{
"content_hash": "11619e28282dc188cd0667687bb1ffe9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 35,
"avg_line_length": 17.555555555555557,
"alnum_prop": 0.6392405063291139,
"repo_name": "manojpandey/simple-blog",
"id": "3a30f8d31eff5cada284b305d879eb090d19b37c",
"size": "158",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blog/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "812"
},
{
"name": "HTML",
"bytes": "2084"
},
{
"name": "Python",
"bytes": "8497"
}
],
"symlink_target": ""
}
|
""" Testing suite for the PyTorch CLIP model. """
import inspect
import os
import tempfile
import unittest
import requests
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import CLIPConfig, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPVisionConfig, CLIPVisionModel
from transformers.models.clip.modeling_clip import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import CLIPProcessor
class CLIPVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = CLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
return config, pixel_values
def create_and_check_model(self, config, pixel_values):
model = CLIPVisionModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (CLIPVisionModel,) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CLIPVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# CLIP does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# CLIP has a different seq_length
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
# skip this test as CLIPVisionModel has no base class and is
# not available in MODEL_MAPPING
def test_save_load_fast_init_from_base(self):
pass
# skip this test as CLIPVisionModel has no base class and is
# not available in MODEL_MAPPING
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class CLIPTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = CLIPTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
return config, input_ids, input_mask
def create_and_check_model(self, config, input_ids, input_mask):
model = CLIPTextModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class CLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CLIPTextModel,) if is_torch_available() else ()
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = CLIPTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
def test_inputs_embeds(self):
# CLIP does not use inputs_embeds
pass
# skip this test as CLIPTextModel has no base class and is
# not available in MODEL_MAPPING
def test_save_load_fast_init_from_base(self):
pass
# skip this test as CLIPTextModel has no base class and is
# not available in MODEL_MAPPING
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class CLIPModelTester:
def __init__(self, parent, is_training=True):
self.parent = parent
self.text_model_tester = CLIPTextModelTester(parent)
self.vision_model_tester = CLIPVisionModelTester(parent)
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64)
return config, input_ids, attention_mask, pixel_values
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = CLIPModel(config).to(torch_device).eval()
result = model(input_ids, pixel_values, attention_mask)
self.parent.assertEqual(
result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"return_loss": True,
}
return config, inputs_dict
@require_torch
class CLIPModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CLIPModel,) if is_torch_available() else ()
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = CLIPModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# hidden_states are tested in individual model tests
def test_hidden_states_output(self):
pass
# input_embeds are tested in individual model tests
def test_inputs_embeds(self):
pass
# tested in individual model tests
def test_retain_grad_hidden_states_attentions(self):
pass
# CLIPModel does not have input/output embeddings
def test_model_common_attributes(self):
pass
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values
traced_model = torch.jit.trace(model, (input_ids, pixel_values))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
@slow
def test_model_from_pretrained(self):
for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
class CLIPModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name).to(torch_device)
processor = CLIPProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
).to(torch_device)
# forward pass
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.Tensor([[24.5056, 18.8076]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
|
{
"content_hash": "0baf095368c52d68271571d5b105a29b",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 119,
"avg_line_length": 37.09323583180987,
"alnum_prop": 0.6325776244455397,
"repo_name": "huggingface/pytorch-transformers",
"id": "c5ab9416d152e01d5fd9da2d9c118daa629d9346",
"size": "20916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_modeling_clip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
}
|
"""image_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from rest_framework_nested import routers as nrouters
from django.conf import settings
from django.conf.urls.static import static
from image_app import views
from image_app.views import user
from image_app.views import image
from image_app.views.image import label as i_label
from image_app.views import label
from image_app.views import download
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'user', user.UserViewSet, base_name='user')
router.register(r'image', image.ImageViewSet, base_name='image')
router.register(r'label', label.LabelViewSet, base_name='label')
router.register(r'download', download.ImageDownloadViewSet, base_name='download')
label_router = nrouters.NestedSimpleRouter(router, r'image', lookup='image', trailing_slash=False)
label_router.register(r'label', i_label.LabelViewSet, base_name='image-label')
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^api/(?P<version>(v1))/', include([
url(r'^', include(router.urls)),
url(r'^', include(label_router.urls)),
])),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'auth/', include('knox.urls')),
url(r'^media/.*$', views.ImageView.as_view(), name='media'),
# pass everything else through to Angular
url('^.*$', views.IndexView.as_view(), name='index'),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{
"content_hash": "454bbb30cfa4cef7095bb1e26d24226c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 98,
"avg_line_length": 39.67857142857143,
"alnum_prop": 0.7245724572457246,
"repo_name": "pstrinkle/drf-image-app",
"id": "57e2b61e709aa25118efc6bfe19814e0fcf6564b",
"size": "2222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14358"
},
{
"name": "JavaScript",
"bytes": "34645"
},
{
"name": "Makefile",
"bytes": "93"
},
{
"name": "Python",
"bytes": "33752"
},
{
"name": "Shell",
"bytes": "1138"
}
],
"symlink_target": ""
}
|
class Node:
#initializes Node based on JSON data
#param metro - JSON city data
#param edges - edges associated with Graph
def __init__(self, metro, edges):
self.code = metro['code']
self.name = metro['name']
self.country = metro['country']
self.continent = metro['continent']
self.timezone = metro['timezone']
self.coords = metro['coordinates']
        if metro['coordinates'].get('S') is not None:
            self.latitude = metro['coordinates']['S']
        else:
            self.latitude = metro['coordinates']['N']
        if metro['coordinates'].get('E') is not None:
            self.longitude = metro['coordinates']['E']
        else:
            self.longitude = metro['coordinates']['W']
self.population = metro['population']
self.region = metro['region']
        self.adjacent_cities = [] #list of [adjacent city code, distance] pairs
self.get_adjacent_cities(edges)
#gets a list of all cities adjacent to this node and updates member variable
#param edges - edges associated with Graph
    def get_adjacent_cities(self, edges):
        for edge in edges:
            if edge.home == self.code:
                self.adjacent_cities.append([edge.dest, edge.distance])
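# Illustrative usage sketch (not part of the original module): the real Edge
# objects are built elsewhere in this repo, so the namedtuple below is only a
# stand-in exposing the attributes Node relies on (home, dest, distance), and
# the metro dict holds hypothetical sample values mirroring the JSON fields
# read in __init__.
if __name__ == '__main__':
    from collections import namedtuple
    FakeEdge = namedtuple('FakeEdge', ['home', 'dest', 'distance'])
    metro = {'code': 'AAA', 'name': 'Sample City', 'country': 'XX',
             'continent': 'Nowhere', 'timezone': 0,
             'coordinates': {'N': 10, 'E': 20},
             'population': 100000, 'region': 1}
    edges = [FakeEdge('AAA', 'BBB', 1234), FakeEdge('BBB', 'AAA', 1234)]
    node = Node(metro, edges)
    print("%s -> %s" % (node.code, node.adjacent_cities))  # AAA -> [['BBB', 1234]]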
|
{
"content_hash": "8a9cbfd47d7976b9ba564e35f178a2ee",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 36.97222222222222,
"alnum_prop": 0.5867768595041323,
"repo_name": "anneomcl/FlightNetwork",
"id": "52c5ea0cd395da4b498fc90c7db8c2b82ff1c408",
"size": "1331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37353"
}
],
"symlink_target": ""
}
|
import argparse
import csv
import sys
import avro.schema
from collections import namedtuple
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter
################################################################################
#
# CLI arg parsing
#
################################################################################
parser = argparse.ArgumentParser(description="CSV-to-Avro Converter")
parser.add_argument("-f", "--filename", help="Path to CSV input file");
parser.add_argument("-s", "--schemafile", help="Path to Avro schema file");
parser.add_argument("-o", "--output", help="Path to Avro output destination");
args = parser.parse_args();
fields = ("playerID", "yearID", "stint", "teamID", "lgID",
"W", "L", "G", "GS", "CG", "SHO", "SV", "IPouts", "H",
"ER", "HR", "BB", "SO", "BAOpp", "ERA", "IBB", "WP",
"HBP", "BK", "BFP", "GF", "R", "SH", "SF", "GIDP")
################################################################################
#
# User-Defined Functions
#
################################################################################
# Going this (named tuple) route since data type
# conversion seemed marginally less hairy here
# than using a dict. That said, there's probably
# A Better Way (tm) to do this such that field
# names and data types aren't hardcoded, but it
# works well enough in the context of this specific
# example.
class DataReader(namedtuple('Player', fields)):
@classmethod
def parse(dataType, row):
row = list(row)
row[1] = int(row[1]) if row[1] else None
row[2] = int(row[2]) if row[2] else None
row[5] = int(row[5]) if row[5] else None
row[6] = int(row[6]) if row[6] else None
row[7] = int(row[7]) if row[7] else None
row[8] = int(row[8]) if row[8] else None
row[9] = int(row[9]) if row[9] else None
row[10] = int(row[10]) if row[10] else None
row[11] = int(row[11]) if row[11] else None
row[12] = int(row[12]) if row[12] else None
row[13] = int(row[13]) if row[13] else None
row[14] = int(row[14]) if row[14] else None
row[15] = int(row[15]) if row[15] else None
row[16] = int(row[16]) if row[16] else None
row[17] = int(row[17]) if row[17] else None
row[18] = float(row[18]) if row[18] else None
row[19] = float(row[19]) if row[19] else None
row[20] = int(row[20]) if row[20] else None
row[21] = int(row[21]) if row[21] else None
row[22] = int(row[22]) if row[22] else None
row[23] = int(row[23]) if row[23] else None
row[24] = int(row[24]) if row[24] else None
row[25] = int(row[25]) if row[25] else None
row[26] = int(row[26]) if row[26] else None
row[27] = int(row[27]) if row[27] else None
row[28] = int(row[28]) if row[28] else None
row[29] = int(row[29]) if row[29] else None
return dataType(*row)
def read_data(path):
with open(path, 'rU') as data:
data.readline()
reader = csv.reader(data)
for row in map(DataReader.parse, reader):
yield row
def parse_schema(path):
with open(path, 'r') as schema:
return avro.schema.Parse(schema.read())
# There's no compelling reason to convert our
# CSV to an Avro binary other than I don't know
# as much about the file format as I'd like and
# was curious. Since these data are coming to us
# already in a columnar format and we're writing
# SQL-like queries against them, it would probably
# make more sense to convert the data to Parquet
# if we're optimizing for performance.
def convert_to_avro(records, schema, output):
schema = parse_schema(schema)
with open(output, 'wb') as out:
writer = DataFileWriter(out, DatumWriter(), schema)
for record in records:
record = dict((field, getattr(record, field)) for field in record._fields)
writer.append(record)
writer.close()
################################################################################
#
# Dataset ingestion
#
################################################################################
data = read_data(args.filename)
################################################################################
#
# CSV-To-Avro Conversion
#
################################################################################
convert_to_avro(data, args.schemafile, args.output)
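################################################################################
#
# Example invocation (illustrative only -- the file names below are
# placeholders, not files shipped with this repository):
#
#   python csvToAvro.py -f Pitching.csv -s pitching.avsc -o pitching.avro
#
################################################################################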
|
{
"content_hash": "1c1c30adbe62a3412e49bebaec52403f",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 86,
"avg_line_length": 38.205128205128204,
"alnum_prop": 0.5308724832214765,
"repo_name": "faulconbridge/aver-spark",
"id": "b500abc126a1fa4dc75b11f18115db334eca6f0d",
"size": "4666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/csvToAvro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5809"
},
{
"name": "Shell",
"bytes": "6898"
}
],
"symlink_target": ""
}
|
"""
Sun Oct 12 11:38:18 IDT 2014
by xorpd.
A script for auto generating the xorpd's website.
"""
from mako.template import Template
from mako.lookup import TemplateLookup
import os
import shutil
import lib.utils
class MakeWebsiteError(Exception): pass
# The content's directory name:
CONTENT_DIR = "content"
# The output's directory name:
OUTPUT_DIR = "output"
class ExceptStaticWeber(Exception):
pass
class ExceptInvalidExtension(ExceptStaticWeber):
pass
def change_extension(filename,new_ext):
"""
Change the extension of a file to be new_ext
"""
last_dot = filename.rfind(".")
return filename[:last_dot] + "." + new_ext
def get_extension(filename):
"""
Get the extension of a file (What comes right after the last dot).
"""
return filename.split(".")[-1]
def clean_empty_dirs(root_dir,ignore_prefixes=["."]):
"""
Check the directory tree for any empty directories, or directories that
contain only empty directories (etc.)
Then delete any such directories.
Do not delete inside any directories which begin with one of the
ignore_prefixes.
"""
# We don't get into directories which begin with
# one of the ignore prefixes:
for iprefix in ignore_prefixes:
if os.path.basename(root_dir).startswith(iprefix):
return
files = os.listdir(root_dir)
for f in files:
new_root = os.path.join(root_dir,f)
if os.path.isdir(new_root):
            clean_empty_dirs(new_root, ignore_prefixes)
# After some deleting, we get again the list of contents.
# Note that this will not be the same list from the first time.
files = os.listdir(root_dir)
if len(files) == 0:
# If there are no files inside the directory, remove it and exit:
os.rmdir(root_dir)
class Website():
def __init__(self,path):
# Load the path of the website:
self.website_path = path
def build_website(self):
# Build the path of the content folder:
content_path = os.path.join(self.website_path,CONTENT_DIR)
# Build the path of the output folder:
output_path = os.path.join(self.website_path,OUTPUT_DIR)
# Directory lookup.
# This way include or inherit directive from any of the mako templates
# doesn't have to specify the full path.
wlookup = TemplateLookup(directories=[content_path])
# Remove output directory if it exists:
# try:
# shutil.rmtree(output_path)
# except FileNotFoundError:
# pass
# Iterate over all files inside the content directory, to find MAKO
# templates to render:
for root,dirs,files in os.walk(content_path):
# Copy to the equivalent at the output directory:
rel_root = os.path.relpath(root,content_path)
# Get equivalent path inside output directory:
root_output = os.path.join(output_path,rel_root)
# Create equivalent folder if necessary:
if not os.path.exists(root_output):
os.makedirs(root_output)
for fl in files:
# Get full path inside content directory:
fl_path = os.path.join(root,fl)
fl_ext = lib.utils.get_extension(fl)
props = lib.utils.get_ext_props(fl_ext)
if props.should_render:
# Build a template:
fl_tmp = Template(filename=fl_path,lookup=wlookup)
# Get the filename as target_ext extensioned file:
fl_with_ext = lib.utils.change_extension(fl,props.target_ext)
fl_with_ext_output = os.path.join(root_output,fl_with_ext)
try:
# Render the template:
res_render = fl_tmp.render(my_filename=fl,\
my_content_dir=content_path,\
my_output_dir=output_path,\
my_rel_dir=rel_root)
except Exception as e:
raise MakeWebsiteError('Failed rendering '
'{}'.format(fl)) from e
if props.render_output:
# Write the template's rendering result to a file at
# the output directory tree:
with open(fl_with_ext_output,"w") as fw:
fw.write(res_render)
continue
if props.should_copy:
# We copy the file to the destination folder:
# Get equivalent path inside output directory:
fl_output = os.path.join(root_output,fl)
# Copy to output directory:
shutil.copyfile(fl_path,fl_output)
# Continue to the next file:
continue
# Clean any empty directories inside output:
clean_empty_dirs(output_path,ignore_prefixes=["."])
def go():
wb = Website(".")
wb.build_website()
if __name__ == "__main__":
go()
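# Illustrative usage (assumed, not part of the original script; mirrors the
# go() helper above): build a site rooted at an explicit path instead of the
# current working directory.
#
#   from make_website import Website
#   Website("/path/to/site").build_website()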
|
{
"content_hash": "e4dc43b5c8959d02e12ac44747733b8c",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 81,
"avg_line_length": 31.43030303030303,
"alnum_prop": 0.5713459313536444,
"repo_name": "xorpd/static_weber",
"id": "e155402c94beb9d80968eeab9cc2a66dc08c2f60",
"size": "5186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_website.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4625"
},
{
"name": "Mako",
"bytes": "7060"
},
{
"name": "Python",
"bytes": "21784"
}
],
"symlink_target": ""
}
|
from pathlib import Path
import asyncpgsa
import django
import pytest
import yaml
from aiohttp.web import Application
from aioworkers.core.config import Config
from aioworkers.core.context import Context
from django.core.management import call_command
from dvhb_hybrid import BASE_DIR
TESTS_DIR = Path(__file__).parent
# Django settings
SECRET_KEY = '123'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'dvhb_hybrid.users',
'dvhb_hybrid.mailer',
'dvhb_hybrid.user_action_log',
'tests',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dvhb_hybrid',
}
}
class Conf(Config):
def load_yaml(self, s):
self.update(yaml.load(s))
@pytest.fixture
def config():
c = Conf()
c.load_plugins(force=True)
c.load(
TESTS_DIR / 'config.yaml',
)
return c
@pytest.fixture
def context(config, loop):
with Context(config, loop=loop) as ctx:
yield ctx
def pytest_configure():
django.setup()
@pytest.fixture
def app(context):
yield context.app
@pytest.fixture
def cli(app, test_client):
# TODO Rename (cli is command line interface)
async def create_client():
client = await test_client(app)
return client
return create_client
@pytest.fixture(scope='session')
def django_db_setup(django_db_setup, django_db_blocker):
"""
Creates and initializes test DB
"""
names = []
for i in BASE_DIR.glob('*/fixtures/*yaml'):
# TODO: Split test fixtures
# Do not import fixtures from users app
if i.parent.parent.name == 'users':
continue
names.append(i.with_suffix('').name)
with django_db_blocker.unblock():
call_command('loaddata', *names)
|
{
"content_hash": "8c12b6b017e5385e4653880826e18ee6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 56,
"avg_line_length": 20.942528735632184,
"alnum_prop": 0.650384193194292,
"repo_name": "dvhbru/dvhb-hybrid",
"id": "ecc3430af4b37af007089e10038e07b04ae80079",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177082"
}
],
"symlink_target": ""
}
|
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.definitions.retail_decisions_cc_fraud_check_output import RetailDecisionsCCFraudCheckOutput
from ingenico.connect.sdk.domain.definitions.validation_bank_account_output import ValidationBankAccountOutput
class ResultDoRiskAssessment(DataObject):
__category = None
__result = None
__retaildecisions_cc_fraud_check_output = None
__validation_bank_account_output = None
@property
def category(self):
"""
| The Risk Services category with the following possible values:
* retaildecisionsCCFraudCheck - checks performed by Retail Decisions
* globalcollectBlacklistCheckCC - Checked against the blacklist on the GlobalCollect platform
* authorizationCheck - 0$ auth card account validation check
* ddFraudCheck - Check performed for German market via InterCard
* validationbankAccount - Bank account details are algorithmically checked if they could exist
* globalcollectBlacklistCheckDD - Checked against the blacklist on the GlobalCollect platform
Type: str
"""
return self.__category
@category.setter
def category(self, value):
self.__category = value
@property
def result(self):
"""
| Risk service result with the following possible results:
* accepted - Based on the checks performed the transaction can be accepted
* challenged - Based on the checks performed the transaction should be manually reviewed
* denied - Based on the checks performed the transaction should be rejected
* no-advice - No fraud check was requested/performed
* error - The fraud check resulted in an error and the fraud check was thus not performed
Type: str
"""
return self.__result
@result.setter
def result(self, value):
self.__result = value
@property
def retaildecisions_cc_fraud_check_output(self):
"""
| Object containing the results of the fraud checks performed by Retail Decisions
Type: :class:`ingenico.connect.sdk.domain.definitions.retail_decisions_cc_fraud_check_output.RetailDecisionsCCFraudCheckOutput`
"""
return self.__retaildecisions_cc_fraud_check_output
@retaildecisions_cc_fraud_check_output.setter
def retaildecisions_cc_fraud_check_output(self, value):
self.__retaildecisions_cc_fraud_check_output = value
@property
def validation_bank_account_output(self):
"""
| Object containing the results of the fraud checks performed on the bank account data
Type: :class:`ingenico.connect.sdk.domain.definitions.validation_bank_account_output.ValidationBankAccountOutput`
"""
return self.__validation_bank_account_output
@validation_bank_account_output.setter
def validation_bank_account_output(self, value):
self.__validation_bank_account_output = value
def to_dictionary(self):
dictionary = super(ResultDoRiskAssessment, self).to_dictionary()
if self.category is not None:
dictionary['category'] = self.category
if self.result is not None:
dictionary['result'] = self.result
if self.retaildecisions_cc_fraud_check_output is not None:
dictionary['retaildecisionsCCFraudCheckOutput'] = self.retaildecisions_cc_fraud_check_output.to_dictionary()
if self.validation_bank_account_output is not None:
dictionary['validationBankAccountOutput'] = self.validation_bank_account_output.to_dictionary()
return dictionary
def from_dictionary(self, dictionary):
super(ResultDoRiskAssessment, self).from_dictionary(dictionary)
if 'category' in dictionary:
self.category = dictionary['category']
if 'result' in dictionary:
self.result = dictionary['result']
if 'retaildecisionsCCFraudCheckOutput' in dictionary:
if not isinstance(dictionary['retaildecisionsCCFraudCheckOutput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['retaildecisionsCCFraudCheckOutput']))
value = RetailDecisionsCCFraudCheckOutput()
self.retaildecisions_cc_fraud_check_output = value.from_dictionary(dictionary['retaildecisionsCCFraudCheckOutput'])
if 'validationBankAccountOutput' in dictionary:
if not isinstance(dictionary['validationBankAccountOutput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['validationBankAccountOutput']))
value = ValidationBankAccountOutput()
self.validation_bank_account_output = value.from_dictionary(dictionary['validationBankAccountOutput'])
return self
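# Illustrative sketch (not part of the SDK source; the field values below are
# example values taken from the docstrings above): these data objects can be
# rebuilt from a raw response dictionary and serialized back.
#
#   assessment = ResultDoRiskAssessment().from_dictionary(
#       {'category': 'authorizationCheck', 'result': 'accepted'})
#   assert assessment.to_dictionary() == {'category': 'authorizationCheck',
#                                         'result': 'accepted'}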
|
{
"content_hash": "043c25a9d53c3d8422161717540c433e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 135,
"avg_line_length": 46.179245283018865,
"alnum_prop": 0.6947906026557712,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "248d59aa6a08db2c84579acba9e553efa3cb6e77",
"size": "5046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ingenico/connect/sdk/domain/definitions/result_do_risk_assessment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
}
|
import asyncio
import aiomysql
import asynctest
from domopyc.web.switch_service import SwichService
class SwitchServiceTest(asynctest.TestCase):
@asyncio.coroutine
def setUp(self):
self.pool = yield from aiomysql.create_pool(host='127.0.0.1', port=3306,
user='test', password='test', db='test',
loop=self.loop)
with (yield from self.pool) as conn:
cur = yield from conn.cursor()
yield from cur.execute("drop table if EXISTS domopyc_switch")
yield from cur.close()
self.switch_service = SwichService(self.pool)
@asyncio.coroutine
def tearDown(self):
self.pool.close()
yield from self.pool.wait_closed()
@asyncio.coroutine
def test_get_all_no_data(self):
self.assertEqual({'switches': []}, (yield from self.switch_service.get_all()))
@asyncio.coroutine
def test_insert_and_delete_new_switch(self):
yield from self.switch_service.insert('1234567', 'my new switch')
switches = yield from self.switch_service.get_all()
self.assertEqual({'switches': [{'id': '1234567', 'label': 'my new switch', 'state': 0}]}, switches)
yield from self.switch_service.delete('1234567')
self.assertEqual({'switches': []}, (yield from self.switch_service.get_all()))
@asyncio.coroutine
def test_switch_on_off(self):
yield from self.switch_service.insert('1234567', 'my new switch')
yield from self.switch_service.switch('1234567', '1')
switches = yield from self.switch_service.get_all()
self.assertEqual({'switches': [{'id': '1234567', 'label': 'my new switch', 'state': 1}]}, switches)
@asyncio.coroutine
def test_insert_new_switch_bad_id(self):
with self.assertRaises(ValueError):
yield from self.switch_service.insert('123456', 'too short switch id')
with self.assertRaises(ValueError):
yield from self.switch_service.insert('12345678', 'too long switch id')
with self.assertRaises(ValueError):
yield from self.switch_service.insert('ABCDEFG', 'G is not hexadecimal')
|
{
"content_hash": "4b6316056c788f5551258bd431e2b302",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 107,
"avg_line_length": 39.785714285714285,
"alnum_prop": 0.624326750448833,
"repo_name": "bamthomas/DomoPyc",
"id": "6bb0b6b8e39ac843a48b3146f8077424eb328734",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domopyc/web/tests/test_switch_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7218"
},
{
"name": "CSS",
"bytes": "17103"
},
{
"name": "JavaScript",
"bytes": "18910"
},
{
"name": "Python",
"bytes": "76024"
}
],
"symlink_target": ""
}
|
"""pyWWA local module."""
# stdlib
import json
import os
# Local
from .cmdline import parse_cmdline
# Shared configuration
SETTINGS = {}
# Eventually updated by command line parsing
CTX = parse_cmdline([])
# Eventually updated to be a JABBER instance
JABBER = None
def get_table_file(filename):
"""Return file pointer for a given table file."""
testfn = os.path.join(get_basedir(), "tables", filename)
if os.path.isfile(testfn):
return open(testfn, encoding='utf-8')
raise FileNotFoundError(f"could not locate table file {testfn}")
def get_basedir() -> str:
"""Since I am a hack, we need to compute the base folder of this repo."""
thisdir = os.path.dirname(__file__)
# up two folders
return os.path.abspath(os.path.join(thisdir, "../.."))
def load_config() -> dict:
"""Attempt to locate our configuration file."""
testfn = os.path.join(get_basedir(), "settings.json")
if not os.path.isfile(testfn):
return {}
with open(testfn, encoding='utf-8') as fh:
res = json.load(fh)
return res
CONFIG = load_config()
|
{
"content_hash": "cd81cf66c6eaf54d47644abf0d76c93a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 26.023809523809526,
"alnum_prop": 0.6614821591948765,
"repo_name": "akrherz/pyWWA",
"id": "e3d157cc55774c52229e5abef002e6d2065a3f94",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parsers/pywwa/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207659"
},
{
"name": "Shell",
"bytes": "4472"
}
],
"symlink_target": ""
}
|
import json
import sys
from datetime import datetime
from optparse import OptionParser
def read_benchmark(fname):
blob = json.load(open(fname))
tstamp = blob['timestamp']
t = datetime.fromtimestamp(tstamp).strftime('%Y-%m-%d')
label = "%s (%s)" % (blob['commit'][:6], t)
tests = blob['tests']
return tstamp, label, tests
def fmt_col(id, label, dtype):
return "{id: '%s', label: '%s', type: '%s'}" % (id,label,dtype)
def fmt_row(vals):
frow_elements = ["{v: '%s'}" % vals[0]]
baseline = float(vals[1])
for v in vals[2:]:
frow_elements.append("{v: %f, f: '%d/%d ms'}" % (v/baseline, v, baseline))
return "{c:[%s]}" % ', '.join(frow_elements)
def combine_to_dataTable(data, hot=True):
data.sort() # sorts by timestamp
columns = [fmt_col('bench','Test','string')]
for _,label,_ in data[1:]:
columns.append(fmt_col(label[:6],label,'number'))
test_order = sorted(data[0][2])
rows = [[str(test)] for test in test_order]
idx = int(hot) # 0 if false, 1 if true
for _,_,test_data in data:
#XXX: only works when all tests are the same
for i,test_name in enumerate(test_order):
rows[i].append(test_data[test_name][idx])
# produce the dataTable
fcols = ', '.join(columns)
frows = ', '.join(map(fmt_row, rows))
return "{cols: [%s], rows: [%s]}" % (fcols,frows)
def write_html(title, dataTable, fh=sys.stdout):
print >>fh, '''<html><head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
var data = new google.visualization.DataTable('''
print >>fh, dataTable
print >>fh, ''');
var options = {
title: '%s',
hAxis: {title: 'Test', showTextEvery: 1,
slantedText: true, slantedTextAngle: 50},
vAxis: {title: 'Time/Baseline'}
};''' % title
print >>fh, '''(new google.visualization.ColumnChart(
document.getElementById('chart_div'))).draw(data, options);
}
</script></head><body>
<div id="chart_div" style="width: 1800px; height: 800px;"></div>
</body></html>'''
if __name__ == '__main__':
op = OptionParser()
op.add_option('-c', '--cold', action='store_true', default=False, help="use cold-start times instead of hot")
opts, args = op.parse_args()
if len(args) < 2:
op.error("Usage: %s baseline.json [benchmark.json]+" % sys.argv[0])
data = map(read_benchmark, args)
table = combine_to_dataTable(data, hot=(not opts.cold))
temp = 'cold' if opts.cold else 'hot'
write_html('Benchmarks vs %s - %s' %(data[0][1],temp), table)
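# Example invocation (illustrative; the JSON file names are placeholders).
# write_html() prints the chart page to stdout, so redirect it to a file:
#
#   python speed_plot.py baseline.json run1.json run2.json > speed.html
#   python speed_plot.py --cold baseline.json run1.json > speed_cold.html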
|
{
"content_hash": "c8a5f72ec7524de548936ba61f25de3b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 111,
"avg_line_length": 35.986486486486484,
"alnum_prop": 0.6214795343597447,
"repo_name": "netopyr/doppio",
"id": "98131cc560fba2316f2d8c13ec6441054c14d25a",
"size": "2663",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/speed_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "221198"
},
{
"name": "JavaScript",
"bytes": "2655"
},
{
"name": "TypeScript",
"bytes": "666721"
}
],
"symlink_target": ""
}
|
import sys, os, time
import doctest
sys.path.append("..")
from mas.multiagent import *
def test_sim_basic() :
'''
>>> test_sim_basic()
Initialization.
Simulator: <<multiagent.Simulator has_driver=1>>
'''
print("Initialization.")
driver = Driver(context = Context(), schedule = Schedule())
sim = Simulator(driver = driver)
print("Simulator: %s" % sim.info())
def test_sim_sim() :
'''
>>> test_sim_sim()
Initialization.
Simulator: <<multiagent.Simulator has_driver=1>>
Simulate.
'''
print("Initialization.")
driver = Driver(context = Context(), schedule = Schedule())
sim = Simulator(driver = driver)
print("Simulator: %s" % sim.info())
print("Simulate.")
sim.simulate(limit = 1, filename = "")
if __name__ == '__main__' :
result = doctest.testmod()
print("-" * 50)
print("[Simulator Test] attempted/failed tests: %d/%d" % (result.attempted, result.failed))
|
{
"content_hash": "40e87f8cf3fe7f7db4c57272006736d1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 95,
"avg_line_length": 25.89189189189189,
"alnum_prop": 0.6075156576200418,
"repo_name": "csningli/MultiAgent",
"id": "0d72d44a880647be4a8670e99775558fed31307e",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_simulator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "142545"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
}
|
'''
This module contains the classes which represent XCB data types.
'''
from xcbgen.expr import Field, Expression
import __main__
class Type(object):
'''
Abstract base class for all XCB data types.
Contains default fields, and some abstract methods.
'''
def __init__(self, name):
'''
Default structure initializer. Sets up default fields.
Public fields:
name is a tuple of strings specifying the full type name.
size is the size of the datatype in bytes, or None if variable-sized.
nmemb is 1 for non-list types, None for variable-sized lists, otherwise number of elts.
booleans for identifying subclasses, because I can't figure out isinstance().
'''
self.name = name
self.size = None
self.nmemb = None
self.resolved = False
# Screw isinstance().
self.is_simple = False
self.is_list = False
self.is_expr = False
self.is_container = False
self.is_reply = False
self.is_union = False
self.is_pad = False
self.is_switch = False
self.is_bitcase = False
def resolve(self, module):
'''
Abstract method for resolving a type.
This should make sure any referenced types are already declared.
'''
raise Exception('abstract resolve method not overridden!')
def out(self, name):
'''
Abstract method for outputting code.
These are declared in the language-specific modules, and
there must be a dictionary containing them declared when this module is imported!
'''
raise Exception('abstract out method not overridden!')
def fixed_size(self):
'''
Abstract method for determining if the data type is fixed-size.
'''
raise Exception('abstract fixed_size method not overridden!')
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto, enum=None):
'''
Default method for making a data type a member of a structure.
Extend this if the data type needs to add an additional length field or something.
module is the global module object.
complex_type is the structure object.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto, enum)
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(complex_type.fields):
if field == _placeholder_byte:
complex_type.fields[idx] = new_field
return
complex_type.fields.append(new_field)
class SimpleType(Type):
'''
Derived class which represents a cardinal type like CARD32 or char.
Any type which is typedef'ed to cardinal will be one of these.
Public fields added:
none
'''
def __init__(self, name, size):
Type.__init__(self, name)
self.is_simple = True
self.size = size
self.nmemb = 1
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['simple']
# Cardinal datatype globals. See module __init__ method.
tcard8 = SimpleType(('u8',), 1)
tcard16 = SimpleType(('u16',), 2)
tcard32 = SimpleType(('u32',), 4)
tint8 = SimpleType(('i8',), 1)
tint16 = SimpleType(('i16',), 2)
tint32 = SimpleType(('i32',), 4)
tchar = SimpleType(('c_char',), 1)
tfloat = SimpleType(('f32',), 4)
tdouble = SimpleType(('f64',), 8)
class Enum(SimpleType):
'''
Derived class which represents an enum. Fixed-size.
Public fields added:
values contains a list of (name, value) tuples. value is empty, or a number.
bits contains a list of (name, bitnum) tuples. items only appear if specified as a bit. bitnum is a number.
'''
def __init__(self, name, elt):
SimpleType.__init__(self, name, 4)
self.values = []
self.bits = []
self.doc = None
for item in list(elt):
if item.tag == 'doc':
self.doc = Doc(name, item)
# First check if we're using a default value
if len(list(item)) == 0:
self.values.append((item.get('name'), ''))
continue
# An explicit value or bit was specified.
value = list(item)[0]
if value.tag == 'value':
self.values.append((item.get('name'), value.text))
elif value.tag == 'bit':
self.values.append((item.get('name'), '%u' % (1 << int(value.text, 0))))
self.bits.append((item.get('name'), value.text))
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['enum']
class ListType(Type):
'''
Derived class which represents a list of some other datatype. Fixed- or variable-sized.
Public fields added:
member is the datatype of the list elements.
parent is the structure type containing the list.
expr is an Expression object containing the length information, for variable-sized lists.
'''
def __init__(self, elt, member, *parent):
Type.__init__(self, member.name)
self.is_list = True
self.member = member
self.parents = list(parent)
if elt.tag == 'list':
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
elif elt.tag == 'valueparam':
self.expr = Expression(elt, self)
self.size = member.size if member.fixed_size() else None
self.nmemb = self.expr.nmemb if self.expr.fixed_size() else None
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto, enum=None):
if not self.fixed_size():
# We need a length field.
            # Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False, enum)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto, enum)
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.expr.resolve(module, self.parents)
# Find my length field again. We need the actual Field object in the expr.
# This is needed because we might have added it ourself above.
if not self.fixed_size():
for parent in self.parents:
for field in parent.fields:
if field.field_name == self.expr.lenfield_name and field.wire:
self.expr.lenfield = field
break
self.resolved = True
def fixed_size(self):
return self.member.fixed_size() and self.expr.fixed_size()
class ExprType(Type):
'''
Derived class which represents an exprfield. Fixed size.
Public fields added:
expr is an Expression object containing the value of the field.
'''
def __init__(self, elt, member, *parents):
Type.__init__(self, member.name)
self.is_expr = True
self.member = member
self.parents = parents
self.expr = Expression(list(elt)[0], self)
self.size = member.size
self.nmemb = 1
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.resolved = True
def fixed_size(self):
return True
class PadType(Type):
'''
Derived class which represents a padding field.
'''
def __init__(self, elt):
Type.__init__(self, tcard8.name)
self.is_pad = True
self.size = 1
        self.nmemb = 1 if (elt is None) else int(elt.get('bytes'), 0)
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
class ComplexType(Type):
'''
Derived class which represents a structure. Base type for all structure types.
Public fields added:
fields is an array of Field objects describing the structure fields.
'''
def __init__(self, name, elt):
Type.__init__(self, name)
self.is_container = True
self.elt = elt
self.fields = []
self.nmemb = 1
self.size = 0
self.lenfield_parent = [self]
def resolve(self, module):
if self.resolved:
return
pads = 0
enum = None
# Resolve all of our field datatypes.
for child in list(self.elt):
if child.tag == 'pad':
field_name = 'pad' + str(pads)
fkey = 'CARD8'
type = PadType(child)
pads = pads + 1
visible = False
elif child.tag == 'field':
field_name = child.get('name')
enum = child.get('enum')
fkey = child.get('type')
type = module.get_type(fkey)
visible = True
elif child.tag == 'exprfield':
field_name = child.get('name')
fkey = child.get('type')
type = ExprType(child, module.get_type(fkey), *self.lenfield_parent)
visible = False
elif child.tag == 'list':
field_name = child.get('name')
fkey = child.get('type')
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'valueparam':
field_name = child.get('value-list-name')
fkey = 'CARD32'
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'switch':
field_name = child.get('name')
# construct the switch type name from the parent type and the field name
field_type = self.name + (field_name,)
type = SwitchType(field_type, child, *self.lenfield_parent)
visible = True
type.make_member_of(module, self, field_type, field_name, visible, True, False)
type.resolve(module)
continue
else:
# Hit this on Reply
continue
# Get the full type name for the field
field_type = module.get_type_name(fkey)
# Add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False, enum)
# Recursively resolve the type (could be another structure, list)
type.resolve(module)
self.calc_size() # Figure out how big we are
self.resolved = True
def calc_size(self):
self.size = 0
for m in self.fields:
if not m.wire:
continue
if m.type.fixed_size():
self.size = self.size + (m.type.size * m.type.nmemb)
else:
self.size = None
break
def fixed_size(self):
for m in self.fields:
if not m.type.fixed_size():
return False
return True
class SwitchType(ComplexType):
'''
Derived class which represents a List of Items.
Public fields added:
bitcases is an array of Bitcase objects describing the list items
'''
def __init__(self, name, elt, *parents):
ComplexType.__init__(self, name, elt)
self.parents = parents
# FIXME: switch cannot store lenfields, so it should just delegate the parents
self.lenfield_parent = list(parents) + [self]
# self.fields contains all possible fields collected from the Bitcase objects,
# whereas self.items contains the Bitcase objects themselves
self.bitcases = []
self.is_switch = True
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
def resolve(self, module):
if self.resolved:
return
# pads = 0
parents = list(self.parents) + [self]
# Resolve all of our field datatypes.
for index, child in enumerate(list(self.elt)):
if child.tag == 'bitcase':
field_name = child.get('name')
if field_name is None:
field_type = self.name + ('bitcase%d' % index,)
else:
field_type = self.name + (field_name,)
                # use self.parent to indicate the ancestor,
# as switch does not contain named fields itself
type = BitcaseType(index, field_type, child, *parents)
# construct the switch type name from the parent type and the field name
if field_name is None:
type.has_name = False
# Get the full type name for the field
field_type = type.name
visible = True
# add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False)
# recursively resolve the type (could be another structure, list)
type.resolve(module)
inserted = False
for new_field in type.fields:
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(self.fields):
if field == _placeholder_byte:
self.fields[idx] = new_field
inserted = True
break
                    if not inserted:
self.fields.append(new_field)
self.calc_size() # Figure out how big we are
self.resolved = True
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto, enum=None):
if not self.fixed_size():
# We need a length field.
            # Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False, enum)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto, enum)
# size for switch can only be calculated at runtime
def calc_size(self):
pass
    # note: switch is _always_ of variable size, but we indicate here whether
# it contains elements that are variable-sized themselves
def fixed_size(self):
return False
# for m in self.fields:
# if not m.type.fixed_size():
# return False
# return True
class Struct(ComplexType):
'''
Derived class representing a struct data type.
'''
out = __main__.output['struct']
class Union(ComplexType):
'''
Derived class representing a union data type.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_union = True
out = __main__.output['union']
class BitcaseType(ComplexType):
'''
    Derived class representing a bitcase of a switch data type.
'''
def __init__(self, index, name, elt, *parent):
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
ComplexType.__init__(self, name, elts[1:])
self.has_name = True
self.index = 1
self.lenfield_parent = list(parent) + [self]
self.parents = list(parent)
self.is_bitcase = True
def make_member_of(self, module, switch_type, field_type, field_name, visible, wire, auto, enum=None):
'''
register BitcaseType with the corresponding SwitchType
module is the global module object.
complex_type is the structure object.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto, enum)
# We dump the _placeholder_byte if any bitcases are added.
for (idx, field) in enumerate(switch_type.bitcases):
if field == _placeholder_byte:
switch_type.bitcases[idx] = new_field
return
switch_type.bitcases.append(new_field)
def resolve(self, module):
if self.resolved:
return
self.expr.resolve(module, self.parents+[self])
# Resolve the bitcase expression
ComplexType.resolve(self, module)
class Reply(ComplexType):
'''
Derived class representing a reply. Only found as a field of Request.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_reply = True
self.doc = None
for child in list(elt):
if child.tag == 'doc':
self.doc = Doc(name, child)
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
self.fields.append(Field(tcard32, tcard32.name, 'length', False, True, True))
ComplexType.resolve(self, module)
class Request(ComplexType):
'''
Derived class representing a request.
Public fields added:
reply contains the reply datatype or None for void requests.
opcode contains the request number.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.reply = None
self.doc = None
self.opcode = elt.get('opcode')
for child in list(elt):
if child.tag == 'reply':
self.reply = Reply(name, child)
if child.tag == 'doc':
self.doc = Doc(name, child)
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
if module.namespace.is_ext:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'minor_opcode', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
else:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
if self.reply:
self.reply.resolve(module)
out = __main__.output['request']
class Event(ComplexType):
'''
Derived class representing an event data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for eventcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
tmp = elt.get('no-sequence-number')
        self.has_seq = (tmp is None or tmp.lower() == 'false' or tmp == '0')
self.doc = None
for item in list(elt):
if item.tag == 'doc':
self.doc = Doc(name, item)
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
if self.has_seq:
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['event']
class Error(ComplexType):
'''
Derived class representing an error data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for errorcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'error_code', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['error']
class Doc(object):
'''
Class representing a <doc> tag.
'''
def __init__(self, name, elt):
self.name = name
self.description = None
self.brief = 'BRIEF DESCRIPTION MISSING'
self.fields = {}
self.errors = {}
self.see = {}
self.example = None
for child in list(elt):
text = child.text if child.text else ''
if child.tag == 'description':
self.description = text.strip()
if child.tag == 'brief':
self.brief = text.strip()
if child.tag == 'field':
self.fields[child.get('name')] = text.strip()
if child.tag == 'error':
self.errors[child.get('type')] = text.strip()
if child.tag == 'see':
self.see[child.get('name')] = child.get('type')
if child.tag == 'example':
self.example = text.strip()
_placeholder_byte = Field(PadType(None), tcard8.name, 'pad0', False, True, False)
|
{
"content_hash": "6a57c7eb2eb45dad51d15f70f06d01c5",
"timestamp": "",
"source": "github",
"line_count": 682,
"max_line_length": 115,
"avg_line_length": 34.43255131964809,
"alnum_prop": 0.5698164629732146,
"repo_name": "maseek/rust-xcb",
"id": "f3ad41f9e56079b8c34dd0f5aa7557adff26a9f6",
"size": "23483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xcbgen/xtypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97496"
},
{
"name": "Rust",
"bytes": "1995387"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
}
|
"""Helper utilities for AOT compilation."""
import collections
import copy
import os
import re
import shlex
from typing import List, Tuple
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import sysconfig as sysconfig_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
try:
from tensorflow.python import _pywrap_tfcompile # pylint: disable=g-import-not-at-top
except ImportError as e:
_pywrap_tfcompile_import_error = ImportError(
'Unable to import _pywrap_tfcompile; you must build TensorFlow '
'with XLA. You may need to build tensorflow with flag '
'--define=with_xla_support=true. Original error: {}'.format(str(e)))
else:
_pywrap_tfcompile_import_error = None
_READ_ONLY_VARIABLE_OPS = (
'ReadVariableOp',
'IsVariableInitializedOp',
'ResourceGather',
'ResourceGatherNd',
'VariableShape',
)
_PASS_THROUGH_VARIABLE_OPS = ('Identity', 'IdentityN')
def _shlex_quote(s):
return shlex.quote(s)
def _sysconfig_module():
"""Load tf.sysconfig if available and working (i.e., inside a pip package)."""
try:
_ = sysconfig_lib.get_include()
except (ImportError, ValueError):
# ValueError may come from saved_model_cli_test trying to enable
# eager mode twice.
return None
return sysconfig_lib
def _parse_tensor_name(name):
"""Convert a tensor name like 'tensor:0' into a tuple ('tensor', 0)."""
if ':' in name and not name.endswith(':'):
node_name = name[:name.rfind(':')]
output_slot = int(name[name.rfind(':') + 1:])
return node_name, output_slot
else:
return name, None
_XLA_MAKEFILE_TEMPLATE = """
INC = -I{tensorflow_includes}
LIB = -L{compiled_dir}
CXXFLAGS = {cxx_flags}
"""
def _xla_makefile_string(output_prefix):
"""Returns a Makefile string with variables for using XLA binary object files.
Attempts to identify the right include header paths when run from either
an installed TensorFlow pip package, or from bazel run.
Args:
output_prefix: A string containing the output prefix for the XLA AOT
compiled header + object files.
Returns:
A string containing a filled out `_XLA_MAKEFILE_TEMPLATE`.
"""
sysconfig = _sysconfig_module()
output_dir, _ = os.path.split(output_prefix)
if sysconfig:
tensorflow_includes = _shlex_quote(sysconfig.get_include())
else:
# Try hard to find the real source directory if this is a local bazel run.
if os.path.islink(__file__):
this_file = __file__
while os.path.islink(this_file):
this_file = os.readlink(this_file)
base = os.path.realpath(
os.path.join(os.path.dirname(this_file), *([os.path.pardir] * 3)))
else:
try:
base = test.test_src_dir_path('')
except KeyError: # Can't find TEST_SRCDIR in environment path.
base = os.path.realpath(
os.path.join(os.path.dirname(__file__), *([os.path.pardir] * 3)))
expected_header = os.path.join(
base, 'tensorflow', 'compiler', 'tf2xla', 'xla_compiled_cpu_function.h')
if not os.path.exists(expected_header):
logging.error(
'Could not find includes path. Missing file: {}'
.format(expected_header))
tensorflow_includes = base
return _XLA_MAKEFILE_TEMPLATE.format(
tensorflow_includes=tensorflow_includes,
compiled_dir=_shlex_quote(output_dir),
cxx_flags='-D_GLIBCXX_USE_CXX11_ABI={}'.format(
versions.CXX11_ABI_FLAG))
def _get_variable_nodes_from_graph_def(graph_def):
"""Get the list of Variable nodes from `graph_def`.
Args:
graph_def: An instance of `GraphDef`. This GraphDef *must*
have already been optimized by Grappler. In particular, function
inlining must have already happened.
Returns:
A dict mapping string names of variables to tuples `(node_def, modified)`,
where `node_def` is the `NodeDef` corresponding to variable, and `modified`
is a python bool describing whether the variable is modified during runtime.
"""
variables = [n for n in graph_def.node if n.op == 'VarHandleOp']
variable_name_map = dict((n.name, n) for n in variables)
child_map = collections.defaultdict(lambda: [])
for n in graph_def.node:
for inp in n.input:
if not inp.startswith('^'):
child_map[inp].append(n)
variables = {}
for (v_name, v_node) in variable_name_map.items():
queue = list(child_map[v_name])
processed = set([])
while queue:
n_current = queue.pop()
if n_current.name in processed:
continue
processed.add(n_current.name)
if n_current.op in _PASS_THROUGH_VARIABLE_OPS:
children = child_map.get(n_current.name, [])
queue.extend(children)
elif n_current.op not in _READ_ONLY_VARIABLE_OPS:
variables[v_name] = (v_node, True)
queue = []
if v_name not in variables:
variables[v_name] = (v_node, False)
return variables
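# Illustrative sketch of the return value (hypothetical variable names):
#   {'dense/kernel': (<VarHandleOp NodeDef>, False),  # only reached via read-only ops
#    'global_step':  (<VarHandleOp NodeDef>, True)}   # reached by some mutating op
# A variable is flagged as modified as soon as any op other than the read-only
# or pass-through ops listed above is reachable from its handle.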
def _prune_removed_feed_nodes(signature_def, graph_def):
"""Identify the inputs in the signature no longer in graph_def, prune them.
Args:
signature_def: A `SignatureDef` instance.
graph_def: A `GraphDef` instance.
Returns:
A new pruned `SignatureDef`.
"""
node_names = set([n.name for n in graph_def.node])
new_signature_def = meta_graph_pb2.SignatureDef()
new_signature_def.CopyFrom(signature_def)
for (k, v) in signature_def.inputs.items():
tensor_name, _ = _parse_tensor_name(v.name)
if tensor_name not in node_names:
logging.warn(
'Signature input key \'{}\', tensor name \'{}\', has been pruned '
'while freezing the graph. Removing it from the compiled signatures.'
.format(k, tensor_name))
del new_signature_def.inputs[k]
return new_signature_def
def freeze_model(checkpoint_path: str,
meta_graph_def: meta_graph_pb2.MetaGraphDef,
output_prefix: str, signature_def_key: str,
variables_to_feed: List[str]) -> Tuple[str, str]:
"""Freeze a `MetaGraphDef` in preparation for tfcompile`.
The graph is always optimized with grappler, and optionally (by default)
variables are frozen as constants, before compilation happens.
Args:
checkpoint_path: Python string. Path to checkpoints/variables.
meta_graph_def: Instance of `MetaGraphDef`.
output_prefix: Python string. Path prefix for outputs.
signature_def_key: String, the signature_def to use in the SavedModel.
variables_to_feed: A list of strings, the variables that will be fed by the
user; these won't be frozen. If `None`, then we will extract all the
variables in the graph and mark them as to-feed. The default behavior is
an empty tuple: all variables must be frozen.
Returns:
a pair containing the path to the frozen model and the path to the config.
Raises:
RuntimeError: If tensorflow was not built with XLA.
ImportError: If tensorflow was built with XLA but there was another
issue importing the tfcompile python wrapper.
ValueError: If `meta_graph_def.signature_def[signature_def_key]` is
missing or has empty outputs.
"""
if _pywrap_tfcompile_import_error:
raise _pywrap_tfcompile_import_error # pylint: disable=raising-bad-type
signature_def_map = meta_graph_def.signature_def
if signature_def_key not in signature_def_map:
raise ValueError(
f"Unable to find signature_def_key '{signature_def_key}' in signature "
'def map of `meta_graph_def`. Available keys: '
f'{list(signature_def_map.keys())}')
signature_def = signature_def_map[signature_def_key]
if not signature_def.outputs:
raise ValueError(
f'Signature key {signature_def_key} must have outputs, but saw none:\n'
f'{str(signature_def)}')
file_io.recursive_create_dir(output_prefix)
if logging.get_verbosity() >= logging.INFO:
original_graph_def_location = os.path.join(output_prefix,
'original_graph.pb')
with file_io.FileIO(original_graph_def_location, 'wb') as graph_writer:
graph_writer.write(meta_graph_def.graph_def.SerializeToString())
# This updates graph_def in place.
_replace_input_placeholders_with_default_values(
meta_graph_def.graph_def, signature_def)
graph_def = _optimize_graph(meta_graph_def, signature_def)
all_variables = _get_variable_nodes_from_graph_def(graph_def)
if variables_to_feed is None:
variable_nodes_to_feed = list(all_variables.values())
else:
not_in_graph = set(variables_to_feed).difference(list(all_variables))
if not_in_graph:
raise ValueError('Asked to feed variables that were not found in graph: '
f'{not_in_graph}. Variables contained in the graph: '
f'{list(all_variables)}')
variable_nodes_to_feed = [
all_variables[name] for name in variables_to_feed
]
if logging.get_verbosity() >= logging.INFO:
prefrozen_graph_def_location = os.path.join(output_prefix,
'prefrozen_graph.pb')
with file_io.FileIO(prefrozen_graph_def_location, 'wb') as graph_writer:
graph_writer.write(graph_def.SerializeToString())
# Load the Variables so that we can freeze the graph.
with session.Session(graph=ops_lib.Graph()) as sess:
restorer = saver_lib.import_meta_graph(meta_graph_def, clear_devices=True)
if restorer is not None:
restorer.restore(sess, checkpoint_path)
graph_def.CopyFrom(
graph_util.convert_variables_to_constants(
sess,
graph_def,
output_node_names=[
_parse_tensor_name(n.name)[0]
for n in signature_def.outputs.values()
],
variable_names_blacklist=[
n.name for n, _ in variable_nodes_to_feed
],
))
signature_def = _prune_removed_feed_nodes(signature_def, graph_def)
frozen_graph_def_location = os.path.join(output_prefix, 'frozen_graph.pb')
config_pbtxt_location = os.path.join(output_prefix, 'config.pbtxt')
logging.info('Writing graph def to: {}'.format(frozen_graph_def_location))
with file_io.FileIO(frozen_graph_def_location, 'wb') as graph_writer:
graph_writer.write(graph_def.SerializeToString())
config = _signature_to_tf2xla_config(
signature_def, variable_nodes_to_feed=variable_nodes_to_feed)
logging.info('Writing config_pbtxt to: {}'.format(config_pbtxt_location))
with file_io.FileIO(config_pbtxt_location, mode='w') as config_writer:
config_writer.write(str(config))
return frozen_graph_def_location, config_pbtxt_location
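# Minimal usage sketch (hypothetical paths; assumes the MetaGraphDef was loaded
# elsewhere, e.g. from a SavedModel):
#   frozen_pb, config_pbtxt = freeze_model(
#       checkpoint_path='/tmp/model/variables/variables',
#       meta_graph_def=meta_graph_def,
#       output_prefix='/tmp/frozen_out',
#       signature_def_key='serving_default',
#       variables_to_feed=[])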
def aot_compile_cpu_meta_graph_def(checkpoint_path,
meta_graph_def,
output_prefix,
signature_def_key,
cpp_class,
target_triple,
target_cpu,
variables_to_feed=(),
multithreading=False):
"""Compile a `MetaGraphDef` to header+object files in `output_prefix`.
Use XLA AOT (`tfcompile`) to convert the given meta graph and
signature into a header + object files. Also create an include makefile
that helps identify the appropriate necessary include and library paths
to incorporate these files into your C++ program.
Freezing a graph entails restoring the checkpoint and replacing any inputs and
  variables with constants. If values are fed, those are used, else inputs are
replaced with default all-zero constants. Finally, the graph is pruned and
then optimized with grappler.
  Variables not listed in `variables_to_feed` are embedded as constants
  into the graph and binary objects. Variables that are listed become
  inputs and outputs of the compiled class, and the C++ caller must set
  their values manually.
Args:
checkpoint_path: Python string. Path to checkpoints/variables.
meta_graph_def: Instance of `MetaGraphDef`.
output_prefix: Python string. Path prefix for outputs.
signature_def_key: String, the signature_def to use in the SavedModel.
cpp_class: String, Name of output C++ class.
target_triple: String, LLVM target triple.
target_cpu: String, LLVM target cpu name.
variables_to_feed: A list of strings, the variables that will be fed by the
user; these won't be frozen. If `None`, then we will extract all the
variables in the graph and mark them as to-feed. The default behavior is
an empty tuple: all variables must be frozen.
multithreading: Whether to enable multithreading in the compiled
computation. Note that if using this option, the resulting object files
may have external dependencies on multithreading libraries like nsync.
Raises:
RuntimeError: If tensorflow was not built with XLA.
ImportError: If tensorflow was built with XLA but there was another
issue importing the tfcompile python wrapper.
ValueError: If `meta_graph_def.signature_def[signature_def_key]` is
missing or has empty outputs.
"""
if _pywrap_tfcompile_import_error:
raise _pywrap_tfcompile_import_error # pylint: disable=raising-bad-type
else:
# TODO(ebrevdo): Pipe DebugOptions through tfcompile::Main and pywrap
# so that we can set these directly instead of relying on env vars.
xla_flags = os.environ.get('XLA_FLAGS')
if not xla_flags:
xla_flags = '--xla_cpu_multi_thread_eigen={}'.format(
'true' if multithreading else 'false')
else:
xla_flags += ' --xla_cpu_multi_thread_eigen={}'.format(
'true' if multithreading else 'false')
os.environ['XLA_FLAGS'] = xla_flags
temp_dir = test.get_temp_dir()
file_io.recursive_create_dir(temp_dir)
frozen_graph_def_location, config_pbtxt_location = freeze_model(
checkpoint_path=checkpoint_path,
meta_graph_def=meta_graph_def,
output_prefix=temp_dir,
signature_def_key=signature_def_key,
variables_to_feed=variables_to_feed)
output_dir = os.path.dirname(output_prefix)
file_io.recursive_create_dir(output_dir)
entry_point = re.sub(
'[^0-9a-zA-Z]+', '_',
'__xla_' + output_prefix + '__' + cpp_class)
logging.info('Generating XLA AOT artifacts in: {}'.format(output_dir))
makefile_inc_location = '{}_makefile.inc'.format(output_prefix)
with file_io.FileIO(makefile_inc_location, mode='w') as makefile_writer:
makefile_writer.write(_xla_makefile_string(output_prefix))
output_prefix = _shlex_quote(output_prefix)
_pywrap_tfcompile.Compile(
graph=frozen_graph_def_location,
config=config_pbtxt_location,
cpp_class=cpp_class,
target_triple=target_triple,
target_cpu=target_cpu,
entry_point=entry_point,
out_function_object='{}.o'.format(output_prefix),
out_header='{}.h'.format(output_prefix),
out_metadata_object='{}_metadata.o'.format(output_prefix),
gen_name_to_index=True,
      # ProgramShape isn't uniquified by entry_point.
gen_program_shape=False)
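# Illustrative summary (hypothetical prefix): for output_prefix '/tmp/aot/model',
# the call above produces, per the arguments passed to _pywrap_tfcompile.Compile
# and the makefile writer:
#   /tmp/aot/model.h            - generated header for cpp_class
#   /tmp/aot/model.o            - compiled function object file
#   /tmp/aot/model_metadata.o   - metadata object file
#   /tmp/aot/model_makefile.inc - include/library path hints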
def _optimize_graph(meta_graph_def, signature_def):
"""Optimize `meta_graph_def` using grappler. Returns a `GraphDef`."""
# We need to add a collection called 'train_op' so that grappler
# knows what the outputs are.
new_meta_graph_def = copy.deepcopy(meta_graph_def)
fetch_collection = meta_graph_pb2.CollectionDef()
for tensor_info in (
list(signature_def.inputs.values()) +
list(signature_def.outputs.values())):
fetch_collection.node_list.value.append(tensor_info.name)
new_meta_graph_def.collection_def['train_op'].CopyFrom(fetch_collection)
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.min_graph_nodes = -1 # do not skip small graphs
return tf_optimizer.OptimizeGraph(config, new_meta_graph_def)
def _replace_input_placeholders_with_default_values(graph_def, signature_def):
"""Replace graphdef's `tf.placeholder` input ops with all-zero constants."""
name_to_node_map = dict((n.name, n) for n in graph_def.node)
processed_nodes = set([])
for name, input_ in signature_def.inputs.items():
tensor_name, _ = _parse_tensor_name(input_.name)
if tensor_name in processed_nodes:
continue
processed_nodes.add(tensor_name)
if tensor_name not in name_to_node_map:
raise RuntimeError(
f"Unable to find input signature tensor '{tensor_name}' in optimized "
f'GraphDef. Graph nodes are: {list(name_to_node_map.keys())}')
node = name_to_node_map[tensor_name]
if node.op not in ('Placeholder', 'PlaceholderV2'):
logging.info(
'Tried to convert SavedModel input node \'{}\' from a placeholder, '
'but it doesn\'t look like a placeholder: {}'.format(tensor_name,
node))
continue
shape = tensor_shape.TensorShape(input_.tensor_shape)
if not shape.is_fully_defined():
raise ValueError(
f"Expected fully defined input shape for signature_def '{name}', "
f"tensor name: '{tensor_name}'; but shape is: {shape}.")
temp_graph = ops_lib.Graph()
with temp_graph.as_default():
const = array_ops.zeros(
shape, dtype=input_.dtype, name=tensor_name)
node.CopyFrom(const.op.node_def)
# Sometimes zeros() also creates additional nodes
for op in temp_graph.get_operations():
if op.name == const.op.name: # We just inserted this one.
continue
graph_def.node.append(op.node_def)
name_to_node_map[op.node_def.name] = op.node_def
def _signature_to_tf2xla_config(signature_def, variable_nodes_to_feed):
"""Convert `signature_def` to tf2xla config. Returns a `tf2xla.Config` proto.
Args:
signature_def: Instance of `SignatureDef`.
variable_nodes_to_feed: List of tuples of form `(node_def, modified)`
corresponding to VarHandleOp, and a boolean `modified` that describes
whether the variable was modified during execution.
Returns:
An instance of `tf2xla.Config` proto.
Raises:
RuntimeError: If TensorFlow was not compiled with XLA.
"""
from tensorflow.compiler.tf2xla import tf2xla_pb2 # pylint: disable=g-import-not-at-top
config = tf2xla_pb2.Config()
tensor_id = tf2xla_pb2.TensorId
for name, input_ in signature_def.inputs.items():
name = name.replace('/', '_')
name = 'feed_{}'.format(name)
(node_name, output_index) = _parse_tensor_name(input_.name)
output_index = int(output_index)
config.feed.append(
tf2xla_pb2.Feed(
id=tensor_id(node_name=node_name, output_index=output_index),
name=name,
type=input_.dtype,
shape=input_.tensor_shape))
for name, output_ in signature_def.outputs.items():
name = name.replace('/', '_')
name = 'fetch_{}'.format(name)
(node_name, output_index) = _parse_tensor_name(output_.name)
output_index = int(output_index)
config.fetch.append(
tf2xla_pb2.Fetch(
id=tensor_id(node_name=node_name, output_index=output_index),
name=name,
type=output_.dtype,
shape=output_.tensor_shape))
for (node, modified) in variable_nodes_to_feed:
name = node.name.replace('/', '_')
name = 'param_{}'.format(name)
config.variable.append(
tf2xla_pb2.Variable(
node_name=node.name,
name=name,
type=node.attr['dtype'].type,
shape=node.attr['shape'].shape,
readonly=not modified))
return config
|
{
"content_hash": "e7a5b062db31280952361235c6997626",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 90,
"avg_line_length": 39.783889980353635,
"alnum_prop": 0.6701728395061728,
"repo_name": "gautam1858/tensorflow",
"id": "4ca907b091e993ec35ad49383d9c1e1868ac4d0c",
"size": "20939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/tools/saved_model_aot_compile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "47492"
},
{
"name": "C",
"bytes": "1129549"
},
{
"name": "C#",
"bytes": "13496"
},
{
"name": "C++",
"bytes": "116904214"
},
{
"name": "CMake",
"bytes": "165809"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "341994"
},
{
"name": "Go",
"bytes": "2052513"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1053827"
},
{
"name": "JavaScript",
"bytes": "5772"
},
{
"name": "Jupyter Notebook",
"bytes": "787371"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "9549263"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "180638"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Pawn",
"bytes": "5336"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "43775271"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "7854"
},
{
"name": "Shell",
"bytes": "566970"
},
{
"name": "Smarty",
"bytes": "89664"
},
{
"name": "SourcePawn",
"bytes": "8509"
},
{
"name": "Starlark",
"bytes": "6897556"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from ..mesh import TVTKBaseInterface
def test_TVTKBaseInterface_inputs():
input_map = dict(ignore_exception=dict(nohash=True,
usedefault=True,
),
)
inputs = TVTKBaseInterface.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
|
{
"content_hash": "0b6071c122e40ae7719d985a5dd83dcd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 28.2,
"alnum_prop": 0.6761229314420804,
"repo_name": "mick-d/nipype",
"id": "d6e38722fe2e31d5c90933814fdf499bbcf91ea7",
"size": "477",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/algorithms/tests/test_auto_TVTKBaseInterface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import cgi
import logging
import os
import urllib
log = logging.getLogger(__name__)
class Request(object):
"""
    Request object. Describes the request that has been sent in to be
processed.
@param req: the underlying request object from the http server.
@param path: the piece of the path that has already been processed.
The Request object is mostly a data dictionary that contains the following
user-accessible pieces.
path: The entire path - everything to the left of the / after the hostname
in the url.
unparsedPath: The path that is still to be processed.
basePath: the parsed path.
baseUrl: the basePath with the protocol, port and host prepended.
host: the host.
method: the http method that was used - e.g. GET, POST, DELETE, PUT.
headers: request headers that were sent.
GET: any query string passed in, stored in a dictionary
POST: any query string passed in via the request body
Beyond that, request objects also have the following methods:
url, read, _getReadFd, getContentLength.
Url allows you to construct a url that contains the appropriate path
for this request. Read allows you to read the body of the request, if it
has not already been read in as a part of creating the POST field.
_getReadFd allows you to control the reading directly and getContentLength
tells you how much data is expected to be uploaded.
"""
# The root controller currently serving this request. This is set
# externally by the handler.
rootController = None
# The URL prefix at which the controller appears to be rooted.
baseUrl = None
# The path without any prefix or query arguments.
path = None
# Same as above, but elements are removed from the left after a controller
# is assigned to the request. In other words, this is the unstructured
# remainder of the path.
unparsedPath = None
# The full, (approximately) original URL of this request.
thisUrl = None
# Other interesting attributes
headers = None
method = None
remote = None
# Query arguments
GET = POST = None
def __init__(self, req, pathPrefix=''):
self._req = req
rawBase, rawPath = self._getRawPath()
# Normalize and de-prefix the path
path = rawPath
if path.startswith(pathPrefix):
path = path[len(pathPrefix):]
else:
log.warning("Path %r does not start with specified prefix %r",
path, pathPrefix)
if path.startswith('/'):
path = path[1:]
# Parse and remove query arguments.
self.path, self.GET = self._splitQuery(path)
self.unparsedPath = self.path
self.baseUrl = rawBase + pathPrefix
self.thisUrl = rawBase + rawPath
# Fill out the rest of the attributes (headers, method, remote, etc.)
self._setProperties()
if self.getContentLength():
self.POST = self._getPostData()
else:
self.POST = {}
# If the method was passed in the URL as ?_method=GET, then override
# the request's method
if '_method' in self.GET:
self.method = self.GET.pop('_method')
#log.info("Request:\n" + "\n".join(" %s: %r" % x for x in self.__dict__.items()))
def _setProperties(self):
"Fill out extra attributes from the request."
raise NotImplementedError()
def _getRawPath(self):
"Return the current URL of the request, split into host and path."
raise NotImplementedError()
def _getReadFd(self):
"""
Returns a file descriptor for the socket that contains the current
request.
"""
raise NotImplementedError()
def getContentLength(self):
"""
Returns the expected content length to be read from the current
request.
"""
return int(self.headers.get('content-length') or 0)
def _getPostData(self):
"""
Internal. Reads in the body from the current request and converts
it into a form dictionary, which it returns. Only applies if the
content type for the
request is multipart/form-data or application/x-www-form-urlencoded.
"""
# cgi will read the body when it doesn't recognize the content type
ctypes = set(['multipart/form-data',
'application/x-www-form-urlencoded'])
contentType = self.headers.get('content-type', None)
if contentType not in ctypes:
return {}
fs = cgi.FieldStorage(self._getReadFd(), self.headers,
environ = {'REQUEST_METHOD' : self.method})
d = {}
for key in fs.keys():
d[key] = fs.getvalue(key)
return d
@staticmethod
def _splitQuery(path):
"""
Split off any query arguments (GET) from C{path}. Returns the path sans
query, and a dictionary of the parsed arguments.
"""
path, query = urllib.splitquery(path)
args = {}
if query:
# Force FieldStorage to parse the query string for us. We need to
# manufacture a Content-Type that points cgi to the query instead
# of the body
# We use an rfc822.Message instead of a dictionary because of the
# case-insensitive nature of the headers
headers = cgi.rfc822.Message(cgi.StringIO(
'Content-Type: application/x-www-form-urlencoded'))
fs = cgi.FieldStorage(fp = None,
headers = headers,
environ = { 'REQUEST_METHOD' : 'GET',
'QUERY_STRING' : query})
for key in fs.keys():
args[key] = fs.getvalue(key)
return path, args
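    # Illustrative behaviour sketch (hypothetical path and query):
    #   Request._splitQuery('users/12?fields=name&sort=asc')
    #   -> ('users/12', {'fields': 'name', 'sort': 'asc'})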
def url(self, location, *args, **kw):
"""
Takes a location as described by the url dict entries in the root
controller for the request. Traverse controllers building up the
url that is required to get there. If more parameters are presented
than there are location components, the additional parameters will be
appended on as sub directories.
The final position arg may be a tuple instead of a string, in which
case it will be converted into a querystring.
        @param baseUrl: allows a different base URL to be used as the
        starting point. This may be needed if you want to switch from http
        to https, for example. Keyword only.
"""
root = self.rootController
params = list(args)
baseUrl = kw.pop('baseUrl', None)
if baseUrl is None:
baseUrl = self.baseUrl
if baseUrl.endswith('/'):
baseUrl = baseUrl[:-1]
url = [baseUrl]
if location:
# traverse controllers, adding in model parameter
# as needed.
for part in location.split('.'):
if root.modelName:
url.append(_encode(params[0]))
params = params[1:]
url.append(_encode(part))
# update what we consider "root" as we traverse the tree.
root = root.urls[part]
if params:
for param in params:
if isinstance(param, (list, tuple)):
# don't create new entry because we don't want an additional
# / before the ? on the end.
url[-1] += _createQuerystring(param)
else:
url.append(_encode(param))
elif getattr(root, 'modelName', None):
# no model or we're getting the index.
url.append('')
return '/'.join(url)
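    # Illustrative sketch (hypothetical controller layout): assuming the root
    # controller has no modelName, its urls map contains 'users' -> a
    # UsersController, and that controller defines a modelName (so it expects
    # a model id), then
    #   req.url('users.images', 42)
    # would build something like '<baseUrl>/users/42/images'.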
def _encode(param):
"""
Ensures the parameter is url-safe.
"""
if isinstance(param, unicode):
return urllib.quote(param.encode('utf8'))
return urllib.quote(param)
def _createQuerystring(query_tuples):
"""
Given a list of (k,v) query tuples, will convert them into a query string
to be used in a url.
"""
return "?" + ("&".join( "%s=%s" %
(k, _encode(v)) for (k, v) in query_tuples))
|
{
"content_hash": "661cd12edcdb35cf6eb02f9266b2b54a",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 90,
"avg_line_length": 35.70995670995671,
"alnum_prop": 0.6001939629045945,
"repo_name": "sassoftware/restlib",
"id": "8030653700bab7d2857291c9a9d1d59f8a1b753d",
"size": "8836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restlib/http/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5778"
},
{
"name": "Python",
"bytes": "69680"
},
{
"name": "Shell",
"bytes": "1963"
}
],
"symlink_target": ""
}
|
import hug
@hug.get('/image.png', output=hug.output_format.png_image)
def image():
return '../logo.png'
|
{
"content_hash": "898473eb70600c636b5bb36eb389559f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.6636363636363637,
"repo_name": "giserh/hug",
"id": "1235809d672da43059f51904d00645aca695ec9e",
"size": "110",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/image_serve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101797"
},
{
"name": "Shell",
"bytes": "1597"
}
],
"symlink_target": ""
}
|
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
TEMPLATE_DEBUG = False
# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = ["mysana.com"]
# Cache the templates in memory for speed-up
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0].update({"APP_DIRS": False})
# Define STATIC_ROOT for the collectstatic command
STATIC_ROOT = join(BASE_DIR, '..', 'site', 'static')
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
|
{
"content_hash": "b74335a368b563fc599dbb7b8bc5d6ba",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 90,
"avg_line_length": 26.6984126984127,
"alnum_prop": 0.5600475624256838,
"repo_name": "triump0870/mysana",
"id": "260c853fdf91d39ad4668acb1b3d754d66652326",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mysana/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3378"
},
{
"name": "HTML",
"bytes": "27777"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "51120"
},
{
"name": "Shell",
"bytes": "14230"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
import logging
import errno
import argparse
def clone(in_path, new_path):
names = os.listdir(in_path)
if not os.path.exists(new_path): #create the path if it doesn't exist
os.makedirs(new_path)
for name in names:
in_path_name = os.path.join(in_path, name)
new_path_name = os.path.join(new_path, name)
if os.path.isdir(in_path_name):
clone(in_path_name, new_path_name)
else:
touch(new_path_name)
def touch(path_file):
try:
# open file in append mode in case it exists, then write an empty string
with open(path_file, "a") as f:
f.write("")
except os.error:
# create the directory, if it does not exist
os.makedirs(os.path.dirname(path_file))
with open(path_file, "a") as f:
f.write("")
def main():
# set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# set up argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Input directory to clone.')
parser.add_argument('-o', '--output', help='Directory to clone into.')
args = parser.parse_args()
    if args.input and args.output:
        clone(args.input, args.output)
else:
parser.print_help()
sys.exit(1)
if __name__ == "__main__":
main()
|
{
"content_hash": "aae0fb050379333611b1cd3a72441b3d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 86,
"avg_line_length": 28.615384615384617,
"alnum_prop": 0.5934139784946236,
"repo_name": "samcheck/Scripts",
"id": "2368098b5f1db7b2b01b542ff3dd21a598d4d0ce",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3/paper_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32254"
},
{
"name": "Shell",
"bytes": "6565"
}
],
"symlink_target": ""
}
|
import random
import numpy
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", numpy.ndarray, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=100)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
return sum(individual),
def cxTwoPointCopy(ind1, ind2):
"""Execute a two points crossover with copy on the input individuals. The
copy is required because the slicing in numpy returns a view of the data,
    which leads to self-overwriting in the swap operation. It prevents
::
>>> import numpy
>>> a = numpy.array((1,2,3,4))
    >>> b = numpy.array((5,6,7,8))
>>> a[1:3], b[1:3] = b[1:3], a[1:3]
>>> print(a)
[1 6 7 4]
>>> print(b)
[5 6 7 8]
"""
size = len(ind1)
cxpoint1 = random.randint(1, size)
cxpoint2 = random.randint(1, size - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \
= ind2[cxpoint1:cxpoint2].copy(), ind1[cxpoint1:cxpoint2].copy()
return ind1, ind2
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", cxTwoPointCopy)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
def main():
random.seed(64)
pop = toolbox.population(n=300)
# Numpy equality function (operators.eq) between two arrays returns the
# equality element wise, which raises an exception in the if similar()
# check of the hall of fame. Using a different equality function like
# numpy.array_equal or numpy.allclose solve this issue.
hof = tools.HallOfFame(1, similar=numpy.array_equal)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40, stats=stats,
halloffame=hof)
return pop, stats, hof
if __name__ == "__main__":
main()
|
{
"content_hash": "13327bdbf0655140f5f31b8cc16a0e8d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 94,
"avg_line_length": 31.759493670886076,
"alnum_prop": 0.6612196094061379,
"repo_name": "GrimRanger/GeneticAlgorithm",
"id": "e5dc03e99aa02d03908bddbe273f781f9e9f4f72",
"size": "3203",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "helps/deap/deap-master/examples/ga/onemax_numpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48558"
},
{
"name": "C++",
"bytes": "24037"
},
{
"name": "Java",
"bytes": "15591"
},
{
"name": "Makefile",
"bytes": "3143"
},
{
"name": "Python",
"bytes": "622361"
},
{
"name": "R",
"bytes": "1032"
}
],
"symlink_target": ""
}
|
import pytest
import srddl.core.exceptions as sce
import srddl.core.nameddict as scnd
class A(scnd.NamedDict):
@scnd.property(flags=['f1', 'f2'])
def _prop1(self, flags):
if flags['f1'] and flags['f2']: return 0
if flags['f1']: return 1
if flags['f2']: return 2
return 3
class B(A):
def _prop1(self, flags):
pass
class C(A):
@scnd.abstractproperty()
def _prop2(self, flags):
pass
class D(C):
def _prop2(self, flags):
pass
class E(C):
def _prop2(self, flags):
return True
class F(C):
class Meta:
init_props = ['prop2']
def __init__(self, prop2):
self._prop2 = prop2
@pytest.mark.parametrize(('attr', 'val'), [
('prop1', 3),
('prop1:f1', 1),
('prop1:f2', 2),
('prop1:f1,f2', 0),
])
def test_property_get(attr, val):
a = A()
assert(a[attr] == val)
def test_unknown_property():
a = A()
with pytest.raises(KeyError):
a['unknwon_prop']
def test_unknown_propflag():
a = A()
with pytest.raises(sce.NamedDictPropertyFlagsError):
a['prop1:funknown']
def test_simple_override():
b = B()
assert(b['prop1'] is None)
def test_abstract_instanciation():
with pytest.raises(sce.NamedDictAbstractPropertyError):
c = C()
@pytest.mark.parametrize(('klass', 'val'), [
(D, None),
(E, True),
])
def test_abstract_success(klass, val):
i = klass()
assert(i['prop2'] == val)
@pytest.mark.parametrize(('val',), [(1,), (2,), (3,)])
def test_abstract_success_constructor(val):
f = F(val)
assert(f['prop2'] is val)
def test_override_with_flags_error():
with pytest.raises(sce.NamedDictPropertyRedefinitionError):
class Failure(A):
@scnd.property(flags=['oops'])
def _prop1(self, flags):
pass
def test_override_to_abstract_error():
with pytest.raises(sce.NamedDictPropertyRedefinitionError):
class Failure(A):
@scnd.abstractproperty()
def _prop1(self, flags):
pass
def test_override_cant_copy():
with pytest.raises(sce.NamedDictPropertyRedefinitionError):
class Failure(C):
@property
def _prop1(self):
pass
|
{
"content_hash": "645a4e0d599e96d44453d8562844fe48",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 63,
"avg_line_length": 23.122448979591837,
"alnum_prop": 0.5820829655781112,
"repo_name": "fmichea/srddl",
"id": "d3c0f889f5c6ad60dc10402be746e9419dbe5330",
"size": "2266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_nameddict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79"
},
{
"name": "Makefile",
"bytes": "224"
},
{
"name": "Python",
"bytes": "135307"
}
],
"symlink_target": ""
}
|
import os
import scandir
from pontoon.base.models import Resource
from pontoon.base.utils import extension_in, first
def is_hidden(path):
"""
Return true if path contains hidden directory.
"""
for p in path.split(os.sep):
if p.startswith('.'):
return True
return False
def is_resource(filename):
"""
Return True if the filename's extension is a supported Resource
format.
"""
return extension_in(filename, Resource.ALLOWED_EXTENSIONS)
def is_source_resource(filename):
"""
Return True if the filename's extension is a source-only Resource
format.
"""
return extension_in(filename, Resource.SOURCE_EXTENSIONS)
def is_asymmetric_resource(filename):
"""
Return True if the filename's extension is an asymmetric Resource
format.
"""
return extension_in(filename, Resource.ASYMMETRIC_FORMATS)
def get_parent_directory(path):
"""
Get parent directory of the path
"""
return os.path.abspath(os.path.join(path, os.pardir))
def uses_undercore_as_separator(directory):
"""
    Return True if the subdirectory names use underscores more often than dashes.
"""
subdirs = os.listdir(directory)
return ''.join(subdirs).count('_') > ''.join(subdirs).count('-')
def directory_contains_resources(directory_path, source_only=False):
"""
Return True if the given directory contains at least one
supported resource file (checked via file extension), or False
otherwise.
:param source_only:
If True, only check for source-only formats.
"""
resource_check = is_source_resource if source_only else is_resource
for root, dirnames, filenames in scandir.walk(directory_path):
# first() avoids checking past the first matching resource.
if first(filenames, resource_check) is not None:
return True
return False
def locale_directory_path(checkout_path, locale_code, parent_directories=None):
"""
Path to the directory where strings for the given locale are
stored.
"""
possible_paths = []
# Check paths that use underscore as locale/country code separator
locale_code_variants = [locale_code, locale_code.replace('-', '_')]
# Optimization for directories with a lot of paths: if parent_directories
# is provided, we simply join it with locale_code and check if path exists
    for parent_directory in parent_directories or []:
for locale in locale_code_variants:
candidate = os.path.join(parent_directory, locale)
if os.path.exists(candidate):
possible_paths.append(candidate)
if not possible_paths:
for root, dirnames, filenames in scandir.walk(checkout_path):
for locale in locale_code_variants:
if locale in dirnames:
possible_paths.append(os.path.join(root, locale))
for possible_path in possible_paths:
if directory_contains_resources(possible_path):
return possible_path
# If locale directory empty (asymmetric formats)
if possible_paths:
return possible_paths[0]
raise IOError('Directory for locale `{0}` not found'.format(
locale_code or 'source'))
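# Illustrative resolution order (hypothetical layout): for locale_code 'pt-BR',
# each entry in parent_directories is first checked for a 'pt-BR' or 'pt_BR'
# subdirectory; if none exists, checkout_path is walked looking for a directory
# with either name, and the first candidate that actually contains resource
# files wins.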
def locale_to_source_path(path):
"""
Return source resource path for the given locale resource path.
Source files for .po files are actually .pot.
"""
if path.endswith('po'):
path += 't'
return path
def source_to_locale_path(path):
"""
Return locale resource path for the given source resource path.
Locale files for .pot files are actually .po.
"""
if path.endswith('pot'):
path = path[:-1]
return path
|
{
"content_hash": "db9790901522aa8ea05bc2bab4b418e9",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 29.165354330708663,
"alnum_prop": 0.6630669546436285,
"repo_name": "mastizada/pontoon",
"id": "d175b4239dd51caa0cb2bcee9e0760e03e64dc23",
"size": "3704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/sync/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "116831"
},
{
"name": "HTML",
"bytes": "131060"
},
{
"name": "JavaScript",
"bytes": "472460"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "841704"
},
{
"name": "Shell",
"bytes": "4616"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0014_remove_customer_type'),
]
operations = [
migrations.AddField(
model_name='servicesettings',
name='error_traceback',
field=models.TextField(blank=True),
),
]
|
{
"content_hash": "de75a9e2eac487792fe5f1621c2cf20a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 22.125,
"alnum_prop": 0.5903954802259888,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "c3ce78b2a049fc5dfa09e5d871e35dcf41345b1c",
"size": "404",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_core/structure/migrations/0015_servicesettings_error_traceback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
}
|
"""Support for deCONZ lights."""
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR,
ATTR_TRANSITION, EFFECT_COLORLOOP, FLASH_LONG, FLASH_SHORT,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT,
SUPPORT_FLASH, SUPPORT_TRANSITION, Light)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import COVER_TYPES, NEW_GROUP, NEW_LIGHT, SWITCH_TYPES
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up deCONZ lights and group."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ lights and groups from a config entry."""
gateway = get_gateway_from_config_entry(hass, config_entry)
@callback
def async_add_light(lights):
"""Add light from deCONZ."""
entities = []
for light in lights:
if light.type not in COVER_TYPES + SWITCH_TYPES:
entities.append(DeconzLight(light, gateway))
async_add_entities(entities, True)
gateway.listeners.append(async_dispatcher_connect(
hass, gateway.async_event_new_device(NEW_LIGHT), async_add_light))
@callback
def async_add_group(groups):
"""Add group from deCONZ."""
entities = []
for group in groups:
if group.lights and gateway.allow_deconz_groups:
entities.append(DeconzLight(group, gateway))
async_add_entities(entities, True)
gateway.listeners.append(async_dispatcher_connect(
hass, gateway.async_event_new_device(NEW_GROUP), async_add_group))
async_add_light(gateway.api.lights.values())
async_add_group(gateway.api.groups.values())
class DeconzLight(DeconzDevice, Light):
"""Representation of a deCONZ light."""
def __init__(self, device, gateway):
"""Set up light and add update callback to get data from websocket."""
super().__init__(device, gateway)
self._features = SUPPORT_BRIGHTNESS
self._features |= SUPPORT_FLASH
self._features |= SUPPORT_TRANSITION
if self._device.ct is not None:
self._features |= SUPPORT_COLOR_TEMP
if self._device.xy is not None:
self._features |= SUPPORT_COLOR
if self._device.effect is not None:
self._features |= SUPPORT_EFFECT
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._device.brightness
@property
def effect_list(self):
"""Return the list of supported effects."""
return [EFFECT_COLORLOOP]
@property
def color_temp(self):
"""Return the CT color value."""
if self._device.colormode != 'ct':
return None
return self._device.ct
@property
def hs_color(self):
"""Return the hs color value."""
if self._device.colormode in ('xy', 'hs') and self._device.xy:
return color_util.color_xy_to_hs(*self._device.xy)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self._device.state
@property
def supported_features(self):
"""Flag supported features."""
return self._features
async def async_turn_on(self, **kwargs):
"""Turn on light."""
data = {'on': True}
if ATTR_COLOR_TEMP in kwargs:
data['ct'] = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
data['xy'] = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
if ATTR_BRIGHTNESS in kwargs:
data['bri'] = kwargs[ATTR_BRIGHTNESS]
if ATTR_TRANSITION in kwargs:
data['transitiontime'] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_SHORT:
data['alert'] = 'select'
del data['on']
elif kwargs[ATTR_FLASH] == FLASH_LONG:
data['alert'] = 'lselect'
del data['on']
if ATTR_EFFECT in kwargs:
if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
data['effect'] = 'colorloop'
else:
data['effect'] = 'none'
await self._device.async_set_state(data)
async def async_turn_off(self, **kwargs):
"""Turn off light."""
data = {'on': False}
if ATTR_TRANSITION in kwargs:
data['bri'] = 0
data['transitiontime'] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_SHORT:
data['alert'] = 'select'
del data['on']
elif kwargs[ATTR_FLASH] == FLASH_LONG:
data['alert'] = 'lselect'
del data['on']
await self._device.async_set_state(data)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
attributes['is_deconz_group'] = self._device.type == 'LightGroup'
if self._device.type == 'LightGroup':
attributes['all_on'] = self._device.all_on
return attributes
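# A minimal sketch of how Home Assistant service-call arguments translate into
# the deCONZ REST payload assembled in async_turn_on above (values are
# hypothetical):
#
#     light.turn_on(brightness=128, transition=2, flash='short')
#     -> {'bri': 128, 'transitiontime': 20, 'alert': 'select'}
#        ('on' is removed whenever a flash alert is requested)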
|
{
"content_hash": "82eec17a3274f4a03f8c0844bf160e8d",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 78,
"avg_line_length": 32.241176470588236,
"alnum_prop": 0.6042692939244664,
"repo_name": "auduny/home-assistant",
"id": "7514162fefad44eb0ec1c3d943e273849c9bcb87",
"size": "5481",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/deconz/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15129018"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
import argparse
import fileinput
import re
import sys
#
# This script gets or updates version number. Version number
# is in AssemblyInfo.cs with the following format:
# <major version>.<minor version>.<build number>.<revision>
#
# Example:
# [assembly: AssemblyVersion("0.5.3.0")]
# [assembly: AssemblyFileVersion("0.5.3.0")]
#
def usage(parser):
    parser.print_help()
    sys.exit(1)
def main():
    VERSION_FILE = r'src\CollectdWinService\Properties\AssemblyInfo.cs'
    VERSION_FORMAT = "{0}.{1}.{2}.{3}"
    VERSION_PATTERN = r'^\[assembly: AssemblyVersion\("(\d+)\.(\d+)\.(\d+)\.(\d+)"\)\]'
REPLACE_PATTERN = r"(^\[assembly: Assembly.*Version\(\").*(\"\)\])"
REPLACE_FORMAT = r"\g<1>{0}\g<2>"
parser = argparse.ArgumentParser()
parser.add_argument("--command", help="get|update")
parser.add_argument("--part", help="major|minor|build|revision")
args = parser.parse_args()
vfile = open(VERSION_FILE)
for line in vfile:
m = re.match(VERSION_PATTERN, line)
if m:
cmajor = int(m.group(1))
cminor = int(m.group(2))
cbuild = int(m.group(3))
crevision = int(m.group(4))
cversion = VERSION_FORMAT.format(cmajor, cminor, cbuild, crevision)
vfile.close()
if args.command == "get" :
print(cversion)
sys.exit(0)
elif args.command != "update" :
print("\nError: Missing or bad COMMAND\n")
usage(parser)
if args.part == "major" :
nmajor = cmajor + 1
nminor = 0
nbuild = 0
nrevision = 0
elif args.part == "minor" :
nmajor = cmajor
nminor = cminor + 1
nbuild = 0
nrevision = 0
elif args.part == "build" :
nmajor = cmajor
nminor = cminor
nbuild = cbuild + 1
nrevision = 0
elif args.part == "revision" :
nmajor = cmajor
nminor = cminor
nbuild = cbuild
nrevision = crevision + 1
else :
print("\nError: Missing or bad PART\n")
usage(parser)
nversion = VERSION_FORMAT.format(nmajor, nminor, nbuild, nrevision)
for line in fileinput.input(files=[VERSION_FILE], inplace=1, backup='.bak'):
line = re.sub(REPLACE_PATTERN, REPLACE_FORMAT.format(nversion), line.rstrip())
print(line)
if __name__ == "__main__":
    main()
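# Usage sketch, assuming the script is run from the repository root and
# AssemblyInfo.cs currently holds version 0.5.3.0:
#
#     > python tools\version.py --command get
#     0.5.3.0
#     > python tools\version.py --command update --part build
#     (rewrites both Assembly*Version lines in place to "0.5.4.0" and keeps a
#      .bak copy of the original file)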
#-----------------------------------------------------------------------------
# Copyright (C) 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#------------------------------ END-OF-FILE ----------------------------------
|
{
"content_hash": "1fc269a7d06a03ccbbef9d882d3853d6",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 86,
"avg_line_length": 31.897959183673468,
"alnum_prop": 0.5844529750479847,
"repo_name": "bloomberg/collectdwin",
"id": "d8df99362236075b70fe08f837686ee04ef3820e",
"size": "3126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "138718"
}
],
"symlink_target": ""
}
|
import os
DESCRIPTION = "scicast: Single Cell Iterative Clustering and Statistical Testing. A package for interrogating single cell sequencing data."
LONG_DESCRIPTION = """\
scicast is a python utility that automates many of the repetitive steps of analyzing single cell sequencing data.
-k-means clustering to identify clusters
-Clustering and subclustering of data to identify 'stable' sets of cells.
-Statistical testing to identify the top genes that define each stable cluster.
-Correlation search and analysis to identify gene networks driving cluster identity.
-Outputs both plots for visualization (PCA and heatmap) and cell and gene lists that can be used to refine analysis.
"""
DISTNAME = 'scicast'
MAINTAINER = 'Ian Driver'
MAINTAINER_EMAIL = 'ian.driver@ucsf.edu'
URL = 'https://github.com/iandriver/scicast'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/iandriver/scicast'
VERSION = '0.8.27'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
    # Just make sure dependencies exist; I haven't rigorously
    # tested which minimal versions will work
    # (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import sklearn
except ImportError:
install_requires.append('scikit-learn')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas>=0.19.0')
try:
import seaborn
except ImportError:
install_requires.append('seaborn>=0.7.1')
try:
import rpy2
except ImportError:
install_requires.append('rpy2')
try:
import fastcluster
except ImportError:
install_requires.append('fastcluster')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
py_modules=['scicast.cluster', 'scicast.matrix_filter'],
entry_points={
'console_scripts': ['scicast = scicast.cluster:main']
},
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['scicast'],
keywords='single-cell single cell RNA-seq sequencing clustering PCA k-means',
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS']
)
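# Installation sketch (assuming a standard pip environment): installing the
# package pulls in whatever check_dependencies() reports as missing and exposes
# a ``scicast`` console script wired to scicast.cluster:main.
#
#     $ pip install .
#     $ scicast --help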
|
{
"content_hash": "3752e96c90d4a118763f5026ae5f85e2",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 139,
"avg_line_length": 32.373831775700936,
"alnum_prop": 0.6394341801385681,
"repo_name": "iandriver/SCICAST",
"id": "719e1b91a7e5479a9c87c45ac0abc831f246ed0b",
"size": "3543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110030"
},
{
"name": "R",
"bytes": "1653"
}
],
"symlink_target": ""
}
|
import better_exceptions
better_exceptions.hook()
better_exceptions.MAX_LENGTH = None
def div():
var = "9" * 150
return 1 / var
div()
|
{
"content_hash": "15d1ab2d4e5997cfea9c9dc6b875e9e7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 35,
"avg_line_length": 14.272727272727273,
"alnum_prop": 0.6178343949044586,
"repo_name": "Delgan/better-exceptions",
"id": "4d193b85a894ba865f5f9db10b3aa72061930479",
"size": "181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_truncating_disabled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27642"
},
{
"name": "Shell",
"bytes": "3913"
}
],
"symlink_target": ""
}
|
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class Firewall(neutron.NeutronResource):
"""A resource for the Firewall resource in Neutron FWaaS.
Resource for using the Neutron firewall implementation. Firewall is a
network security system that monitors and controls the incoming and
outgoing network traffic based on predetermined security rules.
"""
required_service_extension = 'fwaas'
entity = 'firewall'
PROPERTIES = (
NAME, DESCRIPTION, ADMIN_STATE_UP, FIREWALL_POLICY_ID,
VALUE_SPECS, SHARED,
) = (
'name', 'description', 'admin_state_up', 'firewall_policy_id',
'value_specs', 'shared',
)
ATTRIBUTES = (
NAME_ATTR, DESCRIPTION_ATTR, ADMIN_STATE_UP_ATTR,
FIREWALL_POLICY_ID_ATTR, SHARED_ATTR, STATUS, TENANT_ID,
) = (
'name', 'description', 'admin_state_up',
'firewall_policy_id', 'shared', 'status', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the firewall.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the firewall.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('Administrative state of the firewall. If false (down), '
'firewall does not forward packets and will drop all '
'traffic to/from VMs behind the firewall.'),
default=True,
update_allowed=True
),
FIREWALL_POLICY_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the firewall policy that this firewall is '
'associated with.'),
required=True,
update_allowed=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the request. Parameters '
'are often specific to installed hardware or extensions.'),
support_status=support.SupportStatus(version='5.0.0'),
default={},
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this firewall should be shared across all tenants. '
'NOTE: The default policy setting in Neutron restricts usage '
'of this property to administrative users only.'),
update_allowed=True,
support_status=support.SupportStatus(
status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this property unsupported while it is '
                          'not used.'),
version='6.0.0',
previous_status=support.SupportStatus(version='2015.1')
)
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name for the firewall.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('Description of the firewall.'),
type=attributes.Schema.STRING
),
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of the firewall.'),
type=attributes.Schema.STRING
),
FIREWALL_POLICY_ID_ATTR: attributes.Schema(
_('Unique identifier of the firewall policy used to create '
'the firewall.'),
type=attributes.Schema.STRING
),
SHARED_ATTR: attributes.Schema(
_('Shared status of this firewall.'),
support_status=support.SupportStatus(
status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this attribute unsupported, otherwise an '
                          'error will be raised.'),
version='6.0.0'
),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The status of the firewall.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Id of the tenant owning the firewall.'),
type=attributes.Schema.STRING
),
}
def check_create_complete(self, data):
attributes = self._show_resource()
status = attributes['status']
if status == 'PENDING_CREATE':
return False
elif status == 'ACTIVE' or status == 'INACTIVE':
return True
elif status == 'ERROR':
raise exception.ResourceInError(
resource_status=status,
status_reason=_('Error in Firewall'))
else:
raise exception.ResourceUnknownStatus(
resource_status=status,
result=_('Firewall creation failed'))
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
firewall = self.client().create_firewall({'firewall': props})[
'firewall']
self.resource_id_set(firewall['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
self.client().update_firewall(
self.resource_id, {'firewall': prop_diff})
def handle_delete(self):
try:
self.client().delete_firewall(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def _resolve_attribute(self, name):
if name == self.SHARED_ATTR:
return ('This attribute is currently unsupported in neutron '
'firewall resource.')
return super(Firewall, self)._resolve_attribute(name)
def parse_live_resource_data(self, resource_properties, resource_data):
result = super(Firewall, self).parse_live_resource_data(
resource_properties, resource_data)
if self.SHARED in result:
result.pop(self.SHARED)
return result
class FirewallPolicy(neutron.NeutronResource):
"""A resource for the FirewallPolicy resource in Neutron FWaaS.
FirewallPolicy resource is an ordered collection of firewall rules. A
firewall policy can be shared across tenants.
"""
required_service_extension = 'fwaas'
entity = 'firewall_policy'
PROPERTIES = (
NAME, DESCRIPTION, SHARED, AUDITED, FIREWALL_RULES,
) = (
'name', 'description', 'shared', 'audited', 'firewall_rules',
)
ATTRIBUTES = (
NAME_ATTR, DESCRIPTION_ATTR, FIREWALL_RULES_ATTR, SHARED_ATTR,
AUDITED_ATTR, TENANT_ID,
) = (
'name', 'description', 'firewall_rules', 'shared',
'audited', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the firewall policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the firewall policy.'),
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this policy should be shared across all tenants.'),
default=False,
update_allowed=True
),
AUDITED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this policy should be audited. When set to True, '
'each time the firewall policy or the associated firewall '
'rules are changed, this attribute will be set to False and '
'will have to be explicitly set to True through an update '
'operation.'),
default=False,
update_allowed=True
),
FIREWALL_RULES: properties.Schema(
properties.Schema.LIST,
_('An ordered list of firewall rules to apply to the firewall. '
'(Prior to version 14.0.0 this was a required property).'),
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name for the firewall policy.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('Description of the firewall policy.'),
type=attributes.Schema.STRING
),
FIREWALL_RULES_ATTR: attributes.Schema(
_('List of firewall rules in this firewall policy.'),
type=attributes.Schema.LIST
),
SHARED_ATTR: attributes.Schema(
_('Shared status of this firewall policy.'),
type=attributes.Schema.STRING
),
AUDITED_ATTR: attributes.Schema(
_('Audit status of this firewall policy.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Id of the tenant owning the firewall policy.'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
firewall_policy = self.client().create_firewall_policy(
{'firewall_policy': props})['firewall_policy']
self.resource_id_set(firewall_policy['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_firewall_policy(
self.resource_id, {'firewall_policy': prop_diff})
def handle_delete(self):
try:
self.client().delete_firewall_policy(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class FirewallRule(neutron.NeutronResource):
"""A resource for the FirewallRule resource in Neutron FWaaS.
FirewallRule represents a collection of attributes like ports,
    IP addresses, etc., which define the match criteria and the action (allow
    or deny) to be taken on the matched data traffic.
"""
required_service_extension = 'fwaas'
entity = 'firewall_rule'
PROPERTIES = (
NAME, DESCRIPTION, SHARED, PROTOCOL, IP_VERSION,
SOURCE_IP_ADDRESS, DESTINATION_IP_ADDRESS, SOURCE_PORT,
DESTINATION_PORT, ACTION, ENABLED,
) = (
'name', 'description', 'shared', 'protocol', 'ip_version',
'source_ip_address', 'destination_ip_address', 'source_port',
'destination_port', 'action', 'enabled',
)
ATTRIBUTES = (
NAME_ATTR, DESCRIPTION_ATTR, FIREWALL_POLICY_ID, SHARED_ATTR,
PROTOCOL_ATTR, IP_VERSION_ATTR, SOURCE_IP_ADDRESS_ATTR,
DESTINATION_IP_ADDRESS_ATTR, SOURCE_PORT_ATTR, DESTINATION_PORT_ATTR,
ACTION_ATTR, ENABLED_ATTR, POSITION, TENANT_ID,
) = (
'name', 'description', 'firewall_policy_id', 'shared',
'protocol', 'ip_version', 'source_ip_address',
'destination_ip_address', 'source_port', 'destination_port',
'action', 'enabled', 'position', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the firewall rule.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the firewall rule.'),
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this rule should be shared across all tenants.'),
default=False,
update_allowed=True
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol for the firewall rule.'),
constraints=[
constraints.AllowedValues(['tcp', 'udp', 'icmp', 'any']),
],
default='any',
update_allowed=True,
),
IP_VERSION: properties.Schema(
properties.Schema.STRING,
_('Internet protocol version.'),
default='4',
constraints=[
constraints.AllowedValues(['4', '6']),
],
update_allowed=True
),
SOURCE_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('Source IP address or CIDR.'),
update_allowed=True,
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
DESTINATION_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('Destination IP address or CIDR.'),
update_allowed=True,
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
SOURCE_PORT: properties.Schema(
properties.Schema.STRING,
_('Source port number or a range.'),
update_allowed=True
),
DESTINATION_PORT: properties.Schema(
properties.Schema.STRING,
_('Destination port number or a range.'),
update_allowed=True
),
ACTION: properties.Schema(
properties.Schema.STRING,
_('Action to be performed on the traffic matching the rule.'),
default='deny',
constraints=[
constraints.AllowedValues(['allow', 'deny']),
],
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this rule should be enabled.'),
default=True,
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name for the firewall rule.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('Description of the firewall rule.'),
type=attributes.Schema.STRING
),
FIREWALL_POLICY_ID: attributes.Schema(
_('Unique identifier of the firewall policy to which this '
'firewall rule belongs.'),
type=attributes.Schema.STRING
),
SHARED_ATTR: attributes.Schema(
_('Shared status of this firewall rule.'),
type=attributes.Schema.STRING
),
PROTOCOL_ATTR: attributes.Schema(
_('Protocol value for this firewall rule.'),
type=attributes.Schema.STRING
),
IP_VERSION_ATTR: attributes.Schema(
_('Ip_version for this firewall rule.'),
type=attributes.Schema.STRING
),
SOURCE_IP_ADDRESS_ATTR: attributes.Schema(
_('Source ip_address for this firewall rule.'),
type=attributes.Schema.STRING
),
DESTINATION_IP_ADDRESS_ATTR: attributes.Schema(
_('Destination ip_address for this firewall rule.'),
type=attributes.Schema.STRING
),
SOURCE_PORT_ATTR: attributes.Schema(
_('Source port range for this firewall rule.'),
type=attributes.Schema.STRING
),
DESTINATION_PORT_ATTR: attributes.Schema(
_('Destination port range for this firewall rule.'),
type=attributes.Schema.STRING
),
ACTION_ATTR: attributes.Schema(
_('Allow or deny action for this firewall rule.'),
type=attributes.Schema.STRING
),
ENABLED_ATTR: attributes.Schema(
_('Indicates whether this firewall rule is enabled or not.'),
type=attributes.Schema.STRING
),
POSITION: attributes.Schema(
_('Position of the rule within the firewall policy.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Id of the tenant owning the firewall.'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
if props.get(self.PROTOCOL) == 'any':
props[self.PROTOCOL] = None
firewall_rule = self.client().create_firewall_rule(
{'firewall_rule': props})['firewall_rule']
self.resource_id_set(firewall_rule['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
if prop_diff.get(self.PROTOCOL) == 'any':
prop_diff[self.PROTOCOL] = None
self.client().update_firewall_rule(
self.resource_id, {'firewall_rule': prop_diff})
def handle_delete(self):
try:
self.client().delete_firewall_rule(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def resource_mapping():
return {
'OS::Neutron::Firewall': Firewall,
'OS::Neutron::FirewallPolicy': FirewallPolicy,
'OS::Neutron::FirewallRule': FirewallRule,
}
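# Illustrative HOT template fragment (resource names are hypothetical) showing
# how the three resource types registered above are typically wired together:
#
#   resources:
#     a_rule:
#       type: OS::Neutron::FirewallRule
#       properties: {protocol: tcp, destination_port: 80, action: allow}
#     a_policy:
#       type: OS::Neutron::FirewallPolicy
#       properties: {firewall_rules: [{get_resource: a_rule}]}
#     a_firewall:
#       type: OS::Neutron::Firewall
#       properties: {firewall_policy_id: {get_resource: a_policy}}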
|
{
"content_hash": "b9dae3ca34e0d86ab72413a4e30a24a9",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 79,
"avg_line_length": 35.85947046843177,
"alnum_prop": 0.5734083035156472,
"repo_name": "openstack/heat",
"id": "93c865e76f5fba820749e728f1ba1aa7729ff5ab",
"size": "18182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/neutron/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf.urls import url
from django.template.response import TemplateResponse
from django.views.generic.base import TemplateView
import advanced_reports
from advanced_reports.backoffice.base import BackOfficeBase
from advanced_reports.backoffice.contrib.views import AdvancedReportView, AdvancedReportActionView
from advanced_reports.backoffice.examples.backoffice import UserModel, UserView
from advanced_reports.backoffice.examples.reports import NoModelReport, UserReport, NewStyleReport, TodoListReport
from advanced_reports.backoffice.examples.views import SimpleView
from advreport_examples.views import ExampleIncludePythonView, ExampleIncludeTemplateView
from oemfoe_todos_app.backoffice.definitions import TodoListModel, TodoItemModel, TodoListsView
class TodosBackoffice(BackOfficeBase):
title = 'Oemfoe Todo List Administration'
model_template = 'advreport_examples/page-base.html'
def define_urls(self):
return (
url(r'^users/$',
self.decorate(TemplateView.as_view(template_name='advreport_examples/users.html')),
name='users'),
url(r'^examples/$',
self.decorate(TemplateView.as_view(template_name='advreport_examples/examples.html')),
name='examples'),
)
def page(self, request):
return TemplateResponse(request, 'advanced_reports/backoffice/tests/page.html', {'backoffice': self})
todos_backoffice = TodosBackoffice(name='todos')
todos_backoffice.register_model(UserModel)
todos_backoffice.register_view(UserView)
todos_backoffice.register_view(SimpleView)
todos_backoffice.register_model(TodoListModel)
todos_backoffice.register_model(TodoItemModel)
todos_backoffice.register_view(TodoListsView)
todos_backoffice.register_view(AdvancedReportView)
todos_backoffice.register_view(AdvancedReportActionView)
todos_backoffice.register_view(ExampleIncludeTemplateView)
todos_backoffice.register_view(ExampleIncludePythonView)
advanced_reports.register(NoModelReport)
advanced_reports.register(UserReport)
advanced_reports.register(NewStyleReport)
advanced_reports.register(TodoListReport)
|
{
"content_hash": "1c4406e6dea06f20fc4ebf10a3cf1d17",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 114,
"avg_line_length": 41.264150943396224,
"alnum_prop": 0.7924096936442615,
"repo_name": "vikingco/django-advanced-reports",
"id": "338f0bb7c0b38ff6e87458a62742ebbe4ec33b27",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "advreport_test_project/advreport_test_project/backoffice_definitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59279"
},
{
"name": "HTML",
"bytes": "94949"
},
{
"name": "JavaScript",
"bytes": "104583"
},
{
"name": "Python",
"bytes": "208954"
},
{
"name": "Ruby",
"bytes": "868"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
if sys.version_info[0] >= 3:
from collections.abc import Mapping
else:
from future_builtins import map
from collections import Mapping
@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(Mapping):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
# Implement the Mapping ABC
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = False
if key in self._files:
member = True
elif key in self.files:
member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
if sys.version_info.major == 3:
# deprecate the python 2 dict apis that we supported by accident in
# python 3. We forgot to implement itervalues() at all in earlier
        # versions of numpy, so no need to deprecate it here.
def iteritems(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iteritems is deprecated in python 3, to match the "
"removal of dict.itertems. Use .items() instead.",
DeprecationWarning, stacklevel=2)
return self.items()
def iterkeys(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iterkeys is deprecated in python 3, to match the "
"removal of dict.iterkeys. Use .keys() instead.",
DeprecationWarning, stacklevel=2)
return self.keys()
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
.. warning:: Loading files that contain object arrays uses the ``pickle``
module, which is not secure against erroneous or maliciously
constructed data. Consider passing ``allow_pickle=False`` to
load data that is known not to contain object arrays for the
safer handling of untrusted sources.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
own_fid = False
return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("Cannot load file containing pickled data "
"when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
return (arr,)
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if hasattr(file, 'read'):
fid = file
else:
file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def _savez_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def _savez_compressed_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
if not hasattr(file, 'read'):
file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
if sys.version_info >= (3, 6):
# Since Python 3.6 it is possible to write directly to a ZIP file.
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
force_zip64 = val.nbytes >= 2**30
with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Stage arrays in a temporary file on disk, before writing to zip.
# Import deferred for startup time improvement
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
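# Illustrative failure mode of the naming scheme above (hypothetical call):
# mixing a positional array with a keyword that collides with a generated
# name raises immediately.
#
#     >>> np.savez('/tmp/out.npz', np.arange(3), arr_0=np.arange(3))
#     Traceback (most recent call last):
#         ...
#     ValueError: Cannot use un-named variables and keyword arr_0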
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
    def floatconv(x):
        # Normalise case so hex floats like '0X1.8P1' are recognised.
        x = x.lower()
        if '0x' in x:
            return float.fromhex(x)
        return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
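# A minimal sketch of the converters selected above (example values only):
#
#     >>> _getconv(np.dtype(bool))('0')
#     False
#     >>> _getconv(np.dtype(np.float64))('0x1.8p1')   # hex floats are accepted
#     3.0
#     >>> _getconv(np.dtype(complex))('1+-2j')
#     (1-2j)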
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
comment. None implies no comments. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will parse the
column string into the desired value. E.g., if column 0 is a date
string: ``converters = {0: datestr2num}``. Converters can also be
used to provide a default value for missing data (but see also
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
        Encoding used to decode the input file. Does not apply to input streams.
The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
max_rows : int, optional
Read `max_rows` lines of content after `skiprows` lines. The default
is to read all the lines.
.. versionadded:: 1.16.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
[2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile('|'.join(comments))
if delimiter is not None:
delimiter = _decode_line(delimiter)
user_converters = converters
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
fh = iter(fh)
fown = True
else:
fh = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
# input may be a python2 io stream
if encoding is not None:
fencoding = encoding
# we must assume local encoding
# TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
@recursive
def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
@recursive
def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter. """
line = _decode_line(line, encoding=encoding)
if comments is not None:
line = regex_comments.split(line, maxsplit=1)[0]
line = line.strip('\r\n')
if line:
return line.split(delimiter)
else:
return []
def read_data(chunk_size):
"""Parse each line, including the first.
The file read, `fh`, is a global defined above.
Parameters
----------
chunk_size : int
At most `chunk_size` lines are read at a time, with iteration
until all lines are read.
"""
X = []
line_iter = itertools.chain([first_line], fh)
line_iter = itertools.islice(line_iter, max_rows)
for i, line in enumerate(line_iter):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
if len(X) > chunk_size:
yield X
X = []
if X:
yield X
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
if byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
converters = [conv if conv is not bytes else
lambda x: x.encode(fencoding) for conv in converters]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
for x in read_data(_loadtxt_chunksize):
if X is None:
X = np.array(x, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
if X is None:
X = np.array([], dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
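# Illustrative sketch, not part of NumPy: using `usecols` and a per-column
# converter together, as described in the docstring above. The CSV fragment and
# the `percent` helper are invented for the example; `encoding=None` is passed
# so the converter receives ordinary strings rather than latin1-encoded bytes.
def _example_loadtxt_converters():
    from io import StringIO
    data = StringIO(u"id,score,share\n1,10,45%\n2,12,50%\n3,9,5%")
    def percent(s):
        return float(s.rstrip('%')) / 100.0
    # Skip the header row, keep columns 1 and 2, convert the trailing '%'
    # column to a fraction, and unpack into two 1-D arrays.
    score, share = np.loadtxt(data, delimiter=',', skiprows=1, usecols=(1, 2),
                              converters={2: percent}, unpack=True,
                              encoding=None)
    return score, share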
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
* a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
Encoding used to encode the outputfile. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<https://docs.python.org/library/string.html#format-specification-mini-language>`_,
Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
class WriteWrap(object):
"""Convert to unicode in py2 or to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
self.fh = fh
self.encoding = encoding
self.do_write = self.first_write
def close(self):
self.fh.close()
def write(self, v):
self.do_write(v)
def write_bytes(self, v):
if isinstance(v, bytes):
self.fh.write(v)
else:
self.fh.write(v.encode(self.encoding))
def write_normal(self, v):
self.fh.write(asunicode(v))
def first_write(self, v):
try:
self.write_normal(v)
self.write = self.write_normal
except TypeError:
# input is probably a bytestream
self.write_bytes(v)
self.write = self.write_bytes
own_fh = False
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
# need to convert str to unicode for text io output
if sys.version_info[0] == 2:
fh = WriteWrap(fh, encoding or 'latin1')
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, basestring):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
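# Illustrative sketch, not part of NumPy: the per-column `fmt` sequence and the
# `header`/`comments` behaviour documented above, written to an in-memory
# buffer so nothing touches the filesystem. The column names are invented.
def _example_savetxt_formats():
    from io import StringIO
    buf = StringIO()
    data = np.array([[1, 0.25], [2, 0.5]])
    # One format per column: integer id, fixed-point value. The header line is
    # prefixed with the `comments` string so loadtxt can skip it again.
    np.savetxt(buf, data, fmt=['%d', '%.3f'], delimiter=',',
               header='id,value', comments='# ')
    return buf.getvalue()  # '# id,value\n1,0.250\n2,0.500\n'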
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
encoding : str, optional
        Encoding used to decode the input file. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
array([1312, 1534, 444])
"""
own_fh = False
if not hasattr(file, "read"):
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, np.unicode):
regexp = asbytes(regexp)
elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
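# Illustrative sketch, not part of NumPy: `fromregex` accepts any object with a
# ``read`` method, so an in-memory buffer works too. The key=value lines and
# field names below are invented for the example.
def _example_fromregex_buffer():
    from io import StringIO
    buf = StringIO(u"temp=21.5\npressure=1013.2\nhumidity=48.0")
    # One regex group per dtype field: a word before '=', a number after it.
    dt = [('name', 'U16'), ('value', np.float64)]
    return np.fromregex(buf, r"(\w+)=([0-9.]+)", dt)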
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
        Encoding used to decode the input file. Does not apply when `fname` is
a file object. The special value 'bytes' enables backward compatibility
workarounds that ensure that you receive byte arrays when possible
        and pass latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Using dtype = None
>>> _ = s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Specifying dtype and names
>>> _ = s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if isinstance(fname, basestring):
fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fid_ctx = contextlib.closing(fid)
else:
fid = fname
fid_ctx = contextlib_nullcontext(fid)
fhd = iter(fid)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = {
c_type
for c, c_type in zip(converters, column_types)
if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
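# Illustrative sketch, not part of NumPy: the `missing_values`, `filling_values`
# and `usemask` parameters documented above, applied to a small invented CSV
# fragment containing gaps.
def _example_genfromtxt_missing():
    from io import StringIO
    data = StringIO(u"1,N/A,3\n4,5,\n7,8,9")
    # Treat 'N/A' (and empty fields) as missing and fill them with -1 ...
    filled = np.genfromtxt(data, delimiter=',', missing_values='N/A',
                           filling_values=-1)
    # ... or hand back a masked array instead of filling.
    data.seek(0)
    masked = np.genfromtxt(data, delimiter=',', missing_values='N/A',
                           usemask=True)
    return filled, masked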
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
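# Illustrative sketch, not part of NumPy: `recfromcsv` applies the csv-friendly
# defaults set above (field names taken from the first row, comma delimiter,
# dtype inferred, names lower-cased). The sample rows are invented;
# `encoding=None` keeps the inferred string column as unicode.
def _example_recfromcsv():
    from io import StringIO
    data = StringIO(u"Name,Age\nalice,31\nbob,27")
    rec = recfromcsv(data, encoding=None)
    return rec.name, rec.age  # attribute access through the recarray view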
|
{
"content_hash": "6caeda39a3fb2e0effe722f7925a120c",
"timestamp": "",
"source": "github",
"line_count": 2334,
"max_line_length": 95,
"avg_line_length": 36.99571550985433,
"alnum_prop": 0.5627460971881225,
"repo_name": "shoyer/numpy",
"id": "694ad169471df674e101bde6369db0afb4aed80b",
"size": "86348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/lib/npyio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8752922"
},
{
"name": "C++",
"bytes": "186633"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "7609450"
},
{
"name": "Shell",
"bytes": "9102"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
from __future__ import division
from typing import List, Optional
import configparser
import datetime
import json
import logging
import os
import queue
import re
import threading
import docker
from zabbixdocker.lib.zabbix import ZabbixMetric, ZabbixSender
class DockerDiscoveryService(threading.Thread):
""" This class implements the service which discovers docker resources """
def __init__(self, config: configparser.ConfigParser, stop_event: threading.Event, docker_client: docker.APIClient,
zabbix_sender: ZabbixSender):
"""
Initialize an instance
:param config: the configuration parser
:param stop_event: the event to stop execution
:param docker_client: the docker client
:param zabbix_sender: the zabbix sender
"""
super(DockerDiscoveryService, self).__init__()
self._logger = logging.getLogger(self.__class__.__name__)
self._workers = []
self._containers_queue = queue.Queue()
self._config = config
self._stop_event = stop_event
self._docker_client = docker_client
self._zabbix_sender = zabbix_sender
def run(self):
"""
Execute the thread
"""
worker = DockerDiscoveryWorker(self._config, self._docker_client, self._zabbix_sender,
self._containers_queue)
worker.setDaemon(True)
self._workers.append(worker)
if self._config.getboolean("discovery", "poll_events"):
worker = DockerDiscoveryEventsPollerWorker(self._config, self._docker_client, self)
worker.setDaemon(True)
self._workers.append(worker)
self._logger.info("service started")
if self._config.getint("discovery", "startup") > 0:
self._stop_event.wait(self._config.getint("discovery", "startup"))
for worker in self._workers:
worker.start()
while True:
self._execute()
if self._stop_event.wait(self._config.getint("discovery", "interval")):
break
self._logger.info("service stopped")
def _execute(self):
"""
Execute the discovery
"""
self._logger.debug("requesting discovery")
self._containers_queue.put("discovery")
def trigger(self):
"""
Request a new discovery execution
"""
self._logger.debug("triggering discovery execution")
self._execute()
class DockerDiscoveryWorker(threading.Thread):
""" This class implements a discovery worker thread """
def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient, zabbix_sender: ZabbixSender,
containers_queue: queue.Queue):
"""
Initialize the instance
:param config: the configuration parser
:param docker_client: the docker client
:param zabbix_sender: the zabbix sender
:param containers_queue: the containers queue
"""
super(DockerDiscoveryWorker, self).__init__()
self._logger = logging.getLogger(self.__class__.__name__)
self._config = config
self._docker_client = docker_client
self._zabbix_sender = zabbix_sender
self._containers_queue = containers_queue
def run(self):
"""
Execute the thread
"""
while True:
self._logger.debug("waiting execution queue")
item = self._containers_queue.get()
if item is None:
break
self._logger.info("starting discovery")
try:
metrics = []
if self._config.getboolean("main", "containers") is True:
m = self._discover_containers()
if m is not None:
metrics.extend(m)
if self._config.getboolean("main", "networks") is True:
m = self._discover_networks()
if m is not None:
metrics.extend(m)
if self._config.getboolean("main", "swarm") is True:
if self._config.getboolean("main", "swarm_services") is True:
m = self._discover_swarm_services()
if m is not None:
metrics.extend(m)
if self._config.getboolean("main", "swarm_stacks") is True:
m = self._discover_swarm_stacks()
if m is not None:
metrics.extend(m)
if len(metrics) > 0:
self._logger.debug("sending %d metrics" % len(metrics))
self._zabbix_sender.send(metrics)
except (IOError, OSError, LookupError, ValueError):
self._logger.error("failed to send discovery metrics")
def _discover_containers(self) -> Optional[List[ZabbixMetric]]:
"""
Discover containers
:return: the discovery metrics
"""
metrics = []
discovery_containers = []
discovery_containers_stats = []
discovery_containers_stats_cpus = []
discovery_containers_stats_networks = []
discovery_containers_stats_devices = []
discovery_containers_top = []
device_pattern = re.compile(r'^DEVNAME=(.+)$')
containers = self._docker_client.containers(all=True)
for container in containers:
container_id = container["Id"]
container_name = container["Names"][0][1:]
macros = dict()
if self._config.get("discovery", "containers_labels") != "":
skip = True
for label in str(self._config.get("discovery", "containers_labels")).split(","):
items = label.split("=", maxsplit=2)
label_name = items[0]
label_value = items[1] if len(items) >= 2 else ""
label_default = items[2] if len(items) == 3 else ""
if (
"Labels" in container and
container["Labels"] is not None and
label_name in container["Labels"]
):
skip = False
if (
label_value != "" and
container["Labels"][label_name] == label_value
):
macros["{{#{}}}".format(label_name.upper())] = label_value
else:
macros["{{#{}}}".format(label_name.upper())] = container["Labels"][label_name]
elif label_default != "":
skip = False
macros["{{#{}}}".format(label_name.upper())] = label_default
if skip is True:
continue
if (
"Labels" in container and
container["Labels"] is not None and
"com.docker.stack.namespace" in container["Labels"] and
"com.docker.stack.service.name" in container["Labels"]
):
macros["{#STACK}"] = container["Labels"]["com.docker.stack.namespace"]
macros["{#SERVICE}"] = container["Labels"]["com.docker.stack.service.name"]
discovery_containers.append({
**{
"{#NAME}": container_name,
}, **macros
})
if container["Status"].startswith("Up") is False:
continue
if self._config.getboolean("main", "containers_stats"):
container_stats = self._docker_client.stats(container_id, decode=False, stream=False)
discovery_containers_stats.append({
**{
"{#NAME}": container_name,
}, **macros
})
if (
"cpu_stats" in container_stats and
"cpu_usage" in container_stats["cpu_stats"] and
"percpu_usage" in container_stats["cpu_stats"]["cpu_usage"] and
isinstance(container_stats["cpu_stats"]["cpu_usage"]["percpu_usage"], int)
):
for i in range(len(container_stats["cpu_stats"]["cpu_usage"]["percpu_usage"])):
discovery_containers_stats_cpus.append({
**{
"{#NAME}": container_name,
"{#CPU}": "%d" % i,
}, **macros
})
if "networks" in container_stats:
for container_stats_network_ifname in list(container_stats["networks"].keys()):
discovery_containers_stats_networks.append({
**{
"{#NAME}": container_name,
"{#IFNAME}": container_stats_network_ifname,
}, **macros
})
if (
"blkio_stats" in container_stats and
"io_serviced_recursive" in container_stats["blkio_stats"] and
isinstance(container_stats["blkio_stats"]["io_serviced_recursive"], int)
):
for j in range(len(container_stats["blkio_stats"]["io_serviced_recursive"])):
if container_stats["blkio_stats"]["io_serviced_recursive"][j]["op"] != "Total":
continue
sysfs_file = "%s/dev/block/%s:%s/uevent" % (
os.path.join(self._config.get("main", "rootfs"), "sys"),
container_stats["blkio_stats"]["io_serviced_recursive"][j]["major"],
container_stats["blkio_stats"]["io_serviced_recursive"][j]["minor"])
with open(sysfs_file) as f:
for line in f:
match = re.search(device_pattern, line)
if not match:
continue
discovery_containers_stats_devices.append({
**{
"{#NAME}": container_name,
"{#DEVMAJOR}": container_stats["blkio_stats"]["io_serviced_recursive"][j][
"major"],
"{#DEVMINOR}": container_stats["blkio_stats"]["io_serviced_recursive"][j][
"minor"],
"{#DEVNAME}": match.group(1)
}, **macros
})
if self._config.getboolean("main", "containers_top"):
container_top: dict = dict(self._docker_client.top(container))
if (
"Processes" in container_top and
                    isinstance(container_top["Processes"], list)
):
for j in range(len(container_top["Processes"])):
discovery_containers_top.append({
**{
"{#NAME}": container_name,
"{#PID}": container_top["Processes"][j][1],
"{#CMD}": container_top["Processes"][j][7],
}, **macros
})
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers",
json.dumps({"data": discovery_containers})))
if self._config.getboolean("main", "containers_stats"):
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers.stats",
json.dumps({"data": discovery_containers_stats})))
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers.stats.cpus",
json.dumps({"data": discovery_containers_stats_cpus})))
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers.stats.networks",
json.dumps({"data": discovery_containers_stats_networks})))
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers.stats.devices",
json.dumps({"data": discovery_containers_stats_devices})))
if self._config.getboolean("main", "containers_top"):
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.containers.top",
json.dumps({"data": discovery_containers_top})))
return metrics
def _discover_networks(self) -> Optional[List[ZabbixMetric]]:
"""
Discover networks
:return: the discovery metrics
"""
metrics = []
discovery_networks = []
networks = self._docker_client.networks()
for network in networks:
network_name = network["Name"]
macros = dict()
if self._config.get("discovery", "networks_labels") != "":
skip = True
for label in str(self._config.get("discovery", "networks_labels")).split(","):
items = label.split("=", maxsplit=2)
label_name = items[0]
label_value = items[1] if len(items) >= 2 else ""
label_default = items[2] if len(items) == 3 else ""
if (
"Labels" in network and
network["Labels"] is not None and
label_name in network["Labels"]
):
skip = False
if (
label_value != "" and
network["Labels"][label_name] == label_value
):
macros["{{#{}}}".format(label_name.upper())] = label_value
else:
macros["{{#{}}}".format(label_name.upper())] = network["Labels"][label_name]
elif label_default != "":
skip = False
macros["{{#{}}}".format(label_name.upper())] = label_default
if skip is True:
continue
discovery_networks.append({
**{
"{#NAME}": network_name,
}, **macros
})
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname"),
"docker.discovery.networks",
json.dumps({"data": discovery_networks})))
return metrics
def _discover_swarm_services(self) -> Optional[List[ZabbixMetric]]:
"""
Discover swarm services
:return: the discovery metrics
"""
if self._check_leader() is False:
self._logger.debug("node is not the swarm leader")
return None
metrics = []
discovery_services = []
services = self._docker_client.services()
for service in services:
service_name = service["Spec"]["Name"]
macros = dict()
if self._config.get("discovery", "swarm_services_labels") != "":
skip = True
for label in str(self._config.get("discovery", "swarm_services_labels")).split(","):
items = label.split("=", maxsplit=2)
label_name = items[0]
label_value = items[1] if len(items) >= 2 else ""
label_default = items[2] if len(items) == 3 else ""
if label_name in service["Spec"]["Labels"]:
skip = False
if (
label_value != "" and
service["Spec"]["Labels"][label_name] == label_value
):
macros["{{#{}}}".format(label_name.upper())] = label_value
else:
macros["{{#{}}}".format(label_name.upper())] = service["Spec"]["Labels"][label_name]
elif label_default != "":
skip = False
macros["{{#{}}}".format(label_name.upper())] = label_default
if skip is True:
continue
if "com.docker.stack.namespace" in service["Spec"]["Labels"]:
macros["{#STACK}"] = service["Spec"]["Labels"]["com.docker.stack.namespace"]
discovery_services.append({
**{
"{#NAME}": service_name,
}, **macros
})
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname_cluster"),
"docker.discovery.swarm.services",
json.dumps({"data": discovery_services})))
return metrics
def _discover_swarm_stacks(self) -> Optional[List[ZabbixMetric]]:
"""
Discover swarm stacks
:return: the discovery metrics
"""
if self._check_leader() is False:
self._logger.debug("node is not the swarm leader")
return None
metrics = []
discovery_stacks = []
services = self._docker_client.services(filters={
"label": "com.docker.stack.namespace"
})
stacks = set()
stacks_macros = dict()
for service in services:
stack_name = service["Spec"]["Labels"]["com.docker.stack.namespace"]
macros = dict()
if self._config.get("discovery", "swarm_stacks_labels") != "":
skip = True
for label in str(self._config.get("discovery", "swarm_stacks_labels")).split(","):
items = label.split("=", maxsplit=2)
label_name = items[0]
label_value = items[1] if len(items) >= 2 else ""
label_default = items[2] if len(items) == 3 else ""
if label_name in service["Spec"]["Labels"]:
skip = False
if (
label_value != "" and
service["Spec"]["Labels"][label_name] == label_value
):
macros["{{#{}}}".format(label_name.upper())] = label_value
else:
macros["{{#{}}}".format(label_name.upper())] = service["Spec"]["Labels"][label_name]
elif label_default != "":
skip = False
macros["{{#{}}}".format(label_name.upper())] = label_default
if skip is True:
continue
stacks.add(stack_name)
stacks_macros[stack_name] = macros
for stack_name in stacks:
discovery_stacks.append({
**{
"{#NAME}": stack_name,
}, **stacks_macros[stack_name]
})
metrics.append(
ZabbixMetric(
self._config.get("zabbix", "hostname_cluster"),
"docker.discovery.swarm.stacks",
json.dumps({"data": discovery_stacks})))
return metrics
def _check_leader(self) -> bool:
"""
Check if the node is the current swarm leader
:return: True if host is the leader; False otherwise
"""
info = self._docker_client.info()
if (
"Swarm" not in info or
info["Swarm"] == "inactive" or
"NodeID" not in info["Swarm"] or
info["Swarm"]["NodeID"] == "" or
"RemoteManagers" not in info["Swarm"] or
info["Swarm"]["RemoteManagers"] is None
):
return False
node_id = info["Swarm"]["NodeID"]
manager = False
for remote_manager in info["Swarm"]["RemoteManagers"]:
if remote_manager["NodeID"] == node_id:
manager = True
if manager is False:
return False
inspect = self._docker_client.inspect_node(node_id)
leader = False
if (
"Leader" in inspect["ManagerStatus"] and
inspect["ManagerStatus"]["Leader"] is True and
inspect["ManagerStatus"]["Reachability"] == "reachable"
):
leader = True
if leader is False:
            return False
        return True
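# Illustrative sketch, not part of zabbix-docker: the `containers_labels`,
# `networks_labels` and `swarm_*_labels` options parsed by the worker above use
# comma-separated entries of the form ``name``, ``name=value`` or
# ``name=value=default``. The helper below only demonstrates that split logic
# on an invented configuration value; it is not used by the service itself.
def _example_parse_label_filters(config_value: str) -> List[tuple]:
    filters = []
    for label in config_value.split(","):
        items = label.split("=", maxsplit=2)
        name = items[0]
        value = items[1] if len(items) >= 2 else ""
        default = items[2] if len(items) == 3 else ""
        filters.append((name, value, default))
    return filters
# _example_parse_label_filters("monitor=true,env==production") returns
# [('monitor', 'true', ''), ('env', '', 'production')]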
class DockerDiscoveryEventsPollerWorker(threading.Thread):
""" This class implements a discovery by events worker thread """
def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient,
discovery_service: DockerDiscoveryService):
"""
Initialize the instance
:param config: the config parser
:param docker_client: the docker client
:param discovery_service: the discovery service
"""
super(DockerDiscoveryEventsPollerWorker, self).__init__()
self._logger = logging.getLogger(self.__class__.__name__)
self._config = config
self._docker_client = docker_client
self._discovery_service = discovery_service
def run(self):
"""
Execute the thread
"""
until = None
while True:
since = datetime.datetime.utcnow() if until is None else until
until = datetime.datetime.utcnow() + datetime.timedelta(seconds=self._config.getint("discovery",
"poll_events_interval"))
containers_start = 0
self._logger.info("querying events")
for event in self._docker_client.events(since, until,
filters={
"type": "container",
"event": "start"
},
decode=True):
self._logger.debug("new docker event: %s" % event)
if event["status"] == "start":
containers_start += 1
if containers_start > 0:
self._discovery_service.trigger()
|
{
"content_hash": "9303f6a7af46146f7a7ec35c1a7cbb58",
"timestamp": "",
"source": "github",
"line_count": 622,
"max_line_length": 120,
"avg_line_length": 37.247588424437296,
"alnum_prop": 0.47362741712707185,
"repo_name": "bhuisgen/zabbix-docker",
"id": "4e5d9a46aa64f7fdc443734f0840be77043f0837",
"size": "23168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zabbixdocker/services/discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "42"
},
{
"name": "Makefile",
"bytes": "87"
},
{
"name": "Python",
"bytes": "166513"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
}
|
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
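# A small self-check of the byte-order helpers above. The constant is an
# arbitrary illustration, not protocol data: bytereverse swaps the four bytes
# of a 32-bit word, and bufreverse/wordreverse apply the same idea to whole
# buffers.
assert bytereverse(0x12345678) == 0x78563412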
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7144
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
{
"content_hash": "6126b8f402018527061ab852a4a137b6",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 84,
"avg_line_length": 25.412244897959184,
"alnum_prop": 0.6451975586251205,
"repo_name": "solarcoin/solarcoin",
"id": "19c4f7ec7373389bc8c4c5bf1b24fc1eb2ef9903",
"size": "6434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/pyminer/pyminer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "92422"
},
{
"name": "C++",
"bytes": "2568885"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Erlang",
"bytes": "6752"
},
{
"name": "JavaScript",
"bytes": "12"
},
{
"name": "Nu",
"bytes": "264"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "PHP",
"bytes": "2969"
},
{
"name": "Perl",
"bytes": "30132"
},
{
"name": "Python",
"bytes": "110674"
},
{
"name": "Shell",
"bytes": "89888"
},
{
"name": "TypeScript",
"bytes": "5240051"
}
],
"symlink_target": ""
}
|
from sys import argv
script, first, second, third, fourth = argv
print "This script is called:", script
print "Name a fruit:", first
#print "Your name:", second
print "Your favorite ice-cream flavor:", third
print "Your pet's name:", fourth
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
print "You are %r years old and your height is %r." % (age, height), second
|
{
"content_hash": "e8e1533452cd2cb54ce77cbde165e0eb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 75,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6959798994974874,
"repo_name": "CodeCatz/litterbox",
"id": "3e13222587832fde1794f4f62e67e8a931c5bfcb",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pija/LearnPythontheHardWay/File6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927158"
},
{
"name": "JavaScript",
"bytes": "796463"
},
{
"name": "Python",
"bytes": "192149"
},
{
"name": "Ruby",
"bytes": "54"
}
],
"symlink_target": ""
}
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbor interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(x, y)
Nearest-neighbor interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
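    Examples
    --------
    A minimal usage sketch; the data below is illustrative only:
    >>> import numpy as np
    >>> points = np.random.rand(10, 2)
    >>> values = np.sum(points, axis=1)
    >>> interp = NearestNDInterpolator(points, values)
    >>> zi = interp(np.array([[0.5, 0.5]]))   # value at the nearest data point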
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = np.asarray(y)
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-D data.
Parameters
----------
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
Data point coordinates.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to N-D
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
|
{
"content_hash": "530bcb5146c2ff2593d04c0ede46f626",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 112,
"avg_line_length": 33.03930131004367,
"alnum_prop": 0.5732223103357124,
"repo_name": "arokem/scipy",
"id": "078aad5daaedcfe25e3f49656e206727c5ab8551",
"size": "7566",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/interpolate/ndgriddata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649740"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12815696"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import re
import unicodedata
import json
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import validate_email, ValidationError
from django.core import urlresolvers
from django.db.models import FieldDoesNotExist
from django.db.models.fields import (DateTimeField, DateField,
EmailField, TimeField)
from django.utils import six, dateparse
from django.utils.datastructures import SortedDict
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
import importlib
except:
from django.utils import importlib
def _generate_unique_username_base(txts, regex=None):
username = None
regex = regex or '[^\w\s@+.-]'
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub(regex, '', username).lower())
        # Django allows for '@' in usernames in order to accommodate
        # projects wanting to use e-mail for username. In allauth we don't
# use this, we already have a proper place for putting e-mail
# addresses (EmailAddress), so let's not use the full e-mail
# address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
username = re.sub('\s+', '_', username)
if username:
break
return username or 'user'
def generate_unique_username(txts, regex=None):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
username = _generate_unique_username_base(txts, regex)
User = get_user_model()
try:
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
except FieldDoesNotExist:
raise ImproperlyConfigured(
"USER_MODEL_USERNAME_FIELD does not exist in user-model"
)
i = 0
while True:
try:
if i:
pfx = str(i + 1)
else:
pfx = ''
ret = username[0:max_length - len(pfx)] + pfx
query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}
User.objects.get(**query)
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field+'__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
try:
from django.contrib.auth import get_user_model
except ImportError:
# To keep compatibility with Django 1.4
def get_user_model():
from . import app_settings
from django.db.models import get_model
try:
app_label, model_name = app_settings.USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the"
" form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model"
" '%s' that has not been installed"
% app_settings.USER_MODEL)
return user_model
def resolve_url(to):
"""
Subset of django.shortcuts.resolve_url (that one is 1.5+)
"""
try:
return urlresolvers.reverse(to)
except urlresolvers.NoReverseMatch:
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
ret = dict([(k, v)
for k, v in instance.__dict__.items()
if not k.startswith('_')])
return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
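# A minimal round-trip sketch (the `user` instance below is hypothetical, not
# part of this module):
#
#   data = serialize_instance(user)                           # JSON-safe dict
#   restored = deserialize_instance(get_user_model(), data)   # unsaved instance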
def set_form_field_order(form, fields_order):
if isinstance(form.fields, SortedDict):
form.fields.keyOrder = fields_order
else:
# Python 2.7+
from collections import OrderedDict
assert isinstance(form.fields, OrderedDict)
form.fields = OrderedDict((f, form.fields[f])
for f in fields_order)
def build_absolute_uri(request, location, protocol=None):
uri = request.build_absolute_uri(location)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
def get_request_param(request, param, default=None):
return request.POST.get(param) or request.GET.get(param, default)
|
{
"content_hash": "88e32af5060162a389f29742071b1b4a",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 33.55980861244019,
"alnum_prop": 0.6174793270601654,
"repo_name": "nangia/django-allauth",
"id": "821e18907e36ae86f97f94e8ef967d68f1e508f3",
"size": "7014",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "44734"
},
{
"name": "JavaScript",
"bytes": "4028"
},
{
"name": "Python",
"bytes": "582488"
}
],
"symlink_target": ""
}
|
"""Functions for transforming encoded taxonomy files into a bycat taxonomy df"""
import pandas as pd
import numpy as np
__author__ = "Peter J Usherwood"
__python_version__ = "3.6"
def domains_to_binary(df_encoded, domain_column_key='Domain', num_domains=10):
"""
Transform the domains column into binary to be used as a cross-sectional category
:param df_encoded: Standard encoded matrix file
:param domain_column_key: String, the name of the current domain column
:param num_domains: Int, the number of domains to use (will take top x)
:return: df_encoded with the additional domain columns prefixed c_ for cross-sectional
"""
top_domains = df_encoded[domain_column_key].value_counts()[:num_domains].index.tolist()
top_domains = ['c_'+domain for domain in top_domains]
domains_df = pd.get_dummies(df_encoded[domain_column_key], prefix='c')
domains_df = domains_df[top_domains]
df_encoded = pd.concat([df_encoded,domains_df], axis=1)
return df_encoded
def date_to_binary_tod(pd_datetime, lower_hour=0, lower_minute=0, upper_hour=23, upper_minute=0):
"""
Turn a pandas datetime value into a binary variable, good if "applied" to pandas column
:param pd_datetime: pandas datetime variable
:param lower_hour: Int, lower hour, 0-23
:param lower_minute: Int, lower minute, 0-59
:param upper_hour: Int, upper hour, 0-23
:param upper_minute: Int, upper minute, 0-59
:return: valid 1 or 0 to be assigned to a binary column
"""
current_minutes = (pd_datetime.hour*60) + pd_datetime.minute
lower_minutes = (lower_hour*60) + lower_minute
upper_minutes = (upper_hour*60) + upper_minute
if (current_minutes >= lower_minutes) and (current_minutes <= upper_minutes):
valid = 1
else:
valid = 0
return valid
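# A minimal sketch of applying this helper to a datetime column; the column
# name and time window are assumptions, not part of any shipped data set:
#
#   df['c_morning'] = pd.to_datetime(df['Date (Local)']).apply(
#       lambda ts: date_to_binary_tod(ts, lower_hour=6, upper_hour=12))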
def encoded_to_bycat_counts(df_encoded,
tax_col_indicator='e_',
cross_col_indicator='c_',
prediction=True,
include_sentiment=True,
sentiment_column_key='Sentiment',
categorical_sentiment=True,
date_column_key='Date (Local)',
manual_range=pd.date_range('2015-10-31', '2017-11-01')):
"""
Transform a standard encoded file into a bycat (by category) file
:param df_encoded: Standard encoded matrix file, can have additional columns that will be discarded
:param tax_col_indicator: String, the pattern that starts all categories in the taxonomy
:param cross_col_indicator: String, the pattern that starts all cross-sectional categories
:param prediction: Bool, if True adds dVolumedt and dSentimentdt values
:param include_sentiment: Bool, include the sentiment column as a cross sectional column
:param sentiment_column_key: String, the name of the sentiment column
:param categorical_sentiment: Bool, Create dummy variables for the sentiment (one hot encoding)
:param date_column_key: String, name of the date column to use
:param manual_range: pandas date range, manually specify the domain for prediction, this is vital if you are
splitting a big data set in half as keeping the range constant allows the derivatives to be summed. E.g.
bycat1 + bycat2 = bycat_total
:return: bycat_counts df with the taxonomy as rows and counts of the cross sectional variables as columns
"""
if prediction:
include_sentiment = True
df_encoded.fillna(0, inplace=True)
tax_cols = list(df_encoded.columns[pd.Series(df_encoded.columns).str.startswith(tax_col_indicator)])
cross_cols = list(df_encoded.columns[pd.Series(df_encoded.columns).str.startswith(cross_col_indicator)])
if include_sentiment:
cross_cols += [sentiment_column_key]
if categorical_sentiment:
sents = pd.get_dummies(df_encoded[sentiment_column_key])
for col in sents.columns:
sents.rename(columns={col:'Sentiment ' + str(col)}, inplace=True)
cross_cols += ['Sentiment ' + str(col)]
df_encoded = pd.concat([df_encoded, sents], axis=1)
df_cross = df_encoded[tax_cols+cross_cols]
counts = df_cross[tax_cols].sum().values
cooc_full = df_cross.T.dot(df_cross)
bycat_counts = cooc_full.ix[tax_cols,cross_cols]
bycat_counts.insert(0, 'Volume', value=counts)
if prediction:
df_encoded.index = pd.to_datetime(df_encoded[date_column_key])
df_encoded['Volume'] = 1
dvolumedts = []
dsentimentdts = []
for tax in tax_cols:
sub = df_encoded[df_encoded[tax] == 1].ix[:, [date_column_key, 'Volume', sentiment_column_key]]
if manual_range is not None:
df2 = pd.DataFrame(0, index=manual_range, columns=['Volume'])
df2[date_column_key] = df2.index
df2[sentiment_column_key] = 0
df_encoded['Volume'] = 1
sub = sub.combine_first(df2)
#sent = sub.resample('W').mean()[sentiment_column_key]\
# .fillna(sub.resample('W').mean()[sentiment_column_key].mean())
#volume = sub.resample('W').sum()['Volume'].fillna(sub.resample('W').sum()['Volume'].mean())
sent = sub.resample('W').sum()[sentiment_column_key].fillna(0)
volume = sub.resample('W').sum()['Volume'].fillna(0)
x = np.arange(len(volume))
if len(x) <= 1:
mv, ms = 0, 0
else:
mv, cv = np.polyfit(x=x, y=volume, deg=1)
ms, cs = np.polyfit(x=x, y=sent, deg=1)
dvolumedts.append(mv)
dsentimentdts.append(ms)
bycat_counts.insert(1, 'dVolume dt', value=dvolumedts)
bycat_counts.insert(1, 'dSentiment dt', value=dsentimentdts)
return bycat_counts
def bycat_counts_to_bycat_scores(bycat_counts, cross_lists):
"""
Transform a bycat_counts df into a bycat_scores df which gives an index based on sub cross-category groups and
volumes
:param bycat_counts: bycat_counts df
:param cross_lists: A list of lists, where each list is a group of common column names (e.g. brands, moments)
:return: bycat_scores
"""
bycat_scores = bycat_counts.ix[:,list(set(bycat_counts.columns.values.tolist())-
set([l for sub in cross_lists for l in sub]))]
for cross in cross_lists:
M = bycat_counts.ix[:,cross]
sumjM = M.sum(axis=0).values
sumiM = M.sum(axis=1).values
sumijM = M.sum().sum()
cross_scores = M - np.outer(sumjM,sumiM).T/sumijM
bycat_scores = pd.concat([bycat_scores,cross_scores], axis=1)
return bycat_scores
|
{
"content_hash": "6b9a89663a42aa3d9b1c12fd28b09ebe",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 114,
"avg_line_length": 39.674418604651166,
"alnum_prop": 0.6296893317702228,
"repo_name": "Usherwood/usherwood_ds",
"id": "fea843d3d3d1c5d2d6a4e0a1edcc2f6b022361df",
"size": "6847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usherwood_ds/nlp/taxonomy/bycat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76416"
},
{
"name": "Python",
"bytes": "245786"
}
],
"symlink_target": ""
}
|
import os
from django.conf import urls
def register_urlpatterns():
"""
    Register the URL configuration for the m3.contrib.users application
"""
return urls.defaults.patterns('',
(r'^op_static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(
os.path.dirname(__file__), 'static')}),
)
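# A hedged usage sketch (the project URLconf is an assumption): the returned
# patterns are typically appended to a project's urlpatterns, for example
#
#   urlpatterns += register_urlpatterns()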
|
{
"content_hash": "75ee1c7c141f58c573ec37cc2acfecb1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 28,
"alnum_prop": 0.5934065934065934,
"repo_name": "barsgroup/objectpack",
"id": "398b28d9aee6d0f729e3c5949158e6457b769bcf",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/objectpack/app_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1132"
},
{
"name": "HTML",
"bytes": "13501"
},
{
"name": "JavaScript",
"bytes": "35443"
},
{
"name": "Python",
"bytes": "220784"
}
],
"symlink_target": ""
}
|
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
class LoginRequiredMixin(object):
"""
    Requires self.model to be set.
    permission_required: add, delete, change
    add: can add, upload, update
    delete: can delete
    permission_required = None: being logged in is enough to operate
    permission_required = 'add': the add permission is required, and so on
    **Insufficient permissions redirect to the login page**
"""
permission_required = None
@method_decorator(login_required(login_url=reverse_lazy('easyui:login')))
def dispatch(self, request, *args, **kwargs):
"""
        Adds permission checking: permissions are verified only when self has both model and permission_required
"""
if getattr(self, 'model', None) and self.permission_required:
app_label = self.model._meta.app_label
model_name = self.model.__name__.lower()
permission_required = self.permission_required.lower()
permission = '%(app_label)s.%(permission_required)s_%(model_name)s' % {
'app_label':app_label,
'permission_required':permission_required,
'model_name': model_name
}
if not self.request.user.has_perm(permission):
return HttpResponseRedirect(reverse_lazy('easyui:login'))
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
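# A minimal usage sketch (Book and ListView are assumptions, not part of this
# module):
#
#   class BookListView(LoginRequiredMixin, ListView):
#       model = Book
#       permission_required = 'change'   # checks app_label.change_book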
|
{
"content_hash": "1fe0a0ae1f9cbaeac5f719a6762f0363",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 83,
"avg_line_length": 37.94871794871795,
"alnum_prop": 0.6567567567567567,
"repo_name": "xu2243051/easyui-menu",
"id": "ab4245e73e981baa0b965f8d1e44f8aa9e95656c",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easyui/mixins/permission_mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105539"
},
{
"name": "JavaScript",
"bytes": "517521"
},
{
"name": "Python",
"bytes": "58367"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
import pprint
import math
import numpy
import os
import operator
import theano
from six.moves import input
from picklable_itertools.extras import equizip
from theano import tensor
from blocks.bricks import Tanh, Initializable
from blocks.bricks.base import application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import SimpleRecurrent, Bidirectional
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.parallel import Fork
from blocks.bricks.sequence_generators import (
SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback)
from blocks.config import config
from blocks.graph import ComputationGraph
from fuel.transformers import Mapping, Batch, Padding, Filter
from fuel.datasets import OneBillionWord, TextFile
from fuel.schemes import ConstantScheme
from blocks.serialization import load_parameter_values
from blocks.algorithms import (GradientDescent, Scale,
StepClipping, CompositeRule)
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.filter import VariableFilter
from blocks.utils import named_copy, dict_union
from blocks.search import BeamSearch
config.recursion_limit = 100000
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
# Dictionaries
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
char2code = {v: k for k, v in code2char.items()}
def reverse_words(sample):
sentence = sample[0]
result = []
word_start = -1
for i, code in enumerate(sentence):
if code >= char2code[' ']:
if word_start >= 0:
result.extend(sentence[i - 1:word_start - 1:-1])
word_start = -1
result.append(code)
else:
if word_start == -1:
word_start = i
return (result,)
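# A short illustration of reverse_words on an assumed input: the characters of
# each word are reversed in place while spaces and the <S>/</S> markers keep
# their positions, so the encoding of "<S>ab cd</S>" maps to the encoding of
# "<S>ba dc</S>".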
def _lower(s):
return s.lower()
def _transpose(data):
return tuple(array.T for array in data)
def _filter_long(data):
return len(data[0]) <= 100
def _is_nan(log):
return math.isnan(log.current_row['total_gradient_norm'])
class WordReverser(Initializable):
"""The top brick.
It is often convenient to gather all bricks of the model under the
roof of a single top brick.
"""
def __init__(self, dimension, alphabet_size, **kwargs):
super(WordReverser, self).__init__(**kwargs)
encoder = Bidirectional(
SimpleRecurrent(dim=dimension, activation=Tanh()))
fork = Fork([name for name in encoder.prototype.apply.sequences
if name != 'mask'])
fork.input_dim = dimension
fork.output_dims = [dimension for name in fork.input_names]
lookup = LookupTable(alphabet_size, dimension)
transition = SimpleRecurrent(
activation=Tanh(),
dim=dimension, name="transition")
attention = SequenceContentAttention(
state_names=transition.apply.states,
attended_dim=2 * dimension, match_dim=dimension, name="attention")
readout = Readout(
readout_dim=alphabet_size,
source_names=[transition.apply.states[0],
attention.take_glimpses.outputs[0]],
emitter=SoftmaxEmitter(name="emitter"),
feedback_brick=LookupFeedback(alphabet_size, dimension),
name="readout")
generator = SequenceGenerator(
readout=readout, transition=transition, attention=attention,
name="generator")
self.lookup = lookup
self.fork = fork
self.encoder = encoder
self.generator = generator
self.children = [lookup, fork, encoder, generator]
@application
def cost(self, chars, chars_mask, targets, targets_mask):
return self.generator.cost_matrix(
targets, targets_mask,
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True),
mask=chars_mask)),
attended_mask=chars_mask)
@application
def generate(self, chars):
return self.generator.generate(
n_steps=3 * chars.shape[0], batch_size=chars.shape[1],
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True))),
attended_mask=tensor.ones(chars.shape))
def main(mode, save_path, num_batches, data_path=None):
reverser = WordReverser(100, len(char2code), name="reverser")
if mode == "train":
# Data processing pipeline
dataset_options = dict(dictionary=char2code, level="character",
preprocess=_lower)
if data_path:
dataset = TextFile(data_path, **dataset_options)
else:
dataset = OneBillionWord("training", [99], **dataset_options)
data_stream = dataset.get_example_stream()
data_stream = Filter(data_stream, _filter_long)
data_stream = Mapping(data_stream, reverse_words,
add_sources=("targets",))
data_stream = Batch(data_stream, iteration_scheme=ConstantScheme(10))
data_stream = Padding(data_stream)
data_stream = Mapping(data_stream, _transpose)
# Initialization settings
reverser.weights_init = IsotropicGaussian(0.1)
reverser.biases_init = Constant(0.0)
reverser.push_initialization_config()
reverser.encoder.weights_init = Orthogonal()
reverser.generator.transition.weights_init = Orthogonal()
# Build the cost computation graph
chars = tensor.lmatrix("features")
chars_mask = tensor.matrix("features_mask")
targets = tensor.lmatrix("targets")
targets_mask = tensor.matrix("targets_mask")
batch_cost = reverser.cost(
chars, chars_mask, targets, targets_mask).sum()
batch_size = named_copy(chars.shape[1], "batch_size")
cost = aggregation.mean(batch_cost, batch_size)
cost.name = "sequence_log_likelihood"
logger.info("Cost graph is built")
# Give an idea of what's going on
model = Model(cost)
parameters = model.get_parameter_dict()
logger.info("Parameters:\n" +
pprint.pformat(
[(key, value.get_value().shape) for key, value
in parameters.items()],
width=120))
# Initialize parameters
for brick in model.get_top_bricks():
brick.initialize()
# Define the training algorithm.
cg = ComputationGraph(cost)
algorithm = GradientDescent(
cost=cost, parameters=cg.parameters,
step_rule=CompositeRule([StepClipping(10.0), Scale(0.01)]))
# Fetch variables useful for debugging
generator = reverser.generator
(energies,) = VariableFilter(
applications=[generator.readout.readout],
name_regex="output")(cg.variables)
(activations,) = VariableFilter(
applications=[generator.transition.apply],
name=generator.transition.apply.states[0])(cg.variables)
max_length = named_copy(chars.shape[0], "max_length")
cost_per_character = named_copy(
aggregation.mean(batch_cost, batch_size * max_length),
"character_log_likelihood")
min_energy = named_copy(energies.min(), "min_energy")
max_energy = named_copy(energies.max(), "max_energy")
mean_activation = named_copy(abs(activations).mean(),
"mean_activation")
observables = [
cost, min_energy, max_energy, mean_activation,
batch_size, max_length, cost_per_character,
algorithm.total_step_norm, algorithm.total_gradient_norm]
for name, parameter in parameters.items():
observables.append(named_copy(
parameter.norm(2), name + "_norm"))
observables.append(named_copy(
algorithm.gradients[parameter].norm(2), name + "_grad_norm"))
# Construct the main loop and start training!
average_monitoring = TrainingDataMonitoring(
observables, prefix="average", every_n_batches=10)
main_loop = MainLoop(
model=model,
data_stream=data_stream,
algorithm=algorithm,
extensions=[
Timing(),
TrainingDataMonitoring(observables, after_batch=True),
average_monitoring,
FinishAfter(after_n_batches=num_batches)
# This shows a way to handle NaN emerging during
# training: simply finish it.
.add_condition(["after_batch"], _is_nan),
# Saving the model and the log separately is convenient,
# because loading the whole pickle takes quite some time.
Checkpoint(save_path, every_n_batches=500,
save_separately=["model", "log"]),
Printing(every_n_batches=1)])
main_loop.run()
elif mode == "sample" or mode == "beam_search":
chars = tensor.lmatrix("input")
generated = reverser.generate(chars)
model = Model(generated)
logger.info("Loading the model..")
model.set_parameter_values(load_parameter_values(save_path))
def generate(input_):
"""Generate output sequences for an input sequence.
Incapsulates most of the difference between sampling and beam
search.
Returns
-------
outputs : list of lists
Trimmed output sequences.
costs : list
The negative log-likelihood of generating the respective
sequences.
"""
if mode == "beam_search":
samples, = VariableFilter(
bricks=[reverser.generator], name="outputs")(
ComputationGraph(generated[1]))
# NOTE: this will recompile beam search functions
# every time user presses Enter. Do not create
# a new `BeamSearch` object every time if
# speed is important for you.
beam_search = BeamSearch(samples)
outputs, costs = beam_search.search(
{chars: input_}, char2code['</S>'],
3 * input_.shape[0])
else:
_1, outputs, _2, _3, costs = (
model.get_theano_function()(input_))
outputs = list(outputs.T)
costs = list(costs.T)
for i in range(len(outputs)):
outputs[i] = list(outputs[i])
try:
true_length = outputs[i].index(char2code['</S>']) + 1
except ValueError:
true_length = len(outputs[i])
outputs[i] = outputs[i][:true_length]
costs[i] = costs[i][:true_length].sum()
return outputs, costs
while True:
line = input("Enter a sentence\n")
message = ("Enter the number of samples\n" if mode == "sample"
else "Enter the beam size\n")
batch_size = int(input(message))
encoded_input = [char2code.get(char, char2code["<UNK>"])
for char in line.lower().strip()]
encoded_input = ([char2code['<S>']] + encoded_input +
[char2code['</S>']])
print("Encoder input:", encoded_input)
target = reverse_words((encoded_input,))[0]
print("Target: ", target)
samples, costs = generate(
numpy.repeat(numpy.array(encoded_input)[:, None],
batch_size, axis=1))
messages = []
for sample, cost in equizip(samples, costs):
message = "({})".format(cost)
message += "".join(code2char[code] for code in sample)
if sample == target:
message += " CORRECT!"
messages.append((cost, message))
messages.sort(key=operator.itemgetter(0), reverse=True)
for _, message in messages:
print(message)
|
{
"content_hash": "4edeb82f3c242499e7236854e3774b0f",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 78,
"avg_line_length": 39.84,
"alnum_prop": 0.5876583256101329,
"repo_name": "dmitriy-serdyuk/blocks-examples",
"id": "b52278df4cb1d68aac2f822c5b9178dd85a6dc90",
"size": "12948",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reverse_words/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40956"
}
],
"symlink_target": ""
}
|
from itertools import chain
import logging
import os
import sys
import cdec.configobj
from cdec.sa._sa import gzip_or_text
from cdec.sa.features import EgivenFCoherent, SampleCountF, CountEF,\
MaxLexEgivenF, MaxLexFgivenE, IsSingletonF, IsSingletonFE,\
IsSupportedOnline
import cdec.sa
# maximum span of a grammar rule in TEST DATA
MAX_INITIAL_SIZE = 15
class GrammarExtractor:
def __init__(self, config, online=False, features=None):
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cdec.sa')
if isinstance(config, basestring):
if not os.path.exists(config):
raise IOError('cannot read configuration from {0}'.format(config))
config = cdec.configobj.ConfigObj(config, unrepr=True)
logger.info('Loading alignment...')
alignment = cdec.sa.Alignment(from_binary=config['a_file'])
# lexical weighting tables
if not online:
logger.info('Loading bilexical dictionary...')
tt = cdec.sa.BiLex(from_binary=config['lex_file'])
else:
logger.info('Loading online bilexical dictionary...')
tt = cdec.sa.online.Bilex(config['bilex_file'])
self.factory = cdec.sa.HieroCachingRuleFactory(
# compiled alignment object (REQUIRED)
alignment,
# bilexical dictionary if online
bilex=tt if online else None,
# name of generic nonterminal used by Hiero
category="[X]",
# maximum number of contiguous chunks of terminal symbols in RHS of a rule
max_chunks=config['max_nt']+1,
# maximum span of a grammar rule in TEST DATA
max_initial_size=MAX_INITIAL_SIZE,
# maximum number of symbols (both T and NT) allowed in a rule
max_length=config['max_len'],
# maximum number of nonterminals allowed in a rule (set >2 at your own risk)
max_nonterminals=config['max_nt'],
# maximum number of contiguous chunks of terminal symbols
# in target-side RHS of a rule.
max_target_chunks=config['max_nt']+1,
# maximum number of target side symbols (both T and NT) allowed in a rule.
max_target_length=MAX_INITIAL_SIZE,
# minimum span of a nonterminal in the RHS of a rule in TEST DATA
min_gap_size=1,
# filename of file containing precomputed collocations
precompute_file=config['precompute_file'],
# maximum frequency rank of patterns used to compute triples (< 20)
precompute_secondary_rank=config['rank2'],
# maximum frequency rank of patterns used to compute collocations (< 300)
precompute_rank=config['rank1'],
# require extracted rules to have at least one aligned word
require_aligned_terminal=True,
# require each contiguous chunk of extracted rules
# to have at least one aligned word
require_aligned_chunks=False,
# maximum span of a grammar rule extracted from TRAINING DATA
train_max_initial_size=config['max_size'],
# minimum span of an RHS nonterminal in a rule extracted from TRAINING DATA
train_min_gap_size=config['min_gap'],
# False if phrases should be loose (better but slower), True otherwise
tight_phrases=config.get('tight_phrases', True),
)
# TODO: clean this up
# Load data and add features for online grammar extraction
extended_features = []
if online:
extended_features.append(IsSupportedOnline)
# TODO: use @cdec.sa.features decorator for standard features too
# + add a mask to disable features
for f in cdec.sa._SA_FEATURES:
extended_features.append(f)
scorer = cdec.sa.Scorer(EgivenFCoherent, SampleCountF, CountEF,
MaxLexFgivenE(tt), MaxLexEgivenF(tt), IsSingletonF, IsSingletonFE,
*extended_features)
fsarray = cdec.sa.SuffixArray(from_binary=config['f_sa_file'])
edarray = cdec.sa.DataArray(from_binary=config['e_file'])
# lower=faster, higher=better; improvements level off above 200-300 range,
# -1 = don't sample, use all data (VERY SLOW!)
sampler = cdec.sa.Sampler(300, fsarray)
self.factory.configure(fsarray, edarray, sampler, scorer)
# Initialize feature definitions with configuration
for fn in cdec.sa._SA_CONFIGURE:
fn(config)
def grammar(self, sentence, ctx_name=None):
if isinstance(sentence, unicode):
sentence = sentence.encode('utf8')
words = tuple(chain(('<s>',), sentence.split(), ('</s>',)))
meta = cdec.sa.annotate(words)
cnet = cdec.sa.make_lattice(words)
return self.factory.input(cnet, meta, ctx_name)
# Add training instance to data
def add_instance(self, sentence, reference, alignment, ctx_name=None):
f_words = cdec.sa.encode_words(sentence.split())
e_words = cdec.sa.encode_words(reference.split())
al = sorted(tuple(int(i) for i in pair.split('-')) for pair in alignment.split())
self.factory.add_instance(f_words, e_words, al, ctx_name)
# Remove all incremental data for a context
def drop_ctx(self, ctx_name=None):
self.factory.drop_ctx(ctx_name)
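# A hedged usage sketch (the configuration path and the input sentence are
# assumptions):
#
#   extractor = GrammarExtractor('extract.ini')
#   grammar = extractor.grammar(u'el gato negro')   # per-sentence grammar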
|
{
"content_hash": "8caeeb632b81562f20fdb546e5f6b01e",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 92,
"avg_line_length": 46.18852459016394,
"alnum_prop": 0.6120674356699202,
"repo_name": "carhaas/cdec-semparse",
"id": "777f5afd6c7ee07c818a2358662db8d21eb13fed",
"size": "5635",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/cdec/sa/extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "100229"
},
{
"name": "C++",
"bytes": "2954921"
},
{
"name": "CMake",
"bytes": "50773"
},
{
"name": "LLVM",
"bytes": "11021"
},
{
"name": "Perl",
"bytes": "206272"
},
{
"name": "Python",
"bytes": "428404"
},
{
"name": "Roff",
"bytes": "10569"
},
{
"name": "Ruby",
"bytes": "8455"
},
{
"name": "Shell",
"bytes": "11480"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("shs_auth", "0003_auto_20150906_0833")]
operations = [
migrations.AlterModelOptions(
name="user",
options={
"ordering": ("first_name",),
"verbose_name": "User",
"verbose_name_plural": "Users",
},
),
migrations.AlterField(
model_name="user",
name="username",
field=models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=30,
unique=True,
validators=[
django.core.validators.RegexValidator(
"^[\\w.@+-]+$",
"Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.",
)
],
verbose_name="username",
),
),
]
|
{
"content_hash": "32062eadc85e0e4b4019fb58dc83c82d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 121,
"avg_line_length": 32.891891891891895,
"alnum_prop": 0.4897288414133114,
"repo_name": "JanMalte/secondhandshop_server",
"id": "f9c04f6b87f073c36cf9314bea9c9e1f44a655f5",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shs_auth/migrations/0004_auto_20160403_1848.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "117043"
},
{
"name": "JavaScript",
"bytes": "11561"
},
{
"name": "Python",
"bytes": "229093"
}
],
"symlink_target": ""
}
|
from test.test_support import verbose, verify, TestFailed
import sys
import new
class Eggs:
def get_yolks(self):
return self.yolks
print 'new.module()'
m = new.module('Spam')
if verbose:
print m
m.Eggs = Eggs
sys.modules['Spam'] = m
import Spam
def get_more_yolks(self):
return self.yolks + 3
print 'new.classobj()'
C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})
if verbose:
print C
print 'new.instance()'
c = new.instance(C, {'yolks': 3})
if verbose:
print c
o = new.instance(C)
verify(o.__dict__ == {},
"new __dict__ should be empty")
del o
o = new.instance(C, None)
verify(o.__dict__ == {},
"new __dict__ should be empty")
del o
def break_yolks(self):
self.yolks = self.yolks - 2
print 'new.instancemethod()'
im = new.instancemethod(break_yolks, c, C)
if verbose:
print im
verify(c.get_yolks() == 3 and c.get_more_yolks() == 6,
'Broken call of hand-crafted class instance')
im()
verify(c.get_yolks() == 1 and c.get_more_yolks() == 4,
'Broken call of hand-crafted instance method')
# It's unclear what the semantics should be for a code object compiled at
# module scope, but bound and run in a function. In CPython, `c' is global
# (by accident?) while in Jython, `c' is local. The intent of the test
# clearly is to make `c' global, so let's be explicit about it.
codestr = '''
global c
a = 1
b = 2
c = a + b
'''
ccode = compile(codestr, '<string>', 'exec')
# Jython doesn't have a __builtins__, so use a portable alternative
import __builtin__
g = {'c': 0, '__builtins__': __builtin__}
# this test could be more robust
print 'new.function()'
func = new.function(ccode, g)
if verbose:
print func
func()
verify(g['c'] == 3,
'Could not create a proper function object')
# test the various extended flavors of function.new
def f(x):
def g(y):
return x + y
return g
g = f(4)
new.function(f.func_code, {}, "blah")
g2 = new.function(g.func_code, {}, "blah", (2,), g.func_closure)
verify(g2() == 6)
g3 = new.function(g.func_code, {}, "blah", None, g.func_closure)
verify(g3(5) == 9)
def test_closure(func, closure, exc):
try:
new.function(func.func_code, {}, "", None, closure)
except exc:
pass
else:
print "corrupt closure accepted"
test_closure(g, None, TypeError) # invalid closure
test_closure(g, (1,), TypeError) # non-cell in closure
test_closure(g, (1, 1), ValueError) # closure is wrong size
test_closure(f, g.func_closure, ValueError) # no closure needed
print 'new.code()'
# bogus test of new.code()
# Note: Jython will never have new.code()
if hasattr(new, 'code'):
def f(a): pass
c = f.func_code
argcount = c.co_argcount
nlocals = c.co_nlocals
stacksize = c.co_stacksize
flags = c.co_flags
codestring = c.co_code
constants = c.co_consts
names = c.co_names
varnames = c.co_varnames
filename = c.co_filename
name = c.co_name
firstlineno = c.co_firstlineno
lnotab = c.co_lnotab
freevars = c.co_freevars
cellvars = c.co_cellvars
d = new.code(argcount, nlocals, stacksize, flags, codestring,
constants, names, varnames, filename, name,
firstlineno, lnotab, freevars, cellvars)
# test backwards-compatibility version with no freevars or cellvars
d = new.code(argcount, nlocals, stacksize, flags, codestring,
constants, names, varnames, filename, name,
firstlineno, lnotab)
try: # this used to trigger a SystemError
d = new.code(-argcount, nlocals, stacksize, flags, codestring,
constants, names, varnames, filename, name,
firstlineno, lnotab)
except ValueError:
pass
else:
raise TestFailed, "negative co_argcount didn't trigger an exception"
try: # this used to trigger a SystemError
d = new.code(argcount, -nlocals, stacksize, flags, codestring,
constants, names, varnames, filename, name,
firstlineno, lnotab)
except ValueError:
pass
else:
raise TestFailed, "negative co_nlocals didn't trigger an exception"
try: # this used to trigger a Py_FatalError!
d = new.code(argcount, nlocals, stacksize, flags, codestring,
constants, (5,), varnames, filename, name,
firstlineno, lnotab)
except TypeError:
pass
else:
raise TestFailed, "non-string co_name didn't trigger an exception"
# new.code used to be a way to mutate a tuple...
class S(str): pass
t = (S("ab"),)
d = new.code(argcount, nlocals, stacksize, flags, codestring,
constants, t, varnames, filename, name,
firstlineno, lnotab)
verify(type(t[0]) is S, "eek, tuple changed under us!")
if verbose:
print d
|
{
"content_hash": "11556d7f46835cd754e223f18f4d66f2",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 76,
"avg_line_length": 29.587878787878786,
"alnum_prop": 0.6257681278164686,
"repo_name": "MalloyPower/parsing-python",
"id": "f022f7e843fb85ac6905d55efeebcadf2ba4442a",
"size": "4882",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/test/test_new.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import pytest
from facts.user_data import UserFacts
def test_user(tmpdir):
filename = str(tmpdir.join('user.yml'))
obj = UserFacts(filename)
assert obj.data == {}
obj.write('foo', 'bar')
assert obj.data == {'foo': 'bar'}
assert obj.read('foo') == 'bar'
obj.delete('foo')
assert obj.data == {}
|
{
"content_hash": "3a0dc548cbcebf47e561fb8e9d1024b0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 43,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.6085626911314985,
"repo_name": "johnnoone/facts",
"id": "9ef23773f587c1c3b0bb5be738be98e47c42a409",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_user_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22436"
}
],
"symlink_target": ""
}
|
import urlparse
import requests
__author__ = 'ola'
class BoomerangClient:
BASE_URL = 'http://api.boomerang.io/v1'
def __init__(self, project_id, api_key):
self.url = "%s/api_key/%s/projects/%s/boomerangs/" % (self.BASE_URL, api_key, project_id)
def boomerang_url(self, bid):
return urlparse.urljoin(self.url, bid)
def get_all_boomerangs(self):
res = requests.get(self.url)
return res
def get_one_boomerang(self, boomerang_id):
res = requests.get(self.boomerang_url(boomerang_id))
return res
def create_boomerang(self, params):
res = requests.post(self.url, params)
return res
def update_boomerang(self, boomerang_id):
res = requests.put(self.boomerang_url(boomerang_id))
return res
def delete_boomerang(self, boomerang_id):
res = requests.delete(self.boomerang_url(boomerang_id))
return res
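# A minimal usage sketch (the project id, API key and payload keys are
# placeholders, not documented values):
#
#   client = BoomerangClient(project_id='123', api_key='secret')
#   client.create_boomerang({'title': 'Follow up'})
#   client.get_all_boomerangs()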
|
{
"content_hash": "1eba9f040bd5514b3cd505be82136d0c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 97,
"avg_line_length": 23.375,
"alnum_prop": 0.6342245989304813,
"repo_name": "olalidmark/boomerang-client",
"id": "4a05e53a1cb60e71bfce4960cd0a5062bcd88462",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boomerang/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1638"
}
],
"symlink_target": ""
}
|
def Square(x):
return x * x
# Now call it!
print Square(3)
def foo(x=2, y=3):
print "foo(x = %d, y = %d)" % (x, y)
print foo() # x=2, y=3
print foo(4) # x=4, y=3
print foo(5, 6) # x=5, y=6
print foo(y=7) # x=2, y=7 <- NEW
def poor_man_printf(fmt, *args):
# Operator % substitutes format string with args (similar to C printf)
print fmt % args # 'args' is a tuple
poor_man_printf("%d %d", 89, 56)
# For the curious - read about kwargs:
# http://stackoverflow.com/questions/1098549/proper-way-to-use-kwargs-in-python
def foo(**kwargs): # kwargs means "keyworded args"
# `kwargs` is a dictionary of all extra args.
# See the 'dictionary' demos later.
print kwargs
foo(x=42, y=72, name=112, fasdfasdf=1431)
# -------------------------------
# Access to global variables (discouraged)
glob = 42
def change_glob():
#global glob # un-comment to use a global var
#print "glob = %d" % glob
glob = 13
print "glob = %d" % glob
change_glob()
print "glob = %d" % glob
# Returns multiple values as a tuple.
# Tuples are like lists but immutable, see
# http://rgruet.free.fr/PQR27/PQR2.7.html#SequenceTypes
def powers(x):
return (x, x*x, x*x*x)
print "Some powers of 2 are: %s" % str(powers(2))
# NEW! Lambda functions
# You can use function as argument for another function
def Map(array, function):
# Equivalent to the code below:
# return [function(element) for element in array]
result = []
for element in array:
result.append(function(element))
return result
# Call Map - using other function
print Map(range(0, 5), powers)
# Call Map - using a lambda function
print Map(range(0, 5), lambda x: x*x)
# Closure demo
a = 3
print Map(range(0, 5), lambda x: x**a)
# EXERCISES:
# 1)
def bad_foo(arg=[]):
arg.append(42)
print "arg = %s" % arg
bad_foo([1])
bad_foo([1])
z = [0]
bad_foo(z)
bad_foo(z) # okay, bad_foo changes the argument...
bad_foo()
bad_foo() # baaaah!
# 2)
def foo2(closure):
print closure(42)
a = 3
z = lambda x: x + a
a = 4
foo2(z)
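# Notes on the exercises (added explanation, not part of the original demo):
# 1) Default argument values are evaluated once, when the function is defined, so the
#    same list object is reused by every call that omits the argument - the two bare
#    bad_foo() calls therefore print [42] and then [42, 42].
# 2) A lambda captures the variable `a` itself, not its value at definition time, so
#    when foo2 finally calls the closure, a == 4 and 46 is printed.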
|
{
"content_hash": "5c3e817cba5c27ffe15ed29631fd58f5",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 21.978494623655912,
"alnum_prop": 0.6267123287671232,
"repo_name": "denfromufa/mipt-course",
"id": "eb0be804e03d51ba23e40b572efa3027593a2657",
"size": "2105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/python/6_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2826"
},
{
"name": "C++",
"bytes": "77578"
},
{
"name": "CMake",
"bytes": "10252"
},
{
"name": "Makefile",
"bytes": "951"
},
{
"name": "Python",
"bytes": "277579"
},
{
"name": "Shell",
"bytes": "7001"
},
{
"name": "VimL",
"bytes": "5366"
}
],
"symlink_target": ""
}
|
import logging
import threading
import json
import socket
from socketserver import (ThreadingTCPServer, StreamRequestHandler)
from time import sleep
from typing import (Callable, Dict, Optional, TYPE_CHECKING, Tuple, cast)
from .shared_probe_proxy import SharedDebugProbeProxy
from ..core import exceptions
from .debug_probe import DebugProbe
from ..coresight.ap import (APVersion, APv1Address, APv2Address)
if TYPE_CHECKING:
from ..core.session import Session
from ..core.memory_interface import MemoryInterface
LOG = logging.getLogger(__name__)
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
class DebugProbeServer(threading.Thread):
"""@brief Shares a debug probe over a TCP server.
When the start() method is called, a new daemon thread is created to run the server. The server
can be terminated by calling the stop() method, which will also kill the server thread.
"""
def __init__(
self,
session: "Session",
probe: DebugProbe,
port: Optional[int] = None,
serve_local_only: Optional[bool] = None
) -> None:
"""@brief Constructor.
@param self The object.
@param session A @ref pyocd.core.session.Session "Session" object. Does not need to have a
probe assigned to it.
@param probe Either the @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" object to serve
or a @ref pyocd.probe.shared_probe_proxy.SharedDebugProbeProxy "SharedDebugProbeProxy".
Doesn't have to be associated with a session, and should not be opened already. If not
already an instance of
@ref pyocd.probe.shared_probe_proxy.SharedDebugProbeProxy "SharedDebugProbeProxy"
then a new proxy is created to allow the probe to be shared by multiple connections.
@param port The TCP port number. Defaults to the 'probeserver.port' option if not provided.
@param serve_local_only Optional Boolean. Whether to restrict the server to be accessible only from
localhost. If not specified (set to None), then the 'serve_local_only' session option is used.
"""
super().__init__()
# Configure the server thread.
self.name = "debug probe %s server" % probe.unique_id
self.daemon = True
# Init instance variables.
self._session = session
self._probe = probe
self._did_start: bool = False
self._is_running: bool = False
# Make sure we have a shared proxy for the probe.
if isinstance(probe, SharedDebugProbeProxy):
self._proxy = probe
else:
self._proxy = SharedDebugProbeProxy(probe)
# Get the port from options if not specified.
if port is None:
self._port = cast(int, session.options.get('probeserver.port'))
else:
self._port = port
# Default to the serve_local_only session option.
if serve_local_only is None:
serve_local_only = session.options.get('serve_local_only')
host = 'localhost' if serve_local_only else ''
address = (host, self._port)
# Create the server and bind to the address, but don't start running yet.
self._server = TCPProbeServer(address, session, cast(DebugProbe, self._proxy))
self._server.server_bind()
def start(self) -> None:
"""@brief Start the server thread and begin listening.
Returns once the server thread has begun executing.
"""
self._server.server_activate()
super().start()
while not self._did_start:
sleep(0.005)
def stop(self) -> None:
"""@brief Shut down the server.
Any open connections will be forcibly closed. This function does not return until the
server thread has exited.
"""
self._server.shutdown()
self.join()
@property
def is_running(self) -> bool:
"""@brief Whether the server thread is running."""
return self._is_running
@property
def port(self) -> int:
"""@brief The server's port.
If port 0 was specified in the constructor, then, after start() is called, this will reflect the actual port
on which the server is listening.
"""
return self._port
def run(self) -> None:
"""@brief The server thread implementation."""
self._did_start = True
self._is_running = True
# Read back the actual port if 0 was specified.
if self._port == 0:
self._port = self._server.socket.getsockname()[1]
LOG.info("Serving debug probe %s (%s) on port %i",
self._probe.description, self._probe.unique_id, self._port)
self._server.serve_forever()
self._is_running = False
class TCPProbeServer(ThreadingTCPServer):
"""@brief TCP server subclass that carries the session and probe being served."""
# Change the default SO_REUSEADDR setting.
allow_reuse_address = True
def __init__(self, server_address: Tuple[str, int], session: "Session", probe: DebugProbe):
self._session = session
self._probe = probe
super().__init__(server_address, DebugProbeRequestHandler,
bind_and_activate=False)
@property
def session(self) -> "Session":
return self._session
@property
def probe(self) -> DebugProbe:
return self._probe
def handle_error(self, request, client_address):
LOG.error("Error while handling client request (client address %s):", client_address,
exc_info=self._session.log_tracebacks)
class DebugProbeRequestHandler(StreamRequestHandler):
"""@brief Probe server request handler.
This class implements the server side for the remote probe protocol.
request:
````
{
"id": <int>,
"request": <str>,
["arguments": <list>]
}
````
response:
````
{
"id": <int>,
"status": <int>,
["error": <str>,]
["response": <value>]
}
````
"""
## Current version of the remote probe protocol.
PROTOCOL_VERSION = 1
class StatusCode:
"""@brief Constants for errors reported from the server."""
GENERAL_ERROR = 1
PROBE_DISCONNECTED = 2
PROBE_ERROR = 3
TRANSFER_ERROR = 10
TRANSFER_TIMEOUT = 11
TRANSFER_FAULT = 12
def setup(self):
# Do a DNS lookup on the client.
try:
info = socket.gethostbyaddr(self.client_address[0])
self._client_domain = info[0]
except socket.herror:
self._client_domain = self.client_address[0]
# Get the session and probe we're serving from the server.
self._session = cast(TCPProbeServer, self.server).session
self._probe = cast(TCPProbeServer, self.server).probe
LOG.info("Client %s (port %i) connected to probe %s",
self._client_domain, self.client_address[1], self._probe.unique_id)
# Give the probe a session if it doesn't have one, in case it needs to access settings.
# TODO: create a session proxy so client-side options can be accessed
if self._probe.session is None:
self._probe.session = self._session
# Dict to store handles for AP memory interfaces.
self._next_ap_memif_handle: int = 0
self._ap_memif_handles: Dict[int, "MemoryInterface"] = {}
# Create the request handlers dict here so we can reference bound probe methods.
self._REQUEST_HANDLERS: Dict[str, Tuple[Callable, int]] = {
# Command Handler Arg count
'hello': (self._request__hello, 1 ),
'readprop': (self._request__read_property, 1 ),
'open': (self._probe.open, 0 ), # 'open'
'close': (self._probe.close, 0 ), # 'close'
'lock': (self._probe.lock, 0 ), # 'lock'
'unlock': (self._probe.unlock, 0 ), # 'unlock'
'connect': (self._request__connect, 1 ), # 'connect', protocol:str
'disconnect': (self._probe.disconnect, 0 ), # 'disconnect'
'swj_sequence': (self._probe.swj_sequence, 2 ), # 'swj_sequence', length:int, bits:int
'swd_sequence': (self._probe.swd_sequence, 1 ), # 'swd_sequence', sequences:List[Union[Tuple[int], Tuple[int, int]]] -> Tuple[int, List[bytes]]
'jtag_sequence': (self._probe.jtag_sequence, 4 ), # 'jtag_sequence', cycles:int, tms:int, read_tdo:bool, tdi:int -> Union[None, int]
'set_clock': (self._probe.set_clock, 1 ), # 'set_clock', freq:int
'reset': (self._probe.reset, 0 ), # 'reset'
'assert_reset': (self._probe.assert_reset, 1 ), # 'assert_reset', asserted:bool
'is_reset_asserted': (self._probe.is_reset_asserted, 0 ), # 'is_reset_asserted'
'flush': (self._probe.flush, 0 ), # 'flush'
'read_dp': (self._probe.read_dp, 1 ), # 'read_dp', addr:int -> int
'write_dp': (self._probe.write_dp, 2 ), # 'write_dp', addr:int, data:int
'read_ap': (self._probe.read_ap, 1 ), # 'read_ap', addr:int -> int
'write_ap': (self._probe.write_ap, 2 ), # 'write_ap', addr:int, data:int
'read_ap_multiple': (self._probe.read_ap_multiple, 2 ), # 'read_ap_multiple', addr:int, count:int -> List[int]
'write_ap_multiple': (self._probe.write_ap_multiple, 2 ), # 'write_ap_multiple', addr:int, data:List[int]
'get_memory_interface_for_ap': (self._request__get_memory_interface_for_ap, 2), # 'get_memory_interface_for_ap', ap_address_version:int, ap_nominal_address:int -> handle:int|null
'swo_start': (self._probe.swo_start, 1 ), # 'swo_start', baudrate:int
'swo_stop': (self._probe.swo_stop, 0 ), # 'swo_stop'
'swo_read': (self._request__swo_read, 0 ), # 'swo_read' -> List[int]
'read_mem': (self._request__read_mem, 3 ), # 'read_mem', handle:int, addr:int, xfer_size:int -> int
'write_mem': (self._request__write_mem, 4 ), # 'write_mem', handle:int, addr:int, value:int, xfer_size:int
'read_block32': (self._request__read_block32, 3 ), # 'read_block32', handle:int, addr:int, word_count:int -> List[int]
'write_block32': (self._request__write_block32, 3 ), # 'write_block32', handle:int, addr:int, data:List[int]
'read_block8': (self._request__read_block8, 3 ), # 'read_block8', handle:int, addr:int, word_count:int -> List[int]
'write_block8': (self._request__write_block8, 3 ), # 'write_block8', handle:int, addr:int, data:List[int]
}
# Let superclass do its thing.
super().setup()
def finish(self):
LOG.info("Client %s (port %i) disconnected from probe %s",
self._client_domain, self.client_address[1], self._probe.unique_id)
# Flush the probe and ignore any lingering errors.
try:
self._probe.flush()
except exceptions.Error as err:
LOG.debug("exception while flushing probe on disconnect: %s", err)
super().finish()
def _send_error_response(self, status=1, message=""):
response_dict = {
"id": self._current_request_id,
"status": status,
"error": message,
}
response = json.dumps(response_dict)
TRACE.debug("response: %s", response)
response_encoded = response.encode('utf-8')
self.wfile.write(response_encoded + b"\n")
def _send_response(self, result):
response_dict = {
"id": self._current_request_id,
"status": 0,
}
if result is not None:
response_dict["result"] = result
response = json.dumps(response_dict)
TRACE.debug("response: %s", response)
response_encoded = response.encode('utf-8')
self.wfile.write(response_encoded + b"\n")
def handle(self):
# Process requests until the connection is closed.
while True:
request = None
request_type = "<missing>"
try:
request_dict = None
self._current_request_id = -1
# Read request line.
request = self.rfile.readline()
TRACE.debug("request: %s", request)
if len(request) == 0:
LOG.debug("empty request, closing connection")
return
try:
request_dict = json.loads(request)
except json.JSONDecodeError:
self._send_error_response(message="invalid request format")
continue
if not isinstance(request_dict, dict):
self._send_error_response(message="invalid request format")
continue
if 'id' not in request_dict:
self._send_error_response(message="missing request ID")
continue
self._current_request_id = request_dict['id']
if 'request' not in request_dict:
self._send_error_response(message="missing request field")
continue
request_type = request_dict['request']
# Get arguments. If the key isn't present then there are no arguments.
request_args = request_dict.get('arguments', [])
if not isinstance(request_args, list):
self._send_error_response(message="invalid request arguments format")
continue
if request_type not in self._REQUEST_HANDLERS:
self._send_error_response(message="unknown request type")
continue
handler, arg_count = self._REQUEST_HANDLERS[request_type]
self._check_args(request_args, arg_count)
result = handler(*request_args)
# Send a success response.
self._send_response(result)
# Catch all exceptions so that an error response can be returned, to not leave the client hanging.
except Exception as err:
                # Only send an error response if we received a request.
if request is not None:
LOG.error("Error processing '%s' request (ID %i, client %s, probe %s): %s",
request_type, self._current_request_id, self._client_domain, self._probe.unique_id, err,
exc_info=self._session.log_tracebacks)
LOG.debug("Full request from error: %s", request.decode('utf-8', 'replace'))
self._send_error_response(status=self._get_exception_status_code(err),
message=str(err))
else:
LOG.error("Error before request was received: %s", err,
exc_info=self._session.log_tracebacks)
# Reraise non-pyocd errors.
if not isinstance(err, exceptions.Error):
raise
def _get_exception_status_code(self, err):
"""@brief Convert an exception class into a status code."""
# Must test the exception class in order of specific to general.
if isinstance(err, exceptions.ProbeDisconnected):
return self.StatusCode.PROBE_DISCONNECTED
elif isinstance(err, exceptions.ProbeError):
return self.StatusCode.PROBE_ERROR
elif isinstance(err, exceptions.TransferFaultError):
return self.StatusCode.TRANSFER_FAULT
elif isinstance(err, exceptions.TransferTimeoutError):
return self.StatusCode.TRANSFER_TIMEOUT
elif isinstance(err, exceptions.TransferError):
return self.StatusCode.TRANSFER_ERROR
else:
return self.StatusCode.GENERAL_ERROR
def _check_args(self, args, count):
if len(args) != count:
raise exceptions.Error("malformed request; invalid number of arguments")
def _request__hello(self, version):
# 'hello', protocol-version:int
if version != self.PROTOCOL_VERSION:
raise exceptions.Error("client requested unsupported protocol version %i (expected %i)" %
(version, self.PROTOCOL_VERSION))
def _request__read_property(self, name):
# 'readprop', name:str
if not hasattr(self._probe, name):
raise exceptions.Error("unknown property name '%s' requested" % name)
value = getattr(self._probe, name)
# Run the property value through a value transformer if one is defined for this property.
if name in self._PROPERTY_CONVERTERS:
value = self._PROPERTY_CONVERTERS[name](value)
return value
def _request__connect(self, protocol_name):
# 'connect', protocol:str
try:
protocol = DebugProbe.Protocol[protocol_name]
except KeyError:
raise exceptions.Error("invalid protocol name %s" % protocol_name)
self._probe.connect(protocol)
def _request__get_memory_interface_for_ap(self, ap_address_version, ap_nominal_address):
# 'get_memory_interface_for_ap', ap_address_version:int, ap_nominal_address:int -> handle:int|null
ap_version = APVersion(ap_address_version)
if ap_version == APVersion.APv1:
ap_address = APv1Address(ap_nominal_address)
elif ap_version == APVersion.APv2:
ap_address = APv2Address(ap_nominal_address)
else:
raise exceptions.Error("invalid AP version in remote get_memory_interface_for_ap request")
memif = self._probe.get_memory_interface_for_ap(ap_address)
if memif is not None:
handle = self._next_ap_memif_handle
self._next_ap_memif_handle += 1
self._ap_memif_handles[handle] = memif
LOG.debug("creating memif for AP%s (handle %i)", ap_address, handle)
else:
handle = None
return handle
def _request__swo_read(self):
return list(self._probe.swo_read())
def _request__read_mem(self, handle, addr, xfer_size):
# 'read_mem', handle:int, addr:int, xfer_size:int -> int
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
return self._ap_memif_handles[handle].read_memory(addr, xfer_size, now=True)
def _request__write_mem(self, handle, addr, value, xfer_size):
# 'write_mem', handle:int, addr:int, value:int, xfer_size:int
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
self._ap_memif_handles[handle].write_memory(addr, value, xfer_size)
def _request__read_block32(self, handle, addr, word_count):
# 'read_block32', handle:int, addr:int, word_count:int -> List[int]
# TODO use base64 data
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
return self._ap_memif_handles[handle].read_memory_block32(addr, word_count)
def _request__write_block32(self, handle, addr, data):
# 'write_block32', handle:int, addr:int, data:List[int]
# TODO use base64 data
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
self._ap_memif_handles[handle].write_memory_block32(addr, data)
def _request__read_block8(self, handle, addr, word_count):
# 'read_block8', handle:int, addr:int, word_count:int -> List[int]
# TODO use base64 data
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
return self._ap_memif_handles[handle].read_memory_block8(addr, word_count)
def _request__write_block8(self, handle, addr, data):
# 'write_block8', handle:int, addr:int, data:List[int]
# TODO use base64 data
if handle not in self._ap_memif_handles:
raise exceptions.Error("invalid handle received from remote memory access")
self._ap_memif_handles[handle].write_memory_block8(addr, data)
_PROPERTY_CONVERTERS = {
'capabilities': lambda value: [v.name for v in value],
'supported_wire_protocols': lambda value: [v.name for v in value],
'wire_protocol': lambda value: value.name if (value is not None) else None,
}
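# A minimal usage sketch (illustrative only; not part of this module). It assumes a
# probe is attached and that port 5555 is free; ConnectHelper simply builds a session
# around whichever probe is selected.
#
#     from pyocd.core.helpers import ConnectHelper
#     from pyocd.probe.tcp_probe_server import DebugProbeServer
#
#     session = ConnectHelper.session_with_chosen_probe()
#     server = DebugProbeServer(session, session.probe, port=5555)
#     server.start()
#     ...     # remote clients can now connect and drive the probe
#     server.stop()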
|
{
"content_hash": "afdbbf829c002577329073f56e7fcfc5",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 194,
"avg_line_length": 45.612631578947365,
"alnum_prop": 0.5736638050401551,
"repo_name": "flit/pyOCD",
"id": "3954ca4422141eb4e81557fd8ed193bd328572f2",
"size": "22343",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pyocd/probe/tcp_probe_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1410"
},
{
"name": "Batchfile",
"bytes": "741"
},
{
"name": "C",
"bytes": "3904"
},
{
"name": "Makefile",
"bytes": "4391"
},
{
"name": "Python",
"bytes": "3603471"
}
],
"symlink_target": ""
}
|
import os
import sys
import timeit
import openpyxl
def writer(optimised, cols, rows):
"""
Create a worksheet with variable width rows. Because data must be
serialised row by row it is often the width of the rows which is most
important.
"""
wb = openpyxl.Workbook(optimized_write=optimised)
ws = wb.create_sheet()
    row = range(cols)
    for idx in xrange(rows):
        if not (idx + 1) % (rows/10):
progress = "." * ((idx + 1) / (1 + rows/10))
sys.stdout.write("\r" + progress)
sys.stdout.flush()
ws.append(row)
folder = os.path.split(__file__)[0]
print
wb.save(os.path.join(folder, "files", "large.xlsx"))
def timer(fn, **kw):
"""
Create a timeit call to a function and pass in keyword arguments.
The function is called twice, once using the standard workbook, then with the optimised one.
Time from the best of three is taken.
"""
result = []
cols = kw.get("cols", 0)
rows = kw.get("rows", 0)
for opt in (False, True):
kw.update(optimised=opt)
print "{} cols {} rows, Worksheet is {}".format(cols, rows,
opt and "optimised" or "not optimised")
times = timeit.repeat("{}(**{})".format(fn.func_name, kw),
setup="from __main__ import {}".format(fn.func_name),
number = 1,
repeat = 3
)
print "{:.2f}s".format(min(times))
result.append(min(times))
std, opt = result
print "Optimised takes {:.2%} time\n".format(opt/std)
return std, opt
if __name__ == "__main__":
timer(writer, cols=100, rows=100)
timer(writer, cols=1000, rows=100)
timer(writer, cols=4000, rows=100)
timer(writer, cols=8192, rows=100)
timer(writer, cols=10, rows=10000)
timer(writer, cols=4000, rows=1000)
|
{
"content_hash": "987dbb3b4ccdf6d77898a7b4b4ac97fe",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 96,
"avg_line_length": 32.69491525423729,
"alnum_prop": 0.55883877656817,
"repo_name": "benpruitt/customarrayformatter",
"id": "1ed65b9597fbd7a1500db629e35f14d649e5b57a",
"size": "1929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openpyxl/benchmarks/writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "554753"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from localflavor.us.models import PhoneNumberField
from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
from profiles import constants
from profiles import access
def filter_access_levels(query, field, access_levels, owner_field = None,
owner_object = None):
"""Given a query, add an OR filter for the list of valid access levels
applied to the given field. Can optionally add in an owner field and
owner object that will be added, so that a user can see their own
items regardless of access level"""
access_filter = reduce(
lambda q,access_level: q|Q(**{field: access_level}), access_levels, Q())
if owner_field and owner_object:
access_filter = access_filter | Q(**{owner_field: owner_object})
return query.filter(access_filter)
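# Illustrative example (hypothetical access level values and owner) of the filter
# this helper builds:
#
#     filter_access_levels(UserProfile.objects.all(), "profile_access",
#                          ["public", "members"], "user", some_user)
#
# is roughly equivalent to:
#
#     UserProfile.objects.all().filter(
#         Q(profile_access="public") | Q(profile_access="members") | Q(user=some_user))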
class UserProfile(models.Model):
"""Models the information we need for a user to be a member."""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
status = models.CharField(max_length=20, choices=constants.STATUS_LEVELS, null=True)
profile_access = models.CharField(max_length=20, choices=constants.BASIC_ACCESS_LEVELS, \
default=constants.MEMBERS_ACCESS,
help_text = """This determines who can see your profile.""")
display_name = models.CharField(max_length=100, blank = True,
help_text="Your display name throughout the site, which can be different from the default of your username.")
legal_name = models.CharField(max_length=255, blank = True,
help_text="Your legal name, which could be useful for administrative purposes.")
legal_name_access = models.CharField(max_length=20, choices=constants.ACCESS_LEVELS, \
default=constants.MEMBERS_ACCESS,
help_text="Restrict who has access to your legal name.")
public_about = models.TextField(blank = True,
help_text="This about section will always be public.")
about = models.TextField(blank = True,
help_text= "You can customize this area to tell others more about yourself." )
about_access = models.CharField(max_length=20, choices=constants.BASIC_ACCESS_LEVELS,
default=constants.MEMBERS_ACCESS,
help_text="Restrict who has access to your about text.")
dietary_considerations = models.TextField(blank = True,
help_text="Do you have any dietary restrictions people should be aware of?")
dietary_access = models.CharField(max_length=20, choices=constants.ACCESS_LEVELS,
default=constants.MEMBERS_ACCESS,
help_text="Restrict who has access to your dietary considerations.")
preferred_contact_method = models.CharField(max_length=20,
choices=constants.CONTACT_METHODS, default=constants.EMAIL_CONTACT,
help_text="This lists your preferred contact method, so people know the best way to get in touch with you.")
preferred_phone = models.ForeignKey('UserPhone', blank = True, null = True,
help_text="This sets your preferred phone number, so if you have more than one you can say which one to use.",
on_delete=models.SET_NULL)
preferred_email = models.ForeignKey('UserEmail', blank = True, null = True,
help_text="This sets your preferred email, so if you have more than one you can say which one to use.",
on_delete=models.SET_NULL)
preferred_address = models.ForeignKey('UserAddress', blank = True, null = True,
help_text="This sets your preferred address, so if you have more than one you can say which one to use.",
on_delete=models.SET_NULL)
emergency_contact = models.TextField(blank = True, default="",
help_text="Please describe who to contact in an emergency and how to best reach them. This is members only information.")
became_member_on = models.DateField(null = True, blank = True)
created_on = models.DateTimeField(auto_now_add=True)
last_modified_on = models.DateTimeField(auto_now=True)
# TODO: add
# avatar
# portrait
# using an access based media system
def __unicode__(self):
if self.display_name:
return self.display_name
else:
return self.user.username
def get_absolute_url(self):
return reverse('user_profile', kwargs={'username': self.user.username})
    # TODO: override the save method or add a listener to ensure that status changes
    # create a MemberStatusChange record. This can happen on creation of a new profile
    # or modification of an old one.
# also need to make sure that became_member_on is set to an
# appropriate value or delete it entirely and rely only on status changes
def _is_member(self):
return self.status == constants.ACTIVE_STATUS
is_member = property(_is_member)
def _is_admin(self):
return self.user.is_staff
is_admin = property(_is_admin)
def _latest_status(self):
try:
            return MemberStatusChange.objects.filter(profile = self).order_by('-changed_on')[0]
except IndexError:
return None
latest_status = property(_latest_status)
@staticmethod
def get_profile(user):
if not user or not user.is_authenticated():
return None
try:
return UserProfile.objects.get(user = user)
except ObjectDoesNotExist:
return None
@staticmethod
def get_directory(viewer_profile = None, status = None):
"""Returns a list of profiles in the directory. Giving a viewer_profile
allows the viewer to see profiles that they have access to. Giving
a status filters the list to that type of membership status."""
# we don't have any particular owner here, so get general access levels
# for the viewer
valid_access_levels = access.access_levels(None, viewer_profile)
# okay, so if we're making a list of profiles we can show in this directory
# view, we want items both in the valid access levels and in the
# BASIC_ACCESS_LEVELS set that profile_access can be in
# see this for more information on set operations:
# http://docs.python.org/2/library/sets.html
directory_access_levels = valid_access_levels.intersection(
set([access_level[0] for access_level in
constants.BASIC_ACCESS_LEVELS]))
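        # Illustrative example (hypothetical values): if the viewer's access levels
        # are {"public", "members"} and BASIC_ACCESS_LEVELS offers ("public", "members"),
        # the intersection keeps both; an anonymous viewer would be narrowed down to
        # {"public"} only.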
# optionally filter by status; exclude status by prefixing with "-"
if status:
if status.startswith("-"):
query = UserProfile.objects.exclude(status = status[1:])
            else:
                query = UserProfile.objects.filter(status = status)
        else:
            query = UserProfile.objects.all()
        return filter_access_levels(query, "profile_access", directory_access_levels)
def access_strip(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
"""Strip away information from the model that does not have the given
valid access levels."""
# if the viewer is the owner of the profile, they can observe all the
# current fields of data
if viewer_profile == self:
return
if not self.legal_name_access in access_levels:
self.legal_name = ""
if not self.about_access in access_levels:
self.about = ""
if not constants.MEMBERS_ACCESS in access_levels:
self.became_member_on = None
self.emergency_contact = None
def get_preferred_phone(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
if self.preferred_phone and \
self.preferred_phone.access in access_levels:
return self.preferred_phone
return None
def get_preferred_email(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
if self.preferred_email and \
self.preferred_email.access in access_levels:
return self.preferred_email
return None
def get_preferred_address(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
if self.preferred_address and \
self.preferred_address.access in access_levels:
return self.preferred_address
return None
def get_phone_contacts(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
"""Fetch a list of phone contacts, given an access level. The default
access level is public."""
query = UserPhone.objects.filter(profile = self)
return filter_access_levels(query, "access", access_levels, "profile",
viewer_profile)
def get_address_contacts(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
"""Fetch a list of address contacts, given an access level. The default
access level is public."""
query = UserAddress.objects.filter(profile = self)
return filter_access_levels(query, "access", access_levels, "profile",
viewer_profile)
def get_email_contacts(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
"""Fetch a list of email contacts, given an access level. The default
access level is public."""
query = UserEmail.objects.filter(profile = self)
return filter_access_levels(query, "access", access_levels, "profile",
viewer_profile)
def get_external_sites(self, access_levels = (constants.PUBLIC_ACCESS,),
viewer_profile = None):
"""Fetch a list of external sites, given an access level. The default
access level is public."""
query = UserExternalSite.objects.filter(profile = self)
return filter_access_levels(query, "access", access_levels, "profile",
viewer_profile).order_by('order')
class MemberStatusChange(models.Model):
"""A history of user status changes."""
profile = models.ForeignKey('UserProfile')
changed_on = models.DateTimeField(auto_now_add=True)
# old status can be blank because a profile could previously not exist
old_status = models.CharField(max_length=20, choices=constants.STATUS_LEVELS, \
blank = True, null = True)
new_status = models.CharField(max_length=20, choices=constants.STATUS_LEVELS)
notes = models.TextField(blank=True, default="")
class Meta:
ordering = ['-changed_on']
def save(self, *args, **kwargs):
super(MemberStatusChange, self).save(*args, **kwargs)
self.profile.status = self.new_status
if self.new_status == constants.ACTIVE_STATUS and \
not self.profile.became_member_on:
self.profile.became_member_on = datetime.date.today()
self.profile.save()
def get_absolute_url(self):
return reverse('member_status_change_detail', kwargs = {'username': self.profile.user.username, 'pk': self.pk})
class UserExternalSite(models.Model):
profile = models.ForeignKey('UserProfile')
handle = models.CharField(max_length=50, blank = True)
link = models.URLField(blank = True)
site_category = models.ForeignKey('othersites.SiteInfo', blank = True, null = True)
custom_label = models.CharField(max_length=50, blank=True)
order = models.PositiveIntegerField(default=100)
access = models.CharField(max_length=20, choices=constants.ACCESS_LEVELS, \
default = constants.MEMBERS_ACCESS)
notes = models.TextField(blank = True, default="")
def _get_label(self):
if self.site_category and self.custom_label:
return "%s (%s)" % (self.site_category.name, self.custom_label)
elif self.site_category:
return self.site_category.name
elif self.custom_label:
return self.custom_label
return ""
label = property(_get_label)
class Meta:
ordering = ['profile', '-order']
index_together = [('profile', 'order')]
def __unicode__(self):
if self.site_category:
return "%s (%s)" % (self.site_category, self.profile)
if self.custom_label:
return "%s (%s)" % (self.custom_label, self.profile)
return "(No label) (%s)" % self.profile
class UserContactInfo(models.Model):
profile = models.ForeignKey('UserProfile')
label = models.CharField(max_length=30, blank = True)
access = models.CharField(max_length=20, choices=constants.ACCESS_LEVELS, \
default=constants.MEMBERS_ACCESS)
notes = models.TextField(blank = True, default="")
class Meta:
abstract = True
class UserPhone(UserContactInfo):
phone = PhoneNumberField()
def __unicode__(self):
return self.phone
def get_absolute_url(self):
return reverse('user_profile_phone_detail',
kwargs = {'username': self.profile.user.username, 'pk': self.pk})
def _is_preferred(self):
if self.profile.preferred_phone_id == self.id:
return True
return False
is_preferred = property(_is_preferred)
class Meta:
unique_together = (('profile', 'phone'),)
class UserEmail(UserContactInfo):
email = models.EmailField()
def __unicode__(self):
return self.email
def get_absolute_url(self):
return reverse('user_profile_email_detail',
kwargs = {'username': self.profile.user.username, 'pk': self.pk})
def _is_preferred(self):
if self.profile.preferred_email_id == self.id:
return True
return False
is_preferred = property(_is_preferred)
class Meta:
unique_together = (("profile", "email"),)
class UserAddress(UserContactInfo):
address = models.TextField()
def __unicode__(self):
if self.label:
return self.label
return "address"
def get_absolute_url(self):
return reverse('user_profile_address_detail',
kwargs = {'username': self.profile.user.username, 'pk': self.pk})
def _is_preferred(self):
if self.profile.preferred_address_id == self.id:
return True
return False
is_preferred = property(_is_preferred)
|
{
"content_hash": "2f949878ef91a87c30e902258939be07",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 129,
"avg_line_length": 39.206989247311824,
"alnum_prop": 0.6515598217346589,
"repo_name": "SeattleAttic/HedyNet",
"id": "1bb439bae96b421205211e80aa6c5c980eea1dc6",
"size": "14585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HedyNet/profiles/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "382271"
},
{
"name": "HTML",
"bytes": "93617"
},
{
"name": "JavaScript",
"bytes": "623"
},
{
"name": "Python",
"bytes": "88382"
},
{
"name": "Ruby",
"bytes": "897"
}
],
"symlink_target": ""
}
|
from drift import management
import argparse, os, sys
sys.dont_write_bytecode = True
if __name__ == "__main__":
path = os.path.dirname(__file__)
sys.path.insert(0, path)
management.execute_cmd()
|
{
"content_hash": "646bc84f362c5bd2fa2876ec95977e10",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 36,
"avg_line_length": 26,
"alnum_prop": 0.6586538461538461,
"repo_name": "1939Games/drift",
"id": "1ce5328c3fd3bb322a510956422f87032b6b62ee",
"size": "233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/drift-admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2371"
},
{
"name": "Python",
"bytes": "274810"
},
{
"name": "Shell",
"bytes": "3540"
}
],
"symlink_target": ""
}
|
import json
import logging
from datetime import datetime
from typing import List, Optional
import requests
from authlib.jose import JWTClaims, jwt
from authlib.jose.errors import DecodeError, JoseError
from authlib.oidc.core import CodeIDToken
from django.contrib.auth.models import Permission
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db.models import QuerySet
from django.utils.timezone import make_aware
from jwt import PyJWTError
from ...account.models import User
from ...core.jwt import (
JWT_ACCESS_TYPE,
JWT_OWNER_FIELD,
JWT_REFRESH_TYPE,
PERMISSIONS_FIELD,
jwt_decode,
jwt_encode,
jwt_user_payload,
)
from ...core.permissions import get_permission_names, get_permissions_from_codenames
from ...graphql.account.mutations.authentication import (
_does_token_match,
_get_new_csrf_token,
)
from ..error_codes import PluginErrorCode
from ..models import PluginConfiguration
from . import PLUGIN_ID
from .const import SALEOR_STAFF_PERMISSION
from .exceptions import AuthenticationError
JWKS_KEY = "oauth_jwks"
JWKS_CACHE_TIME = 60 * 60 # 1 hour
USER_INFO_DEFAULT_CACHE_TIME = 60 * 60 # 1 hour
REQUEST_TIMEOUT = 5
OAUTH_TOKEN_REFRESH_FIELD = "oauth_refresh_token"
CSRF_FIELD = "csrf_token"
logger = logging.getLogger(__name__)
def fetch_jwks(jwks_url) -> Optional[dict]:
"""Fetch JSON Web Key Sets from a provider.
    Fetched keys are stored in the cache to reduce the number of requests made
    to the provider.
:raises AuthenticationError
"""
response = None
try:
response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
jwks = response.json()
except requests.exceptions.RequestException:
logger.exception("Unable to fetch jwks from %s", jwks_url)
raise AuthenticationError("Unable to finalize the authentication process.")
except json.JSONDecodeError:
content = response.content if response else "Unable to find the response"
logger.exception(
"Unable to decode the response from auth service with jwks. "
"Response: %s",
content,
)
raise AuthenticationError("Unable to finalize the authentication process.")
keys = jwks.get("keys", [])
if not keys:
logger.warning("List of JWKS keys is empty")
cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)
return keys
def get_jwks_keys_from_cache_or_fetch(jwks_url: str) -> dict:
jwks_keys = cache.get(JWKS_KEY)
if jwks_keys is None:
jwks_keys = fetch_jwks(jwks_url)
return jwks_keys
def get_user_info_from_cache_or_fetch(
user_info_url: str, access_token: str, exp_time: Optional[int]
) -> Optional[dict]:
user_info_data = cache.get(f"{PLUGIN_ID}.{access_token}", None)
if not user_info_data:
user_info_data = get_user_info(user_info_url, access_token)
cache_time = USER_INFO_DEFAULT_CACHE_TIME
if exp_time:
now_ts = int(datetime.now().timestamp())
exp_delta = exp_time - now_ts
cache_time = exp_delta if exp_delta > 0 else cache_time
if user_info_data:
cache.set(f"{PLUGIN_ID}.{access_token}", user_info_data, cache_time)
# user_info_data is None when we were not able to use an access token to fetch
# the user info data
return user_info_data
def get_user_info(user_info_url, access_token) -> Optional[dict]:
try:
response = requests.get(
user_info_url,
headers={"Authorization": f"Bearer {access_token}"},
timeout=REQUEST_TIMEOUT,
)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
logger.warning(
"Fetching OIDC user info failed. HTTP error occurred",
extra={"user_info_url": user_info_url, "error": e},
)
return None
except requests.exceptions.RequestException as e:
logger.warning(
"Fetching OIDC user info failed",
extra={"user_info_url": user_info_url, "error": e},
)
return None
except json.JSONDecodeError as e:
logger.warning(
"Invalid OIDC user info response",
extra={"user_info_url": user_info_url, "error": e},
)
return None
def decode_access_token(token, jwks_url):
try:
return get_decoded_token(token, jwks_url)
except (JoseError, ValueError) as e:
logger.info(
"Invalid OIDC access token format", extra={"error": e, "jwks_url": jwks_url}
)
return None
def get_user_from_oauth_access_token_in_jwt_format(
token_payload: JWTClaims,
user_info_url: str,
access_token: str,
use_scope_permissions: bool,
audience: str,
):
try:
token_payload.validate()
except (JoseError, ValueError) as e:
logger.info(
"OIDC access token validation failed",
extra={"error": e, "user_info_url": user_info_url},
)
return None
user_info = get_user_info_from_cache_or_fetch(
user_info_url,
access_token,
token_payload["exp"],
)
if not user_info:
logger.info(
"Failed to fetch user info for a valid OIDC access token",
extra={"token_exp": token_payload["exp"], "user_info_url": user_info_url},
)
return None
try:
user = get_or_create_user_from_payload(
user_info, user_info_url, last_login=token_payload.get("iat")
)
except AuthenticationError as e:
logger.info("Unable to create a user object", extra={"error": e})
return None
scope = token_payload.get("scope")
token_permissions = token_payload.get("permissions", [])
# check if token contains expected aud
aud = token_payload.get("aud")
if not audience:
audience_in_token = False
elif isinstance(aud, list):
audience_in_token = audience in aud
else:
audience_in_token = audience == aud
is_staff_id = SALEOR_STAFF_PERMISSION
if use_scope_permissions and audience_in_token:
permissions = get_saleor_permissions_qs_from_scope(scope)
if not permissions and token_permissions:
permissions = get_saleor_permissions_from_list(token_permissions)
user.effective_permissions = permissions
is_staff_in_scope = is_staff_id in scope
is_staff_in_token_permissions = is_staff_id in token_permissions
if is_staff_in_scope or is_staff_in_token_permissions or permissions:
if not user.is_staff:
user.is_staff = True
user.save(update_fields=["is_staff"])
elif user.is_staff:
user.is_staff = False
user.save(update_fields=["is_staff"])
else:
user.is_staff = False
return user
def get_user_from_oauth_access_token(
access_token: str,
jwks_url: str,
user_info_url: str,
use_scope_permissions: bool,
audience: str,
):
    # Try to decode the token to determine whether it is in JWT format.
access_token_jwt_payload = decode_access_token(access_token, jwks_url)
if access_token_jwt_payload:
return get_user_from_oauth_access_token_in_jwt_format(
access_token_jwt_payload,
user_info_url=user_info_url,
access_token=access_token,
use_scope_permissions=use_scope_permissions,
audience=audience,
)
user_info = get_user_info_from_cache_or_fetch(
user_info_url, access_token, exp_time=None
)
if not user_info:
logger.info(
"Failed to fetch OIDC user info", extra={"user_info_url": user_info_url}
)
return None
user = get_or_create_user_from_payload(user_info, oauth_url=user_info_url)
if not use_scope_permissions:
user.is_staff = False
return user
def create_jwt_token(
id_payload: CodeIDToken,
user: User,
access_token: str,
permissions: Optional[List[str]],
owner: str,
) -> str:
additional_payload = {
"exp": id_payload["exp"],
"oauth_access_key": access_token,
}
if permissions is not None:
additional_payload[PERMISSIONS_FIELD] = permissions
jwt_payload = jwt_user_payload(
user,
JWT_ACCESS_TYPE,
exp_delta=None, # we pass exp from auth service, in additional_payload
additional_payload=additional_payload,
token_owner=owner,
)
return jwt_encode(jwt_payload)
def create_jwt_refresh_token(user: User, refresh_token: str, csrf: str, owner: str):
additional_payload = {
OAUTH_TOKEN_REFRESH_FIELD: refresh_token,
CSRF_FIELD: csrf,
}
jwt_payload = jwt_user_payload(
user,
JWT_REFRESH_TYPE,
# oauth_refresh_token has own expiration time. No need to duplicate it here
exp_delta=None,
additional_payload=additional_payload,
token_owner=owner,
)
return jwt_encode(jwt_payload)
def get_decoded_token(token, jwks_url, claims_cls=None):
keys = get_jwks_keys_from_cache_or_fetch(jwks_url)
decoded_token = jwt.decode(token, keys, claims_cls=claims_cls)
return decoded_token
def get_parsed_id_token(token_data, jwks_url) -> CodeIDToken:
id_token = token_data.get("id_token")
if not id_token:
raise AuthenticationError("Missing ID Token.")
try:
decoded_token = get_decoded_token(id_token, jwks_url, CodeIDToken)
decoded_token.validate()
return decoded_token
except DecodeError:
logger.warning("Unable to decode provided token", exc_info=True)
raise AuthenticationError("Unable to decode provided token")
except (JoseError, ValueError):
logger.warning("Token validation failed", exc_info=True)
raise AuthenticationError("Token validation failed")
def get_or_create_user_from_payload(
payload: dict, oauth_url: str, last_login: Optional[int] = None
) -> User:
oidc_metadata_key = f"oidc-{oauth_url}"
user_email = payload.get("email")
if not user_email:
raise AuthenticationError("Missing user's email.")
sub = payload.get("sub")
get_kwargs = {"private_metadata__contains": {oidc_metadata_key: sub}}
if not sub:
get_kwargs = {"email": user_email}
logger.warning("Missing sub section in OIDC payload")
defaults_create = {
"is_active": True,
"email": user_email,
"first_name": payload.get("given_name", ""),
"last_name": payload.get("family_name", ""),
"private_metadata": {oidc_metadata_key: sub},
}
try:
user = User.objects.get(**get_kwargs)
except User.DoesNotExist:
user, _ = User.objects.get_or_create(
email=user_email,
defaults=defaults_create,
)
except User.MultipleObjectsReturned:
logger.warning("Multiple users returned for single OIDC sub ID")
user, _ = User.objects.get_or_create(
email=user_email,
defaults=defaults_create,
)
    if not user.is_active:  # True only if we fetched a disabled user.
raise AuthenticationError("Unable to log in.")
_update_user_details(
user=user,
oidc_key=oidc_metadata_key,
user_email=user_email,
sub=sub, # type: ignore
last_login=last_login,
)
return user
def _update_user_details(
user: User, oidc_key: str, user_email: str, sub: str, last_login: Optional[int]
):
user_sub = user.get_value_from_private_metadata(oidc_key)
fields_to_save = []
if user_sub != sub:
user.store_value_in_private_metadata({oidc_key: sub})
fields_to_save.append("private_metadata")
if user.email != user_email:
if User.objects.filter(email=user_email).exists():
logger.warning(
"Unable to update user email as the new one already exists in DB",
extra={"oidc_key": oidc_key},
)
return
user.email = user_email
fields_to_save.append("email")
if last_login:
if not user.last_login or user.last_login.timestamp() < last_login:
login_time = make_aware(datetime.fromtimestamp(last_login))
user.last_login = login_time
fields_to_save.append("last_login")
if fields_to_save:
user.save(update_fields=fields_to_save)
def get_user_from_token(claims: CodeIDToken) -> User:
user_email = claims.get("email")
if not user_email:
raise AuthenticationError("Missing user's email.")
user = User.objects.filter(email=user_email, is_active=True).first()
if not user:
raise AuthenticationError("User does not exist.")
return user
def is_owner_of_token_valid(token: str, owner: str) -> bool:
try:
payload = jwt_decode(token, verify_expiration=False)
return payload.get(JWT_OWNER_FIELD, "") == owner
except Exception:
return False
def create_tokens_from_oauth_payload(
token_data: dict,
user: User,
claims: CodeIDToken,
permissions: Optional[List[str]],
owner: str,
):
refresh_token = token_data.get("refresh_token")
access_token = token_data.get("access_token", "")
tokens = {
"token": create_jwt_token(claims, user, access_token, permissions, owner),
}
if refresh_token:
csrf_token = _get_new_csrf_token()
tokens["refresh_token"] = create_jwt_refresh_token(
user, refresh_token, csrf_token, owner
)
tokens["csrf_token"] = csrf_token
return tokens
def validate_refresh_token(refresh_token, data):
csrf_token = data.get("csrfToken")
if not refresh_token:
raise ValidationError(
{
"refreshToken": ValidationError(
"Missing token.", code=PluginErrorCode.NOT_FOUND.value
)
}
)
try:
refresh_payload = jwt_decode(refresh_token, verify_expiration=True)
except PyJWTError:
raise ValidationError(
{
"refreshToken": ValidationError(
"Unable to decode the refresh token.",
code=PluginErrorCode.INVALID.value,
)
}
)
if not data.get("refreshToken"):
if not refresh_payload.get(CSRF_FIELD):
raise ValidationError(
{
CSRF_FIELD: ValidationError(
"Missing CSRF token in refresh payload.",
code=PluginErrorCode.INVALID.value,
)
}
)
if not csrf_token:
raise ValidationError(
{
"csrfToken": ValidationError(
"CSRF token needs to be provided.",
code=PluginErrorCode.INVALID.value,
)
}
)
is_valid = _does_token_match(csrf_token, refresh_payload[CSRF_FIELD])
if not is_valid:
raise ValidationError(
{
"csrfToken": ValidationError(
"CSRF token doesn't match.",
code=PluginErrorCode.INVALID.value,
)
}
)
def get_incorrect_or_missing_urls(urls: dict) -> List[str]:
validator = URLValidator()
incorrect_urls = []
for field, url in urls.items():
try:
validator(url)
except ValidationError:
incorrect_urls.append(field)
return incorrect_urls
def get_incorrect_fields(plugin_configuration: "PluginConfiguration"):
"""Return missing or incorrect configuration fields for OpenIDConnectPlugin."""
configuration = plugin_configuration.configuration
configuration = {item["name"]: item["value"] for item in configuration}
incorrect_fields = []
if plugin_configuration.active:
urls_to_validate = {}
if any(
[configuration["oauth_authorization_url"], configuration["oauth_token_url"]]
):
urls_to_validate.update(
{
"json_web_key_set_url": configuration["json_web_key_set_url"],
"oauth_authorization_url": configuration["oauth_authorization_url"],
"oauth_token_url": configuration["oauth_token_url"],
}
)
elif configuration["user_info_url"]:
urls_to_validate.update(
{
"json_web_key_set_url": configuration["json_web_key_set_url"],
"user_info_url": configuration["user_info_url"],
}
)
else:
incorrect_fields.extend(
[
"json_web_key_set_url",
"oauth_authorization_url",
"oauth_token_url",
"user_info_url",
]
)
incorrect_fields.extend(get_incorrect_or_missing_urls(urls_to_validate))
if not configuration["client_id"]:
incorrect_fields.append("client_id")
if not configuration["client_secret"]:
incorrect_fields.append("client_secret")
return incorrect_fields
def get_saleor_permissions_qs_from_scope(scope: str) -> QuerySet[Permission]:
scope_list = scope.lower().strip().split()
return get_saleor_permissions_from_list(scope_list)
def get_saleor_permissions_from_list(permissions: list) -> QuerySet[Permission]:
saleor_permissions_str = [s for s in permissions if s.startswith("saleor:")]
if SALEOR_STAFF_PERMISSION in saleor_permissions_str:
saleor_permissions_str.remove(SALEOR_STAFF_PERMISSION)
if not saleor_permissions_str:
return Permission.objects.none()
permission_codenames = list(
map(lambda perm: perm.replace("saleor:", ""), saleor_permissions_str)
)
permissions = get_permissions_from_codenames(permission_codenames)
return permissions
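# Illustrative example (hypothetical scope string, assuming SALEOR_STAFF_PERMISSION is
# the "saleor:staff" marker): a scope of "openid profile saleor:staff saleor:manage_orders"
# keeps only the "saleor:"-prefixed entries, drops the staff marker, and looks up the
# codename list ["manage_orders"] via get_permissions_from_codenames.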
def get_saleor_permission_names(permissions: QuerySet) -> List[str]:
permission_names = get_permission_names(permissions)
return list(permission_names)
|
{
"content_hash": "51e8fdc2a9189981082c6c971eabbcb6",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 88,
"avg_line_length": 32.75579322638146,
"alnum_prop": 0.6140618197649107,
"repo_name": "mociepka/saleor",
"id": "f4c2ee0369abb90ea4f68c42173c09535a474170",
"size": "18376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/plugins/openid_connect/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
"""
@author: stoberblog
@detail:    Functions in this file deal with the storage and retrieval
            of data in a database. Generic functions are used for
            abstraction, allowing the database backend to be changed easily.
@created:   Friday 17th February 2017
@modified:  Saturday 25th February 2017
@version: 0.1
@change:
@license: MIT License
Copyright (c) 2017 stoberblog
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import gc # Garbage collection - Clean close of database
import time
import configuration
if configuration.DATABASE_TYPE == "mariadb":
import mysql.connector as mariadb
class interval_struct:
epoch = 0
DC_s1_v = 0.0
DC_s2_v = 0.0
pf_feed = 0.0
pf_inv = 0.0
pow_prod = 0.0
pow_feed = 0.0
eng_tot_prod= 0
eng_tot_out = 0
eng_tot_in = 0
volt_feed = 0.0
cur_inv = 0.0
freq_feed = 50.0
class daily_struct:
epoch = 0
thres_rise_epoch = 0
thres_fall_epoch = 0
thres_perc_exp = 0.0
pow_max = 0.0
eng_day = 0
eng_tot_prod = 0
eng_tot_out = 0
eng_tot_in = 0
error_flag = 0
class log_struct:
epoch = 0
level = 0
message = ''
databaseCursor = None
databaseConnection = None
"""
@brief: Open connection to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def openConnection():
if configuration.DATABASE_TYPE == "mariadb":
return maria_Open()
else:
return True
"""
@brief: Close connection to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def closeConnection():
if configuration.DATABASE_TYPE == "mariadb":
return maria_Close()
else:
return True
"""
@brief: Store Interval Data to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def storeInterval(dataStructure):
if configuration.DATABASE_TYPE == "mariadb":
return maria_storeInterval(dataStructure)
else:
return False
"""
@brief: Store Daily Data to database
@created: 19th Feb 2017
@return: True: Success
False: Failed
"""
def storeDaily(dataStructure):
if configuration.DATABASE_TYPE == "mariadb":
return maria_storeDaily(dataStructure)
else:
return False
"""
@brief: Get feed in power from interval database, with a specified time
@created: 18th Feb 2017
@return: None: Failure
            array:  returned data, in rows of [id, epoch, pow_feed]
"""
def getPowEpoch(epochStart, epochEnd):
if configuration.DATABASE_TYPE == "mariadb":
return maria_getPowEpoch(epochStart, epochEnd)
else:
return None
"""
@brief: Get the maximum produced energy with a time period
@created: 18th Feb 2017
@return: None: Failure
            array:  returned maximum
"""
def getMaxProduced(epochStart, epochEnd):
if configuration.DATABASE_TYPE == "mariadb":
return maria_getMaxProduced(epochStart, epochEnd)
else:
return None
"""
@brief: Log to database
@created: 25th Feb 2017
@return: None: Failure
"""
def logMsg(level, message):
if configuration.DATABASE_TYPE == "mariadb":
maria_logMsg(level, message)
else:
return None
"""
#####################################################################
Maria DB / MySQL
#####################################################################
"""
"""
@brief: Open connection to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def maria_Open():
global databaseConnection
global databaseCursor
    try:
        databaseConnection = mariadb.connect(user=configuration.DATABASE_USER, password=configuration.DATABASE_PASSWD, database=configuration.DATABASE_DB)
        databaseCursor = databaseConnection.cursor(buffered=True)
    except mariadb.Error as error:
        print("Error: {}".format(error))
        return False
    return True
"""
@brief: Close connection to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def maria_Close():
    databaseConnection.close()  # close() returns None, so it is not used as a status
    gc.collect() # Garbage collection - https://ianhowson.com/blog/a-quick-guide-to-using-mysql-in-python/
    return True
"""
@brief: Store Interval Data to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def maria_storeInterval(dataStructure):
if not hasattr(dataStructure, 'epoch'):
return False
try:
databaseCursor.execute("INSERT INTO `interval` (epoch,DC_s1_v,DC_s2_v,pf_feed,pf_inv,pow_prod,pow_feed,eng_tot_prod,eng_tot_out,eng_tot_in,volt_feed,cur_inv,freq_feed) VALUES ("+
str(dataStructure.epoch)+","+str(dataStructure.DC_s1_v)+","+str(dataStructure.DC_s2_v)+","+str(dataStructure.pf_feed)+","+str(dataStructure.pf_inv)+","+str(dataStructure.pow_prod)+","+
str(dataStructure.pow_feed)+","+str(dataStructure.eng_tot_prod)+","+str(dataStructure.eng_tot_out)+","+str(dataStructure.eng_tot_in)+","+
str(dataStructure.volt_feed)+","+str(dataStructure.cur_inv)+","+str(dataStructure.freq_feed)+")")
except mariadb.Error as error:
print("Error: {}".format(error))
return False
databaseConnection.commit()
return True
"""
@brief: Store Daily Data to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def maria_storeDaily(dataStructure):
if not hasattr(dataStructure, 'epoch'):
return False
try:
databaseCursor.execute("INSERT INTO `daily` (epoch,thres_rise_epoch,thres_fall_epoch,thres_perc_exp,pow_max,eng_day,eng_tot_prod,eng_tot_out,eng_tot_in,error_flag) VALUES ("+
str(dataStructure.epoch)+","+str(dataStructure.thres_rise_epoch)+","+str(dataStructure.thres_fall_epoch)+","+str(dataStructure.thres_perc_exp)+","+str(dataStructure.pow_max)+","+str(dataStructure.eng_day)+","+
str(dataStructure.eng_tot_prod)+","+str(dataStructure.eng_tot_out)+","+str(dataStructure.eng_tot_in)+","+str(dataStructure.error_flag)+")")
except mariadb.Error as error:
print("Error: {}".format(error))
return False
databaseConnection.commit()
return True
"""
@brief: Log Message to database
@created: 18th Feb 2017
@return: True: Success
False: Failed
"""
def maria_logMsg(level, message):
try:
databaseCursor.execute( "INSERT INTO `log` (`epoch`,`level`,`message`) VALUES ("+str(time.time())+","+str(level)+",\""+str(message)+"\")" )
except mariadb.Error as error:
print("Error: {}".format(error))
return False
databaseConnection.commit()
return True
"""
@brief: Get feed in power from interval database, with a specified time
@created: 18th Feb 2017
@return: None: Failure
                array: returned data, in rows of [id, epoch, pow_feed]
"""
def maria_getPowEpoch(epochStart, epochEnd):
try:
databaseCursor.execute("SELECT `id`, `epoch`, `pow_feed`, `pow_prod` FROM `interval` WHERE `epoch` BETWEEN "+str(epochStart)+" AND "+str(epochEnd))
except mariadb.Error as error:
print("Error: {}".format(error))
return None
databaseConnection.commit()
return databaseCursor.fetchall()
"""
@brief: Get the maximum produced energy with a time period
@created: 18th Feb 2017
@return: None: Failure
                array: returned maximum
"""
def maria_getMaxProduced(epochStart, epochEnd):
try:
databaseCursor.execute("SELECT MAX(`pow_prod`) AS `pow_prod` FROM `interval` WHERE `epoch` BETWEEN "+str(epochStart)+" AND "+str(epochEnd))
except mariadb.Error as error:
print("Error: {}".format(error))
return None
databaseConnection.commit()
return databaseCursor.fetchall()
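# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumptions: the module-level imports these functions rely on (configuration,
# mariadb, gc, time) appear earlier in this file, configuration.DATABASE_TYPE is
# "mariadb", and the `interval` and `log` tables exist in the configured database.
if __name__ == "__main__":
    if maria_Open():
        logMsg(1, "database sketch: connection opened")
        now = time.time()
        rows = getPowEpoch(now - 3600, now)  # feed-in power rows from the last hour
        if rows is not None:
            for row_id, epoch, pow_feed, pow_prod in rows:
                print("{}: feed={} W, produced={} W".format(epoch, pow_feed, pow_prod))
        maria_Close()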
|
{
"content_hash": "4b2e74d723854662fd3bf4f5c0d8a334",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 236,
"avg_line_length": 32.472131147540985,
"alnum_prop": 0.5806744749596123,
"repo_name": "stoberblog/sunspec-modbus",
"id": "d4f0c7a5cb8554eedb6d315c9e9d7b05a4cbcebe",
"size": "9929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47372"
}
],
"symlink_target": ""
}
|
from django.db import models
from django_thumborstorage.storages import ThumborStorage, ThumborMigrationStorage
class PersonManager(models.Manager):
def get_by_natural_key(self, first_name, last_name):
return self.get(first_name=first_name, last_name=last_name)
class Person(models.Model):
"""A model that used to store images on the file-system and has been moved to Thumbor."""
objects = PersonManager()
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
def upload_path(instance, filename):
return 'people/%s' % filename
photo = models.ImageField('image', upload_to=upload_path,
storage=ThumborMigrationStorage(),
height_field='photo_height',
width_field='photo_width')
photo_height = models.IntegerField(blank=True, null=True)
photo_width = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = (('first_name', 'last_name'),)
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
def natural_key(self):
return (self.first_name, self.last_name)
def get_full_name(self):
return u"%s %s" % (self.first_name, self.last_name)
class PersonNew(models.Model):
"""A model that always stored images on Thumbor."""
objects = PersonManager()
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
def upload_path(instance, filename):
return 'people/new/%s' % filename
photo = models.ImageField('image', upload_to=upload_path,
storage=ThumborStorage(),
height_field='photo_height',
width_field='photo_width')
photo_height = models.IntegerField(blank=True, null=True)
photo_width = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = (('first_name', 'last_name'),)
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
def natural_key(self):
return (self.first_name, self.last_name)
def get_full_name(self):
return u"%s %s" % (self.first_name, self.last_name)
class PersonFileSystem(models.Model):
"""A model that still store images on the file-system."""
objects = PersonManager()
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
def upload_path(instance, filename):
return 'people/fs/%s' % filename
photo = models.ImageField('image', upload_to=upload_path,
height_field='photo_height',
width_field='photo_width')
photo_height = models.IntegerField(blank=True, null=True)
photo_width = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = (('first_name', 'last_name'),)
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
def natural_key(self):
return (self.first_name, self.last_name)
def get_full_name(self):
return u"%s %s" % (self.first_name, self.last_name)
|
{
"content_hash": "8fd36592d04838b8b37001a6a756daee",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 93,
"avg_line_length": 37.160919540229884,
"alnum_prop": 0.6226415094339622,
"repo_name": "lCharlie123l/django-thumborstorage",
"id": "ef1024c5a0fbac670954b3a3dd38fdffad5b9740",
"size": "3233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/thumbor_project/my_app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "207"
},
{
"name": "Python",
"bytes": "35434"
},
{
"name": "Shell",
"bytes": "29"
}
],
"symlink_target": ""
}
|
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('corporate', '0002_customer_default_discount'),
]
operations = [
migrations.CreateModel(
name='CustomerPlan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('licenses', models.IntegerField()),
('automanage_licenses', models.BooleanField(default=False)),
('charge_automatically', models.BooleanField(default=False)),
('price_per_license', models.IntegerField(null=True)),
('fixed_price', models.IntegerField(null=True)),
('discount', models.DecimalField(decimal_places=4, max_digits=6, null=True)),
('billing_cycle_anchor', models.DateTimeField()),
('billing_schedule', models.SmallIntegerField()),
('billed_through', models.DateTimeField()),
('next_billing_date', models.DateTimeField(db_index=True)),
('tier', models.SmallIntegerField()),
('status', models.SmallIntegerField(default=1)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='corporate.Customer')),
],
),
]
|
{
"content_hash": "7b9603687bf78967d250570a51aff347",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 118,
"avg_line_length": 45.064516129032256,
"alnum_prop": 0.5891195418754474,
"repo_name": "timabbott/zulip",
"id": "50457b446df757bcd5ca5b3283de8e894359661e",
"size": "1448",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "corporate/migrations/0003_customerplan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "429356"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "844217"
},
{
"name": "JavaScript",
"bytes": "3259448"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "74427"
},
{
"name": "Python",
"bytes": "7825440"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "123706"
},
{
"name": "TSQL",
"bytes": "314"
},
{
"name": "TypeScript",
"bytes": "22102"
}
],
"symlink_target": ""
}
|
meal = raw_input("Meal")
tax = raw_input("Tax")
tip = raw_input("Tip")
meal = meal + meal * tax
total = meal + meal * tip
print("%.2f" % total)
|
{
"content_hash": "9ed2d844d99ff09d3604949ec7738477",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 25,
"avg_line_length": 18.25,
"alnum_prop": 0.5958904109589042,
"repo_name": "ArcherCraftStore/Tip-Calculator",
"id": "134ba1830bee3655ed9a14449763d5002e763b13",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146"
}
],
"symlink_target": ""
}
|
import unittest
from telemetry.testing import simple_mock
_ = simple_mock.DONT_CARE
# pylint: disable=no-member
class SimpleMockUnitTest(unittest.TestCase):
def testBasic(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo')
mock.foo()
def testReturn(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo').WillReturn(7)
ret = mock.foo()
self.assertEquals(ret, 7)
def testArgs(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo').WithArgs(3, 4)
mock.foo(3, 4)
def testArgs2(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo', 3, 4)
mock.foo(3, 4)
def testArgsMismatch(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo').WithArgs(3, 4)
self.assertRaises(Exception,
lambda: mock.foo(4, 4))
def testArgsDontCare(self):
mock = simple_mock.MockObject()
mock.ExpectCall('foo').WithArgs(_, 4)
mock.foo(4, 4)
def testOnCall(self):
mock = simple_mock.MockObject()
handler_called = []
def Handler(arg0):
assert arg0 == 7
handler_called.append(True)
mock.ExpectCall('baz', 7).WhenCalled(Handler)
mock.baz(7)
self.assertTrue(len(handler_called) > 0)
def testSubObject(self):
mock = simple_mock.MockObject()
mock.bar = simple_mock.MockObject(mock)
mock.ExpectCall('foo').WithArgs(_, 4)
mock.bar.ExpectCall('baz')
mock.foo(0, 4)
mock.bar.baz()
def testSubObjectMismatch(self):
mock = simple_mock.MockObject()
mock.bar = simple_mock.MockObject(mock)
mock.ExpectCall('foo').WithArgs(_, 4)
mock.bar.ExpectCall('baz')
self.assertRaises(
Exception,
lambda: mock.bar.baz()) # pylint: disable=unnecessary-lambda
|
{
"content_hash": "690c2ee2e21038e8a674c5a6c842f65d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 66,
"avg_line_length": 22.0875,
"alnum_prop": 0.6485568760611206,
"repo_name": "sahiljain/catapult",
"id": "67cee584ac6f02d63280e456690b1a7dc3a628e3",
"size": "1929",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/testing/simple_mock_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6390"
},
{
"name": "CSS",
"bytes": "24751"
},
{
"name": "HTML",
"bytes": "14570791"
},
{
"name": "JavaScript",
"bytes": "511007"
},
{
"name": "Python",
"bytes": "5842419"
},
{
"name": "Shell",
"bytes": "2834"
}
],
"symlink_target": ""
}
|
class WrongArgumentsError(Exception):
"""
The program was called with incorrect arguments or an incorrect combination of them.
"""
pass
class WrongShapeError(Exception):
"""
A sequence has the wrong shape.
"""
pass
class ClassNotRegisteredError(Exception):
"""
Tried to create an environment or agent instance that is not registered.
"""
pass
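# --- Illustrative usage sketch (added; not part of the original module) ---
# A hypothetical registry lookup showing where ClassNotRegisteredError would be
# raised; `registry` and the environment name are made up for illustration.
#
#   registry = {"CartPole-v0": object}
#   def make(name):
#       try:
#           return registry[name]()
#       except KeyError:
#           raise ClassNotRegisteredError("Nothing registered under {!r}".format(name))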
|
{
"content_hash": "764fdbd88cfb51ccaff377b53ec0cda0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6793893129770993,
"repo_name": "arnomoonens/DeepRL",
"id": "dbf81f10c847b2bd8b07ebb36a467d16e8f26ae6",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yarll/misc/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6107"
},
{
"name": "Python",
"bytes": "236593"
}
],
"symlink_target": ""
}
|
"""sympify -- convert objects SymPy internal format"""
from __future__ import print_function, division
from inspect import getmro
from .core import all_classes as sympy_classes
from .compatibility import iterable, string_types, range
from .evaluate import global_evaluate
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %r" % (self.expr,)
return ("Sympify of expression '%s' failed, because of exception being "
"raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__,
str(self.base_exc)))
converter = {} # See sympify docstring.
class CantSympify(object):
"""
Mix in this trait to a class to disallow sympification of its instances.
Examples
========
>>> from sympy.core.sympify import sympify, CantSympify
>>> class Something(dict):
... pass
...
>>> sympify(Something())
{}
>>> class Something(dict, CantSympify):
... pass
...
>>> sympify(Something())
Traceback (most recent call last):
...
SympifyError: SympifyError: {}
"""
pass
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
evaluate=None):
"""Converts an arbitrary expression to a type that can be used inside SymPy.
For example, it will convert Python ints into instance of sympy.Rational,
floats into instances of sympy.Float, etc. It is also able to coerce symbolic
expressions which inherit from Basic. This can be useful in cooperation
with SAGE.
It currently accepts as arguments:
- any object defined in sympy
- standard numeric python types: int, long, float, Decimal
- strings (like "0.09" or "2e-19")
- booleans, including ``None`` (will leave ``None`` unchanged)
- lists, sets or tuples containing any of the above
.. warning::
Note that this function uses ``eval``, and thus shouldn't be used on
unsanitized input.
If the argument is already a type that SymPy understands, it will do
nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
>>> from sympy import sympify
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
If the expression could not be converted, a SympifyError is raised.
>>> sympify("x***2")
Traceback (most recent call last):
...
SympifyError: SympifyError: "could not parse u'x***2'"
Locals
------
The sympification happens with access to everything that is loaded
by ``from sympy import *``; anything used in a string that is not
defined by that import will be converted to a symbol. In the following,
the ``bitcount`` function is treated as a symbol and the ``O`` is
interpreted as the Order object (used with series) and it raises
an error when used improperly:
>>> s = 'bitcount(42)'
>>> sympify(s)
bitcount(42)
>>> sympify("O(x)")
O(x)
>>> sympify("O + 1")
Traceback (most recent call last):
...
TypeError: unbound method...
In order to have ``bitcount`` be recognized it can be imported into a
namespace dictionary and passed as locals:
>>> from sympy.core.compatibility import exec_
>>> ns = {}
>>> exec_('from sympy.core.evalf import bitcount', ns)
>>> sympify(s, locals=ns)
6
In order to have the ``O`` interpreted as a Symbol, identify it as such
in the namespace dictionary. This can be done in a variety of ways; all
three of the following are possibilities:
>>> from sympy import Symbol
>>> ns["O"] = Symbol("O") # method 1
>>> exec_('from sympy.abc import O', ns) # method 2
>>> ns.update(dict(O=Symbol("O"))) # method 3
>>> sympify("O + 1", locals=ns)
O + 1
If you want *all* single-letter and Greek-letter variables to be symbols
then you can use the clashing-symbols dictionaries that have been defined
there as private variables: _clash1 (single-letter variables), _clash2
(the multi-letter Greek names) or _clash (both single and multi-letter
names that are defined in abc).
>>> from sympy.abc import _clash1
>>> _clash1
{'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
>>> sympify('I & Q', _clash1)
And(I, Q)
Strict
------
If the option ``strict`` is set to ``True``, only the types for which an
explicit conversion has been defined are converted. In the other
cases, a SympifyError is raised.
>>> print(sympify(None))
None
>>> sympify(None, strict=True)
Traceback (most recent call last):
...
SympifyError: SympifyError: None
Evaluation
----------
If the option ``evaluate`` is set to ``False``, then arithmetic and
operators will be converted into their SymPy equivalents and the
``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
be denested first. This is done via an AST transformation that replaces
operators with their SymPy equivalents, so if an operand redefines any
of those operations, the redefined operators will not be used.
>>> sympify('2**2 / 3 + 5')
19/3
>>> sympify('2**2 / 3 + 5', evaluate=False)
2**2/3 + 5
Extending
---------
To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
just define a ``_sympy_`` method to your class. You can do that even to
classes that you do not own by subclassing or adding the method at runtime.
>>> from sympy import Matrix
>>> class MyList1(object):
... def __iter__(self):
... yield 1
... yield 2
... return
... def __getitem__(self, i): return list(self)[i]
... def _sympy_(self): return Matrix(self)
>>> sympify(MyList1())
Matrix([
[1],
[2]])
If you do not have control over the class definition you could also use the
``converter`` global dictionary. The key is the class and the value is a
function that takes a single argument and returns the desired SymPy
object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
>>> class MyList2(object): # XXX Do not do this if you control the class!
... def __iter__(self): # Use _sympy_!
... yield 1
... yield 2
... return
... def __getitem__(self, i): return list(self)[i]
>>> from sympy.core.sympify import converter
>>> converter[MyList2] = lambda x: Matrix(x)
>>> sympify(MyList2())
Matrix([
[1],
[2]])
Notes
=====
Sometimes autosimplification during sympification results in expressions
that are very different in structure than what was entered. Until such
autosimplification is no longer done, the ``kernS`` function might be of
some use. In the example below you can see how an expression reduces to
-1 by autosimplification, but does not do so when ``kernS`` is used.
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x
>>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
-1
>>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
>>> sympify(s)
-1
>>> kernS(s)
-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
"""
if evaluate is None:
if global_evaluate[0] is False:
evaluate = global_evaluate[0]
else:
evaluate = True
try:
if a in sympy_classes:
return a
except TypeError: # Type of a is unhashable
pass
try:
cls = a.__class__
except AttributeError: # a is probably an old-style class object
cls = type(a)
if cls in sympy_classes:
return a
if cls is type(None):
if strict:
raise SympifyError(a)
else:
return a
try:
return converter[cls](a)
except KeyError:
for superclass in getmro(cls):
try:
return converter[superclass](a)
except KeyError:
continue
if isinstance(a, CantSympify):
raise SympifyError(a)
try:
return a._sympy_()
except AttributeError:
pass
if not isinstance(a, string_types):
for coerce in (float, int):
try:
return sympify(coerce(a))
except (TypeError, ValueError, AttributeError, SympifyError):
continue
if strict:
raise SympifyError(a)
if iterable(a):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
if isinstance(a, dict):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a.items()])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
# At this point we were given an arbitrary expression
# which does not inherit from Basic and doesn't implement
# _sympy_ (which is a canonical and robust way to convert
# anything to SymPy expression).
#
# As a last chance, we try to take "a"'s normal form via unicode()
# and try to parse it. If it fails, then we have no luck and
# return an exception
try:
from .compatibility import unicode
a = unicode(a)
except Exception as exc:
raise SympifyError(a, exc)
from sympy.parsing.sympy_parser import (parse_expr, TokenError,
standard_transformations)
from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
from sympy.parsing.sympy_parser import rationalize as t_rationalize
transformations = standard_transformations
if rational:
transformations += (t_rationalize,)
if convert_xor:
transformations += (t_convert_xor,)
try:
a = a.replace('\n', '')
expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
except (TokenError, SyntaxError) as exc:
raise SympifyError('could not parse %r' % a, exc)
return expr
def _sympify(a):
"""
Short version of sympify for internal usage for __add__ and __eq__ methods
where it is ok to allow some things (like Python integers and floats) in
the expression. This excludes things (like strings) that are unwise to
allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy.abc import x
>>> x + 1
x + 1
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
def kernS(s):
"""Use a hack to try keep autosimplification from joining Integer or
minus sign into an Add of a Mul; this modification doesn't
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y, z
The 2-arg Mul allows a leading Integer to be distributed but kernS will
prevent that:
>>> 2*(x + y)
2*x + 2*y
>>> kernS('2*(x + y)')
2*(x + y)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 4596 has been resolved.
"""
import re
from sympy.core.symbol import Symbol
hit = False
if '(' in s:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
kern = '_kern'
while kern in s:
kern += "_"
olds = s
# digits*( -> digits*kern*(
s = re.sub(r'(\d+)( *\* *)\(', r'\1*%s\2(' % kern, s)
# negated parenthetical
kern2 = kern + "2"
while kern2 in s:
kern2 += "_"
# step 1: -(...) --> kern-kern*(...)
target = r'%s-%s*(' % (kern, kern)
s = re.sub(r'- *\(', target, s)
# step 2: double the matching closing parenthesis
# kern-kern*(...) --> kern-kern*(...)kern2
i = nest = 0
while True:
j = s.find(target, i)
if j == -1:
break
j = s.find('(')
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + kern2 + s[j:]
i = j
# step 3: put in the parentheses
# kern-kern*(...)kern2 --> (-kern*(...))
s = s.replace(target, target.replace(kern, "(", 1))
s = s.replace(kern2, ')')
hit = kern in s
for i in range(2):
try:
expr = sympify(s)
break
except: # the kern might cause unknown errors, so use bare except
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
rep = {Symbol(kern): 1}
def _clear(expr):
if isinstance(expr, (list, tuple, set)):
return type(expr)([_clear(e) for e in expr])
if hasattr(expr, 'subs'):
return expr.subs(rep, hack2=True)
return expr
expr = _clear(expr)
# hope that kern is not there anymore
return expr
|
{
"content_hash": "f8dec7de8344fcb9c4e35b3e6b01114a",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 99,
"avg_line_length": 30.785087719298247,
"alnum_prop": 0.5809944436529421,
"repo_name": "yashsharan/sympy",
"id": "194d2bd880e6dd114b7f67bd9abf62467015a8df",
"size": "14038",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sympy/core/sympify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GCC Machine Description",
"bytes": "101"
},
{
"name": "Python",
"bytes": "15146815"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3087"
},
{
"name": "XSLT",
"bytes": "366200"
}
],
"symlink_target": ""
}
|
"""
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools, tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, starcryo_temps
from equipment.srs import lockin
from equipment.custom import mmwave_source
from kid_readout.settings import LOCKIN_SERIAL_PORT
import resonances
acquire.show_settings()
acquire.show_git_status()
logger = acquire.get_script_logger(__file__)
# Parameters
suffix = 'mmw'
df_baseband_target = 30e3
f_sweep_span = 3e6 # The total span of the baseband tones
f_lo_spacing = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 100e6 # Keep the tones away from the LO by at least this frequency.
sweep_length_seconds = 0.05
stream_length_seconds = 30
# Resonance frequencies
band_dict = resonances.dict_180_mK
fractional_frequency_shift = 0
band_name = '2758' # '3317'
all_f_initial = (1 + fractional_frequency_shift) * band_dict[band_name][1:3]
attenuations_list = all_f_initial.size * [(25, 35, 45)]  # one (25, 35, 45) dB set per resonator
# Hardware
temperature = starcryo_temps.Temperature()
lock = lockin.SR830(serial_device=LOCKIN_SERIAL_PORT)
lock.identification # This seems to be necessary to wake up the lockin
mmw = mmwave_source.MMWaveSource()
mmw.set_attenuator_ticks(0, 0)
mmw.multiplier_input = 'thermal'
mmw.ttl_modulation_source = "roach_2"
mmw.waveguide_twist_angle = 0
conditioner = analog.HeterodyneMarkII()
hw = hardware.Hardware(temperature, lock, mmw, conditioner)
ri = hardware_tools.r2h11_with_mk2(initialize=True, use_config=False)
ri.set_modulation_output('high')
ri.iq_delay = -1
ri.adc_valon.set_ref_select(1) # external
assert np.all(ri.adc_valon.get_phase_locks())
ri.lo_valon.set_ref_select(1) # external
assert np.all(ri.lo_valon.get_phase_locks())
# Calculate sweep parameters, LO and baseband sweep frequencies
ri_state = ri.state
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
df_filterbank = ri_state.adc_sample_rate / ri_state.num_filterbank_channels
num_sweep_tones = int(f_sweep_span / df_baseband)
logger.info("Sweeps will use {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
num_sweep_tones, 1e-6 * f_sweep_span, df_baseband, tone_sample_exponent))
n_baseband = (f_baseband_minimum + f_sweep_span / 2) // df_baseband + np.arange(num_sweep_tones)
f_baseband = df_baseband * n_baseband
# Run
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
for f_index, (f_initial, attenuations) in enumerate(zip(all_f_initial, attenuations_list)):
logger.info("Measuring resonator {:d} of {:d}".format(f_index + 1, all_f_initial.size))
f_lo_initial = f_initial - f_baseband.mean()
assert np.all(ri.adc_valon.get_phase_locks())
assert np.all(ri.lo_valon.get_phase_locks())
#tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo_initial, f_lo_spacing=1e-6 * f_lo_spacing)
ri.set_lo(lomhz=1e-6 * f_lo_initial, chan_spacing=1e-6 * f_lo_spacing)
# Take the initial sweep using the minimum power
ri.set_dac_attenuator(max(attenuations))
ri.set_tone_baseband_freqs(freqs=1e-6 * np.array([f_baseband[0]]), nsamp=2 ** tone_sample_exponent)
time.sleep(1)
tools.optimize_fft_gain(ri, fraction_of_maximum=0.5)
time.sleep(1)
initial_state = hw.state()
initial_state['f_index'] = f_index
initial_sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo_initial + f_baseband[:, np.newaxis]),
num_tone_samples=2 ** tone_sample_exponent,
length_seconds=sweep_length_seconds, state=initial_state, verbose=True)[0]
npd.write(initial_sweep)
f_fit = initial_sweep.resonator.f_0
logger.info("Initial sweep f_r = {:.3f} MHz +/- {:.0f} Hz".format(1e-6 * f_fit,
initial_sweep.resonator.f_0_error))
logger.info("Initial sweep Q = {:.0f} +/- {:.0f}".format(
initial_sweep.resonator.Q, initial_sweep.resonator.Q_error))
f_baseband_bin_center = df_filterbank * np.round(f_baseband.mean() / df_filterbank)
f_lo_final = f_lo_spacing * np.round((f_fit - f_baseband_bin_center) / f_lo_spacing)
logger.info("f_lo_final + f_baseband_bin_center - f_r_initial = {:.3f} Hz".format(
f_lo_final + f_baseband_bin_center - f_fit))
#tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo_final, f_lo_spacing=1e-6 * f_lo_spacing)
ri.set_lo(lomhz=1e-6 * f_lo_final, chan_spacing=1e-6 * f_lo_spacing)
for attenuation_index, attenuation in enumerate(attenuations):
ri.set_dac_attenuator(attenuation)
ri.set_tone_baseband_freqs(freqs=1e-6 * np.array([f_baseband[0]]), nsamp=2 ** tone_sample_exponent)
time.sleep(1)
tools.optimize_fft_gain(ri, fraction_of_maximum=0.5)
time.sleep(1)
sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo_final + f_baseband[:, np.newaxis]),
num_tone_samples=2 ** tone_sample_exponent,
length_seconds=sweep_length_seconds, state=hw.state(), verbose=True)[0]
ri.set_tone_baseband_freqs(freqs=np.array([1e-6 * f_baseband_bin_center]), nsamp=2 ** tone_sample_exponent)
logger.info("f_lo_final + f_baseband_bin_center - f_r = {:.3f} Hz".format(
                f_lo_final + f_baseband_bin_center - sweep.resonator.f_0))
logger.info("Recording {:.1f} s stream with source off".format(stream_length_seconds))
off_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
ri.set_modulation_output(7)
time.sleep(3) # Let the lock-in catch up
logger.info("Recording {:.1f} s stream with source modulating".format(stream_length_seconds))
mod_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
ri.set_modulation_output('low')
logger.info("Recording {:.1f} s stream with source on".format(stream_length_seconds))
on_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
ri.set_modulation_output('high')
sssl = basic.SingleSweepStreamList(single_sweep=sweep, stream_list=[off_stream, mod_stream, on_stream],
state={'f_index': f_index, 'attenuation_index': attenuation_index})
npd.write(sssl)
npd.write(ri.get_adc_measurement())
finally:
ri.set_modulation_output('high')
npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
|
{
"content_hash": "45847f396d51c3222ea63d22f9b2706d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 119,
"avg_line_length": 53.90151515151515,
"alnum_prop": 0.6543921293042867,
"repo_name": "ColumbiaCMB/kid_readout",
"id": "7f810f0d5ccf4e869c34adf4930e5969377c3962",
"size": "7115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/data_taking_scripts/cooldown/2017-11-23_starcryo/r2h11_singlesweepstreamlist_mmw.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13672"
},
{
"name": "Python",
"bytes": "2033932"
}
],
"symlink_target": ""
}
|
"""Tests for vocab_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from ..utils import misc_utils
class MiscUtilsTest(tf.test.TestCase):
def testFormatBpeText(self):
bpe_line = (
b"En@@ ough to make already reluc@@ tant men hesitate to take screening"
b" tests ."
)
expected_result = (
b"Enough to make already reluctant men hesitate to take screening tests"
b" ."
)
self.assertEqual(expected_result,
misc_utils.format_bpe_text(bpe_line.split(b" ")))
def testFormatSPMText(self):
spm_line = u"\u2581This \u2581is \u2581a \u2581 te st .".encode("utf-8")
expected_result = b"This is a test."
self.assertEqual(expected_result,
misc_utils.format_spm_text(spm_line.split(b" ")))
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "db34cd443ebecf8eff7b192d2180859c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 27.41176470588235,
"alnum_prop": 0.6341201716738197,
"repo_name": "tensorflow/nmt",
"id": "cae2173c2995dce164d32f2e42f176d6bc6b2a92",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nmt/utils/misc_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "275345"
},
{
"name": "Shell",
"bytes": "7648"
}
],
"symlink_target": ""
}
|
__author__ = 'falmeida@google.com (Filipe Almeida)'
class OrderedDict:
"""Ordered dictionary implementation."""
# Define the minimum functionality we need for our application.
  # Easier would be to subclass from UserDict.DictMixin, and only
# define __getitem__, __setitem__, __delitem__, and keys, but that's
# not as portable. We don't need to define much more, so we just do.
def __init__(self):
self._dict = {}
self._keys = []
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __delitem__(self, key):
self._keys.remove(key)
del self._dict[key]
def keys(self):
return self._keys
# Below are all we have to define in addition to what DictMixin would need
def __len__(self):
return len(self.keys())
def __contains__(self, key):
    return key in self._dict
def __iter__(self):
# It's not as portable -- though it would be more space-efficient -- to do
# for k in self.keys(): yield k
return iter(self.keys())
class State(object):
"""Contains information about a specific state."""
def __init__(self):
pass
name = None
external_name = None
transitions = []
class Transition(object):
"""Contains information about a specific transition."""
def __init__(self, condition, source, destination):
self.condition = condition
self.source = source
self.destination = destination
class FSMConfig(object):
"""Container for the statemachine definition."""
sm = {} # dictionary that contains the finite state machine definition
# loaded from a config file.
transitions = [] # List of transitions.
conditions = {} # Mapping between the condition name and the bracket
# expression.
states = OrderedDict() # Ordered dictionary of states.
name = None
comment = None
def AddState(self, **dic):
"""Called from the definition file with the description of the state.
Receives a dictionary and populates internal structures based on it. The
dictionary is in the following format:
{'name': state_name,
'external': exposed state name,
'transitions': [
[condition, destination_state ],
[condition, destination_state ]
]
}
"""
state = State()
state.name = dic['name']
state.external_name = dic['external']
state_transitions = []
for (condition, destination) in dic['transitions']:
transition = Transition(condition, state.name, destination)
state_transitions.append(transition)
self.transitions.extend(state_transitions)
state.transitions = state_transitions
self.states[state.name] = state
def AddCondition(self, name, expression):
"""Called from the definition file with the definition of a condition.
Receives the name of the condition and it's expression.
"""
self.conditions[name] = expression
def Load(self, filename):
"""Load the state machine definition file.
In the definition file, which is based on the python syntax, the following
variables and functions are defined.
name: Name of the state machine
comment: Comment line on the generated file.
condition(): A mapping between condition names and bracket expressions.
state(): Defines a state and it's transitions. It accepts the following
attributes:
name: name of the state
external: exported name of the state. The exported name can be used
multiple times in order to create a super state.
transitions: List of pairs containing the condition for the transition
and the destination state. Transitions are ordered so if
a default rule is used, it must be the last one in the list.
Example:
name = 'c comment parser'
condition('/', '/')
condition('*', '*')
condition('linefeed', '\\n')
condition('default', '[:default:]')
state(name = 'text',
external = 'comment',
transitions = [
[ '/', 'comment_start' ],
[ 'default', 'text' ]
])
state(name = 'comment_start',
external = 'comment',
transitions = [
[ '/', 'comment_line' ],
[ '*', 'comment_multiline' ],
[ 'default', 'text' ]
])
state(name = 'comment_line',
external = 'comment',
transitions = [
[ 'linefeed', 'text' ],
[ 'default', 'comment_line' ]
])
state(name = 'comment_multiline',
external = 'comment',
transitions = [
[ '*', 'comment_multiline_close' ],
[ 'default', 'comment_multiline' ]
])
state(name = 'comment_multiline_close',
external = 'comment',
transitions = [
[ '/', 'text' ],
[ 'default', 'comment_multiline' ]
])
"""
self.sm['state'] = self.AddState
self.sm['condition'] = self.AddCondition
execfile(filename, self.sm)
self.name = self.sm['name']
if not self.name.isalnum():
raise Exception("State machine name must consist of only alphanumeric"
"characters.")
self.comment = self.sm['comment']
def __init__(self):
pass
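# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes a definition file like the 'c comment parser' example in
# FSMConfig.Load()'s docstring has been saved as 'comment.fsm' (hypothetical path).
if __name__ == '__main__':
  config = FSMConfig()
  config.Load('comment.fsm')
  print('state machine: %s' % config.name)
  for state_name in config.states:
    for transition in config.states[state_name].transitions:
      print('  %s --[%s]--> %s' % (transition.source,
                                   transition.condition,
                                   transition.destination))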
|
{
"content_hash": "d3b2233f189450f055dd2e3a50049da3",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 79,
"avg_line_length": 28.849462365591396,
"alnum_prop": 0.6097651882221394,
"repo_name": "google/streamhtmlparser",
"id": "6046621edeedf3366c9d9afdadb485bc6218529c",
"size": "7019",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/tools/fsm_config.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "839885"
},
{
"name": "C++",
"bytes": "25190"
},
{
"name": "HTML",
"bytes": "25497"
},
{
"name": "Makefile",
"bytes": "2914"
},
{
"name": "Python",
"bytes": "29522"
},
{
"name": "Shell",
"bytes": "249996"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
import h2o
class H2OXGBoostEstimator(H2OEstimator):
"""
XGBoost
    Builds an eXtreme Gradient Boosting model using the native XGBoost backend.
"""
algo = "xgboost"
def __init__(self, **kwargs):
super(H2OXGBoostEstimator, self).__init__()
self._parms = {}
names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models",
"keep_cross_validation_predictions", "keep_cross_validation_fold_assignment",
"score_each_iteration", "fold_assignment", "fold_column", "response_column", "ignored_columns",
"ignore_const_cols", "offset_column", "weights_column", "stopping_rounds", "stopping_metric",
"stopping_tolerance", "max_runtime_secs", "seed", "distribution", "tweedie_power",
"categorical_encoding", "quiet_mode", "export_checkpoints_dir", "ntrees", "max_depth", "min_rows",
"min_child_weight", "learn_rate", "eta", "sample_rate", "subsample", "col_sample_rate",
"colsample_bylevel", "col_sample_rate_per_tree", "colsample_bytree", "max_abs_leafnode_pred",
"max_delta_step", "monotone_constraints", "score_tree_interval", "min_split_improvement", "gamma",
"nthread", "max_bins", "max_leaves", "min_sum_hessian_in_leaf", "min_data_in_leaf", "sample_type",
"normalize_type", "rate_drop", "one_drop", "skip_drop", "tree_method", "grow_policy", "booster",
"reg_lambda", "reg_alpha", "dmatrix_type", "backend", "gpu_id"}
if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in names_list:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
assert_is_type(training_frame, None, H2OFrame)
self._parms["training_frame"] = training_frame
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
assert_is_type(validation_frame, None, H2OFrame)
self._parms["validation_frame"] = validation_frame
@property
def nfolds(self):
"""
Number of folds for K-fold cross-validation (0 to disable or >= 2).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("nfolds")
@nfolds.setter
def nfolds(self, nfolds):
assert_is_type(nfolds, None, int)
self._parms["nfolds"] = nfolds
@property
def keep_cross_validation_models(self):
"""
Whether to keep the cross-validation models.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("keep_cross_validation_models")
@keep_cross_validation_models.setter
def keep_cross_validation_models(self, keep_cross_validation_models):
assert_is_type(keep_cross_validation_models, None, bool)
self._parms["keep_cross_validation_models"] = keep_cross_validation_models
@property
def keep_cross_validation_predictions(self):
"""
Whether to keep the predictions of the cross-validation models.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_predictions")
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, keep_cross_validation_predictions):
assert_is_type(keep_cross_validation_predictions, None, bool)
self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions
@property
def keep_cross_validation_fold_assignment(self):
"""
Whether to keep the cross-validation fold assignment.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_fold_assignment")
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment):
assert_is_type(keep_cross_validation_fold_assignment, None, bool)
self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def fold_assignment(self):
"""
Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
the folds based on the response variable, for classification problems.
One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"`` (default: ``"auto"``).
"""
return self._parms.get("fold_assignment")
@fold_assignment.setter
def fold_assignment(self, fold_assignment):
assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
self._parms["fold_assignment"] = fold_assignment
@property
def fold_column(self):
"""
Column with cross-validation fold index assignment per observation.
Type: ``str``.
"""
return self._parms.get("fold_column")
@fold_column.setter
def fold_column(self, fold_column):
assert_is_type(fold_column, None, str)
self._parms["fold_column"] = fold_column
@property
def response_column(self):
"""
Response variable column.
Type: ``str``.
"""
return self._parms.get("response_column")
@response_column.setter
def response_column(self, response_column):
assert_is_type(response_column, None, str)
self._parms["response_column"] = response_column
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def offset_column(self):
"""
Offset column. This will be added to the combination of columns before applying the link function.
Type: ``str``.
"""
return self._parms.get("offset_column")
@offset_column.setter
def offset_column(self, offset_column):
assert_is_type(offset_column, None, str)
self._parms["offset_column"] = offset_column
@property
def weights_column(self):
"""
Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the
dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data
frame. This is typically the number of times a row is repeated, but non-integer values are supported as well.
During training, rows with higher weights matter more, due to the larger loss function pre-factor.
Type: ``str``.
"""
return self._parms.get("weights_column")
@weights_column.setter
def weights_column(self, weights_column):
assert_is_type(weights_column, None, str)
self._parms["weights_column"] = weights_column
@property
def stopping_rounds(self):
"""
Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Type: ``int`` (default: ``0``).
"""
return self._parms.get("stopping_rounds")
@stopping_rounds.setter
def stopping_rounds(self, stopping_rounds):
assert_is_type(stopping_rounds, None, int)
self._parms["stopping_rounds"] = stopping_rounds
@property
def stopping_metric(self):
"""
Metric to use for early stopping (AUTO: logloss for classification, deviance for regression). Note that custom
and custom_increasing can only be used in GBM and DRF with the Python client.
One of: ``"auto"``, ``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``,
``"lift_top_group"``, ``"misclassification"``, ``"mean_per_class_error"``, ``"custom"``, ``"custom_increasing"``
(default: ``"auto"``).
"""
return self._parms.get("stopping_metric")
@stopping_metric.setter
def stopping_metric(self, stopping_metric):
assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"))
self._parms["stopping_metric"] = stopping_metric
@property
def stopping_tolerance(self):
"""
Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
Type: ``float`` (default: ``0.001``).
"""
return self._parms.get("stopping_tolerance")
@stopping_tolerance.setter
def stopping_tolerance(self, stopping_tolerance):
assert_is_type(stopping_tolerance, None, numeric)
self._parms["stopping_tolerance"] = stopping_tolerance
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def seed(self):
"""
Seed for pseudo random number generator (if applicable)
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def distribution(self):
"""
Distribution function
One of: ``"auto"``, ``"bernoulli"``, ``"multinomial"``, ``"gaussian"``, ``"poisson"``, ``"gamma"``,
``"tweedie"``, ``"laplace"``, ``"quantile"``, ``"huber"`` (default: ``"auto"``).
"""
return self._parms.get("distribution")
@distribution.setter
def distribution(self, distribution):
assert_is_type(distribution, None, Enum("auto", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"))
self._parms["distribution"] = distribution
@property
def tweedie_power(self):
"""
Tweedie power for Tweedie regression, must be between 1 and 2.
Type: ``float`` (default: ``1.5``).
"""
return self._parms.get("tweedie_power")
@tweedie_power.setter
def tweedie_power(self, tweedie_power):
assert_is_type(tweedie_power, None, numeric)
self._parms["tweedie_power"] = tweedie_power
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features
One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``).
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
@property
def quiet_mode(self):
"""
Enable quiet mode
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("quiet_mode")
@quiet_mode.setter
def quiet_mode(self, quiet_mode):
assert_is_type(quiet_mode, None, bool)
self._parms["quiet_mode"] = quiet_mode
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
@property
def ntrees(self):
"""
(same as n_estimators) Number of trees.
Type: ``int`` (default: ``50``).
"""
return self._parms.get("ntrees")
@ntrees.setter
def ntrees(self, ntrees):
assert_is_type(ntrees, None, int)
self._parms["ntrees"] = ntrees
@property
def max_depth(self):
"""
Maximum tree depth.
Type: ``int`` (default: ``6``).
"""
return self._parms.get("max_depth")
@max_depth.setter
def max_depth(self, max_depth):
assert_is_type(max_depth, None, int)
self._parms["max_depth"] = max_depth
@property
def min_rows(self):
"""
(same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("min_rows")
@min_rows.setter
def min_rows(self, min_rows):
assert_is_type(min_rows, None, numeric)
self._parms["min_rows"] = min_rows
@property
def min_child_weight(self):
"""
(same as min_rows) Fewest allowed (weighted) observations in a leaf.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("min_child_weight")
@min_child_weight.setter
def min_child_weight(self, min_child_weight):
assert_is_type(min_child_weight, None, numeric)
self._parms["min_child_weight"] = min_child_weight
@property
def learn_rate(self):
"""
(same as eta) Learning rate (from 0.0 to 1.0)
Type: ``float`` (default: ``0.3``).
"""
return self._parms.get("learn_rate")
@learn_rate.setter
def learn_rate(self, learn_rate):
assert_is_type(learn_rate, None, numeric)
self._parms["learn_rate"] = learn_rate
@property
def eta(self):
"""
(same as learn_rate) Learning rate (from 0.0 to 1.0)
Type: ``float`` (default: ``0.3``).
"""
return self._parms.get("eta")
@eta.setter
def eta(self, eta):
assert_is_type(eta, None, numeric)
self._parms["eta"] = eta
@property
def sample_rate(self):
"""
(same as subsample) Row sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("sample_rate")
@sample_rate.setter
def sample_rate(self, sample_rate):
assert_is_type(sample_rate, None, numeric)
self._parms["sample_rate"] = sample_rate
@property
def subsample(self):
"""
(same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("subsample")
@subsample.setter
def subsample(self, subsample):
assert_is_type(subsample, None, numeric)
self._parms["subsample"] = subsample
@property
def col_sample_rate(self):
"""
(same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate")
@col_sample_rate.setter
def col_sample_rate(self, col_sample_rate):
assert_is_type(col_sample_rate, None, numeric)
self._parms["col_sample_rate"] = col_sample_rate
@property
def colsample_bylevel(self):
"""
(same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("colsample_bylevel")
@colsample_bylevel.setter
def colsample_bylevel(self, colsample_bylevel):
assert_is_type(colsample_bylevel, None, numeric)
self._parms["colsample_bylevel"] = colsample_bylevel
@property
def col_sample_rate_per_tree(self):
"""
(same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate_per_tree")
@col_sample_rate_per_tree.setter
def col_sample_rate_per_tree(self, col_sample_rate_per_tree):
assert_is_type(col_sample_rate_per_tree, None, numeric)
self._parms["col_sample_rate_per_tree"] = col_sample_rate_per_tree
@property
def colsample_bytree(self):
"""
(same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("colsample_bytree")
@colsample_bytree.setter
def colsample_bytree(self, colsample_bytree):
assert_is_type(colsample_bytree, None, numeric)
self._parms["colsample_bytree"] = colsample_bytree
@property
def max_abs_leafnode_pred(self):
"""
(same as max_delta_step) Maximum absolute value of a leaf node prediction
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_abs_leafnode_pred")
@max_abs_leafnode_pred.setter
def max_abs_leafnode_pred(self, max_abs_leafnode_pred):
assert_is_type(max_abs_leafnode_pred, None, float)
self._parms["max_abs_leafnode_pred"] = max_abs_leafnode_pred
@property
def max_delta_step(self):
"""
(same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_delta_step")
@max_delta_step.setter
def max_delta_step(self, max_delta_step):
assert_is_type(max_delta_step, None, float)
self._parms["max_delta_step"] = max_delta_step
@property
def monotone_constraints(self):
"""
A mapping representing monotonic constraints. Use +1 to enforce an increasing constraint and -1 to specify a
decreasing constraint.
Type: ``dict``.
"""
return self._parms.get("monotone_constraints")
@monotone_constraints.setter
def monotone_constraints(self, monotone_constraints):
assert_is_type(monotone_constraints, None, dict)
self._parms["monotone_constraints"] = monotone_constraints
@property
def score_tree_interval(self):
"""
Score the model after every so many trees. Disabled if set to 0.
Type: ``int`` (default: ``0``).
"""
return self._parms.get("score_tree_interval")
@score_tree_interval.setter
def score_tree_interval(self, score_tree_interval):
assert_is_type(score_tree_interval, None, int)
self._parms["score_tree_interval"] = score_tree_interval
@property
def min_split_improvement(self):
"""
(same as gamma) Minimum relative improvement in squared error reduction for a split to happen
Type: ``float`` (default: ``0``).
"""
return self._parms.get("min_split_improvement")
@min_split_improvement.setter
def min_split_improvement(self, min_split_improvement):
assert_is_type(min_split_improvement, None, float)
self._parms["min_split_improvement"] = min_split_improvement
@property
def gamma(self):
"""
(same as min_split_improvement) Minimum relative improvement in squared error reduction for a split to happen
Type: ``float`` (default: ``0``).
"""
return self._parms.get("gamma")
@gamma.setter
def gamma(self, gamma):
assert_is_type(gamma, None, float)
self._parms["gamma"] = gamma
@property
def nthread(self):
"""
Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits (-nthreads
parameter). Defaults to maximum available
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("nthread")
@nthread.setter
def nthread(self, nthread):
assert_is_type(nthread, None, int)
self._parms["nthread"] = nthread
@property
def max_bins(self):
"""
For tree_method=hist only: maximum number of bins
Type: ``int`` (default: ``256``).
"""
return self._parms.get("max_bins")
@max_bins.setter
def max_bins(self, max_bins):
assert_is_type(max_bins, None, int)
self._parms["max_bins"] = max_bins
@property
def max_leaves(self):
"""
For tree_method=hist only: maximum number of leaves
Type: ``int`` (default: ``0``).
"""
return self._parms.get("max_leaves")
@max_leaves.setter
def max_leaves(self, max_leaves):
assert_is_type(max_leaves, None, int)
self._parms["max_leaves"] = max_leaves
@property
def min_sum_hessian_in_leaf(self):
"""
        For tree_method=hist only: the minimum sum of hessian in a leaf to keep splitting
Type: ``float`` (default: ``100``).
"""
return self._parms.get("min_sum_hessian_in_leaf")
@min_sum_hessian_in_leaf.setter
def min_sum_hessian_in_leaf(self, min_sum_hessian_in_leaf):
assert_is_type(min_sum_hessian_in_leaf, None, float)
self._parms["min_sum_hessian_in_leaf"] = min_sum_hessian_in_leaf
@property
def min_data_in_leaf(self):
"""
For tree_method=hist only: the minimum data in a leaf to keep splitting
Type: ``float`` (default: ``0``).
"""
return self._parms.get("min_data_in_leaf")
@min_data_in_leaf.setter
def min_data_in_leaf(self, min_data_in_leaf):
assert_is_type(min_data_in_leaf, None, float)
self._parms["min_data_in_leaf"] = min_data_in_leaf
@property
def sample_type(self):
"""
For booster=dart only: sample_type
One of: ``"uniform"``, ``"weighted"`` (default: ``"uniform"``).
"""
return self._parms.get("sample_type")
@sample_type.setter
def sample_type(self, sample_type):
assert_is_type(sample_type, None, Enum("uniform", "weighted"))
self._parms["sample_type"] = sample_type
@property
def normalize_type(self):
"""
For booster=dart only: normalize_type
One of: ``"tree"``, ``"forest"`` (default: ``"tree"``).
"""
return self._parms.get("normalize_type")
@normalize_type.setter
def normalize_type(self, normalize_type):
assert_is_type(normalize_type, None, Enum("tree", "forest"))
self._parms["normalize_type"] = normalize_type
@property
def rate_drop(self):
"""
For booster=dart only: rate_drop (0..1)
Type: ``float`` (default: ``0``).
"""
return self._parms.get("rate_drop")
@rate_drop.setter
def rate_drop(self, rate_drop):
assert_is_type(rate_drop, None, float)
self._parms["rate_drop"] = rate_drop
@property
def one_drop(self):
"""
For booster=dart only: one_drop
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("one_drop")
@one_drop.setter
def one_drop(self, one_drop):
assert_is_type(one_drop, None, bool)
self._parms["one_drop"] = one_drop
@property
def skip_drop(self):
"""
For booster=dart only: skip_drop (0..1)
Type: ``float`` (default: ``0``).
"""
return self._parms.get("skip_drop")
@skip_drop.setter
def skip_drop(self, skip_drop):
assert_is_type(skip_drop, None, float)
self._parms["skip_drop"] = skip_drop
@property
def tree_method(self):
"""
Tree method
One of: ``"auto"``, ``"exact"``, ``"approx"``, ``"hist"`` (default: ``"auto"``).
"""
return self._parms.get("tree_method")
@tree_method.setter
def tree_method(self, tree_method):
assert_is_type(tree_method, None, Enum("auto", "exact", "approx", "hist"))
self._parms["tree_method"] = tree_method
@property
def grow_policy(self):
"""
Grow policy - depthwise is standard GBM, lossguide is LightGBM
One of: ``"depthwise"``, ``"lossguide"`` (default: ``"depthwise"``).
"""
return self._parms.get("grow_policy")
@grow_policy.setter
def grow_policy(self, grow_policy):
assert_is_type(grow_policy, None, Enum("depthwise", "lossguide"))
self._parms["grow_policy"] = grow_policy
@property
def booster(self):
"""
Booster type
One of: ``"gbtree"``, ``"gblinear"``, ``"dart"`` (default: ``"gbtree"``).
"""
return self._parms.get("booster")
@booster.setter
def booster(self, booster):
assert_is_type(booster, None, Enum("gbtree", "gblinear", "dart"))
self._parms["booster"] = booster
@property
def reg_lambda(self):
"""
L2 regularization
Type: ``float`` (default: ``1``).
"""
return self._parms.get("reg_lambda")
@reg_lambda.setter
def reg_lambda(self, reg_lambda):
assert_is_type(reg_lambda, None, float)
self._parms["reg_lambda"] = reg_lambda
@property
def reg_alpha(self):
"""
L1 regularization
Type: ``float`` (default: ``0``).
"""
return self._parms.get("reg_alpha")
@reg_alpha.setter
def reg_alpha(self, reg_alpha):
assert_is_type(reg_alpha, None, float)
self._parms["reg_alpha"] = reg_alpha
@property
def dmatrix_type(self):
"""
Type of DMatrix. For sparse, NAs and 0 are treated equally.
One of: ``"auto"``, ``"dense"``, ``"sparse"`` (default: ``"auto"``).
"""
return self._parms.get("dmatrix_type")
@dmatrix_type.setter
def dmatrix_type(self, dmatrix_type):
assert_is_type(dmatrix_type, None, Enum("auto", "dense", "sparse"))
self._parms["dmatrix_type"] = dmatrix_type
@property
def backend(self):
"""
Backend. By default (auto), a GPU is used if available.
One of: ``"auto"``, ``"gpu"``, ``"cpu"`` (default: ``"auto"``).
"""
return self._parms.get("backend")
@backend.setter
def backend(self, backend):
assert_is_type(backend, None, Enum("auto", "gpu", "cpu"))
self._parms["backend"] = backend
@property
def gpu_id(self):
"""
Which GPU to use.
Type: ``int`` (default: ``0``).
"""
return self._parms.get("gpu_id")
@gpu_id.setter
def gpu_id(self, gpu_id):
assert_is_type(gpu_id, None, int)
self._parms["gpu_id"] = gpu_id
# Ask the H2O server whether an XGBoost model can be built (depends on availability of native backends)
@staticmethod
def available():
"""
Returns True if an XGBoost model can be built, or False otherwise.
"""
if "XGBoost" not in h2o.cluster().list_core_extensions():
print("Cannot build an XGBoost model - no backend found.")
return False
else:
return True
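# ----------------------------------------------------------------------------
# Editorial usage sketch, not part of the generated estimator code. It assumes
# this module defines the estimator class as ``H2OXGBoostEstimator`` and that a
# local H2O cluster can be started; the data path and response column below are
# placeholders.
if __name__ == "__main__":
    import h2o
    h2o.init()
    if H2OXGBoostEstimator.available():
        frame = h2o.import_file("smalldata/iris/iris.csv")  # placeholder path
        model = H2OXGBoostEstimator(booster="gbtree", tree_method="hist",
                                    reg_lambda=1.0, reg_alpha=0.0)
        model.train(y="C5", training_frame=frame)  # placeholder response column
        print(model.model_performance())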
|
{
"content_hash": "e1bf985c8be51b835ccc2ac1b1ebaa48",
"timestamp": "",
"source": "github",
"line_count": 969,
"max_line_length": 214,
"avg_line_length": 30.50361197110423,
"alnum_prop": 0.58965423912308,
"repo_name": "h2oai/h2o-dev",
"id": "10abd0701db53e8b59d6fb925df58abda4a98f5e",
"size": "29759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/h2o/estimators/xgboost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
}
|
from scrapinghub import ScrapinghubClient
apikey = '11befd9da9304fecb83dfa114d1926e9'
client = ScrapinghubClient(apikey)
project = client.get_project(252342)
project.jobs.run('javname')
project.jobs.run('javcode')
project.jobs.run('thzride')
project.jobs.run('myspider')
|
{
"content_hash": "6d638de87eed4cd933d686be89ec3be3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 30.22222222222222,
"alnum_prop": 0.8051470588235294,
"repo_name": "jiangtianyu2009/PiSoftCake",
"id": "2bdef2702c7d9946e99205acf4d30bc465498aa4",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "thzspider/scripts/runonscrapinghub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38396"
}
],
"symlink_target": ""
}
|
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado import gen
import math
import tornado
import gdal
from helpers import TileSampler, CoordSystem
import json
from geojson import Feature, Point
import geojson
from algo import generate_line_segments, generate_visible, iter_to_runs
PORT = 8888
ZOOM = 12
class ApiHandler(tornado.web.RequestHandler):
def write_geojson(self, obj):
self.set_header("Content-Type", "application/vnd.geo+json")
self.write(geojson.dumps(obj))
def write_json(self, obj):
self.set_header("Content-Type", "application/javascript")
self.write(json.dumps(obj))
def write_error(self, status_code, exc_info=None, **kwargs):
errortext = 'Internal error'
if exc_info:
errortext = getattr(exc_info[1], 'log_message', errortext)
self.write_json({'status' : 'error',
'code' : status_code,
'reason' : errortext})
class ElevationHandler(ApiHandler):
@gen.coroutine
def get(self, lng, lat):
try:
lnglat = map(float, (lng, lat))
except Exception:
raise tornado.web.HTTPError(400)
sampler = TileSampler()
pixel = CoordSystem.lnglat_to_pixel(lnglat)
print 'Getting elevation at lng,lat:%s,%s %s,%s:' % (lng, lat, pixel[0], pixel[1])
value = yield sampler.sample_pixel(pixel)
lnglat = CoordSystem.pixel_to_lnglat(pixel)
self.write_geojson(Feature(geometry=Point(lnglat), properties={
"elevation":float(value)
}))
class ShedHandler(ApiHandler):
@gen.coroutine
def get(self, lng, lat, altitude, radius):
#168036.0, 404958.0
#(168036.0, 404958.0) (168038.83662185463, 404948.41075725335)
try:
lng, lat, altitude, radius = map(float, (lng, lat, altitude, radius))
except Exception:
raise tornado.web.HTTPError(400)
print 'Getting elevation at lng: {}, lat: {}, altitude: {}, radius:{}'.format(lng, lat, altitude, radius)
center = CoordSystem.lnglat_to_pixel((lng, lat))
sampler = TileSampler()
line_segments = []
for start, stop in generate_line_segments(radius, center):
print start, stop
elevations, pixels = yield sampler.sample_line(start, stop)
line_segments.extend(iter_to_runs(generate_visible(altitude, elevations), pixels))
line_segments = [map(tuple, segment) for segment in line_segments]
self.write_json(line_segments)
application = tornado.web.Application([
(r"/elevation/(-?\d+\.?\d*)/(-?\d+\.?\d*)", ElevationHandler),
(r"/shed/(-?\d+\.?\d*)/(-?\d+\.?\d*)/(\d+\.?\d*)/(\d+\.?\d*)", ShedHandler),
])
if __name__ == "__main__":
application.listen(PORT)
print 'listening on port %s' % PORT
tornado.ioloop.IOLoop.current().start()
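# ----------------------------------------------------------------------------
# Editorial note: with the routes registered above, the API can be exercised
# with requests such as the following (host, coordinates, altitude and radius
# are placeholders; useful values depend on the tile data actually served):
#
#   curl http://localhost:8888/elevation/-122.42/37.77
#   curl http://localhost:8888/shed/-122.42/37.77/100/2000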
|
{
"content_hash": "edb714c237bcf678d0bfa82cc14acf42",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 113,
"avg_line_length": 36.6875,
"alnum_prop": 0.6228279386712096,
"repo_name": "lachesis/shed",
"id": "240061e528ae5c6dc77901b5b8ea2e3d323e0c71",
"size": "2935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "142"
},
{
"name": "HTML",
"bytes": "184"
},
{
"name": "JavaScript",
"bytes": "2032"
},
{
"name": "Python",
"bytes": "124467"
},
{
"name": "Shell",
"bytes": "2881"
}
],
"symlink_target": ""
}
|
import numbers
from typing import Any, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_bool_dtype, is_integer_dtype, CategoricalDtype
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.base import column_op, IndexOpsMixin, numpy_column_op
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
_sanitize_list_like,
_is_valid_for_logical_operator,
_is_boolean_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import (
BooleanType,
DataType,
StringType,
)
def _non_fractional_astype(
index_ops: IndexOpsLike, dtype: Dtype, spark_type: DataType
) -> IndexOpsLike:
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class NumericOps(DataTypeOps):
"""The class for binary operations of numeric pandas-on-Spark objects."""
@property
def pretty_name(self) -> str:
return "numerics"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(Column.__add__)(left, right)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(Column.__sub__)(left, right)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Modulo can not be applied to given types.")
def mod(left: Column, right: Any) -> Column:
return ((left % right) + right) % right
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(mod)(left, right)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Exponentiation can not be applied to given types.")
def pow_func(left: Column, right: Any) -> Column:
return (
F.when(left == 1, left)
.when(SF.lit(right) == 0, 1)
.otherwise(Column.__pow__(left, right))
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(pow_func)(left, right)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__radd__)(left, right)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rsub__)(left, right)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rmul__)(left, right)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Exponentiation can not be applied to given types.")
def rpow_func(left: Column, right: Any) -> Column:
return F.when(SF.lit(right == 1), right).otherwise(Column.__rpow__(left, right))
right = transform_boolean_operand_to_numeric(right)
return column_op(rpow_func)(left, right)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Modulo can not be applied to given types.")
def rmod(left: Column, right: Any) -> Column:
return ((right % left) + left) % left
right = transform_boolean_operand_to_numeric(right)
return column_op(rmod)(left, right)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand._with_new_scol(-operand.spark.column, field=operand._internal.data_fields[0])
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand._with_new_scol(
F.abs(operand.spark.column), field=operand._internal.data_fields[0]
)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return column_op(Column.__lt__)(left, right)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return column_op(Column.__le__)(left, right)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return column_op(Column.__ge__)(left, right)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return column_op(Column.__gt__)(left, right)
class IntegralOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
LongType, IntegerType, ByteType and ShortType.
"""
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if isinstance(right, IndexOpsMixin) and isinstance(right.dtype, extension_dtypes):
return right ^ left
elif _is_valid_for_logical_operator(right):
right_is_boolean = _is_boolean_type(right)
def xor_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
return (
left.bitwiseXOR(right.cast("integer")).cast("boolean")
if right_is_boolean
else left.bitwiseXOR(right)
)
return column_op(xor_func)(left, right)
else:
raise TypeError("XOR can not be applied to given types.")
@property
def pretty_name(self) -> str:
return "integrals"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType):
return column_op(SF.repeat)(right, left)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("True division can not be applied to given types.")
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(SF.lit(np.inf).__div__(left))
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Floor division can not be applied to given types.")
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(SF.lit(np.inf).__div__(left))
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("True division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Floor division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.floor(SF.lit(right).__div__(left))
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand._with_new_scol(
F.bitwise_not(operand.spark.column), field=operand._internal.data_fields[0]
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
return _non_fractional_astype(index_ops, dtype, spark_type)
class FractionalOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
FloatType, DoubleType.
"""
@property
def pretty_name(self) -> str:
return "fractions"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("True division can not be applied to given types.")
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("Floor division can not be applied to given types.")
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("True division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
if not isinstance(right, numbers.Number):
raise TypeError("Floor division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.when(SF.lit(left) == np.nan, np.nan).otherwise(
F.floor(SF.lit(right).__div__(left))
)
)
right = transform_boolean_operand_to_numeric(right, spark_type=left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
# Special-case floating point types, because Spark's count treats nan as a valid value,
# whereas pandas' count doesn't include nan.
return index_ops._with_new_scol(
F.nanvl(index_ops.spark.column, SF.lit(None)),
field=index_ops._internal.data_fields[0].copy(nullable=True),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if is_integer_dtype(dtype) and not isinstance(dtype, extension_dtypes):
if index_ops.hasnans:
raise ValueError(
"Cannot convert %s with missing values to integer" % self.pretty_name
)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
SF.lit(True),
).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),
)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class DecimalOps(FractionalOps):
"""
The class for decimal operations of pandas-on-Spark objects with spark type:
DecimalType.
"""
@property
def pretty_name(self) -> str:
return "decimal"
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops.copy()
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
# TODO(SPARK-36230): check index_ops.hasnans after fixing SPARK-36230
dtype, spark_type = pandas_on_spark_type(dtype)
return _non_fractional_astype(index_ops, dtype, spark_type)
class IntegralExtensionOps(IntegralOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
LongType, IntegerType, ByteType and ShortType
- dtypes:
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
"""
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
raise TypeError("XOR can not be applied to given types.")
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if is_integer_dtype(dtype) and not isinstance(dtype, extension_dtypes):
if index_ops.hasnans:
raise ValueError(
"Cannot convert %s with missing values to integer" % self.pretty_name
)
elif is_bool_dtype(dtype) and not isinstance(dtype, extension_dtypes):
if index_ops.hasnans:
raise ValueError("Cannot convert %s with missing values to bool" % self.pretty_name)
return _non_fractional_astype(index_ops, dtype, spark_type)
class FractionalExtensionOps(FractionalOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
FloatType, DoubleType and DecimalType
- dtypes:
Float32Dtype, Float64Dtype
"""
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if is_integer_dtype(dtype) and not isinstance(dtype, extension_dtypes):
if index_ops.hasnans:
raise ValueError(
"Cannot convert %s with missing values to integer" % self.pretty_name
)
elif is_bool_dtype(dtype) and not isinstance(dtype, extension_dtypes):
if index_ops.hasnans:
raise ValueError("Cannot convert %s with missing values to bool" % self.pretty_name)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
SF.lit(True),
).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),
)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
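# ----------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module. The ops classes
# above are dispatched internally by pandas-on-Spark, so ordinary Series
# arithmetic exercises them; this assumes a local Spark session can be created
# on demand when the module is run directly.
if __name__ == "__main__":
    import pyspark.pandas as ps

    s = ps.Series([1, 2, 3])
    print((s + 1) * 2)           # add/mul -> IntegralOps
    print(s / 0)                 # truediv: a zero divisor yields inf rather than raising
    print(s.astype("float64"))   # astype -> _non_fractional_astype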
|
{
"content_hash": "a0b444a674c0c0ed7bfca9784391681a",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 100,
"avg_line_length": 42.223091976516635,
"alnum_prop": 0.6228680014831294,
"repo_name": "chuckchen/spark",
"id": "3e746643d98719fa5693734105461a8254dbf07e",
"size": "22361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/data_type_ops/num_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50108"
},
{
"name": "Batchfile",
"bytes": "25676"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26852"
},
{
"name": "Dockerfile",
"bytes": "9127"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4156231"
},
{
"name": "JavaScript",
"bytes": "209968"
},
{
"name": "Makefile",
"bytes": "1587"
},
{
"name": "PLSQL",
"bytes": "6658"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3222278"
},
{
"name": "R",
"bytes": "1203999"
},
{
"name": "Roff",
"bytes": "36516"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32613994"
},
{
"name": "Shell",
"bytes": "209299"
},
{
"name": "TSQL",
"bytes": "473509"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
}
|
from SCons.Script import *
########################################################################
#
# generate a builder for weighted pushdown systems
#
def PASSBuilder(env, engine, action, suffix='.out', target_scanner=None, **kwargs):
def pass_generator(target, source, env, for_signature):
actions = [action]
return actions
def pass_target_scanner(node, env, path):
deps = [env[engine]]
if target_scanner:
deps += target_scanner(node, env, path)
return deps
return Builder(generator=pass_generator, target_scanner=Scanner(pass_target_scanner), suffix=suffix, **kwargs)
########################################################################
def generate(env):
if hasattr(env, 'PASSBuilder'):
return
env.AddMethod(PASSBuilder)
def exists(env):
return True
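# ----------------------------------------------------------------------------
# Editorial usage sketch: inside an SConscript, after this tool is loaded, the
# method added by generate() could be used roughly as follows (the engine path,
# action string and file names are placeholders):
#
#   env['SOLVER'] = env.File('bin/solver')
#   run_pass = env.PASSBuilder('SOLVER', '$SOLVER $SOURCE > $TARGET')
#   env.Append(BUILDERS={'RunPass': run_pass})
#   env.RunPass('results/test1.out', 'tests/test1.in')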
|
{
"content_hash": "49534693d7b06b3ba1a072be29fa2874",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.5441860465116279,
"repo_name": "corvette-berkeley/precimonious",
"id": "91f8cfc0f49af6223ac0cdf32798e675e52ff9a6",
"size": "860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tests/pass-builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "127356"
},
{
"name": "C++",
"bytes": "152747"
},
{
"name": "Python",
"bytes": "91681"
},
{
"name": "Shell",
"bytes": "6799"
}
],
"symlink_target": ""
}
|
__version__ = "2.5"
__tabversion__ = "2.4" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
import re, types, sys, cStringIO, md5, os.path
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Exception raised for errors raised in production rules
class SyntaxError(Exception): pass
# Available instance types. This is used when parsers are defined by a class.
# it's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.
try:
_INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
_INSTANCETYPE = types.InstanceType
class object: pass # Note: needed if no new-style classes present
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser= None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
def __setitem__(self,n,v):
self.slice[n].value = v
def __getslice__(self,i,j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def error(self):
raise SyntaxError
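# Editorial example (illustrative only): each user-defined grammar rule
# receives a YaccProduction instance, so inside a parser module one typically
# writes something like:
#
#   def p_expression_plus(p):
#       'expression : expression PLUS term'
#       p[0] = p[1] + p[3]      # __getitem__/__setitem__ operate on .value
#       line = p.lineno(1)      # starting line number of the first symbol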
# The LR Parsing engine. This is defined as a class so that multiple parsers
# can exist in the same process. A user never instantiates this directly.
# Instead, the global yacc() function should be used to create a suitable Parser
# object.
class Parser:
def __init__(self,magic=None):
# This is a hack to keep users from trying to instantiate a Parser
# object directly.
if magic != "xyzzy":
raise YaccError, "Can't directly instantiate Parser. Use yacc() instead."
# Reset internal state
self.productions = None # List of productions
self.errorfunc = None # Error handling function
self.action = { } # LR Action table
self.goto = { } # LR goto table
self.require = { } # Attribute require table
self.method = "Unknown LR" # Table construction method used
def errok(self):
self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
endsym = "$end" # End symbol
# If no lexer was given, we will try to use the lex module
if not lexer:
import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = endsym
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
if debug > 1:
print 'state', state
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = endsym
# --! DEBUG
if debug:
errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
# --! DEBUG
if debug > 1:
print 'action', t
# --! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
if ltype is endsym:
# Error, end of input
sys.stderr.write("yacc: Parse error. EOF\n")
return
statestack.append(t)
state = t
# --! DEBUG
if debug > 1:
sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if debug > 1:
sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
del symstack[-plen:]
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# --! DEBUG
if debug:
sys.stderr.write(errorlead + "\n")
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type is endsym:
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type is not endsym:
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type is endsym:
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError, "yacc: internal parser error!!!\n"
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
if ltype == '$end':
# Error, end of input
sys.stderr.write("yacc: Parse error. EOF\n")
return
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
del symstack[-plen:]
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError, "yacc: internal parser error!!!\n"
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
if ltype == '$end':
# Error, end of input
sys.stderr.write("yacc: Parse error. EOF\n")
return
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
del symstack[-plen:]
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.func(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError, "yacc: internal parser error!!!\n"
# -----------------------------------------------------------------------------
# === Parser Construction ===
#
# The following functions and variables are used to implement the yacc() function
# itself. This is pretty hairy stuff involving lots of error checking,
# construction of LR items, kernels, and so forth. Although a lot of
# this work is done using global variables, the resulting Parser object
# is completely self contained--meaning that it is safe to repeatedly
# call yacc() with different grammars in the same application.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# validate_file()
#
# This function checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_file(filename):
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
counthash = { }
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
noerror = 0
linen += 1
return noerror
# This function looks for functions that might be grammar rules, but which don't have the proper p_ prefix.
def validate_dict(d):
for n,v in d.items():
if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_':
sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
except StandardError:
pass
# -----------------------------------------------------------------------------
# === GRAMMAR FUNCTIONS ===
#
# The following global variables and functions are used to store, manipulate,
# and verify the grammar rules specified by the user.
# -----------------------------------------------------------------------------
# Initialize all of the global variables used during grammar construction
def initialize_vars():
global Productions, Prodnames, Prodmap, Terminals
global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
global Errorfunc, Signature, Requires
Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
First = { } # A dictionary of precomputed FIRST(x) symbols
Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
    UsedPrecedence = { }     # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
LRitems = [ ] # A list of all LR items for the grammar. These are the
# productions with the "dot" like E -> E . PLUS E
Errorfunc = None # User defined error handler
Signature = md5.new() # Digital signature of the grammar rules, precedence
# and other information. Used to determined when a
# parsing table needs to be regenerated.
Signature.update(__tabversion__)
Requires = { } # Requires list
# File objects used when creating the parser.out debugging file
global _vf, _vfc
_vf = cStringIO.StringIO()
_vfc = cStringIO.StringIO()
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# It has a few required attributes:
#
# name - Name of the production (nonterminal)
# prod - A list of symbols making up its production
# number - Production number.
#
# In addition, a few additional attributes are used to help with debugging or
# optimization of table generation.
#
# file - File where production action is defined.
# lineno - Line number where action is defined
# func - Action function
# prec - Precedence level
# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
# then lr_next refers to 'E -> E PLUS . E'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# -----------------------------------------------------------------------------
class Production:
def __init__(self,**kw):
for k,v in kw.items():
setattr(self,k,v)
self.lr_index = -1
self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
self.lr1_added = 0 # Flag indicating whether or not added to LR1
self.usyms = [ ]
self.lookaheads = { }
self.lk_added = { }
self.setnumbers = [ ]
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return str(self)
# Compute lr_items from the production
def lr_item(self,n):
if n > len(self.prod): return None
p = Production()
p.name = self.name
p.prod = list(self.prod)
p.number = self.number
p.lr_index = n
p.lookaheads = { }
p.setnumbers = self.setnumbers
p.prod.insert(n,".")
p.prod = tuple(p.prod)
p.len = len(p.prod)
p.usyms = self.usyms
# Precompute list of productions immediately following
try:
p.lrafter = Prodnames[p.prod[n+1]]
except (IndexError,KeyError),e:
p.lrafter = []
try:
p.lrbefore = p.prod[n-1]
except IndexError:
p.lrbefore = None
return p
class MiniProduction:
pass
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule.
# The production rule is assumed to be found in the function's docstring.
# This rule has the general syntax:
#
# name1 ::= production1
# | production2
# | production3
# ...
# | productionn
# name2 ::= production1
# | production2
# ...
# -----------------------------------------------------------------------------
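# As an illustration of the docstring syntax described above (the rule and
# token names below are hypothetical, not part of any grammar in this file),
# a user-supplied rule function might look like:
#
#     def p_expression(p):
#         '''expression : expression PLUS term
#                       | term'''
#         if len(p) == 4:
#             p[0] = p[1] + p[3]
#         else:
#             p[0] = p[1]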
def add_production(f,file,line,prodname,syms):
if Terminals.has_key(prodname):
sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
return -1
if prodname == 'error':
sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
return -1
if not _is_identifier.match(prodname):
sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
return -1
for x in range(len(syms)):
s = syms[x]
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
return -1
if not Terminals.has_key(c):
Terminals[c] = []
syms[x] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
return -1
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if Prodmap.has_key(map):
m = Prodmap[map]
sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
return -1
p = Production()
p.name = prodname
p.prod = syms
p.file = file
p.line = line
p.func = f
p.number = len(Productions)
Productions.append(p)
Prodmap[map] = p
if not Nonterminals.has_key(prodname):
Nonterminals[prodname] = [ ]
# Add all terminals to Terminals
i = 0
while i < len(p.prod):
t = p.prod[i]
if t == '%prec':
try:
precname = p.prod[i+1]
except IndexError:
sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
return -1
prec = Precedence.get(precname,None)
if not prec:
sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
return -1
else:
p.prec = prec
UsedPrecedence[precname] = 1
del p.prod[i]
del p.prod[i]
continue
if Terminals.has_key(t):
Terminals[t].append(p.number)
# Is a terminal. We'll assign a precedence to p based on this
if not hasattr(p,"prec"):
p.prec = Precedence.get(t,('right',0))
else:
if not Nonterminals.has_key(t):
Nonterminals[t] = [ ]
Nonterminals[t].append(p.number)
i += 1
if not hasattr(p,"prec"):
p.prec = ('right',0)
# Set final length of productions
p.len = len(p.prod)
p.prod = tuple(p.prod)
# Calculate unique syms in the production
p.usyms = [ ]
for s in p.prod:
if s not in p.usyms:
p.usyms.append(s)
# Add to the global productions list
try:
Prodnames[p.name].append(p)
except KeyError:
Prodnames[p.name] = [ p ]
return 0
# Given a raw rule function, this function rips out its doc string
# and adds rules to the grammar
def add_function(f):
line = f.func_code.co_firstlineno
file = f.func_code.co_filename
error = 0
if isinstance(f,types.MethodType):
reqdargs = 2
else:
reqdargs = 1
if f.func_code.co_argcount > reqdargs:
sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
return -1
if f.func_code.co_argcount < reqdargs:
sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
return -1
if f.__doc__:
# Split the doc string into lines
pstrings = f.__doc__.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
return -1
prodname = lastp
if len(p) > 1:
syms = p[1:]
else:
syms = [ ]
else:
prodname = p[0]
lastp = prodname
assign = p[1]
if len(p) > 2:
syms = p[2:]
else:
syms = [ ]
if assign != ':' and assign != '::=':
sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
return -1
e = add_production(f,file,dline,prodname,syms)
error += e
except StandardError:
sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
error -= 1
else:
sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
return error
# Cycle checking code (Michael Dyck)
def compute_reachable():
'''
Find each symbol that can be reached from the start symbol.
Print a warning for any nonterminals that can't be reached.
(Unused terminals have already had their warning.)
'''
Reachable = { }
for s in Terminals.keys() + Nonterminals.keys():
Reachable[s] = 0
mark_reachable_from( Productions[0].prod[0], Reachable )
for s in Nonterminals.keys():
if not Reachable[s]:
sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
def mark_reachable_from(s, Reachable):
'''
Mark all symbols that are reachable from symbol s.
'''
if Reachable[s]:
# We've already reached symbol s.
return
Reachable[s] = 1
for p in Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r, Reachable)
# -----------------------------------------------------------------------------
# compute_terminates()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
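# For example, given a (hypothetical) rule set whose only production for 'foo' is
#
#     foo : foo PLUS foo
#
# 'foo' can never derive a string consisting solely of terminals, so this
# check would flag it as infinitely recursive.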
def compute_terminates():
'''
Raise an error for any symbols that don't terminate.
'''
Terminates = {}
# Terminals:
for t in Terminals.keys():
Terminates[t] = 1
Terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in Nonterminals.keys():
Terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not Terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not Terminates[n]:
Terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
some_error = 0
for (s,terminates) in Terminates.items():
if not terminates:
if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
some_error = 1
return some_error
# -----------------------------------------------------------------------------
# verify_productions()
#
# This function examines all of the supplied rules to see if they seem valid.
# -----------------------------------------------------------------------------
def verify_productions(cycle_check=1):
error = 0
for p in Productions:
if not p: continue
for s in p.prod:
if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
error = 1
continue
unused_tok = 0
# Now verify all of the tokens
if yaccdebug:
_vf.write("Unused terminals:\n\n")
for s,v in Terminals.items():
if s != 'error' and not v:
sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
if yaccdebug: _vf.write(" %s\n"% s)
unused_tok += 1
# Print out all of the productions
if yaccdebug:
_vf.write("\nGrammar\n\n")
for i in range(1,len(Productions)):
_vf.write("Rule %-5d %s\n" % (i, Productions[i]))
unused_prod = 0
# Verify the use of all productions
for s,v in Nonterminals.items():
if not v:
p = Prodnames[s][0]
sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
unused_prod += 1
if unused_tok == 1:
sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
if unused_tok > 1:
sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
if unused_prod == 1:
sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
if unused_prod > 1:
sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
if yaccdebug:
_vf.write("\nTerminals, with rules where they appear\n\n")
ks = Terminals.keys()
ks.sort()
for k in ks:
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
_vf.write("\nNonterminals, with rules where they appear\n\n")
ks = Nonterminals.keys()
ks.sort()
for k in ks:
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
if (cycle_check):
compute_reachable()
error += compute_terminates()
# error += check_cycles()
return error
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems():
for p in Productions:
lastlri = p
lri = p.lr_item(0)
i = 0
while 1:
lri = p.lr_item(i)
lastlri.lr_next = lri
if not lri: break
lri.lr_num = len(LRitems)
LRitems.append(lri)
lastlri = lri
i += 1
# In order for the rest of the parser generator to work, we need to
# guarantee that no more lritems are generated. Therefore, we nuke
# the p.lr_item method. (Only used in debugging)
# Production.lr_item = None
# -----------------------------------------------------------------------------
# add_precedence()
#
# Given a list of precedence rules, add to the precedence table.
# -----------------------------------------------------------------------------
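# The expected shape of plist mirrors the usual 'precedence' specification in a
# grammar module, e.g. (token names here are purely illustrative):
#
#     precedence = (
#         ('left',  'PLUS',  'MINUS'),     # level 1 (binds loosest)
#         ('left',  'TIMES', 'DIVIDE'),    # level 2
#         ('right', 'UMINUS'),             # level 3 (binds tightest)
#     )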
def add_precedence(plist):
plevel = 0
error = 0
for p in plist:
plevel += 1
try:
prec = p[0]
terms = p[1:]
if prec != 'left' and prec != 'right' and prec != 'nonassoc':
sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
return -1
for t in terms:
if Precedence.has_key(t):
sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
error += 1
continue
Precedence[t] = (prec,plevel)
except:
sys.stderr.write("yacc: Invalid precedence table.\n")
error += 1
return error
# -----------------------------------------------------------------------------
# check_precedence()
#
# Checks the use of the Precedence tables. This makes sure all of the symbols
# are terminals or were used with %prec
# -----------------------------------------------------------------------------
def check_precedence():
error = 0
for precname in Precedence.keys():
if not (Terminals.has_key(precname) or UsedPrecedence.has_key(precname)):
sys.stderr.write("yacc: Precedence rule '%s' defined for unknown symbol '%s'\n" % (Precedence[precname][0],precname))
error += 1
return error
# -----------------------------------------------------------------------------
# augment_grammar()
#
# Compute the augmented grammar. This is just a rule S' -> start where start
# is the starting symbol.
# -----------------------------------------------------------------------------
def augment_grammar(start=None):
if not start:
start = Productions[1].name
Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
Productions[0].usyms = [ start ]
Nonterminals[start].append(0)
# -------------------------------------------------------------------------
# first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
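# A small illustration (symbol names are hypothetical): if First['X'] is
# ['x', '<empty>'] and 'b' is a terminal, then
#
#     first(('X', 'b'))  ->  ['x', 'b']          # X may vanish, so 'b' is reachable
#     first(('X',))      ->  ['x', '<empty>']    # every symbol in beta can be empty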
def first(beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# FOLLOW(x)
# Given a non-terminal, this function computes the set of all symbols
# that might follow it. Dragon book, p. 189.
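# As a worked example (hypothetical grammar): with the rules
#
#     E : E PLUS T | T          T : ID
#
# and E as the start symbol, Follow[E] and Follow[T] both end up containing
# 'PLUS' and '$end'.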
def compute_follow(start=None):
# Add '$end' to the follow list of the start symbol
for k in Nonterminals.keys():
Follow[k] = [ ]
if not start:
start = Productions[1].name
Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if Nonterminals.has_key(B):
# Okay. We got a non-terminal in a production
fst = first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in Follow[B]:
Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in Follow[p.name]:
if f not in Follow[B]:
Follow[B].append(f)
didadd = 1
if not didadd: break
if 0 and yaccdebug:
_vf.write('\nFollow:\n')
for k in Nonterminals.keys():
_vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
# -------------------------------------------------------------------------
# compute_first1()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first1():
# Terminals:
for t in Terminals.keys():
First[t] = [t]
First['$end'] = ['$end']
First['#'] = ['#'] # what's this for?
# Nonterminals:
# Initialize to the empty set:
for n in Nonterminals.keys():
First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in Nonterminals.keys():
for p in Prodnames[n]:
for f in first(p.prod):
if f not in First[n]:
First[n].append( f )
some_change = 1
if not some_change:
break
if 0 and yaccdebug:
_vf.write('\nFirst:\n')
for k in Nonterminals.keys():
_vf.write("%-20s : %s\n" %
(k, " ".join([str(s) for s in First[k]])))
# -----------------------------------------------------------------------------
# === SLR Generation ===
#
# The following functions are used to construct SLR (Simple LR) parsing tables
# as described on p.221-229 of the dragon book.
# -----------------------------------------------------------------------------
# Global variables for the LR parsing engine
def lr_init_vars():
global _lr_action, _lr_goto, _lr_method
global _lr_goto_cache, _lr0_cidhash
_lr_action = { } # Action table
_lr_goto = { } # Goto table
_lr_method = "Unknown" # LR method used
_lr_goto_cache = { }
_lr0_cidhash = { }
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
# prodlist is a list of productions.
_add_count = 0 # Counter used to detect cycles
def lr0_closure(I):
global _add_count
_add_count += 1
prodlist = Productions
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lrafter:
if x.lr0_added == _add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = _add_count
didadd = 1
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(I,x):
# First we look for a previously cached entry
g = _lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = _lr_goto_cache.get(x,None)
if not s:
s = { }
_lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lrbefore == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
_lr_goto_cache[(id(I),x)] = g
return g
_lr0_cidhash = { }
# Compute the LR(0) sets of item function
def lr0_items():
C = [ lr0_closure([Productions[0].lr_next]) ]
i = 0
for I in C:
_lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms.keys():
g = lr0_goto(I,x)
if not g: continue
if _lr0_cidhash.has_key(id(g)): continue
_lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# Note: This implementation is a complete replacement of the LALR(1)
# implementation in PLY-1.x releases. That version was based on
# a less efficient algorithm and it had bugs in its implementation.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
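# For instance (hypothetical rules): given
#
#     empty :                          (an empty production)
#     opt_sign : PLUS | MINUS | empty
#
# both 'empty' and 'opt_sign' would appear in the returned dictionary.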
def compute_nullable_nonterminals():
nullable = {}
num_nullable = 0
while 1:
for p in Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not nullable.has_key(t): break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if Nonterminals.has_key(t[1]):
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if Terminals.has_key(a):
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = lr0_goto(C[state],N)
j = _lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if empty.has_key(a):
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if dtrans.has_key((j,t)):
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if Terminals.has_key(p.prod[li]): break # No forget it
if not nullable.has_key(p.prod[li]): break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = lr0_goto(C[j],t) # Go to next set
j = _lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not includedict.has_key(i): includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
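# A tiny worked example (values chosen purely for illustration): with
# X = [1, 2], R(1) = [2], R(2) = [], FP(1) = ['a'] and FP(2) = ['b'],
# digraph() returns {1: ['a', 'b'], 2: ['b']} -- node 1 picks up F(2)
# through the relation.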
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = sys.maxint
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = sys.maxint
F[stack[-1]] = F[x]
element = stack.pop()
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(C, ntrans, nullable):
FP = lambda x: dr_relation(C,x,nullable)
R = lambda x: reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not p.lookaheads.has_key(state):
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(C):
# Determine all of the nullable nonterminals
nullable = compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = find_nonterminal_transitions(C)
# Compute read sets
readsets = compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(method):
global _lr_method
goto = _lr_goto # Goto array
action = _lr_action # Action array
actionp = { } # Action production array (temporary)
_lr_method = method
n_srconflict = 0
n_rrconflict = 0
if yaccdebug:
sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
_vf.write("\n\nParsing method: %s\n\n" % method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = lr0_items()
if method == 'LALR':
add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
st_action = { }
st_actionp = { }
st_goto = { }
if yaccdebug:
_vf.write("\nstate %d\n\n" % st)
for p in I:
_vf.write(" (%d) %s\n" % (p.number, str(p)))
_vf.write("\n")
for p in I:
try:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action["$end"] = 0
st_actionp["$end"] = p
else:
# We are at the end of a production. Reduce!
if method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[st_actionp[a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
_vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
n_srconflict += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
_vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
n_srconflict +=1
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
# sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
n_rrconflict += 1
_vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
_vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
else:
sys.stderr.write("Unknown conflict in state %d\n" % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if Terminals.has_key(a):
g = lr0_goto(I,a)
j = _lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = st_action.get(a,None)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
sys.stderr.write("Shift/shift conflict in state %d\n" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[st_actionp[a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
st_action[a] = j
st_actionp[a] = p
if not rlevel:
n_srconflict += 1
_vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
n_srconflict +=1
_vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
_vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
else:
sys.stderr.write("Unknown conflict in state %d\n" % st)
else:
st_action[a] = j
st_actionp[a] = p
except StandardError,e:
print sys.exc_info()
raise YaccError, "Hosed in lr_parse_table"
# Print the actions associated with each terminal
if yaccdebug:
_actprint = { }
for a,p,m in actlist:
if st_action.has_key(a):
if p is st_actionp[a]:
_vf.write(" %-15s %s\n" % (a,m))
_actprint[(a,m)] = 1
_vf.write("\n")
for a,p,m in actlist:
if st_action.has_key(a):
if p is not st_actionp[a]:
if not _actprint.has_key((a,m)):
_vf.write(" ! %-15s [ %s ]\n" % (a,m))
_actprint[(a,m)] = 1
# Construct the goto table for this state
if yaccdebug:
_vf.write("\n")
nkeys = { }
for ii in I:
for s in ii.usyms:
if Nonterminals.has_key(s):
nkeys[s] = None
for n in nkeys.keys():
g = lr0_goto(I,n)
j = _lr0_cidhash.get(id(g),-1)
if j >= 0:
st_goto[n] = j
if yaccdebug:
_vf.write(" %-30s shift and go to state %d\n" % (n,j))
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
if yaccdebug:
if n_srconflict == 1:
sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
if n_srconflict > 1:
sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
if n_rrconflict == 1:
sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
if n_rrconflict > 1:
sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
# -----------------------------------------------------------------------------
# ==== LR Utility functions ====
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# lr_write_tables()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def lr_write_tables(modulename=tab_module,outputdir=''):
if isinstance(modulename, types.ModuleType):
print >>sys.stderr, "Warning module %s is inconsistent with the grammar (ignored)" % modulename
return
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_lr_method = %s
_lr_signature = %s
""" % (filename, repr(_lr_method), repr(Signature.digest())))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in _lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _lr_action.has_key(_x): _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in _lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in _lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in _lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in Productions:
if p:
if (p.func):
f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
else:
f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
else:
f.write(" None,\n")
f.write("]\n")
f.close()
except IOError,e:
print >>sys.stderr, "Unable to create '%s'" % filename
print >>sys.stderr, e
return
def lr_read_tables(module=tab_module,optimize=0):
global _lr_action, _lr_goto, _lr_productions, _lr_method
try:
if isinstance(module,types.ModuleType):
parsetab = module
else:
exec "import %s as parsetab" % module
if (optimize) or (Signature.digest() == parsetab._lr_signature):
_lr_action = parsetab._lr_action
_lr_goto = parsetab._lr_goto
_lr_productions = parsetab._lr_productions
_lr_method = parsetab._lr_method
return 1
else:
return 0
except (ImportError,AttributeError):
return 0
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build the parser module
# -----------------------------------------------------------------------------
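# Typical use from a grammar module looks roughly like the following (the
# import path, input data and lexer below are assumptions made only for
# illustration, not something defined in this file):
#
#     import yacc
#     parser = yacc.yacc(method='LALR', debug=0)
#     result = parser.parse(data, lexer=mylexer)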
def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
global yaccdebug
yaccdebug = debug
initialize_vars()
files = { }
error = 0
# Add parsing method to signature
Signature.update(method)
# If a "module" parameter was supplied, extract its dictionary.
# Note: a module may in fact be an instance as well.
if module:
# User supplied a module object.
if isinstance(module, types.ModuleType):
ldict = module.__dict__
elif isinstance(module, _INSTANCETYPE):
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = { }
for i in _items:
ldict[i[0]] = i[1]
else:
raise ValueError,"Expected a module"
else:
# No module given. We might be able to get information from the caller.
# Throw an exception and unwind the traceback to get the globals
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
f = f.f_back # Walk out to our calling function
if f.f_globals is f.f_locals: # Collect global and local variations from caller
ldict = f.f_globals
else:
ldict = f.f_globals.copy()
ldict.update(f.f_locals)
# Add starting symbol to signature
if not start:
start = ldict.get("start",None)
if start:
Signature.update(start)
# Look for error handler
ef = ldict.get('p_error',None)
if ef:
if isinstance(ef,types.FunctionType):
ismethod = 0
elif isinstance(ef, types.MethodType):
ismethod = 1
else:
raise YaccError,"'p_error' defined, but is not a function or method."
eline = ef.func_code.co_firstlineno
efile = ef.func_code.co_filename
files[efile] = None
if (ef.func_code.co_argcount != 1+ismethod):
raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
global Errorfunc
Errorfunc = ef
else:
print >>sys.stderr, "yacc: Warning. no p_error() function is defined."
# If running in optimized mode. We're going to read tables instead
if (optimize and lr_read_tables(tabmodule,1)):
# Read parse table
del Productions[:]
for p in _lr_productions:
if not p:
Productions.append(None)
else:
m = MiniProduction()
m.name = p[0]
m.len = p[1]
m.file = p[3]
m.line = p[4]
if p[2]:
m.func = ldict[p[2]]
Productions.append(m)
else:
# Get the tokens map
if (module and isinstance(module,_INSTANCETYPE)):
tokens = getattr(module,"tokens",None)
else:
tokens = ldict.get("tokens",None)
if not tokens:
raise YaccError,"module does not define a list 'tokens'"
if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
raise YaccError,"tokens must be a list or tuple."
# Check to see if a requires dictionary is defined.
requires = ldict.get("require",None)
if requires:
if not (isinstance(requires,types.DictType)):
raise YaccError,"require must be a dictionary."
for r,v in requires.items():
try:
if not (isinstance(v,types.ListType)):
raise TypeError
v1 = [x.split(".") for x in v]
Requires[r] = v1
except StandardError:
print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r
        # Build the dictionary of terminals.  We record an empty list in the
        # dictionary to track whether or not a terminal is actually
        # used in the grammar
if 'error' in tokens:
print >>sys.stderr, "yacc: Illegal token 'error'. Is a reserved word."
raise YaccError,"Illegal token name"
for n in tokens:
if Terminals.has_key(n):
print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
Terminals[n] = [ ]
Terminals['error'] = [ ]
# Get the precedence map (if any)
prec = ldict.get("precedence",None)
if prec:
if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
raise YaccError,"precedence must be a list or tuple."
add_precedence(prec)
Signature.update(repr(prec))
for n in tokens:
if not Precedence.has_key(n):
Precedence[n] = ('right',0) # Default, right associative, 0 precedence
# Get the list of built-in functions with p_ prefix
symbols = [ldict[f] for f in ldict.keys()
if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
and ldict[f].__name__ != 'p_error')]
# Check for non-empty symbols
if len(symbols) == 0:
raise YaccError,"no rules of the form p_rulename are defined."
# Sort the symbols by line number
symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
# Add all of the symbols to the grammar
for f in symbols:
if (add_function(f)) < 0:
error += 1
else:
files[f.func_code.co_filename] = None
# Make a signature of the docstrings
for f in symbols:
if f.__doc__:
Signature.update(f.__doc__)
lr_init_vars()
if error:
raise YaccError,"Unable to construct parser."
if not lr_read_tables(tabmodule):
# Validate files
for filename in files.keys():
if not validate_file(filename):
error = 1
# Validate dictionary
validate_dict(ldict)
if start and not Prodnames.has_key(start):
raise YaccError,"Bad starting symbol '%s'" % start
augment_grammar(start)
error = verify_productions(cycle_check=check_recursion)
            otherfunc = [ldict[f] for f in ldict.keys()
               if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] != 'p_')]
# Check precedence rules
if check_precedence():
error = 1
if error:
raise YaccError,"Unable to construct parser."
build_lritems()
compute_first1()
compute_follow(start)
if method in ['SLR','LALR']:
lr_parse_table(method)
else:
raise YaccError, "Unknown parsing method '%s'" % method
if write_tables:
lr_write_tables(tabmodule,outputdir)
if yaccdebug:
try:
f = open(os.path.join(outputdir,debugfile),"w")
f.write(_vfc.getvalue())
f.write("\n\n")
f.write(_vf.getvalue())
f.close()
except IOError,e:
print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e
# Made it here. Create a parser object and set up its internal state.
# Set global parse() method to bound method of parser object.
p = Parser("xyzzy")
p.productions = Productions
p.errorfunc = Errorfunc
p.action = _lr_action
p.goto = _lr_goto
p.method = _lr_method
p.require = Requires
global parse
parse = p.parse
global parser
parser = p
# Clean up all of the globals we created
if (not optimize):
yacc_cleanup()
return p
# yacc_cleanup function. Delete all of the global variables
# used during table construction
def yacc_cleanup():
global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
global Productions, Prodnames, Prodmap, Terminals
global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
global Errorfunc, Signature, Requires
del Productions, Prodnames, Prodmap, Terminals
del Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
del Errorfunc, Signature, Requires
global _vf, _vfc
del _vf, _vfc
# Stub that raises an error if parsing is attempted without first calling yacc()
def parse(*args,**kwargs):
raise YaccError, "yacc: No parser built with yacc()"
|
{
"content_hash": "f05404c39e1d872855ec45eb900cc771",
"timestamp": "",
"source": "github",
"line_count": 2843,
"max_line_length": 172,
"avg_line_length": 37.67710165318326,
"alnum_prop": 0.4650939168751634,
"repo_name": "strawlab/pyopy",
"id": "bf3a30b986b38d81271638cb123ad1dc89d336a2",
"size": "109649",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pyopy/externals/ompc/yacc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "13490"
},
{
"name": "Matlab",
"bytes": "306"
},
{
"name": "Python",
"bytes": "1003257"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
}
|
from info import __doc__
from numpy.version import version as __version__
import multiarray
import umath
import _internal # for freeze programs
import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from numeric import *
from fromnumeric import *
import defchararray as char
import records as rec
from records import *
from memmap import *
from defchararray import chararray
import scalarmath
from function_base import *
from machar import *
from getlimits import *
from shape_base import *
del nt
from fromnumeric import amax as max, amin as min, \
round_ as round
from numeric import absolute as abs
__all__ = ['char','rec','memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
{
"content_hash": "2231264465b930c8906805c9a6495357",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 51,
"avg_line_length": 23.925,
"alnum_prop": 0.7053291536050157,
"repo_name": "stefanv/numpy",
"id": "fa7df13d8930c00091e5fe9598d625df8ad9c4e5",
"size": "958",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/core/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6184633"
},
{
"name": "C++",
"bytes": "297087"
},
{
"name": "CSS",
"bytes": "8887"
},
{
"name": "Fortran",
"bytes": "14157"
},
{
"name": "Objective-C",
"bytes": "135"
},
{
"name": "Perl",
"bytes": "458"
},
{
"name": "Python",
"bytes": "5338917"
},
{
"name": "Shell",
"bytes": "3545"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ServiceSasParameters(Model):
"""The parameters to list service SAS credentials of a speicific resource.
:param canonicalized_resource: The canonical path to the signed resource.
:type canonicalized_resource: str
:param resource: The signed services accessible with the service SAS.
Possible values include: Blob (b), Container (c), File (f), Share (s).
Possible values include: 'b', 'c', 'f', 's'
:type resource: str or
~azure.mgmt.storage.v2017_06_01.models.SignedResource
:param permissions: The signed permissions for the service SAS. Possible
values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create
(c), Update (u) and Process (p). Possible values include: 'r', 'd', 'w',
'l', 'a', 'c', 'u', 'p'
:type permissions: str or
~azure.mgmt.storage.v2017_06_01.models.Permissions
:param ip_address_or_range: An IP address or a range of IP addresses from
which to accept requests.
:type ip_address_or_range: str
:param protocols: The protocol permitted for a request made with the
account SAS. Possible values include: 'https,http', 'https'
:type protocols: str or
~azure.mgmt.storage.v2017_06_01.models.HttpProtocol
:param shared_access_start_time: The time at which the SAS becomes valid.
:type shared_access_start_time: datetime
:param shared_access_expiry_time: The time at which the shared access
signature becomes invalid.
:type shared_access_expiry_time: datetime
:param identifier: A unique value up to 64 characters in length that
correlates to an access policy specified for the container, queue, or
table.
:type identifier: str
:param partition_key_start: The start of partition key.
:type partition_key_start: str
:param partition_key_end: The end of partition key.
:type partition_key_end: str
:param row_key_start: The start of row key.
:type row_key_start: str
:param row_key_end: The end of row key.
:type row_key_end: str
:param key_to_sign: The key to sign the account SAS token with.
:type key_to_sign: str
:param cache_control: The response header override for cache control.
:type cache_control: str
:param content_disposition: The response header override for content
disposition.
:type content_disposition: str
:param content_encoding: The response header override for content
encoding.
:type content_encoding: str
:param content_language: The response header override for content
language.
:type content_language: str
:param content_type: The response header override for content type.
:type content_type: str
"""
_validation = {
'canonicalized_resource': {'required': True},
'resource': {'required': True},
'identifier': {'max_length': 64},
}
_attribute_map = {
'canonicalized_resource': {'key': 'canonicalizedResource', 'type': 'str'},
'resource': {'key': 'signedResource', 'type': 'str'},
'permissions': {'key': 'signedPermission', 'type': 'str'},
'ip_address_or_range': {'key': 'signedIp', 'type': 'str'},
'protocols': {'key': 'signedProtocol', 'type': 'HttpProtocol'},
'shared_access_start_time': {'key': 'signedStart', 'type': 'iso-8601'},
'shared_access_expiry_time': {'key': 'signedExpiry', 'type': 'iso-8601'},
'identifier': {'key': 'signedIdentifier', 'type': 'str'},
'partition_key_start': {'key': 'startPk', 'type': 'str'},
'partition_key_end': {'key': 'endPk', 'type': 'str'},
'row_key_start': {'key': 'startRk', 'type': 'str'},
'row_key_end': {'key': 'endRk', 'type': 'str'},
'key_to_sign': {'key': 'keyToSign', 'type': 'str'},
'cache_control': {'key': 'rscc', 'type': 'str'},
'content_disposition': {'key': 'rscd', 'type': 'str'},
'content_encoding': {'key': 'rsce', 'type': 'str'},
'content_language': {'key': 'rscl', 'type': 'str'},
'content_type': {'key': 'rsct', 'type': 'str'},
}
def __init__(self, canonicalized_resource, resource, permissions=None, ip_address_or_range=None, protocols=None, shared_access_start_time=None, shared_access_expiry_time=None, identifier=None, partition_key_start=None, partition_key_end=None, row_key_start=None, row_key_end=None, key_to_sign=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None):
super(ServiceSasParameters, self).__init__()
self.canonicalized_resource = canonicalized_resource
self.resource = resource
self.permissions = permissions
self.ip_address_or_range = ip_address_or_range
self.protocols = protocols
self.shared_access_start_time = shared_access_start_time
self.shared_access_expiry_time = shared_access_expiry_time
self.identifier = identifier
self.partition_key_start = partition_key_start
self.partition_key_end = partition_key_end
self.row_key_start = row_key_start
self.row_key_end = row_key_end
self.key_to_sign = key_to_sign
self.cache_control = cache_control
self.content_disposition = content_disposition
self.content_encoding = content_encoding
self.content_language = content_language
self.content_type = content_type
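# A minimal construction sketch (the account, container and permission values
# below are hypothetical and serve only to show the required arguments):
#
#     params = ServiceSasParameters(
#         canonicalized_resource='/blob/myaccount/mycontainer',
#         resource='c',
#         permissions='rl',
#     )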
|
{
"content_hash": "32789151fff1bdcdd5c8945eab2bedb0",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 414,
"avg_line_length": 50.74766355140187,
"alnum_prop": 0.6556169429097606,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "e9e4b615799c248388f9626c318a10eb5189bea2",
"size": "5904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/models/service_sas_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import asyncore
import base64
import mimetypes
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from email import charset, message_from_binary_file, message_from_bytes
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr
from io import StringIO
from pathlib import Path
from smtplib import SMTP, SMTPAuthenticationError, SMTPException
from ssl import SSLError
from unittest import mock
from django.core import mail
from django.core.mail import (
DNS_NAME, EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils.translation import gettext_lazy
class HeadersCheckMixin:
def assertMessageHasHeaders(self, message, headers):
"""
Asserts that the `message` has all `headers`.
message: can be an instance of an email.Message subclass or a string
with the contents of an email message.
headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, bytes):
message = message_from_bytes(message)
msg_headers = set(message.items())
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
'the following headers: %s' % (headers - msg_headers),)
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def get_decoded_attachments(self, django_message):
"""
        Encode the specified django.core.mail.message.EmailMessage, then decode
        it using Python's email.parser module, and return a list of
        (filename, content, mimetype) tuples, one per attachment of the message.
"""
msg_bytes = django_message.message().as_bytes()
email_message = message_from_bytes(msg_bytes)
def iter_attachments():
for i in email_message.walk():
if i.get_content_disposition() == 'attachment':
filename = i.get_filename()
content = i.get_payload(decode=True)
mimetype = i.get_content_type()
yield filename, content, mimetype
return list(iter_attachments())
def test_ascii(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com, other@example.com')
def test_header_omitted_for_no_to_recipients(self):
message = EmailMessage('Subject', 'Content', 'from@example.com', cc=['cc@example.com']).message()
self.assertNotIn('To', message)
def test_recipients_with_empty_strings(self):
"""
Empty strings in various recipient arguments are always stripped
off the final recipient list.
"""
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com', ''],
cc=['cc@example.com', ''],
bcc=['', 'bcc@example.com'],
reply_to=['', None],
)
self.assertEqual(
email.recipients(),
['to@example.com', 'cc@example.com', 'bcc@example.com']
)
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
# Test multiple CC with multiple To
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'],
cc=['cc@example.com', 'cc.other@example.com']
)
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(
email.recipients(),
['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com']
)
# Testing with Bcc
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'],
cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com']
)
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(
email.recipients(),
['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com']
)
def test_cc_headers(self):
message = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
cc=['foo@example.com'], headers={'Cc': 'override@example.com'},
).message()
self.assertEqual(message['Cc'], 'override@example.com')
def test_cc_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'Cc': 'foo@example.com'},
).message()
self.assertEqual(message['Cc'], 'foo@example.com')
def test_reply_to(self):
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com'],
reply_to=['reply_to@example.com'],
)
message = email.message()
self.assertEqual(message['Reply-To'], 'reply_to@example.com')
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com'],
reply_to=['reply_to1@example.com', 'reply_to2@example.com']
)
message = email.message()
self.assertEqual(message['Reply-To'], 'reply_to1@example.com, reply_to2@example.com')
def test_recipients_as_tuple(self):
email = EmailMessage(
'Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'),
cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',)
)
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(
email.recipients(),
['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com']
)
def test_recipients_as_string(self):
with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
EmailMessage(to='foo@example.com')
with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
EmailMessage(cc='foo@example.com')
with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
EmailMessage(bcc='foo@example.com')
with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
EmailMessage(reply_to='reply_to@example.com')
def test_header_injection(self):
msg = "Header values can't contain newlines "
email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
with self.assertRaisesMessage(BadHeaderError, msg):
email.message()
email = EmailMessage(
gettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com']
)
with self.assertRaisesMessage(BadHeaderError, msg):
email.message()
with self.assertRaisesMessage(BadHeaderError, msg):
EmailMessage(
'Subject',
'Content',
'from@example.com',
['Name\nInjection test <to@example.com>'],
).message()
def test_space_continuation(self):
"""
Test for space continuation character in long (ASCII) subject headers (#7747)
"""
email = EmailMessage(
'Long subject lines that get wrapped should contain a space '
'continuation character to get expected behavior in Outlook and Thunderbird',
'Content', 'from@example.com', ['to@example.com']
)
message = email.message()
self.assertEqual(
message['Subject'].encode(),
b'Long subject lines that get wrapped should contain a space continuation\n'
b' character to get expected behavior in Outlook and Thunderbird'
)
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
self.assertMessageHasHeaders(email.message(), {
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', 'from@example.com'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', 'to@example.com'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
})
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'],
headers={'To': 'mailing-list@example.com'})
message = email.message()
self.assertEqual(message['To'], 'mailing-list@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'])
message = email.message()
self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
def test_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', 'bounce@example.com',
headers={'To': 'to@example.com'},
).message()
self.assertEqual(message['To'], 'to@example.com')
def test_reply_to_header(self):
"""
Specifying 'Reply-To' in headers should override reply_to.
"""
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
reply_to=['foo@example.com'], headers={'Reply-To': 'override@example.com'},
)
message = email.message()
self.assertEqual(message['Reply-To'], 'override@example.com')
def test_reply_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', 'from@example.com', ['to@example.com'],
headers={'Reply-To': 'reply_to@example.com'},
).message()
self.assertEqual(message['Reply-To'], 'reply_to@example.com')
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains Unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage(
'Subject', 'Content', 'from@example.com',
['"Firstname Sürname" <to@example.com>', 'other@example.com'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com'
)
email = EmailMessage(
'Subject', 'Content', 'from@example.com',
['"Sürname, Firstname" <to@example.com>', 'other@example.com'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com'
)
def test_unicode_headers(self):
email = EmailMessage(
'Gżegżółka', 'Content', 'from@example.com', ['to@example.com'],
headers={
'Sender': '"Firstname Sürname" <sender@example.com>',
'Comments': 'My Sürname is non-ASCII',
},
)
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
from_email, to = 'from@example.com', '"Sürname, Firstname" <to@example.com>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_safe_mime_multipart_with_attachments(self):
"""
EmailMultiAlternatives includes alternatives if the body is empty and
it has attachments.
"""
msg = EmailMultiAlternatives(body='')
html_content = '<p>This is <strong>html</strong></p>'
msg.attach_alternative(html_content, 'text/html')
msg.attach('example.txt', 'Text file content', 'text/plain')
self.assertIn(html_content, msg.message().as_string())
def test_none_body(self):
msg = EmailMessage('subject', None, 'from@example.com', ['to@example.com'])
self.assertEqual(msg.body, '')
self.assertEqual(msg.message().get_payload(), '')
@mock.patch('socket.getfqdn', return_value='漢字')
def test_non_ascii_dns_non_unicode_email(self, mocked_getfqdn):
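        # DNS_NAME caches the fqdn in _fqdn; clear it so the mocked getfqdn() value is used.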
delattr(DNS_NAME, '_fqdn')
email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'])
email.encoding = 'iso-8859-1'
self.assertIn('@xn--p8s937b>', email.message()['Message-ID'])
def test_encoding(self):
"""
        Regression for #12791 - Encode body correctly with encodings other
        than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'other@example.com')})
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
        # Make sure MIME attachments also work correctly with encodings other than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, {
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(
payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
)
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_attachments_two_tuple(self):
msg = EmailMessage(attachments=[('filename1', 'content1')])
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'filename1')
self.assertEqual(content, b'content1')
self.assertEqual(mimetype, 'application/octet-stream')
def test_attachments_MIMEText(self):
txt = MIMEText('content1')
msg = EmailMessage(attachments=[txt])
payload = msg.message().get_payload()
self.assertEqual(payload[0], txt)
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
('file.txt', 'text/plain'),
('file.png', 'image/png'),
('file_txt', None),
('file_png', None),
('file_txt.png', 'image/png'),
('file_png.txt', 'text/plain'),
('file.eml', 'message/rfc822'),
)
test_mimetypes = ['text/plain', 'image/png', None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(os.path.dirname(__file__), 'attachments', basename)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_attach_text_as_bytes(self):
msg = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
msg.attach('file.txt', b'file content')
sent_num = msg.send()
self.assertEqual(sent_num, 1)
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'file content')
self.assertEqual(mimetype, 'text/plain')
def test_attach_utf8_text_as_bytes(self):
"""
Non-ASCII characters encoded as valid UTF-8 are correctly transported
and decoded.
"""
msg = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'\xc3\xa4')
self.assertEqual(mimetype, 'text/plain')
def test_attach_non_utf8_text_as_bytes(self):
"""
        Binary data that can't be decoded as UTF-8 falls back to the
        'application/octet-stream' MIME type instead of being decoded.
"""
msg = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
msg.attach('file.txt', b'\xff') # Invalid UTF-8.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
# Content should be passed through unmodified.
self.assertEqual(content, b'\xff')
self.assertEqual(mimetype, 'application/octet-stream')
def test_attach_mimetext_content_mimetype(self):
email_msg = EmailMessage()
txt = MIMEText('content')
msg = (
'content and mimetype must not be given when a MIMEBase instance '
'is provided.'
)
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach(txt, content='content')
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach(txt, mimetype='text/plain')
def test_attach_content_none(self):
email_msg = EmailMessage()
msg = 'content must be provided.'
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach('file.txt', mimetype="application/pdf")
def test_dummy_backend(self):
"""
        Make sure that the dummy backend returns the correct number of sent messages.
"""
connection = dummy.EmailBackend()
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
        Make sure that get_connection() accepts arbitrary keyword arguments
        that might be used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.locmem.EmailBackend'),
locmem.EmailBackend
)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.console.EmailBackend'),
console.EmailBackend
)
with tempfile.TemporaryDirectory() as tmp_dir:
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
filebased.EmailBackend
)
if sys.platform == 'win32':
msg = '_getfullpathname: path should be string, bytes or os.PathLike, not object'
else:
msg = 'expected str, bytes or os.PathLike object, not object'
with self.assertRaisesMessage(TypeError, msg):
mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=object())
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', 'nobody@example.com')],
MANAGERS=[('nobody', 'nobody@example.com')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(
'Subject', 'From the future', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
self.assertNotIn(b'>From the future', email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage(
'Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage(
'Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage(
'Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
msg = EmailMessage(
'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com',
['to@example.com'], headers={'From': 'from@example.com'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage(
'Child Subject', 'Some body of child message', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
def test_custom_utf8_encoding(self):
"""A UTF-8 charset with a custom body encoding is respected."""
body = 'Body with latin characters: àáä.'
msg = EmailMessage('Subject', body, 'bounce@example.com', ['to@example.com'])
encoding = charset.Charset('utf-8')
encoding.body_encoding = charset.QP
msg.encoding = encoding
message = msg.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
self.assertEqual(message.get_payload(), encoding.body_encode(body))
def test_sanitize_address(self):
"""Email addresses are properly sanitized."""
for email_address, encoding, expected_result in (
# ASCII addresses.
('to@example.com', 'ascii', 'to@example.com'),
('to@example.com', 'utf-8', 'to@example.com'),
(('A name', 'to@example.com'), 'ascii', 'A name <to@example.com>'),
(
('A name', 'to@example.com'),
'utf-8',
'A name <to@example.com>',
),
('localpartonly', 'ascii', 'localpartonly'),
# ASCII addresses with display names.
('A name <to@example.com>', 'ascii', 'A name <to@example.com>'),
('A name <to@example.com>', 'utf-8', 'A name <to@example.com>'),
('"A name" <to@example.com>', 'ascii', 'A name <to@example.com>'),
('"A name" <to@example.com>', 'utf-8', 'A name <to@example.com>'),
# Unicode addresses (supported per RFC-6532).
('tó@example.com', 'utf-8', '=?utf-8?b?dMOz?=@example.com'),
('to@éxample.com', 'utf-8', 'to@xn--xample-9ua.com'),
(
('Tó Example', 'tó@example.com'),
'utf-8',
'=?utf-8?q?T=C3=B3_Example?= <=?utf-8?b?dMOz?=@example.com>',
),
# Unicode addresses with display names.
(
'Tó Example <tó@example.com>',
'utf-8',
'=?utf-8?q?T=C3=B3_Example?= <=?utf-8?b?dMOz?=@example.com>',
),
('To Example <to@éxample.com>', 'ascii', 'To Example <to@xn--xample-9ua.com>'),
(
'To Example <to@éxample.com>',
'utf-8',
'To Example <to@xn--xample-9ua.com>',
),
# Addresses with two @ signs.
('"to@other.com"@example.com', 'utf-8', r'"to@other.com"@example.com'),
(
'"to@other.com" <to@example.com>',
'utf-8',
'"to@other.com" <to@example.com>',
),
(
('To Example', 'to@other.com@example.com'),
'utf-8',
'To Example <"to@other.com"@example.com>',
),
# Addresses with long unicode display names.
(
'Tó Example very long' * 4 + ' <to@example.com>',
'utf-8',
'=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT'
'=C3=B3_Example_?=\n'
' =?utf-8?q?very_longT=C3=B3_Example_very_long?= '
'<to@example.com>',
),
(
('Tó Example very long' * 4, 'to@example.com'),
'utf-8',
'=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT'
'=C3=B3_Example_?=\n'
' =?utf-8?q?very_longT=C3=B3_Example_very_long?= '
'<to@example.com>',
),
# Address with long display name and unicode domain.
(
('To Example very long' * 4, 'to@exampl€.com'),
'utf-8',
'To Example very longTo Example very longTo Example very longT'
'o Example very\n'
' long <to@xn--exampl-nc1c.com>'
)
):
with self.subTest(email_address=email_address, encoding=encoding):
self.assertEqual(sanitize_address(email_address, encoding), expected_result)
def test_sanitize_address_invalid(self):
for email_address in (
# Invalid address with two @ signs.
'to@other.com@example.com',
# Invalid address without the quotes.
'to@other.com <to@example.com>',
# Other invalid addresses.
'@',
'to@',
'@example.com',
):
with self.subTest(email_address=email_address):
with self.assertRaises(ValueError):
sanitize_address(email_address, encoding='utf-8')
def test_sanitize_address_header_injection(self):
msg = 'Invalid address; address parts cannot contain newlines.'
tests = [
'Name\nInjection <to@example.com>',
('Name\nInjection', 'to@xample.com'),
'Name <to\ninjection@example.com>',
('Name', 'to\ninjection@example.com'),
]
for email_address in tests:
with self.subTest(email_address=email_address):
with self.assertRaisesMessage(ValueError, msg):
sanitize_address(email_address, encoding='utf-8')
def test_email_multi_alternatives_content_mimetype_none(self):
email_msg = EmailMultiAlternatives()
msg = 'Both content and mimetype must be provided.'
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative(None, 'text/html')
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative('<p>content</p>', None)
@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):
@override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_utc(self):
"""
EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
"""
email = EmailMessage('Subject', 'Body', 'bounce@example.com', ['to@example.com'])
self.assertTrue(email.message()['Date'].endswith('-0000'))
@override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_localtime(self):
"""
EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
"""
email = EmailMessage('Subject', 'Body', 'bounce@example.com', ['to@example.com'])
self.assertTrue(email.message()['Date'].endswith('+0100')) # Africa/Algiers is UTC+1
class PythonGlobalState(SimpleTestCase):
"""
Tests for #12422 -- Django smarts (#2472/#11212) with charset of utf-8 text
parts shouldn't pollute global email Python package charset registry when
django.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_7bit(self):
txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_latin(self):
txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
def flush_mailbox(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox])
)
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "from@example.com")
self.assertEqual(message.get_all("to"), ["to@example.com"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(message.get_payload(decode=True).decode(), 'Je t\'aime très fort')
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by the RFC:
https://tools.ietf.org/html/rfc5322#section-2.1.1
        Message bodies containing longer lines are converted to Quoted-Printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
# Unencoded body length is < 998 (840) but > 998 when utf-8 encoded.
email = EmailMessage('Subject', 'В южных морях ' * 60, 'from@example.com', ['to@example.com'])
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
# send_messages() may take a list or an iterator.
emails_lists = ([email1, email2], iter((email1, email2)))
for emails_list in emails_lists:
num_sent = mail.get_connection().send_messages(emails_list)
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), 'Content1')
self.assertEqual(messages[1].get_payload(), 'Content2')
self.flush_mailbox()
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
["to@example.com"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
def test_plaintext_send_mail(self):
"""
Test send_mail without the html_message
regression test for adding html_message parameter to send_mail()
"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', 'nobody@example.com')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', 'nobody+admin@example.com')],
MANAGERS=[('nobody', 'nobody+manager@example.com')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_wrong_admins_managers(self):
tests = (
'test@example.com',
('test@example.com',),
['test@example.com', 'other@example.com'],
('test@example.com', 'other@example.com'),
)
for setting, mail_func in (
('ADMINS', mail_admins),
('MANAGERS', mail_managers),
):
msg = 'The %s setting must be a list of 2-tuples.' % setting
for value in tests:
with self.subTest(setting=setting, value=value), self.settings(**{setting: value}):
with self.assertRaisesMessage(ValueError, msg):
mail_func('subject', 'content')
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'to@example.com'),
('Cc', 'cc@example.com')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = gettext_lazy
self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'django')
self.flush_mailbox()
m = EmailMessage(
'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
cc=[_('cc1'), _('cc2')],
bcc=[_('bcc')],
reply_to=[_('reply')],
)
self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'to1, to2')
self.assertEqual(message.get('cc'), 'cc1, cc2')
self.assertEqual(message.get('Reply-To'), 'reply')
def test_close_connection(self):
"""
Connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
conn.close()
def test_use_as_contextmanager(self):
"""
The connection can be used as a contextmanager.
"""
opened = [False]
closed = [False]
conn = mail.get_connection(username='', password='')
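        # Replace open()/close() with stubs that record calls, so the test can
        # verify the context manager opens the connection and closes it on exit.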
def open():
opened[0] = True
conn.open = open
def close():
closed[0] = True
conn.close = close
with conn as same_conn:
self.assertTrue(opened[0])
self.assertIs(same_conn, conn)
self.assertFalse(closed[0])
self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super().tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
        Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super().setUp()
self.tmp_dir = self.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super().tearDown()
def mkdtemp(self):
return tempfile.mkdtemp()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
session = fp.read().split(b'\n' + (b'-' * 79) + b'\n')
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage(
'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
headers={'From': 'from@example.com'},
)
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@example.com')
self.assertEqual(message.get('to'), 'to@example.com')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class FileBackendPathLibTests(FileBackendTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super().setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super().tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
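        # The console backend separates messages with a line of 79 dashes.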
messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n')
return [message_from_bytes(m.encode()) for m in messages if m]
def test_console_stream_kwarg(self):
"""
The console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
message = s.getvalue().split('\n' + ('-' * 79) + '\n')[0].encode()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'to@example.com')})
self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
def collect_incoming_data(self, data):
try:
smtpd.SMTPChannel.collect_incoming_data(self, data)
except UnicodeDecodeError:
# Ignore decode error in SSL/TLS connection tests as the test only
# cares whether the connection attempt was made.
pass
def smtp_AUTH(self, arg):
if arg == 'CRAM-MD5':
# This is only the first part of the login process. But it's enough
# for our tests.
challenge = base64.b64encode(b'somerandomstring13579')
self.push('334 %s' % challenge.decode())
else:
self.push('502 Error: login "%s" not implemented' % arg)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
channel_class = FakeSMTPChannel
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, decode_data=True, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
data = data.encode()
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
# According to the spec, mailfrom does not necessarily match the
# From header - this is the case where the local part isn't
# encoded, so try to correct that.
lp, domain = mailfrom.split('@', 1)
lp = Header(lp, 'utf-8').encode()
mailfrom = '@'.join([lp, domain])
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
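    # start() blocks on an Event until run() has actually entered the serving loop.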
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class FakeAUTHSMTPConnection(SMTP):
"""
    An SMTP connection pretending to support the AUTH command. It does not, but
    it at least allows testing the first part of the AUTH process.
"""
def ehlo(self, name=''):
response = SMTP.ehlo(self, name=name)
self.esmtp_features.update({
'auth': 'CRAM-MD5 PLAIN LOGIN',
})
return response
class SMTPBackendTestsBase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.addClassCleanup(cls._settings_override.disable)
cls.server.start()
cls.addClassCleanup(cls.server.stop)
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
def setUp(self):
super().setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super().tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_auth_attempted(self):
"""
        Opening the backend with a non-empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username='not empty username', password='not empty password')
with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'):
with backend:
pass
def test_server_open(self):
"""
open() returns whether it opened a connection.
"""
backend = smtp.EmailBackend(username='', password='')
self.assertIsNone(backend.connection)
opened = backend.open()
backend.close()
self.assertIs(opened, True)
def test_reopen_connection(self):
backend = smtp.EmailBackend()
# Simulate an already open connection.
backend.connection = mock.Mock(spec=object())
self.assertIs(backend.open(), False)
def test_server_login(self):
"""
Even if the Python SMTP server doesn't support authentication, the
login process starts and the appropriate exception is raised.
"""
class CustomEmailBackend(smtp.EmailBackend):
connection_class = FakeAUTHSMTPConnection
backend = CustomEmailBackend(username='username', password='password')
with self.assertRaises(SMTPAuthenticationError):
with backend:
pass
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
def test_ssl_tls_mutually_exclusive(self):
msg = (
'EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set '
'one of those settings to True.'
)
with self.assertRaisesMessage(ValueError, msg):
smtp.EmailBackend(use_ssl=True, use_tls=True)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, 'foo')
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile='bar')
self.assertEqual(backend.ssl_certfile, 'bar')
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_certfile)
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, 'foo')
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile='bar')
self.assertEqual(backend.ssl_keyfile, 'bar')
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_keyfile)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'):
with backend:
pass
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
with self.assertRaises(SSLError):
with backend:
pass
def test_connection_timeout_default(self):
"""The connection's timeout value is None by default."""
connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
self.assertIsNone(connection.timeout)
def test_connection_timeout_custom(self):
"""The timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault('timeout', 42)
super().__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
def test_email_msg_uses_crlf(self):
"""#23063 -- RFC-compliant messages are sent over SMTP."""
send = SMTP.send
try:
smtp_messages = []
def mock_send(self, s):
smtp_messages.append(s)
return send(self, s)
SMTP.send = mock_send
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
mail.get_connection().send_messages([email])
# Find the actual message
msg = None
for i, m in enumerate(smtp_messages):
if m[:4] == 'data':
msg = smtp_messages[i + 1]
break
self.assertTrue(msg)
msg = msg.decode()
# The message only contains CRLF and not combinations of CRLF, LF, and CR.
msg = msg.replace('\r\n', '')
self.assertNotIn('\r', msg)
self.assertNotIn('\n', msg)
finally:
SMTP.send = send
def test_send_messages_after_open_failed(self):
"""
send_messages() shouldn't try to send messages if open() raises an
exception after initializing the connection.
"""
backend = smtp.EmailBackend()
# Simulate connection initialization success and a subsequent
# connection exception.
backend.connection = mock.Mock(spec=object())
backend.open = lambda: None
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
self.assertEqual(backend.send_messages([email]), 0)
def test_send_messages_empty_list(self):
backend = smtp.EmailBackend()
backend.connection = mock.Mock(spec=object())
self.assertEqual(backend.send_messages([]), 0)
def test_send_messages_zero_sent(self):
"""A message isn't sent if it doesn't have any recipients."""
backend = smtp.EmailBackend()
backend.connection = mock.Mock(spec=object())
email = EmailMessage('Subject', 'Content', 'from@example.com', to=[])
sent = backend.send_messages([email])
self.assertEqual(sent, 0)
class SMTPBackendStoppedServerTests(SMTPBackendTestsBase):
"""
These tests require a separate class, because the FakeSMTPServer is shut
down in setUpClass(), and it cannot be restarted ("RuntimeError: threads
can only be started once").
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backend = smtp.EmailBackend(username='', password='')
cls.server.stop()
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(ConnectionError):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
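def _example_smtp_backend():
    """Hedged sketch, not part of the test suite: constructing the SMTP backend
    the tests above exercise, passing the options directly as keyword arguments
    instead of via the EMAIL_* settings. Host and credentials are placeholders.
    With fail_silently=True a connection failure is swallowed and no messages
    are reported as sent.
    """
    backend = smtp.EmailBackend(
        host='smtp.example.com', port=587,
        username='user', password='secret',
        use_tls=True, timeout=10, fail_silently=True,
    )
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
    return backend.send_messages([email])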
|
{
"content_hash": "4b421837196442c25a7f9ddfea30219b",
"timestamp": "",
"source": "github",
"line_count": 1723,
"max_line_length": 119,
"avg_line_length": 41.75914103308183,
"alnum_prop": 0.6019374296396158,
"repo_name": "atul-bhouraskar/django",
"id": "30f8252e0a88a95de05601899f1f03ec60ffd2db",
"size": "72055",
"binary": false,
"copies": "4",
"ref": "refs/heads/ticket_23424",
"path": "tests/mail/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174031"
},
{
"name": "JavaScript",
"bytes": "249623"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11309010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
from itertools import chain
def drift_subtractor(resultstable, exposuretime = 0.5):
resultstable.rename(columns={'particle':'trajectory'}, inplace=True)
resultstable.rename(columns={'frame':'slice'}, inplace=True)
resultstable = resultstable.sort_values(by=['trajectory', 'slice'])
resultstraj = resultstable.groupby(['trajectory'])
resultstable['x2'] = resultstraj['x'].transform(lambda bzz: bzz - bzz.mean())
resultstable['y2'] = resultstraj['y'].transform(lambda bzz: bzz - bzz.mean())
stuckslices = resultstable[resultstable['slice'] >= resultstable['slice'].max()]
stucktraj = resultstable[resultstable['trajectory'].isin(stuckslices['trajectory'])]
stuckgroupslice = stucktraj.groupby(['slice'])
driftx = stuckgroupslice['x2'].aggregate(np.median)
drifty = stuckgroupslice['y2'].aggregate(np.median)
drift = DataFrame({
        'slice' : Series(range(0, 1 + resultstable['slice'].max())),  # one row per slice value
'rawx' : driftx,
'rawy' : drifty
})
drift['xdrift'] = drift['rawx'].rolling(window=int(5*(1.0/exposuretime)), center=True, min_periods=1).mean()
drift['ydrift'] = drift['rawy'].rolling(window=int(5*(1.0/exposuretime)), center = True, min_periods=1).mean()
drift.to_csv("drift.csv")
mergedresults = pd.merge(left=resultstable,right=drift, how='left', left_on='slice', right_on='slice')
mergedresults = mergedresults.sort_values(by=['trajectory', 'slice'])
mergedresults['x3'] = mergedresults['x2'] - mergedresults['xdrift']
mergedresults['y3'] = mergedresults['y2'] - mergedresults['ydrift']
results = mergedresults.drop(['x2','y2', 'rawx', 'rawy','xdrift', 'ydrift'], axis=1)
fig, ax = plt.subplots()
ax.plot(drift.slice, drift.xdrift)
return results
""" Finds the mean position at each point in time for trajectories which endure from the start to the finish, and then subtracts this drift from each trajectory.
"""
def unit_converter(resultstable, exposuretime, ntconversion, micronpixel):
resultstable['time'] = resultstable['slice'] * exposuretime
resultstable['nt'] = resultstable['x3']*ntconversion
resultstable['transverse'] = resultstable['y3'] * micronpixel
return resultstable;
""" Converts from the trackpy units to units appropriate to DNA replication experiments.
"""
def spurious_removal(resultstable):
resultstraj = resultstable.groupby(['trajectory'])
startset = resultstraj['time'].aggregate(np.min) #Find the start time of each trajectory
startset = startset[startset <= 150] #Find only the trajectories which begin prior to 150 s.
resultstable = resultstable.loc[resultstable['trajectory'].isin(startset.index)] #Keep only the trajectories of interest.
resultstraj = resultstable.groupby(['trajectory'])
endset = resultstraj['time'].aggregate(np.max) #Find the end time of each trajectory
endset = endset[endset >= 150] #Find only the trajectories which continue beyond the 150 s timepoint.
resultstable = resultstable.loc[resultstable['trajectory'].isin(endset.index)] #Keep only the trajectories of interest.
return resultstable;
""" Removes trajectories which start after a certain timepoint, as well as those which do not endure beyond a certain timepoint.
"""
def baseline(resultstable, exposuretime):
resultstraj = resultstable.groupby(['trajectory'])
nucleotides = resultstraj['nt'].transform(lambda bzz: bzz - bzz.head(150*int(1.0/exposuretime)).median())
resultstable['nucleotides'] = nucleotides
del resultstable['mass'], resultstable['nt'], resultstable['x3'], resultstable['y3']
return resultstable;
""" Calculates the median position for the first 150 seconds of the trajectory, and then subtracts this from the whole trajectory.
"""
def trajectory_renumber(resultstable):
test = []
trajindex = np.unique(resultstable.trajectory, return_index=True, return_counts=True)
for i in range(0,len(trajindex[2])):
test.append([i]*trajindex[2][i])
resultstable['trajectory'] = list(chain.from_iterable(test))
del test
    resultstable = resultstable.fillna(0)
resultstable.slice = resultstable.slice.astype(int)
resultstable.trajectory = resultstable.trajectory.astype(int)
resultstable = resultstable.reset_index(drop=True)
return resultstable;
def flip_coordinates(resultstable, direction = 'x'):
maxval = resultstable[direction].max()
resultstable[direction] = -1*resultstable[direction] + maxval
return resultstable
|
{
"content_hash": "b5e9584db4176081ef71c2c651579c21",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 161,
"avg_line_length": 50.60674157303371,
"alnum_prop": 0.7320159857904085,
"repo_name": "flynn949/beadpy",
"id": "ca0db7d616ea28ef8ec8a1b0355e3d7132d5b8d5",
"size": "4504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beadpy/resultsprocessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2163608"
},
{
"name": "Python",
"bytes": "63509"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from itertools import compress
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
class Embedder:
def __init__(self,
filenames=['clean_en_US.blogs.txt',
'clean_en_US.news.txt',
'clean_en_US.twitter.txt'],
vocabulary_size=10000,
data_index=0,
num_skips=1,
skip_window=6,
batch_size=1024,
embedding_size=28,
valid_size=8,
valid_window=100,
num_sampled=4096,
num_steps=3001
):
self.filenames = filenames
self.word_list = []
self.vocabulary_size = vocabulary_size
self.dictionary = dict()
self.data = list()
self.count = [['UNK', -1]]
self.reverse_dictionary = None
self.data_index = data_index
self.num_skips = num_skips # How many times to reuse an input to generate a label.
self.skip_window = skip_window # How many words to consider left and right.
self.batch_size = batch_size
self.embedding_size = embedding_size # Dimension of the embedding vector.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
self.valid_size = valid_size # Random set of words to evaluate similarity on.
self.valid_window = valid_window # Only pick dev samples in the head of the distribution.
self.valid_examples = np.array(random.sample(range(valid_window), valid_size))
self.num_sampled = num_sampled # Number of negative examples to sample.
assert self.batch_size % self.num_skips == 0
assert self.num_skips <= 2 * self.skip_window
self.graph = tf.Graph()
self.num_steps = num_steps
def build_embedding(self):
self.read_data()
self.build_dataset()
##self.test_data()
return self.train_data()
def read_data(self):
"""Load the data from each line and put it in a list."""
print('Generating list for embedding.')
for temp_file_name in self.filenames:
with open(temp_file_name, 'r', encoding="utf8") as temp_file:
for line in temp_file:
temp_line = line.strip().split()
self.word_list.extend(temp_line)
print('List is %d words long.' % len(self.word_list))
def build_dataset(self):
self.count.extend(collections.Counter(self.word_list).most_common(self.vocabulary_size - 1))
for word, _ in self.count:
self.dictionary[word] = len(self.dictionary)
unk_count = 0
for word in self.word_list:
if word in self.dictionary:
index = self.dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
self.data.append(index)
self.count[0][1] = unk_count
self.reverse_dictionary = dict(zip(self.dictionary.values(), self.dictionary.keys()))
print('Most common words (+UNK)', self.count[:5])
print('Sample data', self.data[:10])
self.word_list = None
##return data, count, dictionary, reverse_dictionary
def generate_batch(self):
##global data_index
batch = np.ndarray(shape=(self.batch_size), dtype=np.int32)
labels = np.ndarray(shape=(self.batch_size, 1), dtype=np.int32)
span = 2 * self.skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(self.data[self.data_index])
self.data_index = (self.data_index + 1) % len(self.data)
for i in range(self.batch_size // self.num_skips):
target = self.skip_window # target label at the center of the buffer
targets_to_avoid = [self.skip_window]
for j in range(self.num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * self.num_skips + j] = buffer[self.skip_window]
labels[i * self.num_skips + j, 0] = buffer[target]
buffer.append(self.data[self.data_index])
self.data_index = (self.data_index + 1) % len(self.data)
return batch, labels
    def test_data(self):
        print('data:', [self.reverse_dictionary[di] for di in self.data[:8]])
        for num_skips, skip_window in [(2, 1), (4, 2)]:
            # Apply the parameters to the instance so generate_batch() samples
            # with this configuration, and restart from the top of the corpus.
            self.num_skips, self.skip_window = num_skips, skip_window
            self.data_index = 0
            batch, labels = self.generate_batch()
            print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
            print('    batch:', [self.reverse_dictionary[bi] for bi in batch])
            print('    labels:', [self.reverse_dictionary[li] for li in labels.reshape(self.batch_size)])
"""==============================PROGRESS=============================="""
def create_graph(self):
with self.graph.as_default(), tf.device('/cpu:0'):
# Input data.
self.train_dataset = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)
self.train_labels = tf.placeholder(shape=[self.batch_size, 1], dtype=tf.int32)
self.valid_dataset = tf.constant(self.valid_examples, dtype=tf.int32)
# Variables.
self.embeddings = tf.Variable(
tf.random_uniform([self.vocabulary_size, self.embedding_size], -1.0, 1.0))
self.softmax_weights = tf.Variable(
tf.random_uniform([self.vocabulary_size, self.embedding_size], -1.0, 1.0))
self.softmax_biases = tf.Variable(tf.zeros([self.vocabulary_size]))
# Model.
# Look up embeddings for inputs.
self.embed = tf.nn.embedding_lookup(self.embeddings, self.train_dataset)
##print(tf.DType.is_floating(self.embed))
##self.embed = tf.nn.embedding_lookup(self.train_dataset, self.embeddings)
# Compute the softmax loss, using a sample of the negative labels each time.
##self.loss = tf.reduce_mean(
## tf.nn.sampled_softmax_loss(self.softmax_weights,
## self.softmax_biases,
## self.train_labels,
## self.embed,
## self.num_sampled,
##
## self.vocabulary_size))
self.loss = tf.reduce_mean(tf.nn.nce_loss(self.softmax_weights,
self.softmax_biases,
self.train_labels,
self.embed,
self.num_sampled,
self.vocabulary_size))
# Optimizer.
# Note: The optimizer will optimize the softmax_weights AND the embeddings.
# This is because the embeddings are defined as a variable quantity and the
# optimizer's `minimize` method will by default modify all variable quantities
# that contribute to the tensor it is passed.
# See docs on `tf.train.Optimizer.minimize()` for more details.
self.optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.embeddings), 1, keepdims=True))
self.normalized_embeddings = self.embeddings / self.norm
self.valid_embeddings = tf.nn.embedding_lookup(
self.normalized_embeddings, self.valid_dataset)
self.similarity = tf.matmul(self.valid_embeddings,
tf.transpose(self.normalized_embeddings))
def run_graph(self):
with self.graph.as_default(), tf.device('/cpu:0'):
with tf.Session(graph=self.graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
average_loss = 0
for step in range(self.num_steps):
batch_data, batch_labels = self.generate_batch()
feed_dict = {self.train_dataset : batch_data, self.train_labels : batch_labels}
_, l = session.run([self.optimizer, self.loss], feed_dict=feed_dict)
average_loss += l
if step % 1000 == 0:
if step > 0:
average_loss = average_loss / 1000
                        # The average loss is an estimate of the loss over the last 1000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 2000 == 0:
sim = self.similarity.eval()
for i in range(self.valid_size):
valid_word = self.reverse_dictionary[self.valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = self.reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
return self.normalized_embeddings.eval()
##final_embeddings = self.normalized_embeddings.eval()
def train_data(self):
self.create_graph()
return self.run_graph()
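# Hedged helper sketch (not part of the original class): nearest-neighbour lookup
# on the matrix returned by build_embedding(). The rows of that matrix are the
# L2-normalised embeddings, so a plain dot product gives cosine similarity.
# Typical call (the word choice is illustrative only):
#   emb = embedder.build_embedding()
#   nearest_words(emb, embedder.reverse_dictionary, embedder.dictionary['the'])
def nearest_words(final_embeddings, reverse_dictionary, word_index, top_k=8):
    sims = np.dot(final_embeddings, final_embeddings[word_index])
    nearest = (-sims).argsort()[1:top_k + 1]  # skip the query word itself
    return [reverse_dictionary[i] for i in nearest]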
if __name__ == "__main__":
os.chdir('..')
os.chdir('Datasets')
temp = Embedder()
print(temp.build_embedding())
|
{
"content_hash": "92b7545c3cd905bf1031dce59eb96427",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 105,
"avg_line_length": 48.32420091324201,
"alnum_prop": 0.5412453935557026,
"repo_name": "SirRujak/OpenSpeech-Prediciton",
"id": "a35c53fddba52430cd4ff34db45af2905b4baf0c",
"size": "10735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/embedder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78283"
}
],
"symlink_target": ""
}
|
from webob import exc
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.extensions import portsecurity as psec
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extension_security_group
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_portsecurity.'
'PortSecurityTestPlugin')
class PortSecurityTestCase(
test_extension_security_group.SecurityGroupsTestCase,
test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin=None):
ext_mgr = (
test_extension_security_group.SecurityGroupTestExtensionManager())
super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
# Check if a plugin supports security groups
plugin_obj = manager.NeutronManager.get_plugin()
self._skip_security_group = ('security-group' not in
plugin_obj.supported_extension_aliases)
def tearDown(self):
super(PortSecurityTestCase, self).tearDown()
self._skip_security_group = None
class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin,
portsecurity_db.PortSecurityDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups and port security.
"""
supported_extension_aliases = ["security-group", "port-security"]
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).create_network(
context, network)
neutron_db.update(network['network'])
self._process_network_port_security_create(
context, network['network'], neutron_db)
return neutron_db
def update_network(self, context, id, network):
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).update_network(
context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], neutron_db)
return neutron_db
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
net = super(PortSecurityTestPlugin, self).get_network(
context, id)
return self._fields(net, fields)
def create_port(self, context, port):
p = port['port']
with context.session.begin(subtransactions=True):
p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
context, port)
neutron_db = super(PortSecurityTestPlugin, self).create_port(
context, port)
p.update(neutron_db)
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, p)
p[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(context, p, neutron_db)
if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
not (port_security and has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port requires ip and port_security enabled for security group
if has_ip and port_security:
self._ensure_default_security_group_on_port(context, port)
if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
self._process_port_create_security_group(
context, p, p[ext_sg.SECURITYGROUPS])
return port['port']
def update_port(self, context, id, port):
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
with context.session.begin(subtransactions=True):
ret_port = super(PortSecurityTestPlugin, self).update_port(
context, id, port)
# copy values over - but not fixed_ips
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
# populate port_security setting
if psec.PORTSECURITY not in ret_port:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
            # If the update adds or modifies security groups, the port must
            # have port security enabled and an IP address.
if (has_security_groups and (not ret_port[psec.PORTSECURITY]
or not has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port security/IP was updated off. Need to check that no security
# groups are on port.
if ret_port[psec.PORTSECURITY] is not True or not has_ip:
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# get security groups on port
filters = {'port_id': [id]}
security_groups = (super(PortSecurityTestPlugin, self).
_get_port_security_group_bindings(
context, filters))
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
# delete the port binding and read it with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
# process port create sec groups needs port id
port['id'] = id
self._process_port_create_security_group(context,
ret_port, sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
return ret_port
class PortSecurityDBTestCase(PortSecurityTestCase):
def setUp(self, plugin=None):
plugin = plugin or DB_PLUGIN_KLASS
super(PortSecurityDBTestCase, self).setUp(plugin)
class TestPortSecurity(PortSecurityDBTestCase):
def test_create_network_with_portsecurity_mac(self):
res = self._create_network('json', 'net1', True)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
def test_create_network_with_portsecurity_false(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_updating_network_port_security(self):
res = self._create_network('json', 'net1', True,
port_security_enabled='True')
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
update_net = {'network': {psec.PORTSECURITY: False}}
req = self.new_update_request('networks', update_net,
net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
req = self.new_show_request('networks', net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_create_port_default_true(self):
with self.network() as net:
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_passing_true(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=True)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_on_port_security_false_network(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self._delete('ports', port['port']['id'])
def test_create_port_security_overrides_network_value(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_fails_with_secgroup_and_port_security_false(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
security_group = self.deserialize(
'json',
self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
security_groups=[security_group_id],
port_security_enabled=False)
self.assertEqual(res.status_int, 400)
def test_create_port_with_default_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_with_security_group_and_net_sec_false(self):
        # This tests that port_security_enabled is True when creating
        # a port on a network that is marked as port_security_enabled=False,
        # that has a subnet, and security_groups are passed in.
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',),
security_groups=[security_group_id])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port']['security_groups'], [security_group_id])
self._delete('ports', port['port']['id'])
def test_update_port_security_off_with_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
update_port = {'port': {psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0)
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group_read(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
sg_id = port['port'][ext_sg.SECURITYGROUPS]
update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]],
psec.PORTSECURITY: True}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
with self.network(shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
tenant_id='not_network_owner',
set_context=True)
self.deserialize('json', res)
self.assertEqual(res.status_int, 403)
def test_update_port_security_off_shared_network(self):
with self.network(shared=True, do_delete=False) as net:
with self.subnet(network=net, do_delete=False):
res = self._create_port('json', net['network']['id'],
tenant_id='not_network_owner',
set_context=True)
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
req.environ['neutron.context'] = context.Context(
'', 'not_network_owner')
res = req.get_response(self.api)
self.assertEqual(res.status_int, exc.HTTPForbidden.code)
|
{
"content_hash": "fadf3c263f3db711ed013fd43ed9968d",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 79,
"avg_line_length": 49.9946380697051,
"alnum_prop": 0.5582368082368082,
"repo_name": "shakamunyi/neutron-vrrp",
"id": "8845905781ffa9477bd5980fe04fb7a0e013b4e3",
"size": "19239",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_portsecurity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "10032378"
},
{
"name": "Shell",
"bytes": "9885"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
from os import path
from ansible import __version__ as ansible_ver
from . import __version__ as prudentia_ver
# Setting Ansible config file environment variable as first thing
cwd = path.dirname(path.realpath(__file__))
os.environ['ANSIBLE_CONFIG'] = path.join(cwd, 'ansible.cfg')
os.environ['ANSIBLE_ROLES_PATH'] = path.join(cwd, 'roles') + ':/etc/ansible/roles'
os.environ['ANSIBLE_LOOKUP_PLUGINS'] = path.join(cwd, 'plugins', 'lookup') + \
':~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup'
os.environ['ANSIBLE_ACTION_PLUGINS'] = path.join(cwd, 'plugins', 'action') + \
':~/.ansible/plugins/action:/usr/share/ansible/plugins/action'
os.environ['ANSIBLE_LIBRARY'] = path.join(cwd, 'modules')
from prudentia.digital_ocean import DigitalOceanCli
from prudentia.local import LocalCli
from prudentia.ssh import SshCli
from prudentia.vagrant import VagrantCli
Providers = {
'local': LocalCli,
'ssh': SshCli,
'vagrant': VagrantCli,
'digital-ocean': DigitalOceanCli
}
def parse(args=None):
parser = argparse.ArgumentParser(
prog='Prudentia',
description='A useful Continuous Deployment toolkit.'
)
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + prudentia_ver + ', Ansible ' + ansible_ver)
parser.add_argument('provider', choices=Providers.keys(),
help='use one of the available providers')
parser.add_argument('commands', nargs='*', default='',
help='optional quoted list of commands to run with the chosen provider')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args(args)
def run(args):
chosen_cli = Providers[args.provider]()
if args.commands:
for c in args.commands:
print ("Executing: '{0}'\n".format(c))
chosen_cli.onecmd(c)
else:
chosen_cli.cmdloop()
return chosen_cli.provider.provisioned
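# Hedged usage sketch (not part of the original module): driving a provider CLI
# programmatically instead of from the console entry point. 'help' is the generic
# cmd.Cmd builtin; real provisioning commands depend on the chosen provider.
def _example_noninteractive():
    args = parse(['local', 'help'])
    return run(args)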
|
{
"content_hash": "c8141452a0843c48bf1b04b59c509893",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 101,
"avg_line_length": 36.719298245614034,
"alnum_prop": 0.6392737697085523,
"repo_name": "StarterSquad/prudentia",
"id": "d74b76ad57d48d1af7a80ea329a9edbf60c2b541",
"size": "2093",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "prudentia/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87577"
},
{
"name": "Shell",
"bytes": "4010"
}
],
"symlink_target": ""
}
|
def add_native_methods(clazz):
def init0____():
raise NotImplementedError()
def loadDNSconfig0____():
raise NotImplementedError()
def notifyAddrChange0____():
raise NotImplementedError()
clazz.init0____ = staticmethod(init0____)
clazz.loadDNSconfig0____ = staticmethod(loadDNSconfig0____)
clazz.notifyAddrChange0____ = staticmethod(notifyAddrChange0____)
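# Hedged usage sketch (not part of the generated module): the class below is a
# stand-in for the Java class object that pava's loader would normally pass in;
# the attached natives simply raise NotImplementedError when called.
def _example_add_native_methods():
    class _FakeResolverConfigurationImpl(object):
        pass
    add_native_methods(_FakeResolverConfigurationImpl)
    try:
        _FakeResolverConfigurationImpl.loadDNSconfig0____()
    except NotImplementedError:
        return True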
|
{
"content_hash": "d5403858baea19f326c7db87a787b69d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 69,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.6486486486486487,
"repo_name": "laffra/pava",
"id": "448dc53ab5db7e19fd599e7251f88dd7a1cab007",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pava/implementation/natives/sun/net/dns/ResolverConfigurationImpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144"
},
{
"name": "Python",
"bytes": "369288"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'ProjectOwnership'
db.create_table('sentry_projectownership', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Project'], unique=True)),
('raw', self.gf('django.db.models.fields.TextField')(null=True)),
('schema', self.gf('jsonfield.fields.JSONField')(null=True)),
('fallthrough', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('sentry', ['ProjectOwnership'])
def backwards(self, orm):
# Deleting model 'ProjectOwnership'
db.delete_table('sentry_projectownership')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'39472cc5c3d647fbb6dd08aa32aaeb9a40bcc5a2ebfb41a283117aecc9d1c620'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'096dfbb1677947c282bdfe92a8006c72376586ae292f4c998e12e3c106b3d067'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Champion Ibex'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'f42f7069562d4c6eb2ff746c0a378ecf'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 2, 14, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 16, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'64ac10b88d7b44638d93b9fab084575c1d5a86f372db47ffa9e8f28aadf4cab2'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'c4f59e7ea28842e3960603021085d6c8c366a61346b346ce9b0abc6dcf939fe4'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 2, 21, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
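            # Note: the literal hex string in 'default' on the next line (and the similar frozen
            # datetime/guid/secret defaults elsewhere in this models dict) is a value South captured
            # when this migration was generated. These literals are only frozen-ORM schema metadata
            # used to rebuild the historical model state; they are not applied as runtime defaults.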
'uuid': ('django.db.models.fields.CharField', [], {'default': "'f547ad4d46804f1b8ad4035dc1e5110b'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'organization'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('jsonfield.fields.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 16, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'7d0db12e5adb428486227b078b8384ef'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'9e1ce9c128b4412980233dd974012fba5c506cb89b624479b7db6a2d6ab8d756'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'MerizKo19p8lMfEYadeiCNre7YsFfluv'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
|
{
"content_hash": "f89e3b450bec6526a292314f0f0206a2",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 233,
"avg_line_length": 91.64792626728111,
"alnum_prop": 0.5796476196222772,
"repo_name": "looker/sentry",
"id": "75170930d6def80aa988ed0581e653febe3bb95d",
"size": "99462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0392_auto__add_projectownership.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pytest
from securicad.model import Model, Object, View
from securicad.model.exceptions import DuplicateViewException, MissingViewException
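# `model`, `view`, and `objects` below are pytest fixtures assumed to be
# provided by the suite's conftest (not shown in this file).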
def test_create(model: Model, view: View):
assert model.view(1) == view
def test_double_delete(view: View):
view.delete()
with pytest.raises(MissingViewException):
view.delete()
def test_invalid_get(model: Model):
with pytest.raises(MissingViewException):
model.view(1)
def test_duplicate(view: View, model: Model):
with pytest.raises(DuplicateViewException):
model.create_view("default", id=view.id)
def test_delete(view: View, model: Model):
view.delete()
with pytest.raises(MissingViewException):
model.view(view.id)
def test_nested_object(view: View, objects: list[Object]):
view.create_group("g1", "icon")
group = view.create_group("g2", "icon")
obj = group.add_object(objects[0])
assert view.object(objects[0]) == obj
def test_nested_group(view: View):
view.create_group("g1", "icon")
group = view.create_group("g2", "icon")
g = group.create_group("g3", "icon")
assert view.group(g.id) == g
def test_nested_object_delete(view: View, objects: list[Object]):
view.create_group("g1", "icon")
group = view.create_group("g2", "icon")
group.add_object(objects[0])
view.object(objects[0]).delete()
assert not view.objects()
def test_nested_group_delete(view: View):
view.create_group("g1", "icon")
group = view.create_group("g2", "icon")
g = group.create_group("g3", "icon")
view.group(g.id).delete()
groups = view.groups()
assert g not in groups
assert len(groups) == 2
def test_filter(model: Model):
view1_name1 = model.create_view("name1")
view2_name1 = model.create_view("name1")
view_name2 = model.create_view("name2")
name1 = model.views(name="name1")
assert len(name1) == 2
assert view1_name1 in name1
assert view2_name1 in name1
assert [view_name2] == model.views(name="name2")
|
{
"content_hash": "d034108ffbab005b5ed2ae16d239e5d4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 83,
"avg_line_length": 27.11842105263158,
"alnum_prop": 0.6681222707423581,
"repo_name": "foreseeti/securicad-model-sdk",
"id": "7a9ad59c5f9b14cf67b8f819f87a2a28033d083b",
"size": "2667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model/test_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268173"
},
{
"name": "Shell",
"bytes": "5757"
}
],
"symlink_target": ""
}
|
import pytest
from pytest_embedded import Dut
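# Both tests drive the on-device unity test menu over serial: pressing ENTER
# lists the registered tests and writing '*' runs all of them.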
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.generic
@pytest.mark.parametrize('config', [
'default',
'release',
], indirect=True)
def test_spiffs_generic(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('')
dut.expect_exact('Enter test for running.')
dut.write('*')
dut.expect_unity_test_output(timeout=120)
@pytest.mark.esp32s3
@pytest.mark.quad_psram
@pytest.mark.parametrize('config', [
'psram',
], indirect=True)
def test_spiffs_psram(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('')
dut.expect_exact('Enter test for running.')
dut.write('*')
dut.expect_unity_test_output(timeout=120)
|
{
"content_hash": "b4bba8279834b0a51ff08533b86b2fdd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 60,
"avg_line_length": 25.9,
"alnum_prop": 0.6833976833976834,
"repo_name": "espressif/esp-idf",
"id": "33e5253937a8a377e1ff49534f0f37939f7e596f",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/spiffs/test_apps/pytest_spiffs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
}
|
from flask import Flask, jsonify, render_template, request
from flaskext.mysql import MySQL  # the deprecated 'flask.ext' namespace was removed; flask-mysql imports as 'flaskext.mysql'
import random
# config file
import config
app = Flask(__name__)
# MySQL configurations
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = config.DB_USER
app.config['MYSQL_DATABASE_PASSWORD'] = config.DB_PASS
app.config['MYSQL_DATABASE_DB'] = config.DB_NAME
app.config['MYSQL_DATABASE_HOST'] = config.DB_HOST
mysql.init_app(app)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/api')
def hello_api():
result = { "message":"welcome to Flask Api Server!" }
return jsonify(result)
# api for raspberry pi
@app.route('/api/chair_log',methods=['POST'])
def chair_log():
value = int(request.form['value'])
query = 'insert into chair_log( action, inserted_at ) values( %d , NOW() )' % ( value )
execute(query)
return jsonify({"message":"ok","action":"stand_up"})
def execute(query):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(query)
conn.commit()
return cursor
# api to browser
@app.route('/api/is_music_play')
def is_music_play():
    # number of 'stand' (action = 0) entries among the 12 most recent log rows
    recent_stand_time = execute('select count(*) from (select * from chair_log order by inserted_at desc limit 12) as L where L.action = 0').fetchone()[0]
    # if the user stood up at least once recently, return 0; otherwise 1
res = 0 if recent_stand_time > 0 else 1
return jsonify({"music_play":res})
@app.route('/api/continous_sit_time')
def continous_sit_time():
    row = execute('select count(*) from chair_log as C , (select MAX(inserted_at) as M from chair_log where action = 0) as MAX where MAX.M < C.inserted_at;')
res = row.fetchone()[0] * 5
return jsonify({"continuous_sit_time":res})
@app.route('/api/daytotal_sit_time')
def daytotal_sit_time():
    row = execute('select count(*) from chair_log where action = 1 and DATE_SUB(now(),INTERVAL 1 DAY) < inserted_at;')
res = row.fetchone()[0] * 5
return jsonify({"daytotal_sit_time":res})
if __name__ =='__main__':
execute('create table if not exists chair_log(id int primary key auto_increment, action tinyint(1), inserted_at datetime)')
app.run(host='127.0.0.1',debug=True,port=config.SERVER_PORT)
#app.run(host='0.0.0.0',debug=True,port=config.SERVER_PORT)
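# A minimal client sketch (illustrative, not part of the original app): a
# Raspberry Pi could report the chair state by POSTing the 'value' form field
# expected by /api/chair_log above (0 = stand, 1 = sit), e.g. with `requests`:
#
#   import requests
#   base = 'http://127.0.0.1:%d' % config.SERVER_PORT
#   requests.post(base + '/api/chair_log', data={'value': 1})  # sat down
#   requests.post(base + '/api/chair_log', data={'value': 0})  # stood up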
|
{
"content_hash": "817672fcd7368706bd2783c01cfe19b8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 157,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.6868327402135231,
"repo_name": "acro5piano/flask_api_server",
"id": "850f3b369d19df1f85a77803c8058eb05e2fdd36",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api-server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "247"
},
{
"name": "Python",
"bytes": "2415"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser, _SubParsersAction, _MutuallyExclusiveGroup
from gooey.gui.lang.i18n import _
class GooeySubParser(_SubParsersAction):
def __init__(self, *args, **kwargs):
super(GooeySubParser, self).__init__(*args, **kwargs)
class GooeyMutuallyExclusiveGroup(_MutuallyExclusiveGroup):
def __init__(self, parser, widgets, *args, **kwargs):
self.parser = parser
self.widgets = widgets
super(GooeyMutuallyExclusiveGroup, self).__init__(self.parser, *args, **kwargs)
def add_argument(self, *args, **kwargs):
widget = kwargs.pop('widget', None)
metavar = kwargs.pop('metavar', None)
super(GooeyMutuallyExclusiveGroup, self).add_argument(*args, **kwargs)
self.parser._actions[-1].metavar = metavar
self.widgets[self.parser._actions[-1].dest] = widget
class GooeyParser(object):
def __init__(self, **kwargs):
self.__dict__['parser'] = ArgumentParser(**kwargs)
self.widgets = {}
@property
def _mutually_exclusive_groups(self):
return self.parser._mutually_exclusive_groups
@property
def _actions(self):
return self.parser._actions
@property
def description(self):
return self.parser.description
def add_argument(self, *args, **kwargs):
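        # strip the Gooey-specific 'widget' kwarg before delegating to argparse,
        # then reapply metavar to the new action and record the widget choice
        # keyed by the argument's dest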
widget = kwargs.pop('widget', None)
metavar = kwargs.pop('metavar', None)
self.parser.add_argument(*args, **kwargs)
self.parser._actions[-1].metavar = metavar
self.widgets[self.parser._actions[-1].dest] = widget
# def add_mutually_exclusive_group(self, **kwargs):
# return self.parser.add_mutually_exclusive_group(**kwargs)
def add_mutually_exclusive_group(self, **kwargs):
group = GooeyMutuallyExclusiveGroup(self.parser, self.widgets, **kwargs)
self.parser._mutually_exclusive_groups.append(group)
return group
def add_argument_group(self, *args, **kwargs):
return GooeyParserGroup(self, *args, **kwargs)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace)
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
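    # any attribute not defined on GooeyParser is transparently delegated to
    # the wrapped ArgumentParser instance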
def __getattr__(self, item):
return getattr(self.parser, item)
def __setattr__(self, key, value):
return setattr(self.parser, key, value)
class GooeyParserGroup(object):
def __init__(self, parent, title=None, description=None):
self.parent = parent
self.group = parent.parser.add_argument_group(title, description)
def add_argument(self, *args, **kwargs):
widget = kwargs.pop('widget', None)
metavar = kwargs.pop('metavar', None)
self.group.add_argument(*args, **kwargs)
self.group._actions[-1].metavar = metavar
self.parent.widgets[self.parent.parser._actions[-1].dest] = widget
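# Usage sketch (illustrative, not part of this module): GooeyParser mirrors
# argparse.ArgumentParser but additionally accepts a 'widget' keyword, e.g.:
#
#   parser = GooeyParser(description='process a file')
#   parser.add_argument('infile', widget='FileChooser')
#   args = parser.parse_args()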
|
{
"content_hash": "d9079fb89432e53f519d9d980a72ff86",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 83,
"avg_line_length": 36.73148148148148,
"alnum_prop": 0.6720443660196622,
"repo_name": "jschultz/Gooey",
"id": "3e88c94a9df378e4dfaa035b900dd154f5a6e51e",
"size": "3967",
"binary": false,
"copies": "1",
"ref": "refs/heads/latest",
"path": "gooey/python_bindings/gooey_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102598"
}
],
"symlink_target": ""
}
|