text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from quadpwm_partial import get_ip_name
from quadpwm_partial import QUADPWM
|
{
"content_hash": "ee295678fb2981022ef589fb2f39116b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 39,
"avg_line_length": 37.5,
"alnum_prop": 0.8533333333333334,
"repo_name": "hakehuang/pycpld",
"id": "868133232aeff8c4494df0bda70057aee628923e",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ips/ip/quadpwm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3006"
},
{
"name": "Mathematica",
"bytes": "806"
},
{
"name": "Python",
"bytes": "50795"
},
{
"name": "Scheme",
"bytes": "40"
},
{
"name": "Tcl",
"bytes": "25993"
},
{
"name": "Verilog",
"bytes": "137097"
}
],
"symlink_target": ""
}
|
"""The Bond integration."""
from asyncio import TimeoutError as AsyncIOTimeoutError
from http import HTTPStatus
import logging
from typing import Any
from aiohttp import ClientError, ClientResponseError, ClientTimeout
from bond_api import Bond, BPUPSubscriptions, start_bpup
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import SLOW_UPDATE_WARNING
from .const import BPUP_SUBS, BRIDGE_MAKE, DOMAIN, HUB
from .utils import BondHub
# Entity platforms set up for each Bond hub/bridge config entry.
PLATFORMS = [
    Platform.BUTTON,
    Platform.COVER,
    Platform.FAN,
    Platform.LIGHT,
    Platform.SWITCH,
]
# Keep API calls just below Home Assistant's slow-update warning threshold
# so a slow Bond hub times out before HA emits its generic warning.
_API_TIMEOUT = SLOW_UPDATE_WARNING - 1
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Bond from a config entry.

    Connects to the Bond hub, starts the BPUP push-update listener,
    registers the hub device, and forwards setup to the entity platforms.
    Returns False on bad credentials; raises ConfigEntryNotReady on
    transient errors so Home Assistant retries later.
    """
    host = entry.data[CONF_HOST]
    token = entry.data[CONF_ACCESS_TOKEN]
    config_entry_id = entry.entry_id
    bond = Bond(
        host=host,
        token=token,
        timeout=ClientTimeout(total=_API_TIMEOUT),
        session=async_get_clientsession(hass),
    )
    hub = BondHub(bond, host)
    try:
        await hub.setup()
    except ClientResponseError as ex:
        if ex.status == HTTPStatus.UNAUTHORIZED:
            # Bad credentials are not retryable; fail setup outright.
            _LOGGER.error("Bond token no longer valid: %s", ex)
            return False
        # Other HTTP errors may be transient; let HA retry setup later.
        raise ConfigEntryNotReady from ex
    except (ClientError, AsyncIOTimeoutError, OSError) as error:
        raise ConfigEntryNotReady from error
    # BPUP (Bond Push UDP Protocol) delivers push state updates.
    bpup_subs = BPUPSubscriptions()
    stop_bpup = await start_bpup(host, bpup_subs)
    @callback
    def _async_stop_event(*_: Any) -> None:
        stop_bpup()
    # Stop the BPUP listener both on entry unload and on HA shutdown.
    entry.async_on_unload(_async_stop_event)
    entry.async_on_unload(
        hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, _async_stop_event)
    )
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = {
        HUB: hub,
        BPUP_SUBS: bpup_subs,
    }
    # Older entries may lack a unique_id; backfill it from the hub.
    if not entry.unique_id:
        hass.config_entries.async_update_entry(entry, unique_id=hub.bond_id)
    assert hub.bond_id is not None
    hub_name = hub.name or hub.bond_id
    device_registry = dr.async_get(hass)
    # Register the hub itself as a device so entities can attach to it.
    device_registry.async_get_or_create(
        config_entry_id=config_entry_id,
        identifiers={(DOMAIN, hub.bond_id)},
        manufacturer=BRIDGE_MAKE,
        name=hub_name,
        model=hub.target,
        sw_version=hub.fw_ver,
        hw_version=hub.mcu_ver,
        suggested_area=hub.location,
        configuration_url=f"http://{host}",
    )
    _async_remove_old_device_identifiers(config_entry_id, device_registry, hub)
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, dropping its cached hub data on success."""
    if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        return False
    hass.data[DOMAIN].pop(entry.entry_id)
    return True
@callback
def _async_remove_old_device_identifiers(
    config_entry_id: str, device_registry: dr.DeviceRegistry, hub: BondHub
) -> None:
    """Remove the non-unique device registry entries."""
    for bond_device in hub.devices:
        registry_entry = device_registry.async_get_device(
            identifiers={(DOMAIN, bond_device.device_id)}
        )
        # Only prune entries that belong to this config entry.
        if registry_entry is None:
            continue
        if config_entry_id not in registry_entry.config_entries:
            continue
        device_registry.async_remove_device(registry_entry.id)
|
{
"content_hash": "7eb2282b7efcdfd0b13684a7f5095a46",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 88,
"avg_line_length": 31.123966942148762,
"alnum_prop": 0.6879978757302178,
"repo_name": "rohitranjan1991/home-assistant",
"id": "062c1d844c4381598f96b8bcfc07c71936851cd9",
"size": "3766",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/bond/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import datetime
import math
import operator
import uuid
import warnings
from lxml.builder import E
import lxml.etree
from .dates import datetime_from_w3_datestring
from .strings import RawString, SolrString, WildcardString
# pytz is optional: without it, timezone-aware datetimes cannot be
# converted to the UTC form that Solr requires (see solr_date.from_date).
try:
    import pytz
except ImportError:
    warnings.warn(
        "pytz not found; cannot do timezone conversions for Solr DateFields",
        ImportWarning)
    pytz = None
class SolrError(Exception):
    """Base exception for sunburnt schema/response errors."""
    pass
class solr_date(object):
    """This class can be initialized from either native python datetime
    objects and mx.DateTime objects, and will serialize to a format
    appropriate for Solr"""
    def __init__(self, v):
        # Accept an existing solr_date, a W3C datetime string, or any
        # object exposing strftime (datetime / mx.DateTime).
        if isinstance(v, solr_date):
            self._dt_obj = v._dt_obj
        elif isinstance(v, basestring):
            try:
                self._dt_obj = datetime_from_w3_datestring(v)
            except ValueError, e:
                raise SolrError(*e.args)
        elif hasattr(v, "strftime"):
            self._dt_obj = self.from_date(v)
        else:
            raise SolrError("Cannot initialize solr_date from %s object"
                            % type(v))
    @staticmethod
    def from_date(dt_obj):
        # Python datetime objects may include timezone information
        if hasattr(dt_obj, 'tzinfo') and dt_obj.tzinfo:
            # but Solr requires UTC times.
            if pytz:
                return dt_obj.astimezone(pytz.utc).replace(tzinfo=None)
            else:
                raise EnvironmentError("pytz not available, cannot do timezone conversions")
        else:
            return dt_obj
    @property
    def microsecond(self):
        # mx.DateTime keeps fractional seconds in .second instead of
        # exposing a microsecond attribute; recover them with modf.
        if hasattr(self._dt_obj, "microsecond"):
            return self._dt_obj.microsecond
        else:
            return int(1000000*math.modf(self._dt_obj.second)[0])
    def __repr__(self):
        return repr(self._dt_obj)
    def __unicode__(self):
        """ Serialize a datetime object in the format required
        by Solr. See http://wiki.apache.org/solr/IndexingDates
        """
        if hasattr(self._dt_obj, 'isoformat'):
            return "%sZ" % (self._dt_obj.isoformat(), )
        # mx.DateTime path: no isoformat, rebuild the timestamp manually.
        strtime = self._dt_obj.strftime("%Y-%m-%dT%H:%M:%S")
        microsecond = self.microsecond
        if microsecond:
            return u"%s.%06dZ" % (strtime, microsecond)
        return u"%sZ" % (strtime,)
    def __cmp__(self, other):
        # Compare on the underlying datetime; accepts either another
        # solr_date or a plain datetime-like object.
        try:
            other = other._dt_obj
        except AttributeError:
            pass
        if self._dt_obj < other:
            return -1
        elif self._dt_obj > other:
            return 1
        else:
            return 0
def solr_point_factory(dimension):
    """Return a solr_point class fixed to *dimension* coordinates.

    Models Solr PointType fields, which declare their dimension in the
    schema; instances parse/serialize comma-separated coordinate lists.
    """
    if dimension < 1:
        raise ValueError("dimension of PointType must be greater than one")
    class solr_point(object):
        dim = int(dimension)
        def __init__(self, *args):
            # A multi-dimensional point may be given as one
            # comma-separated string, one iterable, or one positional
            # argument per dimension.
            if dimension > 1 and len(args) == 1:
                v = args[0]
                if isinstance(v, basestring):
                    v_arr = v.split(',')
                else:
                    try:
                        v_arr = list(v)
                    except TypeError:
                        raise ValueError("bad value provided for point list")
            else:
                v_arr = args
            if len(v_arr) != self.dim:
                raise ValueError("point has wrong number of dimensions")
            self.point = tuple(float(v) for v in v_arr)
        def __repr__(self):
            return "solr_point(%s)" % unicode(self)
        def __unicode__(self):
            # Serialized form Solr expects: "x,y,..."
            return ','.join(str(p) for p in self.point)
    return solr_point
class SolrField(object):
    """Base class for all Solr field types.

    Subclasses override normalize/to_solr/from_solr to convert between
    user-supplied data, python values, and Solr's serialized form.
    """
    def __init__(self, name, indexed=None, stored=None, required=False, multiValued=False, dynamic=False, **kwargs):
        self.name = name
        if indexed is not None:
            self.indexed = indexed
        if stored is not None:
            self.stored = stored
        # By default, indexed & stored are taken from the class attribute
        self.multi_valued = multiValued
        self.required = required
        self.dynamic = dynamic
        if dynamic:
            # Dynamic fields are glob patterns with one '*' at either end.
            if self.name.startswith("*"):
                self.wildcard_at_start = True
            elif self.name.endswith("*"):
                self.wildcard_at_start = False
            else:
                raise SolrError("Dynamic fields must have * at start or end of name (field %s)" %
                                self.name)
    def match(self, name):
        # Only meaningful for dynamic fields; returns None otherwise.
        if self.dynamic:
            if self.wildcard_at_start:
                return name.endswith(self.name[1:])
            else:
                return name.startswith(self.name[:-1])
    def instance_from_user_data(self, data):
        """Wrap user data in a SolrFieldInstance bound to this field."""
        return SolrFieldInstance.from_user_data(self, data)
    def to_user_data(self, value):
        return value
    def from_user_data(self, value):
        return self.normalize(value)
    def to_solr(self, value):
        return unicode(value)
    def to_query(self, value):
        # Escape so the value is safe inside a Lucene query string.
        return RawString(self.to_solr(value)).escape_for_lqs_term()
    def from_solr(self, value):
        return self.normalize(value)
class SolrUnicodeField(SolrField):
    """Field holding text (solr.StrField / solr.TextField)."""
    def from_user_data(self, value):
        # SolrStrings pass through untouched so any escaping or wildcard
        # semantics the caller chose are preserved.
        if isinstance(value, SolrString):
            return value
        else:
            try:
                return WildcardString(unicode(value))
            except UnicodeDecodeError as e:
                # NOTE(review): debugging print left in; consider removing.
                print value
                raise e
    def to_query(self, value):
        return value.escape_for_lqs_term()
    def from_solr(self, value):
        try:
            return unicode(value)
        except UnicodeError:
            raise SolrError("%s could not be coerced to unicode (field %s)" %
                            (value, self.name))
class SolrBooleanField(SolrField):
    """Field holding booleans; serialized as "true"/"false"."""
    def to_solr(self, value):
        return u"true" if value else u"false"
    def normalize(self, value):
        # Accept the strings "true"/"false" (any case); anything else
        # string-like is an error, non-strings use python truthiness.
        if isinstance(value, basestring):
            if value.lower() == "true":
                return True
            elif value.lower() == "false":
                return False
            else:
                raise ValueError("sorry, I only understand simple boolean strings (field %s)" %
                                 self.name)
        return bool(value)
class SolrBinaryField(SolrField):
    """Field holding raw bytes; transported base64-encoded."""
    def from_user_data(self, value):
        try:
            return str(value)
        except (TypeError, ValueError):
            raise SolrError("Could not convert data to binary string (field %s)" %
                            self.name)
    def to_solr(self, value):
        # Python 2 str.encode('base64') codec.
        return unicode(value.encode('base64'))
    def from_solr(self, value):
        return value.decode('base64')
class SolrNumericalField(SolrField):
    """Base for numeric fields; subclasses set base_type, min and max."""
    def normalize(self, value):
        try:
            v = self.base_type(value)
        except (OverflowError, TypeError, ValueError):
            raise SolrError("%s is invalid value for %s (field %s)" %
                            (value, self.__class__, self.name))
        # Enforce the range of the backing Java primitive type.
        if v < self.min or v > self.max:
            raise SolrError("%s out of range for a %s (field %s)" %
                            (value, self.__class__, self.name))
        return v
# Concrete numeric fields, each bounded by the range of the Java
# primitive backing the corresponding Solr type.
class SolrShortField(SolrNumericalField):
    base_type = int
    min = -(2**15)
    max = 2**15-1
class SolrIntField(SolrNumericalField):
    base_type = int
    min = -(2**31)
    max = 2**31-1
class SolrLongField(SolrNumericalField):
    base_type = long
    min = -(2**63)
    max = 2**63-1
class SolrFloatField(SolrNumericalField):
    # IEEE 754 single-precision limits.
    base_type = float
    max = (2.0-2.0**(-23)) * 2.0**127
    min = -max
class SolrDoubleField(SolrNumericalField):
    # IEEE 754 double-precision limits.
    base_type = float
    max = (2.0-2.0**(-52)) * 2.0**1023
    min = -max
class SolrDateField(SolrField):
    """Field holding datetimes, stored internally as solr_date."""
    def normalize(self, v):
        return solr_date(v)
    def to_user_data(self, v):
        # Hand back the underlying datetime, not the solr_date wrapper.
        return v._dt_obj
class SolrRandomField(SolrField):
    """Pseudo-field used only for random sorting; holds no data."""
    def normalize(self, v):
        raise TypeError("Don't try and store or index values in a RandomSortField")
class SolrUUIDField(SolrUnicodeField):
    """Field holding UUIDs; the literal 'NEW' asks Solr to generate one."""
    def from_solr(self, v):
        return uuid.UUID(v)
    def from_user_data(self, v):
        if v == 'NEW':
            return v
        elif isinstance(v, uuid.UUID):
            return v
        else:
            return uuid.UUID(v)
    def to_solr(self, v):
        if v == 'NEW':
            return v
        else:
            # urn is "urn:uuid:<hex>"; strip the 9-character prefix.
            return v.urn[9:]
class SolrPointField(SolrField):
    """Field holding fixed-dimension coordinate points."""
    def __init__(self, **kwargs):
        super(SolrPointField, self).__init__(**kwargs)
        # dimension will be set by the subclass
        self.value_class = solr_point_factory(self.dimension)
    def to_solr(self, v):
        return unicode(self.value_class(v))
    def normalize(self, v):
        return self.value_class(v).point
class SolrPoint2Field(SolrPointField):
    """Two-dimensional point (also mapped from solr.GeoHashField)."""
    dimension = 2
class SolrLatLonField(SolrPointField):
    """Latitude/longitude pair (solr.LatLonType)."""
    dimension = 2
def SolrFieldTypeFactory(cls, name, **kwargs):
    """Create (and memoize in globals()) a subclass of *cls* carrying the
    schema attributes given in **kwargs; stored/indexed default True."""
    atts = {'stored':True, 'indexed':True}
    atts.update(kwargs)
    # This next because otherwise the class names aren't globally
    # visible or useful, which is confusing for debugging.
    # We give the new class a name which uniquely identifies it
    # (but we don't need Solr class, because we've got the same
    # information in cls anyway.
    name = 'SolrFieldType_%s_%s' % (cls.__name__, '_'.join('%s_%s' % kv for kv in sorted(atts.items()) if kv[0] != 'class'))
    # and its safe to put in globals(), because the class is
    # defined by the constituents of its name.
    if name not in globals():
        globals()[name] = type(name, (cls,), atts)
    return globals()[name]
class SolrFieldInstance(object):
    """Pairs a field definition with a concrete value, providing
    conversions to the Solr, query, and user representations."""
    @classmethod
    def from_solr(cls, field, data):
        # Build an instance from data as returned by Solr.
        self = cls()
        self.field = field
        self.value = self.field.from_solr(data)
        return self
    @classmethod
    def from_user_data(cls, field, data):
        # Build an instance from caller-supplied python data.
        self = cls()
        self.field = field
        self.value = self.field.from_user_data(data)
        return self
    def to_solr(self):
        return self.field.to_solr(self.value)
    def to_query(self):
        return self.field.to_query(self.value)
    def to_user_data(self):
        return self.field.to_user_data(self.value)
# These are artificial field classes/instances:
class SolrWildcardField(SolrUnicodeField):
    """Stand-in field for the '*' (all fields) selector."""
    def __init__(self):
        pass
class SolrScoreField(SolrDoubleField):
    """Stand-in field for the computed relevance 'score' column."""
    def __init__(self):
        pass
class WildcardFieldInstance(SolrFieldInstance):
    """Field instance representing the '*' wildcard selector."""
    @classmethod
    def from_user_data(cls):
        return super(WildcardFieldInstance, cls).from_user_data(SolrWildcardField(), "*")
class SolrSchema(object):
    """Parsed representation of a Solr schema.xml.

    Maps Solr fieldType class names to python field classes and extracts
    fields, dynamic fields, the default search field, and the unique key.
    """
    # Mapping from Solr fieldType class names to our field classes.
    solr_data_types = {
        'solr.StrField':SolrUnicodeField,
        'solr.TextField':SolrUnicodeField,
        'solr.BoolField':SolrBooleanField,
        'solr.ShortField':SolrShortField,
        'solr.IntField':SolrIntField,
        'solr.SortableIntField':SolrIntField,
        'solr.TrieIntField':SolrIntField,
        'solr.LongField':SolrLongField,
        'solr.SortableLongField':SolrLongField,
        'solr.TrieLongField':SolrLongField,
        'solr.FloatField':SolrFloatField,
        'solr.SortableFloatField':SolrFloatField,
        'solr.TrieFloatField':SolrFloatField,
        'solr.DoubleField':SolrDoubleField,
        'solr.SortableDoubleField':SolrDoubleField,
        'solr.TrieDoubleField':SolrDoubleField,
        'solr.DateField':SolrDateField,
        'solr.TrieDateField':SolrDateField,
        'solr.RandomSortField':SolrRandomField,
        'solr.UUIDField':SolrUUIDField,
        'solr.BinaryField':SolrBinaryField,
        'solr.PointType':SolrPointField,
        'solr.LatLonType':SolrLatLonField,
        'solr.GeoHashField':SolrPoint2Field,
    }
    def __init__(self, f):
        """initialize a schema object from a
        filename or file-like object."""
        self.fields, self.dynamic_fields, self.default_field_name, self.unique_key \
            = self.schema_parse(f)
        self.default_field = self.fields[self.default_field_name] \
            if self.default_field_name else None
        self.unique_field = self.fields[self.unique_key] \
            if self.unique_key else None
    def Q(self, *args, **kwargs):
        """Build a LuceneQuery against this schema."""
        # Local import avoids a circular dependency with .search.
        from .search import LuceneQuery
        q = LuceneQuery(self)
        q.add(args, kwargs)
        return q
    def schema_parse(self, f):
        """Parse schema XML, returning
        (fields, dynamic_fields, default_field_name, unique_key)."""
        try:
            schemadoc = lxml.etree.parse(f)
        except lxml.etree.XMLSyntaxError, e:
            raise SolrError("Invalid XML in schema:\n%s" % e.args[0])
        field_type_classes = {}
        # Solr accepts both <fieldType> and legacy <fieldtype> spellings.
        for field_type_node in schemadoc.xpath("/schema/types/fieldType|/schema/types/fieldtype"):
            name, field_type_class = self.field_type_factory(field_type_node)
            field_type_classes[name] = field_type_class
        field_classes = {}
        for field_node in schemadoc.xpath("/schema/fields/field"):
            name, field_class = self.field_factory(field_node, field_type_classes, dynamic=False)
            field_classes[name] = field_class
        dynamic_field_classes = []
        for field_node in schemadoc.xpath("/schema/fields/dynamicField"):
            _, field_class = self.field_factory(field_node, field_type_classes, dynamic=True)
            dynamic_field_classes.append(field_class)
        default_field_name = schemadoc.xpath("/schema/defaultSearchField")
        default_field_name = default_field_name[0].text \
            if default_field_name else None
        unique_key = schemadoc.xpath("/schema/uniqueKey")
        unique_key = unique_key[0].text if unique_key else None
        return field_classes, dynamic_field_classes, default_field_name, unique_key
    def field_type_factory(self, field_type_node):
        """Build a (name, field-type class) pair from a <fieldType> node."""
        try:
            name, class_name = field_type_node.attrib['name'], field_type_node.attrib['class']
        except KeyError, e:
            raise SolrError("Invalid schema.xml: missing %s attribute on fieldType" % e.args[0])
        try:
            field_class = self.solr_data_types[class_name]
        except KeyError:
            raise SolrError("Unknown field_class '%s'" % class_name)
        return name, SolrFieldTypeFactory(field_class,
            **self.translate_attributes(field_type_node.attrib))
    def field_factory(self, field_node, field_type_classes, dynamic):
        """Build a (name, field instance) pair from a <field> node."""
        try:
            name, field_type = field_node.attrib['name'], field_node.attrib['type']
        except KeyError, e:
            raise SolrError("Invalid schema.xml: missing %s attribute on field" % e.args[0])
        try:
            field_type_class = field_type_classes[field_type]
        except KeyError, e:
            raise SolrError("Invalid schema.xml: %s field_type undefined" % field_type)
        return name, field_type_class(dynamic=dynamic,
            **self.translate_attributes(field_node.attrib))
    # From XML Datatypes
    attrib_translator = {"true": True, "1": True, "false": False, "0": False}
    def translate_attributes(self, attribs):
        # Map XML boolean literals to python bools; pass others through.
        return dict((k, self.attrib_translator.get(v, v))
                    for k, v in attribs.items())
    def missing_fields(self, field_names):
        """Return required schema fields absent from field_names."""
        return [name for name in set(self.fields.keys()) - set(field_names)
                if self.fields[name].required]
    def check_fields(self, field_names, required_atts=None):
        """Verify that the named fields exist (optionally with the given
        attribute values); raise SolrError otherwise."""
        if isinstance(field_names, basestring):
            field_names = [field_names]
        if required_atts is None:
            required_atts = {}
        undefined_field_names = []
        for field_name in field_names:
            field = self.match_field(field_name)
            if not field:
                undefined_field_names.append(field_name)
            else:
                for k, v in required_atts.items():
                    if getattr(field, k) != v:
                        raise SolrError("Field '%s' does not have %s=%s" % (field_name, k, v))
        if undefined_field_names:
            raise SolrError("Fields not defined in schema: %s" % list(undefined_field_names))
    def match_dynamic_field(self, name):
        # First dynamic field whose pattern matches wins; None otherwise.
        for field in self.dynamic_fields:
            if field.match(name):
                return field
    def match_field(self, name):
        """Look up a field by name, falling back to dynamic fields."""
        try:
            return self.fields[name]
        except KeyError:
            field = self.match_dynamic_field(name)
            return field
    def field_from_user_data(self, k, v):
        """Wrap user data *v* in a SolrFieldInstance for field *k*."""
        field = self.match_field(k)
        if not field:
            raise SolrError("No such field '%s' in current schema" % k)
        return field.instance_from_user_data(v)
    def make_update(self, docs):
        return SolrUpdate(self, docs)
    def make_delete(self, docs, query):
        return SolrDelete(self, docs, query)
    def parse_response(self, msg):
        return SolrResponse(self, msg)
    def parse_result_doc(self, doc, name=None):
        """Recursively convert a result <doc>/<lst>/<arr> node into
        python values."""
        if name is None:
            name = doc.attrib.get('name')
        if doc.tag in ('lst', 'arr'):
            values = [self.parse_result_doc(n, name) for n in doc.getchildren()]
            return name, tuple(v[1] for v in values)
        # NOTE(review): 'in' here is a substring test ('d', 'oc', etc.
        # would also match); presumably '== "doc"' was intended.
        if doc.tag in 'doc':
            return dict([self.parse_result_doc(n) for n in doc.getchildren()])
        field_class = self.match_field(name)
        if field_class is None and name == "score":
            # 'score' is synthesized by Solr and not part of the schema.
            field_class = SolrScoreField()
        elif field_class is None:
            raise SolrError("unexpected field found in result (field name: %s)" % name)
        return name, SolrFieldInstance.from_solr(field_class, doc.text or '').to_user_data()
class SolrUpdate(object):
    """Builds the XML document for a Solr <add> update."""
    ADD = E.add
    DOC = E.doc
    FIELD = E.field
    def __init__(self, schema, docs):
        self.schema = schema
        self.xml = self.add(docs)
    def fields(self, name, values):
        """Serialize one (possibly multivalued) field to <field> elements."""
        # values may be multivalued - so we treat that as the default case
        if not hasattr(values, "__iter__"):
            values = [values]
        field_values = [self.schema.field_from_user_data(name, value) for value in values]
        return [self.FIELD({'name':name}, field_value.to_solr())
                for field_value in field_values]
    def doc(self, doc):
        """Serialize one document dict to a <doc> element."""
        missing_fields = self.schema.missing_fields(doc.keys())
        if missing_fields:
            raise SolrError("These required fields are unspecified:\n %s" %
                            missing_fields)
        if not doc:
            return self.DOC()
        else:
            # Flatten the per-field element lists into one argument list.
            return self.DOC(*reduce(operator.add,
                                    [self.fields(name, values)
                                     for name, values in doc.items()]))
    def add(self, docs):
        """Serialize one document/object, or a list of them, to <add>."""
        if hasattr(docs, "items") or not hasattr(docs, "__iter__"):
            # is a dictionary, or anything else except a list
            docs = [docs]
        # Arbitrary objects are converted to dicts via the schema.
        docs = [(doc if hasattr(doc, "items")
                 else object_to_dict(doc, self.schema))
                for doc in docs]
        return self.ADD(*[self.doc(doc) for doc in docs])
    def __str__(self):
        return lxml.etree.tostring(self.xml, encoding='utf-8')
class SolrDelete(object):
    """Builds the XML document for a Solr <delete> command."""
    DELETE = E.delete
    ID = E.id
    QUERY = E.query
    def __init__(self, schema, docs=None, queries=None):
        self.schema = schema
        deletions = []
        if docs is not None:
            deletions += self.delete_docs(docs)
        if queries is not None:
            deletions += self.delete_queries(queries)
        self.xml = self.DELETE(*deletions)
    def delete_docs(self, docs):
        """Serialize deletions by document/document-id to <id> elements."""
        if not self.schema.unique_key:
            raise SolrError("This schema has no unique key - you can only delete by query")
        if hasattr(docs, "items") or not hasattr(docs, "__iter__"):
            # docs is a dictionary, or an object which is not a list
            docs = [docs]
        doc_id_insts = [self.doc_id_from_doc(doc) for doc in docs]
        return [self.ID(doc_id_inst.to_solr()) for doc_id_inst in doc_id_insts]
    def doc_id_from_doc(self, doc):
        """Extract the unique-key value from a document (dict, object, or
        bare id) and wrap it in a field instance for serialization."""
        # Is this a dictionary, or an document object, or a thing
        # that can be cast to a uniqueKey? (which could also be an
        # arbitrary object.
        if isinstance(doc, (basestring, int, long, float)):
            # It's obviously not a document object, just coerce to appropriate type
            doc_id = doc
        elif hasattr(doc, "items"):
            # It's obviously a dictionary
            try:
                doc_id = doc[self.schema.unique_key]
            except KeyError:
                raise SolrError("No unique key on this document")
        else:
            doc_id = get_attribute_or_callable(doc, self.schema.unique_key)
            if doc_id is None:
                # Well, we couldn't get an ID from it; let's try
                # coercing the doc to the type of an ID field.
                doc_id = doc
        try:
            doc_id_inst = self.schema.unique_field.instance_from_user_data(doc_id)
        except SolrError:
            raise SolrError("Could not parse argument as object or document id")
        return doc_id_inst
    def delete_queries(self, queries):
        """Serialize delete-by-query requests to <query> elements."""
        if not hasattr(queries, "__iter__"):
            queries = [queries]
        return [self.QUERY(unicode(query)) for query in queries]
    def __str__(self):
        return lxml.etree.tostring(self.xml, encoding='utf-8')
class SolrFacetCounts(object):
    """Container for the facet_counts section of a Solr response."""
    members= ["facet_dates", "facet_fields", "facet_queries", "facet_ranges"]
    def __init__(self, **kwargs):
        # Each facet section defaults to an empty tuple when absent.
        for attr_name in self.members:
            setattr(self, attr_name, kwargs.get(attr_name, ()))
        # facet_fields is the one section exposed as a dict.
        self.facet_fields = dict(self.facet_fields)
    @classmethod
    def from_response(cls, response):
        """Build facet counts from a parsed response dictionary."""
        counts = dict(response.get("facet_counts", {}))
        return SolrFacetCounts(**counts)
class SolrResponse(object):
    """Parsed representation of a complete Solr XML response."""
    def __init__(self, schema, xmlmsg):
        self.schema = schema
        self.original_xml = xmlmsg
        doc = lxml.etree.fromstring(xmlmsg)
        # Every top-level <lst> except moreLikeThis feeds the details dict.
        details = dict(value_from_node(n) for n in
                       doc.xpath("/response/lst[@name!='moreLikeThis']"))
        details['responseHeader'] = dict(details['responseHeader'])
        for attr in ["QTime", "params", "status"]:
            setattr(self, attr, details['responseHeader'].get(attr))
        if self.status != 0:
            raise ValueError("Response indicates an error")
        if doc.xpath("/response/result"):
            result_node = doc.xpath("/response/result")[0]
            self.result = SolrResult(schema, result_node)
        elif doc.xpath("/response/str[@name='importResponse']"):
            # DataImportHandler responses carry status messages instead
            # of a result set.
            status_node = doc.xpath("/response/lst[@name='statusMessages']")[0]
            self.import_status = SolrImportStatus(status_node)
        self.facet_counts = SolrFacetCounts.from_response(details)
        self.highlighting = dict((k, dict(v))
                                 for k, v in details.get("highlighting", ()))
        more_like_these_nodes = \
            doc.xpath("/response/lst[@name='moreLikeThis']/result")
        more_like_these_results = [SolrResult(schema, node)
                                   for node in more_like_these_nodes]
        self.more_like_these = dict((n.name, n)
                                    for n in more_like_these_results)
        if len(self.more_like_these) == 1:
            # Convenience alias when there is exactly one MLT result set.
            self.more_like_this = self.more_like_these.values()[0]
        else:
            self.more_like_this = None
        # can be computed by MoreLikeThisHandler
        termsNodes = doc.xpath("/response/*[@name='interestingTerms']")
        if len(termsNodes) == 1:
            _, value = value_from_node(termsNodes[0])
        else:
            value = None
        self.interesting_terms = value
        self.spellcheck_data = self._lookup_spellcheck(details)
    def _lookup_spellcheck(self, details):
        """Extract collated spelling corrections as a list of
        {'orig': ..., 'new_term': ...} dicts, or None when absent."""
        spellcheck_data = details.get('spellcheck')
        if not spellcheck_data:
            return
        try:
            spellcheck = dict(spellcheck_data)
            suggestions = spellcheck.get('suggestions')
            if not suggestions:
                return
            sitems = suggestions
            params = {}
            # Fold the (key, value) suggestion pairs into a flat dict,
            # converting nested pair-lists into dicts along the way.
            for key, v in sitems:
                if isinstance(v, (list, tuple)):
                    v = dict(v)
                drow = {key:v}
                params.update(drow)
            correctly_spelled = params.get('correctlySpelled')
            if correctly_spelled:
                return
            collation = params.get('collation')
            if not collation:
                return
            items = []
            collation_items = collation.get('misspellingsAndCorrections')
            if not collation_items:
                return
            for orig_term, suggested_term in collation_items:
                items.append(dict(orig=orig_term, new_term=suggested_term))
            return items
        # NOTE(review): bare except silently swallows every parse error;
        # narrowing it would make failures diagnosable.
        except:
            return
    def __str__(self):
        return str(self.result)
    def __len__(self):
        return len(self.result.docs)
    def __getitem__(self, key):
        return self.result.docs[key]
class SolrImportStatus(object):
    """Status report parsed from a Solr DataImportHandler response.

    Each recognized status-message label is copied onto an attribute
    named by the corresponding entry in ``markers``; labels absent from
    the response leave the attribute unset.
    """
    # (status-message label in the response, attribute name to set)
    markers = [
        ('Total Requests made to DataSource', 'requests'),
        ('Total Rows Fetched', 'rows_fetched'),
        ('Total Documents Skipped', 'docs_skipped'),
        ('Full Dump Started', 'full_started'),
        ('Delta Dump started', 'delta_started'),
        ('Committed', 'committed'),
        ('Optimized', 'optimized'),
        ('Total Changed Documents', 'docs_changed'),
        ('Total Documents Processed', 'docs_processed'),
        ('Time taken ', 'time_taken'),
    ]
    def __init__(self, node):
        self.name = node.attrib['name']
        for marker, key in self.markers:
            _el = node.xpath("str[@name='%s']" % marker)
            if _el:
                setattr(self, key, _el[0].text)
    def __str__(self):
        # Bug fix: the original used "%{added}s" / "%{time}s", which is
        # not valid %-format syntax and raised ValueError at runtime;
        # named substitution requires "%(name)s".
        return "Added/updated %(added)s document in %(time)s time." % {
            'added': self.docs_processed,
            'time': self.time_taken
        }
class SolrResult(object):
    """One <result> block: result-set metadata plus parsed documents."""
    def __init__(self, schema, node):
        self.schema = schema
        attrs = node.attrib
        self.name = attrs['name']
        self.numFound = int(attrs['numFound'])
        self.start = int(attrs['start'])
        # Each <doc> child is converted to a python dict via the schema.
        self.docs = [schema.parse_result_doc(doc_node)
                     for doc_node in node.xpath("doc")]
    def __str__(self):
        header = "%(numFound)s results found, starting at #%(start)s\n\n" % self.__dict__
        return header + str(self.docs)
def object_to_dict(o, names):
    """Collect the named attributes of *o* that exist and are not None."""
    collected = {}
    for attr in names:
        if hasattr(o, attr):
            val = getattr(o, attr)
            if val is not None:
                collected[attr] = val
    return collected
# This is over twice the speed of the shorter one immediately above.
# apparently hasattr is really slow; try/except is faster.
# Also, the one above doesn't and can't do callables with exception handling
def object_to_dict(o, schema):
    """Convert an arbitrary object into a document dict keyed by schema
    field names.

    NOTE(review): this deliberately shadows the object_to_dict defined
    above (see the preceding comment); only this version is ever called.
    """
    d = {}
    # Declared schema fields first.
    for name in schema.fields.keys():
        a = get_attribute_or_callable(o, name)
        if a is not None:
            d[name] = a
    # and now try for dynamicFields:
    # Instance attributes first...
    try:
        names = o.__dict__.keys()
    except AttributeError:
        names = []
    for name in names:
        field = schema.match_dynamic_field(name)
        if field:
            a = get_attribute_or_callable(o, name)
            if a is not None:
                d[name] = a
    # ...then class-level attributes/methods.
    try:
        names = o.__class__.__dict__.keys()
    except AttributeError:
        names = []
    for name in names:
        field = schema.match_dynamic_field(name)
        if field:
            a = get_attribute_or_callable(o, name)
            if a is not None:
                d[name] = a
    return d
def get_attribute_or_callable(o, name):
    """Return o.<name>, invoking it when callable.

    A missing attribute, a callable that rejects a zero-argument call
    (TypeError), or a lookup/call raising AttributeError all yield None.
    """
    try:
        value = getattr(o, name)
        if not callable(value):
            return value
        try:
            return value()
        except TypeError:
            # Callable, but needs arguments we cannot supply.
            return None
    except AttributeError:
        return None
def value_from_node(node):
    """Convert a Solr XML response node to a python value, or to a
    (name, value) pair when the node carries a name attribute."""
    name = node.attrib.get('name')
    if node.tag in ('lst', 'arr'):
        value = [value_from_node(n) for n in node.getchildren()]
    # NOTE(review): 'in' here is a substring test, not equality, and this
    # is an 'if' (not 'elif') so 'lst'/'arr' results fall through it;
    # presumably '== "doc"' on an elif was intended.
    if node.tag in 'doc':
        value = dict(value_from_node(n) for n in node.getchildren())
    elif node.tag == 'null':
        value = None
    elif node.tag in ('str', 'byte'):
        value = node.text or ""
    elif node.tag in ('short', 'int'):
        value = int(node.text)
    elif node.tag == 'long':
        value = long(node.text)
    elif node.tag == 'bool':
        value = True if node.text == "true" else False
    elif node.tag in ('float', 'double'):
        value = float(node.text)
    elif node.tag == 'date':
        value = solr_date(node.text)
    if name is not None:
        return name, value
    else:
        return value
|
{
"content_hash": "276d736c1e0cf2152393e725a0b0a8ad",
"timestamp": "",
"source": "github",
"line_count": 852,
"max_line_length": 124,
"avg_line_length": 33.906103286384976,
"alnum_prop": 0.5794447521462199,
"repo_name": "pixbuffer/sunburnt-spatial",
"id": "5600d6e255929c8017dc93dbe1064b47a1436d1f",
"size": "28888",
"binary": false,
"copies": "1",
"ref": "refs/heads/pixbuffer",
"path": "sunburnt/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144869"
},
{
"name": "Shell",
"bytes": "4515"
}
],
"symlink_target": ""
}
|
""" Defines the XAxis and YAxis classes.
"""
import numpy as np
from traits.api import (Float, Instance, Property, cached_property,
on_trait_change)
from .artist.base_artist import BaseArtist
from .layout.bbox_transform import (
BaseTransform, BboxTransform, IdentityTransform, blend_xy_transforms
)
from .layout.grid_layout import BaseGridLayout, XGridLayout, YGridLayout
from .style import config
from .stylus.label_stylus import LabelStylus
from .stylus.tick_stylus import XTickStylus, YTickStylus
from .stylus.tick_label_stylus import XTickLabelStylus, YTickLabelStylus
from .stylus.line_stylus import LineStylus
from .utils.drawing import broadcast_points
class BaseAxis(BaseArtist):
    """ An artist that draws axis lines, ticks, and labels. """
    # A tick grid that controls tick positioning
    tick_grid = Instance(BaseGridLayout)
    # -----------------------------------------------------------------------
    # Appearance traits
    # -----------------------------------------------------------------------
    #: Stylus responsible for drawing tick labels.
    tick_label_stylus = Instance(LabelStylus)
    #: Stylus responsible for drawing ticks.
    tick_stylus = Instance(LineStylus)
    #: Stylus responsible for drawing the axis line.
    line_stylus = Instance(LineStylus)
    # -----------------------------------------------------------------------
    # Private Traits
    # -----------------------------------------------------------------------
    #: XXX Maybe rename data-to-screen to axial transform?
    #: Transform from axial values to screen-space.
    # axial_transform = Instance(BaseTransform)
    #: Transform from values orthogonal to the axis to screen-space.
    ortho_transform = Instance(BaseTransform, IdentityTransform())
    #: Blended transform combining axial and orthogonal transforms.
    transform = Property(Instance(BaseTransform))
    # -------------------------------------------------------------------------
    # Protected interface
    # -------------------------------------------------------------------------
    #: Position of the axis along its orthogonal direction (presumably in
    #: the orthogonal-transform's input space -- confirm against usage).
    locus = Float(0)
    @on_trait_change('component.origin')
    def _update_locus(self):
        # Keep the tick stylus pinned to the axis position when the
        # containing component moves.
        self.tick_stylus.locus = self.locus
    # -----------------------------------------------------------------------
    # Public interface
    # -----------------------------------------------------------------------
    def data_offset_to_label(self, data_offset):
        """Convert an axial data offset to its tick-label text."""
        return str(data_offset)
    # -----------------------------------------------------------------------
    # Component and AbstractOverlay interface
    # -----------------------------------------------------------------------
    def draw(self, gc, view_rect=None):
        """ Draws this component overlaid on another component. """
        with gc:
            self._draw_axis_line(gc)
            self._draw_ticks(gc)
            self._draw_labels(gc)
    # -----------------------------------------------------------------------
    # Private draw routines
    # -----------------------------------------------------------------------
    def _draw_axis_line(self, gc):
        """ Draws the line for the axis. """
        self.line_stylus.draw(gc, self._compute_xy_end_points())
    def _draw_ticks(self, gc):
        """ Draws the tick marks for the axis. """
        self.tick_stylus.draw(gc, self.tick_grid.axial_offsets)
    def _draw_labels(self, gc):
        """ Draws the tick labels for the axis. """
        xy_tick = self._get_tick_positions()
        for xy_screen, label in zip(xy_tick, self._get_labels()):
            with gc:
                # Move the origin to the tick position so the stylus
                # draws the label relative to it.
                gc.translate_ctm(*xy_screen)
                self.tick_label_stylus.draw(gc, label)
    # -----------------------------------------------------------------------
    # Private methods for computing positions and layout
    # -----------------------------------------------------------------------
    def _get_labels(self):
        # One label per tick offset supplied by the grid layout.
        return [self.data_offset_to_label(z)
                for z in self.tick_grid.axial_offsets]
    def _compute_xy_end_points(self):
        # Subclasses return the screen-space end points of the axis line.
        raise NotImplementedError()
    def _data_to_screen_default(self):
        # NOTE(review): relies on data_bbox/screen_bbox, presumably
        # provided by BaseArtist -- confirm.
        return BboxTransform(self.data_bbox, self.screen_bbox)
class XAxis(BaseAxis):
    """Horizontal axis: ticks vary along x at a fixed y locus."""
    def _line_stylus_default(self):
        return LineStylus(color=config.get('axis.line.color'))
    def _tick_stylus_default(self):
        return XTickStylus(color=config.get('axis.tick.color'),
                           locus=self.locus,
                           transform=self.transform)
    def _tick_label_stylus_default(self):
        # Negative offset places x tick labels below the axis line.
        return XTickLabelStylus(offset=-config.get('axis.tick_label.offset'),
                                color=config.get('axis.tick_label.color'))
    def _tick_grid_default(self):
        return XGridLayout(data_bbox=self.data_bbox)
    @cached_property
    def _get_transform(self):
        # x comes from the data-to-screen transform, y from the
        # orthogonal transform.
        return blend_xy_transforms(self.data_to_screen, self.ortho_transform)
    def _compute_xy_end_points(self):
        # The axis line spans the screen x-limits at the fixed y locus.
        y_points = [self.locus] * 2
        return np.transpose([self.screen_bbox.x_limits, y_points])
    def _get_tick_positions(self):
        points = broadcast_points(self.tick_grid.axial_offsets, self.locus)
        return self.transform.transform(points)
class YAxis(BaseAxis):
    """Vertical axis: ticks vary along y at a fixed x `locus`."""
    def _line_stylus_default(self):
        return LineStylus(color=config.get('axis.line.color'))
    def _tick_stylus_default(self):
        return YTickStylus(color=config.get('axis.tick.color'),
                           locus=self.locus,
                           transform=self.transform)
    def _tick_label_stylus_default(self):
        # Negative offset — presumably places labels on the opposite side of
        # the axis line from the ticks; confirm against the stylus code.
        return YTickLabelStylus(offset=-config.get('axis.tick_label.offset'),
                                color=config.get('axis.tick_label.color'))
    def _tick_grid_default(self):
        return YGridLayout(data_bbox=self.data_bbox)
    @cached_property
    def _get_transform(self):
        # y comes from the data transform, x from the orthogonal transform
        # (mirror of XAxis).
        return blend_xy_transforms(self.ortho_transform, self.data_to_screen)
    def _compute_xy_end_points(self):
        # Axis line spans the screen y-limits at the fixed x locus.
        x_points = [self.locus] * 2
        return np.transpose([x_points, self.screen_bbox.y_limits])
    def _get_tick_positions(self):
        points = broadcast_points(self.locus, self.tick_grid.axial_offsets)
        return self.transform.transform(points)
|
{
"content_hash": "4ed88069359b4c0a4c95b89d73b903ca",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 36.44827586206897,
"alnum_prop": 0.543046357615894,
"repo_name": "tonysyu/deli",
"id": "6173b4eb3039c9bff40633e15f3d36551f1ff538",
"size": "6342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deli/axis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1233"
},
{
"name": "Python",
"bytes": "310799"
},
{
"name": "Shell",
"bytes": "5101"
}
],
"symlink_target": ""
}
|
from flask import current_app, Blueprint, render_template, request, url_for, redirect, g
from flask.ext.babel import gettext as _
from app.model.airportinfo import AirportInfo
from app.model.system_settings import SystemSetting
__author__ = 'windschord.com'
blueprint = Blueprint('SystemSetting', __name__, url_prefix='/SystemSetting/')
@blueprint.route('setup')
def setup():
    """Seed the database with a few default AirportInfo rows.

    Returns a localized success/failure message string.
    """
    try:
        current_app.db_session.add(AirportInfo('RJCC', 1, 2, True))
        current_app.db_session.add(AirportInfo('RJTT', 2, 3, True))
        current_app.db_session.add(AirportInfo('RJAA', 2, 3, True))
        current_app.db_session.commit()
        return _('Setup is Success!!')
    except Exception:
        # Roll back so the session is usable again after a failed commit.
        current_app.db_session.rollback()
        current_app.logger.exception('Setup is Fail!!\n')
        return _('Setup is Fail!!')
@blueprint.route('index', methods=['GET', 'POST'])
def index():
    """Render the system-settings page; on POST apply the submitted change.

    Returns the rendered template with all settings and, after a POST, the
    row affected by the change (``search_settings``).
    """
    title = 'SystemSetting'
    if request.method == 'GET':
        search_settings = None
    elif request.method == 'POST':
        # crud() also returns the settings queried *before* the change, but
        # the page must show the up-to-date rows (queried below), so that
        # first value is discarded instead of being assigned and overwritten.
        _, search_settings = crud()
    else:
        return 'Error....404'
    settings = current_app.db_session.query(SystemSetting).all()
    return render_template("system_setting_index.html", title=title, settings=settings, search_settings=search_settings)
@blueprint.route('load_db', methods=['GET'])
def load_db():
    """Reload the REQUEST_HOURS setting from the database onto the app."""
    query = current_app.db_session.query(SystemSetting)
    row = query.filter(SystemSetting.key == 'REQUEST_HOURS').first()
    current_app.REQUEST_HOURS = row.value
    current_app.logger.debug(current_app.REQUEST_HOURS)
    return 'OK'
def crud():
    """Handle an add/update/delete request for SystemSetting rows.

    Reads ``id``, ``type``, ``key`` and ``value`` from the POSTed form and
    performs the requested operation on the matching row.

    Returns:
        tuple: (all settings as queried before the change, the affected
        ``SystemSetting`` row — ``None`` after a delete or unknown type).
    """
    current_app.logger.debug(request.form)
    req_id = request.form['id']
    req_type = request.form.get('type', None)
    key = request.form.get('key', None)
    value = request.form.get('value', None)
    all_settings = current_app.db_session.query(SystemSetting).all()
    target = current_app.db_session.query(SystemSetting).filter(SystemSetting.id == req_id).first()
    current_app.logger.debug(target)
    if req_type == 'add':
        target = SystemSetting(key, value)
        current_app.db_session.add(target)
        current_app.db_session.commit()
        current_app.logger.debug('ADD: %s' % target)
    elif req_type == 'update':
        target.value = value
        current_app.db_session.commit()
        current_app.logger.debug('UPDATE: %s' % target)
    elif req_type == 'delete':
        # `target` was already fetched above; the original re-ran the exact
        # same query here for no effect.
        current_app.db_session.delete(target)
        current_app.db_session.commit()
        # Log label fixed: this branch deletes (the original said 'UPDATE').
        current_app.logger.debug('DELETE: %s' % target)
        target = None
    else:
        # logger.exception() outside an except block logs a bogus traceback;
        # log the unexpected *type* (the original logged the row) as an error.
        current_app.logger.error('Unknown type: %s' % req_type)
    return all_settings, target
|
{
"content_hash": "2fc6874b877ec1c1c7cad6ca3beee104",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 134,
"avg_line_length": 35.45454545454545,
"alnum_prop": 0.6600732600732601,
"repo_name": "windschord/airport_weather",
"id": "77f8429cad1bda44d23e596e0a2d741a49337245",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/view/system_preferences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "692"
},
{
"name": "HTML",
"bytes": "12963"
},
{
"name": "Python",
"bytes": "23545"
},
{
"name": "Shell",
"bytes": "1331"
}
],
"symlink_target": ""
}
|
"""Tests for Generalized Pareto distribution."""
# Dependency imports
import hypothesis as hp
import hypothesis.strategies as hps
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import generalized_pareto as gp
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import gradient
# Pylint doesn't understand hps.composite.
# pylint: disable=no-value-for-parameter
@hps.composite
def generalized_paretos(draw, batch_shape=None):
  """Hypothesis strategy: draw a random GeneralizedPareto distribution.

  Args:
    draw: Hypothesis draw function (supplied by `@hps.composite`).
    batch_shape: Optional fixed batch shape; randomly drawn when None.

  Returns:
    A `GeneralizedPareto` whose parameters broadcast to `batch_shape`.
  """
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())
  constraints = dict(
      loc=tfp_hps.identity_fn,
      scale=tfp_hps.softplus_plus_eps(),
      concentration=lambda x: tf.math.tanh(x) * 0.24)  # <.25==safe for variance
  params = draw(
      tfp_hps.broadcasting_params(
          batch_shape,
          params_event_ndims=dict(loc=0, scale=0, concentration=0),
          constraint_fn_for=constraints.get))
  dist = gp.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)
  # Guard against broadcasting bugs in the strategy itself.
  if dist.batch_shape != batch_shape:
    raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(
        batch_shape, dist))
  return dist
@test_util.test_all_tf_execution_regimes
class GeneralizedParetoTest(test_util.TestCase):
  """Example- and property-based tests for GeneralizedPareto.

  Changes vs. previous revision: removed a leftover debug `print` in
  `testQuantilesBroadcasting` and fixed the typo in the name of
  `testQuantileZeroConcentration`.
  """

  @hp.given(generalized_paretos())
  @tfp_hps.tfp_hp_settings()
  def testShape(self, dist):
    # batch_shape == dist.batch_shape asserted in generalized_paretos()
    self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor()))
    self.assertEqual(tf.TensorShape([]), dist.event_shape)
    self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))

  @hp.given(generalized_paretos(batch_shape=[]))
  @tfp_hps.tfp_hp_settings()
  def testLogPDF(self, dist):
    loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
    hp.assume(abs(loc / scale) < 1e7)
    xs = self.evaluate(dist.sample(seed=test_util.test_seed()))
    logp = dist.log_prob(xs)
    self.assertEqual(dist.batch_shape, logp.shape)
    p = dist.prob(xs)
    self.assertEqual(dist.batch_shape, p.shape)
    expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs)
    actual_logp = self.evaluate(logp)
    self.assertAllClose(expected_logp, actual_logp, rtol=1e-5, atol=1e-5)
    self.assertAllClose(np.exp(expected_logp), self.evaluate(p),
                        rtol=1e-5, atol=1e-5)

  def testLogPDFBoundary(self):
    # When loc = concentration = 0, we have an exponential distribution. Check
    # that at 0 we have finite log prob.
    scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
    dist = gp.GeneralizedPareto(
        loc=0, scale=scale, concentration=0, validate_args=True)
    log_pdf = self.evaluate(dist.log_prob(0.))
    self.assertAllClose(-np.log(scale), log_pdf, rtol=1e-5)
    # Log prob should be finite on the boundary regardless of parameters.
    loc = np.array([1., 2., 5.]).astype(np.float32)
    scale = 2.
    concentration = np.array([-5., -3.4, -1.]).astype(np.float32)
    dist = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    log_pdf_at_loc = self.evaluate(dist.log_prob(loc))
    self.assertAllFinite(log_pdf_at_loc)
    # TODO(b/144948687) Avoid `nan` at boundary. Ideally we'd do this test:
    # boundary = loc - scale / concentration
    # log_pdf_at_boundary = dist.log_prob(boundary)
    # self.assertAllFinite(log_pdf_at_boundary)

  def testAssertValidSample(self):
    loc = np.array([1., 2., 5.]).astype(np.float32)
    scale = 2.
    concentration = np.array([-5., -3.4, 1.]).astype(np.float32)
    dist = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    with self.assertRaisesOpError('must be greater than or equal to `loc`'):
      self.evaluate(dist.prob([1.3, 1.3, 6.]))
    with self.assertRaisesOpError(
        'less than or equal to `loc - scale / concentration`'):
      self.evaluate(dist.cdf([1.5, 2.3, 6.]))

  def testSupportBijectorOutsideRange(self):
    loc = np.array([1., 2., 5.]).astype(np.float32)
    scale = 2.
    concentration = np.array([-5., -2., 1.]).astype(np.float32)
    dist = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    x = np.array([1. - 1e-6, 3.1, 4.9]).astype(np.float32)
    bijector_inverse_x = dist.experimental_default_event_space_bijector(
        ).inverse(x)
    self.assertAllNan(self.evaluate(bijector_inverse_x))

  @hp.given(generalized_paretos(batch_shape=[]))
  @tfp_hps.tfp_hp_settings()
  def testCDF(self, dist):
    xs = self.evaluate(dist.sample(seed=test_util.test_seed()))
    cdf = dist.cdf(xs)
    self.assertEqual(dist.batch_shape, cdf.shape)
    loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
    hp.assume(abs(loc / scale) < 1e7)
    expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs)
    actual_cdf = self.evaluate(cdf)
    msg = ('Location: {}, scale: {}, concentration: {}, xs: {} '
           'scipy cdf: {}, tfp cdf: {}')
    hp.note(msg.format(loc, scale, conc, xs, expected_cdf, actual_cdf))
    self.assertAllClose(expected_cdf, actual_cdf, rtol=5e-5)

  @hp.given(generalized_paretos(batch_shape=[]))
  @tfp_hps.tfp_hp_settings()
  def testMean(self, dist):
    loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
    hp.note('Location: {}, scale: {}, concentration: {}'.format(
        loc, scale, conc))
    self.assertEqual(dist.batch_shape, dist.mean().shape)
    # scipy doesn't seem to be very accurate for small concentrations, so use
    # higher precision.
    expected = sp_stats.genpareto(np.float64(conc), loc=np.float64(loc),
                                  scale=np.float64(scale)).mean()
    actual = self.evaluate(dist.mean())
    # There is an unavoidable catastropic cancellation for means near 0
    self.assertAllClose(expected, actual, rtol=5e-4, atol=1e-4)

  @hp.given(generalized_paretos(batch_shape=[]))
  @tfp_hps.tfp_hp_settings()
  def testVariance(self, dist):
    loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
    # scipy doesn't seem to be very accurate for small concentrations, so use
    # higher precision.
    expected = sp_stats.genpareto(np.float64(conc), loc=np.float64(loc),
                                  scale=np.float64(scale)).var()
    # scipy sometimes returns nonsense zero or negative variances.
    hp.assume(expected > 0)
    # scipy gets bad answers for very small concentrations even in 64-bit.
    # https://github.com/scipy/scipy/issues/11168
    hp.assume(conc > 1e-4)
    self.assertEqual(dist.batch_shape, dist.variance().shape)
    actual = self.evaluate(dist.variance())
    msg = ('Location: {}, scale: {}, concentration: {}, '
           'scipy variance: {}, tfp variance: {}')
    hp.note(msg.format(loc, scale, conc, expected, actual))
    self.assertAllClose(expected, actual)

  @hp.given(generalized_paretos(batch_shape=[]))
  @tfp_hps.tfp_hp_settings()
  def testEntropy(self, dist):
    loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
    self.assertEqual(dist.batch_shape, dist.entropy().shape)
    expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale)
    actual = self.evaluate(dist.entropy())
    self.assertAllClose(expected, actual)

  def testSample(self):
    loc = np.float32(-7.5)
    scale = np.float32(3.5)
    conc = np.float32(0.07)
    n = 10**5
    dist = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=conc, validate_args=True)
    samples = dist.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual((n,), samples.shape)
    self.assertEqual((n,), sample_values.shape)
    self.assertTrue(self._kstest(loc, scale, conc, sample_values))
    self.assertAllClose(
        sp_stats.genpareto.mean(conc, loc=loc, scale=scale),
        sample_values.mean(),
        rtol=.02)
    self.assertAllClose(
        sp_stats.genpareto.var(conc, loc=loc, scale=scale),
        sample_values.var(),
        rtol=.08)

  @test_util.numpy_disable_gradient_test
  def testFullyReparameterized(self):
    loc = tf.constant(4.0)
    scale = tf.constant(3.0)
    conc = tf.constant(2.0)
    _, grads = gradient.value_and_gradient(
        lambda *args: gp.GeneralizedPareto(*args, validate_args=True).sample(  # pylint: disable=g-long-lambda
            100, seed=test_util.test_seed()),
        [loc, scale, conc])
    self.assertLen(grads, 3)
    self.assertAllNotNone(grads)

  def testSampleKolmogorovSmirnovMultiDimensional(self):
    loc = np.linspace(-10, 10, 3).reshape([3, 1, 1])
    scale = np.linspace(1e-6, 7, 5).reshape([5, 1])
    conc = np.linspace(-1.3, 1.3, 7)
    dist = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=conc, validate_args=True)
    n = 10**4
    samples = dist.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual((n, 3, 5, 7), samples.shape)
    self.assertEqual((n, 3, 5, 7), sample_values.shape)
    fails = 0
    trials = 0
    for li, l in enumerate(loc.reshape(-1)):
      for si, s in enumerate(scale.reshape(-1)):
        for ci, c in enumerate(conc.reshape(-1)):
          samps = sample_values[:, li, si, ci]
          trials += 1
          fails += 0 if self._kstest(l, s, c, samps) else 1
    self.assertLess(fails, trials * 0.03)

  def _kstest(self, loc, scale, conc, samples):
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    ks, _ = sp_stats.kstest(samples,
                            sp_stats.genpareto(conc, loc=loc, scale=scale).cdf)
    # Return True when the test passes.
    return ks < 0.02

  def testPdfOfSampleMultiDims(self):
    dist = gp.GeneralizedPareto(
        loc=0,
        scale=[[2.], [3.]],
        concentration=[-.37, .11],
        validate_args=True)
    num = 50000
    samples = dist.sample(num, seed=test_util.test_seed())
    pdfs = dist.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual((num, 2, 2), samples.shape)
    self.assertEqual((num, 2, 2), pdfs.shape)
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)

  def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
    # Trapezoid-rule integral of the empirical pdf should be ~1.
    s_p = zip(sample_vals, pdf_vals)
    prev = (0, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)

  def testNonPositiveInitializationParamsRaises(self):
    scale = tf.constant(0.0, name='scale')
    with self.assertRaisesOpError('Argument `scale` must be positive.'):
      dist = gp.GeneralizedPareto(
          loc=0, scale=scale, concentration=1, validate_args=True)
      self.evaluate(dist.mean())

  @test_util.tf_tape_safety_test
  def testGradientThroughConcentration(self):
    concentration = tf.Variable(3.)
    d = gp.GeneralizedPareto(
        loc=0, scale=1, concentration=concentration, validate_args=True)
    with tf.GradientTape() as tape:
      loss = -d.log_prob([1., 2., 4.])
    grad = tape.gradient(loss, d.trainable_variables)
    self.assertLen(grad, 1)
    self.assertAllNotNone(grad)

  def testAssertsPositiveScale(self):
    scale = tf.Variable([1., 2., -3.])
    self.evaluate(scale.initializer)
    with self.assertRaisesOpError('Argument `scale` must be positive.'):
      d = gp.GeneralizedPareto(
          loc=0, scale=scale, concentration=1, validate_args=True)
      self.evaluate(d.sample(seed=test_util.test_seed()))

  def testAssertsPositiveScaleAfterMutation(self):
    scale = tf.Variable([1., 2., 3.])
    self.evaluate(scale.initializer)
    d = gp.GeneralizedPareto(
        loc=0, scale=scale, concentration=0.25, validate_args=True)
    self.evaluate(d.mean())
    with self.assertRaisesOpError('Argument `scale` must be positive.'):
      with tf.control_dependencies([scale.assign([1., 2., -3.])]):
        self.evaluate(d.sample(seed=test_util.test_seed()))

  @test_util.tf_tape_safety_test
  def testGradientThroughLocScale(self):
    loc = tf.Variable(1.)
    scale = tf.Variable(2.5)
    d = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=.15, validate_args=True)
    with tf.GradientTape() as tape:
      loss = -d.log_prob([1., 2., 4.])
    grads = tape.gradient(loss, d.trainable_variables)
    self.assertLen(grads, 2)
    self.assertAllNotNone(grads)

  def testQuantile(self):
    scale = tf.Variable(2.)
    concentration = tf.Variable(0.5)
    loc = tf.Variable(1.)
    self.evaluate(scale.initializer)
    self.evaluate(concentration.initializer)
    self.evaluate(loc.initializer)
    d = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    p = tf.linspace(0., 1., 1000)[1:-1]
    q = d.quantile(p)
    self.assertAllFinite(q)
    q_scipy = sp_stats.genpareto.ppf(
        np.linspace(0., 1., 1000)[1:-1], 0.5, 1., 2.)
    self.assertAllClose(q, q_scipy, rtol=1.e-5)

  def testQuantileZeroConcentration(self):
    scale = tf.Variable(0.5)
    concentration = tf.Variable(0.)
    loc = tf.Variable(1.)
    self.evaluate(scale.initializer)
    self.evaluate(concentration.initializer)
    self.evaluate(loc.initializer)
    d = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    p = tf.linspace(0., 1., 1000)[1:-1]
    q = d.quantile(p)
    self.assertAllFinite(q)
    q_scipy = sp_stats.genpareto.ppf(
        np.linspace(0., 1., 1000)[1:-1], 0., 1., 0.5)
    self.assertAllClose(q, q_scipy, rtol=1.e-5)

  def testQuantilesBroadcasting(self):
    loc = tf.constant([0.1, 0.2])[:, tf.newaxis, tf.newaxis]
    scale = tf.constant([0.9, 1., 1.1])[:, tf.newaxis]
    concentration = tf.constant([0.0, 0.4, 0.5, 0.6, 1.0])
    d = gp.GeneralizedPareto(
        loc=loc, scale=scale, concentration=concentration, validate_args=True)
    p = tf.linspace(0., 1., 1000)[1:-1][:, tf.newaxis, tf.newaxis, tf.newaxis]
    q = d.quantile(p)
    self.assertAllFinite(q)
    loc_numpy = self.evaluate(loc)
    scale_numpy = self.evaluate(scale)
    conc_numpy = self.evaluate(concentration)
    q_scipys = []
    for i in range(5):
      q_scipys.append(sp_stats.genpareto.ppf(
          np.linspace(0., 1., 1000)[1:-1].reshape(998, 1, 1, 1),
          conc_numpy[i].reshape(1, 1, 1, 1),
          loc_numpy.reshape(1, 2, 1, 1),
          scale_numpy.reshape(1, 1, 3, 1)))
    q_scipy = np.concatenate(q_scipys, axis=-1)
    self.assertAllClose(q, q_scipy, rtol=1.e-5)
# Standard TFP test entry point.
if __name__ == '__main__':
  test_util.main()
|
{
"content_hash": "5131f7c9114b5375c1ced292987855e5",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 110,
"avg_line_length": 39.247395833333336,
"alnum_prop": 0.6559617809037224,
"repo_name": "tensorflow/probability",
"id": "fa779f3fa1f6bf83187b753b7ccb257ad69fbfb8",
"size": "15749",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/distributions/generalized_pareto_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
}
|
"""
Class that describes a CDN
"""
from SMDS.user import *
from SMDS.content import *
class MDCDN:
    """Base interface to a CDN control service ("brain").

    Subclasses override these hooks to register and unregister users and
    content.  This base implementation is a stub: mutating operations report
    success (1) and queries return nothing (None).
    """

    def __init__(self):
        # Nothing to initialize in the stub implementation.
        pass

    def setup(self, api):
        """Initialize the driver with the given API handle; stub succeeds."""
        return 1

    def shutdown(self):
        """Tear down the driver; stub succeeds."""
        return 1

    def add_user(self, user):
        """Register a user with the CDN; stub succeeds."""
        return 1

    def add_content(self, user, content):
        """Register content owned by `user`; stub succeeds."""
        return 1

    def rm_user(self, user):
        """Remove a user from the CDN; stub succeeds."""
        return 1

    def rm_content(self, content):
        """Remove content from the CDN; stub succeeds."""
        return 1

    def get_users(self):
        """Return known users; the stub tracks none."""
        return None

    def get_contents(self):
        """Return known content; the stub tracks none."""
        return None

    def update_user(self, user):
        """Update a user's record; stub succeeds."""
        return 1

    def update_content(self, content):
        """Update a content record; stub succeeds."""
        return 1
|
{
"content_hash": "749d8e9ecfb1192a101df7789dd13a5f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 17,
"alnum_prop": 0.5668449197860963,
"repo_name": "jcnelson/syndicate",
"id": "6dcb7e138092bcc223d6f29d7b5500ec95dd187d",
"size": "748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "old/md-service/SMDS/API/CDN/CDN.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343449"
},
{
"name": "C++",
"bytes": "3136667"
},
{
"name": "CSS",
"bytes": "321366"
},
{
"name": "Gnuplot",
"bytes": "3596"
},
{
"name": "HTML",
"bytes": "172638"
},
{
"name": "JavaScript",
"bytes": "55112"
},
{
"name": "Makefile",
"bytes": "43170"
},
{
"name": "Perl",
"bytes": "8025"
},
{
"name": "Protocol Buffer",
"bytes": "20793"
},
{
"name": "Python",
"bytes": "3273669"
},
{
"name": "Ruby",
"bytes": "13015"
},
{
"name": "Shell",
"bytes": "63133"
},
{
"name": "TeX",
"bytes": "605910"
},
{
"name": "Thrift",
"bytes": "2996"
}
],
"symlink_target": ""
}
|
"""Package contenant la commande 'décor' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .creer import PrmCreer
from .edit import PrmEdit
from .installer import PrmInstaller
from .liste import PrmListe
from .retirer import PrmRetirer
class CmdDecor(Commande):

    """Admin command 'decor': management of decor prototypes."""

    def __init__(self):
        """Build the command with its accented/ASCII aliases and help text."""
        Commande.__init__(self, "décor", "decor")
        self.groupe = "administrateur"
        self.aide_courte = "manipulation des décors"
        self.aide_longue = \
            "Cette commande permet de manipuler les prototypes de " \
            "décors, en créer ou éditer."

    def ajouter_parametres(self):
        """Register each sub-command parameter, in display order."""
        for parametre in (PrmCreer(), PrmEdit(), PrmInstaller(),
                PrmListe(), PrmRetirer()):
            self.ajouter_parametre(parametre)
|
{
"content_hash": "692fcf17215c00a995fd0c7ee6b3a84a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 69,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.6555023923444976,
"repo_name": "stormi/tsunami",
"id": "74f4893b1ddf31aa4dd039db55d65cc0778999eb",
"size": "2618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/commandes/decor/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# Package metadata and console entry point for covertrack.
setup(
    name="covertrack",
    version="0.1",
    # Pick up the covertrack package and all its subpackages automatically
    # (removed a stale commented-out explicit package list).
    packages=find_packages(),
    author='Takamasa Kudo',
    author_email='kudo@stanford.edu',
    license="MIT License",
    entry_points={
        "console_scripts": [
            "covertrack=covertrack.caller:main",
        ],
    }
)
|
{
"content_hash": "ba71d24a21c40232a120fbb4d2874d69",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 48,
"avg_line_length": 19.31578947368421,
"alnum_prop": 0.5967302452316077,
"repo_name": "braysia/covertrack",
"id": "ce19b63125f309a4ad4caffad95531fdcdb03bce",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "682275"
},
{
"name": "Python",
"bytes": "250180"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
}
|
import jobinfo
__author__ = 'Masataka'
class JobMaya(jobinfo.JobInfo):
    """Base job description for Maya jobs; all handling is in JobInfo."""
    def __init__(self, param):
        jobinfo.JobInfo.__init__(self, param)
class JobMayaSw(JobMaya):
    # Marker subclass; presumably the Maya Software renderer variant — confirm.
    def __init__(self, param):
        JobMaya.__init__(self, param)
class JobMayaMr(JobMaya):
    # Marker subclass; presumably the mental ray renderer variant — confirm.
    def __init__(self, param):
        JobMaya.__init__(self, param)
class JobMayaFile(JobMaya):
    # Marker subclass; presumably a file-based Maya job variant — confirm.
    def __init__(self, param):
        JobMaya.__init__(self, param)
|
{
"content_hash": "72794b4c76c4d68cf9782a07ea47b2e3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 45,
"avg_line_length": 19.304347826086957,
"alnum_prop": 0.6058558558558559,
"repo_name": "plinecom/JobManager",
"id": "08ac5e969f53c24a9067c0241fb770d0d291d6ae",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job/_maya.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59376"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateAuthenticatedPost(Choreography):
    """Temboo Choreo binding for Disqus /Posts/CreateAuthenticatedPost."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the CreateAuthenticatedPost Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CreateAuthenticatedPost, self).__init__(temboo_session, '/Library/Disqus/Posts/CreateAuthenticatedPost')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return CreateAuthenticatedPostInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in this Choreo's ResultSet."""
        return CreateAuthenticatedPostResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Build the execution handle used to poll/retrieve results."""
        return CreateAuthenticatedPostChoreographyExecution(session, exec_id, path)
class CreateAuthenticatedPostInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CreateAuthenticatedPost
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter below maps one named Disqus API input; the docstrings (from
    # the Temboo generator) state whether the input is required/optional.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) A valid OAuth 2.0 access token.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('AccessToken', value)
    def set_Date(self, value):
        """
        Set the value of the Date input for this Choreo. ((optional, string) The date of the post, either in Unix timestamp format, or ISO datetime standard. You must be a moderator to do this.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('Date', value)
    def set_IPAddress(self, value):
        """
        Set the value of the IPAddress input for this Choreo. ((optional, string) The author's IP address. You must be a moderator to do this.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('IPAddress', value)
    def set_ParentPost(self, value):
        """
        Set the value of the ParentPost input for this Choreo. ((conditional, string) The ID of a parent post to which the new post will be responding to. Either ParentPost, or Thread must be set, or both.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('ParentPost', value)
    def set_PostContent(self, value):
        """
        Set the value of the PostContent input for this Choreo. ((required, string) The text of this post.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('PostContent', value)
    def set_PostState(self, value):
        """
        Set the value of the PostState input for this Choreo. ((optional, string) Specify the state of the post (comment). Available options include: unapproved, approved, spam, killed. You must be a moderator to do this. If set, pre-approval validation will be skipped.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('PostState', value)
    def set_PublicKey(self, value):
        """
        Set the value of the PublicKey input for this Choreo. ((required, string) The Public Key provided by Disqus (AKA the API Key).)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('PublicKey', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and jsonp.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('ResponseFormat', value)
    def set_ThreadID(self, value):
        """
        Set the value of the ThreadID input for this Choreo. ((conditional, string) The thread ID to attach the new post to. Either ParentPost, or Thread must be set, or both.)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('ThreadID', value)
    def set_Thread(self, value):
        """
        Set the value of the Thread input for this Choreo. ((conditional, string) Deprecated (retained for backward compatibility only).)
        """
        super(CreateAuthenticatedPostInputSet, self)._set_input('Thread', value)
class CreateAuthenticatedPostResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CreateAuthenticatedPost Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): parameter name shadows the builtin `str`; kept as-is
        # for compatibility with the Temboo generator's API.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Disqus.)
        """
        return self._output.get('Response', None)
class CreateAuthenticatedPostChoreographyExecution(ChoreographyExecution):
    """Execution handle that parses results into the typed ResultSet above."""
    def _make_result_set(self, response, path):
        return CreateAuthenticatedPostResultSet(response, path)
|
{
"content_hash": "763604f2e4e79e18eac392e0ec10298a",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 271,
"avg_line_length": 49.42156862745098,
"alnum_prop": 0.6964887919063678,
"repo_name": "jordanemedlock/psychtruths",
"id": "fbd217b512a78870ea5b9dae326ba8e9908c2a08",
"size": "5915",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Disqus/Posts/CreateAuthenticatedPost.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the mk1 min-damage intensifier."""
    result = Tangible()
    result.template = "object/tangible/ship/crafted/weapon/shared_min_damage_intensifier_mk1.iff"
    result.attribute_template_id = 8
    result.stfName("space_crafting_n","min_damage_intensifier_mk1")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
|
{
"content_hash": "289447198fdf20298ca622dc0b66b5f4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 94,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.7175792507204611,
"repo_name": "anhstudios/swganh",
"id": "43653e5d14b188ddf4357a5dd9e9e396ed0338b2",
"size": "492",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/crafted/weapon/shared_min_damage_intensifier_mk1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
PROXY = {"http": "http://proxy.iiit.ac.in:8080/",
"https": "http://proxy.iiit.ac.in:8080/"}
user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5"
import requests
import gevent
from gevent import queue, monkey
from gevent.pool import Group
monkey.patch_all(thread=False)
# Global attempt counter shared by all fetchers; incremented once per HTTP
# request issued (including failed attempts).
count = 1
def fetch(pid):
    """Fetch a fixed CodeChef recent-activity page, retrying until HTTP 200.

    NOTE(review): despite the name, ``pid`` is unused -- every call hits the
    same hard-coded URL. Python 2 only (statement-form ``print``).
    """
    global count
    while True:
        tmp = requests.get("https://www.codechef.com/recent/user?user_handle=tryingtocode&page=4",
                           headers={"User-Agent": user_agent},
                           proxies=PROXY)
        # Count every attempt so `count` reflects total requests issued.
        count += 1
        if tmp.status_code == 200:
            break
    print tmp
def synchronous():
    """Issue 99 fetches one after another (sequential baseline for the demo)."""
    for i in range(1,100):
        fetch(i)
def asynchronous():
    """Issue 100 fetches concurrently on gevent greenlets and wait for all.

    Python 2 only (``xrange``).
    """
    threads = []
    for i in xrange(100):
        threads.append(gevent.spawn(fetch, i))
    gevent.joinall(threads)
# Run the sequential version first, then the greenlet-based version, and
# finally report how many HTTP requests were made in total (Python 2 print).
print('Synchronous:')
synchronous()
print('Asynchronous:')
asynchronous()
print count
|
{
"content_hash": "870410a4044c060cd3144b95d275d0d6",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 163,
"avg_line_length": 35.435483870967744,
"alnum_prop": 0.6927628584433319,
"repo_name": "stopstalk/stopstalk-deployment",
"id": "f71e041b4adf8e3e90f5927ee502d16b93e75667",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private/scripts/extras/parallel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "83271"
},
{
"name": "Cython",
"bytes": "123663"
},
{
"name": "HTML",
"bytes": "190175"
},
{
"name": "JavaScript",
"bytes": "681456"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Makefile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "7648306"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "6187"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import unittest
import six
import mock
from webtest import TestApp
from pyramid import testing
from pyramid.config import Configurator
from pyramid.url import route_path
from pyramid.response import Response
from nefertari.view import BaseView
from nefertari.renderers import _JSONEncoder
def get_test_view_class(name=''):
    """Build a throwaway :class:`BaseView` subclass for routing tests.

    Every handler answers with a :class:`Response` whose body is *name*
    followed by the handler's name; unknown attribute lookups are served
    the same way through ``__getattr__``.
    """
    class View(BaseView):
        _json_encoder = _JSONEncoder
        Model = mock.Mock(__name__='Foo')
        def __init__(self, *args, **kwargs):
            BaseView.__init__(self, *args, **kwargs)
            # Disable the before/after call hooks so tests hit raw handlers.
            self._before_calls = {}
            self._after_calls = {}
        def index(self, **kwargs):
            return Response(name + 'index')
        def show(self, **kwargs):
            return Response(name + 'show')
        def delete(self, **kwargs):
            return Response(name + 'delete')
        def __getattr__(self, attr):
            # Any other action name: respond with name + that action.
            return lambda *args, **kwargs: Response(name + attr)
        def convert_ids2objects(self, *args, **kwargs):
            pass
        def fill_null_values(self, *args, **kwargs):
            pass
    return View
def _create_config():
    """Return a fresh autocommitting Pyramid configurator with nefertari loaded."""
    cfg = Configurator(autocommit=True)
    cfg.include('nefertari')
    return cfg
class Test(unittest.TestCase):
    """Base case: brackets each test with a fresh nefertari configurator.

    ``setUp``/``tearDown`` manage the Pyramid configurator lifecycle so
    routes registered in one test cannot leak into another.
    """
    def setUp(self):
        self.config = _create_config()
        self.config.begin()
    def tearDown(self):
        self.config.end()
        del self.config
class DummyCrudView(object):
    """Minimal CRUD view stub: every handler echoes its own name as the body."""
    _json_encoder = _JSONEncoder
    def __init__(self, request):
        self.request = request
    def index(self, **kwargs):
        return Response('index')
    def show(self, **kwargs):
        return Response('show')
    def delete(self, **kwargs):
        return Response('delete')
    def __getattr__(self, attr):
        # Fallback for create/update/replace/etc.: echo the action name.
        return lambda *args, **kwargs: Response(attr)
class TestResourceGeneration(Test):
    """Route generation via ``add_resource_routes``.

    Verifies the collection/member URL patterns and route names produced
    for plain resources and for resources with path, name and
    config-route prefixes.
    """
    def test_get_resource_map(self):
        # The map is stored on the registry; the helper is a thin accessor.
        from nefertari.resource import get_resource_map
        request = mock.Mock()
        assert get_resource_map(request) == request.registry._resources_map
    def test_basic_resources(self):
        # Both a collection route and a member route (with {id}) register.
        from nefertari.resource import add_resource_routes
        add_resource_routes(self.config, DummyCrudView, 'message', 'messages')
        self.assertEqual(
            '/messages',
            route_path('messages', testing.DummyRequest())
        )
        self.assertEqual(
            '/messages/1',
            route_path('message', testing.DummyRequest(), id=1)
        )
    def test_resources_with_path_prefix(self):
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config,
            DummyCrudView,
            'message',
            'messages',
            path_prefix='/category/{category_id}'
        )
        self.assertEqual(
            '/category/2/messages',
            route_path('messages', testing.DummyRequest(), category_id=2)
        )
        self.assertEqual(
            '/category/2/messages/1',
            route_path('message', testing.DummyRequest(), id=1, category_id=2)
        )
    def test_resources_with_path_prefix_with_trailing_slash(self):
        # A trailing slash on path_prefix must not produce a double slash.
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config,
            DummyCrudView,
            'message',
            'messages',
            path_prefix='/category/{category_id}/'
        )
        self.assertEqual(
            '/category/2/messages',
            route_path('messages', testing.DummyRequest(), category_id=2)
        )
        self.assertEqual(
            '/category/2/messages/1',
            route_path('message', testing.DummyRequest(), id=1, category_id=2)
        )
    def test_resources_with_name_prefix(self):
        # name_prefix changes the route name, not the URL pattern.
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config,
            DummyCrudView,
            'message',
            'messages',
            name_prefix="special_"
        )
        self.assertEqual(
            '/messages/1',
            route_path('special_message', testing.DummyRequest(), id=1)
        )
    def test_resources_with_name_prefix_from_config(self):
        # config.route_prefix is prepended to both the URL and route name.
        from nefertari.resource import add_resource_routes
        self.config.route_prefix = 'api'
        add_resource_routes(
            self.config,
            DummyCrudView,
            'message',
            'messages',
            name_prefix='foo_'
        )
        self.assertEqual(
            '/api/messages/1',
            route_path('api_foo_message', testing.DummyRequest(), id=1)
        )
class DummyCrudRenderedView(object):
    """View stub whose handlers return bare strings, for renderer tests."""
    _json_encoder = _JSONEncoder
    def __init__(self, request):
        self.request = request
    def __getattr__(self, attr):
        # Every handler returns its own name; the renderer wraps the value.
        return lambda *args, **kwargs: attr
class TestResourceRecognition(Test):
    """End-to-end dispatch: HTTP verbs reach the expected view actions."""
    def setUp(self):
        from nefertari.resource import add_resource_routes
        self.config = _create_config()
        add_resource_routes(
            self.config,
            DummyCrudRenderedView,
            'message',
            'messages',
            renderer='string'
        )
        self.config.begin()
        self.app = TestApp(self.config.make_wsgi_app())
        self.collection_path = '/messages'
        self.collection_name = 'messages'
        self.member_path = '/messages/{id}'
        self.member_name = 'message'
    def test_get_collection(self):
        self.assertEqual(self.app.get('/messages').body, six.b('index'))
    def test_get_collection_json(self):
        # Re-registering with renderer='json' JSON-encodes the string body.
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config,
            DummyCrudRenderedView,
            'message',
            'messages',
            renderer='json'
        )
        self.assertEqual(self.app.get('/messages').body, six.b('"index"'))
    def test_get_collection_nefertari_json(self):
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config,
            DummyCrudRenderedView,
            'message',
            'messages',
            renderer='nefertari_json'
        )
        self.assertEqual(self.app.get('/messages').body, six.b('"index"'))
    def test_get_collection_no_renderer(self):
        # Without a renderer the plain string return cannot be adapted.
        from nefertari.resource import add_resource_routes
        add_resource_routes(
            self.config, DummyCrudRenderedView, 'message', 'messages')
        self.assertRaises(ValueError, self.app.get, '/messages')
    def test_post_collection(self):
        result = self.app.post('/messages').body
        self.assertEqual(result, six.b('create'))
    def test_head_collection(self):
        # HEAD carries GET's headers/status but an empty body.
        response = self.app.head('/messages')
        self.assertEqual(response.body, six.b(''))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.headers)
    def test_get_member(self):
        result = self.app.get('/messages/1').body
        self.assertEqual(result, six.b('show'))
    def test_head_member(self):
        response = self.app.head('/messages/1')
        self.assertEqual(response.body, six.b(''))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.headers)
    def test_put_member(self):
        # PUT maps to 'replace'; PATCH (next test) maps to 'update'.
        result = self.app.put('/messages/1').body
        self.assertEqual(result, six.b('replace'))
    def test_patch_member(self):
        result = self.app.patch('/messages/1').body
        self.assertEqual(result, six.b('update'))
    def test_delete_member(self):
        result = self.app.delete('/messages/1').body
        self.assertEqual(result, six.b('delete'))
class TestResource(Test):
    """``Resource.add``: default view paths, renderers, nesting, prefixes."""
    def test_get_default_view_path(self, *a):
        from nefertari.resource import Resource, get_default_view_path
        m = Resource(
            self.config,
            member_name='group_member',
            collection_name='group_members'
        )
        self.assertEqual(
            "test_resource.views.group_members:GroupMembersView",
            get_default_view_path(m)
        )
        # singular resource: the module/class derive from the member name
        m = Resource(self.config, member_name='group_member')
        self.assertEqual(
            "test_resource.views.group_member:GroupMemberView",
            get_default_view_path(m)
        )
    def test_get_default_view_path_resource_prefix(self, *a):
        # A resource prefix is folded into both module and class names.
        from nefertari.resource import Resource, get_default_view_path
        m = Resource(
            self.config,
            member_name='group_member',
            collection_name='group_members'
        )
        m.prefix = 'foo'
        self.assertEqual(
            "test_resource.views.foo_group_members:FooGroupMembersView",
            get_default_view_path(m)
        )
        # singular
        m = Resource(self.config, member_name='group_member')
        m.prefix = 'foo'
        self.assertEqual(
            "test_resource.views.foo_group_member:FooGroupMemberView",
            get_default_view_path(m)
        )
    @mock.patch('nefertari.view.trigger_events')
    def test_singular_resource(self, *a):
        # Singular resources (no collection name) expose member-style URLs
        # and member-only actions (replace/update/delete, no id segment).
        View = get_test_view_class()
        config = _create_config()
        root = config.get_root_resource()
        root.add('thing', view=View)
        grandpa = root.add('grandpa', 'grandpas', view=View)
        wife = grandpa.add('wife', view=View, renderer='string')
        wife.add('child', 'children', view=View)
        config.begin()
        app = TestApp(config.make_wsgi_app())
        self.assertEqual(
            '/grandpas/1/wife',
            route_path('grandpa:wife', testing.DummyRequest(), grandpa_id=1)
        )
        self.assertEqual(
            '/grandpas/1',
            route_path('grandpa', testing.DummyRequest(), id=1)
        )
        self.assertEqual(
            '/grandpas/1/wife/children/2',
            route_path('grandpa_wife:child', testing.DummyRequest(),
                       grandpa_id=1, id=2)
        )
        self.assertEqual(app.put('/grandpas').body, six.b('update_many'))
        self.assertEqual(app.head('/grandpas').body, six.b(''))
        self.assertEqual(app.options('/grandpas').body, six.b(''))
        self.assertEqual(app.delete('/grandpas/1').body, six.b('delete'))
        self.assertEqual(app.head('/grandpas/1').body, six.b(''))
        self.assertEqual(app.options('/grandpas/1').body, six.b(''))
        self.assertEqual(app.put('/thing').body, six.b('replace'))
        self.assertEqual(app.patch('/thing').body, six.b('update'))
        self.assertEqual(app.delete('/thing').body, six.b('delete'))
        self.assertEqual(app.head('/thing').body, six.b(''))
        self.assertEqual(app.options('/thing').body, six.b(''))
        self.assertEqual(app.put('/grandpas/1/wife').body, six.b('replace'))
        self.assertEqual(app.patch('/grandpas/1/wife').body, six.b('update'))
        self.assertEqual(app.delete('/grandpas/1/wife').body, six.b('delete'))
        self.assertEqual(app.head('/grandpas/1/wife').body, six.b(''))
        self.assertEqual(app.options('/grandpas/1/wife').body, six.b(''))
        self.assertEqual(six.b('show'), app.get('/grandpas/1').body)
        self.assertEqual(six.b('show'), app.get('/grandpas/1/wife').body)
        self.assertEqual(
            six.b('show'), app.get('/grandpas/1/wife/children/1').body)
    @mock.patch('nefertari.view.trigger_events')
    def test_renderer_override(self, *args):
        # resource.renderer and view._default_renderer are only used
        # when accept header is missing.
        # NOTE(review): the stub handlers return Response objects directly,
        # so every body below is the plain bytes 'index'; these requests
        # exercise the renderer-selection paths, not the rendered output.
        View = get_test_view_class()
        config = _create_config()
        r = config.get_root_resource()
        r.add('thing', 'things', renderer='json', view=View)
        r.add('2thing', '2things', renderer='json', view=View)
        r.add('3thing', '3things', view=View)  # defaults to nefertari_json
        config.begin()
        app = TestApp(config.make_wsgi_app())
        # no Accept header
        self.assertEqual(six.b('index'), app.get('/things').body)
        # Accept: text/plain
        self.assertEqual(six.b('index'), app.get('/things',
                         headers={'ACCEPT': 'text/plain'}).body)
        # Accept: application/json
        self.assertEqual(six.b('index'), app.get('/things',
                         headers={'ACCEPT': 'application/json'}).body)
        # no Accept header
        self.assertEqual(six.b('index'), app.get('/2things').body)
        # Accept: application/json
        self.assertEqual(six.b('index'), app.get('/2things',
                         headers={'ACCEPT': 'application/json'}).body)
        # Accept: text/plain
        self.assertEqual(six.b("index"), app.get('/2things',
                         headers={'ACCEPT': 'text/plain'}).body)
        # no renderer at registration: falls back to the view's
        # _default_renderer (nefertari_json)
        self.assertEqual(six.b('index'), app.get('/3things').body)
        self.assertEqual(six.b('index'), app.get('/3things',
                         headers={'ACCEPT': 'application/json'}).body)
        self.assertEqual(six.b('index'), app.get('/3things',
                         headers={'ACCEPT': 'text/plain'}).body)
        # unrecognised Accept value defaults to json
        self.assertEqual(six.b('index'), app.get('/3things',
                         headers={'ACCEPT': 'text/blablabla'}).body)
    @mock.patch('nefertari.view.trigger_events')
    def test_nonBaseView_default_renderer(self, *a):
        config = _create_config()
        r = config.get_root_resource()
        r.add('ything', 'ythings', view=get_test_view_class())
        config.begin()
        app = TestApp(config.make_wsgi_app())
        self.assertEqual(six.b('index'), app.get('/ythings').body)
    @mock.patch('nefertari.view.trigger_events')
    def test_nested_resources(self, *a):
        # Four levels of nesting must produce a routable deep URL.
        config = _create_config()
        root = config.get_root_resource()
        aa = root.add('a', 'as', view=get_test_view_class('A'))
        bb = aa.add('b', 'bs', view=get_test_view_class('B'))
        cc = bb.add('c', 'cs', view=get_test_view_class('C'))
        cc.add('d', 'ds', view=get_test_view_class('D'))
        config.begin()
        app = TestApp(config.make_wsgi_app())
        app.get('/as/1/bs/2/cs/3/ds/4')
    def test_add_resource_prefix(self, *a):
        # prefix= is reflected in the resource uid, route name and URL.
        config = _create_config()
        root = config.get_root_resource()
        resource = root.add(
            'message', 'messages',
            view=get_test_view_class('A'),
            prefix='api')
        assert resource.uid == 'api:message'
        config.begin()
        self.assertEqual(
            '/api/messages',
            route_path('api:messages', testing.DummyRequest())
        )
    def test_add_resource_view_args(self, *a):
        # view_args entries are set as attributes on the view class.
        config = _create_config()
        root = config.get_root_resource()
        view = get_test_view_class('A')
        assert not hasattr(view, 'foo')
        root.add('message', 'messages', view=view,
                 view_args={'foo': 'bar'})
        assert view.foo == 'bar'
    def test_nested_resource_id_name(self, *a):
        # id_name renames the parent id placeholder used by child routes.
        config = _create_config()
        root = config.get_root_resource()
        aa = root.add(
            'a', 'as', view=get_test_view_class('A'),
            id_name='super_id')
        aa.add('b', 'bs', view=get_test_view_class('B'))
        config.begin()
        self.assertEqual(
            '/as/1/bs',
            route_path('a:bs', testing.DummyRequest(), super_id=1)
        )
# @mock.patch('nefertari.resource.add_tunneling')
class TestMockedResource(Test):
    """Resource tree mechanics (uids, ancestors, copies) with
    ``add_resource_routes`` mocked out where only call arguments matter."""
    def test_get_root_resource(self, *args):
        # The root resource is a singleton per configurator.
        from nefertari.resource import Resource
        root = self.config.get_root_resource()
        w = root.add('whatver', 'whatevers', view=get_test_view_class())
        self.assertIsInstance(root, Resource)
        self.assertIsInstance(w, Resource)
        self.assertEqual(root, self.config.get_root_resource())
    def test_resource_repr(self, *args):
        r = self.config.get_root_resource()
        bl = r.add('blabla', view=get_test_view_class())
        assert "Resource(uid='blabla')" == str(bl)
    def test_resource_exists(self, *a):
        # Adding the same member name twice must be rejected.
        r = self.config.get_root_resource()
        r.add('blabla', view=get_test_view_class())
        self.assertRaises(ValueError, r.add, 'blabla')
    def test_get_ancestors(self, *args):
        from nefertari.resource import Resource
        m = Resource(self.config)
        self.assertEqual([], m.ancestors)
        gr = m.add('grandpa', 'grandpas', view=get_test_view_class())
        pa = m.add('parent', 'parents', parent=gr, view=get_test_view_class())
        ch = m.add('child', 'children', parent=pa, view=get_test_view_class())
        self.assertListEqual([gr, pa], ch.ancestors)
    def test_resource_uid(self, *arg):
        # uid is the colon-joined chain of member names from the root.
        from nefertari.resource import Resource
        m = Resource(self.config)
        self.assertEqual(m.uid, '')
        a = m.add('a', 'aa', view=get_test_view_class())
        self.assertEqual('a', a.uid)
        c = a.add('b', 'bb', view=get_test_view_class()).add(
            'c', 'cc', view=get_test_view_class())
        self.assertEqual('a:b:c', c.uid)
    @mock.patch('nefertari.resource.add_resource_routes')
    def test_add_resource_routes(self, *arg):
        # Each nesting level must pass the accumulated path/name prefixes.
        from nefertari.resource import Resource
        View = get_test_view_class()
        m_add_resource_routes = arg[0]
        m = Resource(self.config)
        g = m.add('grandpa', 'grandpas', view=View)
        m_add_resource_routes.assert_called_once_with(
            self.config,
            View,
            'grandpa',
            'grandpas',
            factory=None,
            http_cache=0,
            auth=False,
            renderer=View._default_renderer,
            path_prefix=''
        )
        pr = g.add('parent', 'parents', view=View)
        m_add_resource_routes.assert_called_with(
            self.config,
            View,
            'parent',
            'parents',
            factory=None,
            http_cache=0,
            auth=False,
            path_prefix='grandpas/{grandpa_id}',
            name_prefix='grandpa:',
            renderer=View._default_renderer
        )
        ch = pr.add('child', 'children', view=View)
        m_add_resource_routes.assert_called_with(
            self.config,
            View,
            'child',
            'children',
            factory=None,
            http_cache=0,
            auth=False,
            path_prefix='grandpas/{grandpa_id}/parents/{parent_id}',
            name_prefix='grandpa_parent:',
            renderer=View._default_renderer
        )
        self.assertEqual(ch.uid, 'grandpa:parent:child')
    @mock.patch('nefertari.resource.add_resource_routes')
    def test_add_resource_routes_with_parent_param(self, *arg):
        # parent= accepts either a Resource object or a uid string.
        from nefertari.resource import Resource
        View = get_test_view_class()
        m_add_resource_routes = arg[0]
        m = Resource(self.config)
        m.add('grandpa', 'grandpas', view=View)
        m.add('parent', 'parents', parent='grandpa', view=View)
        m_add_resource_routes.assert_called_with(
            self.config,
            View,
            'parent',
            'parents',
            factory=None,
            auth=False,
            http_cache=0,
            path_prefix='grandpas/{grandpa_id}',
            name_prefix='grandpa:',
            renderer='nefertari_json',
        )
        gm = m.add('grandma', 'grandmas', view=View)
        pa = m.add('parent', 'parents', parent=gm, view=View)
        m_add_resource_routes.assert_called_with(
            self.config,
            View,
            'parent',
            'parents',
            factory=None,
            auth=False,
            http_cache=0,
            path_prefix='grandmas/{grandma_id}',
            name_prefix='grandma:',
            renderer=View._default_renderer,
        )
        pa.add('child', 'children', parent='grandpa:parent', view=View)
        m_add_resource_routes.assert_called_with(
            self.config,
            View,
            'child',
            'children',
            factory=None,
            auth=False,
            http_cache=0,
            path_prefix='grandpas/{grandpa_id}/parents/{parent_id}',
            name_prefix='grandpa_parent:',
            renderer=View._default_renderer
        )
    @mock.patch('nefertari.resource.add_resource_routes')
    def test_add_resource_routes_from(self, *args):
        # add_from_child grafts a deep copy of a subtree onto another parent.
        View = get_test_view_class()
        root = self.config.get_root_resource()
        gm = root.add('grandma', 'grandmas', view=View)
        pa = gm.add('parent', 'parents', view=View)
        boy = pa.add('boy', 'boys', view=View)
        boy.add('child', 'children', view=View)
        girl = pa.add('girl', 'girls', view=View)
        self.assertEqual(len(root.resource_map), 5)
        gp = root.add('grandpa', 'grandpas', view=View)
        gp.add_from_child(pa, view=View)
        self.assertEqual(
            pa.children[0],
            root.resource_map['grandma:parent:boy']
        )
        self.assertEqual(
            gp.children[0].children[1],
            root.resource_map['grandpa:parent:girl']
        )
        self.assertEqual(len(root.resource_map), 10)
        # make sure these are not same objects but copies.
        self.assertNotEqual(girl, gp.children[0].children[1])
|
{
"content_hash": "a4c1d71cb5b7a9398086ba4143278a2d",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 78,
"avg_line_length": 32.37556561085973,
"alnum_prop": 0.5698579082226881,
"repo_name": "postatum/nefertari",
"id": "e1586f36ab76d127ff762f5be4f80ba848c6f577",
"size": "21465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "352754"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe.integrations.doctype.social_login_key.social_login_key import BaseUrlNotSetError
import unittest
class TestSocialLoginKey(unittest.TestCase):
    """Behaviour of the Social Login Key doctype."""
    def test_adding_frappe_social_login_provider(self):
        """Inserting a Frappe provider key without a base URL must fail."""
        provider = "Frappe"
        key = make_social_login_key(social_login_provider=provider)
        key.get_social_login_provider(provider, initialize=True)
        self.assertRaises(BaseUrlNotSetError, key.insert)
def make_social_login_key(**kwargs):
    """Build (without inserting) a Social Login Key document.

    Any field accepted by the doctype may be passed as a keyword argument;
    ``provider_name`` defaults to "Test OAuth2 Provider".
    """
    kwargs["doctype"] = "Social Login Key"
    # PEP 8 idiom: membership tests read `x not in y`, not `not x in y`.
    if "provider_name" not in kwargs:
        kwargs["provider_name"] = "Test OAuth2 Provider"
    return frappe.get_doc(kwargs)
def create_or_update_social_login_key():
    """Return the "frappe" Social Login Key, creating it if missing.

    Used as a fixture by other tests (connected app, oauth20). The key is
    (re)initialized against the current site URL, disabled for login,
    saved and committed, so callers see a persisted document.
    """
    try:
        social_login_key = frappe.get_doc("Social Login Key", "frappe")
    except frappe.DoesNotExistError:
        # First run on this site: start from a blank document.
        social_login_key = frappe.new_doc("Social Login Key")
    social_login_key.get_social_login_provider("Frappe", initialize=True)
    social_login_key.base_url = frappe.utils.get_url()
    social_login_key.enable_social_login = 0
    social_login_key.save()
    # Commit so the record persists for the tests that rely on it.
    frappe.db.commit()
    return social_login_key
|
{
"content_hash": "c6fe6e123bc6ecd051236a39bd3314bd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 92,
"avg_line_length": 34.628571428571426,
"alnum_prop": 0.7615511551155115,
"repo_name": "vjFaLk/frappe",
"id": "8a71045ff12c627e37b7b65d144d4945b4a08f9f",
"size": "1313",
"binary": false,
"copies": "1",
"ref": "refs/heads/parsimony-production",
"path": "frappe/integrations/doctype/social_login_key/test_social_login_key.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
}
|
""" Interface to the C++ Murtagh hierarchic clustering code
"""
from rdkit.ML.Cluster import Clusters
from rdkit.ML.Cluster.Clustering import MurtaghCluster, MurtaghDistCluster
import numpy
# Constants selecting the clustering criterion handed to the Murtagh code.
WARDS = 1
SLINK = 2
CLINK = 3
UPGMA = 4
MCQUITTY = 5
GOWER = 6
CENTROID = 7
# descriptions of the methods: (label, constant, long description) tuples.
# McQuitty and Gower have constants above but are not currently exposed.
methods = [
  ("Ward's Minimum Variance", WARDS, "Ward's Minimum Variance"),
  ('Average Linkage', UPGMA, 'Group Average Linkage (UPGMA)'),
  ('Single Linkage', SLINK, 'Single Linkage (SLINK)'),
  ('Complete Linkage', CLINK, 'Complete Linkage (CLINK)'),
  # ("McQuitty",MCQUITTY,"McQuitty's method"),
  # ("Gower",GOWER,"Gower's median method"),
  ("Centroid", CENTROID, "Centroid method"),
]
def _LookupDist(dists, i, j, n):
""" *Internal Use Only*
returns the distance between points i and j in the symmetric
distance matrix _dists_
"""
if i == j:
return 0.0
if i > j:
i, j = j, i
return dists[j * (j - 1) / 2 + i]
def _ToClusters(data, nPts, ia, ib, crit, isDistData=0):
  """ *Internal Use Only*

    Converts the results of the Murtagh clustering code into
    a cluster tree, which is returned in a single-entry list

    ia/ib hold, per agglomeration step, the 1-based indices of the two
    clusters merged; crit holds the merge criterion value for that step.
    NOTE(review): assumes at least one merge step (len(ia) >= 2);
    otherwise `newClust` below is never bound -- confirm upstream.
  """
  # Start with one leaf cluster per input point (1-based indices).
  cs = [None] * nPts
  for i in range(nPts):
    cs[i] = Clusters.Cluster(metric=0.0, data=i, index=(i + 1))
  nClus = len(ia) - 1
  for i in range(nClus):
    idx1 = ia[i] - 1
    idx2 = ib[i] - 1
    c1 = cs[idx1]
    c2 = cs[idx2]
    newClust = Clusters.Cluster(metric=crit[i], children=[c1, c2], index=nPts + i + 1)
    # The merged cluster replaces its first child's slot; the second
    # child's slot is intentionally left stale -- presumably the Murtagh
    # output only ever references surviving indices (verify).
    cs[idx1] = newClust
  return [newClust]
def ClusterData(data, nPts, method, isDistData=0):
  """ clusters the data points passed in and returns the cluster tree

   **Arguments**

     - data: rows of feature vectors (or, when _isDistData_ is set, a
       linearized symmetric distance matrix -- see below)

     - nPts: the number of points to be used

     - method: the clustering criterion; one of the module-level
       constants 'WARDS, SLINK, CLINK, UPGMA'

     - isDistData: set this toggle when _data_ is a distance matrix,
       stored symmetrically so that for i<j: d_ij = dists[j*(j-1)/2 + i]

   **Returns**

     - a single entry list with the cluster tree

  """
  data = numpy.array(data)
  if isDistData:
    ia, ib, crit = MurtaghDistCluster(data, nPts, method)
  else:
    # Feature-vector input: the C code also needs the dimensionality.
    nFeatures = data.shape[1]
    ia, ib, crit = MurtaghCluster(data, nPts, nFeatures, method)
  return _ToClusters(data, nPts, ia, ib, crit, isDistData=isDistData)
|
{
"content_hash": "2d375c1b0218c5b580abdd45b4fa2516",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 86,
"avg_line_length": 27.1010101010101,
"alnum_prop": 0.6470368989936638,
"repo_name": "jandom/rdkit",
"id": "852d2b53a3472f3dd78f9f2d530d4c04de7ef9c6",
"size": "2981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdkit/ML/Cluster/Murtagh.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "226290"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7893576"
},
{
"name": "CMake",
"bytes": "611439"
},
{
"name": "CSS",
"bytes": "3231"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "63047"
},
{
"name": "Java",
"bytes": "291815"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "29594"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15435"
},
{
"name": "Objective-C",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3119784"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "12651"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49429"
}
],
"symlink_target": ""
}
|
"""A distributed computation library for TF.
See [tensorflow/contrib/distribute/README.md](
https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)
for overview and examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.distribute.python.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrategy
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.parameter_server_strategy import ParameterServerStrategy
from tensorflow.contrib.distribute.python.step_fn import *
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
from tensorflow.python.distribute.cross_device_ops import *
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.distribute.distribute_coordinator import run_standard_tensorflow_server
from tensorflow.python.training.distribute import *
from tensorflow.python.training.distribution_strategy_context import *
from tensorflow.python.util.all_util import remove_undocumented
# Names re-exported as this package's public API; everything else pulled
# in by the wildcard imports above is stripped by remove_undocumented().
_allowed_symbols = [
    'AllReduceCrossDeviceOps',
    'CollectiveAllReduceStrategy',
    'CrossDeviceOps',
    'DistributeConfig',
    'DistributionStrategy',
    'DistributionStrategyExtended',
    'MirroredStrategy',
    'Monitor',
    'MultiWorkerAllReduce',
    'OneDeviceStrategy',
    'ParameterServerStrategy',
    'ReductionToOneDeviceCrossDeviceOps',
    'Step',
    'StandardInputStep',
    'StandardSingleLossStep',
    'ReplicaContext',
    'TPUStrategy',
    'get_cross_replica_context',
    'get_distribution_strategy',
    'get_loss_reduction',
    'get_replica_context',
    'has_distribution_strategy',
    'in_cross_replica_context',
    'require_replica_context',
    'run_standard_tensorflow_server',
    'UpdateContext',
]
remove_undocumented(__name__, _allowed_symbols)
|
{
"content_hash": "1857f9d788aad565bcd9c8e4f191bf59",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 107,
"avg_line_length": 37.03448275862069,
"alnum_prop": 0.7886405959031657,
"repo_name": "brchiu/tensorflow",
"id": "8ec73654e30e4967f318c558ba94301e84a206e4",
"size": "2837",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import os
import pandas
import unittest
import inspect
from px4tools.analysis import *
from px4tools.mapping import *
from px4tools.ulog import *
# logsysid is an optional feature (it appears to need an extra control
# dependency -- see the skip reason in test_logsysid); import failures are
# tolerated so the rest of the px4tools tests still run.
have_control = False
try:
    from px4tools.logsysid import *
    have_control = True
except ImportError as e:
    print(e)
# Directory containing this test file, used to locate the bundled logs.
TEST_PATH = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
class Test(unittest.TestCase):
    """Smoke tests that run the px4tools pipeline over bundled log files."""
    def test_iekf_extract_P(self):
        """Resample a ULog at 10 Hz and extract the 28-state covariances."""
        dt = 0.1
        filename = os.path.join(TEST_PATH, 'log', 'vx0_vy1_vz0_alt3.ulg')
        print("filename{:s}".format(filename))
        data = read_ulog(filename).concat(dt=dt)
        data = compute_data(data)
        res = estimator_analysis(data, plot=False)
        covariance_list = extract_P(
            data, msg_name='t_estimator_status_0__f_covariances_', num_states=28)
    def test_process_data(self):
        """Exercise the CSV processing helpers end to end (no assertions --
        this only checks nothing raises)."""
        filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
        print("filename: {:s}".format(filename))
        with open(filename, 'r') as f:
            data = process_data(pandas.read_csv(f))
        data = process_lpe_health(data)
        data = project_lat_lon(data)
        data = process_lpe_health(data)
        find_meas_period(data['LPOS_VX'])
        # all_new_sample(data['LPOS_VX'])
        new_sample(data['LPOS_VX'])
        find_lpe_gains(data, printing=True)
        set_time_series(data)
        get_auto_data(data)
        get_float_data(data)
        isfloatarray(data['LPOS_VX'])
        octa_cox_data_to_ss(data)
        # fails on miniconda, windows
        # filter_finite(data)
    @unittest.skip("skip sysid, controllib issue")
    def test_logsysid(self):
        """System-identification run; skipped until the controllib issue is fixed."""
        if not have_control:
            return
        filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
        print("filename: {:s}".format(filename))
        with open(filename, 'r') as f:
            log_data = pandas.read_csv(f)
        gains = control_design(log_data)
    @unittest.skip("skip plotting test for CI")
    def test_plotting(self):
        """Drive every plotting helper once; skipped on CI (no display)."""
        filename = os.path.join(TEST_PATH, 'log', '01_07_59.csv')
        print("filename: {:s}".format(filename))
        with open(filename, 'r') as f:
            data = process_data(pandas.read_csv(f))
        data = process_lpe_health(data)
        data = project_lat_lon(data)
        alt_analysis(data)
        statistics(data, ['LPOS_VX'], plot=True)
        data = process_lpe_health(data)
        plot_modes(data)
        find_meas_period(data['LPOS_VX'])
        plot_control_loops(data)
        plot_position_loops(data)
        plot_velocity_loops(data)
        plot_attitude_rate_loops(data)
        plot_attitude_loops(data)
        plot_faults(data)
        pos_analysis(data)
|
{
"content_hash": "167f68c238429cd976da31698323f179",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 81,
"avg_line_length": 35.2875,
"alnum_prop": 0.581650726177825,
"repo_name": "dronecrew/px4tools",
"id": "91dcbf6b2a0a9d1dfd65c6f1020f280b128d5d40",
"size": "2823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_analysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "182319"
}
],
"symlink_target": ""
}
|
from haystack import indexes
from speeches.models import Section, Speech
# @note We want to use decay functions to decrease the relevance of older
# speeches, but Haystack doesn't support scoring out-of-the-box.
# @see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
# @see http://www.stamkracht.com/extending-haystacks-elasticsearch-backend/
class SpeechIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over individual Speech records."""
    # @see http://django-haystack.readthedocs.org/en/latest/searchfield_api.html
    # The speech body is the primary document; titles get a 1.5x boost.
    text = indexes.CharField(document=True, model_attr='text')
    title = indexes.CharField(model_attr='title', boost=1.5)
    # Nullable: not every speech has a linked speaker or a start date.
    speaker = indexes.IntegerField(model_attr='speaker_id', null=True)
    start_date = indexes.DateTimeField(model_attr='start_date', null=True)
    # @see http://django-haystack.readthedocs.org/en/latest/searchindex_api.html
    # @see https://github.com/toastdriven/django-haystack/blob/master/haystack/indexes.py
    def get_model(self):
        """Index Speech objects."""
        return Speech
    def index_queryset(self, using=None):
        # Fetch the section in the same query while building the index.
        return self.get_model()._default_manager.select_related('section')
    def load_all_queryset(self):
        """
        Reduce the number of SQL queries to render search results. We might
        alternatively store a rendered result or more fields in ElasticSearch.
        """
        return self.get_model()._default_manager.all().prefetch_related('speaker', 'section', 'section__parent', 'section__parent__parent')
    # @see http://django-haystack.readthedocs.org/en/latest/boost.html
    def prepare(self, obj):
        """
        Decrease the relevance of written notices, of narrative and of speeches
        by anonymous speakers and by roles, including the Speaker, the Sergeant-
        at-Arms and the clerks.

        A narrative may contain the only mention of a bill at its introduction;
        therefore, we must include narratives.
        """
        data = super(SpeechIndex, self).prepare(obj)
        # No linked speaker and not one of the named offices -> down-weight.
        if not obj.speaker_id and obj.speaker_display not in ('THE PREMIER', 'THE LIEUTENANT GOVERNOR', 'THE ADMINISTRATOR'):
            data['boost'] = 0.5
        elif obj.section.title == 'NOTICES OF MOTION UNDER RULE 32(3)':
            data['boost'] = 0.5
        return data
    def get_updated_field(self):
        # Timestamp field haystack uses for incremental (--age) reindexing.
        return 'modified'
class SectionIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='title')
def get_model(self):
return Section
def load_all_queryset(self):
"""
Reduce the number of SQL queries to render search results. We might
alternatively store a rendered result or more fields in ElasticSearch.
"""
return self.get_model()._default_manager.all().prefetch_related('parent', 'parent__parent')
def get_updated_field(self):
return 'modified'
|
{
"content_hash": "8262102d6941cfc09346a2b6d76d58c1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 139,
"avg_line_length": 45.171875,
"alnum_prop": 0.6911103424420616,
"repo_name": "ciudadanointeligente/openhousens.ca",
"id": "55f164debf73c2f0bf3f9b114bdcd0ebc9f8aa23",
"size": "2891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legislature/search_indexes.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
opbeat.utils
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2015 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from opbeat.utils import six
default_ports = {
"https": 433,
"http": 80,
"postgresql": 5432
}
def varmap(func, var, context=None, name=None):
"""
Executes ``func(key_name, value)`` on all values
recurisively discovering dict and list scoped
values.
"""
if context is None:
context = {}
objid = id(var)
if objid in context:
return func(name, '<...>')
context[objid] = 1
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context, k)) for k, v in six.iteritems(var))
elif isinstance(var, (list, tuple)):
ret = [varmap(func, f, context, name) for f in var]
else:
ret = func(name, var)
del context[objid]
return ret
def disabled_due_to_debug(opbeat_config, debug):
"""
Compares module and app configs to determine whether to log to Opbeat
:param opbeat_config: Dictionary containing module config
:param debug: Boolean denoting app DEBUG state
:return: Boolean True if logging is disabled
"""
return debug and not opbeat_config.get('DEBUG', False)
def get_name_from_func(func):
# If no view was set we ignore the request
module = func.__module__
if hasattr(func, '__name__'):
view_name = func.__name__
else: # Fall back if there's no __name__
view_name = func.__class__.__name__
return '{0}.{1}'.format(module, view_name)
|
{
"content_hash": "c85757146a6d8c89446e2cb8e6329c5e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 26.080645161290324,
"alnum_prop": 0.6196660482374768,
"repo_name": "tarkatronic/opbeat_python",
"id": "3dee433258aa80f02161ddde5b62e5ca99b23cec",
"size": "1617",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opbeat/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "81877"
},
{
"name": "HTML",
"bytes": "377"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "407655"
}
],
"symlink_target": ""
}
|
"""
This module provides
"""
from __future__ import unicode_literals, division, print_function
from collections import namedtuple, OrderedDict
from monty.string import is_string
from monty.json import MSONable #, MontyEncoder
from monty.functools import lazy_property
from pymatgen.core.libxcfunc import LibxcFunc
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "3.0.0" # The libxc version used to generate this file!
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2016"
class XcFunc(MSONable):
"""
This object stores information about the XC correlation functional.
Client code usually creates the object by calling the class methods:
- from_name
- from_type_name
or code-specific methods such as:
- from_abinit_ixc
Ax XcFunc instance is hashable and can therefore be used as key in dictionaries.
The implementation is based on the libxc conventions
and is inspired to the XML specification for atomic PAW datasets documented at:
https://wiki.fysik.dtu.dk/gpaw/setups/pawxml.html
For convenience, part of the pawxml documentation is reported here.
The xc_functional element defines the exchange-correlation functional used for
generating the dataset. It has the two attributes type and name.
The type attribute can be LDA, GGA, MGGA or HYB.
The name attribute designates the exchange-correlation functional
and can be specified in the following ways:
[1] Taking the names from the LibXC library. The correlation and exchange names
are stripped from their XC_ part and combined with a + sign.
Here is an example for an LDA functional:
<xc_functional type="LDA", name="LDA_X+LDA_C_PW"/>
and this is what PBE will look like:
<xc_functional type="GGA", name="GGA_X_PBE+GGA_C_PBE"/>
[2] Using one of the following pre-defined aliases:
type name LibXC equivalent Reference
LDA PW LDA_X+LDA_C_PW LDA exchange; Perdew, Wang, PRB 45, 13244 (1992)
GGA PW91 GGA_X_PW91+GGA_C_PW91 Perdew et al PRB 46, 6671 (1992)
GGA PBE GGA_X_PBE+GGA_C_PBE Perdew, Burke, Ernzerhof, PRL 77, 3865 (1996)
GGA RPBE GGA_X_RPBE+GGA_C_PBE Hammer, Hansen, Nørskov, PRB 59, 7413 (1999)
GGA revPBE GGA_X_PBE_R+GGA_C_PBE Zhang, Yang, PRL 80, 890 (1998)
GGA PBEsol GGA_X_PBE_SOL+GGA_C_PBE_SOL Perdew et al, PRL 100, 136406 (2008)
GGA AM05 GGA_X_AM05+GGA_C_AM05 Armiento, Mattsson, PRB 72, 085108 (2005)
GGA BLYP GGA_X_B88+GGA_C_LYP Becke, PRA 38, 3098 (1988); Lee, Yang, Parr, PRB 37, 785
"""
type_name = namedtuple("type_name", "type, name")
xcf = LibxcFunc
defined_aliases = OrderedDict([ # (x, c) --> type_name
# LDAs
((xcf.LDA_X, xcf.LDA_C_PW), type_name("LDA", "PW")), # ixc 7
((xcf.LDA_X, xcf.LDA_C_PW_MOD), type_name("LDA", "PW_MOD")),
((xcf.LDA_X, xcf.LDA_C_PZ), type_name("LDA", "PZ")), # ixc 2
((xcf.LDA_X, xcf.LDA_C_WIGNER), type_name("LDA", "W")), # ixc 4
((xcf.LDA_X, xcf.LDA_C_HL), type_name("LDA", "HL")), # ixc 5
((xcf.LDA_X, xcf.LDA_C_GL), type_name("LDA", "GL")),
((xcf.LDA_X, xcf.LDA_C_VWN), type_name("LDA", "VWN")),
# GGAs
((xcf.GGA_X_PW91, xcf.GGA_C_PW91), type_name("GGA", "PW91")),
((xcf.GGA_X_PBE, xcf.GGA_C_PBE), type_name("GGA", "PBE")),
((xcf.GGA_X_RPBE, xcf.GGA_C_PBE), type_name("GGA", "RPBE")), # ixc 15
((xcf.GGA_X_PBE_R, xcf.GGA_C_PBE), type_name("GGA", "revPBE")), # ixc 14
((xcf.GGA_X_PBE_SOL, xcf.GGA_C_PBE_SOL), type_name("GGA", "PBEsol")),
((xcf.GGA_X_AM05, xcf.GGA_C_AM05), type_name("GGA", "AM05")),
((xcf.GGA_X_B88, xcf.GGA_C_LYP), type_name("GGA", "BLYP")),
])
del type_name
# Correspondence between Abinit ixc notation and libxc notation.
# see: http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#ixc
# and 42_libpaw/m_pawpsp.F90 for the implementation.
# Fortunately, all the other cases are handled with libxc.
abinitixc_to_libxc = {
1: dict(xc=xcf.LDA_XC_TETER93),
2: dict(x=xcf.LDA_X, c=xcf.LDA_C_PZ), # PZ 001009
4: dict(x=xcf.LDA_X, c=xcf.LDA_C_WIGNER), # W
5: dict(x=xcf.LDA_X, c=xcf.LDA_C_HL), # HL
7: dict(x=xcf.LDA_X, c=xcf.LDA_C_PW), # PW 001012
11: dict(x=xcf.GGA_X_PBE, c=xcf.GGA_C_PBE), # PBE
14: dict(x=xcf.GGA_X_PBE_R, c=xcf.GGA_C_PBE), # revPBE
15: dict(x=xcf.GGA_X_RPBE, c=xcf.GGA_C_PBE), # RPBE
}
del xcf
@classmethod
def aliases(cls):
"""List of registered names."""
return [nt.name for nt in cls.defined_aliases.values()]
@classmethod
def asxc(cls, obj):
"""Convert object into Xcfunc."""
if isinstance(obj, cls): return obj
if is_string(obj): return cls.from_name(obj)
raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj)))
@classmethod
def from_abinit_ixc(cls, ixc):
"""Build the object from Abinit ixc (integer)"""
ixc = int(ixc)
if ixc >= 0:
return cls(**cls.abinitixc_to_libxc[ixc])
else:
# libxc notation employed in Abinit: a six-digit number in the form XXXCCC or CCCXXX
#ixc = str(ixc)
#assert len(ixc[1:]) == 6
#first, last = ixc[1:4], ixc[4:]
ixc = abs(ixc)
first = ixc // 1000
last = ixc - first * 1000
x, c = LibxcFunc(int(first)), LibxcFunc(int(last))
if not x.is_x_kind: x, c = c, x # Swap
assert x.is_x_kind and c.is_c_kind
return cls(x=x, c=c)
@classmethod
def from_name(cls, name):
"""Build the object from one of the registered names"""
return cls.from_type_name(None, name)
@classmethod
def from_type_name(cls, typ, name):
"""Build the object from (type, name)."""
# Try aliases first.
for k, nt in cls.defined_aliases.items():
if typ is not None and typ != nt.type: continue
if name == nt.name:
if len(k) == 1: return cls(xc=k)
if len(k) == 2: return cls(x=k[0], c=k[1])
raise ValueError("Wrong key: %s" % k)
# At this point, we should have something in the form
# name="GGA_X_PBE+GGA_C_PBE" or name=""LDA_XC_TETER93"
if "+" in name:
assert typ is None
x, c = (s.strip() for s in name.split("+"))
x, c = LibxcFunc[x], LibxcFunc[c]
return cls(x=x, c=c)
else:
assert typ is None
xc = LibxcFunc[name]
return cls(xc=xc)
if typ is None:
raise ValueError("Cannot find name=%s in defined_aliases" % name)
else:
raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name))
@classmethod
def from_dict(cls, d):
"""
Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
"""
return cls(xc=d.get("xc"), x=d.get("x"), c=d.get("c"))
def as_dict(self):
"""
Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
# print("in as_dict", type(self.x), type(self.c), type(self.xc))
if self.x is not None: d["x"] = self.x.as_dict()
if self.c is not None: d["c"] = self.c.as_dict()
if self.xc is not None: d["xc"] = self.xc.as_dict()
return d
# def to_json(self):
# """
# Returns a json string representation of the MSONable object.
# """
# return json.dumps(self.as_dict()) #, cls=MontyEncoder)
def __init__(self, xc=None, x=None, c=None):
"""
Args:
xc: LibxcFunc for XC functional.
x, c: LibxcFunc for exchange and correlation part. Mutually exclusive with xc.
"""
# Consistency check
if xc is None:
if x is None or c is None:
raise ValueError("x or c must be specified when xc is None")
else:
if x is not None or c is not None:
raise ValueError("x and c should be None when xc is specified")
self.xc, self.x, self.c = xc, x, c
@lazy_property
def type(self):
"""The type of the functional."""
if self.xc in self.defined_aliases: return self.defined_aliases[self.xc].type
xc = (self.x, self.c)
if xc in self.defined_aliases: return self.defined_aliases[xc].type
# If self is not in defined_aliases, use LibxcFunc family
if self.xc is not None: return self.xc.family
return "+".join([self.x.family, self.c.family])
@lazy_property
def name(self):
"""
The name of the functional. If the functional is not found in the aliases,
the string has the form X_NAME+C_NAME
"""
if self.xc in self.defined_aliases: return self.defined_aliases[self.xc].name
xc = (self.x, self.c)
if xc in self.defined_aliases: return self.defined_aliases[xc].name
if self.xc is not None: return self.xc.name
return "+".join([self.x.name, self.c.name])
def __repr__(self):
return "%s" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if other is None: return False
if isinstance(other, XcFunc): return self.name == other.name
# assume other is a string
return self.name == other
def __ne__(self, other):
return not self == other
# @property
# def refs(self):
#def info_dict()
# if self.xc is not None:
# return {"xc", self.xc.info_dict}
# else:
# return {"x", self.x.info_dict, "c", self.c.info_dict}
|
{
"content_hash": "1a9177477140d322b38288cc6f93820b",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 105,
"avg_line_length": 39.396153846153844,
"alnum_prop": 0.5768817729180904,
"repo_name": "tallakahath/pymatgen",
"id": "2eb710ad76d9465cfae831eafc3c9b699f953b96",
"size": "10353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/core/xcfunc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5529281"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
}
|
from yaml import load
import os
DEFAULT_CONFIG_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = os.environ.get('WW_CONFIG_DIR', DEFAULT_CONFIG_DIR)
CONFIG_PATH = os.path.join(CONFIG_DIR, 'config.yml')
# Open our yaml config and override settings values with it's config
try:
config_file = open(CONFIG_PATH)
for key, value in load(config_file.read()).items():
globals()[key] = value
config_file.close()
except Exception, e:
print "Could not find, or open config located at %s" % CONFIG_PATH
print "The error occured was: %s" % e
|
{
"content_hash": "bccd5385b227955470494f81a6549c84",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 33.705882352941174,
"alnum_prop": 0.6928446771378709,
"repo_name": "osu-cass/working-waterfronts-api",
"id": "32656d8d924097fc8cfcd0ede9d57c0b6874d1d7",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "working_waterfronts/yaml_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9349"
},
{
"name": "HTML",
"bytes": "24058"
},
{
"name": "Python",
"bytes": "250409"
},
{
"name": "Ruby",
"bytes": "867"
}
],
"symlink_target": ""
}
|
""" Missing statistics functions"""
from _NetworKit import gini
|
{
"content_hash": "fb265b0793d104f30e8fd3a5325d930e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.7538461538461538,
"repo_name": "fmaschler/networkit",
"id": "9a16206ed9db2e4a4f17ce258147ea79a5661a44",
"size": "65",
"binary": false,
"copies": "3",
"ref": "refs/heads/SCD-weighted",
"path": "networkit/stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10112"
},
{
"name": "C++",
"bytes": "2589116"
},
{
"name": "CSS",
"bytes": "16109"
},
{
"name": "HTML",
"bytes": "10110"
},
{
"name": "JavaScript",
"bytes": "4583"
},
{
"name": "Jupyter Notebook",
"bytes": "35441"
},
{
"name": "Matlab",
"bytes": "238"
},
{
"name": "Python",
"bytes": "606841"
},
{
"name": "Shell",
"bytes": "846"
},
{
"name": "TeX",
"bytes": "5547"
}
],
"symlink_target": ""
}
|
import importlib
import os
from . import global_settings as gl
class _Settings:
def __init__(self):
settings = os.environ.get("GLUE_SETTINGS_MODULE")
self.settings = importlib.import_module(settings)
self._setup()
def _setup(self):
for d in dir(self.settings):
if d.isupper():
attr = getattr(self.settings, d)
self.__dict__[d] = attr
for d in dir(gl):
if not d in self.__dict__:
attr = getattr(gl, d)
self.__dict__[d] = attr
if self.CUDA_VISIBLE_DEVICES != None:
os.environ.setdefault("CUDA_VISIBLE_DEVICES", str(self.CUDA_VISIBLE_DEVICES))
def add_arguments(self, parser):
allowed_types = [str, bool, int, float]
for arg in filter(lambda x: x.isupper(), dir(self)):
attr = getattr(self, arg)
arg_type = type(attr)
if arg_type in allowed_types:
arg = arg.lower()
add_args = {'type': arg_type}
if arg_type == bool:
add_args['action'] = 'store_true'
parser.add_argument(f'--{arg}', **add_args)
def apply_arguments(self, **kwargs):
print(kwargs)
for k, v in kwargs.items():
if v == None:
continue
k = k.upper()
if k in dir(self):
setattr(self, k, v)
settings = _Settings()
|
{
"content_hash": "101250f453a84ec70021f3cdab41fe0e",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 89,
"avg_line_length": 26.267857142857142,
"alnum_prop": 0.504418762746431,
"repo_name": "WhatDo/FlowFairy",
"id": "ef63be908a5dea4bdc2e4546afc3127bea6e5b25",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowfairy/conf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12429"
},
{
"name": "Python",
"bytes": "13818"
}
],
"symlink_target": ""
}
|
import ctypes
# Binary Ninja components
import binaryninja
from binaryninja import highlight
from binaryninja import _binaryninjacore as core
from binaryninja.enums import BranchType, HighlightColorStyle, HighlightStandardColor, InstructionTextTokenType
# 2-3 compatibility
from binaryninja import range
class BasicBlockEdge(object):
def __init__(self, branch_type, source, target, back_edge, fall_through):
self._type = branch_type
self._source = source
self._target = target
self._back_edge = back_edge
self._fall_through = fall_through
def __repr__(self):
if self._type == BranchType.UnresolvedBranch:
return "<%s>" % BranchType(self._type).name
elif self._target.arch:
return "<%s: %s@%#x>" % (BranchType(self._type).name, self._target.arch.name, self._target.start)
else:
return "<%s: %#x>" % (BranchType(self._type).name, self._target.start)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self._type, self._source, self._target, self._back_edge, self._fall_through) == \
(other._type, other._source, other._target, other._back_edge, other._fall_through)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self._type, self._source, self._target, self.back_edge, self.fall_through))
@property
def type(self):
""" """
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def source(self):
""" """
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def target(self):
""" """
return self._target
@target.setter
def target(self, value):
self._target = value
@property
def back_edge(self):
""" """
return self._back_edge
@back_edge.setter
def back_edge(self, value):
self._back_edge = value
@property
def fall_through(self):
""" """
return self._fall_through
@fall_through.setter
def fall_through(self, value):
self._fall_through = value
class BasicBlock(object):
def __init__(self, handle, view = None):
self._view = view
self.handle = core.handle_of_type(handle, core.BNBasicBlock)
self._arch = None
self._func = None
self._instStarts = None
self._instLengths = None
def __del__(self):
core.BNFreeBasicBlock(self.handle)
def __repr__(self):
arch = self.arch
if arch:
return "<block: %s@%#x-%#x>" % (arch.name, self.start, self.end)
else:
return "<block: %#x-%#x>" % (self.start, self.end)
def __len__(self):
return int(core.BNGetBasicBlockLength(self.handle))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self.start, self.end, self.arch.name))
def __setattr__(self, name, value):
try:
object.__setattr__(self, name, value)
except AttributeError:
raise AttributeError("attribute '%s' is read only" % name)
def __iter__(self):
if self._instStarts is None:
# don't add instruction start cache--the object is likely ephemeral
idx = self.start
while idx < self.end:
data = self._view.read(idx, min(self.arch.max_instr_length, self.end - idx))
inst_text = self.arch.get_instruction_text(data, idx)
if inst_text[1] == 0:
break
yield inst_text
idx += inst_text[1]
else:
for start, length in zip(self._instStarts, self._instLengths):
inst_text = self.arch.get_instruction_text(self._view.read(start, length), start)
if inst_text[1] == 0:
break
yield inst_text
def __getitem__(self, i):
self._buildStartCache()
if isinstance(i, slice):
return [self[index] for index in range(*i.indices(len(self._instStarts)))]
start = self._instStarts[i]
length = self._instLengths[i]
data = self._view.read(start, length)
return self.arch.get_instruction_text(data, start)
def _buildStartCache(self):
if self._instStarts is None:
# build the instruction start cache
self._instLengths = []
self._instStarts = []
start = self.start
while start < self.end:
length = self.view.get_instruction_length(start)
if length == 0: # invalid instruction. avoid infinite loop
break
self._instLengths.append(length)
self._instStarts.append(start)
start += length
def _create_instance(self, handle, view):
"""Internal method used to instantiate child instances"""
return BasicBlock(handle, view)
@property
def instruction_count(self):
self._buildStartCache()
return len(self._instStarts)
@property
def function(self):
"""Basic block function (read-only)"""
if self._func is not None:
return self._func
func = core.BNGetBasicBlockFunction(self.handle)
if func is None:
return None
self._func =binaryninja.function.Function(self._view, func)
return self._func
@property
def view(self):
"""Binary view that contains the basic block (read-only)"""
if self._view is not None:
return self._view
self._view = self.function.view
return self._view
@property
def arch(self):
"""Basic block architecture (read-only)"""
# The arch for a BasicBlock isn't going to change so just cache
# it the first time we need it
if self._arch is not None:
return self._arch
arch = core.BNGetBasicBlockArchitecture(self.handle)
if arch is None:
return None
self._arch = binaryninja.architecture.CoreArchitecture._from_cache(arch)
return self._arch
@property
def source_block(self):
"""Basic block source block (read-only)"""
block = core.BNGetBasicBlockSource(self.handle)
if block is None:
return None
return BasicBlock(block, self._view)
@property
def start(self):
"""Basic block start (read-only)"""
return core.BNGetBasicBlockStart(self.handle)
@property
def end(self):
"""Basic block end (read-only)"""
return core.BNGetBasicBlockEnd(self.handle)
@property
def length(self):
"""Basic block length (read-only)"""
return core.BNGetBasicBlockLength(self.handle)
@property
def index(self):
"""Basic block index in list of blocks for the function (read-only)"""
return core.BNGetBasicBlockIndex(self.handle)
@property
def outgoing_edges(self):
"""List of basic block outgoing edges (read-only)"""
count = ctypes.c_ulonglong(0)
edges = core.BNGetBasicBlockOutgoingEdges(self.handle, count)
result = []
for i in range(0, count.value):
branch_type = BranchType(edges[i].type)
if edges[i].target:
target = self._create_instance(core.BNNewBasicBlockReference(edges[i].target), self.view)
else:
target = None
result.append(BasicBlockEdge(branch_type, self, target, edges[i].backEdge, edges[i].fallThrough))
core.BNFreeBasicBlockEdgeList(edges, count.value)
return result
@property
def incoming_edges(self):
"""List of basic block incoming edges (read-only)"""
count = ctypes.c_ulonglong(0)
edges = core.BNGetBasicBlockIncomingEdges(self.handle, count)
result = []
for i in range(0, count.value):
branch_type = BranchType(edges[i].type)
if edges[i].target:
target = self._create_instance(core.BNNewBasicBlockReference(edges[i].target), self.view)
else:
target = None
result.append(BasicBlockEdge(branch_type, target, self, edges[i].backEdge, edges[i].fallThrough))
core.BNFreeBasicBlockEdgeList(edges, count.value)
return result
@property
def has_undetermined_outgoing_edges(self):
"""Whether basic block has undetermined outgoing edges (read-only)"""
return core.BNBasicBlockHasUndeterminedOutgoingEdges(self.handle)
@property
def can_exit(self):
"""Whether basic block can return or is tagged as 'No Return'"""
return core.BNBasicBlockCanExit(self.handle)
@can_exit.setter
def can_exit(self, value):
"""Sets whether basic block can return or is tagged as 'No Return'"""
BNBasicBlockSetCanExit(self.handle, value)
@property
def has_invalid_instructions(self):
"""Whether basic block has any invalid instructions (read-only)"""
return core.BNBasicBlockHasInvalidInstructions(self.handle)
@property
def dominators(self):
"""List of dominators for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominators(self.handle, count, False)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def post_dominators(self):
"""List of dominators for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominators(self.handle, count, True)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def strict_dominators(self):
"""List of strict dominators for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockStrictDominators(self.handle, count, False)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def immediate_dominator(self):
"""Immediate dominator of this basic block (read-only)"""
result = core.BNGetBasicBlockImmediateDominator(self.handle, False)
if not result:
return None
return self._create_instance(result, self.view)
@property
def immediate_post_dominator(self):
"""Immediate dominator of this basic block (read-only)"""
result = core.BNGetBasicBlockImmediateDominator(self.handle, True)
if not result:
return None
return self._create_instance(result, self.view)
@property
def dominator_tree_children(self):
"""List of child blocks in the dominator tree for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominatorTreeChildren(self.handle, count, False)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def post_dominator_tree_children(self):
"""List of child blocks in the post dominator tree for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominatorTreeChildren(self.handle, count, True)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def dominance_frontier(self):
"""Dominance frontier for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominanceFrontier(self.handle, count, False)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def post_dominance_frontier(self):
"""Post dominance frontier for this basic block (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetBasicBlockDominanceFrontier(self.handle, count, True)
result = []
for i in range(0, count.value):
result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def annotations(self):
"""List of automatic annotations for the start of this block (read-only)"""
return self.function.get_block_annotations(self.start, self.arch)
@property
def disassembly_text(self):
"""
``disassembly_text`` property which returns a list of binaryninja.function.DisassemblyTextLine objects for the current basic block.
:Example:
>>> current_basic_block.disassembly_text
[<0x100000f30: _main:>, ...]
"""
return self.get_disassembly_text()
@property
def highlight(self):
"""Gets or sets the highlight color for basic block
:Example:
>>> current_basic_block.highlight = HighlightStandardColor.BlueHighlightColor
>>> current_basic_block.highlight
<color: blue>
"""
return highlight.HighlightColor._from_core_struct(core.BNGetBasicBlockHighlight(self.handle))
@highlight.setter
def highlight(self, value):
self.set_user_highlight(value)
@property
def is_il(self):
"""Whether the basic block contains IL"""
return core.BNIsILBasicBlock(self.handle)
@property
def is_low_level_il(self):
"""Whether the basic block contains Low Level IL"""
return core.BNIsLowLevelILBasicBlock(self.handle)
@property
def is_medium_level_il(self):
"""Whether the basic block contains Medium Level IL"""
return core.BNIsMediumLevelILBasicBlock(self.handle)
@classmethod
def get_iterated_dominance_frontier(self, blocks):
if len(blocks) == 0:
return []
block_set = (ctypes.POINTER(core.BNBasicBlock) * len(blocks))()
for i in range(len(blocks)):
block_set[i] = blocks[i].handle
count = ctypes.c_ulonglong()
out_blocks = core.BNGetBasicBlockIteratedDominanceFrontier(block_set, len(blocks), count)
result = []
for i in range(0, count.value):
result.append(BasicBlock(core.BNNewBasicBlockReference(out_blocks[i]), blocks[0].view))
core.BNFreeBasicBlockList(out_blocks, count.value)
return result
def mark_recent_use(self):
core.BNMarkBasicBlockAsRecentlyUsed(self.handle)
def get_disassembly_text(self, settings=None):
"""
``get_disassembly_text`` returns a list of binaryninja.function.DisassemblyTextLine objects for the current basic block.
:param DisassemblySettings settings: (optional) DisassemblySettings object
:Example:
>>> current_basic_block.get_disassembly_text()
[<0x100000f30: _main:>, <0x100000f30: push rbp>, ... ]
"""
settings_obj = None
if settings:
settings_obj = settings.handle
count = ctypes.c_ulonglong()
lines = core.BNGetBasicBlockDisassemblyText(self.handle, settings_obj, count)
result = []
for i in range(0, count.value):
addr = lines[i].addr
if (lines[i].instrIndex != 0xffffffffffffffff) and hasattr(self, 'il_function'):
il_instr = self.il_function[lines[i].instrIndex] # pylint: disable=no-member
else:
il_instr = None
color = highlight.HighlightColor._from_core_struct(lines[i].highlight)
tokens = binaryninja.function.InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
result.append(binaryninja.function.DisassemblyTextLine(tokens, addr, il_instr, color))
core.BNFreeDisassemblyTextLines(lines, count.value)
return result
def set_auto_highlight(self, color):
"""
``set_auto_highlight`` highlights the current BasicBlock with the supplied color.
.. warning:: Use only in analysis plugins. Do not use in regular plugins, as colors won't be saved to the database.
:param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
"""
if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
if isinstance(color, HighlightStandardColor):
color = highlight.HighlightColor(color)
core.BNSetAutoBasicBlockHighlight(self.handle, color._get_core_struct())
def set_user_highlight(self, color):
"""
``set_user_highlight`` highlights the current BasicBlock with the supplied color
:param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
:Example:
>>> current_basic_block.set_user_highlight(highlight.HighlightColor(red=0xff, blue=0xff, green=0))
>>> current_basic_block.set_user_highlight(HighlightStandardColor.BlueHighlightColor)
"""
if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
if isinstance(color, HighlightStandardColor):
color = highlight.HighlightColor(color)
core.BNSetUserBasicBlockHighlight(self.handle, color._get_core_struct())
	def get_instruction_containing_address(self, addr):
		"""Return ``(found, start)`` for the instruction containing ``addr``.

		:param int addr: address to look up within this basic block
		:return: tuple of the core's result flag and the start address written
			by the core (only meaningful when the flag indicates success)
		"""
		start = ctypes.c_uint64()
		ret = core.BNGetBasicBlockInstructionContainingAddress(self.handle, addr, start)
		return ret, start.value
@property
def source_block(self):
"""Source of this basic block (read-only)"""
result = core.BNGetBasicBlockSourceBlock(self.handle)
if not result:
return None
return self._create_instance(result, self.view)
|
{
"content_hash": "539dd6c77df28a6c94e5690f321fec85",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 133,
"avg_line_length": 32.339731285988485,
"alnum_prop": 0.7232476704848952,
"repo_name": "joshwatson/binaryninja-api",
"id": "2394f25f936dfe07cf24b1402296564f8b586dc7",
"size": "17947",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/basicblock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7757"
},
{
"name": "C",
"bytes": "239495"
},
{
"name": "C++",
"bytes": "1991771"
},
{
"name": "CMake",
"bytes": "2691"
},
{
"name": "CSS",
"bytes": "51"
},
{
"name": "JavaScript",
"bytes": "1783"
},
{
"name": "Makefile",
"bytes": "10627"
},
{
"name": "Python",
"bytes": "3793811"
},
{
"name": "Shell",
"bytes": "4667"
}
],
"symlink_target": ""
}
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json
import copy
import re
def is_job_finished(job):
    """Return True once ``job``'s DaemonSet-style status reports every desired
    pod ready, with at least one pod desired; False when there is no status or
    the counts do not yet match."""
    if 'status' not in job:
        return False
    status = job['status']
    desired = status.get('desiredNumberScheduled', 1)
    ready = status.get('numberReady', 0)
    return desired > 0 and desired == ready
def new_daemon(job):
    """Derive a DaemonSet manifest from the DaemonJob ``job``.

    The job's containers are demoted to initContainers and a tiny 'pause'
    container keeps each pod alive afterwards, so the DaemonSet can report
    Ready once the real work has completed. ``job`` itself is not modified.
    """
    daemon = copy.deepcopy(job)
    pod_labels = copy.deepcopy(job['spec']['template']['metadata']['labels'])
    daemon['apiVersion'] = 'apps/v1'
    daemon['kind'] = 'DaemonSet'
    daemon['metadata'] = {
        'name': '%s-dj' % (job['metadata']['name']),
        'labels': pod_labels,
    }
    pod_template = copy.deepcopy(job['spec']['template'])
    pod_template['spec']['initContainers'] = copy.deepcopy(job['spec']['template']['spec']['containers'])
    # The pause container requests a token amount of CPU and simply idles.
    pod_template['spec']['containers'] = [{
        'name': "pause",
        'image': job['spec'].get('pauseImage', 'gcr.io/google_containers/pause'),
        'resources': {'requests': {'cpu': '10m'}},
    }]
    daemon['spec'] = {
        'template': pod_template,
        'selector': {'matchLabels': copy.deepcopy(pod_labels)},
    }
    return daemon
class Controller(BaseHTTPRequestHandler):
    # Metacontroller webhook: receives the observed parent (DaemonJob) and its
    # children as JSON, replies with the desired status and child objects.
    # NOTE(review): relies on Python 2 APIs (BaseHTTPServer module and
    # self.headers.getheader) -- this file will not run unmodified on Python 3.
    def sync(self, job, children):
        # Compute the desired state for one observed parent/children snapshot.
        desired_status = {}
        child = '%s-dj' % (job['metadata']['name'])
        self.log_message(" Children: %s", children)
        # If the job already finished at some point, freeze the status,
        # delete children, and take no further action.
        if is_job_finished(job):
            desired_status = copy.deepcopy(job['status'])
            desired_status['conditions'] = [{'type': 'Complete', 'status': 'True'}]
            return {'status': desired_status, 'children': []}
        # Compute status based on what we observed, before building desired state.
        # Our .status is just a copy of the DaemonSet .status with extra fields.
        desired_status = copy.deepcopy(children['DaemonSet.apps/v1'].get(child, {}).get('status',{}))
        if is_job_finished(children['DaemonSet.apps/v1'].get(child, {})):
            desired_status['conditions'] = [{'type': 'Complete', 'status': 'True'}]
        else:
            desired_status['conditions'] = [{'type': 'Complete', 'status': 'False'}]
        # Always generate desired state for child if we reach this point.
        # We should not delete children until after we know we've recorded
        # completion in our status, which was the first check we did above.
        desired_child = new_daemon(job)
        return {'status': desired_status, 'children': [desired_child]}
    def do_POST(self):
        # Decode the sync request body, run the reconcile logic, and write the
        # desired state back as a JSON response.
        observed = json.loads(self.rfile.read(int(self.headers.getheader('content-length'))))
        desired = self.sync(observed['parent'], observed['children'])
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(desired))
HTTPServer(('', 80), Controller).serve_forever()
|
{
"content_hash": "6ef155245e628a79f182c0c76838d4ef",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 117,
"avg_line_length": 41.67605633802817,
"alnum_prop": 0.6610341331530922,
"repo_name": "GoogleCloudPlatform/metacontroller",
"id": "3393ec737c64ac08f68ead1c1833264f13fc241c",
"size": "3559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/daemonjob/sync.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "444"
},
{
"name": "Go",
"bytes": "239016"
},
{
"name": "Makefile",
"bytes": "2026"
},
{
"name": "Ruby",
"bytes": "3684"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
}
|
import re
from django.db.models import Q
def normalize_query(query_string,
                    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                    normspace=re.compile(r'\s{2,}').sub):
    """Split ``query_string`` into individual search terms, keeping
    double-quoted phrases together and collapsing runs of whitespace
    inside them to single spaces.

    >>> normalize_query('  some random  words "with   quotes  " and   spaces')
    ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
    """
    terms = []
    for quoted, bare in findterms(query_string):
        token = (quoted or bare).strip()
        terms.append(normspace(' ', token))
    return terms
def get_query(query_string, search_fields):
    """Build a Django ``Q`` filter matching every term of ``query_string``
    in at least one of ``search_fields`` (case-insensitive substring match).

    Terms are AND-ed together; the fields for a single term are OR-ed.
    Returns None when ``query_string`` yields no terms.
    """
    combined = None
    for term in normalize_query(query_string):
        term_query = None
        for field in search_fields:
            clause = Q(**{"%s__icontains" % field: term})
            term_query = clause if term_query is None else term_query | clause
        combined = term_query if combined is None else combined & term_query
    return combined
|
{
"content_hash": "457b0d1329a4f03aee7e408db0580fb2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 30.115384615384617,
"alnum_prop": 0.5261813537675607,
"repo_name": "cedricmeston/pierrecedric",
"id": "4081133d3694a119b12decfd9a62b8d0caef4b32",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/snipped.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27656"
},
{
"name": "HTML",
"bytes": "12466"
},
{
"name": "Makefile",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "17204"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the MediaBase base table plus the
    Audio and Video multi-table-inheritance children."""
    def forwards(self, orm):
        # Adding model 'MediaBase'
        db.create_table('multimedia_mediabase', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
            ('date_added', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
            ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
            ('uploaded', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
            ('file_type', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('multimedia', ['MediaBase'])
        # Adding model 'Audio' (MTI child: PK is a OneToOne to MediaBase)
        db.create_table('multimedia_audio', (
            ('mediabase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['multimedia.MediaBase'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('multimedia', ['Audio'])
        # Adding model 'Video' (MTI child with thumbnail-related columns)
        db.create_table('multimedia_video', (
            ('mediabase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['multimedia.MediaBase'], unique=True, primary_key=True)),
            ('thumbnail_image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
            ('auto_thumbnail', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('thumbnail_offset', self.gf('django.db.models.fields.PositiveIntegerField')(default=4, blank=True)),
        ))
        db.send_create_signal('multimedia', ['Video'])
    def backwards(self, orm):
        # Deleting model 'MediaBase'
        db.delete_table('multimedia_mediabase')
        # Deleting model 'Audio'
        db.delete_table('multimedia_audio')
        # Deleting model 'Video'
        db.delete_table('multimedia_video')
    # Frozen ORM snapshot used by South at migration time; must not be edited
    # to reflect later model changes.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            '_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.image': {
            'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
            '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
            'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'multimedia.audio': {
            'Meta': {'ordering': "('-date_added',)", 'object_name': 'Audio', '_ormbases': ['multimedia.MediaBase']},
            'mediabase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['multimedia.MediaBase']", 'unique': 'True', 'primary_key': 'True'})
        },
        'multimedia.media': {
            'Meta': {'ordering': "('-date_added',)", 'object_name': 'Media'},
            'auto_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'thumbnail_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
            'thumbnail_offset': ('django.db.models.fields.PositiveIntegerField', [], {'default': '4', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'multimedia.mediabase': {
            'Meta': {'ordering': "('-date_added',)", 'object_name': 'MediaBase'},
            'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'file_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'multimedia.video': {
            'Meta': {'ordering': "('-date_added',)", 'object_name': 'Video', '_ormbases': ['multimedia.MediaBase']},
            'auto_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mediabase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['multimedia.MediaBase']", 'unique': 'True', 'primary_key': 'True'}),
            'thumbnail_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
            'thumbnail_offset': ('django.db.models.fields.PositiveIntegerField', [], {'default': '4', 'blank': 'True'})
        }
    }
    complete_apps = ['multimedia']
|
{
"content_hash": "243c5da3dbbe973df53ed6f543a67bc2",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 182,
"avg_line_length": 77.72413793103448,
"alnum_prop": 0.5614463176574978,
"repo_name": "jbittel/django-multimedia",
"id": "600721c9011fc9c410bbc63e2b9a1d5e4f24533e",
"size": "13542",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "multimedia/south_migrations/0003_auto__add_mediabase__add_audio__add_video.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "308439"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
# Functional test for the scons -D option: like -U/-u it climbs to the
# top-level SConstruct, but -D builds Default() targets from ALL SConscripts,
# not just the one for the invocation directory.
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('sub1', 'sub2')
# Trivial "build" tool: copy the source file to the target file.
test.write('build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")
test.write('SConstruct', """
import SCons.Defaults
B = Builder(action=r'%(_python_)s build.py $TARGET $SOURCES')
env = Environment()
env['BUILDERS']['B'] = B
env.B(target = 'sub1/foo.out', source = 'sub1/foo.in')
Export('env')
SConscript('sub1/SConscript')
SConscript('sub2/SConscript')
""" % locals())
test.write(['sub1', 'SConscript'], """
Import('env')
env.B(target = 'foo.out', source = 'foo.in')
Default('.')
""")
test.write(['sub1', 'foo.in'], "sub1/foo.in")
test.write(['sub2', 'SConscript'], """
Import('env')
env.Alias('bar', env.B(target = 'bar.out', source = 'bar.in'))
Default('.')
""")
test.write(['sub2', 'bar.in'], "sub2/bar.in")
# Running with -D from sub1 must build the Default() targets of BOTH
# subdirectories, not just sub1's.
test.run(arguments = '-D', chdir = 'sub1')
test.must_match(['sub1', 'foo.out'], "sub1/foo.in")
test.must_match(['sub2', 'bar.out'], "sub2/bar.in")
test.unlink(['sub1', 'foo.out'])
test.unlink(['sub2', 'bar.out'])
# An explicit target argument restricts the build to that alias only.
test.run(arguments = '-D bar', chdir = 'sub1')
test.must_not_exist(test.workpath('sub1', 'foo.out'))
test.must_exist(test.workpath('sub2', 'bar.out'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "314c4d591a5a5ee43cdecc1f494905fb",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 62,
"avg_line_length": 21.940298507462686,
"alnum_prop": 0.6312925170068027,
"repo_name": "Distrotech/scons",
"id": "57c08534c657e854c852f9fdc3e8b0799d6b7fc0",
"size": "2572",
"binary": false,
"copies": "5",
"ref": "refs/heads/distrotech-scons",
"path": "test/Climb/option--D.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "12517068"
},
{
"name": "Shell",
"bytes": "20589"
}
],
"symlink_target": ""
}
|
"""
SoftLayer.tests.CLI.modules.call_api_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import testing
import json
class CallCliTests(testing.TestCase):
    """Tests for the 'call-api' CLI command: option forwarding, JSON/table
    rendering of scalar, nested, and list results, and positional params."""
    def test_options(self):
        # mask/limit/offset/id flags are translated into API call kwargs.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = 'test'
        result = self.run_command(['call-api', 'Service', 'method',
                                   '--mask=some.mask',
                                   '--limit=20',
                                   '--offset=40',
                                   '--id=100'])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), 'test')
        self.assert_called_with('SoftLayer_Service', 'method',
                                mask='mask[some.mask]',
                                limit=20,
                                offset=40,
                                identifier='100')
    def test_object(self):
        # A dict result round-trips through the default JSON formatter.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = {'string': 'string',
                             'int': 10,
                             'float': 1.0,
                             'None': None,
                             'Bool': True}
        result = self.run_command(['call-api', 'Service', 'method'])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output),
                         {'string': 'string',
                          'int': 10,
                          'float': 1.0,
                          'None': None,
                          'Bool': True})
    def test_object_table(self):
        # A dict result rendered as a name/value table.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = {'string': 'string',
                             'int': 10,
                             'float': 1.0,
                             'None': None,
                             'Bool': True}
        result = self.run_command(['call-api', 'Service', 'method'],
                                  fmt='table')
        self.assertEqual(result.exit_code, 0)
        # NOTE(kmcdonald): Order is not guaranteed
        self.assertIn(":........:........:", result.output)
        self.assertIn(": name : value :", result.output)
        self.assertIn(": int : 10 :", result.output)
        self.assertIn(": None : None :", result.output)
        self.assertIn(": float : 1.0 :", result.output)
        self.assertIn(": Bool : True :", result.output)
        self.assertIn(": string : string :", result.output)
        self.assertIn(":........:........:", result.output)
    def test_object_nested(self):
        # Nested structures are preserved verbatim in JSON output.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = {'this': {'is': [{'pretty': 'nested'}]}}
        result = self.run_command(['call-api', 'Service', 'method'])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output),
                         {'this': {'is': [{'pretty': 'nested'}]}})
    def test_list(self):
        # A list-of-dicts result round-trips through the JSON formatter.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = [{'string': 'string',
                              'int': 10,
                              'float': 1.0,
                              'None': None,
                              'Bool': True}]
        result = self.run_command(['call-api', 'Service', 'method'])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output),
                         [{'string': 'string',
                           'int': 10,
                           'float': 1.0,
                           'None': None,
                           'Bool': True}])
    def test_list_table(self):
        # A list result rendered as a table: one column per key, sorted.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = [{'string': 'string',
                              'int': 10,
                              'float': 1.0,
                              'None': None,
                              'Bool': True}]
        result = self.run_command(['call-api', 'Service', 'method'],
                                  fmt='table')
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(result.output,
                         """:......:......:.......:.....:........:
: Bool : None : float : int : string :
:......:......:.......:.....:........:
: True : None : 1.0 : 10 : string :
:......:......:.......:.....:........:
""")
    def test_parameters(self):
        # Extra positional CLI arguments are forwarded as API call args.
        mock = self.set_mock('SoftLayer_Service', 'method')
        mock.return_value = {}
        result = self.run_command(['call-api', 'Service', 'method',
                                   'arg1', '1234'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Service', 'method',
                                args=('arg1', '1234'))
|
{
"content_hash": "39ab274f32fd13fb9fd887cb860062a9",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 68,
"avg_line_length": 38.16279069767442,
"alnum_prop": 0.42514726792606133,
"repo_name": "underscorephil/softlayer-python",
"id": "14164efa7b69c58d2507f2510e97f307cd2271cc",
"size": "4923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/CLI/modules/call_api_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "770885"
}
],
"symlink_target": ""
}
|
import collections
import re
from importlib import import_module
from django import forms
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
__all__ = ['BaseBlock', 'Block', 'BoundBlock', 'DeclarativeSubBlocksMetaclass', 'BlockWidget', 'BlockField']
# =========================================
# Top-level superclasses and helper objects
# =========================================
class BaseBlock(type):
    """Metaclass for Block: collects the ``Meta`` inner class declared on a
    block class together with the ``_meta_class`` of each base, and builds a
    single combined ``_meta_class`` type so meta options inherit naturally
    along the block hierarchy."""
    def __new__(mcs, name, bases, attrs):
        declared_meta = attrs.pop('Meta', None)
        new_cls = super(BaseBlock, mcs).__new__(mcs, name, bases, attrs)
        # Resolution order: this class's own Meta first, then each base's
        # accumulated meta class; missing entries are dropped.
        candidates = [declared_meta]
        candidates.extend(getattr(base, '_meta_class', None) for base in bases)
        meta_bases = tuple(candidate for candidate in candidates if candidate)
        new_cls._meta_class = type(str(name + 'Meta'), meta_bases, {})
        return new_cls
class Block(metaclass=BaseBlock):
    name = ''  # set via set_name() when the block is attached to a parent definition
    creation_counter = 0  # class-wide counter of Block instantiations; see __init__
    TEMPLATE_VAR = 'value'  # template context variable name for the block's value
    class Meta:
        # Default meta options; overridden by subclass Meta classes and
        # constructor keyword arguments (see BaseBlock and __init__).
        label = None
        icon = "placeholder"
        classname = None
        group = ''
    # Attributes of Meta which can legally be modified after the block has been instantiated.
    # Used to implement __eq__. label is not included here, despite it technically being mutable via
    # set_name, since its value must originate from either the constructor arguments or set_name,
    # both of which are captured by the equality test, so checking label as well would be redundant.
    MUTABLE_META_ATTRIBUTES = []
    """
    Setting a 'dependencies' list serves as a shortcut for the common case where a complex block type
    (such as struct, list or stream) relies on one or more inner block objects, and needs to ensure that
    the responses from the 'media' and 'html_declarations' include the relevant declarations for those inner
    blocks, as well as its own. Specifying these inner block objects in a 'dependencies' list means that
    the base 'media' and 'html_declarations' methods will return those declarations; the outer block type can
    then add its own declarations to the list by overriding those methods and using super().
    """
    dependencies = []
def __new__(cls, *args, **kwargs):
# adapted from django.utils.deconstruct.deconstructible; capture the arguments
# so that we can return them in the 'deconstruct' method
obj = super(Block, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def all_blocks(self):
"""
Return a list consisting of self and all block objects that are direct or indirect dependencies
of this block
"""
result = [self]
for dep in self.dependencies:
result.extend(dep.all_blocks())
return result
def all_media(self):
media = forms.Media()
# In cases where the same block definition appears multiple times within different
# container blocks (e.g. a RichTextBlock appearing at the top level of a StreamField as
# well as both sides of a StructBlock for producing two-column layouts), we will encounter
# identical media declarations. Adding these to the final combined media declaration would
# be redundant and add processing time when determining the final media ordering. To avoid
# this, we keep a cache of previously-seen declarations and only add unique ones.
media_cache = set()
for block in self.all_blocks():
key = block.media.__repr__()
if key not in media_cache:
media += block.media
media_cache.add(key)
return media
def all_html_declarations(self):
declarations = filter(bool, [block.html_declarations() for block in self.all_blocks()])
return mark_safe('\n'.join(declarations))
def __init__(self, **kwargs):
if 'classname' in self._constructor_args[1]:
# Adding this so that migrations are not triggered
# when form_classname is used instead of classname
# in the initialisation of the FieldBlock
classname = self._constructor_args[1].pop('classname')
self._constructor_args[1].setdefault('form_classname', classname)
self.meta = self._meta_class()
for attr, value in kwargs.items():
setattr(self.meta, attr, value)
# Increase the creation counter, and save our local copy.
self.creation_counter = Block.creation_counter
Block.creation_counter += 1
self.definition_prefix = 'blockdef-%d' % self.creation_counter
self.label = self.meta.label or ''
def set_name(self, name):
self.name = name
if not self.meta.label:
self.label = capfirst(force_str(name).replace('_', ' '))
def set_meta_options(self, opts):
"""
Update this block's meta options (out of the ones designated as mutable) from the given dict.
Used by the StreamField constructor to pass on kwargs that are to be handled by the block,
since the block object has already been created by that point, e.g.:
body = StreamField(SomeStreamBlock(), max_num=5)
"""
for attr, value in opts.items():
if attr in self.MUTABLE_META_ATTRIBUTES:
setattr(self.meta, attr, value)
else:
raise TypeError("set_meta_options received unexpected option: %r" % attr)
    @property
    def media(self):
        # Form media (JS/CSS) required to edit this block; the base block
        # needs none, so subclasses override as required.
        return forms.Media()
    def html_declarations(self):
        """
        Return an HTML fragment to be rendered on the form page once per block definition -
        as opposed to once per occurrence of the block. For example, the block definition
            ListBlock(label="Shopping list", CharBlock(label="Product"))
        needs to output a <script type="text/template"></script> block containing the HTML for
        a 'product' text input, so that these can be dynamically added to the list. This
        template block must only occur once in the page, even if there are multiple 'shopping list'
        blocks on the page.
        Any element IDs used in this HTML fragment must begin with definition_prefix.
        (More precisely, they must either be definition_prefix itself, or begin with definition_prefix
        followed by a '-' character)
        """
        # Base blocks declare nothing; subclasses with per-definition markup override.
        return ''
    def js_initializer(self):
        """
        Returns a JavaScript expression string, or None if this block does not require any
        JavaScript behaviour. This expression evaluates to an initializer function, a function that
        takes the ID prefix and applies JS behaviour to the block instance with that value and prefix.
        The parent block of this block (or the top-level page code) must ensure that this
        expression is not evaluated more than once. (The resulting initializer function can and will be
        called as many times as there are instances of this block, though.)
        """
        # Base blocks need no JS behaviour.
        return None
    def render_form(self, value, prefix='', errors=None):
        """
        Render the HTML for this block with 'value' as its content.

        :raises NotImplementedError: always on the base class; concrete block
            types must override this.
        """
        raise NotImplementedError('%s.render_form' % self.__class__)
    def value_from_datadict(self, data, files, prefix):
        """Extract this block's value from a form submission (POST data and
        uploaded files, namespaced by ``prefix``); subclasses must override."""
        raise NotImplementedError('%s.value_from_datadict' % self.__class__)
    def value_omitted_from_data(self, data, files, name):
        """
        Used only for top-level blocks wrapped by BlockWidget (i.e.: typically only StreamBlock)
        to inform ModelForm logic on Django >=1.10.2 whether the field is absent from the form
        submission (and should therefore revert to the field default).
        """
        return name not in data
def bind(self, value, prefix=None, errors=None):
    """
    Associate this block definition with a value, a prefix and (optionally) a
    ValidationError to be rendered, returning a BoundBlock. BoundBlock mainly
    exists as a template convenience: bound_block.render() can be called from
    template code, whereas blockdef.render(value, prefix) cannot.
    """
    bound = BoundBlock(self, value, prefix=prefix, errors=errors)
    return bound
def get_default(self):
    """
    Return this block's default value (conventionally self.meta.default),
    converted to the value type this block expects. This covers cases where
    that type cannot be expressed statically at model-definition time - e.g. a
    StructValue holding a pointer back to the block definition object.
    """
    return self.meta.default
def prototype_block(self):
    """
    Return a BoundBlock usable as the basis for new empty block instances added
    on the fly (new list items, for example). Its prefix is '__PREFIX__' - to
    be replaced with a real prefix when inserted into the page - and its value
    is the block's default.
    """
    default_value = self.get_default()
    return self.bind(default_value, '__PREFIX__')
def clean(self, value):
    """
    Validate 'value', returning a cleaned version of it, or raise
    ValidationError on failure. The raised error is subsequently passed to
    render() for display, so it must carry everything needed for that
    rendering - e.g. which child block(s) failed, for nested blocks. (Prefer
    the 'params' attribute for this; error_list / error_dict are unreliable
    because Django rewrites them when nested.)
    """
    return value
def to_python(self, value):
    """
    Convert 'value' from its simple JSON-serialisable form into the (possibly
    complex) Python value used by the rest of the block API and front-end
    templates. This may be the value unchanged; a 'smart' wrapper that behaves
    like the original but renders itself as HTML in templates; or something
    else entirely (an image chooser, say, stores an image ID as the clean
    value and resolves it back to an actual image object here).
    """
    return value
def bulk_to_python(self, values):
    """
    Apply the to_python conversion across a list of values. The default is a
    plain element-wise pass; subclasses may optimise, e.g. by collapsing
    database lookups into a single query.
    """
    return list(map(self.to_python, values))
def get_prep_value(self, value):
    """The reverse of to_python: convert the Python value back into JSON-serialisable form."""
    return value
def get_context(self, value, parent_context=None):
    """
    Build the template context used when rendering this value through a
    template: the parent context (updated in place when supplied) with both
    'self' and self.TEMPLATE_VAR bound to the value.
    """
    context = parent_context or {}
    context['self'] = value
    context[self.TEMPLATE_VAR] = value
    return context
def get_template(self, context=None):
    """
    Return the template used to render this block, if one is declared on the
    meta class. Extracted as a hook so subclasses can choose templates
    dynamically (hence the unused 'context' parameter).
    """
    return getattr(self.meta, 'template', None)
def render(self, value, context=None):
    """
    Return a text rendering of 'value' suitable for display in templates.
    Uses the block's template (with the passed context, supplemented by
    get_context) when one is declared; otherwise falls back on render_basic.
    """
    template = self.get_template(context=context)
    if not template:
        return self.render_basic(value, context=context)

    if context is None:
        template_context = self.get_context(value)
    else:
        # copy so get_context's in-place additions don't leak to the caller
        template_context = self.get_context(value, parent_context=dict(context))

    return mark_safe(render_to_string(template, template_context))
def get_api_representation(self, value, context=None):
    """Hook for customising the API response; defaults to get_prep_value(value)."""
    return self.get_prep_value(value)
def render_basic(self, value, context=None):
    """
    Plain text rendering of 'value' for display in templates; render() falls
    back on this whenever the block declares no 'template' property.
    """
    rendered = force_str(value)
    return rendered
def get_searchable_content(self, value):
    """Return the text strings within this block to be fed to a search engine."""
    return []
def check(self, **kwargs):
    """
    Hook for the Django system checks framework: return a list of
    django.core.checks.Error objects describing validity problems in the
    block. Plain blocks have nothing to report.
    """
    return []
def _check_name(self, **kwargs):
    """
    Helper called by container blocks as part of the system checks framework,
    validating that this block's name is a valid Python identifier. (Not
    called universally, because not all blocks need names.)

    Returns a list of django.core.checks.Error objects; accepts an optional
    'field' kwarg used as the error's obj.
    """
    def error(hint):
        # Fix: pass hint= by keyword consistently (the original mixed keyword
        # and positional styles across the four checks).
        return checks.Error(
            "Block name %r is invalid" % self.name,
            hint=hint,
            obj=kwargs.get('field', self),
            id='wagtailcore.E001',
        )

    if not self.name:
        # Fix: return early for an empty/None name. The original fell through
        # to substring tests (`' ' in self.name`) which raise TypeError when
        # self.name is None; for '' the result is the same single error.
        return [error("Block name cannot be empty")]

    errors = []
    if ' ' in self.name:
        errors.append(error("Block names cannot contain spaces"))
    if '-' in self.name:
        errors.append(error("Block names cannot contain dashes"))
    if self.name[0].isdigit():
        errors.append(error("Block names cannot begin with a digit"))

    # catch-all for any other invalid identifier, only when nothing more
    # specific has been reported above
    if not errors and not re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', self.name):
        errors.append(error(
            "Block names should follow standard Python conventions for "
            "variable names: alpha-numeric and underscores, and cannot "
            "begin with a digit"))
    return errors
def id_for_label(self, prefix):
    """
    Return the ID to use as the 'for' attribute of <label> elements referring
    to this block under the given field prefix; None means no 'for' attribute
    should be emitted.
    """
    return None
@property
def required(self):
    """
    Whether labels for this block should display a 'required' asterisk.
    False here, since Block itself performs no validation; subclasses define
    what required-ness means for them.
    """
    return False
def deconstruct(self):
    """
    Return a (path, args, kwargs) triple describing how to reconstruct this
    block, for serialisation into migrations. Adapted from
    django.utils.deconstruct.deconstructible.
    """
    module_name = self.__module__
    class_name = self.__class__.__name__

    # The class must be importable from its declared module - inner classes
    # are not, and cannot be serialised into migrations.
    module = import_module(module_name)
    if not hasattr(module, class_name):
        raise ValueError(
            "Could not find object %s in %s.\n"
            "Please note that you cannot serialize things like inner "
            "classes. Please move the object into the main module "
            "body to use migrations.\n"
            % (class_name, module_name))

    # A module-level DECONSTRUCT_ALIASES mapping may substitute a canonical
    # path for this class; otherwise use the real import path.
    try:
        path = module.DECONSTRUCT_ALIASES[self.__class__]
    except (AttributeError, KeyError):
        path = '%s.%s' % (module_name, class_name)

    args, kwargs = self._constructor_args
    return (path, args, kwargs)
def __eq__(self, other):
    """
    Two blocks compare equal when their definitions match. Blocks are
    intended to be immutable apart from set_name() and the meta attributes
    listed in MUTABLE_META_ATTRIBUTES, so comparing the name, the
    deconstruct() output (which captures the constructor arguments) and those
    meta attributes is sufficient to identify (valid) differences.

    Historically this worked around https://code.djangoproject.com/ticket/24340
    in Django <1.9, whose deep_deconstruct did not recurse into block lists;
    it may still prove useful for re-using blocks across StreamField
    definitions (wagtail/wagtail#4298).
    """
    if not isinstance(other, Block):
        # anything that isn't a block at all is clearly not equal
        return False

    # Deliberately class-agnostic: FooBlock(StructBlock) and an equivalent
    # inline StructBlock([...]) should compare equal. This relies on
    # StructBlock's deconstruct() reporting the same normalised signature
    # (path 'wagtail.core.blocks.StructBlock' plus the child list) in both
    # cases - which also freezes the full field definition into migrations
    # rather than leaving them vulnerable to later edits of FooBlock.
    if self.name != other.name:
        return False
    if self.deconstruct() != other.deconstruct():
        return False
    return all(
        getattr(self.meta, attr, None) == getattr(other.meta, attr, None)
        for attr in self.MUTABLE_META_ATTRIBUTES
    )
class BoundBlock:
    """Pairs a block definition with a value, a prefix and optional errors, for rendering."""

    def __init__(self, block, value, prefix=None, errors=None):
        self.block = block
        self.value = value
        self.prefix = prefix
        self.errors = errors

    def render_form(self):
        """Render the editing form for this value (with any validation errors)."""
        return self.block.render_form(self.value, self.prefix, errors=self.errors)

    def render(self, context=None):
        """Render the value through the block's front-end rendering."""
        return self.block.render(self.value, context=context)

    def render_as_block(self, context=None):
        """
        Alias for render(); the {% include_block %} tag looks specifically for
        a method of this name. ({% include_block %} is just as likely to be
        invoked on a bare value as on a BoundBlock - looking for a plain
        `render` method would risk hitting an unrelated method that happens to
        share the name, e.g. page.render via a PageChooserBlock.)
        """
        return self.block.render(self.value, context=context)

    def id_for_label(self):
        """The 'for' attribute value for labels pointing at this bound block."""
        return self.block.id_for_label(self.prefix)

    def __str__(self):
        # render the value according to the block's native rendering
        return self.block.render(self.value)

    def __repr__(self):
        label = self.block.name or type(self.block).__name__
        return "<block %s: %r>" % (label, self.value)
class DeclarativeSubBlocksMetaclass(BaseBlock):
    """
    Metaclass that collects sub-blocks declared on the base classes.
    (cheerfully stolen from https://github.com/django/django/blob/master/django/forms/forms.py)

    Block attributes declared on a class body are removed from the class and
    gathered into `declared_blocks`; `base_blocks` additionally merges in the
    declared blocks of every ancestor. (BaseBlock is the metaclass defined
    earlier in this file.)
    """
    def __new__(mcs, name, bases, attrs):
        # Collect sub-blocks declared on the current class.
        # These are available on the class as `declared_blocks`.
        # Iterate over a list() copy because attrs is mutated via pop().
        current_blocks = []
        for key, value in list(attrs.items()):
            if isinstance(value, Block):
                current_blocks.append((key, value))
                # tell the sub-block its own attribute name
                value.set_name(key)
                attrs.pop(key)
        # creation_counter preserves source-declaration order across instances
        current_blocks.sort(key=lambda x: x[1].creation_counter)
        attrs['declared_blocks'] = collections.OrderedDict(current_blocks)

        new_class = (super(DeclarativeSubBlocksMetaclass, mcs).__new__(
            mcs, name, bases, attrs))

        # Walk through the MRO, collecting all inherited sub-blocks, to make
        # the combined `base_blocks`. Reversed so nearer classes override
        # more distant ancestors.
        base_blocks = collections.OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect sub-blocks from base class.
            if hasattr(base, 'declared_blocks'):
                base_blocks.update(base.declared_blocks)

            # Field shadowing: assigning None to an attribute on a subclass
            # removes the block inherited under that name.
            for attr, value in base.__dict__.items():
                if value is None and attr in base_blocks:
                    base_blocks.pop(attr)
        new_class.base_blocks = base_blocks

        return new_class
# ========================
# django.forms integration
# ========================
class BlockWidget(forms.Widget):
    """Wraps a block object as a widget so that it can be incorporated into a Django form."""

    def __init__(self, block_def, attrs=None):
        super().__init__(attrs=attrs)
        self.block_def = block_def

    def render_with_errors(self, name, value, attrs=None, errors=None, renderer=None):
        """Render the bound block's form HTML, appending its JS initializer call when one exists."""
        bound_block = self.block_def.bind(value, prefix=name, errors=errors)
        initializer_expr = self.block_def.js_initializer()
        if not initializer_expr:
            return mark_safe(bound_block.render_form())
        js_snippet = """
<script>
$(function() {
var initializer = %s;
initializer('%s');
})
</script>
""" % (initializer_expr, name)
        return mark_safe(bound_block.render_form() + js_snippet)

    def render(self, name, value, attrs=None, renderer=None):
        """Standard Widget.render entry point; delegates with no errors."""
        return self.render_with_errors(name, value, attrs=attrs, errors=None, renderer=renderer)

    @property
    def media(self):
        extra_css = forms.Media(css={'all': [
            'wagtailadmin/css/panels/streamfield.css',
        ]})
        return self.block_def.all_media() + extra_css

    def value_from_datadict(self, data, files, name):
        return self.block_def.value_from_datadict(data, files, name)

    def value_omitted_from_data(self, data, files, name):
        return self.block_def.value_omitted_from_data(data, files, name)
class BlockField(forms.Field):
    """Wraps a block object as a form field so that it can be incorporated into a Django form."""

    def __init__(self, block=None, **kwargs):
        if block is None:
            raise ImproperlyConfigured("BlockField was not passed a 'block' object")
        self.block = block
        # default the widget to a BlockWidget over this block, unless one was given
        kwargs.setdefault('widget', BlockWidget(block))
        super().__init__(**kwargs)

    def clean(self, value):
        """Delegate validation/cleaning to the wrapped block."""
        return self.block.clean(value)

    def has_changed(self, initial_value, data_value):
        # compare serialised forms so 'smart' value objects compare reliably
        return self.block.get_prep_value(initial_value) != self.block.get_prep_value(data_value)
# Used by Block.deconstruct(): when the defining module carries a
# DECONSTRUCT_ALIASES mapping, classes listed here are serialised into
# migrations under the aliased path instead of their real import path.
DECONSTRUCT_ALIASES = {
    Block: 'wagtail.core.blocks.Block',
}
|
{
"content_hash": "e1e3d9fdaa27ad8d1d3a933b95d28490",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 114,
"avg_line_length": 41.7203947368421,
"alnum_prop": 0.622486793345423,
"repo_name": "kaedroho/wagtail",
"id": "af3881570e9d41007dfee49fdf3f01decebec2bf",
"size": "25366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/core/blocks/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3323"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "505436"
},
{
"name": "JavaScript",
"bytes": "279901"
},
{
"name": "Makefile",
"bytes": "977"
},
{
"name": "Python",
"bytes": "4671883"
},
{
"name": "SCSS",
"bytes": "201389"
},
{
"name": "Shell",
"bytes": "7662"
},
{
"name": "TypeScript",
"bytes": "30266"
}
],
"symlink_target": ""
}
|
from app import app
# Development entry point: starts the Flask dev server on all interfaces.
# NOTE(review): debug=True enables the Werkzeug interactive debugger, and
# host="0.0.0.0" makes it reachable from the network - do not run this
# configuration in production.
app.run(debug=True,host="0.0.0.0",threaded=True)
|
{
"content_hash": "5f8c42d895670b994527f9ee9f80a030",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 48,
"avg_line_length": 34.5,
"alnum_prop": 0.7391304347826086,
"repo_name": "Illinois-tech-ITM/ITMT-492-593",
"id": "ecc911a1dbfaaa4fa4181409a67cddcad1c8a04d",
"size": "104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Spring-2015-final-documents/mEcosystemFinalChristopherHannon/mEcosystem-master/Linux/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "121797"
},
{
"name": "C++",
"bytes": "16093"
},
{
"name": "HTML",
"bytes": "285970"
},
{
"name": "Java",
"bytes": "48095"
},
{
"name": "MATLAB",
"bytes": "3131"
},
{
"name": "PHP",
"bytes": "11634"
},
{
"name": "Processing",
"bytes": "14797"
},
{
"name": "Python",
"bytes": "7563"
},
{
"name": "nesC",
"bytes": "316716"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.model.db_schema import type_map
from frappe.core.doctype.property_setter.property_setter import make_property_setter
from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for
from frappe.modules import make_boilerplate
# Template overrides for the form-grid UI when rendering rows of the
# child "fields" table on the DocType form.
form_grid_templates = {
    "fields": "templates/form_grid/fields.html"
}
class DocType(Document):
    # Controller for the "DocType" doctype itself: validates the schema
    # definition, syncs the database table on update, and exports boilerplate
    # files in developer mode. (Python 2 codebase.)

    def validate(self):
        # DocType records may only be edited with developer mode on, since
        # they are exported to (and versioned in) the file system.
        if not frappe.conf.get("developer_mode"):
            frappe.throw(_("Not in Developer Mode! Set in site_config.json"))
        # characters that would break table names / URLs / queries
        for c in [".", "/", "#", "&", "=", ":", "'", '"']:
            if c in self.name:
                frappe.throw(_("{0} not allowed in name").format(c))
        self.validate_series()
        self.scrub_field_names()
        self.validate_title_field()
        validate_fields(self.get("fields"))

        if self.istable:
            # no permission records for child table
            self.permissions = []
        else:
            validate_permissions(self)

        self.make_amendable()

    def change_modified_of_parent(self):
        # Touch the `modified` timestamp of every DocType that embeds this one
        # as a child table, so the parents' cached metadata is refreshed.
        if frappe.flags.in_import:
            return
        parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
        for p in parent_list:
            frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))

    def scrub_field_names(self):
        # Auto-generate missing fieldnames from the label (or from
        # fieldtype + idx when there is no label), avoiding reserved names.
        restricted = ('name','parent','creation','modified','modified_by',
            'parentfield','parenttype',"file_list")
        for d in self.get("fields"):
            if d.fieldtype:
                if (not getattr(d, "fieldname", None)):
                    if d.label:
                        # slugify the label; suffix '1' if it hits a reserved name
                        d.fieldname = d.label.strip().lower().replace(' ','_')
                        if d.fieldname in restricted:
                            d.fieldname = d.fieldname + '1'
                    else:
                        d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)

    def validate_title_field(self):
        # title_field, when set, must name one of this doctype's own fields
        if self.title_field and \
            self.title_field not in [d.fieldname for d in self.get("fields")]:
            frappe.throw(_("Title field must be a valid fieldname"))

    def validate_series(self, autoname=None, name=None):
        # Default the autoname scheme when a naming_series field exists, and
        # ensure a plain prefix-style autoname is not reused by another doctype.
        if not autoname: autoname = self.autoname
        if not name: name = self.name

        if not autoname and self.get("fields", {"fieldname":"naming_series"}):
            self.autoname = "naming_series:"

        # only plain "PREFIX.####"-style autonames need the uniqueness check
        if autoname and (not autoname.startswith('field:')) \
            and (not autoname.startswith('eval:')) \
            and (not autoname in ('Prompt', 'hash')) \
            and (not autoname.startswith('naming_series:')):
            prefix = autoname.split('.')[0]
            used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
            if used_in:
                frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))

    def on_update(self):
        # sync the database table with the (possibly changed) field list
        from frappe.model.db_schema import updatedb
        updatedb(self.name)

        self.change_modified_of_parent()
        make_module_and_roles(self)

        from frappe import conf
        # NOTE(review): `A and B or 0` parses as `(A and B) or 0`; the trailing
        # `or 0` has no effect on the condition's truthiness - presumably it was
        # meant as a default for conf.get('developer_mode'). Confirm intent.
        if not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode') or 0:
            self.export_doc()
            self.make_controller_template()

        # update index
        if not getattr(self, "custom", False):
            from frappe.modules import load_doctype_module
            module = load_doctype_module(self.name, self.module)
            if hasattr(module, "on_doctype_update"):
                module.on_doctype_update()

        delete_notification_count_for(doctype=self.name)
        frappe.clear_cache(doctype=self.name)

    def before_rename(self, old, new, merge=False):
        # merging two doctypes (two tables) is not supported
        if merge:
            frappe.throw(_("DocType can not be merged"))

    def after_rename(self, old, new, merge=False):
        # singles keep values in tabSingles; table doctypes get their table renamed
        if self.issingle:
            frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
        else:
            frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))

    def before_reload(self):
        if not (self.issingle and self.istable):
            self.preserve_naming_series_options_in_property_setter()

    def preserve_naming_series_options_in_property_setter(self):
        """preserve naming_series as property setter if it does not exist"""
        naming_series = self.get("fields", {"fieldname": "naming_series"})

        if not naming_series:
            return

        # check if atleast 1 record exists
        if not (frappe.db.table_exists("tab" + self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
            return

        existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
            "property": "options", "field_name": "naming_series"})

        if not existing_property_setter:
            make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
            if naming_series[0].default:
                make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)

    def export_doc(self):
        # write this doctype out to its module's file tree (developer mode)
        from frappe.modules.export_file import export_to_files
        export_to_files(record_list=[['DocType', self.name]])

    def import_doc(self):
        # re-import this doctype's definition from the module file tree
        from frappe.modules.import_module import import_from_files
        import_from_files(record_list=[[self.module, 'doctype', self.name]])

    def make_controller_template(self):
        # create skeleton controller / test files if they do not exist yet
        make_boilerplate("controller.py", self)

        if not (self.istable or self.issingle):
            make_boilerplate("test_controller.py", self)
            make_boilerplate("test_records.json", self)

    def make_amendable(self):
        """
        if is_submittable is set, add amended_from docfields
        """
        if self.is_submittable:
            if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
                self.append("fields", {
                    "label": "Amended From",
                    "fieldtype": "Link",
                    "fieldname": "amended_from",
                    "options": self.name,
                    "read_only": 1,
                    "print_hide": 1,
                    "no_copy": 1
                })

    def get_max_idx(self):
        # highest idx among this doctype's fields; 0 when there are none
        max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
            self.name)
        return max_idx and max_idx[0][0] or 0
def validate_fields_for_doctype(doctype):
    """Run field-level validation for all DocFields of the named doctype."""
    meta_fields = frappe.get_meta(doctype).get("fields")
    validate_fields(meta_fields)
# this is separate because it is also called via custom field
def validate_fields(fields):
    """
    Validate a list of DocField rows: fieldname legality/uniqueness, mandatory
    and hidden flags, Link/Table/Dynamic Link options, list-view settings and
    Fold placement. Raises via frappe.throw on the first violation found.

    Note: this module is Python 2 - several checks rely on filter() returning
    a list (len()/indexing on its result).
    """
    def check_illegal_characters(fieldname):
        # characters that would break queries / URLs in a fieldname
        for c in ['.', ',', ' ', '-', '&', '%', '=', '"', "'", '*', '$',
            '(', ')', '[', ']', '/']:
            if c in fieldname:
                frappe.throw(_("{0} not allowed in fieldname {1}").format(c, fieldname))

    def check_unique_fieldname(fieldname):
        duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
        if len(duplicates) > 1:
            frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))

    def check_illegal_mandatory(d):
        # display-only fieldtypes (except Table) cannot be mandatory
        if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
            frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))

    def check_link_table_options(d):
        if d.fieldtype in ("Link", "Table"):
            if not d.options:
                # Fix: corrected typo "requried" -> "required" in the
                # user-facing message (translation catalogues keyed on the
                # old string must be updated alongside).
                frappe.throw(_("Options required for Link or Table type field {0} in row {1}").format(d.label, d.idx))
            if d.options=="[Select]" or d.options==d.parent:
                return
            if d.options != d.parent and not frappe.db.exists("DocType", d.options):
                frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))

    def check_hidden_and_mandatory(d):
        if d.hidden and d.reqd and not d.default:
            frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))

    def check_min_items_in_list(fields):
        # when nothing is flagged for the list view, show the first five eligible fields
        if len(filter(lambda d: d.in_list_view, fields))==0:
            for d in fields[:5]:
                if d.fieldtype in type_map:
                    d.in_list_view = 1

    def check_width(d):
        # NOTE(review): defined but never invoked in the loop below - confirm
        # whether this check was meant to run per-field.
        if d.fieldtype == "Currency" and cint(d.width) < 100:
            frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))

    def check_in_list_view(d):
        if d.in_list_view and d.fieldtype!="Image" and (d.fieldtype in no_value_fields):
            frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))

    def check_dynamic_link_options(d):
        # a Dynamic Link's options must name a sibling Link field whose options are "DocType"
        if d.fieldtype=="Dynamic Link":
            doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
            if not doctype_pointer or (doctype_pointer[0].fieldtype!="Link") \
                or (doctype_pointer[0].options!="DocType"):
                frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))

    def check_fold(fields):
        # at most one Fold per form, and it must precede a labelled Section Break
        fold_exists = False
        for i, f in enumerate(fields):
            if f.fieldtype=="Fold":
                if fold_exists:
                    frappe.throw(_("There can be only one Fold in a form"))
                fold_exists = True
                if i < len(fields)-1:
                    nxt = fields[i+1]
                    if nxt.fieldtype != "Section Break" \
                        or (nxt.fieldtype=="Section Break" and not nxt.label):
                        frappe.throw(_("Fold must come before a labelled Section Break"))
                else:
                    frappe.throw(_("Fold can not be at the end of the form"))

    for d in fields:
        if not d.permlevel: d.permlevel = 0
        if not d.fieldname:
            frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
        check_illegal_characters(d.fieldname)
        check_unique_fieldname(d.fieldname)
        check_illegal_mandatory(d)
        check_link_table_options(d)
        check_dynamic_link_options(d)
        check_hidden_and_mandatory(d)
        check_in_list_view(d)

    check_min_items_in_list(fields)
    check_fold(fields)
def validate_permissions_for_doctype(doctype, for_remove=False):
    """Validate (and persist) the permission rows of the named DocType."""
    doctype = frappe.get_doc("DocType", doctype)

    if frappe.conf.developer_mode and not frappe.flags.in_test:
        # in developer mode, saving the doctype runs full validation and export
        doctype.save()
        return

    validate_permissions(doctype, for_remove)

    # persist each permission row individually
    for perm in doctype.get("permissions"):
        perm.db_update()
def validate_permissions(doctype, for_remove=False):
    # Validate the permission rows of a DocType. `doctype` here is the DocType
    # *document object* (not a name); `for_remove` skips the checks that do not
    # apply when a rule is being deleted.
    permissions = doctype.get("permissions")
    if not permissions:
        frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError)
    issingle = issubmittable = isimportable = False
    if doctype:
        issingle = cint(doctype.issingle)
        issubmittable = cint(doctype.is_submittable)
        isimportable = cint(doctype.allow_import)

    def get_txt(d):
        # human-readable locator for error messages
        return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)

    def check_atleast_one_set(d):
        # every rule must grant at least one basic right
        if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
            frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))

    def check_double(d):
        # no two rules may share role + permlevel + apply_user_permissions
        has_similar = False
        for p in permissions:
            if (p.role==d.role and p.permlevel==d.permlevel
                and p.apply_user_permissions==d.apply_user_permissions and p!=d):
                has_similar = True
                break

        if has_similar:
            frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and Apply User Permissions").format(get_txt(d)))

    def check_level_zero_is_set(d):
        # a level-N (>0) rule requires a level-0 rule for the same role, and
        # create/submit/cancel/amend/match are only meaningful at level 0
        if cint(d.permlevel) > 0 and d.role != 'All':
            has_zero_perm = False
            for p in permissions:
                if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
                    has_zero_perm = True
                    break

            if not has_zero_perm:
                frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))

            if d.create or d.submit or d.cancel or d.amend or d.match:
                frappe.throw(_("{0}: Create, Submit, Cancel and Amend only valid at level 0").format(get_txt(d)))

    def check_permission_dependency(d):
        if d.cancel and not d.submit:
            frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))

        if (d.submit or d.cancel or d.amend) and not d.write:
            frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
        # NOTE(review): the message says "without Cancel" but the condition
        # tests d.write (already covered by the previous check) - confirm
        # whether `not d.cancel` was intended here.
        if d.amend and not d.write:
            frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
        if d.get("import") and not d.create:
            frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))

    def remove_rights_for_single(d):
        # single doctypes have no list/report/import/export semantics;
        # silently strip those flags (with a message) instead of failing
        if not issingle:
            return

        if d.report:
            frappe.msgprint(_("Report cannot be set for Single types"))
            d.report = 0

        d.set("import", 0)
        d.set("export", 0)

        for ptype, label in (
            ("set_user_permissions", _("Set User Permissions")),
            ("apply_user_permissions", _("Apply User Permissions"))):
            if d.get(ptype):
                d.set(ptype, 0)
                frappe.msgprint(_("{0} cannot be set for Single types").format(label))

    def check_if_submittable(d):
        if d.submit and not issubmittable:
            frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
        elif d.amend and not issubmittable:
            frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))

    def check_if_importable(d):
        if d.get("import") and not isimportable:
            frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))

    for d in permissions:
        if not d.permlevel:
            d.permlevel=0
        check_atleast_one_set(d)
        if not for_remove:
            check_double(d)
            check_permission_dependency(d)
            check_if_submittable(d)
            check_if_importable(d)
        check_level_zero_is_set(d)
        remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
    # Ensure the Module Def and every Role referenced by this doctype's
    # permission rows exist, creating them when missing. Python 2 except
    # syntax (`except X, e`) below.
    # NOTE(review): perm_fieldname is accepted but the body reads
    # doc.get("permissions") directly - confirm whether it should be used.
    try:
        if not frappe.db.exists("Module Def", doc.module):
            m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
            m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
            m.ignore_mandatory = m.ignore_permissions = True
            m.insert()

        default_roles = ["Administrator", "Guest", "All"]
        roles = [p.role for p in doc.get("permissions") or []] + default_roles

        for role in list(set(roles)):
            if not frappe.db.exists("Role", role):
                r = frappe.get_doc({"doctype": "Role", "role_name": role})
                r.role_name = role
                r.ignore_mandatory = r.ignore_permissions = True
                r.insert()
    except frappe.DoesNotExistError, e:
        # best-effort: referenced records may not exist yet during install
        pass
    except frappe.SQLError, e:
        if e.args[0]==1146:
            # MySQL error 1146: table doesn't exist (fresh install) - ignore
            pass
        else:
            raise
def init_list(doctype):
    """Create boilerplate list-view files (JS and HTML) for the named doctype."""
    meta = frappe.get_meta(doctype)
    for template in ("controller_list.js", "controller_list.html"):
        make_boilerplate(template, meta)
|
{
"content_hash": "fb615e23ead3ca81802a2b637a4ce787",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 132,
"avg_line_length": 35.55216284987277,
"alnum_prop": 0.6802891497280275,
"repo_name": "rohitw1991/smarttailorfrappe",
"id": "e52aaa6804c03507d51d14de034357970b8383c2",
"size": "14076",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/doctype/doctype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87036"
},
{
"name": "JavaScript",
"bytes": "1516335"
},
{
"name": "Python",
"bytes": "937485"
}
],
"symlink_target": ""
}
|
import sys
from colors import Colors
class Log:
    """Tiny colourised stdout logger; all methods are classmethods."""

    @classmethod
    def print_msg(cls, title, msg, color, new_line = True):
        # "<coloured title>: <msg>" - resets the colour before the message text
        Log.raw("{0}{1}{2}: {3}".format(color, title, Colors.NORMAL, msg), new_line)

    @classmethod
    def msg(cls, msg, new_line = True):
        Log.print_msg("Message", msg, Colors.MAGENTA_FG, new_line)

    @classmethod
    def info(cls, msg, new_line = True):
        Log.print_msg("Info", msg, Colors.CYAN_FG, new_line)

    @classmethod
    def warn(cls, msg, new_line = True):
        Log.print_msg("Warning", msg, Colors.YELLOW_FG, new_line)

    @classmethod
    def note(cls, msg, new_line = True):
        Log.print_msg("Note", msg, Colors.YELLOW_FG, new_line)

    @classmethod
    def err(cls, msg, new_line = True):
        Log.print_msg("Error", msg, Colors.RED_FG, new_line)

    @classmethod
    def fatal(cls, msg, new_line = True):
        """Log a fatal message and terminate the process with status 1."""
        Log.print_msg("Fatal", msg, Colors.RED_FG, new_line)
        # Fix: use sys.exit instead of the bare `exit` builtin - `exit` is
        # injected by the site module and is not guaranteed to exist (e.g.
        # under python -S or in frozen apps); sys is already imported above.
        sys.exit(1)

    @classmethod
    def raw(cls, msg, new_line = True):
        """Write msg to stdout, appending a newline unless one is already present."""
        if new_line and msg[-1:] != "\n":
            msg += "\n"
        sys.stdout.write("{0}".format(msg))
{
"content_hash": "8983930204510801866abed025e27231",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 26.923076923076923,
"alnum_prop": 0.6323809523809524,
"repo_name": "JBarberU/python-utils",
"id": "dc1163d61bf2b27f533b34e1a9fe05eed7810a2c",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9769"
}
],
"symlink_target": ""
}
|
import wmi  # Tim Golden's wmi module.

# Show the first two processes reported by Windows WMI.
machine = wmi.WMI()
for process in machine.Win32_Process()[:2]:
    print(process)
|
{
"content_hash": "f2a41adbd2fd0f17abc61b819a151078",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 41,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.6864406779661016,
"repo_name": "nzavagli/UnrealPy",
"id": "d434174b5cca0f9449b224b0e63d32b4b72ac378",
"size": "118",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/py2exe-0.6.8/py2exe/samples/advanced/test_wmi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
import sys
# Import renderdoc if not already imported (e.g. in the UI)
if 'renderdoc' not in sys.modules and '_renderdoc' not in sys.modules:
import renderdoc
# Alias renderdoc for legibility
rd = renderdoc
def printVar(v, indent = ''):
    """Recursively print a shader variable.

    Leaf variables (no members) are printed as a rows x columns grid of
    '%.3f'-formatted floats; aggregates recurse with deeper indentation.
    """
    print(indent + v.name + ":")

    if not v.members:
        rows = []
        for r in range(v.rows):
            cells = ''.join('%.3f ' % v.value.fv[r * v.columns + c]
                            for c in range(v.columns))
            rows.append(indent + '  ' + cells)
        print('\n'.join(rows))

    for member in v.members:
        printVar(member, indent + '    ')
def sampleCode(controller):
    """List disassembly targets, disassemble the pixel shader, and print
    the contents of its first constant buffer."""
    print("Available disassembly formats:")
    targets = controller.GetDisassemblyTargets(True)
    for disasm in targets:
        print(" - " + disasm)
    target = targets[0]

    state = controller.GetPipelineState()

    # For some APIs, it might be relevant to set the PSO id or entry point name
    pipe = state.GetGraphicsPipelineObject()
    entry = state.GetShaderEntryPoint(rd.ShaderStage.Pixel)

    # Pixel-shader reflection and its first constant buffer.
    ps = state.GetShaderReflection(rd.ShaderStage.Pixel)
    cb = state.GetConstantBuffer(rd.ShaderStage.Pixel, 0, 0)

    print("Pixel shader:")
    print(controller.DisassembleShader(pipe, ps, target))

    variables = controller.GetCBufferVariableContents(
        pipe, ps.resourceId, entry, 0, cb.resourceId, 0, 0)
    for var in variables:
        printVar(var)
def loadCapture(filename):
    """Open a capture file and initialise a local replay.

    Returns a (capture, controller) pair; raises RuntimeError if the file
    cannot be opened or replayed.
    """
    cap = rd.OpenCaptureFile()

    # Open a particular file - see also OpenBuffer to load from memory
    status = cap.OpenFile(filename, '', None)
    if status != rd.ReplayStatus.Succeeded:
        raise RuntimeError("Couldn't open file: " + str(status))

    if not cap.LocalReplaySupport():
        raise RuntimeError("Capture cannot be replayed")

    status, controller = cap.OpenCapture(rd.ReplayOptions(), None)
    if status != rd.ReplayStatus.Succeeded:
        raise RuntimeError("Couldn't initialise replay: " + str(status))

    return cap, controller
# Entry point: inside the RenderDoc UI 'pyrenderdoc' exists and drives the
# replay; standalone we open the capture named on the command line ourselves.
if 'pyrenderdoc' in globals():
    pyrenderdoc.Replay().BlockInvoke(sampleCode)
else:
    rd.InitialiseReplay(rd.GlobalEnvironment(), [])
    if len(sys.argv) <= 1:
        print('Usage: python3 {} filename.rdc'.format(sys.argv[0]))
        sys.exit(0)
    cap, controller = loadCapture(sys.argv[1])
    sampleCode(controller)
    # Tear down in reverse order of creation.
    controller.Shutdown()
    cap.Shutdown()
    rd.ShutdownReplay()
|
{
"content_hash": "bb32088783dd3cd0662b5ef714307c7a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 104,
"avg_line_length": 24.489795918367346,
"alnum_prop": 0.71125,
"repo_name": "moradin/renderdoc",
"id": "f5f5c01d9b5ca45d7c5b976041ac173eced3fc7a",
"size": "2400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/python_api/examples/renderdoc/fetch_shader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2544577"
},
{
"name": "C#",
"bytes": "2112824"
},
{
"name": "C++",
"bytes": "10579561"
},
{
"name": "CMake",
"bytes": "110710"
},
{
"name": "GLSL",
"bytes": "38561"
},
{
"name": "HLSL",
"bytes": "46226"
},
{
"name": "Java",
"bytes": "526"
},
{
"name": "Objective-C",
"bytes": "57753"
},
{
"name": "Perl",
"bytes": "7374"
},
{
"name": "QMake",
"bytes": "4548"
},
{
"name": "Shell",
"bytes": "5486"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, UpdateView, CreateView
from pretix.base.models import Organizer, OrganizerPermission
from pretix.control.forms.organizer import OrganizerUpdateForm, OrganizerForm
from pretix.control.permissions import OrganizerPermissionRequiredMixin
class OrganizerList(ListView):
    """Paginated overview of the organizers visible to the current user."""

    model = Organizer
    context_object_name = 'organizers'
    template_name = 'pretixcontrol/organizers/index.html'
    paginate_by = 30

    def get_queryset(self):
        # Superusers see every organizer; everyone else only the ones they
        # hold a permission on.
        qs = Organizer.objects.current
        if self.request.user.is_superuser:
            return qs.all()
        return qs.filter(permitted__id__exact=self.request.user.pk)
class OrganizerUpdate(OrganizerPermissionRequiredMixin, UpdateView):
    """Edit form for a single organizer."""

    model = Organizer
    form_class = OrganizerUpdateForm
    template_name = 'pretixcontrol/organizers/detail.html'
    permission = None
    context_object_name = 'organizer'

    def get_object(self, queryset=None) -> Organizer:
        # The organizer is attached to the request by middleware/URL routing.
        return self.request.organizer

    def form_valid(self, form):
        messages.success(self.request, _('Your changes have been saved.'))
        return super().form_valid(form)

    def get_success_url(self) -> str:
        # Back to this organizer's own edit page.
        return reverse(
            'control:organizer.edit',
            kwargs={'organizer': self.request.organizer.slug},
        )
class OrganizerCreate(CreateView):
    """Creation form for a new organizer (superusers only)."""

    model = Organizer
    form_class = OrganizerForm
    template_name = 'pretixcontrol/organizers/create.html'
    context_object_name = 'organizer'

    def dispatch(self, request, *args, **kwargs):
        # Only superusers may create organizers.
        if not request.user.is_superuser:
            return HttpResponseForbidden()  # TODO
        return super().dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        messages.success(self.request, _('The new organizer has been created.'))
        response = super().form_valid(form)
        # Grant the creating user event-creation rights on the new organizer.
        OrganizerPermission.objects.create(
            organizer=form.instance,
            user=self.request.user,
            can_create_events=True,
        )
        return response

    def get_success_url(self) -> str:
        return reverse('control:organizers')
|
{
"content_hash": "f58553a40a8098ef7494430b91a04aa7",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.6945392491467577,
"repo_name": "Unicorn-rzl/pretix",
"id": "53250be2c4bc408e54fca91e7923a785266426e4",
"size": "2344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pretix/control/views/organizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "39129"
},
{
"name": "HTML",
"bytes": "153518"
},
{
"name": "JavaScript",
"bytes": "8986"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "593486"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from tempest.lib import exceptions
from magnum.tests.functional.api.v1.models import bay_model
from magnum.tests.functional.common import client
from magnum.tests.functional.common import utils
class BayClient(client.MagnumClient):
    """Encapsulates REST calls and maps JSON to/from models"""

    LOG = logging.getLogger(__name__)

    @classmethod
    def bays_uri(cls, filters=None):
        """Construct bays uri with optional filters

        :param filters: Optional k:v dict that's converted to url query
        :returns: url string
        """
        url = "/bays"
        if filters:
            url = cls.add_filters(url, filters)
        return url

    @classmethod
    def bay_uri(cls, bay_id):
        """Construct bay uri

        :param bay_id: bay uuid or name
        :returns: url string
        """
        return "{0}/{1}".format(cls.bays_uri(), bay_id)

    def list_bays(self, filters=None, **kwargs):
        """Makes GET /bays request and returns BayCollection

        Abstracts REST call to return all bays

        :param filters: Optional k:v dict that's converted to url query
        :returns: response object and BayCollection object
        """
        resp, body = self.get(self.bays_uri(filters), **kwargs)
        return self.deserialize(resp, body, bay_model.BayCollection)

    def get_bay(self, bay_id, **kwargs):
        """Makes GET /bay request and returns BayEntity

        Abstracts REST call to return a single bay based on uuid or name

        :param bay_id: bay uuid or name
        :returns: response object and BayEntity object
        """
        resp, body = self.get(self.bay_uri(bay_id))
        return self.deserialize(resp, body, bay_model.BayEntity)

    def post_bay(self, model, **kwargs):
        """Makes POST /bay request and returns BayEntity

        Abstracts REST call to create new bay

        :param model: BayEntity
        :returns: response object and BayEntity object
        """
        resp, body = self.post(
            self.bays_uri(),
            body=model.to_json(), **kwargs)
        return self.deserialize(resp, body, bay_model.BayEntity)

    def patch_bay(self, bay_id, baypatch_listmodel, **kwargs):
        """Makes PATCH /bay request and returns BayEntity

        Abstracts REST call to update bay attributes

        :param bay_id: UUID of bay
        :param baypatch_listmodel: BayPatchCollection
        :returns: response object and BayEntity object
        """
        resp, body = self.patch(
            self.bay_uri(bay_id),
            body=baypatch_listmodel.to_json(), **kwargs)
        return self.deserialize(resp, body, bay_model.BayEntity)

    def delete_bay(self, bay_id, **kwargs):
        """Makes DELETE /bay request and returns response object

        Abstracts REST call to delete bay based on uuid or name

        :param bay_id: UUID or name of bay
        :returns: response object
        """
        return self.delete(self.bay_uri(bay_id), **kwargs)

    def wait_for_bay_to_delete(self, bay_id):
        """Block until the bay disappears (10s poll, 600s timeout)."""
        utils.wait_for_condition(
            lambda: self.does_bay_not_exist(bay_id), 10, 600)

    def wait_for_created_bay(self, bay_id, delete_on_error=True):
        """Block until the bay is created (10s poll, 1800s timeout).

        :param bay_id: UUID or name of bay
        :param delete_on_error: if waiting fails (timeout or the bay entered
            an error state), delete the bay before re-raising
        """
        try:
            utils.wait_for_condition(
                lambda: self.does_bay_exist(bay_id), 10, 1800)
        except Exception:
            # In error state. Clean up the bay id if desired
            self.LOG.error('Bay %s entered an exception state.', bay_id)
            if delete_on_error:
                self.LOG.error('We will attempt to delete bays now.')
                self.delete_bay(bay_id)
                self.wait_for_bay_to_delete(bay_id)
            raise

    def wait_for_final_state(self, bay_id):
        """Block until the bay reaches any terminal state (10s poll)."""
        utils.wait_for_condition(
            lambda: self.is_bay_in_final_state(bay_id), 10, 1800)

    def is_bay_in_final_state(self, bay_id):
        """Return True if the bay is in a terminal state (success or failure).

        A missing bay (404) counts as not final.
        """
        try:
            resp, model = self.get_bay(bay_id)
            if model.status in ['CREATED', 'CREATE_COMPLETE',
                                'ERROR', 'CREATE_FAILED']:
                self.LOG.info('Bay %s succeeded.', bay_id)
                return True
            else:
                return False
        except exceptions.NotFound:
            self.LOG.warning('Bay %s is not found.', bay_id)
            return False

    def does_bay_exist(self, bay_id):
        """Return True once the bay is created.

        :raises exceptions.ServerFault: if the bay entered an error state
        """
        try:
            resp, model = self.get_bay(bay_id)
            if model.status in ['CREATED', 'CREATE_COMPLETE']:
                self.LOG.info('Bay %s is created.', bay_id)
                return True
            elif model.status in ['ERROR', 'CREATE_FAILED']:
                self.LOG.error('Bay %s is in fail state.', bay_id)
                # Interpolate the message before raising: previously the
                # format string and its arguments were passed as two
                # separate constructor arguments, so the message was never
                # actually formatted.
                raise exceptions.ServerFault(
                    "Got into an error condition: %s for %s" %
                    (model.status, bay_id))
            else:
                return False
        except exceptions.NotFound:
            self.LOG.warning('Bay %s is not found.', bay_id)
            return False

    def does_bay_not_exist(self, bay_id):
        """Return True once GET /bay reports 404 for the bay."""
        try:
            self.get_bay(bay_id)
        except exceptions.NotFound:
            self.LOG.warning('Bay %s is not found.', bay_id)
            return True
        return False
|
{
"content_hash": "3fbef7d4a9c3d45fccebf54cda7b066e",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 72,
"avg_line_length": 33.41139240506329,
"alnum_prop": 0.5847698427732525,
"repo_name": "openstack/magnum",
"id": "844ed4a2bafafdac19b6808ca17ea81f2ab954d2",
"size": "5820",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/tests/functional/api/v1/clients/bay_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8788"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "2302791"
},
{
"name": "Shell",
"bytes": "547968"
}
],
"symlink_target": ""
}
|
"""
This script differs from extract-unassembled-reads.py in that it only tags
the assembly, not the reads. This is more efficient (and streaming for
the reads!) but it will erroneously extract some small fraction of reads
because they miss the tags for reasons of length or errors.
Procedure:
* hard trim the reads at an abundance of ~5 (to avoid low abundance, and
eliminate erroneous paths)
* and/or variable-coverage trim the reads at an abundance of 3, to eliminate
erroneous paths from super-high-abundance data
* run this script with the assembly & the remaining reads.
"""
import sys
import os.path
import khmer, khmer.utils
import screed
import argparse
DEFAULT_KSIZE=31
NODEGRAPH_SIZE=1e8
def main():
    """Tag the assembly's k-mers, then emit reads that carry no tags.

    Reads missing every tag are written either to --output or, per input
    file, to '<readfile>.leftover'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('assembly')
    parser.add_argument('readfiles', nargs='+')
    parser.add_argument('-o', '--output', default=None)
    parser.add_argument('-k', '--ksize', default=DEFAULT_KSIZE, type=int)
    parser.add_argument('-x', '--tablesize', default=NODEGRAPH_SIZE,
                        type=float)
    args = parser.parse_args()

    graph = khmer.Nodegraph(args.ksize, args.tablesize, 4)
    graph._set_tag_density(20)

    print('loading & tagging assembly from:', args.assembly)
    graph.consume_seqfile_and_tag(args.assembly)

    # With -o all leftovers go to one file; otherwise a '.leftover' file is
    # opened (and closed) per input read file.
    if args.output:
        outfp = open(args.output, 'w')

    total = 0
    leftover = 0
    for readfile in args.readfiles:
        print('loading reads from:', readfile)
        if not args.output:
            outfile = os.path.basename(readfile) + '.leftover'
            outfp = open(outfile, 'w')
            print('writing to:', outfile, file=sys.stderr)

        for record in screed.open(readfile):
            if total % 100000 == 0 and total:
                print('...', readfile, total, leftover, file=sys.stderr)
            # No tag hits -> the read is (probably) absent from the assembly.
            if not graph.get_tags_and_positions(record.sequence):
                khmer.utils.write_record(record, outfp)
                leftover += 1
            total += 1

        if not args.output:
            outfp.close()

    print('%d left out of assembly, of %d reads' % (leftover, total),
          file=sys.stderr)
if __name__ == '__main__':
main()
|
{
"content_hash": "2a97e7589a44c74ba292cde7dfce06b4",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 30.257142857142856,
"alnum_prop": 0.6279508970727101,
"repo_name": "souravsingh/khmer",
"id": "be993481a734224b7d1cfe293e2c4711dba9451a",
"size": "2141",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sandbox/extract-unassembled-reads-2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "500623"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "22719"
},
{
"name": "Python",
"bytes": "1282062"
},
{
"name": "Roff",
"bytes": "9581"
},
{
"name": "Shell",
"bytes": "5544"
}
],
"symlink_target": ""
}
|
"""
Demonstrates the various Barbican API calls, against an unauthenticated local
Barbican server. This script is intended to be a lightweight way to demonstrate
and 'smoke test' the Barbican API via it's REST API, with no other dependencies
required including the Barbican Python client. Note that this script is not
intended to replace DevStack or Tempest style testing.
"""
import logging
import requests
import sys
from oslo_serialization import jsonutils as json
# Module-level logger that echoes everything (DEBUG and up) to stdout.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler(sys.stdout))

# Project ID: sent on every request via the X-Project-Id header.
proj = '12345678'

# Endpoint: local unauthenticated Barbican server.
end_point = 'http://localhost:9311'
version = 'v1'

# Basic header info:
hdrs = {'X-Project-Id': proj, 'content-type': 'application/json'}

# Consumer data. Used by the consumer add/delete demos below.
payload_consumer = {
    'name': 'foo-service',
    'URL': 'https://www.fooservice.com/widgets/1234'
}
def demo_version():
    """Get version"""
    response = requests.get(end_point, headers=hdrs)
    LOG.info('Version: {0}\n'.format(response.text))
def demo_store_secret_one_step_text(suffix=None, suppress=False):
    """Store secret (1-step):

    POSTs payload and metadata in one request, then reads the secret back
    as text.

    :param suffix: optional string appended to the secret payload
    :param suppress: when True, skip logging the retrieved secret
    :returns: the secret's HATEOAS reference
    """
    ep_1step = '/'.join([end_point, version, 'secrets'])

    secret = 'my-secret-here'
    if suffix:
        secret = '-'.join([secret, suffix])

    # POST metadata:
    payload = {
        'payload': secret,
        'payload_content_type': 'text/plain'
    }
    pr = requests.post(ep_1step, data=json.dumps(payload), headers=hdrs)
    # Decode the response body once; the previous version decoded it twice
    # and discarded the first result.
    secret_ref = pr.json().get('secret_ref')

    # GET secret:
    hdrs_get = dict(hdrs)
    hdrs_get.update({
        'accept': 'text/plain'})
    gr = requests.get(secret_ref, headers=hdrs_get)
    if not suppress:
        LOG.info('Get secret 1-step (text): {0}\n'.format(gr.content))

    return secret_ref
def demo_store_secret_two_step_binary():
    """Store secret (2-step):

    POSTs metadata only, PUTs the base64 payload to the returned reference,
    then GETs it back as binary. Returns the secret reference.
    """
    secret = 'bXktc2VjcmV0LWhlcmU='  # base64 of 'my secret'
    ep_2step = '/'.join([end_point, version, 'secrets'])

    # POST metadata:
    pr = requests.post(ep_2step, data=json.dumps({}), headers=hdrs)
    secret_ref = pr.json().get('secret_ref')
    assert secret_ref

    # PUT data to store:
    hdrs_put = dict(hdrs)
    hdrs_put['content-type'] = 'application/octet-stream'
    hdrs_put['content-encoding'] = 'base64'
    requests.put(secret_ref, data=secret, headers=hdrs_put)

    # GET secret:
    hdrs_get = dict(hdrs)
    hdrs_get['accept'] = 'application/octet-stream'
    gr = requests.get(secret_ref, headers=hdrs_get)
    LOG.info('Get secret 2-step (binary): {0}\n'.format(gr.content))

    return secret_ref
def demo_retrieve_secret_list():
    """Fetch the secret list and log each secret's reference."""
    ep_list = '/'.join([end_point, version, 'secrets'])

    response = requests.get(ep_list, headers=dict(hdrs))
    LOG.info('Get secret list:')
    for secret_info in response.json().get('secrets'):
        LOG.info('  {0}'.format(secret_info.get('secret_ref')))
    LOG.info('\n')
def demo_store_container_rsa(suffix=None):
    """Store an RSA container tying together three freshly created secrets.

    Creates private-key, public-key and passphrase secrets, POSTs a
    container referencing them, and GETs the container back.

    :param suffix: optional suffix passed through to the created secrets
    :returns: the container's HATEOAS reference
    """
    ep_cont = '/'.join([end_point, version, 'containers'])

    secret_prk = demo_store_secret_one_step_text(suffix=suffix, suppress=True)
    secret_puk = demo_store_secret_one_step_text(suffix=suffix, suppress=True)
    secret_pp = demo_store_secret_one_step_text(suffix=suffix, suppress=True)

    # POST metadata:
    payload = {
        "name": "container name",
        "type": "rsa",
        "secret_refs": [{
            "name": "private_key",
            "secret_ref": secret_prk
        },
        {
            "name": "public_key",
            "secret_ref": secret_puk
        },
        {
            "name": "private_key_passphrase",
            "secret_ref": secret_pp
        }]
    }
    pr = requests.post(ep_cont, data=json.dumps(payload), headers=hdrs)
    # Decode the response body once; the previous version decoded it twice
    # and discarded the first result.
    container_ref = pr.json().get('container_ref')

    # GET container:
    hdrs_get = dict(hdrs)
    gr = requests.get(container_ref, headers=hdrs_get)
    LOG.info('Get RSA container: {0}\n'.format(gr.content))

    return container_ref
def demo_retrieve_container_list():
    """Fetch the container list and log each container's reference."""
    ep_list = '/'.join([end_point, version, 'containers'])

    response = requests.get(ep_list, headers=dict(hdrs))
    LOG.info('Get container list:')
    for secret_info in response.json().get('containers'):
        LOG.info('  {0}'.format(secret_info.get('container_ref')))
    LOG.info('\n')
def demo_delete_secret(secret_ref):
    """Delete secret by its HATEOAS reference and verify it is gone.

    :param secret_ref: the secret's HATEOAS reference
    :raises AssertionError: if a follow-up GET does not return 404
    """
    # DELETE secret (the response body is not needed; the previous version
    # bound it to an unused local).
    requests.delete(secret_ref, headers=hdrs)

    gr = requests.get(secret_ref, headers=hdrs)
    assert 404 == gr.status_code

    LOG.info('...Deleted Secret: {0}\n'.format(secret_ref))
def demo_delete_container(container_ref):
    """Delete container by its HATEOAS reference and verify it is gone.

    :param container_ref: the container's HATEOAS reference
    :raises AssertionError: if a follow-up GET does not return 404
    """
    # DELETE container (the response body is not needed; the previous
    # version bound it to an unused local).
    requests.delete(container_ref, headers=hdrs)

    gr = requests.get(container_ref, headers=hdrs)
    assert 404 == gr.status_code

    LOG.info('...Deleted Container: {0}\n'.format(container_ref))
def demo_consumers_add(container_ref):
    """Add consumer to a container:"""
    ep_add = '/'.join([container_ref, 'consumers'])

    # POST metadata:
    response = requests.post(
        ep_add, data=json.dumps(payload_consumer), headers=hdrs)
    consumers = response.json().get('consumers')
    assert consumers
    assert len(consumers) == 1

    LOG.info('...Consumer response: {0}'.format(consumers))
def demo_consumers_delete(container_ref):
    """Delete consumer from a container:"""
    ep_delete = '/'.join([container_ref, 'consumers'])

    # DELETE with the same consumer payload removes the registration.
    response = requests.delete(
        ep_delete, data=json.dumps(payload_consumer), headers=hdrs)
    consumers = response.json().get('consumers')
    assert not consumers

    LOG.info('...Deleted Consumer from: {0}'.format(container_ref))
if __name__ == '__main__':
    demo_version()

    # Demonstrate secret actions:
    secret_ref = demo_store_secret_one_step_text()
    secret_ref2 = demo_store_secret_two_step_binary()
    demo_retrieve_secret_list()
    demo_delete_secret(secret_ref)
    demo_delete_secret(secret_ref2)

    # Demonstrate container and consumer actions:
    container_ref = demo_store_container_rsa(suffix='1')
    container_ref2 = demo_store_container_rsa(suffix='2')
    demo_retrieve_container_list()
    demo_consumers_add(container_ref)
    demo_consumers_add(container_ref)  # Should be idempotent
    demo_consumers_delete(container_ref)
    # Re-add after delete, then clean up both containers.
    demo_consumers_add(container_ref)
    demo_delete_container(container_ref)
    demo_delete_container(container_ref2)
|
{
"content_hash": "15caefa2d300773cb62209a95fc07ce3",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 29.186147186147185,
"alnum_prop": 0.6404627706911895,
"repo_name": "openstack/barbican",
"id": "6bff3308289636505d5257b75d86ab1785ebc41c",
"size": "7311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/demo_requests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "1586"
},
{
"name": "Mako",
"bytes": "979"
},
{
"name": "Python",
"bytes": "2626403"
},
{
"name": "Shell",
"bytes": "43567"
}
],
"symlink_target": ""
}
|
"""This file implements a VFS abstraction on the client."""
import functools
import platform
from typing import Any, Optional, Callable, Dict, Type
from grr_response_client.unprivileged.filesystem import vfs as unprivileged_vfs
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_client.vfs_handlers import files # pylint: disable=unused-import
from grr_response_client.vfs_handlers import ntfs
from grr_response_client.vfs_handlers import sleuthkit # pylint: disable=unused-import
# pylint: disable=g-import-not-at-top
if platform.system() == "Windows":
from grr_response_client.vfs_handlers import registry as vfs_registry # pylint: disable=unused-import
else:
vfs_registry = None
from grr_response_core import config
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import context
from grr_response_core.lib.util import precondition
# pylint: enable=g-import-not-at-top
VFSHandler = vfs_base.VFSHandler
UnsupportedHandlerError = vfs_base.UnsupportedHandlerError
# A registry of all VFSHandler registered
# TODO: Dictionary keys are of type rdf_paths.PathSpec.PathType,
# but this is currently not representable as type information in Python.
VFS_HANDLERS = {} # type: Dict[Any, Type[vfs_base.VFSHandler]]
_VFS_HANDLERS_DIRECT = {} # type: Dict[Any, Type[vfs_base.VFSHandler]]
_VFS_HANDLERS_SANDBOX = {} # type: Dict[Any, Type[vfs_base.VFSHandler]]
# The paths we should use as virtual root for VFS operations.
_VFS_VIRTUALROOTS = {}
def Init():
  """Register all known vfs handlers to open a pathspec types."""
  # Clear every registry first so repeated calls rebuild from scratch
  # instead of accumulating stale entries.
  VFS_HANDLERS.clear()
  _VFS_HANDLERS_DIRECT.clear()
  _VFS_HANDLERS_SANDBOX.clear()
  _VFS_VIRTUALROOTS.clear()
  vfs_virtualroots = config.CONFIG["Client.vfs_virtualroots"]
  # Plain file handlers are always registered.
  VFS_HANDLERS[files.File.supported_pathtype] = files.File
  VFS_HANDLERS[files.TempFile.supported_pathtype] = files.TempFile
  # The default NTFS/TSK handlers depend on the sandboxing setting:
  # unprivileged (sandboxed) implementations when enabled, the in-process
  # sleuthkit/ntfs ones otherwise.
  if config.CONFIG["Client.use_filesystem_sandboxing"]:
    VFS_HANDLERS[unprivileged_vfs.UnprivilegedNtfsFile
                 .supported_pathtype] = unprivileged_vfs.UnprivilegedNtfsFile
    VFS_HANDLERS[unprivileged_vfs.UnprivilegedTskFile
                 .supported_pathtype] = unprivileged_vfs.UnprivilegedTskFile
  else:
    VFS_HANDLERS[sleuthkit.TSKFile.supported_pathtype] = sleuthkit.TSKFile
    VFS_HANDLERS[ntfs.NTFSFile.supported_pathtype] = ntfs.NTFSFile
  # vfs_registry is only imported on Windows (None elsewhere).
  if vfs_registry is not None:
    VFS_HANDLERS[vfs_registry.RegistryFile
                 .supported_pathtype] = vfs_registry.RegistryFile
  # The DIRECT table always uses the in-process NTFS/TSK implementations,
  # overriding whatever the sandboxing setting put in VFS_HANDLERS.
  _VFS_HANDLERS_DIRECT.update(VFS_HANDLERS)
  _VFS_HANDLERS_DIRECT[sleuthkit.TSKFile.supported_pathtype] = sleuthkit.TSKFile
  _VFS_HANDLERS_DIRECT[ntfs.NTFSFile.supported_pathtype] = ntfs.NTFSFile
  # The SANDBOX table always uses the unprivileged implementations.
  _VFS_HANDLERS_SANDBOX.update(VFS_HANDLERS)
  _VFS_HANDLERS_SANDBOX[
      unprivileged_vfs.UnprivilegedNtfsFile
      .supported_pathtype] = unprivileged_vfs.UnprivilegedNtfsFile
  _VFS_HANDLERS_SANDBOX[
      unprivileged_vfs.UnprivilegedTskFile
      .supported_pathtype] = unprivileged_vfs.UnprivilegedTskFile
  # Each configured virtual root has the form "<pathtype>:<root path>",
  # e.g. "os:/path/to/virtual_root".
  for vfs_virtualroot in vfs_virtualroots:
    try:
      handler_string, root = vfs_virtualroot.split(":", 1)
    except ValueError:
      raise ValueError(
          "Badly formatted vfs virtual root: %s. Correct format is "
          "os:/path/to/virtual_root" % vfs_virtualroot)
    handler_string = handler_string.upper()
    handler = rdf_paths.PathSpec.PathType.enum_dict.get(handler_string)
    if handler is None:
      raise ValueError(
          "VFSHandler {} could not be registered, because it was not found in"
          " PathSpec.PathType {}".format(handler_string,
                                         rdf_paths.PathSpec.PathType.enum_dict))
    # We need some translation here, TSK needs an OS virtual root base. For
    # every other handler we can just keep the type the same.
    if handler in (rdf_paths.PathSpec.PathType.TSK,
                   rdf_paths.PathSpec.PathType.NTFS):
      base_type = rdf_paths.PathSpec.PathType.OS
    else:
      base_type = handler
    _VFS_VIRTUALROOTS[handler] = rdf_paths.PathSpec(
        path=root, pathtype=base_type, is_virtualroot=True)
def _GetVfsHandlers(
    pathspec: rdf_paths.PathSpec) -> Dict[Any, Type[vfs_base.VFSHandler]]:
  """Returns the table of VFS handlers for the given pathspec.

  Raises:
    ValueError: if implementation_type is set on a non-top-level component.
  """
  # implementation_type is only legal on the first pathspec component.
  for index, component in enumerate(pathspec):
    if index != 0 and component.HasField("implementation_type"):
      raise ValueError(
          "implementation_type must be set on the top-level component of "
          "a pathspec.")

  impl = pathspec.implementation_type
  if impl == rdf_paths.PathSpec.ImplementationType.DIRECT:
    return _VFS_HANDLERS_DIRECT
  if impl == rdf_paths.PathSpec.ImplementationType.SANDBOX:
    return _VFS_HANDLERS_SANDBOX
  return VFS_HANDLERS
def VFSOpen(pathspec: rdf_paths.PathSpec,
            progress_callback: Optional[Callable[[], None]] = None
           ) -> VFSHandler:
  """Expands pathspec to return an expanded Path.
  A pathspec is a specification of how to access the file by recursively opening
  each part of the path by different drivers. For example the following
  pathspec:
  pathtype: OS
  path: "/dev/sda1"
  nested_path {
    pathtype: TSK
    path: "/home/image2.img"
    nested_path {
      pathtype: TSK
      path: "/home/a.txt"
    }
  }
  Instructs the system to:
  1) open /dev/sda1 using the OS driver.
  2) Pass the obtained filelike object to the TSK driver to open
  "/home/image2.img".
  3) The obtained filelike object should be passed to the TSK driver to open
  "/home/a.txt".
  The problem remains how to get to this expanded path specification. Since the
  server is not aware of all the files on the client, the server may request
  this:
  pathtype: OS
  path: "/dev/sda1"
  nested_path {
    pathtype: TSK
    path: "/home/image2.img/home/a.txt"
  }
  Or even this:
  pathtype: OS
  path: "/dev/sda1/home/image2.img/home/a.txt"
  This function converts the pathspec requested by the server into an expanded
  pathspec required to actually open the file. This is done by expanding each
  component of the pathspec in turn.
  Expanding the component is done by opening each leading directory in turn and
  checking if it is a directory of a file. If its a file, we examine the file
  headers to determine the next appropriate driver to use, and create a nested
  pathspec.
  Note that for some clients there might be a virtual root specified. This
  is a directory that gets prepended to all pathspecs of a given
  pathtype. For example if there is a virtual root defined as
  ["os:/virtualroot"], a path specification like
  pathtype: OS
  path: "/home/user/*"
  will get translated into
  pathtype: OS
  path: "/virtualroot"
  is_virtualroot: True
  nested_path {
    pathtype: OS
    path: "/dev/sda1"
  }
  Args:
    pathspec: A Path() protobuf to normalize.
    progress_callback: A callback to indicate that the open call is still
      working but needs more time.
  Returns:
    The open filelike object. This will contain the expanded Path() protobuf as
    the member fd.pathspec.
  Raises:
    IOError: if one of the path components can not be opened.
  """
  # Initialize the dictionary of VFS handlers lazily, if not yet done.
  if not VFS_HANDLERS:
    Init()
  handlers = _GetVfsHandlers(pathspec)
  fd = None
  # Adjust the pathspec in case we are using a vfs_virtualroot.
  vroot = _VFS_VIRTUALROOTS.get(pathspec.pathtype)
  # If we have a virtual root for this vfs handler, we need to prepend
  # it to the incoming pathspec except if the pathspec is explicitly
  # marked as containing a virtual root already or if it isn't marked but
  # the path already contains the virtual root.
  if (not vroot or pathspec.is_virtualroot or
      pathspec.CollapsePath().startswith(vroot.CollapsePath())):
    # No virtual root but opening changes the pathspec so we always work on a
    # copy.
    working_pathspec = pathspec.Copy()
  else:
    # We're in a virtual root, put the target pathspec inside the virtual root
    # as a nested path.
    working_pathspec = vroot.Copy()
    working_pathspec.last.nested_path = pathspec.Copy()
  # For each pathspec step, we get the handler for it and instantiate it with
  # the old object, and the current step.
  while working_pathspec:
    component = working_pathspec.Pop()
    try:
      handler = handlers[component.pathtype]
    except KeyError:
      raise UnsupportedHandlerError(component.pathtype)
    # Snapshot both the component and the remaining pathspec before Open(),
    # which is allowed to rewrite working_pathspec in place (see below).
    orig_component = component.Copy()
    orig_working_pathspec = working_pathspec.Copy()
    # Open the component.
    fd = handler.Open(
        fd=fd,
        component=component,
        handlers=dict(handlers),
        pathspec=working_pathspec,
        progress_callback=progress_callback)
    # If the handler uses `client_utils.GetRawDevice`, it will rewrite
    # `working_pathspec`, adding 3 new entries (only the first 2 matter).
    # If there was an `implementation_type` set, we need to add it to
    # the new top-level entry and remove it from the original entry (which is
    # now modified at index 1).
    if (orig_component.HasField("implementation_type") and
        len(working_pathspec) >= len(orig_working_pathspec) + 2):
      working_pathspec.implementation_type = orig_component.implementation_type
      working_pathspec[1].implementation_type = None
  if fd is None:
    # The loop body never ran: the caller passed a pathspec with no components.
    raise ValueError("VFSOpen cannot be called with empty PathSpec.")
  return fd
def VFSMultiOpen(pathspecs, progress_callback=None):
  """Opens multiple files specified by given path-specs.
  See documentation for `VFSOpen` for more information.
  Args:
    pathspecs: A list of pathspec instances of files to open.
    progress_callback: A callback function to call to notify about progress
  Returns:
    A context manager yielding file-like objects.
  """
  precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec)
  files = [
      VFSOpen(pathspec, progress_callback=progress_callback)
      for pathspec in pathspecs
  ]
  return context.MultiContext(files)
def ReadVFS(pathspec, offset, length, progress_callback=None):
  """Read from the VFS and return the contents.
  Args:
    pathspec: path to read from
    offset: number of bytes to skip
    length: number of bytes to read
    progress_callback: A callback to indicate that the open call is still
      working but needs more time.
  Returns:
    VFS file contents
  """
  filelike = VFSOpen(pathspec, progress_callback=progress_callback)
  filelike.Seek(offset)
  return filelike.Read(length)
|
{
"content_hash": "859c6f8c9dfcb15eba950c10144b5576",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 104,
"avg_line_length": 35.648648648648646,
"alnum_prop": 0.719294920394238,
"repo_name": "google/grr",
"id": "a20049ab89d097db06b879ed241f499feb01fd2f",
"size": "10574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/client/grr_response_client/vfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
import time
def test_delete(test_awscc):
    """The awscc delete action removes the matched log group."""
    factory = test_awscc.replay_flight_data("awscc_log_delete")
    policy = test_awscc.load_policy(
        {
            "name": "log-del",
            "resource": "awscc.logs_loggroup",
            "filters": [{"LogGroupName": "/aws/apigateway/welcome"}],
            "actions": ["delete"],
        },
        session_factory=factory,
    )
    matched = policy.run()
    assert len(matched) == 1
    if test_awscc.recording:
        time.sleep(2)
    logs_client = factory().client("logs")
    remaining = logs_client.describe_log_groups(
        logGroupNamePrefix="/aws/apigateway/welcome"
    ).get("logGroups")
    assert remaining == []
def test_update(test_awscc):
    """The awscc update action applies retention and tags to the group."""
    factory = test_awscc.replay_flight_data("awscc_log_update")
    group_name = "/aws/codebuild/custodian-build-python"
    policy = test_awscc.load_policy(
        {
            "name": "log-up",
            "resource": "awscc.logs_loggroup",
            "filters": [{"LogGroupName": group_name}],
            "actions": [
                {
                    "type": "update",
                    "RetentionInDays": 7,
                    "Tags": [{"Key": "Owner", "Value": "Kapil"}],
                }
            ],
        },
        session_factory=factory,
    )
    matched = policy.run()
    assert len(matched) == 1
    # The pre-update snapshot should not yet carry the new settings.
    assert "RetentionInDays" not in matched[0]
    assert "Tags" not in matched[0]
    if test_awscc.recording:
        time.sleep(2)
    refreshed = policy.resource_manager.get_resources([group_name])
    expected = {
        "Arn": (
            "arn:aws:logs:us-east-1:644160558196:"
            "log-group:/aws/codebuild/custodian-build-python:*"
        ),
        "LogGroupName": "/aws/codebuild/custodian-build-python",
        "RetentionInDays": 7,
        "Tags": [{"Key": "Owner", "Value": "Kapil"}],
    }
    assert refreshed[0] == expected
|
{
"content_hash": "493e52a50f379acf37f63bd19b64d663",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 85,
"avg_line_length": 27.397058823529413,
"alnum_prop": 0.5217391304347826,
"repo_name": "thisisshi/cloud-custodian",
"id": "01a8ea258482f3cfaeb06949529b6e78ff1a6b00",
"size": "1942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_awscc/tests/test_ccaction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "62085"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6684814"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration. AlterField ops: FilledForm.data becomes
    # an optional 20000-char CharField, and Question.question_type gets a
    # refreshed set of choice labels (the prior field state is defined by the
    # earlier migrations this one depends on).
    dependencies = [
        ('forms', '0005_auto_20170613_0824'),
    ]
    operations = [
        migrations.AlterField(
            model_name='filledform',
            name='data',
            field=models.CharField(blank=True, max_length=20000, null=True),
        ),
        migrations.AlterField(
            model_name='question',
            name='question_type',
            field=models.CharField(choices=[('Short_answer', 'One Line Answer'), ('Paragraph', 'Multiple Line Answer'), ('Integer', 'Integer Answer'), ('ChoiceField', 'Choice'), ('MultipleChoiceField', 'Multiple-choice')], max_length=50, null=True),
        ),
    ]
|
{
"content_hash": "9229846c768db46bfead9e49200eae64",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 249,
"avg_line_length": 33.26086956521739,
"alnum_prop": 0.6052287581699346,
"repo_name": "aniketp41/Gymkhana-Nominations",
"id": "3df069d33b150620ac340bb10e5f4c970a37a38d",
"size": "838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forms/migrations/0006_auto_20170613_1626.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13871"
},
{
"name": "HTML",
"bytes": "187973"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "249674"
}
],
"symlink_target": ""
}
|
"""Support for IoTaWatt Energy monitor."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
import logging
from iotawattpy.sensor import Sensor
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_WATT_HOUR,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
)
from homeassistant.core import callback
from homeassistant.helpers import entity, entity_registry, update_coordinator
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import dt
from .const import (
ATTR_LAST_UPDATE,
DOMAIN,
VOLT_AMPERE_REACTIVE,
VOLT_AMPERE_REACTIVE_HOURS,
)
from .coordinator import IotawattUpdater
_LOGGER = logging.getLogger(__name__)
@dataclass
class IotaWattSensorEntityDescription(SensorEntityDescription):
    """Class describing IotaWatt sensor entities."""
    # Optional converter applied to the raw sensor reading before it is
    # exposed as the entity's native value (see IotaWattSensor.native_value).
    value: Callable | None = None
# Maps an IoTaWatt unit string (as reported by Sensor.getUnit()) to the entity
# description used when creating the corresponding Home Assistant sensor.
# Entries with entity_registry_enabled_default=False are created disabled.
ENTITY_DESCRIPTION_KEY_MAP: dict[str, IotaWattSensorEntityDescription] = {
    "Amps": IotaWattSensorEntityDescription(
        "Amps",
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=STATE_CLASS_MEASUREMENT,
        device_class=DEVICE_CLASS_CURRENT,
        entity_registry_enabled_default=False,
    ),
    "Hz": IotaWattSensorEntityDescription(
        "Hz",
        native_unit_of_measurement=FREQUENCY_HERTZ,
        state_class=STATE_CLASS_MEASUREMENT,
        icon="mdi:flash",
        entity_registry_enabled_default=False,
    ),
    "PF": IotaWattSensorEntityDescription(
        "PF",
        native_unit_of_measurement=PERCENTAGE,
        state_class=STATE_CLASS_MEASUREMENT,
        device_class=DEVICE_CLASS_POWER_FACTOR,
        # IoTaWatt reports power factor as a 0-1 ratio; HA expects percent.
        value=lambda value: value * 100,
        entity_registry_enabled_default=False,
    ),
    "Watts": IotaWattSensorEntityDescription(
        "Watts",
        native_unit_of_measurement=POWER_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
        device_class=DEVICE_CLASS_POWER,
    ),
    "WattHours": IotaWattSensorEntityDescription(
        "WattHours",
        native_unit_of_measurement=ENERGY_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL,
        device_class=DEVICE_CLASS_ENERGY,
    ),
    "VA": IotaWattSensorEntityDescription(
        "VA",
        native_unit_of_measurement=POWER_VOLT_AMPERE,
        state_class=STATE_CLASS_MEASUREMENT,
        icon="mdi:flash",
        entity_registry_enabled_default=False,
    ),
    "VAR": IotaWattSensorEntityDescription(
        "VAR",
        native_unit_of_measurement=VOLT_AMPERE_REACTIVE,
        state_class=STATE_CLASS_MEASUREMENT,
        icon="mdi:flash",
        entity_registry_enabled_default=False,
    ),
    "VARh": IotaWattSensorEntityDescription(
        "VARh",
        native_unit_of_measurement=VOLT_AMPERE_REACTIVE_HOURS,
        state_class=STATE_CLASS_MEASUREMENT,
        icon="mdi:flash",
        entity_registry_enabled_default=False,
    ),
    "Volts": IotaWattSensorEntityDescription(
        "Volts",
        native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
        state_class=STATE_CLASS_MEASUREMENT,
        device_class=DEVICE_CLASS_VOLTAGE,
        entity_registry_enabled_default=False,
    ),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add sensors for passed config_entry in HA."""
    coordinator: IotawattUpdater = hass.data[DOMAIN][config_entry.entry_id]
    known_keys = set()

    @callback
    def _create_entity(key: str) -> IotaWattSensor:
        """Create a sensor entity for the given coordinator sensor key."""
        known_keys.add(key)
        sensor = coordinator.data["sensors"][key]
        description = ENTITY_DESCRIPTION_KEY_MAP.get(
            sensor.getUnit(), IotaWattSensorEntityDescription("base_sensor")
        )
        # Energy outputs not tracked from the start use the accumulating
        # variant, which keeps its own running total across restarts.
        if sensor.getUnit() == "WattHours" and not sensor.getFromStart():
            return IotaWattAccumulatingSensor(
                coordinator=coordinator, key=key, entity_description=description
            )
        return IotaWattSensor(
            coordinator=coordinator, key=key, entity_description=description
        )

    async_add_entities(_create_entity(key) for key in coordinator.data["sensors"])

    @callback
    def new_data_received():
        """Create entities for sensors that appeared after setup."""
        new_entities = [
            _create_entity(key)
            for key in coordinator.data["sensors"]
            if key not in known_keys
        ]
        if new_entities:
            async_add_entities(new_entities)

    coordinator.async_add_listener(new_data_received)
class IotaWattSensor(update_coordinator.CoordinatorEntity, SensorEntity):
    """Defines a IoTaWatt Energy Sensor."""
    # Narrow the attribute types declared on the base classes.
    entity_description: IotaWattSensorEntityDescription
    coordinator: IotawattUpdater
    def __init__(
        self,
        coordinator: IotawattUpdater,
        key: str,
        entity_description: IotaWattSensorEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(coordinator=coordinator)
        self._key = key
        data = self._sensor_data
        # Only sensors of type "Input" get a stable unique ID (built from hub
        # MAC, channel and unit); other types keep the HA default behavior.
        if data.getType() == "Input":
            self._attr_unique_id = (
                f"{data.hub_mac_address}-input-{data.getChannel()}-{data.getUnit()}"
            )
        self.entity_description = entity_description
    @property
    def _sensor_data(self) -> Sensor:
        """Return sensor data."""
        return self.coordinator.data["sensors"][self._key]
    @property
    def name(self) -> str | None:
        """Return name of the entity."""
        return self._sensor_data.getName()
    @property
    def device_info(self) -> entity.DeviceInfo | None:
        """Return device info."""
        return {
            "connections": {
                (CONNECTION_NETWORK_MAC, self._sensor_data.hub_mac_address)
            },
            "manufacturer": "IoTaWatt",
            "model": "IoTaWatt",
        }
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        # The sensor vanished from the hub: remove the entity (including its
        # registry entry when it had a unique ID) instead of updating state.
        if self._key not in self.coordinator.data["sensors"]:
            if self._attr_unique_id:
                entity_registry.async_get(self.hass).async_remove(self.entity_id)
            else:
                self.hass.async_create_task(self.async_remove())
            return
        super()._handle_coordinator_update()
    @property
    def extra_state_attributes(self) -> dict[str, str]:
        """Return the extra state attributes of the entity."""
        data = self._sensor_data
        attrs = {"type": data.getType()}
        if attrs["type"] == "Input":
            attrs["channel"] = data.getChannel()
        return attrs
    @property
    def native_value(self) -> entity.StateType:
        """Return the state of the sensor."""
        # The entity description may define a converter (e.g. PF -> percent).
        if func := self.entity_description.value:
            return func(self._sensor_data.getValue())
        return self._sensor_data.getValue()
class IotaWattAccumulatingSensor(IotaWattSensor, RestoreEntity):
    """Defines a IoTaWatt Accumulative Energy (High Accuracy) Sensor."""
    def __init__(
        self,
        coordinator: IotawattUpdater,
        key: str,
        entity_description: IotaWattSensorEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(coordinator, key, entity_description)
        # Distinct unique ID so this entity can coexist with the plain
        # (non-accumulating) sensor for the same channel.
        if self._attr_unique_id is not None:
            self._attr_unique_id += ".accumulated"
        # Running total; restored from the last known state in
        # async_added_to_hass before any coordinator update is applied.
        self._accumulated_value: float | None = None
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        assert (
            self._accumulated_value is not None
        ), "async_added_to_hass must have been called first"
        # Presumably each refresh delivers the energy for the elapsed interval,
        # so adding it accumulates the total — TODO confirm against
        # IotawattUpdater's query window handling.
        self._accumulated_value += float(self._sensor_data.getValue())
        super()._handle_coordinator_update()
    @property
    def native_value(self) -> entity.StateType:
        """Return the state of the sensor."""
        if self._accumulated_value is None:
            return None
        return round(self._accumulated_value, 1)
    async def async_added_to_hass(self) -> None:
        """Load the last known state value of the entity if the accumulated type."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        self._accumulated_value = 0.0
        if state:
            try:
                # Previous value could be `unknown` if the connection didn't originally
                # complete.
                self._accumulated_value = float(state.state)
            except (ValueError) as err:
                _LOGGER.warning("Could not restore last state: %s", err)
            else:
                if ATTR_LAST_UPDATE in state.attributes:
                    last_run = dt.parse_datetime(state.attributes[ATTR_LAST_UPDATE])
                    if last_run is not None:
                        # Resume the coordinator's query window from the last
                        # recorded update so no interval is double-counted.
                        self.coordinator.update_last_run(last_run)
        # Force a second update from the iotawatt to ensure that sensors are up to date.
        await self.coordinator.async_request_refresh()
    @property
    def name(self) -> str | None:
        """Return name of the entity."""
        return f"{self._sensor_data.getSourceName()} Accumulated"
    @property
    def extra_state_attributes(self) -> dict[str, str]:
        """Return the extra state attributes of the entity."""
        attrs = super().extra_state_attributes
        # Persist the hub's last-update timestamp so a restart can resume the
        # accumulation window (read back in async_added_to_hass).
        assert (
            self.coordinator.api is not None
            and self.coordinator.api.getLastUpdateTime() is not None
        )
        attrs[ATTR_LAST_UPDATE] = self.coordinator.api.getLastUpdateTime().isoformat()
        return attrs
|
{
"content_hash": "2b635edab3df8b323aa1859340167002",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 88,
"avg_line_length": 33.00986842105263,
"alnum_prop": 0.6316890881913303,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "ec2918b0ce655f0b60fe1cb00e11c19056b1b401",
"size": "10035",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/iotawatt/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
__author__ = "Osman Baskaya"
"""
Useful functions for Wordnet 3.0
"""
import sys
from nltk.corpus import wordnet as wn
# Module-level cache mapping token -> list of WordNet synsets, shared by all
# lookup helpers below to avoid repeated wn.synsets() calls.
w_synsets = dict()
def get_synsets_for_sent(sentence):
    """Return the WordNet synset list for each token of *sentence*.

    Accepts either a token list or a whitespace-separated string; results
    are memoized in the module-level ``w_synsets`` cache.
    """
    if not isinstance(sentence, list):
        sentence = sentence.split()
    per_token = []
    for token in sentence:
        cached = w_synsets.get(token)
        if cached is None:
            cached = wn.synsets(token)
            w_synsets[token] = cached
        per_token.append(cached)
    return per_token
def get_synsets_for_sents(sentences):
    """Return per-token synset lists for every sentence in *sentences*."""
    return [get_synsets_for_sent(sentence) for sentence in sentences]
def get_synsets_for_sents_tuple(sents_tuples):
    """ This function is for task3 """
    syns_sents = [
        (get_synsets_for_sent(first), get_synsets_for_sent(second))
        for first, second in sents_tuples
    ]
    sys.stderr.write("all synsets are fetched.\n")
    return syns_sents
|
{
"content_hash": "5284adfb1bebdc529cd65e3d1ba7411d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 57,
"avg_line_length": 23.325581395348838,
"alnum_prop": 0.6201395812562313,
"repo_name": "osmanbaskaya/semeval14-task3",
"id": "bd1fd406b0f7e95b652fbec7bfccc7785b040afd",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run/wn_utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22682"
},
{
"name": "Scala",
"bytes": "1451"
},
{
"name": "Shell",
"bytes": "532"
},
{
"name": "TeX",
"bytes": "215954"
}
],
"symlink_target": ""
}
|
''' Provide a request handler that returns a page displaying a document.
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
from six.moves.urllib.parse import urlparse
from tornado import gen
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.util.string import encode_utf8
from bokeh.util.compiler import bundle_all_models
from bokeh.embed.standalone import script_for_render_items
from .session_handler import SessionHandler
class AutoloadJsHandler(SessionHandler):
    ''' Serves the autoload JS chunk that embeds a server session in a page.

    '''

    @gen.coroutine
    def get(self, *args, **kwargs):
        session = yield self.get_session()

        element_id = self.get_argument("bokeh-autoload-element", default=None)
        if not element_id:
            self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
            return

        app_path = self.get_argument("bokeh-app-path", default="/")
        absolute_url = self.get_argument("bokeh-absolute-url", default=None)

        server_url = None
        if absolute_url:
            server_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(absolute_url))

        resources = self.application.resources(server_url)
        bundle = bundle_all_models()
        render_items = [dict(sessionid=session.id, elementid=element_id, use_for_title=False)]
        script = script_for_render_items(None, render_items, app_path=app_path, absolute_url=absolute_url)

        # With resources=none the page is expected to supply BokehJS itself.
        include_resources = self.get_argument("resources", "default") != "none"
        js_urls = resources.js_files if include_resources else []
        css_urls = resources.css_files if include_resources else []

        js = AUTOLOAD_JS.render(
            js_urls = js_urls,
            css_urls = css_urls,
            js_raw = resources.js_raw + [bundle, script],
            css_raw = resources.css_raw_str,
            elementid = element_id,
        )

        self.set_header("Content-Type", 'application/javascript')
        self.write(encode_utf8(js))
|
{
"content_hash": "b363a61d79ce71aa27948d07b7e8e798",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 106,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.6430898092135877,
"repo_name": "rs2/bokeh",
"id": "2f71e5463477a61c046bc3d2544e505595e4ac3c",
"size": "2149",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bokeh/server/views/autoload_js_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "101858"
},
{
"name": "CoffeeScript",
"bytes": "1220192"
},
{
"name": "HTML",
"bytes": "48230"
},
{
"name": "JavaScript",
"bytes": "57773"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2648330"
},
{
"name": "Shell",
"bytes": "8519"
},
{
"name": "TypeScript",
"bytes": "236495"
}
],
"symlink_target": ""
}
|
"""Unit Tests: Forseti Server."""
from builtins import object
import argparse
import unittest
import unittest.mock as mock
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.services import server
class NameSpace(object):
    """Simple attribute bag standing in for an argparse.Namespace."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class ServerTest(ForsetiTestCase):
    """Test Forseti Server.

    Each test patches argparse so server.main() receives a fixed set of CLI
    arguments, then verifies that main() exits with the expected error code
    while writing usage information to stderr.
    """

    @staticmethod
    def _mock_parsed_args(**overrides):
        """Build a fake parsed-args namespace with server CLI defaults.

        Args:
            **overrides: argument values overriding the defaults.

        Returns:
            NameSpace: an argparse.Namespace-like object.
        """
        args = {
            'endpoint': '[::]:50051',
            'services': None,
            'forseti_db': None,
            'config_file_path': None,
            'log_level': 'info',
            'enable_console_log': False,
        }
        args.update(overrides)
        return NameSpace(**args)

    def _assert_main_exits(self, expected_exit_code, mock_sys_error):
        """Run server.main() and verify exit code and stderr usage output."""
        with self.assertRaises(SystemExit) as e:
            server.main()
        self.assertEqual(expected_exit_code, e.exception.code)
        self.assertTrue(mock_sys_error.write.called)

    @mock.patch.object(argparse.ArgumentParser, 'parse_args')
    @mock.patch.object(argparse.ArgumentParser, 'print_usage')
    @mock.patch('sys.stderr', autospec=True)
    def test_services_not_specified(self, mock_sys_error, mock_print_usage, mock_argparse):
        """Test main() with no service specified."""
        mock_argparse.return_value = self._mock_parsed_args()
        mock_print_usage.return_value = None
        self._assert_main_exits(1, mock_sys_error)

    @mock.patch.object(argparse.ArgumentParser, 'parse_args')
    @mock.patch.object(argparse.ArgumentParser, 'print_usage')
    @mock.patch('sys.stderr', autospec=True)
    def test_config_file_path_not_specified(self, mock_sys_error, mock_print_usage, mock_argparse):
        """Test main() with no config_file_path specified."""
        mock_argparse.return_value = self._mock_parsed_args(services=['scanner'])
        mock_print_usage.return_value = None
        self._assert_main_exits(2, mock_sys_error)

    @mock.patch.object(argparse.ArgumentParser, 'parse_args')
    @mock.patch.object(argparse.ArgumentParser, 'print_usage')
    @mock.patch('sys.stderr', autospec=True)
    def test_config_file_path_non_readable_file(self, mock_sys_error, mock_print_usage, mock_argparse):
        """Test main() with non-readable config file."""
        mock_argparse.return_value = self._mock_parsed_args(
            services=['scanner'],
            config_file_path='/this/does/not/exist')
        mock_print_usage.return_value = None
        self._assert_main_exits(3, mock_sys_error)

    @mock.patch.object(argparse.ArgumentParser, 'parse_args')
    @mock.patch.object(argparse.ArgumentParser, 'print_usage')
    @mock.patch('sys.stderr', autospec=True)
    def test_config_file_path_non_existent_file(self, mock_sys_error, mock_print_usage, mock_argparse):
        """Test main() with a config file that exists but is not accessible."""
        mock_argparse.return_value = self._mock_parsed_args(
            services=['scanner'],
            config_file_path='/what/ever')
        mock_print_usage.return_value = None
        with mock.patch.object(file_loader, "isfile") as mock_isfile:
            mock_isfile.return_value = True
            with mock.patch.object(file_loader, "access") as mock_access:
                mock_access.return_value = False
                self._assert_main_exits(4, mock_sys_error)

    @mock.patch.object(argparse.ArgumentParser, 'parse_args')
    @mock.patch.object(argparse.ArgumentParser, 'print_usage')
    @mock.patch('sys.stderr', autospec=True)
    def test_forseti_db_not_set(self, mock_sys_error, mock_print_usage, mock_argparse):
        """Test main() with forseti_db not set."""
        mock_argparse.return_value = self._mock_parsed_args(
            services=['scanner'],
            config_file_path='/what/ever')
        mock_print_usage.return_value = None
        with mock.patch.object(file_loader, "isfile") as mock_isfile:
            mock_isfile.return_value = True
            with mock.patch.object(file_loader, "access") as mock_access:
                mock_access.return_value = True
                self._assert_main_exits(5, mock_sys_error)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "ca6a18bef1c7decbb69651e6cac0222d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 103,
"avg_line_length": 39.67424242424242,
"alnum_prop": 0.6257399274393737,
"repo_name": "forseti-security/forseti-security",
"id": "c3174ae4784a3107b51c4da400667a5e5ec1cc8e",
"size": "5850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/services/server_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3652"
},
{
"name": "HCL",
"bytes": "37409"
},
{
"name": "JavaScript",
"bytes": "1833"
},
{
"name": "Jinja",
"bytes": "6379"
},
{
"name": "Makefile",
"bytes": "5427"
},
{
"name": "Open Policy Agent",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "4140122"
},
{
"name": "Ruby",
"bytes": "37434"
},
{
"name": "Shell",
"bytes": "17062"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Build the thune creature object from its shared template."""
	creature = Creature()
	creature.template = "object/mobile/shared_thune.iff"
	creature.attribute_template_id = 9
	creature.stfName("monster_name","thune")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return creature
|
{
"content_hash": "bc98dc9cd7f3ab09547e1162478c629a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 51,
"avg_line_length": 21.46153846153846,
"alnum_prop": 0.6845878136200717,
"repo_name": "obi-two/Rebelion",
"id": "5d3c9b2c6d308a27016281d64c35a2a03b9f3961",
"size": "424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_thune.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import argparse
import os
import time
from typing import Any, Iterator, Tuple
from tools.wpt.testfiles import get_git_cmd
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def calculate_cutoff_date(until: int, epoch: int, offset: int) -> int:
    """Round *until* down to the nearest epoch boundary shifted by *offset*."""
    epochs_elapsed = (until - offset) // epoch
    return epochs_elapsed * epoch + offset
def parse_epoch(string: str) -> int:
    """Convert a '<digits><h|d|w>' epoch spec into a number of seconds."""
    seconds_per_unit = {"h": 3600, "d": 86400, "w": 604800}
    value, unit = string[:-1], string[-1:]
    if not value.isdigit() or unit not in seconds_per_unit:
        raise argparse.ArgumentTypeError('must be digits followed by h/d/w')
    return int(value) * seconds_per_unit[unit]
def get_tagged_revisions(pattern: str) -> Iterator[Tuple[str, str, int]]:
    '''
    Iterates the tagged revisions as (tag name, commit sha, committer date) tuples.
    '''
    git = get_git_cmd(wpt_root)
    for_each_ref_args = [
        pattern,
        u'--sort=-committerdate',
        u'--format=%(refname:lstrip=2) %(objectname) %(committerdate:raw)',
        u'--count=100000'
    ]
    output = git("for-each-ref", *for_each_ref_args)  # type: ignore
    for line in output.splitlines():
        if not line:
            continue
        # committerdate:raw is "<timestamp> <tz>"; the tz offset is discarded.
        tag, commit, raw_date, _ = line.split(" ")
        yield tag, commit, int(raw_date)
def get_epoch_revisions(epoch: int, until: int, max_count: int) -> Iterator[str]:
    """Yields up to max_count commit shas, the newest one per epoch interval.

    Walks the merge_pr_* tags from newest to oldest, emitting the first
    commit found below each successive epoch-aligned cutoff date.
    """
    # Set an offset to start to count the weekly epoch from
    # Monday 00:00:00. This is particularly important for the weekly epoch
    # because it fixes the start of the epoch to Monday. This offset is
    # calculated from Thursday, 1 January 1970 0:00:00 to
    # Monday, 5 January 1970 0:00:00
    epoch_offset = 345600
    count = 0
    # Iterates the tagged revisions in descending order finding the more
    # recent commit still older than a "cutoff_date" value.
    # When a commit is found "cutoff_date" is set to a new value multiplier of
    # "epoch" but still below of the date of the current commit found.
    # This needed to deal with intervals where no candidates were found
    # for the current "epoch" and the next candidate found is yet below
    # the lower values of the interval (it is the case of J and I for the
    # interval between Wed and Tue, in the example). The algorithm fix
    # the next "cutoff_date" value based on the date value of the current one
    # skipping the intermediate values.
    # The loop ends once we reached the required number of revisions to return
    # or the are no more tagged revisions or the cutoff_date reach zero.
    #
    # Fri Sat Sun Mon Tue Wed Thu Fri Sat
    # |   |   |   |   |   |   |   |   |
    # -A---B-C---DEF---G---H--IJ----------K-----L-M----N--O--
    #                                                     ^
    #                                                    now
    # Expected result: N,M,K,J,H,G,F,C,A
    cutoff_date = calculate_cutoff_date(until, epoch, epoch_offset)
    for _, commit, date in get_tagged_revisions("refs/tags/merge_pr_*"):
        if count >= max_count:
            return
        if date < cutoff_date:
            yield commit
            count += 1
            cutoff_date = calculate_cutoff_date(date, epoch, epoch_offset)
def get_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the rev-list command.

    Fixes the user-facing help text of ``--epoch`` ("mimimun" -> "minimum",
    "e.x." -> "e.g.").
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--epoch",
                        default="1d",
                        type=parse_epoch,
                        help="regular interval of time selected to get the "
                             "tagged revisions. Valid values are digits "
                             "followed by h/d/w (e.g. 9h, 9d, 9w ...) where "
                             "the minimum selectable interval is one hour "
                             "(1h)")
    parser.add_argument("--max-count",
                        default=1,
                        type=int,
                        help="maximum number of revisions to be returned by "
                             "the command")
    return parser
def run_rev_list(**kwargs: Any) -> None:
    """Print one selected revision per line for the requested epoch window."""
    # Safety margin: after this many seconds it is fine to assume that any
    # tags have been created and pushed.
    epoch_threshold = 600
    cutoff_time = int(time.time()) - epoch_threshold
    revisions = get_epoch_revisions(kwargs["epoch"], cutoff_time, kwargs["max_count"])
    for revision in revisions:
        print(revision)
|
{
"content_hash": "ec588e94a41190d3034e81dd274883e6",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 83,
"avg_line_length": 40.89719626168224,
"alnum_prop": 0.5808957952468007,
"repo_name": "scheib/chromium",
"id": "b3f1f3486f7df0b39085573ef725035cbe50b9bc",
"size": "4376",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "third_party/wpt_tools/wpt/tools/wpt/revlist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_aggregate
short_description: NetApp ONTAP manage aggregates.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, delete, or manage aggregates on ONTAP.
options:
state:
description:
- Whether the specified aggregate should exist or not.
choices: ['present', 'absent']
default: 'present'
service_state:
description:
    - Whether the specified aggregate should be enabled or disabled. Creates the aggregate if it doesn't exist.
choices: ['online', 'offline']
name:
required: true
description:
- The name of the aggregate to manage.
from_name:
description:
- Name of the aggregate to be renamed.
version_added: '2.7'
nodes:
description:
- Node(s) for the aggregate to be created on. If no node specified, mgmt lif home will be used.
- If multiple nodes specified an aggr stripe will be made.
disk_type:
description:
- Type of disk to use to build aggregate
choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']
version_added: '2.7'
disk_count:
description:
- Number of disks to place into the aggregate, including parity disks.
- The disks in this newly-created aggregate come from the spare disk pool.
- The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
- Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
- Required when C(state=present).
disk_size:
description:
- Disk size to use in 4K block size. Disks within 10% of specified size will be used.
version_added: '2.7'
raid_size:
description:
- Sets the maximum number of drives per raid group.
version_added: '2.7'
raid_type:
description:
- Specifies the type of RAID groups to use in the new aggregate.
choices: ['raid4', 'raid_dp', 'raid_tec']
version_added: '2.7'
unmount_volumes:
type: bool
description:
- If set to "TRUE", this option specifies that all of the volumes hosted by the given aggregate are to be unmounted
- before the offline operation is executed.
- By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes.
disks:
type: list
description:
- Specific list of disks to use for the new aggregate.
- To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied.
Additionally, the same number of disks must be supplied in both lists.
version_added: '2.8'
is_mirrored:
type: bool
description:
- Specifies that the new aggregate be mirrored (have two plexes).
- If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored.
- This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options.
version_added: '2.8'
mirror_disks:
type: list
description:
- List of mirror disks to use. It must contain the same number of disks specified in 'disks'.
version_added: '2.8'
spare_pool:
description:
- Specifies the spare pool from which to select spare disks to use in creation of a new aggregate.
choices: ['Pool0', 'Pool1']
version_added: '2.8'
wait_for_online:
description:
- Set this parameter to 'true' for synchronous execution during create (wait until aggregate status is online)
- Set this parameter to 'false' for asynchronous execution
- For asynchronous, execution exits as soon as the request is sent, without checking aggregate status
type: bool
default: false
version_added: '2.8'
time_out:
description:
- time to wait for aggregate creation in seconds
- default is set to 100 seconds
default: 100
version_added: "2.8"
'''
EXAMPLES = """
- name: Create Aggregates and wait 5 minutes until aggregate is online
na_ontap_aggregate:
state: present
service_state: online
name: ansibleAggr
disk_count: 1
wait_for_online: True
time_out: 300
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Manage Aggregates
na_ontap_aggregate:
state: present
service_state: offline
unmount_volumes: true
name: ansibleAggr
disk_count: 1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Rename Aggregates
na_ontap_aggregate:
state: present
service_state: online
from_name: ansibleAggr
name: ansibleAggr2
disk_count: 1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete Aggregates
na_ontap_aggregate:
state: absent
service_state: offline
unmount_volumes: true
name: ansibleAggr
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapAggregate(object):
    """Create, rename, online/offline or delete ONTAP aggregates via ZAPI.

    Fixes:
    - ``fail_json(mgs=...)`` keyword typo in ``__init__`` which silently
      dropped the error message from the failure result.
    - duplicated ``raid-type`` option assignment in ``create_aggr``.
    """

    def __init__(self):
        """Build the argument spec, validate parameters, connect to ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            disks=dict(required=False, type='list'),
            disk_count=dict(required=False, type='int', default=None),
            disk_size=dict(required=False, type='int'),
            disk_type=dict(required=False, choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']),
            from_name=dict(required=False, type='str'),
            mirror_disks=dict(required=False, type='list'),
            nodes=dict(required=False, type='list'),
            is_mirrored=dict(required=False, type='bool'),
            raid_size=dict(required=False, type='int'),
            raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec']),
            service_state=dict(required=False, choices=['online', 'offline']),
            spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            unmount_volumes=dict(required=False, type='bool'),
            wait_for_online=dict(required=False, type='bool', default=False),
            time_out=dict(required=False, type='int', default=100)
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('service_state', 'offline', ['unmount_volumes']),
            ],
            mutually_exclusive=[
                ('is_mirrored', 'disks'),
                ('is_mirrored', 'mirror_disks'),
                ('is_mirrored', 'spare_pool'),
                ('spare_pool', 'disks')
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # 'mirror_disks' is only meaningful together with 'disks'.
        if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None:
            # Bug fix: this was fail_json(mgs=...) — the typo meant the error
            # text never reached the module result.
            self.module.fail_json(msg="mirror_disks require disks options to be set")
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def aggr_get_iter(self, name):
        """
        Return aggr-get-iter query results.

        :param name: Name of the aggregate
        :return: NaElement if aggregate found, None otherwise
        """
        aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-attributes', **{'aggregate-name': name})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        aggr_get_iter.add_child_elem(query)
        result = None
        try:
            result = self.server.invoke_successfully(aggr_get_iter, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            # Error 13040 denotes an aggregate not being found; treat it as
            # "no result" rather than a failure.
            if to_native(error.code) == "13040":
                pass
            else:
                self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
        return result

    def get_aggr(self, name=None):
        """
        Fetch details if the aggregate exists.

        :param name: Name of the aggregate to be fetched (defaults to the
            'name' module parameter)
        :return:
            Dictionary of current details if aggregate found
            None if aggregate is not found
        """
        if name is None:
            name = self.parameters['name']
        aggr_get = self.aggr_get_iter(name)
        if (aggr_get and aggr_get.get_child_by_name('num-records') and
                int(aggr_get.get_child_content('num-records')) >= 1):
            current_aggr = dict()
            attr = aggr_get.get_child_by_name('attributes-list').get_child_by_name('aggr-attributes')
            current_aggr['service_state'] = attr.get_child_by_name('aggr-raid-attributes').get_child_content('state')
            return current_aggr
        return None

    def aggregate_online(self):
        """
        Set state of an offline aggregate to online.

        :return: None
        """
        online_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-online', **{'aggregate': self.parameters['name'],
                              'force-online': 'true'})
        try:
            self.server.invoke_successfully(online_aggr,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
                                  (self.parameters['name'], self.parameters['service_state'], to_native(error)),
                                  exception=traceback.format_exc())

    def aggregate_offline(self):
        """
        Set state of an online aggregate to offline.

        :return: None
        """
        offline_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-offline', **{'aggregate': self.parameters['name'],
                               'force-offline': 'false',
                               'unmount-volumes': str(self.parameters['unmount_volumes'])})
        try:
            self.server.invoke_successfully(offline_aggr, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
                                  (self.parameters['name'], self.parameters['service_state'], to_native(error)),
                                  exception=traceback.format_exc())

    def create_aggr(self):
        """
        Create an aggregate, optionally waiting until it is online.

        :return: None
        """
        if not self.parameters.get('disk_count'):
            self.module.fail_json(msg='Error provisioning aggregate %s: \
                                      disk_count is required' % self.parameters['name'])
        options = {'aggregate': self.parameters['name'],
                   'disk-count': str(self.parameters['disk_count'])
                   }
        if self.parameters.get('disk_type'):
            options['disk-type'] = self.parameters['disk_type']
        if self.parameters.get('raid_size'):
            options['raid-size'] = str(self.parameters['raid_size'])
        if self.parameters.get('raid_type'):
            options['raid-type'] = self.parameters['raid_type']
        if self.parameters.get('disk_size'):
            options['disk-size'] = str(self.parameters['disk_size'])
        if self.parameters.get('is_mirrored'):
            options['is-mirrored'] = str(self.parameters['is_mirrored'])
        if self.parameters.get('spare_pool'):
            options['spare-pool'] = self.parameters['spare_pool']
        # NOTE: a second, redundant 'raid-type' assignment used to live here;
        # it duplicated the one above and has been removed.
        aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options)
        if self.parameters.get('nodes'):
            nodes_obj = netapp_utils.zapi.NaElement('nodes')
            aggr_create.add_child_elem(nodes_obj)
            for node in self.parameters['nodes']:
                nodes_obj.add_new_child('node-name', node)
        if self.parameters.get('disks'):
            disks_obj = netapp_utils.zapi.NaElement('disk-info')
            for disk in self.parameters.get('disks'):
                disks_obj.add_new_child('name', disk)
            aggr_create.add_child_elem(disks_obj)
        if self.parameters.get('mirror_disks'):
            mirror_disks_obj = netapp_utils.zapi.NaElement('disk-info')
            for disk in self.parameters.get('mirror_disks'):
                mirror_disks_obj.add_new_child('name', disk)
            aggr_create.add_child_elem(mirror_disks_obj)

        try:
            self.server.invoke_successfully(aggr_create, enable_tunneling=False)
            if self.parameters.get('wait_for_online'):
                # Round time_out up to a number of 10-second polling retries.
                retries = (self.parameters['time_out'] + 5) / 10
                current = self.get_aggr()
                status = None if current is None else current['service_state']
                while status != 'online' and retries > 0:
                    time.sleep(10)
                    retries = retries - 1
                    current = self.get_aggr()
                    status = None if current is None else current['service_state']
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error provisioning aggregate %s: %s"
                                      % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_aggr(self):
        """
        Delete the aggregate.

        :return: None
        """
        aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-destroy', **{'aggregate': self.parameters['name']})

        try:
            self.server.invoke_successfully(aggr_destroy,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def rename_aggregate(self):
        """
        Rename the aggregate from 'from_name' to 'name'.
        """
        aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-rename', **{'aggregate': self.parameters['from_name'],
                              'new-aggregate-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(aggr_rename, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error renaming aggregate %s: %s"
                                      % (self.parameters['from_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_aggr(self, modify):
        """
        Modify state of the aggregate.

        :param modify: dictionary of parameters to be modified
        :return: None
        """
        if modify['service_state'] == 'offline':
            self.aggregate_offline()
        elif modify['service_state'] == 'online':
            self.aggregate_online()

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster and create an Autosupport
        log event with the given module name.

        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)

    def apply(self):
        """
        Apply the requested action (create/rename/delete/state change).

        :return: None
        """
        self.asup_log_for_cserver("na_ontap_aggregate")
        current = self.get_aggr()
        # Rename and create are mutually exclusive: when 'from_name' is set
        # we only consider renaming, otherwise create/delete applies.
        rename, cd_action = None, None
        if self.parameters.get('from_name'):
            rename = self.na_helper.is_rename_action(self.get_aggr(self.parameters['from_name']), current)
            if rename is None:
                self.module.fail_json(msg="Error renaming: aggregate %s does not exist" % self.parameters['from_name'])
        else:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                # Check mode: report the pending change without executing it.
                pass
            else:
                if rename:
                    self.rename_aggregate()
                elif cd_action == 'create':
                    self.create_aggr()
                elif cd_action == 'delete':
                    self.delete_aggr()
                elif modify:
                    self.modify_aggr(modify)
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Instantiate the aggregate manager and apply the requested state."""
    NetAppOntapAggregate().apply()


if __name__ == '__main__':
    main()
|
{
"content_hash": "c0dae6722a4baa71ce84b29ff3577038",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 135,
"avg_line_length": 39.002132196162044,
"alnum_prop": 0.6010824404111087,
"repo_name": "thaim/ansible",
"id": "ac76e77abd6d1768770b11e156b4ebca3f2b5ffc",
"size": "18434",
"binary": false,
"copies": "42",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/storage/netapp/na_ontap_aggregate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
import __builtin__
import errno
from itertools import chain, count
import os
import logging
import stat
import sys
import fuse
from pyfs.mapping import (
add_module,
add_symlink,
is_dir,
is_executable,
is_file,
is_symlink,
get_content,
get_elements,
logcall,
PATH_BIN_PREFIX,
PATH_DOT_PREFIX,
PATH_LIB_PREFIX,
PATH_MODULES,
read_from_string,
reset_modules_list,
CannotResolve,
)
class PyFS(fuse.Operations):
def __init__(self, path_to_projectdir=None):
super(PyFS, self).__init__()
self._path_to_projectdir = path_to_projectdir
self._next_fh = count()
self._flags_for_open_files = {} # file handle -> fh
for name in ("__builtin__", "json", "os", "re", "string", "sys"):
add_module(name)
for name in dir(__builtin__):
sourcepath = "{}{}".format(PATH_BIN_PREFIX, name)
add_symlink(
sourcepath,
"{}{}{}/{}".format(
"../" * (sourcepath.count("/") - 1),
PATH_LIB_PREFIX[1:],
"__builtin__",
name
)
)
self._log = logging.getLogger(self.__class__.__name__)
@logcall
def getattr(self, path, fh=None):
try:
return self.try_to_getattr(path, fh)
except CannotResolve:
raise fuse.FuseOSError(errno.ENOENT)
@logcall
def try_to_getattr(self, path, fh):
if path == '/' or path == "/." or path == "/..":
return dict(
st_mode=stat.S_IFDIR | 0555,
st_nlink=2,
)
elif path.startswith(PATH_DOT_PREFIX) and not "." in path:
return dict(
st_mode=stat.S_IFREG | 0555,
st_nlink=1,
st_size=len(get_content(path, self._path_to_projectdir)),
)
elif is_symlink(path):
return dict(
st_mode=stat.S_IFLNK | 0777,
st_nlink=1,
st_size=len(get_content(path, self._path_to_projectdir)),
)
elif is_dir(path):
return dict(
st_mode=stat.S_IFDIR | 0555,
st_nlink=3,
)
elif is_file(path):
def _get_file_mode():
if path == PATH_MODULES:
return 0666
elif is_executable(path):
return 0555
else:
return 0444
return dict(
st_mode=stat.S_IFREG | _get_file_mode(),
st_nlink=1,
st_size=len(get_content(path, self._path_to_projectdir)),
)
else:
raise fuse.FuseOSError(errno.ENOENT)
@logcall
def read(self, path, size, offset, fh):
return read_from_string(
get_content(path, self._path_to_projectdir),
size,
offset,
)
@logcall
def readdir(self, path, fh):
return [name for name in chain([".", ".."], get_elements(path))]
@logcall
def readlink(self, path):
return get_content(path, self._path_to_projectdir)
def open(self, path, flags):
if path == PATH_MODULES:
if flags & os.O_RDWR:
self._log.debug(
"Cannot allow readwrite access. Flags: {}".format(flags))
raise fuse.FuseOSError(errno.EPERM)
if flags & os.O_TRUNC:
reset_modules_list()
else:
if flags & os.O_WRONLY or flags & os.O_RDWR:
self._log.debug(
"Cannot write to Python objects. Flags: {}".format(flags))
raise fuse.FuseOSError(errno.EPERM)
fh = self._next_fh.next()
self._flags_for_open_files[fh] = flags
return fh
def truncate(self, path, length, fh=None):
if path != PATH_MODULES:
raise fuse.FuseOSError(errno.EPERM)
if length != 0:
self._log.debug("Must completely truncate the modules file.")
raise IOError(errno.EPERM)
reset_modules_list()
def release(self, path, fh):
if fh not in self._flags_for_open_files:
# EBADFD = "File descriptor in bad state" (not sure it's correct)
raise fuse.FuseOSError(errno.EBADFD)
del self._flags_for_open_files[fh]
return fh
def write(self, path, data, offset, fh):
if fh not in self._flags_for_open_files:
# EBADFD = "File descriptor in bad state" (not sure it's correct)
raise fuse.FuseOSError(errno.EBADFD)
if not self._flags_for_open_files[fh] & os.O_APPEND and offset != 0:
self._log.debug("Must either append to or truncate a file.")
raise fuse.FuseOSError(-errno.EPERM)
if data.strip():
add_module(data.strip())
return len(data)
if __name__ == '__main__':
    # Log everything to ./pyfs.log, truncating any previous run's log file.
    logging.basicConfig(filename="pyfs.log", filemode="w+")
    logging.getLogger().setLevel(logging.DEBUG)
    # Mount the filesystem at the mountpoint given as the first CLI argument,
    # exposing the current working directory as the project directory.
    fuse.FUSE(PyFS(os.getcwd()), sys.argv[1])
|
{
"content_hash": "dde66d3fd6e87e7aba80d370a4cde61d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 78,
"avg_line_length": 31.311377245508982,
"alnum_prop": 0.5207496653279786,
"repo_name": "mknecht/pyfs",
"id": "365e541b75cf3a21051b3c590f619eaf29666f8d",
"size": "5377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfs/filesystem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14147"
}
],
"symlink_target": ""
}
|
"""
Django settings for sscore project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# here() gives us file paths from the root of the system to the directory
# holding the current file.
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("..")
# root() gives us file paths from the root of the system to whatever
# folder(s) we pass it starting at the parent directory of the current file.
root = lambda *x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6gw^^w=g*!s0%zqgfv6nm1o5g43r8nmfz+gch8*6#x0v42rc91'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'pytz',
'restless'
)
LOCAL_APPS = (
'nav',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sscore.urls'
WSGI_APPLICATION = 'sscore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = root('..', 'static')
MEDIA_ROOT = root('..', 'uploads')
STATICFILES_DIRS = (
root('assets'),
)
TEMPLATE_DIRS = (
root('templates'),
)
# Tastypie settings
API_LIMIT_PER_PAGE = 0
|
{
"content_hash": "0755642e884359e57bd333dbb3cad2f5",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 23.965811965811966,
"alnum_prop": 0.7097004279600571,
"repo_name": "andremrsantos/s-score-view",
"id": "ab486e65429a88323c3d2bb0800489eae91d6d05",
"size": "2804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sscore/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6594"
},
{
"name": "Python",
"bytes": "14640"
}
],
"symlink_target": ""
}
|
import unittest
import usaddress
class TestTagging(unittest.TestCase):
    """Smoke tests for usaddress.tag."""

    def test_broadway(self):
        """tag() should accept an intersection-style street address."""
        address = '1775 Broadway And 57th, Newyork NY'
        usaddress.tag(address)
|
{
"content_hash": "7353d03236d7dd4065d484b7d53b911d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 18.2,
"alnum_prop": 0.6813186813186813,
"repo_name": "yl2695/usaddress",
"id": "87b37eae4d76c7523b009dabde93a66d5f097f89",
"size": "182",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_tagging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41712"
}
],
"symlink_target": ""
}
|
from common import startfile, SIZES, classname, cells, mklabel
if __name__ == '__main__':
    # Code generator (Python 2): emits the Java source of a Transpose utility
    # class, with one static transpose(a, b) overload per matrix size pair.
    fd = startfile("functions", "Transpose")

    # Write one line of generated Java to the output file.
    def line(s=""):
        print >> fd, s

    # Import every fixed-size matrix class the overloads below refer to.
    for rows in SIZES:
        for cols in SIZES:
            line("import org.dnikulin.jula.fixed.%s;" % classname(rows, cols))
    line()

    line("public final class Transpose {")
    for rows in SIZES:
        for cols in SIZES:
            # transpose(RxC, CxR): copy each input cell to the mirrored
            # (col, row) field of the output.
            iclass = classname(rows, cols)
            oclass = classname(cols, rows)
            line("    public static void transpose(final %s a, final %s b) {" % (iclass, oclass))
            for (row, col, ilabel) in cells(rows, cols):
                olabel = mklabel(col, row)
                line("        b.%s = a.%s;" % (olabel, ilabel))
            line("    }")
            line()
    # Non-instantiable utility class: private constructor only.
    line("    private Transpose() {}")
    line("}")
    fd.flush()
    fd.close()
|
{
"content_hash": "3708baac832b2335cfaf45ff18ad081a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 97,
"avg_line_length": 28.838709677419356,
"alnum_prop": 0.5134228187919463,
"repo_name": "dnikulin/jula",
"id": "3d428e830d89e7e90644827cd91a530a732e603d",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/make_transpose.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "5689"
},
{
"name": "Python",
"bytes": "17121"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter warehouse.Stock.quantity to IntegerField(default=0)."""
    # Must run after the company_name removal migration.
    dependencies = [
        ("warehouse", "0014_remove_warehouse_company_name"),
    ]

    operations = [
        migrations.AlterField(
            model_name="stock",
            name="quantity",
            field=models.IntegerField(default=0),
        ),
    ]
|
{
"content_hash": "e8e6652fd7f6b8feb355abb181ff72cc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 60,
"avg_line_length": 21.875,
"alnum_prop": 0.5857142857142857,
"repo_name": "mociepka/saleor",
"id": "1943d386d94badfcfe81cbc2a9e922982326a0ef",
"size": "399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "saleor/warehouse/migrations/0015_alter_stock_quantity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
import pykintone.structure as ps
class Organization(ps.kintoneStructure):
    """Structure for a kintone organization record (User API)."""

    def __init__(self):
        super(Organization, self).__init__()
        # Attribute defaults; note 'code' intentionally defaults to None
        # while the other string fields default to "".
        self.organization_id = ""
        self.code = None
        self.name = ""
        self.description = ""
        self.local_name = ""
        self.local_name_locale = ""
        self.parent_code = ""
        # Map 'organization_id' to the API field named 'id'.
        self._pd("organization_id", field_name="id")
        # Map snake_case attributes to the API's camelCase field names
        # (e.g. local_name <-> localName) — presumably; see kintoneStructure.
        self._pd("local_name", name_style_conversion=True)
        self._pd("local_name_locale", name_style_conversion=True)
        self._pd("parent_code", name_style_conversion=True)
|
{
"content_hash": "d7280beb865ea6fef924f5ee6c3d1afd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 31.157894736842106,
"alnum_prop": 0.5912162162162162,
"repo_name": "icoxfog417/pykintone",
"id": "ef672e07c6311af804f0fd3886f5884c096ef3af",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pykintone/user_api/organization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101327"
}
],
"symlink_target": ""
}
|
from StringIO import StringIO
import logging
import tarfile
from tempfile import mkdtemp
from abc import abstractmethod
from mcloud.util import Interface
from twisted.internet import reactor, defer
logger = logging.getLogger('mcloud.application')
from twisted.python import log
class IContainerBuilder(Interface):
    """Marker interface for container builders (no methods defined here)."""
    pass
class IImageBuilder(Interface):
    """Interface for objects that can produce a Docker image."""

    @abstractmethod
    def build_image(self, ticket_id, service):
        # Build (or fetch) the image and return/yield its identifier.
        pass
class PrebuiltImageBuilder(IImageBuilder):
    """Image builder that uses an already-built image, pulling it if absent.

    Fix: twisted.python.log.msg does not %-interpolate positional arguments
    (it joins them with spaces), so the '%s' placeholders were emitted
    literally; the messages are now formatted explicitly.
    """

    def __init__(self, image):
        super(PrebuiltImageBuilder, self).__init__()
        self.image = image  # image name, optionally with a ":tag" suffix

    @defer.inlineCallbacks
    def build_image(self, ticket_id, service):
        log.msg('[%s] Building image "%s".' % (ticket_id, self.image))

        name = self.image
        tag = None
        if ':' in name:
            name, tag = name.split(':')

        images = yield service.client.images(name=name)

        # When a tag was requested, only accept an exact repo:tag match.
        if tag:
            images = [x for x in images if self.image in x['RepoTags']]

        if not images:
            log.msg('[%s] Image is not there. Pulling "%s" ...' % (ticket_id, self.image))
            yield service.client.pull(name, ticket_id, tag)

        log.msg('[%s] Image "%s" is ready to use.' % (ticket_id, self.image))

        defer.returnValue(self.image)
class CanNotAccessPath(Exception):
    """Raised when the Dockerfile build context path cannot be read."""
    pass
class DockerfileImageBuilder(IImageBuilder):
    """Builds an image from a Dockerfile directory via a tar build context.

    Fix: the tarfile was never closed before reading the in-memory buffer,
    so the archive handed to the Docker daemon was missing the end-of-archive
    blocks (and any data still buffered by TarFile).
    """

    def __init__(self, path):
        super(DockerfileImageBuilder, self).__init__()
        self.path = path  # directory containing the Dockerfile
        self.image_id = None

    def create_archive(self):
        """Asynchronously tar self.path in memory; fires with the tar bytes."""
        d = defer.Deferred()

        def archive():
            memfile = StringIO()
            try:
                t = tarfile.open(mode='w', fileobj=memfile)
                t.add(self.path, arcname='.')
                # Close before reading: TarFile only writes the terminating
                # end-of-archive blocks (and flushes buffers) on close().
                t.close()
                d.callback(memfile.getvalue())
            except OSError as e:
                d.errback(CanNotAccessPath('Can not access %s: %s' % (self.path, str(e))))
            except Exception as e:
                d.errback(e)
            finally:
                memfile.close()
        # Run the (blocking) tar work on the next reactor iteration.
        reactor.callLater(0, archive)
        return d

    @defer.inlineCallbacks
    def build_image(self, ticket_id, service):
        archive = yield self.create_archive()
        ret = yield service.client.build_image(archive, ticket_id=ticket_id)
        defer.returnValue(ret)
class VirtualFolderImageBuilder(DockerfileImageBuilder):
    """Builds an image from an in-memory mapping of filename -> contents.

    Fix/idiom: build the file paths with os.path.join instead of manual
    string concatenation ('tdir + "/%s" % file_'), which is portable and
    avoids format-character surprises in filenames.
    """

    def __init__(self, files):
        self.files = files  # dict: relative filename -> file contents
        super(VirtualFolderImageBuilder, self).__init__(None)
        self.image_id = None

    def build_image(self, ticket_id, service):
        # Materialize the virtual folder into a fresh temp directory.
        # NOTE(review): the temp directory is never removed — presumably the
        # daemon's lifetime makes this acceptable; confirm before cleanup.
        build_dir = mkdtemp()
        for file_, source in self.files.items():
            with open(os.path.join(build_dir, file_), 'w+') as f:
                f.write(source)

        self.path = build_dir
        return super(VirtualFolderImageBuilder, self).build_image(ticket_id, service)
class InlineDockerfileImageBuilder(VirtualFolderImageBuilder):
    """Builds an image from Dockerfile source given as a string."""

    def __init__(self, source):
        # Wrap the source in a one-file virtual folder named 'Dockerfile'.
        super(InlineDockerfileImageBuilder, self).__init__({
            'Dockerfile': source
        })
|
{
"content_hash": "0a5dd95bb128a8424774ee44b0f1b623",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 90,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.6078878748370273,
"repo_name": "modera/mcloud",
"id": "fdfa525b69b9d585ba30d49345b9c761d756fa09",
"size": "3068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcloud/container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33676"
},
{
"name": "HTML",
"bytes": "19944"
},
{
"name": "JavaScript",
"bytes": "371312"
},
{
"name": "Makefile",
"bytes": "147"
},
{
"name": "Python",
"bytes": "295383"
},
{
"name": "Shell",
"bytes": "162"
}
],
"symlink_target": ""
}
|
from os.path import dirname, join
import pypd
def preload():
    """Load the PagerDuty API key from 'api.key' at the repository root."""
    examples_dir = dirname(dirname(__file__))
    repo_root = dirname(examples_dir)
    pypd.set_api_key_from_file(join(repo_root, 'api.key'))
|
{
"content_hash": "47a4616dc310a107676c45697ef314c3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 20.625,
"alnum_prop": 0.6727272727272727,
"repo_name": "PagerDuty/pagerduty-api-python-client",
"id": "21e552e4649511d3e909bbe108b3bdc99a520c90",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hackday/preload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105430"
}
],
"symlink_target": ""
}
|
import json
from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase
# TODO broken in myhpom ("oauthlib.oauth2.rfc6749.endpoints.resource: DEBUG: Dispatching token_type Bearer request")
# class TestUniversities(APITestCase):
# def setUp(self):
# self.client = APIClient()
#
# def test_universities_no_query(self):
# response = self.client.get('/hydroshare/hsapi/dictionary/universities/', format='json')
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# content = json.loads(response.content)
# self.assertEqual(len(content), 1)
#
# def test_universities_query(self):
# response = self.client.get('/hydroshare/hsapi/dictionary/universities/?term=dubai', format='json')
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# content = json.loads(response.content)
# self.assertEqual(len(content), 9)
|
{
"content_hash": "736fc59d83c14c1935c3fc510e68710b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 116,
"avg_line_length": 42.17391304347826,
"alnum_prop": 0.7041237113402062,
"repo_name": "ResearchSoftwareInstitute/MyHPOM",
"id": "23c52e0812fe65e11bfdf4506e43f4b34809a198",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/myhpom-develop",
"path": "hs_dictionary/tests/test_universities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399181"
},
{
"name": "HTML",
"bytes": "950570"
},
{
"name": "JavaScript",
"bytes": "2069460"
},
{
"name": "Python",
"bytes": "5006675"
},
{
"name": "R",
"bytes": "4463"
},
{
"name": "Shell",
"bytes": "53077"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework.decorators import list_route
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.response import Response
from posts.models import StarredJob, Alarm
from posts.permissions import IsSelf, IsOwner
from posts.serializers import UserSerializer, \
StarredJobsSerializer, AlarmSerializer
from rest_framework import generics
# @TODO: switch to generics.CreateAPIView
from rest_framework.permissions import IsAuthenticated
class UserAuthentication(viewsets.ModelViewSet):
    """
    Lets clients verify that their credentials are valid.
    Some clients probe a URL such as `/users/me`; a 200 response carrying
    the serialized user means authentication succeeded, a 401 means it
    did not.
    """
    @list_route()
    def me(self, request, *args, **kwargs):
        # Guard clause: unauthenticated requests fail fast with a 401.
        if not request.user.is_authenticated():
            raise AuthenticationFailed()
        return Response(UserSerializer(request.user).data)
class UserList(generics.ListCreateAPIView):
    # GET lists all users; POST creates one. No permission_classes set,
    # so the project-wide DRF defaults apply here.
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(generics.RetrieveUpdateAPIView):
    # Retrieve or update a single user. IsSelf limits access to the
    # account owner (exact rules live in posts.permissions.IsSelf).
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (IsSelf,)
class StarredJobsList(generics.ListCreateAPIView):
    """List the authenticated user's starred jobs, or star a new one."""
    serializer_class = StarredJobsSerializer
    permission_classes = (IsAuthenticated, )
    def get_queryset(self):
        # Only the requesting user's own stars are visible.
        return StarredJob.objects.filter(user=self.request.user.id)
class StarredJobDetail(generics.DestroyAPIView):
    # DELETE-only endpoint (unstar). Looked up by the related job id
    # (lookup_field = "job") rather than the StarredJob primary key.
    queryset = StarredJob.objects.all()
    serializer_class = StarredJobsSerializer
    permission_classes = (IsOwner,)
    lookup_field = "job"
class AlarmList(generics.ListCreateAPIView):
    """List the authenticated user's alarms (most recently updated first)
    or create a new one."""
    serializer_class = AlarmSerializer
    permission_classes = (IsAuthenticated, )
    def get_queryset(self):
        requester = self.request.user
        return Alarm.objects.filter(user=requester.id).order_by("-updated_at")
class AlarmDetail(generics.RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete a single alarm; IsOwner restricts access to
    # the alarm's owner.
    queryset = Alarm.objects.all()
    serializer_class = AlarmSerializer
    permission_classes = (IsOwner,)
|
{
"content_hash": "cda990b8091314b4e4aa74741e09461d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 73,
"avg_line_length": 31.405797101449274,
"alnum_prop": 0.7503461005999077,
"repo_name": "alpayOnal/flj",
"id": "ce81b6980d53f551b6650777357e9c1e60f77f98",
"size": "2167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "posts/user_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144001"
},
{
"name": "Python",
"bytes": "46483"
},
{
"name": "Shell",
"bytes": "807"
}
],
"symlink_target": ""
}
|
""" Database code for Swift """
from __future__ import with_statement
from contextlib import contextmanager
import hashlib
import logging
import operator
import os
from uuid import uuid4
import sys
import time
import cPickle as pickle
import errno
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.utils import json, normalize_timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = True
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
def utf8encode(*args):
    """Return args as a list, with any Python 2 ``unicode`` values encoded
    to UTF-8 byte strings; all other values pass through unchanged."""
    return [arg.encode('utf8') if isinstance(arg, unicode) else arg
            for arg in args]
class DatabaseConnectionError(sqlite3.DatabaseError):
    """More friendly error messages for DB Errors."""
    def __init__(self, path, msg, timeout=0):
        self.path = path          # DB file the connection was for
        self.timeout = timeout    # connect timeout that was in effect
        self.msg = msg            # human-readable detail
    def __str__(self):
        return 'DB connection error (%s, %s):\n%s' % (
            self.path, self.timeout, self.msg)
class GreenDBConnection(sqlite3.Connection):
    """SQLite DB Connection handler that plays well with eventlet."""
    def __init__(self, *args, **kwargs):
        # Keep the caller's timeout for ourselves, but give sqlite a
        # timeout of 0 so "database is locked" raises immediately instead
        # of blocking the whole eventlet hub inside the C library.
        self.timeout = kwargs.get('timeout', BROKER_TIMEOUT)
        kwargs['timeout'] = 0
        self.db_file = args and args[0] or '-'
        sqlite3.Connection.__init__(self, *args, **kwargs)
    def _timeout(self, call):
        # Retry the sqlite call with green sleeps until it succeeds or the
        # overall LockTimeout (self.timeout) expires.
        with LockTimeout(self.timeout, self.db_file):
            while True:
                try:
                    return call()
                except sqlite3.OperationalError, e:
                    # Only lock contention is retried; other errors bubble.
                    if 'locked' not in str(e):
                        raise
                    sleep(0.05)
    def execute(self, *args, **kwargs):
        return self._timeout(lambda: sqlite3.Connection.execute(
            self, *args, **kwargs))
    def commit(self):
        return self._timeout(lambda: sqlite3.Connection.commit(self))
def dict_factory(crs, row):
    """
    This should only be used when you need a real dict,
    i.e. when you're going to serialize the results.
    Maps each column name from the cursor description to the
    corresponding value in the row.
    """
    names = [col[0] for col in crs.description]
    return dict(zip(names, row))
def chexor(old, name, timestamp):
    """
    Each entry in the account and container databases is XORed by the 128-bit
    hash on insert or delete. This serves as a rolling, order-independent hash
    of the contents. (check + XOR)
    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: timestamp of the new record
    :raises Exception: if name is None
    :returns: a hex representation of the new hash value
    """
    if name is None:
        raise Exception('name is None!')
    # Python 2 idiom: hex-decode the running hash, XOR it byte-by-byte
    # with the MD5 of "name-timestamp", then hex-encode the result again.
    old = old.decode('hex')
    new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf_8')).digest()
    response = ''.join(
        map(chr, map(operator.xor, map(ord, old), map(ord, new))))
    return response.encode('hex')
def get_db_connection(path, timeout=30, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.
    :param path: path to DB
    :param timeout: timeout for connection
    :param okay_to_create: if True, create the DB if it doesn't exist
    :raises DatabaseConnectionError: if the DB was implicitly created when
        it should not have been, or on any sqlite3.DatabaseError
    :returns: DB connection object
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if path != ':memory:' and not okay_to_create:
            # attempt to detect and fail when connect creates the db file:
            # a zero-byte file created at/after connect_time means sqlite
            # just made it, so remove it and report the error.
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                                              'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.execute('PRAGMA synchronous = NORMAL')
        conn.execute('PRAGMA count_changes = OFF')
        conn.execute('PRAGMA temp_store = MEMORY')
        conn.execute('PRAGMA journal_mode = DELETE')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
    def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
                 account=None, container=None, pending_timeout=10,
                 stale_reads_ok=False):
        """ Encapsulates working with a database.
        :param db_file: path to the DB file, or ':memory:'
        :param timeout: sqlite connect/operation timeout in seconds
        :param logger: logger to use; defaults to the root logger
        :param account: account name this DB belongs to, if any
        :param container: container name this DB belongs to, if any
        :param pending_timeout: timeout for locking the .pending file
        :param stale_reads_ok: if True, readers tolerate an un-flushed
                               .pending file instead of raising LockTimeout
        """
        self.conn = None
        self.db_file = db_file
        self.pending_file = self.db_file + '.pending'
        self.pending_timeout = pending_timeout
        self.stale_reads_ok = stale_reads_ok
        self.db_dir = os.path.dirname(db_file)
        self.timeout = timeout
        self.logger = logger or logging.getLogger()
        self.account = account
        self.container = container
        self._db_version = -1
    def initialize(self, put_timestamp=None):
        """
        Create the DB
        The sync tables and their updated_at triggers are created here;
        subclass-specific tables come from self._initialize().
        :param put_timestamp: timestamp of initial PUT request
        :raises DatabaseConnectionError: if another process created the DB
            file while we were building ours
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            # Build into a temp file in the same directory, then rename it
            # into place atomically below.
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0)
            # creating dbs implicitly does a lot of transactions, so we
            # pick fast, unsafe options here and do a big fsync at the end.
            conn.execute('PRAGMA synchronous = OFF')
        conn.execute('PRAGMA temp_store = MEMORY')
        conn.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
        CREATE TABLE outgoing_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TABLE incoming_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
        END;
        """)
        if not put_timestamp:
            put_timestamp = normalize_timestamp(0)
        self._initialize(conn, put_timestamp)
        conn.commit()
        if tmp_db_file:
            conn.close()
            # fsync the finished temp file before the rename so a crash
            # can't leave a half-written DB at the final path.
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseConnectionError(
                        self.db_file,
                        'DB created by someone else while working?')
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
    def delete_db(self, timestamp):
        """
        Mark the DB as deleted
        :param timestamp: delete timestamp
        """
        timestamp = normalize_timestamp(timestamp)
        # first, clear the metadata: every key is set to ('', timestamp) so
        # the empty values replicate and are later reclaimed.
        cleared_meta = {}
        for k in self.metadata.iterkeys():
            cleared_meta[k] = ('', timestamp)
        self.update_metadata(cleared_meta)
        # then mark the db as deleted
        with self.get() as conn:
            self._delete_db(conn, timestamp)
            conn.commit()
    def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
        """
        Checks the exception info to see if it indicates a quarantine situation
        (malformed or corrupted database). If not, the original exception will
        be reraised. If so, the database will be quarantined and a new
        sqlite3.DatabaseError will be raised indicating the action taken.
        """
        if 'database disk image is malformed' in str(exc_value):
            exc_hint = 'malformed'
        elif 'file is encrypted or is not a database' in str(exc_value):
            exc_hint = 'corrupted'
        else:
            # Python 2 three-expression raise: re-raise with the original
            # traceback intact.
            raise exc_type(*exc_value.args), None, exc_traceback
        # Walk up from .../device/dbs/partition/prefix/db_dir to find the
        # device root, then move the DB dir under device/quarantined/<type>s/.
        prefix_path = os.path.dirname(self.db_dir)
        partition_path = os.path.dirname(prefix_path)
        dbs_path = os.path.dirname(partition_path)
        device_path = os.path.dirname(dbs_path)
        quar_path = os.path.join(device_path, 'quarantined',
                                 self.db_type + 's',
                                 os.path.basename(self.db_dir))
        try:
            renamer(self.db_dir, quar_path)
        except OSError, e:
            if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
                raise
            # Destination already exists: retry with a unique suffix.
            quar_path = "%s-%s" % (quar_path, uuid4().hex)
            renamer(self.db_dir, quar_path)
        detail = _('Quarantined %s to %s due to %s database') % \
                 (self.db_dir, quar_path, exc_hint)
        self.logger.error(detail)
        raise sqlite3.DatabaseError(detail)
    @contextmanager
    def get(self):
        """Use with the "with" statement; returns a database connection."""
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                try:
                    self.conn = get_db_connection(self.db_file, self.timeout)
                except (sqlite3.DatabaseError, DatabaseConnectionError):
                    self.possibly_quarantine(*sys.exc_info())
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        # Check the connection out so concurrent users can't share it.
        conn = self.conn
        self.conn = None
        try:
            yield conn
            # Roll back anything the caller left uncommitted, then check
            # the connection back in for reuse.
            conn.rollback()
            self.conn = conn
        except sqlite3.DatabaseError:
            try:
                conn.close()
            except:
                pass
            self.possibly_quarantine(*sys.exc_info())
        except (Exception, Timeout):
            conn.close()
            raise
    @contextmanager
    def lock(self):
        """Use with the "with" statement; locks a database.
        Holds a BEGIN IMMEDIATE transaction for the duration of the block,
        then always rolls back (the lock is for exclusion, not for writes).
        """
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                self.conn = get_db_connection(self.db_file, self.timeout)
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        conn = self.conn
        self.conn = None
        # Disable auto-transactions so BEGIN IMMEDIATE takes effect as-is.
        orig_isolation_level = conn.isolation_level
        conn.isolation_level = None
        conn.execute('BEGIN IMMEDIATE')
        try:
            yield True
        except (Exception, Timeout):
            pass
        try:
            conn.execute('ROLLBACK')
            conn.isolation_level = orig_isolation_level
            self.conn = conn
        except (Exception, Timeout):
            # If rollback fails the connection is unusable; drop it.
            logging.exception(
                _('Broker error trying to rollback locked connection'))
            conn.close()
    def newid(self, remote_id):
        """
        Re-id the database. This should be called after an rsync.
        Also records the current max ROWID as the incoming sync point for
        the remote DB the data came from.
        :param remote_id: the ID of the remote database being rsynced in
        """
        with self.get() as conn:
            row = conn.execute('''
                UPDATE %s_stat SET id=?
            ''' % self.db_type, (str(uuid4()),))
            row = conn.execute('''
                SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
            ''' % self.db_contains_type).fetchone()
            sync_point = row['ROWID'] if row else -1
            conn.execute('''
                INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
                VALUES (?, ?)
            ''', (sync_point, remote_id))
            self._newid(conn)
            conn.commit()
    def _newid(self, conn):
        """Subclass hook invoked by newid() inside its transaction."""
        # Override for additional work when receiving an rsynced db.
        pass
    def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
        """
        Used in replication to handle updating timestamps.
        Keeps the earliest created_at and the latest put/delete timestamps,
        so merging is commutative across replicas.
        :param created_at: create timestamp
        :param put_timestamp: put timestamp
        :param delete_timestamp: delete timestamp
        """
        with self.get() as conn:
            conn.execute('''
                UPDATE %s_stat SET created_at=MIN(?, created_at),
                                   put_timestamp=MAX(?, put_timestamp),
                                   delete_timestamp=MAX(?, delete_timestamp)
            ''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
            conn.commit()
    def get_items_since(self, start, count):
        """
        Get a list of objects in the database between start and end.
        :param start: start ROWID
        :param count: number to get
        :returns: list of objects between start and end
        """
        # Flush pending puts first so the listing is current; stale reads
        # may be tolerated if configured.
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            curs = conn.execute('''
                SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
            ''' % self.db_contains_type, (start, count))
            curs.row_factory = dict_factory
            return [r for r in curs]
    def get_sync(self, id, incoming=True):
        """
        Gets the most recent sync point for a server from the sync table.
        :param id: remote ID to get the sync_point for
        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        :returns: the sync point, or -1 if the id doesn't exist.
        """
        with self.get() as conn:
            row = conn.execute(
                "SELECT sync_point FROM %s_sync WHERE remote_id=?"
                % ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
            if not row:
                return -1
            return row['sync_point']
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % 'incoming' if incoming else 'outgoing')
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
    def get_replication_info(self):
        """
        Get information about the DB required for replication.
        :returns: dict containing keys: hash, id, created_at, put_timestamp,
                  delete_timestamp, count, max_row, and metadata
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        # The query is split so the metadata column selection can be
        # swapped out when talking to an old, pre-metadata schema.
        query_part1 = '''
            SELECT hash, id, created_at, put_timestamp, delete_timestamp,
                %s_count AS count,
                CASE WHEN SQLITE_SEQUENCE.seq IS NOT NULL
                THEN SQLITE_SEQUENCE.seq ELSE -1 END AS max_row, ''' % \
            self.db_contains_type
        query_part2 = '''
            FROM (%s_stat LEFT JOIN SQLITE_SEQUENCE
                  ON SQLITE_SEQUENCE.name == '%s') LIMIT 1
        ''' % (self.db_type, self.db_contains_type)
        with self.get() as conn:
            try:
                curs = conn.execute(query_part1 + 'metadata' + query_part2)
            except sqlite3.OperationalError, err:
                if 'no such column: metadata' not in str(err):
                    raise
                # Legacy schema without a metadata column: report ''.
                curs = conn.execute(query_part1 + "'' as metadata" +
                                    query_part2)
            curs.row_factory = dict_factory
            return curs.fetchone()
    def _commit_puts(self):
        """Flush batched puts from the .pending file; no-op in the base."""
        pass  # stub to be overridden if need be
    def merge_syncs(self, sync_points, incoming=True):
        """
        Merge a list of sync points with the incoming sync table.
        :param sync_points: list of sync points where a sync point is a dict of
                            {'sync_point', 'remote_id'}
        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        """
        with self.get() as conn:
            for rec in sync_points:
                # EAFP upsert: INSERT first; on a duplicate remote_id fall
                # back to UPDATE, keeping the larger sync_point.
                try:
                    conn.execute('''
                        INSERT INTO %s_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
                except sqlite3.IntegrityError:
                    conn.execute('''
                        UPDATE %s_sync SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
            conn.commit()
    def _preallocate(self):
        """
        The idea is to allocate space in front of an expanding db. If it gets
        within 512k of a boundary, it allocates to the next boundary.
        Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
        """
        if not DB_PREALLOCATION or self.db_file == ':memory:':
            return
        MB = (1024 * 1024)
        def prealloc_points():
            # Infinite generator of allocation boundaries, so the loop
            # below always finds one past the current file size.
            for pm in (1, 2, 5, 10, 25, 50):
                yield pm * MB
            while True:
                pm += 50
                yield pm * MB
        stat = os.stat(self.db_file)
        file_size = stat.st_size
        # st_blocks is in 512-byte units regardless of filesystem blocksize.
        allocated_size = stat.st_blocks * 512
        for point in prealloc_points():
            if file_size <= point - MB / 2:
                prealloc_size = point
                break
        if allocated_size < prealloc_size:
            with open(self.db_file, 'rb+') as fp:
                fallocate(fp.fileno(), int(prealloc_size))
    @property
    def metadata(self):
        """
        Returns the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value.
        """
        with self.get() as conn:
            try:
                metadata = conn.execute('SELECT metadata FROM %s_stat' %
                                        self.db_type).fetchone()[0]
            except sqlite3.OperationalError, err:
                if 'no such column: metadata' not in str(err):
                    raise
                # Legacy schema without a metadata column: act as empty.
                metadata = ''
        if metadata:
            metadata = json.loads(metadata)
        else:
            metadata = {}
        return metadata
    def update_metadata(self, metadata_updates):
        """
        Updates the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value. Key/values will only be overwritten if
        the timestamp is newer. To delete a key, set its value to ('',
        timestamp). These empty keys will eventually be removed by
        :func:reclaim
        """
        old_metadata = self.metadata
        # Fast path: if no update key is new and none carries a newer
        # timestamp, there is nothing to write.
        if set(metadata_updates).issubset(set(old_metadata)):
            for key, (value, timestamp) in metadata_updates.iteritems():
                if timestamp > old_metadata[key][1]:
                    break
            else:
                return
        with self.get() as conn:
            try:
                md = conn.execute('SELECT metadata FROM %s_stat' %
                                  self.db_type).fetchone()[0]
                md = md and json.loads(md) or {}
            except sqlite3.OperationalError, err:
                if 'no such column: metadata' not in str(err):
                    raise
                # Legacy schema: migrate by adding the metadata column.
                conn.execute("""
                    ALTER TABLE %s_stat
                    ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
                md = {}
            for key, value_timestamp in metadata_updates.iteritems():
                value, timestamp = value_timestamp
                if key not in md or timestamp > md[key][1]:
                    md[key] = value_timestamp
            conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
                         (json.dumps(md),))
            conn.commit()
    def reclaim(self, timestamp):
        """Removes any empty metadata values older than the timestamp
        :param timestamp: empty metadata entries last updated before this
                          timestamp are removed
        """
        if not self.metadata:
            return
        with self.get() as conn:
            if self._reclaim(conn, timestamp):
                conn.commit()
    def _reclaim(self, conn, timestamp):
        """
        Removes any empty metadata values older than the timestamp using the
        given database connection. This function will not call commit on the
        conn, but will instead return True if the database needs committing.
        This function was created as a worker to limit transactions and commits
        from other related functions.
        :param conn: Database connection to reclaim metadata within.
        :param timestamp: Empty metadata items last updated before this
                          timestamp will be removed.
        :returns: True if conn.commit() should be called
        """
        try:
            md = conn.execute('SELECT metadata FROM %s_stat' %
                              self.db_type).fetchone()[0]
            if md:
                md = json.loads(md)
                # Collect first, delete after: can't mutate the dict while
                # iterating it.
                keys_to_delete = []
                for key, (value, value_timestamp) in md.iteritems():
                    if value == '' and value_timestamp < timestamp:
                        keys_to_delete.append(key)
                if keys_to_delete:
                    for key in keys_to_delete:
                        del md[key]
                    conn.execute('UPDATE %s_stat SET metadata = ?' %
                                 self.db_type, (json.dumps(md),))
                    return True
        except sqlite3.OperationalError, err:
            if 'no such column: metadata' not in str(err):
                raise
        return False
class ContainerBroker(DatabaseBroker):
"""Encapsulates working with a container database."""
db_type = 'container'
db_contains_type = 'object'
    def _initialize(self, conn, put_timestamp):
        """Creates a brand new database (tables, indices, triggers, etc.)
        :param conn: DB connection object
        :param put_timestamp: put timestamp for the container_stat row
        :raises ValueError: if account or container is not set on the broker
        """
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        if not self.container:
            raise ValueError(
                'Attempting to create a new database with no container set')
        self.create_object_table(conn)
        self.create_container_stat_table(conn, put_timestamp)
    def create_object_table(self, conn):
        """
        Create the object table which is specific to the container DB.
        The insert/delete triggers keep container_stat's object_count,
        bytes_used and rolling hash current; UPDATE on object rows is
        forbidden (rows must be deleted and re-inserted).
        :param conn: DB connection object
        """
        conn.executescript("""
            CREATE TABLE object (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                created_at TEXT,
                size INTEGER,
                content_type TEXT,
                etag TEXT,
                deleted INTEGER DEFAULT 0
            );
            CREATE INDEX ix_object_deleted_name ON object (deleted, name);
            CREATE TRIGGER object_insert AFTER INSERT ON object
            BEGIN
                UPDATE container_stat
                SET object_count = object_count + (1 - new.deleted),
                    bytes_used = bytes_used + new.size,
                    hash = chexor(hash, new.name, new.created_at);
            END;
            CREATE TRIGGER object_update BEFORE UPDATE ON object
            BEGIN
                SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
            END;
            CREATE TRIGGER object_delete AFTER DELETE ON object
            BEGIN
                UPDATE container_stat
                SET object_count = object_count - (1 - old.deleted),
                    bytes_used = bytes_used - old.size,
                    hash = chexor(hash, old.name, old.created_at);
            END;
        """)
    def create_container_stat_table(self, conn, put_timestamp=None):
        """
        Create the container_stat table which is specific to the container DB.
        A single row is inserted and then populated; the table always holds
        exactly one row.
        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        if put_timestamp is None:
            put_timestamp = normalize_timestamp(0)
        conn.executescript("""
            CREATE TABLE container_stat (
                account TEXT,
                container TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                object_count INTEGER,
                bytes_used INTEGER,
                reported_put_timestamp TEXT DEFAULT '0',
                reported_delete_timestamp TEXT DEFAULT '0',
                reported_object_count INTEGER DEFAULT 0,
                reported_bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT '',
                x_container_sync_point1 INTEGER DEFAULT -1,
                x_container_sync_point2 INTEGER DEFAULT -1
            );
            INSERT INTO container_stat (object_count, bytes_used)
                VALUES (0, 0);
        """)
        conn.execute('''
            UPDATE container_stat
            SET account = ?, container = ?, created_at = ?, id = ?,
                put_timestamp = ?
        ''', (self.account, self.container, normalize_timestamp(time.time()),
              str(uuid4()), put_timestamp))
    def get_db_version(self, conn):
        """Return the schema version of this DB, cached after first lookup:
        1 if the ix_object_deleted_name index exists, otherwise 0."""
        if self._db_version == -1:
            self._db_version = 0
            for row in conn.execute('''
                    SELECT name FROM sqlite_master
                    WHERE name = 'ix_object_deleted_name' '''):
                self._db_version = 1
        return self._db_version
    def _newid(self, conn):
        """Reset reported stats after an rsync so the next report to the
        account server resends everything (see DatabaseBroker.newid)."""
        conn.execute('''
            UPDATE container_stat
            SET reported_put_timestamp = 0, reported_delete_timestamp = 0,
                reported_object_count = 0, reported_bytes_used = 0''')
    def update_put_timestamp(self, timestamp):
        """
        Update the put_timestamp.  Only modifies it if it is greater than
        the current timestamp (the WHERE clause enforces this atomically).
        :param timestamp: put timestamp
        """
        with self.get() as conn:
            conn.execute('''
                UPDATE container_stat SET put_timestamp = ?
                WHERE put_timestamp < ? ''', (timestamp, timestamp))
            conn.commit()
    def _delete_db(self, conn, timestamp):
        """
        Mark the DB as deleted
        Only applies if the given timestamp is newer than the current
        delete_timestamp (idempotent under replication).
        :param conn: DB connection object
        :param timestamp: timestamp to mark as deleted
        """
        conn.execute("""
            UPDATE container_stat
            SET delete_timestamp = ?,
                status = 'DELETED',
                status_changed_at = ?
            WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))
    def empty(self):
        """
        Check if the DB is empty.
        :returns: True if the database has no active objects, False otherwise
        """
        # Flush pending puts first so recently-added objects count.
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            row = conn.execute(
                'SELECT object_count from container_stat').fetchone()
            return (row[0] == 0)
    def _commit_puts(self, item_list=None):
        """Handles commiting rows in .pending files.
        The .pending file holds ':'-separated, base64-encoded pickles of
        (name, timestamp, size, content_type, etag, deleted) tuples; this
        merges them all into the DB and truncates the file.
        :param item_list: additional records (dicts) to merge along with
                          the pending entries
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, 'r+b') as fp:
                for entry in fp.read().split(':'):
                    if entry:
                        try:
                            (name, timestamp, size, content_type, etag,
                                deleted) = pickle.loads(entry.decode('base64'))
                            item_list.append({'name': name,
                                              'created_at': timestamp,
                                              'size': size,
                                              'content_type': content_type,
                                              'etag': etag,
                                              'deleted': deleted})
                        except Exception:
                            # A corrupt entry is logged and skipped; the
                            # rest of the batch still commits.
                            self.logger.exception(
                                _('Invalid pending entry %(file)s: %(entry)s'),
                                {'file': self.pending_file, 'entry': entry})
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                except OSError, err:
                    if err.errno != errno.ENOENT:
                        raise
    def reclaim(self, object_timestamp, sync_timestamp):
        """
        Delete rows from the object table that are marked deleted and
        whose created_at timestamp is < object_timestamp.  Also deletes rows
        from incoming_sync and outgoing_sync where the updated_at timestamp is
        < sync_timestamp.
        In addition, this calls the DatabaseBroker's :func:_reclaim method.
        :param object_timestamp: max created_at timestamp of object rows to
                                 delete
        :param sync_timestamp: max update_at timestamp of sync rows to delete
        """
        self._commit_puts()
        with self.get() as conn:
            conn.execute("""
                DELETE FROM object
                WHERE deleted = 1
                AND created_at < ?""", (object_timestamp,))
            try:
                conn.execute('''
                    DELETE FROM outgoing_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
                conn.execute('''
                    DELETE FROM incoming_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
            except sqlite3.OperationalError, err:
                # Old dbs didn't have updated_at in the _sync tables.
                if 'no such column: updated_at' not in str(err):
                    raise
            DatabaseBroker._reclaim(self, conn, object_timestamp)
            conn.commit()
def delete_object(self, name, timestamp):
"""
Mark an object deleted.
:param name: object name to be deleted
:param timestamp: timestamp when the object was marked as deleted
"""
self.put_object(name, timestamp, 0, 'application/deleted', 'noetag', 1)
    def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
        """
        Creates an object in the DB with its metadata.
        :param name: object name to be created
        :param timestamp: timestamp of when the object was created
        :param size: object size
        :param content_type: object content-type
        :param etag: object etag
        :param deleted: if True, marks the object as deleted and sets the
                        deleted_at timestamp to timestamp
        """
        record = {'name': name, 'created_at': timestamp, 'size': size,
                  'content_type': content_type, 'etag': etag,
                  'deleted': deleted}
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            # Pending file is full: flush it to the DB along with this record.
            self._commit_puts([record])
        else:
            # Fast path: append the record to the .pending batch file.
            with lock_parent_directory(
                    self.pending_file, self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(pickle.dumps(
                        (name, timestamp, size, content_type, etag, deleted),
                        protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()
    def is_deleted(self, timestamp=None):
        """
        Check if the DB is considered to be deleted.
        :param timestamp: if given, a delete_timestamp newer than this keeps
                          the DB alive (tombstone consistency window)
        :returns: True if the DB is considered to be deleted, False otherwise
        """
        if self.db_file != ':memory:' and not os.path.exists(self.db_file):
            return True
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            row = conn.execute('''
                SELECT put_timestamp, delete_timestamp, object_count
                FROM container_stat''').fetchone()
            # leave this db as a tombstone for a consistency window
            if timestamp and row['delete_timestamp'] > timestamp:
                return False
            # The container is considered deleted if the delete_timestamp
            # value is greater than the put_timestamp, and there are no
            # objects in the container.
            return (row['object_count'] in (None, '', 0, '0')) and \
                (float(row['delete_timestamp']) > float(row['put_timestamp']))
    def get_info(self, include_metadata=False):
        """
        Get global data for the container.
        :param include_metadata: if set, metadata is included as a key
                                 pointing to a dict of tuples of the metadata
        :returns: dict with keys: account, container, created_at,
                  put_timestamp, delete_timestamp, object_count, bytes_used,
                  reported_put_timestamp, reported_delete_timestamp,
                  reported_object_count, reported_bytes_used, hash, id,
                  x_container_sync_point1, and x_container_sync_point2.
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            data = None
            trailing1 = 'metadata'
            trailing2 = 'x_container_sync_point1, x_container_sync_point2'
            # Retry loop: on legacy schemas that lack the metadata and/or
            # sync-point columns, swap in literal fallbacks and re-query.
            while not data:
                try:
                    data = conn.execute('''
                        SELECT account, container, created_at, put_timestamp,
                            delete_timestamp, object_count, bytes_used,
                            reported_put_timestamp, reported_delete_timestamp,
                            reported_object_count, reported_bytes_used, hash,
                            id, %s, %s
                        FROM container_stat
                    ''' % (trailing1, trailing2)).fetchone()
                except sqlite3.OperationalError, err:
                    if 'no such column: metadata' in str(err):
                        trailing1 = "'' as metadata"
                    elif 'no such column: x_container_sync_point' in str(err):
                        trailing2 = '-1 AS x_container_sync_point1, ' \
                                    '-1 AS x_container_sync_point2'
                    else:
                        raise
            data = dict(data)
            if include_metadata:
                try:
                    data['metadata'] = json.loads(data.get('metadata', ''))
                except ValueError:
                    data['metadata'] = {}
            elif 'metadata' in data:
                del data['metadata']
            return data
def set_x_container_sync_points(self, sync_point1, sync_point2):
    """
    Persist the container-sync points, adding the schema columns on
    demand for databases created before container sync existed.

    :param sync_point1: new x_container_sync_point1 value, or None to
                        leave it unchanged
    :param sync_point2: new x_container_sync_point2 value, or None to
                        leave it unchanged
    """
    with self.get() as conn:
        orig_isolation_level = conn.isolation_level
        try:
            # We turn off auto-transactions to ensure the alter table
            # commands are part of the transaction.
            conn.isolation_level = None
            conn.execute('BEGIN')
            try:
                self._set_x_container_sync_points(conn, sync_point1,
                                                  sync_point2)
            except sqlite3.OperationalError, err:
                # Pre-sync schemas lack the columns; add them (DEFAULT
                # -1 == "never synced") and retry inside the same txn.
                if 'no such column: x_container_sync_point' not in \
                        str(err):
                    raise
                conn.execute('''
                    ALTER TABLE container_stat
                    ADD COLUMN x_container_sync_point1 INTEGER DEFAULT -1
                ''')
                conn.execute('''
                    ALTER TABLE container_stat
                    ADD COLUMN x_container_sync_point2 INTEGER DEFAULT -1
                ''')
                self._set_x_container_sync_points(conn, sync_point1,
                                                  sync_point2)
            conn.execute('COMMIT')
        finally:
            conn.isolation_level = orig_isolation_level
def _set_x_container_sync_points(self, conn, sync_point1, sync_point2):
    """
    Write the supplied container-sync point(s) into container_stat.

    A sync point passed as None is left untouched; when both are None
    nothing is written.

    :param conn: DB connection object
    :param sync_point1: new x_container_sync_point1 value or None
    :param sync_point2: new x_container_sync_point2 value or None
    """
    if sync_point1 is None and sync_point2 is None:
        return
    if sync_point1 is not None and sync_point2 is not None:
        sql = ('UPDATE container_stat '
               'SET x_container_sync_point1 = ?, '
               'x_container_sync_point2 = ?')
        args = (sync_point1, sync_point2)
    elif sync_point1 is not None:
        sql = 'UPDATE container_stat SET x_container_sync_point1 = ?'
        args = (sync_point1,)
    else:
        sql = 'UPDATE container_stat SET x_container_sync_point2 = ?'
        args = (sync_point2,)
    conn.execute(sql, args)
def reported(self, put_timestamp, delete_timestamp, object_count,
             bytes_used):
    """
    Update reported stats, which track what has been told to the
    account layer so far.

    :param put_timestamp: put_timestamp to update
    :param delete_timestamp: delete_timestamp to update
    :param object_count: object_count to update
    :param bytes_used: bytes_used to update
    """
    values = (put_timestamp, delete_timestamp, object_count, bytes_used)
    with self.get() as conn:
        conn.execute(
            'UPDATE container_stat'
            ' SET reported_put_timestamp = ?,'
            ' reported_delete_timestamp = ?,'
            ' reported_object_count = ?,'
            ' reported_bytes_used = ?', values)
        conn.commit()
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
                      path=None):
    """
    Get a list of objects sorted by name starting at marker onward, up
    to limit entries.  Entries will begin with the prefix and will not
    have the delimiter after the prefix.

    :param limit: maximum number of entries to get
    :param marker: marker query
    :param end_marker: end marker query
    :param prefix: prefix query
    :param delimiter: delimiter for query
    :param path: if defined, will set the prefix and delimter based on
                 the path
    :returns: list of tuples of (name, created_at, size, content_type,
              etag); synthesized "subdir" entries appear as
              [dir_name, '0', 0, None, '']
    """
    # Normalize all query strings to UTF-8 so comparisons against the
    # stored names behave consistently.
    (marker, end_marker, prefix, delimiter, path) = utf8encode(
        marker, end_marker, prefix, delimiter, path)
    try:
        self._commit_puts()
    except LockTimeout:
        if not self.stale_reads_ok:
            raise
    # A path query implies a '/' delimiter anchored at that path.
    if path is not None:
        prefix = path
        if path:
            prefix = path = path.rstrip('/') + '/'
        delimiter = '/'
    elif delimiter and not prefix:
        prefix = ''
    orig_marker = marker
    with self.get() as conn:
        results = []
        # Each pass fetches one batch; when a delimiter rollup skips a
        # whole "subdirectory", the marker jumps past it and we loop.
        while len(results) < limit:
            query = '''SELECT name, created_at, size, content_type, etag
                FROM object WHERE'''
            query_args = []
            if end_marker:
                query += ' name < ? AND'
                query_args.append(end_marker)
            if marker and marker >= prefix:
                query += ' name > ? AND'
                query_args.append(marker)
            elif prefix:
                query += ' name >= ? AND'
                query_args.append(prefix)
            # The unary '+' defeats index selection on pre-version-1
            # schemas (SQLite ignores indexes on +column expressions).
            if self.get_db_version(conn) < 1:
                query += ' +deleted = 0'
            else:
                query += ' deleted = 0'
            query += ' ORDER BY name LIMIT ?'
            query_args.append(limit - len(results))
            curs = conn.execute(query, query_args)
            curs.row_factory = None
            if prefix is None:
                # No prefix filtering requested: return the batch as-is.
                return [r for r in curs]
            if not delimiter:
                return [r for r in curs if r[0].startswith(prefix)]
            rowcount = 0
            for row in curs:
                rowcount += 1
                marker = name = row[0]
                if len(results) >= limit or not name.startswith(prefix):
                    curs.close()
                    return results
                end = name.find(delimiter, len(prefix))
                if path is not None:
                    if name == path:
                        continue
                    if end >= 0 and len(name) > end + len(delimiter):
                        # Name is nested deeper than the path level:
                        # advance the marker past the whole sub-tree.
                        marker = name[:end] + chr(ord(delimiter) + 1)
                        curs.close()
                        break
                elif end > 0:
                    # Roll all names sharing this sub-prefix up into one
                    # synthesized subdir row, then jump past them.
                    marker = name[:end] + chr(ord(delimiter) + 1)
                    dir_name = name[:end + 1]
                    if dir_name != orig_marker:
                        results.append([dir_name, '0', 0, None, ''])
                    curs.close()
                    break
                results.append(row)
            if not rowcount:
                break
        return results
def merge_items(self, item_list, source=None):
    """
    Merge items into the object table.

    :param item_list: list of dictionaries of {'name', 'created_at',
                      'size', 'content_type', 'etag', 'deleted'}
    :param source: if defined, update incoming_sync with the source
    """
    with self.get() as conn:
        max_rowid = -1
        for rec in item_list:
            # Remove any older row for the same name; newest timestamp
            # wins (last-write-wins merge for replication).
            query = '''
                DELETE FROM object
                WHERE name = ? AND (created_at < ?)
            '''
            if self.get_db_version(conn) >= 1:
                query += ' AND deleted IN (0, 1)'
            conn.execute(query, (rec['name'], rec['created_at']))
            # Only insert if no row (i.e. no newer row) survived.
            query = 'SELECT 1 FROM object WHERE name = ?'
            if self.get_db_version(conn) >= 1:
                query += ' AND deleted IN (0, 1)'
            if not conn.execute(query, (rec['name'],)).fetchall():
                conn.execute('''
                    INSERT INTO object (name, created_at, size,
                        content_type, etag, deleted)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', ([rec['name'], rec['created_at'], rec['size'],
                       rec['content_type'], rec['etag'], rec['deleted']]))
            if source:
                # Track the highest remote ROWID merged in this batch.
                max_rowid = max(max_rowid, rec['ROWID'])
        if source:
            # Record replication progress for this remote; fall back to
            # UPDATE when a sync row for the remote already exists.
            try:
                conn.execute('''
                    INSERT INTO incoming_sync (sync_point, remote_id)
                    VALUES (?, ?)
                ''', (max_rowid, source))
            except sqlite3.IntegrityError:
                conn.execute('''
                    UPDATE incoming_sync SET sync_point=max(?, sync_point)
                    WHERE remote_id=?
                ''', (max_rowid, source))
        conn.commit()
class AccountBroker(DatabaseBroker):
    """Encapsulates working with an account database."""
    db_type = 'account'
    db_contains_type = 'container'

    def _initialize(self, conn, put_timestamp):
        """
        Create a brand new database (tables, indices, triggers, etc.)

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        self.create_container_table(conn)
        self.create_account_stat_table(conn, put_timestamp)

    def create_container_table(self, conn):
        """
        Create container table which is specific to the account DB.

        Triggers keep the aggregate counters and the account hash in
        account_stat consistent on every INSERT/DELETE; rows are never
        UPDATEd in place (the UPDATE trigger raises).

        :param conn: DB connection object
        """
        conn.executescript("""
            CREATE TABLE container (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                put_timestamp TEXT,
                delete_timestamp TEXT,
                object_count INTEGER,
                bytes_used INTEGER,
                deleted INTEGER DEFAULT 0
            );

            CREATE INDEX ix_container_deleted_name ON
                container (deleted, name);

            CREATE TRIGGER container_insert AFTER INSERT ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count + (1 - new.deleted),
                    object_count = object_count + new.object_count,
                    bytes_used = bytes_used + new.bytes_used,
                    hash = chexor(hash, new.name,
                                  new.put_timestamp || '-' ||
                                    new.delete_timestamp || '-' ||
                                    new.object_count || '-' || new.bytes_used);
            END;

            CREATE TRIGGER container_update BEFORE UPDATE ON container
            BEGIN
                SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
            END;

            CREATE TRIGGER container_delete AFTER DELETE ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count - (1 - old.deleted),
                    object_count = object_count - old.object_count,
                    bytes_used = bytes_used - old.bytes_used,
                    hash = chexor(hash, old.name,
                                  old.put_timestamp || '-' ||
                                    old.delete_timestamp || '-' ||
                                    old.object_count || '-' || old.bytes_used);
            END;
        """)

    def create_account_stat_table(self, conn, put_timestamp):
        """
        Create account_stat table which is specific to the account DB.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        conn.executescript("""
            CREATE TABLE account_stat (
                account TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                container_count INTEGER,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );

            INSERT INTO account_stat (container_count) VALUES (0);
        """)
        conn.execute('''
            UPDATE account_stat SET account = ?, created_at = ?, id = ?,
                put_timestamp = ?
        ''', (self.account, normalize_timestamp(time.time()), str(uuid4()),
              put_timestamp))

    def get_db_version(self, conn):
        """
        Return the on-disk schema version (0 or 1); version 1 added the
        (deleted, name) index.  The sniffed value is cached on the
        broker so sqlite_master is only queried once.

        :param conn: DB connection object
        """
        if self._db_version == -1:
            self._db_version = 0
            for row in conn.execute('''
                    SELECT name FROM sqlite_master
                    WHERE name = 'ix_container_deleted_name' '''):
                self._db_version = 1
        return self._db_version

    def update_put_timestamp(self, timestamp):
        """
        Update the put_timestamp.  Only modifies it if it is greater than
        the current timestamp.

        :param timestamp: put timestamp
        """
        with self.get() as conn:
            conn.execute('''
                UPDATE account_stat SET put_timestamp = ?
                WHERE put_timestamp < ? ''', (timestamp, timestamp))
            conn.commit()

    def _delete_db(self, conn, timestamp, force=False):
        """
        Mark the DB as deleted.

        :param conn: DB connection object
        :param timestamp: timestamp to mark as deleted
        :param force: accepted for interface compatibility; not used by
                      this implementation
        """
        conn.execute("""
            UPDATE account_stat
            SET delete_timestamp = ?,
                status = 'DELETED',
                status_changed_at = ?
            WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))

    def _commit_puts(self, item_list=None):
        """
        Handles committing rows in .pending files.

        Pending container updates are stored in self.pending_file as
        ':'-separated, base64-encoded pickles; they are merged into the
        container table and the file is truncated.

        :param item_list: additional records to merge along with the
                          pending entries
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, 'r+b') as fp:
                # ':' is safe as a delimiter: base64 never emits it.
                for entry in fp.read().split(':'):
                    if entry:
                        try:
                            (name, put_timestamp, delete_timestamp,
                             object_count, bytes_used, deleted) = \
                                pickle.loads(entry.decode('base64'))
                            item_list.append(
                                {'name': name,
                                 'put_timestamp': put_timestamp,
                                 'delete_timestamp': delete_timestamp,
                                 'object_count': object_count,
                                 'bytes_used': bytes_used,
                                 'deleted': deleted})
                        except Exception:
                            # Best effort: log and skip a corrupt entry
                            # rather than losing the rest of the batch.
                            self.logger.exception(
                                _('Invalid pending entry %(file)s: %(entry)s'),
                                {'file': self.pending_file, 'entry': entry})
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                except OSError, err:
                    if err.errno != errno.ENOENT:
                        raise

    def empty(self):
        """
        Check if the account DB is empty.

        :returns: True if the database has no active containers.
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            row = conn.execute(
                'SELECT container_count from account_stat').fetchone()
            return (row[0] == 0)

    def reclaim(self, container_timestamp, sync_timestamp):
        """
        Delete rows from the container table that are marked deleted and
        whose delete_timestamp is < container_timestamp.  Also deletes
        rows from incoming_sync and outgoing_sync where the updated_at
        timestamp is < sync_timestamp.

        In addition, this calls the DatabaseBroker's :func:_reclaim method.

        :param container_timestamp: max delete_timestamp of container rows
                                    to delete
        :param sync_timestamp: max update_at timestamp of sync rows to delete
        """
        self._commit_puts()
        with self.get() as conn:
            conn.execute('''
                DELETE FROM container WHERE
                    deleted = 1 AND delete_timestamp < ?
            ''', (container_timestamp,))
            try:
                conn.execute('''
                    DELETE FROM outgoing_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
                conn.execute('''
                    DELETE FROM incoming_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
            except sqlite3.OperationalError, err:
                # Old dbs didn't have updated_at in the _sync tables.
                if 'no such column: updated_at' not in str(err):
                    raise
            DatabaseBroker._reclaim(self, conn, container_timestamp)
            conn.commit()

    def get_container_timestamp(self, container_name):
        """
        Get the put_timestamp of a container.

        :param container_name: container name
        :returns: put_timestamp of the container, or None if no live row
                  exists for that name
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            ret = conn.execute('''
                SELECT put_timestamp FROM container
                WHERE name = ? AND deleted != 1''',
                (container_name,)).fetchone()
            if ret:
                ret = ret[0]
            return ret

    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        # A container is a tombstone when its delete time is newer than
        # its put time and it holds no objects.
        if delete_timestamp > put_timestamp and \
                object_count in (None, '', 0, '0'):
            deleted = 1
        else:
            deleted = 0
        record = {'name': name, 'put_timestamp': put_timestamp,
                  'delete_timestamp': delete_timestamp,
                  'object_count': object_count,
                  'bytes_used': bytes_used,
                  'deleted': deleted}
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
        # Spool the update into the .pending file unless it has grown
        # past PENDING_CAP, in which case merge everything now.
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(pickle.dumps(
                        (name, put_timestamp, delete_timestamp, object_count,
                         bytes_used, deleted),
                        protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()

    def can_delete_db(self, cutoff):
        """
        Check if the account DB can be deleted.

        :param cutoff: timestamp below which a DELETED account's
                       delete_timestamp qualifies it for removal
        :returns: True if the account can be deleted, False otherwise
        """
        self._commit_puts()
        with self.get() as conn:
            row = conn.execute('''
                SELECT status, put_timestamp, delete_timestamp, container_count
                FROM account_stat''').fetchone()
            # The account is considered deleted if its status is marked
            # as 'DELETED" and the delete_timestamp is older than the supplied
            # cutoff date; or if the delete_timestamp value is greater than
            # the put_timestamp, and there are no containers for the account
            status_del = (row['status'] == 'DELETED')
            deltime = float(row['delete_timestamp'])
            past_cutoff = (deltime < cutoff)
            # NOTE(review): this is a string comparison — assumes
            # normalized fixed-width timestamps so lexical order matches
            # numeric order; confirm against normalize_timestamp.
            time_later = (row['delete_timestamp'] > row['put_timestamp'])
            no_containers = (row['container_count'] in (None, '', 0, '0'))
            return (
                (status_del and past_cutoff) or (time_later and no_containers))

    def is_deleted(self):
        """
        Check if the account DB is considered to be deleted.

        :returns: True if the account DB is considered to be deleted, False
                  otherwise
        """
        if self.db_file != ':memory:' and not os.path.exists(self.db_file):
            return True
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            row = conn.execute('''
                SELECT put_timestamp, delete_timestamp, container_count, status
                FROM account_stat''').fetchone()
            return row['status'] == 'DELETED' or (
                row['container_count'] in (None, '', 0, '0') and
                row['delete_timestamp'] > row['put_timestamp'])

    def is_status_deleted(self):
        """Only returns true if the status field is set to DELETED."""
        with self.get() as conn:
            row = conn.execute('''
                SELECT status
                FROM account_stat''').fetchone()
            return (row['status'] == "DELETED")

    def get_info(self):
        """
        Get global data for the account.

        :returns: dict with keys: account, created_at, put_timestamp,
                  delete_timestamp, container_count, object_count,
                  bytes_used, hash, id
        """
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        with self.get() as conn:
            return dict(conn.execute('''
                SELECT account, created_at, put_timestamp, delete_timestamp,
                    container_count, object_count, bytes_used, hash, id
                FROM account_stat
            ''').fetchone())

    def list_containers_iter(self, limit, marker, end_marker, prefix,
                             delimiter):
        """
        Get a list of containers sorted by name starting at marker onward, up
        to limit entries.  Entries will begin with the prefix and will not
        have the delimiter after the prefix.

        :param limit: maximum number of entries to get
        :param marker: marker query
        :param end_marker: end marker query
        :param prefix: prefix query
        :param delimiter: delimiter for query
        :returns: list of tuples of (name, object_count, bytes_used, 0);
                  synthesized "subdir" entries appear as
                  [dir_name, 0, 0, 1]
        """
        # Normalize query strings to UTF-8 so name comparisons behave
        # consistently.
        (marker, end_marker, prefix, delimiter) = utf8encode(
            marker, end_marker, prefix, delimiter)
        try:
            self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise
        if delimiter and not prefix:
            prefix = ''
        orig_marker = marker
        with self.get() as conn:
            results = []
            # Each pass fetches one batch; a delimiter rollup advances
            # the marker past a whole "subdirectory" and loops again.
            while len(results) < limit:
                # NOTE(review): 'deleted = 0' appears both here and in
                # the version-dependent clause below — redundant but
                # harmless.
                query = """
                    SELECT name, object_count, bytes_used, 0
                    FROM container
                    WHERE deleted = 0 AND """
                query_args = []
                if end_marker:
                    query += ' name < ? AND'
                    query_args.append(end_marker)
                if marker and marker >= prefix:
                    query += ' name > ? AND'
                    query_args.append(marker)
                elif prefix:
                    query += ' name >= ? AND'
                    query_args.append(prefix)
                # The unary '+' defeats index selection on pre-version-1
                # schemas.
                if self.get_db_version(conn) < 1:
                    query += ' +deleted = 0'
                else:
                    query += ' deleted = 0'
                query += ' ORDER BY name LIMIT ?'
                query_args.append(limit - len(results))
                curs = conn.execute(query, query_args)
                curs.row_factory = None
                if prefix is None:
                    # No prefix filtering requested: return the batch.
                    return [r for r in curs]
                if not delimiter:
                    return [r for r in curs if r[0].startswith(prefix)]
                rowcount = 0
                for row in curs:
                    rowcount += 1
                    marker = name = row[0]
                    if len(results) >= limit or not name.startswith(prefix):
                        curs.close()
                        return results
                    end = name.find(delimiter, len(prefix))
                    if end > 0:
                        # Roll all names sharing this sub-prefix up into
                        # one synthesized subdir row and jump past them.
                        marker = name[:end] + chr(ord(delimiter) + 1)
                        dir_name = name[:end + 1]
                        if dir_name != orig_marker:
                            results.append([dir_name, 0, 0, 1])
                        curs.close()
                        break
                    results.append(row)
                if not rowcount:
                    break
            return results

    def merge_items(self, item_list, source=None):
        """
        Merge items into the container table.

        :param item_list: list of dictionaries of {'name', 'put_timestamp',
                          'delete_timestamp', 'object_count', 'bytes_used',
                          'deleted'}
        :param source: if defined, update incoming_sync with the source
        """
        with self.get() as conn:
            max_rowid = -1
            for rec in item_list:
                record = [rec['name'], rec['put_timestamp'],
                          rec['delete_timestamp'], rec['object_count'],
                          rec['bytes_used'], rec['deleted']]
                query = '''
                    SELECT name, put_timestamp, delete_timestamp,
                           object_count, bytes_used, deleted
                    FROM container WHERE name = ?
                '''
                if self.get_db_version(conn) >= 1:
                    query += ' AND deleted IN (0, 1)'
                curs = conn.execute(query, (rec['name'],))
                curs.row_factory = None
                row = curs.fetchone()
                if row:
                    row = list(row)
                    # Keep existing values for fields the incoming record
                    # leaves unset, and always keep the newest timestamps.
                    for i in xrange(5):
                        if record[i] is None and row[i] is not None:
                            record[i] = row[i]
                    if row[1] > record[1]:  # Keep newest put_timestamp
                        record[1] = row[1]
                    if row[2] > record[2]:  # Keep newest delete_timestamp
                        record[2] = row[2]
                # If deleted, mark as such
                if record[2] > record[1] and \
                        record[3] in (None, '', 0, '0'):
                    record[5] = 1
                else:
                    record[5] = 0
                # Rows are immutable (the UPDATE trigger raises), so
                # replace via DELETE + INSERT.
                conn.execute('''
                    DELETE FROM container WHERE name = ? AND
                                                deleted IN (0, 1)
                ''', (record[0],))
                conn.execute('''
                    INSERT INTO container (name, put_timestamp,
                        delete_timestamp, object_count, bytes_used,
                        deleted)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', record)
                if source:
                    # Track the highest remote ROWID merged in this batch.
                    max_rowid = max(max_rowid, rec['ROWID'])
            if source:
                # Record replication progress for this remote.
                try:
                    conn.execute('''
                        INSERT INTO incoming_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''', (max_rowid, source))
                except sqlite3.IntegrityError:
                    conn.execute('''
                        UPDATE incoming_sync SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''', (max_rowid, source))
            conn.commit()
|
{
"content_hash": "5d6f92350d8c7f0d0456ac135ef74d52",
"timestamp": "",
"source": "github",
"line_count": 1675,
"max_line_length": 79,
"avg_line_length": 39.90089552238806,
"alnum_prop": 0.5090822036687913,
"repo_name": "garvenshen/zeda-swift",
"id": "ce7dcb264e4eee725a8f7ce3fca41662111e1e48",
"size": "67424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift/common/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2513356"
}
],
"symlink_target": ""
}
|
from cla_public.app import create_app

# WSGI entry point: servers import ``app`` from this module, built with
# the deployment settings file.
app = create_app(config_file="config/deployment.py")
|
{
"content_hash": "5dfa9fb7c0421d5950f67b6867d51a5b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7717391304347826,
"repo_name": "ministryofjustice/cla_public",
"id": "612b7798d633caf137e6b103d056b954fccc4d0d",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_public/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1271"
},
{
"name": "HTML",
"bytes": "206100"
},
{
"name": "JavaScript",
"bytes": "110334"
},
{
"name": "Jinja",
"bytes": "12336"
},
{
"name": "Mustache",
"bytes": "2915"
},
{
"name": "Python",
"bytes": "351431"
},
{
"name": "SCSS",
"bytes": "35282"
},
{
"name": "Shell",
"bytes": "5150"
}
],
"symlink_target": ""
}
|
from org.gluu.model.custom.script.type.user import UserRegistrationType
from org.gluu.ldap.model import GluuStatus
from org.gluu.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList
import java
class UserRegistration(UserRegistrationType):
    """Gluu oxTrust custom user-registration script (Jython 2.x).

    Implements the UserRegistrationType interception hooks; the only
    behavior added is toggling the status of newly registered users via
    the 'enable_user' configuration property.
    """

    def __init__(self, currentTimeMillis):
        # Timestamp supplied by the script engine at instantiation.
        self.currentTimeMillis = currentTimeMillis

    def init(self, configurationAttributes):
        # Read the 'enable_user' custom property; defaults to False
        # when missing/unparsable, so new users start INACTIVE.
        print "User registration. Initialization"
        self.enable_user = StringHelper.toBoolean(configurationAttributes.get("enable_user").getValue2(), False)
        print "User registration. Initialized successfully"
        return True

    def destroy(self, configurationAttributes):
        # Nothing to clean up.
        print "User registration. Destroy"
        print "User registration. Destroyed successfully"
        return True

    # User registration init method
    # user is org.gluu.oxtrust.model.GluuCustomPerson
    # requestParameters is java.util.Map<String, String[]>
    # configurationAttributes is java.util.Map<String, SimpleCustomProperty>
    def initRegistration(self, user, requestParameters, configurationAttributes):
        print "User registration. Init method"
        return True

    # User registration pre method
    # user is org.gluu.oxtrust.model.GluuCustomPerson
    # requestParameters is java.util.Map<String, String[]>
    # configurationAttributes is java.util.Map<String, SimpleCustomProperty>
    def preRegistration(self, user, requestParameters, configurationAttributes):
        print "User registration. Pre method"
        userStatus = GluuStatus.ACTIVE
        if not self.enable_user:
            userStatus = GluuStatus.INACTIVE

        # Disable/Enable registered user
        user.setStatus(userStatus)
        return True

    # User registration post method
    # user is org.gluu.oxtrust.model.GluuCustomPerson
    # requestParameters is java.util.Map<String, String[]>
    # configurationAttributes is java.util.Map<String, SimpleCustomProperty>
    def postRegistration(self, user, requestParameters, configurationAttributes):
        print "User registration. Post method"
        return True

    # User confirm New Registration method
    # user is org.gluu.oxtrust.model.GluuCustomPerson
    # requestParameters is java.util.Map<String, String[]>
    # configurationAttributes is java.util.Map<String, SimpleCustomProperty>
    def confirmRegistration(self, user, requestParameters, configurationAttributes):
        print "User registration. Confirm registration method"
        return True

    def getApiVersion(self):
        # Custom-script API version implemented by this script.
        return 1
|
{
"content_hash": "944b49a68eb5c7ed3d696e211412b7ce",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 112,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.7239543726235741,
"repo_name": "GluuFederation/oxExternal",
"id": "ba6b9d6c82871b1d1cc1e638f4790fa110b7340e",
"size": "2792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_registration/sample/SampleScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43425"
}
],
"symlink_target": ""
}
|
"""Simple functional APIs for TF2."""
from sonnet import optimizers as oo_optimizers
from sonnet.src.functional import haiku
from sonnet.src.functional import jax
from sonnet.src.functional import optimizers
# Utilities for converting Sonnet code into pure functions.
variables = haiku.variables
transform = haiku.transform
transform_with_state = haiku.transform_with_state
without_state = haiku.without_state
# Utilities for working with tensors on device.
device_get = jax.device_get
device_put = jax.device_put
# Utilities for transforming pure functions.
grad = jax.grad
jit = jax.jit
value_and_grad = jax.value_and_grad
# Optimizers.
optimizer = optimizers.optimizer
sgd = optimizer(oo_optimizers.SGD)
adam = optimizer(oo_optimizers.Adam)
rmsprop = optimizer(oo_optimizers.RMSProp)
momentum = optimizer(oo_optimizers.Momentum)
# Avoid accidentally exporting the private API.
del oo_optimizers, haiku, optimizers, jax
__all__ = (
"variables",
"transform",
"transform_with_state",
"without_state",
"device_get",
"device_put",
"grad",
"jit",
"value_and_grad",
"optimizer",
"sgd",
"adam",
"rmsprop",
"momentum",
)
|
{
"content_hash": "9300fca608c19c81e35ec20003d08611",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 59,
"avg_line_length": 24.541666666666668,
"alnum_prop": 0.7275042444821732,
"repo_name": "deepmind/sonnet",
"id": "2ad678fb258c5c11b655105b9f5d6491656654fb",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "sonnet/functional.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "794977"
},
{
"name": "Shell",
"bytes": "1844"
},
{
"name": "Starlark",
"bytes": "31841"
}
],
"symlink_target": ""
}
|
"""
Transformations between QuantumChannel representations.
"""
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix
from qiskit.quantum_info.operators.predicates import ATOL_DEFAULT
def _transform_rep(input_rep, output_rep, data, input_dim, output_dim):
    """Transform a QuantumChannel between representation."""
    if input_rep == output_rep:
        return data
    # Dispatch on the target representation; every converter shares the
    # (rep, data, input_dim, output_dim) signature.
    converters = {
        "Choi": _to_choi,
        "Operator": _to_operator,
        "SuperOp": _to_superop,
        "Kraus": _to_kraus,
        "Chi": _to_chi,
        "PTM": _to_ptm,
        "Stinespring": _to_stinespring,
    }
    if output_rep not in converters:
        raise QiskitError(f"Invalid QuantumChannel {output_rep}")
    return converters[output_rep](input_rep, data, input_dim, output_dim)
def _to_choi(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the Choi representation."""
    if rep == "Choi":
        return data
    if rep == "Operator":
        return _from_operator("Choi", data, input_dim, output_dim)
    if rep == "Stinespring":
        return _stinespring_to_choi(data, input_dim, output_dim)
    if rep == "Kraus":
        return _kraus_to_choi(data)
    if rep == "Chi":
        return _chi_to_choi(data, input_dim)
    if rep in ("SuperOp", "PTM"):
        # PTM first converts to SuperOp, then reshuffles to Choi.
        if rep == "PTM":
            data = _ptm_to_superop(data, input_dim)
        return _superop_to_choi(data, input_dim, output_dim)
    raise QiskitError(f"Invalid QuantumChannel {rep}")
def _to_superop(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the SuperOp representation."""
    if rep == "SuperOp":
        return data
    if rep == "Operator":
        return _from_operator("SuperOp", data, input_dim, output_dim)
    if rep == "Kraus":
        return _kraus_to_superop(data)
    if rep == "PTM":
        return _ptm_to_superop(data, input_dim)
    if rep == "Stinespring":
        return _stinespring_to_superop(data, input_dim, output_dim)
    if rep in ("Choi", "Chi"):
        # Chi first converts to Choi, then reshuffles to SuperOp.
        if rep == "Chi":
            data = _chi_to_choi(data, input_dim)
        return _choi_to_superop(data, input_dim, output_dim)
    raise QiskitError(f"Invalid QuantumChannel {rep}")
def _to_kraus(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the Kraus representation."""
    if rep == "Kraus":
        return data
    if rep == "Stinespring":
        return _stinespring_to_kraus(data, output_dim)
    if rep == "Operator":
        return _from_operator("Kraus", data, input_dim, output_dim)
    # Every other representation routes through the Choi matrix.
    choi = data if rep == "Choi" else _to_choi(rep, data, input_dim,
                                               output_dim)
    return _choi_to_kraus(choi, input_dim, output_dim)
def _to_chi(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the Chi representation."""
    if rep == "Chi":
        return data
    # Chi matrices are only defined for n-qubit channels.
    _check_nqubit_dim(input_dim, output_dim)
    if rep == "Operator":
        return _from_operator("Chi", data, input_dim, output_dim)
    # Every other representation routes through the Choi matrix.
    choi = data if rep == "Choi" else _to_choi(rep, data, input_dim,
                                               output_dim)
    return _choi_to_chi(choi, input_dim)
def _to_ptm(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the PTM representation."""
    if rep == "PTM":
        return data
    # PTMs are only defined for n-qubit channels.
    _check_nqubit_dim(input_dim, output_dim)
    if rep == "Operator":
        return _from_operator("PTM", data, input_dim, output_dim)
    # Every other representation routes through the superoperator.
    sup = data if rep == "SuperOp" else _to_superop(rep, data, input_dim,
                                                    output_dim)
    return _superop_to_ptm(sup, input_dim)
def _to_stinespring(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the Stinespring representation."""
    if rep == "Stinespring":
        return data
    if rep == "Operator":
        return _from_operator("Stinespring", data, input_dim, output_dim)
    # Every other representation routes through the Kraus form.
    kraus = data if rep == "Kraus" else _to_kraus(rep, data, input_dim,
                                                  output_dim)
    return _kraus_to_stinespring(kraus, input_dim, output_dim)
def _to_operator(rep, data, input_dim, output_dim):
    """Transform a QuantumChannel to the Operator representation."""
    if rep == "Operator":
        return data
    if rep == "Stinespring":
        return _stinespring_to_operator(data, output_dim)
    # Every other representation routes through the Kraus form.
    kraus = data if rep == "Kraus" else _to_kraus(rep, data, input_dim,
                                                  output_dim)
    return _kraus_to_operator(kraus)
def _from_operator(rep, data, input_dim, output_dim):
    """Transform Operator representation to other representation."""
    if rep == "Operator":
        return data
    if rep == "Kraus":
        # A unitary-style map has a single (left) Kraus operator.
        return [data], None
    if rep == "Stinespring":
        return data, None
    if rep == "SuperOp":
        # vec(A X A^dagger) = (conj(A) tensor A) vec(X)
        return np.kron(np.conj(data), data)
    if rep == "Choi":
        # Choi matrix of a pure map is |vec(A)><vec(A)|.
        col_vec = np.ravel(data, order="F")
        return np.outer(col_vec, np.conj(col_vec))
    if rep == "Chi":
        _check_nqubit_dim(input_dim, output_dim)
        choi = _from_operator("Choi", data, input_dim, output_dim)
        return _choi_to_chi(choi, input_dim)
    if rep == "PTM":
        _check_nqubit_dim(input_dim, output_dim)
        sup = _from_operator("SuperOp", data, input_dim, output_dim)
        return _superop_to_ptm(sup, input_dim)
    raise QiskitError(f"Invalid QuantumChannel {rep}")
def _kraus_to_operator(data):
    """Transform Kraus representation to Operator representation."""
    kraus_l, kraus_r = data
    # Only a CP map with a single (left) Kraus operator corresponds to
    # an Operator; generalized left/right sets cannot be converted.
    if kraus_r is not None or len(kraus_l) > 1:
        raise QiskitError("Channel cannot be converted to Operator representation")
    return kraus_l[0]
def _stinespring_to_operator(data, output_dim):
    """Transform Stinespring representation to Operator representation."""
    stine_l, stine_r = data
    # Convertible only when there is no right matrix and the ancilla
    # (traced-out) dimension is trivial.
    ancilla_dim = stine_l.shape[0] // output_dim
    if stine_r is not None or ancilla_dim != 1:
        raise QiskitError("Channel cannot be converted to Operator representation")
    return stine_l
def _superop_to_choi(data, input_dim, output_dim):
    """Transform SuperOp representation to Choi representation."""
    # Reshuffling the 4-index tensor converts between the two orderings.
    return _reshuffle(data, (output_dim, output_dim, input_dim, input_dim))
def _choi_to_superop(data, input_dim, output_dim):
    """Transform Choi to SuperOp representation."""
    # Inverse reshuffle of _superop_to_choi.
    return _reshuffle(data, (input_dim, output_dim, input_dim, output_dim))
def _kraus_to_choi(data):
    """Transform Kraus representation to Choi representation."""
    kraus_l, kraus_r = data
    if kraus_r is None:
        # CP map: sum of |vec(K)><vec(K)| over the Kraus operators.
        return sum(np.outer(k.ravel(order="F"),
                            k.ravel(order="F").conj())
                   for k in kraus_l)
    # Generalized map: pair left and right Kraus operators.
    return sum(np.outer(a.ravel(order="F"), b.ravel(order="F").conj())
               for a, b in zip(kraus_l, kraus_r))
def _choi_to_kraus(data, input_dim, output_dim, atol=ATOL_DEFAULT):
    """Transform Choi representation to Kraus representation.

    For a Hermitian Choi matrix with non-negative spectrum (a CP map)
    this returns ``(kraus, None)``; otherwise a generalized left/right
    Kraus pair obtained from an SVD is returned.

    :param data: Choi matrix
    :param input_dim: channel input dimension
    :param output_dim: channel output dimension
    :param atol: absolute tolerance for the Hermiticity and
                 eigenvalue-sign checks
    """
    from scipy import linalg as la
    # Check if hermitian matrix
    if is_hermitian_matrix(data, atol=atol):
        # Get eigen-decomposition of Choi-matrix
        # This should be a call to la.eigh, but there is an OpenBlas
        # threading issue that is causing segfaults.
        # Need schur here since la.eig does not
        # guarantee orthogonality in degenerate subspaces
        w, v = la.schur(data, output="complex")
        w = w.diagonal().real
        # Check eigenvalues are non-negative
        if len(w[w < -atol]) == 0:
            # CP-map Kraus representation: each eigenpair yields one
            # Kraus operator K = sqrt(lambda) * unvec(eigvec).
            kraus = []
            for val, vec in zip(w, v.T):
                if abs(val) > atol:
                    k = np.sqrt(val) * vec.reshape((output_dim, input_dim), order="F")
                    kraus.append(k)
            # If we are converting a zero matrix, we need to return a Kraus set
            # with a single zero-element Kraus matrix
            if not kraus:
                kraus.append(np.zeros((output_dim, input_dim), dtype=complex))
            return kraus, None
    # Non-CP-map generalized Kraus representation via SVD: singular
    # vectors give separate left and right Kraus sets.
    mat_u, svals, mat_vh = la.svd(data)
    kraus_l = []
    kraus_r = []
    for val, vec_l, vec_r in zip(svals, mat_u.T, mat_vh.conj()):
        kraus_l.append(np.sqrt(val) * vec_l.reshape((output_dim, input_dim), order="F"))
        kraus_r.append(np.sqrt(val) * vec_r.reshape((output_dim, input_dim), order="F"))
    return kraus_l, kraus_r
def _stinespring_to_kraus(data, output_dim):
    """Transform Stinespring representation to Kraus representation."""
    kraus_pair = []
    for stine in data:
        if stine is None:
            kraus_pair.append(None)
            continue
        trace_dim = stine.shape[0] // output_dim
        iden = np.eye(output_dim)
        ops = []
        for idx in range(trace_dim):
            basis_vec = np.zeros(trace_dim)
            basis_vec[idx] = 1
            # Project out the idx-th ancilla component of the Stinespring map.
            ops.append(np.kron(iden, basis_vec[None, :]).dot(stine))
        kraus_pair.append(ops)
    return tuple(kraus_pair)
def _stinespring_to_choi(data, input_dim, output_dim):
    """Transform Stinespring representation to Choi representation."""
    stine_l, stine_r = data
    trace_dim = stine_l.shape[0] // output_dim
    shape = (output_dim, trace_dim, input_dim)
    left = np.reshape(stine_l, shape)
    # A single Stinespring matrix is used on both sides when no right
    # matrix is provided.
    right = left if stine_r is None else np.reshape(stine_r, shape)
    dim = input_dim * output_dim
    # Contract over the ancilla index A and regroup into the Choi matrix.
    return np.reshape(np.einsum("iAj,kAl->jilk", left, right.conj()), (dim, dim))
def _stinespring_to_superop(data, input_dim, output_dim):
    """Transform Stinespring representation to SuperOp representation."""
    stine_l, stine_r = data
    trace_dim = stine_l.shape[0] // output_dim
    shape = (output_dim, trace_dim, input_dim)
    left = np.reshape(stine_l, shape)
    right = left if stine_r is None else np.reshape(stine_r, shape)
    # Contract over the ancilla index A; conjugated right matrix first, as
    # SuperOp acts on column-vectorized density matrices.
    return np.reshape(
        np.einsum("iAj,kAl->ikjl", right.conj(), left),
        (output_dim * output_dim, input_dim * input_dim),
    )
def _kraus_to_stinespring(data, input_dim, output_dim):
    """Transform Kraus representation to Stinespring representation."""
    stine_pair = [None, None]
    for idx, kraus_set in enumerate(data):
        if kraus_set is None:
            continue
        num_kraus = len(kraus_set)
        # Stack each Kraus operator into its own ancilla slot.
        stine = np.zeros((output_dim * num_kraus, input_dim), dtype=complex)
        for pos, mat in enumerate(kraus_set):
            basis_vec = np.zeros(num_kraus)
            basis_vec[pos] = 1
            stine += np.kron(mat, basis_vec[:, None])
        stine_pair[idx] = stine
    return tuple(stine_pair)
def _kraus_to_superop(data):
    """Transform Kraus representation to SuperOp representation."""
    kraus_l, kraus_r = data
    if kraus_r is None:
        # CP map: S = sum_k conj(K_k) (x) K_k.
        return sum(np.kron(mat.conj(), mat) for mat in kraus_l)
    # Generalized pair: conjugate the right operator in each term.
    return sum(
        np.kron(right.conj(), left) for left, right in zip(kraus_l, kraus_r)
    )
def _chi_to_choi(data, input_dim):
    """Transform Chi representation to a Choi representation."""
    # The Chi matrix is the Choi matrix expressed in the n-qubit Pauli basis.
    return _transform_from_pauli(data, int(np.log2(input_dim)))
def _choi_to_chi(data, input_dim):
    """Transform Choi representation to the Chi representation."""
    # Change basis from the computational basis to the n-qubit Pauli basis.
    return _transform_to_pauli(data, int(np.log2(input_dim)))
def _ptm_to_superop(data, input_dim):
    """Transform PTM representation to SuperOp representation."""
    # A PTM is the SuperOp expressed in the n-qubit Pauli basis.
    return _transform_from_pauli(data, int(np.log2(input_dim)))
def _superop_to_ptm(data, input_dim):
    """Transform SuperOp representation to PTM representation."""
    # Change basis from the computational basis to the n-qubit Pauli basis.
    return _transform_to_pauli(data, int(np.log2(input_dim)))
def _bipartite_tensor(mat1, mat2, shape1=None, shape2=None):
    """Tensor product (A ⊗ B) to bipartite matrices and reravel indices.

    This is used for tensor product of superoperators and Choi matrices.

    Args:
        mat1 (matrix_like): a bipartite matrix A
        mat2 (matrix_like): a bipartite matrix B
        shape1 (tuple): bipartite-shape for matrix A (a0, a1, a2, a3)
        shape2 (tuple): bipartite-shape for matrix B (b0, b1, b2, b3)

    Returns:
        np.array: a bipartite matrix for reravel(A ⊗ B).

    Raises:
        QiskitError: if input matrices are wrong shape.
    """
    mat1 = np.array(mat1)
    mat2 = np.array(mat2)
    rows_a, cols_a = mat1.shape
    rows_b, cols_b = mat2.shape
    # Default to square subsystem factors when no bipartite shapes are given.
    if shape1 is None:
        row_side = int(np.sqrt(rows_a))
        col_side = int(np.sqrt(cols_a))
        shape1 = (row_side, row_side, col_side, col_side)
    if shape2 is None:
        row_side = int(np.sqrt(rows_b))
        col_side = int(np.sqrt(cols_b))
        shape2 = (row_side, row_side, col_side, col_side)
    # The bipartite shapes must factor the full matrix dimensions exactly.
    if len(shape1) != 4 or shape1[0] * shape1[1] != rows_a or shape1[2] * shape1[3] != cols_a:
        raise QiskitError("Invalid shape_a")
    if len(shape2) != 4 or shape2[0] * shape2[1] != rows_b or shape2[2] * shape2[3] != cols_b:
        raise QiskitError("Invalid shape_b")
    return _reravel(mat1, mat2, shape1, shape2)
def _reravel(mat1, mat2, shape1, shape2):
    """Reravel two bipartite matrices.

    Forms the Kronecker product of the two bipartite matrices and then
    permutes the subsystem indices so that the result is again bipartite,
    with the A and B subsystem indices interleaved.

    Args:
        mat1 (np.array): bipartite matrix A with bipartite shape ``shape1``.
        mat2 (np.array): bipartite matrix B with bipartite shape ``shape2``.
        shape1 (tuple): bipartite-shape (a0, a1, a2, a3) of A.
        shape2 (tuple): bipartite-shape (b0, b1, b2, b3) of B.

    Returns:
        np.array: the reraveled matrix for A ⊗ B.
    """
    # Reshuffle indices
    left_dims = shape1[:2] + shape2[:2]
    right_dims = shape1[2:] + shape2[2:]
    tensor_shape = left_dims + right_dims
    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported spelling with identical semantics.
    final_shape = (np.prod(left_dims), np.prod(right_dims))
    # Tensor product matrices, then interleave the A/B subsystem indices.
    data = np.kron(mat1, mat2)
    data = np.reshape(
        np.transpose(np.reshape(data, tensor_shape), (0, 2, 1, 3, 4, 6, 5, 7)), final_shape
    )
    return data
def _transform_to_pauli(data, num_qubits):
    """Change of basis of bipartite matrix representation."""
    # Single-qubit change of basis: sum_{i=0}^3 |i>><\sigma_i|
    single = np.array(
        [[1, 0, 0, 1], [0, 1, 1, 0], [0, -1j, 1j, 0], [1, 0j, 0, -1]], dtype=complex
    )
    # Build the n-qubit change-of-basis matrix by repeated Kronecker products
    # with an index reshuffle; renormalize once at the end so that rounding
    # errors from square-roots of 2 are avoided.
    cob = single
    for _ in range(num_qubits - 1):
        dim = int(np.sqrt(len(cob)))
        expanded = np.reshape(np.kron(single, cob), (4, dim * dim, 2, 2, dim, dim))
        expanded = np.transpose(expanded, (0, 1, 2, 4, 3, 5))
        cob = np.reshape(expanded, (4 * dim * dim, 4 * dim * dim))
    return cob.dot(data).dot(cob.conj().T) / 2**num_qubits
def _transform_from_pauli(data, num_qubits):
    """Change of basis of bipartite matrix representation."""
    # Single-qubit change of basis: sum_{i=0}^3 |\sigma_i>><i|
    single = np.array(
        [[1, 0, 0, 1], [0, 1, 1j, 0], [0, 1, -1j, 0], [1, 0j, 0, -1]], dtype=complex
    )
    # Build the n-qubit change-of-basis matrix by repeated Kronecker products
    # with an index reshuffle; renormalize once at the end so that rounding
    # errors from square-roots of 2 are avoided.
    cob = single
    for _ in range(num_qubits - 1):
        dim = int(np.sqrt(len(cob)))
        expanded = np.reshape(np.kron(single, cob), (2, 2, dim, dim, 4, dim * dim))
        expanded = np.transpose(expanded, (0, 2, 1, 3, 4, 5))
        cob = np.reshape(expanded, (4 * dim * dim, 4 * dim * dim))
    return cob.dot(data).dot(cob.conj().T) / 2**num_qubits
def _reshuffle(mat, shape):
    """Reshuffle the indices of a bipartite matrix A[ij,kl] -> A[lj,ki]."""
    # Lift to a rank-4 tensor, swap the outer pair of indices (axes 0 and 3),
    # and flatten back to a matrix.
    tensor = np.reshape(mat, shape)
    swapped = np.transpose(tensor, (3, 1, 2, 0))
    return np.reshape(swapped, (shape[3] * shape[1], shape[0] * shape[2]))
def _check_nqubit_dim(input_dim, output_dim):
    """Raise QiskitError unless dims correspond to an n-qubit channel."""
    if input_dim != output_dim:
        raise QiskitError(
            f"Not an n-qubit channel: input_dim ({input_dim}) != output_dim ({output_dim})"
        )
    # The (square) dimension must additionally be an exact power of two.
    if 2 ** int(np.log2(input_dim)) != input_dim:
        raise QiskitError("Not an n-qubit channel: input_dim != 2 ** n")
|
{
"content_hash": "4fa12d7be24b2d4adcf2b643eb567173",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 94,
"avg_line_length": 36.60532150776053,
"alnum_prop": 0.6103337573444788,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "2d980942550bc1312cd539c20ef2f09e76dd7546",
"size": "17044",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/quantum_info/operators/channel/transformations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
}
|
import pyblish.api
class ValidateMindbenderProjectEditInfo(pyblish.api.ContextPlugin):
    """Checks your scene with editorial info
    All the info that gets validated has been set by the projects bat files.
    If you are certain that the info is incorrect, talk to your project
    Supervisor with haste!
    """
    label = "Project Edit Info"
    optional = True
    order = pyblish.api.ValidatorOrder
    families = ["mindbender.animation"]
    def process(self, context):
        # Maya is only available inside the host application, hence the
        # deferred local import.
        from maya import cmds
        # Current playback (animation) range of the scene, in frames.
        scene_in = cmds.playbackOptions(query=True, animationStartTime=True)
        scene_out = cmds.playbackOptions(query=True, animationEndTime=True)
        # Map Maya's named time units to their numeric frames-per-second.
        scene_fps = {
            "12fps": 12,
            "game": 15,
            "16fps": 16,
            "film": 24,
            "pal": 25,
            "ntsc": 30,
            "show": 48,
            "palf": 50,
            "ntscf": 60}.get(cmds.currentUnit(query=True, time=True))
        # Unknown time unit: placeholder used in the assert message below.
        # NOTE(review): int(scene_fps) on this placeholder raises ValueError
        # rather than a clean assertion failure — confirm this is intended.
        if scene_fps is None:
            scene_fps = "a strange "
        # Expected editorial values come from the project environment
        # (set by the project's bat files).
        env = context.data.get("environment", dict())
        valid_fps = env.get("avalonFps")
        valid_edit_in = env.get("avalonEditIn")
        valid_edit_out = env.get("avalonEditOut")
        # Skip validation entirely when the environment is not configured.
        skip_on_none = [valid_fps, valid_edit_in, valid_edit_out]
        if None in skip_on_none:
            self.log.debug(" environment not set")
            return
        assert int(valid_fps) == int(scene_fps), (
            "The FPS is set to %sfps and not to %sfps"
            % (scene_fps, valid_fps))
        assert int(scene_in) == int(valid_edit_in), (
            "Animation Start is set to %s and not set to \"%s\""
            % (scene_in, valid_edit_in))
        assert int(scene_out) == int(valid_edit_out), (
            "Animation End is set to %s and not set to \"%s\""
            % (scene_out, valid_edit_out))
|
{
"content_hash": "0e2b4ff29c706be3cfcbb7faf135991e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 32,
"alnum_prop": 0.5705818965517241,
"repo_name": "mindbender-studio/config",
"id": "e4879205d6669df12c8506f349457af8d0fbf8bb",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polly/plugins/maya/publish/validate_project_editorial_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "130394"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the strength-enhancing medpack."""
    medpack = Tangible()
    medpack.template = "object/tangible/medicine/shared_medpack_enhance_strength.iff"
    medpack.attribute_template_id = 7
    medpack.stfName("medicine_name","medpack_enhance_strength")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return medpack
|
{
"content_hash": "3de97b07eab1a9fbd1fb5c114128c6f8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7173252279635258,
"repo_name": "obi-two/Rebelion",
"id": "5a476eacd66f164d695edc59a2a6198449217fba",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/medicine/shared_medpack_enhance_strength.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional free-text ``comment`` and ``group_name`` fields to
    the TrainingRequest model."""
    dependencies = [
        ('workshops', '0106_auto_20160701_0246'),
    ]
    operations = [
        migrations.AddField(
            model_name='trainingrequest',
            name='comment',
            field=models.TextField(blank=True, default='', help_text='What else do you want us to know?', verbose_name='Anything else?'),
        ),
        migrations.AddField(
            model_name='trainingrequest',
            name='group_name',
            field=models.CharField(blank=True, default='', help_text="If you are part of a group that is applying for an instructor training together, please enter the name of your group here and on every group member's application.", max_length=100, verbose_name='Group name'),
        ),
    ]
|
{
"content_hash": "d0d0d1373da10ce9e53dc03d722405dd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 278,
"avg_line_length": 37.69565217391305,
"alnum_prop": 0.6412918108419838,
"repo_name": "vahtras/amy",
"id": "dfa9d589888230932687570bf271989dc083b32c",
"size": "939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "workshops/migrations/0107_auto_20160701_0408.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4505"
},
{
"name": "HTML",
"bytes": "216300"
},
{
"name": "JavaScript",
"bytes": "16883"
},
{
"name": "Makefile",
"bytes": "2167"
},
{
"name": "Python",
"bytes": "1090706"
}
],
"symlink_target": ""
}
|
'''
catalog_harvesting/__init__.py
'''
import logging
import os
# Package version string.
__version__ = '1.2.0'
# Cached module logger; initialized lazily by get_logger().
LOGGER = None
def get_logger():
    '''
    Returns an initialized logger
    '''
    global LOGGER
    # Reuse the cached logger if one was already created.
    if LOGGER is not None:
        return LOGGER
    LOGGER = logging.getLogger(__name__)
    return LOGGER
def get_redis_connection():
    '''
    Parse the REDIS_URL environment variable into connection parameters.

    Returns:
        tuple: (host, port, db) where host is a string, port an int, and
            db the database path segment as a string.

    Raises:
        ValueError: if REDIS_URL does not use the redis:// protocol.
    '''
    redis_url = os.environ.get('REDIS_URL', 'redis://localhost:6379/0')
    protocol, address = redis_url.split('://')
    if protocol != 'redis':
        raise ValueError('REDIS_URL must be protocol redis')
    connection_str, path = address.split('/')
    if ':' in connection_str:
        host, port = connection_str.split(':')
        # Convert to int so the type is consistent with the default branch
        # below (previously the parsed port was returned as a string).
        port = int(port)
    else:
        port = 6379
        host = connection_str
    db = path
    return host, port, db
|
{
"content_hash": "9bf16c5ef10698fea40d1b67042fd694",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 20,
"alnum_prop": 0.5972972972972973,
"repo_name": "ioos/catalog-harvesting",
"id": "24a2a1f936e6682c742b8c78d27fc836605dde2a",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog_harvesting/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "61259"
},
{
"name": "Shell",
"bytes": "3104"
}
],
"symlink_target": ""
}
|
import telnetlib
from time import sleep
TELNET_PORT = 23  # standard telnet service port
TELNET_TIMEOUT = 6  # seconds to wait for expected prompts before giving up
def main():
print "Hello"
time.sleep(6)
print "Hello6"
ip_addr = '10.7.192.169'
username = 'pete4027'
password = 'Intel1000'
remote_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output = remote_conn.read_until("ssword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
time.sleep(1)
print output
output = remote_conn.read_very_eager()
print output
remote_conn.close()
main()
|
{
"content_hash": "da63cb699bc9846334b255d1f81a59f2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 23.48148148148148,
"alnum_prop": 0.6577287066246057,
"repo_name": "peternemec/peterPYTHON",
"id": "c1ae45b7321e60e3785fb6d2172e10d605a083b4",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telnet.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24802"
}
],
"symlink_target": ""
}
|
"""Benchmark for split and grad of split."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import time
import tensorflow as tf
# Command-line flags; --use_gpu toggles running the GPU benchmarks.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean("use_gpu", True, """Run GPU benchmarks.""")
def build_graph(device, input_shape, variable, num_inputs, axis, grad):
  """Build a graph containing a sequence of concat operations.

  Args:
    device: string, the device to run on.
    input_shape: shape of the input tensors.
    variable: whether or not to randomize the input shape
    num_inputs: the number of inputs to concat
    axis: axis to be concat'ed
    grad: if True compute the gradient

  Returns:
    An array of tensors to run()
  """
  with tf.device("/%s:0" % device):
    if not variable:
      inputs = [tf.zeros(input_shape) for _ in range(num_inputs)]
    else:
      # Randomize the non-concat dimension by up to +/-5 so the concat
      # sees variable-sized inputs.
      if axis == 1:
        inputs = [
            tf.zeros([
                input_shape[0],
                random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)
            ]) for _ in range(num_inputs)
        ]
      else:
        inputs = [
            tf.zeros([
                random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5),
                input_shape[1]
            ]) for _ in range(num_inputs)
        ]
    # NOTE: tf.concat(axis, values) is the pre-1.0 TensorFlow argument order.
    outputs = [tf.concat(axis, inputs) for _ in range(100)]
    if grad:
      # Group the gradients of every concat with respect to its inputs.
      return tf.group(*list(
          itertools.chain.from_iterable(
              [tf.gradients(output, inputs) for output in outputs])))
    else:
      return tf.group(*outputs)
class ConcatBenchmark(tf.test.Benchmark):
  """Benchmark concat and its gradient."""
  def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad,
                 num_iters):
    """Run the graph and print its execution time.

    Args:
      device: string, the device to run on.
      input_shape: shape of the input tensors.
      variable: whether or not the input shape should be fixed
      num_inputs: the number of inputs to concat
      axis: axis to be concat'ed
      grad: if True compute the gradient
      num_iters: number of steps to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = tf.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, variable, num_inputs, axis,
                            grad)
    # Disable graph optimizations so the benchmark measures the raw ops.
    config = tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L0)))
    with tf.Session(graph=graph, config=config) as session:
      tf.global_variables_initializer().run()
      _ = session.run(outputs)  # warm up.
      start_time = time.time()
      for _ in range(num_iters):
        _ = session.run(outputs)
      duration = time.time() - start_time
    # Throughput: num_inputs tensors, 4 bytes each, read+write (x2),
    # repeated 100 times per step.
    print("%s shape:%d/%d var: %r #inputs:%d axis:%d grad:%r - %f secs - %f "
          "GB/sec" % (device, input_shape[0], input_shape[1], variable,
                      num_inputs, axis, grad, duration / num_iters,
                      num_inputs * input_shape[0] * input_shape[1] * 4 * 2 *
                      100 / (duration / num_iters) / 1e9))
    name_template = (
        "concat_bench_{device}_input_shape_{shape}_variable_{variable}"
        "_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}")
    self.report_benchmark(name=name_template.format(
        device=device,
        num_inputs=num_inputs,
        variable=variable,
        grad=grad,
        shape=str(input_shape).replace(" ", ""),
        axis=str(axis),
        iters=num_iters))
    return duration
  def benchmark_concat(self):
    # Sweep a range of shapes, both concat axes, and fixed/variable inputs.
    print("Forward vs backward concat")
    shapes = [[2000, 8], [8, 2000], [100, 18], [1000, 18], [10000, 18],
              [100, 97], [1000, 97], [10000, 97], [10000, 1], [1, 10000],
              [100000, 1], [1, 100000]]
    axis_ = [0, 1]
    num_inputs = 100
    num_iters = [20] * len(shapes)
    variable = [False, True]  # fixed input size or not
    for shape, iters in zip(shapes, num_iters):
      for axis in axis_:
        for v in variable:
          self._run_graph("gpu", shape, v, num_inputs, axis, False, iters)
# Entry point: run the benchmark methods via the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
|
{
"content_hash": "1d84964654320aae67934cd6576ab5b3",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 33.265625,
"alnum_prop": 0.5875998121183654,
"repo_name": "nanditav/15712-TensorFlow",
"id": "00dd9b107631604f31473ee59cb7f57f9eab63d2",
"size": "4947",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/concat_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2967"
},
{
"name": "C",
"bytes": "94853"
},
{
"name": "C++",
"bytes": "13822769"
},
{
"name": "CMake",
"bytes": "93933"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "85586"
},
{
"name": "HTML",
"bytes": "525001"
},
{
"name": "Java",
"bytes": "56007"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "142429"
},
{
"name": "Python",
"bytes": "13133178"
},
{
"name": "Shell",
"bytes": "270336"
},
{
"name": "TypeScript",
"bytes": "724952"
}
],
"symlink_target": ""
}
|
import threading
from abc import ABCMeta, abstractmethod
from hqlib.rabbitmq import ExchangeType
import logging
from pika.exceptions import AMQPConnectionError
class Subscriber(threading.Thread):
    """Threaded RabbitMQ consumer bound to a direct exchange.

    Subclasses implement :meth:`message_deliver` to handle messages. The
    thread owns an asynchronous pika connection and retries with a 5 second
    backoff whenever the connection drops, until :meth:`stop` is called.
    """
    __metaclass__ = ABCMeta
    def __init__(self, rabbitmq, exchange_name, routing_key, queue_name="", qos=0, auto_delete=True):
        super(Subscriber, self).__init__()
        self.rabbitmq = rabbitmq
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.connection = None
        self.queue_name = queue_name
        self.qos = qos
        self.channel = None
        self.stopped = False
        self.consumer_tag = None
        self.auto_delete = auto_delete
        self.logger = logging.getLogger("hq.warehouse.rabbitmq.routing.subscriber")
    def run(self):
        # Thread main loop: (re)connect and run pika's IO loop until stopped.
        reconnect = False
        while not self.stopped:
            if self.connection is not None:
                self.logger.debug("Restarting IO Loop")
                if not self.connection.is_closed and not self.connection.is_closing:
                    self.connection.close()
            try:
                if self.connection is None:
                    reconnect = True
                self.connection = self.connect(reconnect)
                reconnect = False
            except AMQPConnectionError as e:
                # NOTE(review): `e.message` is Python 2 only.
                self.logger.error("Rabbitmq Connection error " + e.message)
                reconnect = True
                try:
                    # Back off before retrying; interruptible via Ctrl-C.
                    threading.Event().wait(5)
                except KeyboardInterrupt:
                    pass
                continue
            if self.consumer_tag is not None:
                # A previous consume existed; discard server-named (amq.*)
                # queues so a fresh one is declared after reconnecting.
                self.consumer_tag = None
                if self.queue_name.startswith("amq."):
                    self.queue_name = ""
            self.connection.ioloop.start()
        # Thread stopping: make sure the connection is closed.
        if not self.connection.is_closed and not self.connection.is_closing:
            try:
                self.connection.close()
            except IOError:
                pass
    def connect(self, reconnect=False):
        # Open an async connection via the shared rabbitmq helper.
        self.logger.debug("Creating connection")
        connection = self.rabbitmq.asyncconnection(self.on_connection_open, reconnect)
        connection.add_on_open_error_callback(self.on_connection_open_error)
        self.logger.debug("Connection created")
        return connection
    def reconnect(self):
        # Stopping the IO loop returns control to run(), which reconnects.
        self.logger.info("Subscriber reconnecting to rabbitmq")
        self.connection.ioloop.stop()
    def on_connection_open(self, connection):
        # Connection established: watch for closure and open a channel.
        connection.add_on_close_callback(self.on_connection_closed)
        self.open_channel()
    def on_connection_open_error(self, connection, error):
        # Could not connect: retry (possibly another server) after 5s.
        self.logger.info("RabbitMQ Connection couldn't open. Trying another server")
        connection.add_timeout(5, self.reconnect)
    def on_connection_closed(self, connection, reply_code, reply_text):
        # Connection dropped: reconnect unless this subscriber was stopped.
        self.channel = None
        if self.stopped:
            self.logger.info("Connection "+str(connection)+" was closed: ("+str(reply_code)+") "+reply_text+" exchange "+self.exchange_name+" routing "+self.routing_key)
        else:
            self.logger.info("RabbitMQ Connection Closed. Reconnecting in 5 seconds (%s) %s", reply_code, reply_text)
            connection.add_timeout(5, self.reconnect)
    def open_channel(self):
        self.connection.channel(on_open_callback=self.on_channel_open)
    def close_channel(self):
        if self.channel.is_open:
            self.channel.close()
    def on_channel_open(self, channel):
        # Channel ready: optionally apply QoS before declaring the exchange.
        self.channel = channel
        self.channel.add_on_close_callback(self.on_channel_closed)
        if self.qos != 0:
            self.channel.basic_qos(self.on_qos, prefetch_count=self.qos)
        else:
            self.setup_exchange()
    def on_qos(self, frame):
        self.setup_exchange()
    def on_channel_closed(self, channel, reply_code, reply_text):
        self.logger.info("Channel "+str(channel)+" was closed: ("+str(reply_code)+") "+reply_text+" exchange "+self.exchange_name+" routing "+self.routing_key)
        #self.connection.close()
    def setup_exchange(self):
        # Declare the direct exchange, then continue to queue setup.
        self.logger.debug("Subscriber Declaring Routing Exchange " + self.exchange_name)
        self.channel.exchange_declare(callback=self.on_exchange_declareok, exchange=self.exchange_name,
                                      exchange_type=ExchangeType.DIRECT.value)
    def on_exchange_declareok(self, frame):
        self.setup_queue()
    def setup_queue(self):
        # An empty queue_name lets the broker generate a server-named queue.
        self.channel.queue_declare(callback=self.on_queue_declareok, queue=self.queue_name, auto_delete=self.auto_delete)
    def on_queue_declareok(self, frame):
        # Record the broker-assigned queue name; default the routing key to
        # the queue name, then bind queue -> exchange.
        if self.queue_name == "":
            self.queue_name = frame.method.queue
        if self.routing_key == "":
            self.routing_key = self.queue_name
        self.channel.queue_bind(callback=self.on_bindok, queue=self.queue_name, exchange=self.exchange_name, routing_key=self.routing_key)
    def on_bindok(self, frame):
        self.start_consuming()
    def start_consuming(self):
        # Begin delivering messages to on_message; cancel consuming on HA
        # failover so the subscriber re-establishes itself cleanly.
        self.logger.info("Starting Consume on routing exchange " + self.exchange_name+" routing "+self.routing_key)
        self.add_on_cancel_callback()
        self.consumer_tag = self.channel.basic_consume(self.on_message, self.queue_name,
                                                       arguments={"x-cancel-on-ha-failover": True})
    def add_on_cancel_callback(self):
        self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
    def on_consumer_cancelled(self, frame):
        # Broker cancelled us (e.g. HA failover): resume if still running,
        # otherwise shut down.
        self.logger.info("Consumer was cancelled remotely. Shutting down: %r", frame)
        if not self.stopped:
            self.start_consuming()
            return
        if self.channel:
            self.stop()
    def on_message(self, channel, basic_deliver, properties, body):
        # Dispatch to the subclass handler; on any failure requeue the
        # message with a negative acknowledgement.
        try:
            self.message_deliver(channel, basic_deliver, properties, body)
        except:
            self.logger.exception("Routing Subscriber threw a error on consume")
            channel.basic_nack(basic_deliver.delivery_tag, requeue=True)
    @abstractmethod
    def message_deliver(self, channel, basic_deliver, properties, body):
        # Subclasses implement the actual message handling here.
        pass
    def stop(self):
        # Stop consuming, end the IO loop, and deregister from the helper.
        self.logger.info("Stopping Consumer ("+str(self)+") on Routing Exchange " + self.exchange_name+" routing "+self.routing_key)
        self.stopped = True
        self.connection.ioloop.stop()
        self.rabbitmq.remove_subscriber(self)
    def start(self):
        # Register with the helper before launching the thread.
        self.rabbitmq.add_subscriber(self)
        self.logger.debug("Adding Consumer "+str(self))
        super(Subscriber, self).start()
|
{
"content_hash": "0ec95ace3811262f3b18a16fa6c8fa67",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 169,
"avg_line_length": 40.527272727272724,
"alnum_prop": 0.6195603409600717,
"repo_name": "herqles-io/hq-lib",
"id": "bd2a9d4e8cc42d1b5f373af13b48097cccc8b3b0",
"size": "6687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hqlib/rabbitmq/routing/subscriber.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33931"
}
],
"symlink_target": ""
}
|
import os
from java.io import File
from ij import ImagePlus
from ij import IJ
from ij.measure import Measurements
from ij.measure import ResultsTable
from ij.plugin.frame import RoiManager
# Results table collecting one row of statistics per kymograph.
table = ResultsTable()
# Calibration of the kymograph axes: x is spatial, y is temporal.
x_axis_calibration = 0.160 # um
y_axis_calibration = 5 # sec
# Whether to save each kymograph's ROI as a .zip next to the source data.
save_roi = True
kymo_extension = ".tif"
# NOTE(review): `dataset` and `datasetService` are not defined in this file;
# presumably they are SciJava script parameters injected by the runner —
# confirm, otherwise this line runs before the loop below binds `dataset`.
parent_folder = File(dataset.getSource()).getParent()
# NOTE(review): kymo_folder is computed but never used below — confirm intent.
kymo_folder = os.path.join(parent_folder, "Kymographs-" + os.path.splitext(dataset.getName())[0])
rm = RoiManager(True)
for dataset in datasetService.getDatasets():
    # Only process datasets produced by the kymograph generator.
    if dataset.getName().startswith("Kymograph_"):
        print(dataset.getName())
        _, roi_id, roi_name = dataset.getName().split("_")
        roi_name = roi_name.split(".")[0]
        #IJ.selectWindow(dataset.getName().split(".")[0])
        IJ.selectWindow(dataset.getName())
        imp = IJ.getImage()
        roi = imp.getRoi()
        # Skip kymographs on which the user drew no ROI.
        if roi is None:
            continue
        # The slope of the ROI bounding box gives the track speed.
        width = roi.getBounds().width
        height = roi.getBounds().height
        slope = 1.0 * width / height
        width_cal = width * x_axis_calibration
        height_cal = height * y_axis_calibration
        slope_cal = 1.0 * width_cal / height_cal
        table.incrementCounter()
        table.addValue("Kymograph Name", dataset.getName())
        # um/sec -> um/min.
        table.addValue("Speeds (um/min)", slope_cal * 60)
        table.addValue("Width", width)
        table.addValue("Height", height)
        table.addValue("Slope", slope)
        table.addValue("Width Calibrated (um)", width_cal)
        table.addValue("Height Calibrated (sec)", height_cal)
        table.addValue("Slope Calibrated (um/sec)", slope_cal)
        if save_roi:
            rm.reset()
            rm.addRoi(roi)
            roi_path = os.path.join(parent_folder, dataset.getName().replace(kymo_extension, ".zip"))
            rm.runCommand("Save", roi_path)
rm.reset()
# Display and persist the collected statistics.
table.show("Kymographs Statistics")
table.save(os.path.join(parent_folder, "Statistics.csv"))
|
{
"content_hash": "e73155f741a1a90f02fdce3887742bad",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 97,
"avg_line_length": 25.52857142857143,
"alnum_prop": 0.7011751538891998,
"repo_name": "hadim/fiji_tools",
"id": "6e8e53bc97eff016d6022ca93524d423ffd70bba",
"size": "1981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/resources/script_templates/Hadim_Scripts/Kymograph_Analysis/Multi_Kymograph_Slope_Speed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33299"
},
{
"name": "Shell",
"bytes": "434"
}
],
"symlink_target": ""
}
|
"""
worker.py
~~~~~~~~~
EnergyPlus worker. This contains the basic code required to watch a folder for
incoming jobs, then run them and place the results ready to be fetched by the
client.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import logging
from multiprocessing import cpu_count
import multiprocessing
import os
import time
from zipfile import BadZipfile
import zipfile
from worker.runner import run as eplus_run
# Paths are resolved relative to this module so the worker runs from anywhere.
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
JOBS_DIR = os.path.join(THIS_DIR, 'jobs')  # incoming zipped jobs
RESULTS_DIR = os.path.join(THIS_DIR, 'results')  # unzipped jobs + outputs
LOG_PATH = os.path.join(THIS_DIR, 'worker.log')
# Append-mode debug logging to a file beside this module.
logging.basicConfig(
    level=logging.DEBUG,
    filename=LOG_PATH,
    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    datefmt='%m-%d %H:%M',
    filemode='a')
logging.info("Starting worker")
def get_jobs():
    """Pick up the jobs currently in the jobs directory.

    Returns
    -------
    list
        List of absolute paths to each zipped job.
    """
    # .gitignore keeps the (otherwise empty) jobs directory in git.
    return [
        os.path.join(JOBS_DIR, name)
        for name in os.listdir(JOBS_DIR)
        if name != '.gitignore'
    ]
def unzip_dir(src, dest=None, rm=False, retry_time=5):
    """Unzip a zipped file.

    This is used for the incoming jobs.

    Parameters
    ----------
    src : str
        Path to the zip archive.
    dest : str, optional {default: None}
        The destination folder.
    rm : bool, optional {default: False}
        Flag indicating whether to delete the archive once unzipped.
    retry_time : int, {default: 5}
        Seconds to wait if unzipping fails on first attempt.
    """
    with zipfile.ZipFile(src, 'r') as archive:
        try:
            archive.extractall(dest)
        except BadZipfile:
            # The upload may still be in progress; wait and retry once.
            time.sleep(retry_time)
            archive.extractall(dest)
    if rm:
        os.remove(src)
def ensure_dir(dir_):
    """Ensure a directory exists.

    Creates ``dir_`` if missing; a pre-existing directory is fine. The
    original OSError is re-raised when the path exists but is not a
    directory (the previous ``assert`` check was silently stripped when
    running under ``python -O``).
    """
    try:
        os.mkdir(dir_)
    except OSError:
        if not os.path.isdir(dir_):
            raise
def run_job(job, rm=True):
    """Run an EnergyPlus job."""
    # One results sub-directory per job, named after the zip archive.
    job_name = os.path.basename(job).replace('.zip', '')
    run_dir = os.path.join(RESULTS_DIR, job_name)
    ensure_dir(run_dir)
    unzip_dir(job, run_dir, rm=rm, retry_time=5)
    try:
        # The job archive must contain exactly one model and one weather file.
        idf = glob.glob(os.path.join(run_dir, '*.idf'))[0]
        epw = glob.glob(os.path.join(run_dir, '*.epw'))[0]
        output_dir = os.path.join(RESULTS_DIR, job_name)
        eplus_run(idf, epw,
                  output_directory=output_dir,
                  expandobjects=True,
                  verbose='v')
    except Exception as e:
        logging.error("Error: %s" % e)
        raise
def running_jobs():
    """Get a count of currently running jobs.

    A job directory counts as finished once EnergyPlus has written its
    ``eplusout.end`` marker file into it.

    Returns
    -------
    int
    """
    dirs = [os.path.join(RESULTS_DIR, dir_)
            for dir_ in os.listdir(RESULTS_DIR)
            if os.path.isdir(os.path.join(RESULTS_DIR, dir_))]
    active_jobs = len(dirs)
    for f in dirs:
        # os.listdir() returns bare file names; the previous comparison
        # against 'eplusout.end\n' could never match, so finished jobs were
        # always counted as still running.
        if 'eplusout.end' in os.listdir(os.path.join(RESULTS_DIR, f)):
            active_jobs -= 1
    return active_jobs
def main():
    """Poll the jobs directory forever, launching each job in a process."""
    num_cpus = cpu_count()
    logging.info('This system has %i CPUs available' % num_cpus)
    while True:
        jobs = get_jobs()
        if jobs:
            logging.info('found %i jobs' % len(jobs))
            for job in jobs:
                # One subprocess per job; stagger launches slightly.
                proc = multiprocessing.Process(target=run_job, args=(job,))
                proc.start()
                time.sleep(5)
        # Signal readiness for more work once there is spare CPU capacity.
        if running_jobs() <= num_cpus:
            open(os.path.join(
                THIS_DIR, os.pardir, 'ready.txt'), 'a').close()
        time.sleep(5)
# Script entry point: run forever, logging any fatal error before re-raising.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        logging.error(e, exc_info=True)
        raise
|
{
"content_hash": "d1c66ee6ef85dccf08faf13309626aef",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 25.75657894736842,
"alnum_prop": 0.5923371647509579,
"repo_name": "jamiebull1/eplus_worker",
"id": "74f17ebf496542d089619912027713f585ec6bb1",
"size": "3961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13389"
}
],
"symlink_target": ""
}
|
"""
Application to convert MAF file to AXT file, projecting to any two species.
Reads a MAF file from standard input and writes an AXT file to standard out;
some statistics are written to standard error. The user must specify the
two species of interest.
usage: %prog primary_species secondary_species < maf_file > axt_file
"""
__author__ = "Bob Harris (rsharris@bx.psu.edu)"
import sys
import copy
import bx.align.maf
import bx.align.axt
def usage(s=None):
    """Exit the program, printing the usage message to stderr via SystemExit.

    If *s* is given, it is reported as an error line ahead of the usage text.
    """
    message = """
maf_to_axt primary_species secondary_species < maf_file > axt_file
"""
    # `is None` rather than `== None`: identity test is the correct idiom.
    if s is None:
        sys.exit(message)
    else:
        sys.exit("%s\n%s" % (s, message))
def main():
    """Convert MAF on stdin to AXT on stdout, projected to two species."""
    # ----- parse the command line -----
    primary = None
    secondary = None
    for raw in sys.argv[1:]:
        name, sep, value = raw.partition("=")
        if sep and value == "":
            usage("missing a value in %s=" % name)
        if primary is None and not sep:
            primary = name
        elif secondary is None and not sep:
            secondary = name
        else:
            usage("unknown argument: %s" % name)
    if primary is None:
        usage("missing primary species")
    if secondary is None:
        usage("missing secondary species")
    # ----- read the alignments and convert -----
    writer = bx.align.axt.Writer(sys.stdout)
    blocks_read = 0
    blocks_written = 0
    for maf_block in bx.align.maf.Reader(sys.stdin):
        blocks_read += 1
        primary_comp = maf_block.get_component_by_src_start(primary)
        if primary_comp is None:
            continue
        secondary_comp = maf_block.get_component_by_src_start(secondary)
        if secondary_comp is None:
            continue
        axt_block = bx.align.Alignment(maf_block.score, maf_block.attributes)
        axt_block.add_component(clone_component(primary_comp))
        axt_block.add_component(clone_component(secondary_comp))
        remove_mutual_gaps(axt_block)
        # Blocks that were all gaps end up empty; drop them.
        if axt_block.text_size == 0:
            continue
        writer.write(axt_block)
        blocks_written += 1
    sys.stderr.write("%d blocks read, %d written\n" % (blocks_read, blocks_written))
def clone_component(c):
    """Return a new Component carrying the same fields as *c*."""
    text = copy.copy(c.text)
    return bx.align.Component(c.src, c.start, c.size, c.strand, c.src_size, text)
def remove_mutual_gaps(block):
    """Strip columns that are gaps in every component of *block*.

    Collects the column indices where at least one component has a
    non-gap character, then rewrites each component's text (and
    ``block.text_size``) to contain only those columns, preserving order.
    """
    if not block.components:
        return
    # Use a set for membership: the original `ix not in nonGaps` list scan
    # made this O(columns^2 * components).
    keep = set()
    for c in block.components:
        for ix in range(block.text_size):
            if c.text[ix] != "-":
                keep.add(ix)
    non_gaps = sorted(keep)
    for c in block.components:
        c.text = "".join([c.text[ix] for ix in non_gaps])
    block.text_size = len(non_gaps)
if __name__ == "__main__": main()
|
{
"content_hash": "cd3c685f8d917a2e0115e6e9d83da423",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 22.807339449541285,
"alnum_prop": 0.6516492357200322,
"repo_name": "bxlab/HiFive_Paper",
"id": "3c5aa3d6c5d38e907a10be16b7f0be6b94e10ec9",
"size": "2508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Scripts/HiCLib/bx-python-0.7.1/scripts/maf_to_axt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5096"
},
{
"name": "C",
"bytes": "107381"
},
{
"name": "C++",
"bytes": "182835"
},
{
"name": "CMake",
"bytes": "3353"
},
{
"name": "Forth",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "22978"
},
{
"name": "Perl",
"bytes": "25453"
},
{
"name": "Python",
"bytes": "4229513"
},
{
"name": "R",
"bytes": "43022"
},
{
"name": "Shell",
"bytes": "10798"
}
],
"symlink_target": ""
}
|
# Standard Ansible module metadata: community-supported, 'preview' status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_gtm_topology
short_description: BIG-IP gtm topology module
description:
- Configures a topology statement.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
app_service:
description:
- Specifies the application service that the object belongs to.
description:
description:
- Specifies a user-defined description.
name:
description:
- Specifies unique name for the component.
required: true
order:
description:
- Specifies the order in which the system processes the topology record.
default: 0
score:
description:
- Specifies the weight of the topology item.
default: 1
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create GTM Topology
f5bigip_gtm_topology:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: 'ldns: country US server: datacenter /Common/DC1'
description: My topology
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import AnsibleF5Error
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the Ansible argument spec for the GTM topology module."""

    @property
    def argument_spec(self):
        """Module-specific options merged with the common F5 arguments."""
        spec = dict(
            app_service=dict(type='str'),
            description=dict(type='str'),
            order=dict(type='list'),
            score=dict(type='int')
        )
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        # Topology records are not partitioned objects, so drop the
        # standard 'partition' option contributed by F5_NAMED_OBJ_ARGS.
        del spec['partition']
        return spec

    @property
    def supports_check_mode(self):
        return True
class F5BigIpGtmTopology(F5BigIpNamedObject):
    """Maps CRUD operations onto the BIG-IP GTM topology REST resource."""

    def _set_crud_methods(self):
        # All five operations target the same iControl endpoint.
        topology = self._api.tm.gtm.topology_s.topology
        self._methods = {
            'create': topology.create,
            'read': topology.load,
            'update': topology.update,
            'delete': topology.delete,
            'exists': topology.exists
        }

    def _update(self):
        raise AnsibleF5Error("%s does not support update" % self.__class__.__name__)
def main():
    """Ansible entry point: build the module and apply the requested state."""
    params = ModuleParams()
    module = AnsibleModule(argument_spec=params.argument_spec,
                           supports_check_mode=params.supports_check_mode)
    try:
        topology = F5BigIpGtmTopology(check_mode=module.check_mode,
                                      **module.params)
        module.exit_json(**topology.flush())
    except Exception as exc:
        module.fail_json(msg=str(exc))
# Run the module when invoked by the Ansible executor.
if __name__ == '__main__':
    main()
|
{
"content_hash": "82a7f01390b0f441e1c836153c70b060",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 110,
"avg_line_length": 27.99122807017544,
"alnum_prop": 0.630836728298339,
"repo_name": "GabrielFortin/ansible-module-f5bigip",
"id": "b7d0d0a6a4ecb93de832eb53bd798a82ae9f1f80",
"size": "3836",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "library/f5bigip_gtm_topology.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1265283"
}
],
"symlink_target": ""
}
|
'''Django has several of these built-in but they are annoying to use.
:attr:`WEEK_MAP_MID`: This is a map of the days of the week along with the
mid-length string for that value. For example:
.. code-block:: python
WEEK_MAP_MID = {
0: 'Mon',
1: 'Tue',
...
}
:attr:`WEEK_MAP_SHORT`: This is similar except using a longer string.
:attr:`MONTH_MAP`: This is a map of the months which refer to a two-element
tuple which has the short code for the month and the long string for that
month. I.e. 'JAN' and 'January'.
:attr:`MONTH_MAP`: This is a map of the months which refer to a two-element
tuple which has the short code for the month and the long string for that
month. I.e. 'JAN' and 'January'.
:attr:`MONTH_MAP_SHORT`: This is a map of the months which refer to the short
name of a month corresponding to the number. I.e. 1: January.
:attr:`WORKING_CHOICES`: This is a tuple of two-element tuples which contain
the only possible working day possibilities.
:attr:`ABSENT_CHOICES`: This is a tuple of two-element tuples which contain
the only possible absent day possibilities.
:attr:`DAYTYPE_CHOICES`: This is both :attr:`WORKING_CHOICES` and
:attr:`ABSENT_CHOICES` joined together to give all the daytype possibilities.
'''
import datetime
from django.core.cache import cache
# Day-of-week number (as from datetime.weekday()) -> three-letter name.
WEEK_MAP_MID = {
    0: 'Mon',
    1: 'Tue',
    2: 'Wed',
    3: 'Thu',
    4: 'Fri',
    5: 'Sat',
    6: 'Sun'  # was 'Sat': Sunday was mislabelled (cf. WEEK_MAP_SHORT's 'Su')
}
WEEK_MAP_SHORT = {
0: 'Mo',
1: 'Tu',
2: 'We',
3: 'Th',
4: 'Fr',
5: 'Sa',
6: 'Su'
}
MONTH_MAP = {
0: ('JAN', 'January'),
1: ('FEB', 'February'),
2: ('MAR', 'March'),
3: ('APR', 'April'),
4: ('MAY', 'May'),
5: ('JUN', 'June'),
6: ('JUL', 'July'),
7: ('AUG', 'August'),
8: ('SEP', 'September'),
9: ('OCT', 'October'),
10: ('NOV', 'November'),
11: ('DEC', 'December')
}
MONTH_MAP_SHORT = (
(1, 'January'),
(2, 'February'),
(3, 'March'),
(4, 'April'),
(5, 'May'),
(6, 'June'),
(7, 'July'),
(8, 'August'),
(9, 'September'),
(10, 'October'),
(11, 'November'),
(12, 'December')
)
WORKING_CHOICES = (
('WKDAY', 'Work Day'),
('SATUR', 'Work on Saturday'),
('WKHOM', 'Work at home'),
)
ABSENT_CHOICES = (
('PENDI', 'Vacation Request'),
('HOLIS', 'Vacation'),
('SICKD', 'Sickness Absence'),
('PUABS', 'Public Holiday'),
('SPECI', 'Special Leave'),
('RETRN', 'Return for Public Holiday'),
('TRAIN', 'Training'),
('DAYOD', 'Day on demand'),
)
DAYTYPE_CHOICES = (
('LINKD', 'Linked Day'),
('WKDAY', 'Work Day'),
('PENDI', 'Vacation Request'),
('HOLIS', 'Vacation'),
('SICKD', 'Sickness Absence'),
('PUABS', 'Public Holiday'),
('PUWRK', 'Work on Public Holiday'),
('RETRN', 'Return for Public Holiday'),
('SPECI', 'Special Leave'),
('TRAIN', 'Training'),
('DAYOD', 'Day on demand'),
('SATUR', 'Work on Saturday'),
('WKHOM', 'Work at home'),
('ROVER', 'Return for overtime'),
('OTHER', 'Other'),
)
MARKET_CHOICES = (
('AD', 'Administration'),
('BF', 'BPO Factory'),
('BG', 'Behr Germany'),
('BK', 'Behr Kirchberg'),
('CZ', 'Behr Czech'),
('EN', 'MCBC'),
('NE', 'Newton'),
('SA', 'Store Accounting'),
)
MARKET_CHOICES_MAP = {
c[0]: c[1] for c in MARKET_CHOICES
}
MARKET_CHOICES_LIST = [c[0] for c in MARKET_CHOICES]
TEAM_GROUPING = {
"CZ": ["CZ", "BK", "BG"],
"BK": ["CZ", "BK", "BG"],
"BG": ["CZ", "BK", "BG"],
}
PROCESS_CHOICES = (
('AD', 'Administration'),
('AO', 'Accounting Operations'),
('AP', 'Accounts Payable'),
('AR', 'Accounts Receivable'),
('CP', 'C&A PL'),
('CT', 'C&A AT'),
('FA', 'F&A'),
('HL', 'HRO Lodz'),
('HR', 'HRO'),
('HW', 'HRO Wro'),
('SC', 'Scanning'),
('SK', 'C&A CZSK'),
('TE', 'Travel & Expenses'),
)
def group_for_team(team):
    '''Returns the list of team codes grouped with *team*.

    Teams present in TEAM_GROUPING share their group; any other team
    stands alone.

    :param team: :class:`str` market/team code, e.g. "CZ".
    :rtype: :class:`list` of :class:`str`
    '''
    # `or` keeps the original fallback semantics for missing/empty groups.
    return TEAM_GROUPING.get(team) or [team]
def generate_year_box(year, id=''):
    '''Generates a select box with years -/+ 2 of the year provided.

    :param year: :class:`int` the initial year to start off with.
    :param id: :class:`str` the id attribute to give to the element.
    '''
    years = sorted((y, y) for y in range(year - 2, year + 3))
    return generate_select(years, id)
def generate_month_box(id=''):
    '''Generates a select box listing the twelve months (MONTH_MAP_SHORT).

    :param id: :class:`str` the id attribute to give to the element.
    '''
    return generate_select(MONTH_MAP_SHORT, id)
def generate_employee_box(admin_user, get_all=False):
    '''Generates a select box with all subordinates for a manager.

    Results are cached per (administrator id, get_all) pair.

    :param admin_user: :class:`timetracker.tracker.models.Tbluser` an
        instance of a manager who has subordinates underneath them.
    :param get_all: :class:`bool` Used to select or ignore disabled
        employees.
    '''
    admin_user = admin_user.get_administrator()
    cache_key = "employee_box:%s%s" % (admin_user.id, get_all)
    cached = cache.get(cache_key)
    if cached:
        return cached
    choices = [(user.id, user.name())
               for user in admin_user.get_subordinates(get_all=get_all)]
    choices.append(("null", "----------"))
    select = generate_select(choices, id="user_select")
    cache.set(cache_key, select)
    return select
def generate_select(data, id=''):
    """Generates an HTML select box from a tuple of (value, label) tuples.

    .. code-block:: python

        generate_select((
            ('val1', 'Value One'),
            ('val2', 'Value Two'),
            ('val3', 'Value Three')
        ))

    will return:-

    .. code-block:: html

        <select id=''>
            <option value="val1">Value One</option>
            <option value="val2">Value Two</option>
            <option value="val3">Value Three</option>
        </select>

    :param data: tuple of two-element tuples (lists of lists work too).
    :param id: :class:`str` id attribute for the element.
    :rtype: :class:`str`/HTML
    """
    # NOTE(review): values and labels are interpolated without HTML
    # escaping; if they can carry user-supplied text (e.g. user names)
    # this is an XSS vector and should be escaped upstream.
    parts = ['<select id="%s">\n' % id]
    parts.extend('\t<option value="%s">%s</option>\n' % (item[0], item[1])
                 for item in data)
    parts.append('</select>')
    return ''.join(parts)
def pad(string, padchr='0', amount=2):
    """Left-pads *string* with *padchr* up to *amount* characters.

    :param string: the value to pad (coerced to unicode first).
    :param padchr: the character you want to pad the string with.
    :param amount: the minimum length of the resulting string.
    :rtype: :class:`str`
    """
    string = unicode(string)
    shortfall = amount - len(string)
    if shortfall > 0:
        return (padchr * shortfall) + string
    return string
def nearest_half(num):
    '''Returns the number rounded to the *nearest* half.

    Note: the old docstring claimed this rounds down, but the code rounds
    to the nearest 0.5 step (ties follow Python's round()); see
    :func:`round_down` for flooring behaviour.

    :param num: :class:`int` the number to round
    :return: :class:`float`'''
    return int(round(num / 0.5)) * 0.5
def round_down(num, by_=0.5):
    '''Rounds *num* towards zero to a multiple of *by_* (0.5 by default).'''
    step = -by_ if num < 0 else by_
    return (num // step) * step
def hr_calculation(user, tracking_days, return_days):
    '''Overtime calculation as performed by the HR team.

    The official OT calculation cannot take 0.25hr increments into
    account, so each non-linked day's working time is rounded down to
    the half hour before being summed against the contracted shift
    length; each return day deducts one shift length.
    '''
    expected = worked = 0
    for day in tracking_days:
        if day.is_linked():
            continue
        worked += round_down(day.total_working_time())
        expected += user.shiftlength_as_float()
    for _ in return_days:
        worked -= user.shiftlength_as_float()
    return worked - expected
def float_to_time(timefloat):
    """Takes a float number of hours and renders it as a timestring.

    :param timefloat: :class:`float` which needs to be represented as a
        timestring (may be negative).
    :rtype: :class:`str` such as '00:12' or '09:15'
    """
    sign = ''
    if timefloat < 0:
        sign = '-'
        timefloat = -timefloat
    as_delta = datetime.timedelta(seconds=timefloat * 3600)
    # Drop the trailing ':SS' and left-pad to at least five characters.
    return pad(sign + str(as_delta)[:-3], amount=5)
def datetime_to_timestring(dt_):
    """
    Returns a pretty 'HH:MM:SS' string from a datetime/time object.

    For example,

    >>>datetime.time(hour=9, minute=10, second=30)
    ..."09:10:30"

    :param dt_: :class:`datetime.datetime` or :class:`datetime.time`
    :returns: :class:`str`
    """
    return ':'.join(pad(part) for part in (dt_.hour, dt_.minute, dt_.second))
def gen_process_list():
    '''
    Generates the regex alternation for the process list, e.g. "AD|AO|...".
    '''
    # str.join replaces the manual index-tracking concatenation loop.
    return '|'.join(code for code, _name in PROCESS_CHOICES)
def gen_team_list():
    '''
    Generates the regex alternation for the market list, e.g. "AD|BF|...".
    '''
    # str.join replaces the manual index-tracking concatenation loop.
    return '|'.join(code for code, _name in MARKET_CHOICES)
|
{
"content_hash": "b034b94729008e164355f426af5373e4",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 78,
"avg_line_length": 27.122093023255815,
"alnum_prop": 0.5810289389067524,
"repo_name": "AeroNotix/django-timetracker",
"id": "27eb984592238ff88ca70e89e1db10c9af49b14b",
"size": "9330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/datemaps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "60207"
},
{
"name": "JavaScript",
"bytes": "515697"
},
{
"name": "Python",
"bytes": "378129"
},
{
"name": "Shell",
"bytes": "5115"
}
],
"symlink_target": ""
}
|
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UpcomingAutopayEvent(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (drives to_dict()).
    swagger_types = {
        'client_contract_id': 'int',
        'charge_amount': 'float',
        'payment_method': 'str',
        'schedule_date': 'datetime'
    }
    # Python attribute name -> JSON key used in the API payload.
    attribute_map = {
        'client_contract_id': 'ClientContractId',
        'charge_amount': 'ChargeAmount',
        'payment_method': 'PaymentMethod',
        'schedule_date': 'ScheduleDate'
    }
    def __init__(self, client_contract_id=None, charge_amount=None, payment_method=None, schedule_date=None):  # noqa: E501
        """UpcomingAutopayEvent - a model defined in Swagger"""  # noqa: E501
        self._client_contract_id = None
        self._charge_amount = None
        self._payment_method = None
        self._schedule_date = None
        # No polymorphic discriminator is configured for this model.
        self.discriminator = None
        # Only route provided values through the property setters so
        # their validation (see payment_method) is applied.
        if client_contract_id is not None:
            self.client_contract_id = client_contract_id
        if charge_amount is not None:
            self.charge_amount = charge_amount
        if payment_method is not None:
            self.payment_method = payment_method
        if schedule_date is not None:
            self.schedule_date = schedule_date
    @property
    def client_contract_id(self):
        """Gets the client_contract_id of this UpcomingAutopayEvent.  # noqa: E501

        The ID of the contract.  # noqa: E501

        :return: The client_contract_id of this UpcomingAutopayEvent.  # noqa: E501
        :rtype: int
        """
        return self._client_contract_id
    @client_contract_id.setter
    def client_contract_id(self, client_contract_id):
        """Sets the client_contract_id of this UpcomingAutopayEvent.

        The ID of the contract.  # noqa: E501

        :param client_contract_id: The client_contract_id of this UpcomingAutopayEvent.  # noqa: E501
        :type: int
        """
        self._client_contract_id = client_contract_id
    @property
    def charge_amount(self):
        """Gets the charge_amount of this UpcomingAutopayEvent.  # noqa: E501

        The amount charged.  # noqa: E501

        :return: The charge_amount of this UpcomingAutopayEvent.  # noqa: E501
        :rtype: float
        """
        return self._charge_amount
    @charge_amount.setter
    def charge_amount(self, charge_amount):
        """Sets the charge_amount of this UpcomingAutopayEvent.

        The amount charged.  # noqa: E501

        :param charge_amount: The charge_amount of this UpcomingAutopayEvent.  # noqa: E501
        :type: float
        """
        self._charge_amount = charge_amount
    @property
    def payment_method(self):
        """Gets the payment_method of this UpcomingAutopayEvent.  # noqa: E501

        The payment method.  # noqa: E501

        :return: The payment_method of this UpcomingAutopayEvent.  # noqa: E501
        :rtype: str
        """
        return self._payment_method
    @payment_method.setter
    def payment_method(self, payment_method):
        """Sets the payment_method of this UpcomingAutopayEvent.

        The payment method.  # noqa: E501

        :param payment_method: The payment_method of this UpcomingAutopayEvent.  # noqa: E501
        :type: str
        """
        # Enum fixed by the API spec.  Note None is rejected here even
        # though __init__ treats None as "not provided".
        allowed_values = ["Other", "CreditCard", "DebitAccount", "ACH"]  # noqa: E501
        if payment_method not in allowed_values:
            raise ValueError(
                "Invalid value for `payment_method` ({0}), must be one of {1}"  # noqa: E501
                .format(payment_method, allowed_values)
            )
        self._payment_method = payment_method
    @property
    def schedule_date(self):
        """Gets the schedule_date of this UpcomingAutopayEvent.  # noqa: E501

        The date and time of the next payment.  # noqa: E501

        :return: The schedule_date of this UpcomingAutopayEvent.  # noqa: E501
        :rtype: datetime
        """
        return self._schedule_date
    @schedule_date.setter
    def schedule_date(self, schedule_date):
        """Sets the schedule_date of this UpcomingAutopayEvent.

        The date and time of the next payment.  # noqa: E501

        :param schedule_date: The schedule_date of this UpcomingAutopayEvent.  # noqa: E501
        :type: datetime
        """
        self._schedule_date = schedule_date
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise model objects inside dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(UpcomingAutopayEvent, dict):
            # If the generated model subclasses dict, include its raw
            # dict entries in the output as well.
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpcomingAutopayEvent):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "e34039cc5a2ca2ee271b2691e0ffb97f",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 123,
"avg_line_length": 31.541463414634148,
"alnum_prop": 0.588617383235385,
"repo_name": "mindbody/API-Examples",
"id": "31962d5bcfdf7794cd7386615abdc6d0a7c21f1c",
"size": "6483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SDKs/Python/swagger_client/models/upcoming_autopay_event.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "3610259"
},
{
"name": "Python",
"bytes": "2338642"
},
{
"name": "Ruby",
"bytes": "2284441"
},
{
"name": "Shell",
"bytes": "5058"
}
],
"symlink_target": ""
}
|
"""Gradient tape utilites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.lazy_loader import LazyLoader
# There is a circular dependency between this, ops.py, and
# distribution_strategy_context.
# TODO(b/117329403): Remove this circular dependency.
distribution_strategy_context = LazyLoader(
"distribution_strategy_context", globals(),
"tensorflow.python.distribute."
"distribution_strategy_context")
class Tape(object):
  """Represents a gradient propagation trace."""
  def __init__(self, tape):
    # Opaque C-extension tape handle.  Module-level helpers below reach
    # into `_tape` directly, so the attribute name is part of the contract.
    self._tape = tape
  def watched_variables(self):
    """Returns the variables watched by this tape (via the C extension)."""
    return pywrap_tensorflow.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
  """Pushes a new tape onto the tape stack.

  Returns:
    A `Tape` wrapping the newly created C tape object.
  """
  raw_tape = pywrap_tensorflow.TFE_Py_TapeSetNew(persistent,
                                                 watch_accessed_variables)
  return Tape(raw_tape)
def push_tape(tape):
  """Pushes an existing tape onto the tape stack."""
  # Takes a `Tape` wrapper and registers its underlying C tape object.
  pywrap_tensorflow.TFE_Py_TapeSetAdd(tape._tape)  # pylint: disable=protected-access
def watch(tape, tensor):
  """Marks this tensor to be watched by the given tape."""
  # Delegates straight to the C extension for the single tape.
  pywrap_tensorflow.TFE_Py_TapeWatch(tape._tape, tensor)  # pylint: disable=protected-access
def watch_variable(tape, variable):
  """Marks this variable to be watched by the given tape."""
  strategy, context = (
      distribution_strategy_context.get_strategy_and_replica_context())
  # Inside a replica context, watch the containing (aggregating) value;
  # otherwise watch each per-device component.
  if context:
    to_watch = [strategy.extended.value_container(variable)]
  else:
    to_watch = strategy.experimental_local_results(variable)
  for var in to_watch:
    pywrap_tensorflow.TFE_Py_TapeWatchVariable(tape._tape, var)  # pylint: disable=protected-access
def variable_accessed(variable):
  """Notifies all tapes in the stack that a variable has been accessed.

  Args:
    variable: variable to be watched.
  """
  strategy, context = (
      distribution_strategy_context.get_strategy_and_replica_context())
  # Same container/per-device split as watch_variable above.
  if context:
    containers = [strategy.extended.value_container(variable)]
  else:
    containers = strategy.experimental_local_results(variable)
  for var in containers:
    pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def variables_accessed(variables):
  """Notifies all tapes in the stack that variables have been accessed.

  Only trainable variables are marked as accessed.

  Args:
    variables: iterable of variables to mark as accessed.
  """
  strategy, context = (
      distribution_strategy_context.get_strategy_and_replica_context())
  if context:
    accessed = [strategy.extended.value_container(v)
                for v in variables if v.trainable]
  else:
    accessed = []
    for v in variables:
      if v.trainable:
        accessed.extend(strategy.experimental_local_results(v))
  for var in accessed:
    pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def pop_tape(tape):
  """Pops the given tape in the stack."""
  # Removes this specific tape; it need not be the top of the stack.
  pywrap_tensorflow.TFE_Py_TapeSetRemove(tape._tape)  # pylint: disable=protected-access
@contextlib.contextmanager
def stop_recording():
  # Temporarily pauses all tapes on this thread.  The `is_stopped` guard
  # makes nesting safe: only the outermost call toggles the state.
  is_stopped = pywrap_tensorflow.TFE_Py_TapeSetIsStopped()
  try:
    if not is_stopped:
      pywrap_tensorflow.TFE_Py_TapeSetStopOnThread()
    yield
  finally:
    if not is_stopped:
      pywrap_tensorflow.TFE_Py_TapeSetRestartOnThread()
def should_record(tensors):
  """Returns true if any tape in the stack watches any of these tensors."""
  # Delegates to the C extension.
  return pywrap_tensorflow.TFE_Py_TapeSetShouldRecord(tensors)
def record_operation(op_type, output_tensors, input_tensors, backward_function):
  """Records the operation on all tapes in the stack."""
  # `backward_function` is retained by the tapes for later gradient use.
  pywrap_tensorflow.TFE_Py_TapeSetRecordOperation(
      op_type, output_tensors, input_tensors, backward_function)
def delete_trace(tensor_id):
  """Deletes traces for this Tensor from all tapes in the stack."""
  # Keyed by the tensor's id rather than the tensor object itself.
  pywrap_tensorflow.TFE_Py_TapeSetDeleteTrace(tensor_id)
def could_possibly_record():
  """Returns True if any tape is active."""
  # Cheap emptiness check on the tape stack.
  return not pywrap_tensorflow.TFE_Py_TapeSetIsEmpty()
|
{
"content_hash": "d26e2c00521dee9f0e292581a0938e99",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 99,
"avg_line_length": 31.440298507462686,
"alnum_prop": 0.7298836933301686,
"repo_name": "alsrgv/tensorflow",
"id": "66a58af371bfc10490e3dfe45d1b68dc54f9fae3",
"size": "4902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/tape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
from globals import WORLD_INFO, ITEMS, LIFE
import alife
def is_life(entity):
	"""Return (is_life, id_key) for *entity*.

	Items carry a 'prefix' key and are keyed by 'uid'; life entities
	have no 'prefix' and are keyed by 'id'.
	"""
	if 'prefix' in entity:
		return False, 'uid'
	return True, 'id'
def create(entity, action, time):
	"""Schedule *action* for *entity* to fire *time* ticks from now.

	The timer list is kept sorted by ascending fire time so tick() only
	ever needs to inspect the head of the list.  Returns True.
	"""
	_life, _id_key = is_life(entity)
	_new_timer = {'action': action,
		'time': WORLD_INFO['ticks']+time,
		'owner': entity[_id_key],
		'life': _life}
	for _i, timer in enumerate(WORLD_INFO['timers']):
		# BUGFIX: this previously read `time['time']` — subscripting the
		# int argument, a TypeError whenever the list was non-empty.
		# Insert before the first existing timer that fires later, which
		# keeps the list in the ascending order tick() relies on.
		if _new_timer['time'] < timer['time']:
			WORLD_INFO['timers'].insert(_i, _new_timer)
			return True
	WORLD_INFO['timers'].append(_new_timer)
	# Return True on the append path too, for consistency with insertion.
	return True
def remove_by_owner(entity):
	"""Remove every timer whose owner resolves to *entity*."""
	_life, _id_key = is_life(entity)
	_stale = []
	for timer in WORLD_INFO['timers']:
		# Owners live either in the LIFE registry or the ITEMS registry.
		if timer['owner'] in LIFE:
			_owner = LIFE[timer['owner']]
		else:
			_owner = ITEMS[timer['owner']]
		if _owner == entity:
			_stale.append(timer)
	for timer in _stale:
		WORLD_INFO['timers'].remove(timer)
def tick():
	"""Fire every timer scheduled for the current world tick.

	Relies on WORLD_INFO['timers'] being sorted by ascending fire time:
	only the head of the list is ever inspected.
	"""
	if not WORLD_INFO['timers']:
		return False
	while WORLD_INFO['ticks'] == WORLD_INFO['timers'][0]['time']:
		_event = WORLD_INFO['timers'][0]
		# Resolve the owner back to a live entity or an item.
		if _event['life']:
			_owner = LIFE[_event['owner']]
		else:
			_owner = ITEMS[_event['owner']]
		alife.action.execute_small_script(_owner, _event['action'])
		# The script may itself have mutated the timer list, so check
		# membership before removing and bail out if it drained.
		if _event in WORLD_INFO['timers']:
			WORLD_INFO['timers'].remove(_event)
		if not WORLD_INFO['timers']:
			break
|
{
"content_hash": "e6b309fa2cd2f74683650f2228aeed53",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 62,
"avg_line_length": 21.227272727272727,
"alnum_prop": 0.6009992862241256,
"repo_name": "flags/Reactor-3",
"id": "4617037491cd48d15ae2b7523a0cb1adf8d60cb5",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Python",
"bytes": "1042784"
}
],
"symlink_target": ""
}
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
    'shape': [(), (3, 2)],
}))
class Log1pFunctionTest(unittest.TestCase):
    # Checks F.log1p against numpy.log1p for a scalar and a 2-D shape
    # (the decorator injects each shape as `self.shape`).
    def setUp(self):
        # Inputs drawn from [0.5, 1): well away from log1p's pole at x = -1.
        self.x = numpy.random.uniform(.5, 1, self.shape).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        self.ggx = \
            numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = F.log1p(x)
        testing.assert_allclose(
            numpy.log1p(self.x), y.data, atol=1e-7, rtol=1e-7)
    def test_log1p_forward_cpu(self):
        self.check_forward(self.x)
    @attr.gpu
    def test_log1p_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))
    def check_backward(self, x_data, y_grad):
        # dtype='d' runs the numerical gradient check in float64.
        gradient_check.check_backward(F.log1p, x_data, y_grad, dtype='d')
    def test_log1p_backward_cpu(self):
        self.check_backward(self.x, self.gy)
    @attr.gpu
    def test_log1p_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
    def check_double_backward(self, x_data, y_grad, x_grad_grad):
        gradient_check.check_double_backward(
            F.log1p, x_data, y_grad, x_grad_grad, dtype=numpy.float64)
    def test_log1p_double_backward_cpu(self):
        self.check_double_backward(self.x, self.gy, self.ggx)
    @attr.gpu
    def test_log1p_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
            cuda.to_gpu(self.ggx))
    def test_log1p(self):
        # The registered label of the function should match its name.
        self.assertEqual(F.Log1p().label, 'log1p')
testing.run_module(__name__, __file__)
|
{
"content_hash": "73178818e107b91519107e3b4ac471ea",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 29.671875,
"alnum_prop": 0.6456029489204844,
"repo_name": "aonotas/chainer",
"id": "2a3a69eb70b04000c5a1473f181c92a13b854cf9",
"size": "1899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/math_tests/test_logarithm_1p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
}
|
from RPi import GPIO
from time import sleep
GPIO.setmode(GPIO.BOARD)
PINS = (22, 24, 26)
latest_states = {}
for p in PINS:
GPIO.setup(p, GPIO.IN)
latest_states[p] = GPIO.input(p)
try:
while True:
for p in PINS:
state = GPIO.input(p)
if state and state != latest_states[p]:
print "%s on" % p
latest_states[p] = state
sleep(0.5)
except KeyboardInterrupt:
pass
|
{
"content_hash": "3aff91a603d28dc9e48743ba1ed89014",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 51,
"avg_line_length": 20.272727272727273,
"alnum_prop": 0.5672645739910314,
"repo_name": "madebymany/isthetoiletfree",
"id": "b892ccf64c1b8f6f7e5d5d1bb6a3fa7435af1844",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hardware_tests/switches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14289"
}
],
"symlink_target": ""
}
|
"""Print standard input or files, omitting repeated lines"""
import os
import sys
import fileinput
import argparse
def main(args):
    """Print the given files (or stdin), collapsing runs of repeated lines.

    As with POSIX uniq, the input must already be sorted for global
    de-duplication.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('files', nargs='*', help='files to unique (must be sorted first)')
    ns = ap.parse_args(args)
    def _print(lines):
        # Emit the de-duplicated lines of one input file.  Lines keep
        # their own newlines, so write them verbatim: the old
        # `print ''.join(lines)` appended a spurious extra newline.
        if lines is not None:
            sys.stdout.write(''.join(lines))
    try:
        prev_line = None
        lines = None
        for line in fileinput.input(ns.files):
            if fileinput.isfirstline():
                # BUGFIX: _print used to call fileinput.close() on every
                # invocation, which shut the input stream down while we
                # were still iterating it.  The `finally` below is the
                # only place the stream should be closed.
                _print(lines)
                lines = []
                prev_line = None
            if prev_line is None or line != prev_line:
                lines.append(line)
                prev_line = line
        _print(lines)
    finally:
        fileinput.close()
# Script entry point: pass along everything after the program name.
if __name__ == '__main__':
    main(sys.argv[1:])
|
{
"content_hash": "cae3b67bd60f3a9056a7e7aa6b9426ce",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 23.972972972972972,
"alnum_prop": 0.552423900789177,
"repo_name": "jsbain/stash",
"id": "4090f0f2a48e7d555c763100a0f290f86f039916",
"size": "887",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/uniq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "402187"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the note app: the Note and Tag models plus the
    Note<->Tag many-to-many relation.

    NOTE(review): Django replays migration files verbatim; do not edit
    the operations once this migration has been applied anywhere.
    """
    # Note.author targets the (possibly swapped) project user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('privacy_state', models.CharField(max_length=20)),
                ('creation_date', models.DateTimeField()),
                ('last_update', models.DateTimeField()),
                ('is_crypted', models.BooleanField(default=False)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                # Self-referential FK: tags may form an optional hierarchy.
                ('parent', models.ForeignKey(blank=True, to='note.Tag', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after both models exist because it references note.Tag.
        migrations.AddField(
            model_name='note',
            name='tags',
            field=models.ManyToManyField(to='note.Tag', null=True),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "2386fc03997afea69eaa2de21fe5fd33",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 114,
"avg_line_length": 35.3125,
"alnum_prop": 0.5327433628318584,
"repo_name": "MaximeRaynal/SimpleNote",
"id": "14ab5e9e063682addc7cbdf81bbb79ab99379a3d",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/note/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2260"
},
{
"name": "HTML",
"bytes": "5361"
},
{
"name": "JavaScript",
"bytes": "3952"
},
{
"name": "Python",
"bytes": "21335"
},
{
"name": "Shell",
"bytes": "926"
}
],
"symlink_target": ""
}
|
"""
An annotation library that draws overlays on the Raspberry Pi's camera preview.
Annotations include bounding boxes, text overlays, and points.
Annotations support partial opacity, however only with respect to the content in
the preview. A transparent fill value will cover up previously drawn overlay
under it, but not the camera content under it. A color of None can be given,
which will then not cover up overlay content drawn under the region.
Note: Overlays do not persist through to the storage layer so images saved from
the camera, will not contain overlays.
"""
import time
from PIL import Image, ImageDraw
import picamera
def _round_to_bit(value, power):
"""Rounds the given value to the next multiple of 2^power.
Args:
value: int to be rounded.
power: power of two which the value should be rounded up to.
Returns:
the result of value rounded to the next multiple 2^power.
"""
return (((value - 1) >> power) + 1) << power
def _round_buffer_dims(dims):
  """Round a (width, height) pair up to the overlay buffer granularity.

  The overlay buffer requires the width to be a multiple of 32 (2**5)
  and the height to be a multiple of 16 (2**4).
  """
  width, height = dims
  return (_round_to_bit(width, 5), _round_to_bit(height, 4))
# TODO(namiller): Add an annotator for images.
class Annotator:
  """Utility for managing annotations on the camera preview.

  All drawing happens on an in-memory RGBA buffer; call update() to push
  the buffer to the camera overlay.

  Args:
    camera: picamera.PiCamera camera object to overlay on top of.
    bg_color: PIL.ImageColor (with alpha) for the background of the overlays.
    default_color: PIL.ImageColor (with alpha) default for the drawn content.
  """
  def __init__(self, camera, bg_color=None, default_color=None,
               dimensions=None):
    # Fall back to the camera's configured resolution when no explicit
    # dimensions are given.
    self._dims = dimensions if dimensions else camera.resolution
    # The overlay buffer must be padded to the MMAL block size
    # (width to a multiple of 32, height to a multiple of 16).
    self._buffer_dims = _round_buffer_dims(self._dims)
    self._buffer = Image.new('RGBA', self._buffer_dims)
    self._overlay = camera.add_overlay(
        self._buffer.tobytes(), format='rgba', layer=3, size=self._buffer_dims)
    self._draw = ImageDraw.Draw(self._buffer)
    # Semi-transparent black background; opaque red default draw color.
    self._bg_color = bg_color if bg_color else (0, 0, 0, 0xA0)
    self._default_color = default_color if default_color else (0xFF, 0, 0, 0xFF)
    # MMALPort has a bug in enable.wrapper, where it always calls
    # self._pool.send_buffer(block=False) regardless of the port direction.
    # This is in contrast to setup time when it only calls
    # self._pool.send_all_buffers(block=False)
    # if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT.
    # Because of this bug updating an overlay once will log a MMAL_EAGAIN
    # error every update. This is safe to ignore as the user is driving
    # the renderer input port with calls to update() that dequeue buffers
    # and sends them to the input port (so the queue is empty when
    # send_all_buffers(block=False) is called from wrapper).
    # As a workaround, monkey patch MMALPortPool.send_buffer and
    # silence the "error" if thrown by our overlay instance.
    original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer
    def silent_send_buffer(zelf, **kwargs):
      try:
        original_send_buffer(zelf, **kwargs)
      except picamera.exc.PiCameraMMALError as error:
        # Only silence MMAL_EAGAIN for our target instance.
        our_target = self._overlay.renderer.inputs[0].pool == zelf
        if not our_target or error.status != 14:
          raise error
    picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer
  def update(self):
    """Updates the contents of the overlay."""
    self._overlay.update(self._buffer.tobytes())
  def stop(self):
    """Removes the overlay from the screen."""
    # fill=0 is fully transparent RGBA, wiping all drawn content.
    self._draw.rectangle((0, 0) + self._dims, fill=0)
    self.update()
  def clear(self):
    """Clears the contents of the overlay - leaving only the plain background.
    """
    self._draw.rectangle((0, 0) + self._dims, fill=self._bg_color)
  def bounding_box(self, rect, outline=None, fill=None):
    """Draws a bounding box around the specified rectangle.

    Args:
      rect: (x1, y1, x2, y2) rectangle to be drawn - where (x1,y1) and (x2, y2)
        are opposite corners of the desired rectangle.
      outline: PIL.ImageColor with which to draw the outline (defaults to the
        configured default_color).
      fill: PIL.ImageColor with which to fill the rectangel (defaults to None
        which will not cover up drawings under the region.
    """
    outline = self._default_color if outline is None else outline
    self._draw.rectangle(rect, fill=fill, outline=outline)
  # TODO(namiller): Add a font size parameter and load a truetype font.
  def text(self, location, text, color=None):
    """Draws the given text at the given location.

    Args:
      location: (x,y) point at which to draw the text (upper left corner).
      text: string to be drawn.
      color: PIL.ImageColor to draw the string in (defaults to default_color).
    """
    color = self._default_color if color is None else color
    self._draw.text(location, text, fill=color)
  def point(self, location, radius=1, color=None):
    """Draws a point of the given size at the given location.

    Args:
      location: (x,y) center of the point to be drawn.
      radius: the radius of the point to be drawn.
      color: The color to draw the point in (defaults to default_color).
    """
    color = self._default_color if color is None else color
    self._draw.ellipse(
        (location[0] - radius, location[1] - radius, location[0] + radius,
         location[1] + radius),
        fill=color)
def _main():
  """Exercise the Annotator against a live camera preview.

  Draws two fixed boxes, one box sweeping to the right, a text label and
  a point, refreshing once per second, then removes the overlay.
  """
  with picamera.PiCamera() as camera:
    # Resolution can be arbitrary.
    camera.resolution = (351, 561)
    camera.start_preview()
    overlay = Annotator(camera)
    for step in range(10):
      overlay.clear()
      overlay.bounding_box(
          (20, 20, 70, 70), outline=(0, 0xFF, 0, 0xFF), fill=0)
      overlay.bounding_box((10 * step, 10, 10 * step + 50, 60))
      overlay.bounding_box(
          (80, 0, 130, 50), outline=(0, 0, 0xFF, 0xFF), fill=0)
      overlay.text((100, 100), 'Hello World')
      overlay.point((10, 100), radius=5)
      overlay.update()
      time.sleep(1)
    overlay.stop()
    time.sleep(10)
# Manual hardware demo: requires an attached Raspberry Pi camera.
if __name__ == '__main__':
  _main()
|
{
"content_hash": "bdd9af88834b30baf804f2c8e849ec66",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 84,
"avg_line_length": 41.191616766467064,
"alnum_prop": 0.6373019334205553,
"repo_name": "google/aiyprojects-raspbian",
"id": "efb4ab8f470f77033970385ea427b743bec13b0f",
"size": "7478",
"binary": false,
"copies": "1",
"ref": "refs/heads/aiyprojects",
"path": "src/aiy/vision/annotator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "333768"
},
{
"name": "HTML",
"bytes": "468"
},
{
"name": "JavaScript",
"bytes": "31073"
},
{
"name": "Jupyter Notebook",
"bytes": "19786"
},
{
"name": "Makefile",
"bytes": "10938"
},
{
"name": "Python",
"bytes": "400565"
},
{
"name": "Shell",
"bytes": "7540"
}
],
"symlink_target": ""
}
|
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Util.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import sys
import copy
import re
import types
from collections import UserDict, UserList, UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
# Re-export commonly used type objects (Python 2: types.InstanceType
# exists only there, for old-style class instances).
InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType
# Probe for Unicode support: UnicodeType stays None on interpreters
# where the name 'unicode' does not exist.
try: unicode
except NameError: UnicodeType = None
else: UnicodeType = unicode
def dictify(keys, values, result=None):
    """Pair up keys and values into a dict.

    Args:
        keys: iterable of keys.
        values: iterable of values, matched to keys positionally.
        result: optional existing dict to update in place; a fresh dict
            is created when omitted.

    Returns:
        The populated dict (the same object as result when one was given).
    """
    # NOTE: the previous signature used a mutable default (result={}),
    # so successive calls without 'result' silently accumulated entries
    # in one shared dict.  A None sentinel gives each call a fresh dict.
    if result is None:
        result = {}
    for k, v in zip(keys, values):
        result[k] = v
    return result
# Cache the platform's alternate path separator ('/' on Windows).
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
    # My ActivePython 2.0.1 doesn't set os.altsep! What gives?
    _altsep = '/'
if _altsep:
    def rightmost_separator(path, sep):
        """Index of the last occurrence of either separator in path (-1 if none)."""
        return max(path.rfind(sep), path.rfind(_altsep))
else:
    def rightmost_separator(path, sep):
        """Index of the last occurrence of sep in path (-1 if none)."""
        return path.rfind(sep)
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
    """Check whether sequence str contains ANY of the items in set.
    Returns 1 on a match, 0 otherwise."""
    return 1 if any(item in str for item in set) else 0
def containsAll(str, set):
    """Check whether sequence str contains ALL of the items in set.
    Returns 1 when every item is present, 0 otherwise."""
    return 1 if all(item in str for item in set) else 0
def containsOnly(str, set):
    """Check whether sequence str contains ONLY items in set.
    Returns 1 when every element of str is in set, 0 otherwise."""
    return 1 if all(ch in set for ch in str) else 0
def splitext(path):
    """Split path into (root, ext) like os.path.splitext(), but faster.

    A suffix only counts as an extension when the final dot follows the
    last path separator and the suffix contains at least one character
    that is not a digit or a dot.
    """
    sep_pos = rightmost_separator(path, os.sep)
    dot_pos = path.rfind('.')
    if dot_pos <= sep_pos or containsOnly(path[dot_pos:], "0123456789."):
        return path, ""
    return path[:dot_pos], path[dot_pos:]
def updrive(path):
    """Return path with its drive letter (if any) upper-cased.

    Windows is inconsistent about drive-letter case, which would
    otherwise make command signatures unstable.
    """
    drive, tail = os.path.splitdrive(path)
    return (drive.upper() + tail) if drive else path
class NodeList(UserList):
    """A list work-alike (usually holding Nodes) with one twist:
    attribute access is broadcast to every element.  For example:

    >>> someList = NodeList([ ' foo ', ' bar ' ])
    >>> someList.strip()
    [ 'foo', 'bar' ]
    """
    def __nonzero__(self):
        # Python 2 truth value: non-empty means true.
        return bool(self.data)
    def __str__(self):
        return ' '.join(str(item) for item in self.data)
    def __iter__(self):
        return iter(self.data)
    def __call__(self, *args, **kwargs):
        # Call every element with the same arguments, collecting results.
        return self.__class__([item(*args, **kwargs) for item in self.data])
    def __getattr__(self, name):
        # Fetch the named attribute from every element.
        return self.__class__([getattr(item, name) for item in self.data])
# Matches a string that is exactly one variable reference: $NAME or ${NAME}.
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')

def get_environment_var(varstr):
    """Return the bare name of a single environment-variable reference.

    "$FOO" and "${FOO}" both yield "FOO"; any other string yields None.
    """
    match = _get_env_var.match(to_String(varstr))
    if not match:
        return None
    name = match.group(1)
    return name[1:-1] if name[0] == '{' else name
class DisplayEngine(object):
    """Callable used for SCons status output; can be muted via set_mode()."""
    # Class-level default; set_mode() shadows it on the instance.
    print_it = True
    def __call__(self, text, append_newline=1):
        """Write text to stdout (plus a newline unless append_newline is
        false), unless display has been switched off."""
        if not self.print_it:
            return
        if append_newline: text = text + '\n'
        try:
            # Python 2 'unicode'; this module predates Python 3 support.
            sys.stdout.write(unicode(text))
        except IOError:
            # Stdout might be connected to a pipe that has been closed
            # by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
            # IOError's here so that SCons can continue and shutdown
            # properly so that the .sconsign is correctly written
            # before SCons exits.
            pass
    def set_mode(self, mode):
        """Enable (truthy) or disable (falsy) all further output."""
        self.print_it = mode
def render_tree(root, child_func, prune=0, margin=[0], visited=None):
    """Render a tree of nodes into an ASCII tree view.

    root - the root node of the tree
    child_func - the function called to get the children of a node
    prune - don't visit the same node twice
    margin - the format of the left margin to use for children of root.
        1 results in a pipe, and 0 results in no pipe.
    visited - a dictionary of visited nodes in the current branch if not
        prune, or in the whole tree if prune.
    """
    label = str(root)
    if visited is None:
        visited = {}
    kids = child_func(root)
    # One two-character column per ancestor level: a pipe when that
    # ancestor still has siblings below it, blanks otherwise.
    prefix = ''.join("| " if pipe else "  " for pipe in margin[:-1])
    # A node already seen is rendered bracketed, without its children.
    if label in visited:
        return prefix + "+-[" + label + "]\n"
    pieces = [prefix + "+-" + label + "\n"]
    if not prune:
        # Track repeats per-branch only: give each subtree its own copy.
        visited = copy.copy(visited)
    visited[label] = 1
    last = len(kids) - 1
    for idx, kid in enumerate(kids):
        margin.append(idx < last)
        pieces.append(render_tree(kid, child_func, prune, margin, visited))
        margin.pop()
    return ''.join(pieces)
# IDX(x) -> 1 for truthy x, 0 for falsy (pre-bool() 'and/or' idiom).
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited=None):
    """
    Print a tree of nodes. This is like render_tree, except it prints
    lines directly instead of creating a string representation in memory,
    so that huge trees can be printed.
    root - the root node of the tree
    child_func - the function called to get the children of a node
    prune - don't visit the same node twice
    showtags - print status information to the left of each node line
    margin - the format of the left margin to use for children of root.
       1 results in a pipe, and 0 results in no pipe.
    visited - a dictionary of visited nodes in the current branch if not prune,
       or in the whole tree if prune.
    """
    rname = str(root)
    # Initialize 'visited' dict, if required
    if visited is None:
        visited = {}
    if showtags:
        # showtags == 2 additionally prints a one-time legend of the flags.
        if showtags == 2:
            legend = (' E = exists\n' +
                      ' R = exists in repository only\n' +
                      ' b = implicit builder\n' +
                      ' B = explicit builder\n' +
                      ' S = side effect\n' +
                      ' P = precious\n' +
                      ' A = always build\n' +
                      ' C = current\n' +
                      ' N = no clean\n' +
                      ' H = no cache\n' +
                      '\n')
            sys.stdout.write(unicode(legend))
        # Build the per-node status column; each string-index trick picks
        # either a blank or the flag letter depending on the node state.
        tags = ['[']
        tags.append(' E'[IDX(root.exists())])
        tags.append(' R'[IDX(root.rexists() and not root.exists())])
        tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
                           [0,2][IDX(root.has_builder())]])
        tags.append(' S'[IDX(root.side_effect)])
        tags.append(' P'[IDX(root.precious)])
        tags.append(' A'[IDX(root.always_build)])
        tags.append(' C'[IDX(root.is_up_to_date())])
        tags.append(' N'[IDX(root.noclean)])
        tags.append(' H'[IDX(root.nocache)])
        tags.append(']')
    else:
        tags = []
    # Two-character margin cell per ancestor: pipe if more siblings follow.
    def MMM(m):
        return [" ","| "][m]
    margins = list(map(MMM, margin[:-1]))
    children = child_func(root)
    # With prune on, a node already printed is shown bracketed, childless.
    if prune and rname in visited and children:
        sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n')
        return
    sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n')
    visited[rname] = 1
    if children:
        margin.append(1)
        idx = IDX(showtags)
        for C in children[:-1]:
            print_tree(C, child_func, prune, idx, margin, visited)
        # Last child: drop the pipe from its margin column.
        margin[-1] = 0
        print_tree(children[-1], child_func, prune, idx, margin, visited)
        margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
# We are using the following trick to speed up these
# functions. Default arguments are used to take a snapshot of
# the global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
# Type tuples consulted by the is_*() predicates below; built once at
# import time so the predicates can snapshot them as default arguments.
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Note that profiling data shows a speed-up when comparing
# explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
StringTypes = (str, unicode, UserString)
# Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)
# The extra default arguments below snapshot globals into fast locals
# (LOAD_FAST instead of LOAD_GLOBAL); callers pass only 'obj'.
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
    """True if obj is a dict or UserDict."""
    return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
    """True if obj is a list or UserList."""
    return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
    """True if obj is a list, tuple or UserList (strings excluded)."""
    return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
    """True if obj is a tuple."""
    return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
    """True if obj is a str, unicode or UserString."""
    return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
    """True if obj is a string or any non-sequence value."""
    # Profiling shows that there is an impressive speed-up of 2x
    # when explicitly checking for strings instead of just not
    # sequence when the argument (i.e. obj) is already a string.
    # But, if obj is a not string then it is twice as fast to
    # check only for 'not sequence'. The following code therefore
    # assumes that the obj argument is a string most of the time.
    return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
               StringTypes=StringTypes, SequenceTypes=SequenceTypes):
    """Recursive worker for flatten(): append the leaves of sequence to
    result in order (strings count as leaves, not sequences)."""
    for item in sequence:
        if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
            result.append(item)
        else:
            do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
            SequenceTypes=SequenceTypes, do_flatten=do_flatten):
    """Flatten a sequence to a non-nested list.
    Flatten() converts either a single scalar or a nested sequence
    to a non-nested list. Note that flatten() considers strings
    to be scalars instead of sequences like Python would.
    (Trailing default arguments snapshot globals into fast locals.)
    """
    # Scalars (including strings) flatten to a one-element list.
    if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
        return [obj]
    result = []
    for item in obj:
        if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
            result.append(item)
        else:
            do_flatten(item, result)
    return result
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
                     SequenceTypes=SequenceTypes, do_flatten=do_flatten):
    """Flatten a sequence to a non-nested list.
    Same as flatten(), but it does not handle the single scalar
    case. This is slightly more efficient when one knows that
    the sequence to flatten can not be a scalar.
    """
    result = []
    for item in sequence:
        if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
            result.append(item)
        else:
            do_flatten(item, result)
    return result
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
              isinstance=isinstance, str=str,
              UserString=UserString, BaseStringTypes=BaseStringTypes):
    """Convert s to a plain string: UserString unwraps to its data,
    anything else goes through str().  (Trailing default arguments
    snapshot globals into fast locals.)"""
    if isinstance(s,BaseStringTypes):
        # Early out when already a string!
        return s
    elif isinstance(s, UserString):
        # s.data can only be either a unicode or a regular
        # string. Please see the UserString initializer.
        return s.data
    else:
        return str(s)
def to_String_for_subst(s,
                        isinstance=isinstance, str=str, to_String=to_String,
                        BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
                        UserString=UserString):
    """Convert s to a string for command-line substitution.

    Sequences are converted element-by-element and joined with single
    spaces; UserString unwraps to its underlying data; anything else
    goes through str().  (Trailing default arguments snapshot globals
    into fast locals.)
    """
    # Note that the test cases are sorted by order of probability.
    if isinstance(s, BaseStringTypes):
        return s
    elif isinstance(s, SequenceTypes):
        l = []
        for e in s:
            l.append(to_String_for_subst(e))
        # BUG FIX: join the converted elements, not the original
        # sequence -- ' '.join(s) discarded the recursion above and
        # raised TypeError for any non-string element.
        return ' '.join(l)
    elif isinstance(s, UserString):
        # s.data can only be either a unicode or a regular
        # string. Please see the UserString initializer.
        return s.data
    else:
        return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
                            AttributeError=AttributeError):
    """Stringify obj for a build signature: prefer the object's own
    for_signature() method, falling back to to_String_for_subst().
    (Default arguments snapshot globals into fast locals.)"""
    try:
        for_signature = obj.for_signature
    except AttributeError:
        return to_String_for_subst(obj)
    return for_signature()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy. Currently only used by
# BuilderDict to actually prevent the copy operation (as invalid on that object).
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
# Maps exact concrete type -> copier function; semi_deepcopy() consults
# this table first.
_semi_deepcopy_dispatch = d = {}
def semi_deepcopy_dict(x, exclude = [] ):
    """Copy dict x one level deep, skipping keys listed in exclude.
    (The [] default is never mutated here, so sharing it is safe.)"""
    copy = {}
    for key, val in x.items():
        # The regular Python copy.deepcopy() also deepcopies the key,
        # as follows:
        #
        # copy[semi_deepcopy(key)] = semi_deepcopy(val)
        #
        # Doesn't seem like we need to, but we'll comment it just in case.
        if key not in exclude:
            copy[key] = semi_deepcopy(val)
    return copy
d[dict] = semi_deepcopy_dict
def _semi_deepcopy_list(x):
    """Copy list x one level deep (elements are semi-deep-copied)."""
    return list(map(semi_deepcopy, x))
d[list] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
    """Copy tuple x one level deep (elements are semi-deep-copied)."""
    return tuple(map(semi_deepcopy, x))
d[tuple] = _semi_deepcopy_tuple
def semi_deepcopy(x):
    """Make a 'semi-deep' copy of x: lists, tuples and dicts (and their
    UserDict/UserList wrappers) get fresh containers, while the contained
    leaf objects are shared by reference.  Objects exposing a callable
    __semi_deepcopy__() are asked to copy themselves."""
    copier = _semi_deepcopy_dispatch.get(type(x))
    if copier:
        return copier(x)
    if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__):
        return x.__semi_deepcopy__()
    if isinstance(x, UserDict):
        return x.__class__(semi_deepcopy_dict(x))
    if isinstance(x, UserList):
        return x.__class__(_semi_deepcopy_list(x))
    return x
class Proxy(object):
    """A simple generic Proxy class, forwarding all calls to
    subject. So, for the benefit of the python newbie, what does
    this really mean? Well, it means that you can take an object, let's
    call it 'objA', and wrap it in this Proxy class, with a statement
    like this
    proxyObj = Proxy(objA),
    Then, if in the future, you do something like this
    x = proxyObj.var1,
    since Proxy does not have a 'var1' attribute (but presumably objA does),
    the request actually is equivalent to saying
    x = objA.var1
    Inherit from this class to create a Proxy.
    Note that, with new-style classes, this does *not* work transparently
    for Proxy subclasses that use special .__*__() method names, because
    those names are now bound to the class, not the individual instances.
    You now need to know in advance which .__*__() method names you want
    to pass on to the underlying Proxy object, and specifically delegate
    their calls like this:
    class Foo(Proxy):
    __str__ = Delegate('__str__')
    """
    def __init__(self, subject):
        """Wrap an object as a Proxy object"""
        self._subject = subject
    def __getattr__(self, name):
        """Retrieve an attribute from the wrapped object. If the named
        attribute doesn't exist, AttributeError is raised"""
        return getattr(self._subject, name)
    def get(self):
        """Retrieve the entire wrapped object"""
        return self._subject
    def __cmp__(self, other):
        # Python 2 three-way comparison (ignored by Python 3): compare
        # via the wrapped subject when 'other' is type-compatible with
        # it, otherwise fall back to comparing instance dicts.
        if issubclass(other.__class__, self._subject.__class__):
            return cmp(self._subject, other)
        return cmp(self.__dict__, other.__dict__)
class Delegate(object):
    """Descriptor that forwards an attribute fetch to the wrapped
    _subject of a Proxy instance.  Accessed on the class itself
    (rather than an instance), it evaluates to the descriptor object.

    Typical use:
    class Foo(Proxy):
    __str__ = Delegate('__str__')
    """
    def __init__(self, attribute):
        # Name of the attribute to look up on the proxied subject.
        self.attribute = attribute
    def __get__(self, obj, cls):
        # Class access hands back the descriptor itself; instance
        # access delegates to the underlying subject.
        if not isinstance(obj, cls):
            return self
        return getattr(obj._subject, self.attribute)
# attempt to load the windows registry module:
can_read_reg = 0
try:
    # Prefer the standard-library winreg module.
    import winreg
    can_read_reg = 1
    hkey_mod = winreg
    RegOpenKeyEx = winreg.OpenKeyEx
    RegEnumKey = winreg.EnumKey
    RegEnumValue = winreg.EnumValue
    RegQueryValueEx = winreg.QueryValueEx
    RegError = winreg.error
except ImportError:
    try:
        # Fall back to the pywin32 equivalents.
        import win32api
        import win32con
        can_read_reg = 1
        hkey_mod = win32con
        RegOpenKeyEx = win32api.RegOpenKeyEx
        RegEnumKey = win32api.RegEnumKey
        RegEnumValue = win32api.RegEnumValue
        RegQueryValueEx = win32api.RegQueryValueEx
        RegError = win32api.error
    except ImportError:
        # No registry access at all: RegError becomes a private
        # exception type that nothing ever raises.
        class _NoError(Exception):
            pass
        RegError = _NoError
WinError = None
# Make sure we have a definition of WindowsError so we can
# run platform-independent tests of Windows functionality on
# platforms other than Windows. (WindowsError is, in fact, an
# OSError subclass on Windows.)
class PlainWindowsError(OSError):
    pass
try:
    WinError = WindowsError
except NameError:
    WinError = PlainWindowsError
if can_read_reg:
    HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
    HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
    HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
    HKEY_USERS = hkey_mod.HKEY_USERS
    def RegGetValue(root, key):
        """This utility function returns a value in the registry
        without having to open the key first. Only available on
        Windows platforms with a version of Python that can read the
        registry. Returns the same thing as
        SCons.Util.RegQueryValueEx, except you just specify the entire
        path to the value, and don't have to bother opening the key
        first. So:
        Instead of:
        k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
        r'SOFTWARE\Microsoft\Windows\CurrentVersion')
        out = SCons.Util.RegQueryValueEx(k,
        'ProgramFilesDir')
        You can write:
        out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
        r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
        """
        # I would use os.path.split here, but it's not a filesystem
        # path...
        p = key.rfind('\\') + 1
        keyp = key[:p-1] # -1 to omit trailing slash
        val = key[p:]
        k = RegOpenKeyEx(root, keyp)
        return RegQueryValueEx(k,val)
else:
    # No registry available: export placeholder constants, and stub
    # functions that always raise the Windows error type defined above.
    HKEY_CLASSES_ROOT = None
    HKEY_LOCAL_MACHINE = None
    HKEY_CURRENT_USER = None
    HKEY_USERS = None
    def RegGetValue(root, key):
        raise WinError
    def RegOpenKeyEx(root, key):
        raise WinError
# Platform-specific search for an executable on PATH.  The variant to
# use is chosen once, at import time.  Note: 'reject' defaults to a
# shared mutable list, but it is only read here, never mutated.
if sys.platform == 'win32':
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """Find 'file' on 'path' (default $PATH), trying each extension
        in 'pathext' (default $PATHEXT), skipping entries in 'reject'.
        Returns the normalized path or None."""
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if pathext is None:
            try:
                pathext = os.environ['PATHEXT']
            except KeyError:
                pathext = '.COM;.EXE;.BAT;.CMD'
        if is_String(pathext):
            pathext = pathext.split(os.pathsep)
        # If the file already carries one of the extensions, search for
        # it as-is rather than appending another extension.
        for ext in pathext:
            if ext.lower() == file[-len(ext):].lower():
                pathext = ['']
                break
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for dir in path:
            f = os.path.join(dir, file)
            for ext in pathext:
                fext = f + ext
                if os.path.isfile(fext):
                    try:
                        reject.index(fext)
                    except ValueError:
                        return os.path.normpath(fext)
                    continue
        return None
elif os.name == 'os2':
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """OS/2 variant: like the win32 one, but with a fixed default
        extension list and no $PATHEXT lookup."""
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if pathext is None:
            pathext = ['.exe', '.cmd']
        for ext in pathext:
            if ext.lower() == file[-len(ext):].lower():
                pathext = ['']
                break
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for dir in path:
            f = os.path.join(dir, file)
            for ext in pathext:
                fext = f + ext
                if os.path.isfile(fext):
                    try:
                        reject.index(fext)
                    except ValueError:
                        return os.path.normpath(fext)
                    continue
        return None
else:
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """POSIX variant: 'pathext' is ignored; a candidate must exist
        and have at least one execute permission bit set."""
        import stat
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for d in path:
            f = os.path.join(d, file)
            if os.path.isfile(f):
                try:
                    st = os.stat(f)
                except OSError:
                    # os.stat() raises OSError, not IOError if the file
                    # doesn't exist, so in this case we let IOError get
                    # raised so as to not mask possibly serious disk or
                    # network issues.
                    continue
                # 0111 is a Python 2 octal literal: any execute bit.
                if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
                    try:
                        reject.index(f)
                    except ValueError:
                        return os.path.normpath(f)
                    continue
        return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
                delete_existing=1, canonicalize=None):
    """This prepends newpath elements to the given oldpath. Will only
    add any particular path once (leaving the first one it encounters
    and ignoring the rest, to preserve path order), and will
    os.path.normpath and os.path.normcase all paths to help assure
    this. This can also handle the case where the given old path
    variable is a list instead of a string, in which case a list will
    be returned instead of a string.
    Example:
    Old Path: "/foo/bar:/foo"
    New Path: "/biz/boom:/foo"
    Result: "/biz/boom:/foo:/foo/bar"
    If delete_existing is 0, then adding a path that exists will
    not move it to the beginning; it will stay where it is in the
    list.
    If canonicalize is not None, it is applied to each element of
    newpath before use.
    """
    orig = oldpath
    is_list = 1
    paths = orig
    # Accept either a separator-joined string or a list/tuple of paths;
    # remember which so the result has the same shape.
    if not is_List(orig) and not is_Tuple(orig):
        paths = paths.split(sep)
        is_list = 0
    if is_String(newpath):
        newpaths = newpath.split(sep)
    elif not is_List(newpath) and not is_Tuple(newpath):
        newpaths = [ newpath ] # might be a Dir
    else:
        newpaths = newpath
    if canonicalize:
        newpaths=list(map(canonicalize, newpaths))
    if not delete_existing:
        # First uniquify the old paths, making sure to
        # preserve the first instance (in Unix/Linux,
        # the first one wins), and remembering them in normpaths.
        # Then insert the new paths at the head of the list
        # if they're not already in the normpaths list.
        result = []
        normpaths = []
        for path in paths:
            if not path:
                continue
            # Comparison key: normalized + case-folded form of the path.
            normpath = os.path.normpath(os.path.normcase(path))
            if normpath not in normpaths:
                result.append(path)
                normpaths.append(normpath)
        newpaths.reverse() # since we're inserting at the head
        for path in newpaths:
            if not path:
                continue
            normpath = os.path.normpath(os.path.normcase(path))
            if normpath not in normpaths:
                result.insert(0, path)
                normpaths.append(normpath)
        paths = result
    else:
        newpaths = newpaths + paths # prepend new paths
        normpaths = []
        paths = []
        # now we add them only if they are unique
        for path in newpaths:
            normpath = os.path.normpath(os.path.normcase(path))
            if path and not normpath in normpaths:
                paths.append(path)
                normpaths.append(normpath)
    if is_list:
        return paths
    else:
        return sep.join(paths)
def AppendPath(oldpath, newpath, sep = os.pathsep,
               delete_existing=1, canonicalize=None):
    """Append new path elements to the given old path.

    Each element appears only once in the result; duplicate detection
    compares os.path.normpath/os.path.normcase-normalized spellings so
    variant spellings collapse together.  With delete_existing true
    (the default) a re-added element moves to its new, later position;
    with it false an already-present element keeps its place.  If
    oldpath is a list, a list is returned, otherwise a string.

    Example:
        Old Path: "/foo/bar:/foo"
        New Path: "/biz/boom:/foo"
        Result:   "/foo/bar:/biz/boom:/foo"

    If canonicalize is not None, it is applied to each element of
    newpath before use.
    """
    def _norm(p):
        # Canonical spelling used only for duplicate detection.
        return os.path.normpath(os.path.normcase(p))

    if is_List(oldpath) or is_Tuple(oldpath):
        paths = oldpath
        as_string = False
    else:
        paths = oldpath.split(sep)
        as_string = True

    if is_String(newpath):
        newpaths = newpath.split(sep)
    elif is_List(newpath) or is_Tuple(newpath):
        newpaths = newpath
    else:
        newpaths = [newpath]  # might be a Dir
    if canonicalize:
        newpaths = [canonicalize(p) for p in newpaths]

    if not delete_existing:
        # Keep the old entries exactly where they are, then tack on
        # only the genuinely new elements at the end.
        result = []
        seen = []
        for path in paths:
            if not path:
                continue
            result.append(path)
            seen.append(_norm(path))
        for path in newpaths:
            if not path:
                continue
            norm = _norm(path)
            if norm not in seen:
                result.append(path)
                seen.append(norm)
        paths = result
    else:
        # Append everything, then keep only the *last* occurrence of
        # each element: scan the concatenation backwards, dedupe, and
        # flip the survivors back into forward order.
        seen = []
        kept = []
        for path in reversed(paths + newpaths):
            norm = _norm(path)
            if path and norm not in seen:
                kept.append(path)
                seen.append(norm)
        kept.reverse()
        paths = kept

    if as_string:
        return sep.join(paths)
    return paths
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
    """Prepend *path* to env_dict[key] if it is not already present.

    env_dict[key] is treated like a PATH-style variable: either a list
    of directories or a single sep-separated string.  Membership is
    tested on os.path.normcase-normalized values.  A missing key is
    simply created with *path* as its value.
    """
    try:
        current = env_dict[key]
    except KeyError:
        env_dict[key] = path
        return
    is_string = not is_List(current)
    paths = current.split(sep) if is_string else current
    normalized = [os.path.normcase(p) for p in paths]
    if os.path.normcase(path) not in normalized:
        paths = [path] + paths
    # Write the value back in the same shape (list vs. string) we found.
    env_dict[key] = sep.join(paths) if is_string else paths
if sys.platform == 'cygwin':
    def get_native_path(path):
        """Transform an absolute path into a native path for the system.

        Cygwin build: shell out to ``cygpath -w`` to convert a Cygwin
        path into a Windows one.
        """
        converted = os.popen('cygpath -w ' + path).read()
        return converted.replace('\n', '')
else:
    def get_native_path(path):
        """Transform an absolute path into a native path for the system.

        Non-Cygwin build: the path is already native, return it
        unchanged.
        """
        return path
# Module-wide DisplayEngine instance used for user-facing progress output.
display = DisplayEngine()
def Split(arg):
    """Coerce *arg* into a list of arguments.

    Strings are split on whitespace, lists and tuples pass through
    untouched, and anything else is wrapped in a one-element list.
    """
    if is_String(arg):
        return arg.split()
    if is_List(arg) or is_Tuple(arg):
        return arg
    return [arg]
class CLVar(UserList):
    """A class for command-line construction variables.

    This is a list that uses Split() to split an initial string along
    white-space arguments, and similarly to split any strings that get
    added. This allows us to Do the Right Thing with Append() and
    Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
    arg2') regardless of whether a user adds a list or a string to a
    command-line construction variable.
    """
    def __init__(self, seq = []):
        # Split() turns strings into word lists; lists/tuples pass through.
        UserList.__init__(self, Split(seq))
    def __add__(self, other):
        # Coerce the right operand through CLVar so strings get split.
        return UserList.__add__(self, CLVar(other))
    def __radd__(self, other):
        # Coerce the left operand through CLVar so strings get split.
        return UserList.__radd__(self, CLVar(other))
    def __coerce__(self, other):
        # Python 2 coercion hook; ignored by Python 3.
        return (self, CLVar(other))
    def __str__(self):
        # Render as a space-joined command-line fragment.
        return ' '.join(self.data)
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
    """Dictionary that remembers the order in which keys were added.

    Predates the standard library's ordered dictionaries; keys(),
    values() and items() all return plain lists in first-inserted
    order, and re-assigning an existing key keeps its position.
    """
    def __init__(self, dict = None):
        # _keys must exist before UserDict.__init__ triggers __setitem__.
        self._keys = []
        UserDict.__init__(self, dict)

    def __setitem__(self, key, item):
        UserDict.__setitem__(self, key, item)
        if key not in self._keys:
            self._keys.append(key)

    def __delitem__(self, key):
        # UserDict raises KeyError first, so _keys stays consistent.
        UserDict.__delitem__(self, key)
        self._keys.remove(key)

    def clear(self):
        UserDict.clear(self)
        self._keys = []

    def copy(self):
        """Return a shallow OrderedDict copy preserving insertion order."""
        duplicate = OrderedDict()
        duplicate.update(self)
        return duplicate

    def keys(self):
        """Return a new list of keys in insertion order."""
        return list(self._keys)

    def values(self):
        """Return the values in key-insertion order."""
        return [self.get(k) for k in self._keys]

    def items(self):
        """Return (key, value) pairs in key-insertion order."""
        return [(k, self.get(k)) for k in self._keys]

    def popitem(self):
        """Remove and return the most recently inserted (key, value)."""
        if not self._keys:
            raise KeyError('dictionary is empty')
        key = self._keys[-1]
        val = self[key]
        del self[key]
        return (key, val)

    def setdefault(self, key, failobj = None):
        UserDict.setdefault(self, key, failobj)
        if key not in self._keys:
            self._keys.append(key)

    def update(self, dict):
        for (key, val) in dict.items():
            self[key] = val
class Selector(OrderedDict):
    """A callable ordered dictionary that maps file suffixes to
    dictionary values. We preserve the order in which items are added
    so that get_suffix() calls always return the first suffix added."""
    def __call__(self, env, source, ext=None):
        # ext defaults to the suffix of the first source node; an
        # empty source list maps to the empty suffix.
        if ext is None:
            try:
                ext = source[0].get_suffix()
            except IndexError:
                ext = ""
        try:
            return self[ext]
        except KeyError:
            # Try to perform Environment substitution on the keys of
            # the dictionary before giving up.
            s_dict = {}
            for (k,v) in self.items():
                if k is not None:
                    s_k = env.subst(k)
                    if s_k in s_dict:
                        # We only raise an error when variables point
                        # to the same suffix. If one suffix is literal
                        # and a variable suffix contains this literal,
                        # the literal wins and we don't raise an error.
                        # NOTE(review): as written this raises whenever
                        # *any* two keys substitute to the same suffix;
                        # confirm the literal-wins behavior described
                        # above actually holds in this version.
                        raise KeyError(s_dict[s_k][0], k, s_k)
                    s_dict[s_k] = (k,v)
            try:
                return s_dict[ext][1]
            except KeyError:
                try:
                    # Fall back to the wildcard None entry, if any.
                    return self[None]
                except KeyError:
                    return None
if sys.platform == 'cygwin':
    # On Cygwin, os.path.normcase() lies about case handling, so just
    # report that the underlying Windows filesystem is case-insensitive.
    def case_sensitive_suffixes(s1, s2):
        """Suffix comparison is never case-sensitive on Cygwin."""
        return 0
else:
    def case_sensitive_suffixes(s1, s2):
        """Return whether *s1* and *s2* still differ once normalized
        the way the local filesystem treats case."""
        normalized = (os.path.normcase(s1), os.path.normcase(s2))
        return normalized[0] != normalized[1]
def adjustixes(fname, pre, suf, ensure_suffix=False):
    """Return *fname* adjusted to carry prefix *pre* and suffix *suf*.

    The prefix is applied to the basename only (any directory part is
    preserved) and skipped when the basename already starts with it.
    The suffix is appended unless the name already ends with it, and
    only if either *ensure_suffix* is true or the name has no
    extension at all.
    """
    if pre:
        path, fn = os.path.split(os.path.normpath(fname))
        # Idiomatic startswith() instead of the old fn[:len(pre)] slice.
        if not fn.startswith(pre):
            fname = os.path.join(path, pre + fn)
    # Only append a suffix if the suffix we're going to add isn't already
    # there, and if either we've been asked to ensure the specific suffix
    # is present or there's no suffix on it at all.
    if suf and not fname.endswith(suf) and \
       (ensure_suffix or not splitext(fname)[1]):
        fname = fname + suf
    return fname
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
    """Return a list of the elements in s, but without duplicates.

    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
    unique("abcabc") some permutation of ["a", "b", "c"], and
    unique(([1, 2], [2, 3], [1, 2])) some permutation of
    [[2, 3], [1, 2]].

    Three strategies are tried, fastest first: hashing (usually linear,
    requires hashable elements), sorting (O(N*log2(N)), requires a
    total ordering), and finally pairwise equality comparison
    (quadratic, requires only equality testing).
    """
    n = len(s)
    if n == 0:
        return []
    # Strategy 1: hash everything into a dict.  Fails fast with
    # TypeError when any element is unhashable, so trying it is cheap.
    try:
        return list(dict.fromkeys(s))
    except TypeError:
        pass  # move on to the next method
    # Strategy 2: sort to bring equal elements together, then compact
    # runs of duplicates in a single in-place pass.  Python's sort is
    # efficient in the presence of many duplicate elements.
    try:
        t = sorted(s)
    except TypeError:
        pass  # move on to the next method
    else:
        write = 1
        last = t[0]
        for read in range(1, n):
            if t[read] != last:
                last = t[read]
                t[write] = last
                write += 1
        return t[:write]
    # Strategy 3: brute force, using only equality testing.
    result = []
    for x in s:
        if x not in result:
            result.append(x)
    return result
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
    """Return seq's elements in order, dropping later duplicates.

    Duplicates are detected on idfun(item) (identity by default), so
    unhashable items can still be de-duplicated through a hashable
    key function.  (Alex Martelli, ASPN Python Cookbook recipe 52560.)
    """
    if idfun is None:
        idfun = lambda x: x
    seen = {}
    kept = []
    for element in seq:
        key = idfun(element)
        if key not in seen:
            seen[key] = 1
            kept.append(element)
    return kept
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
    """Return seq without duplicates, preserving first-seen order.

    Faster variant of uniquer() for sequences whose items are all
    hashable: no key-function indirection, just set membership.
    """
    seen = set()
    kept = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            kept.append(item)
    return kept
# Recipe 19.11 "Reading Lines with Continuation Characters",
# by Alex Martelli, straight from the Python CookBook (2nd edition).
def logical_lines(physical_lines, joiner=''.join):
    """Yield logical lines, gluing together physical lines that end in
    a backslash continuation character.

    (Recipe 19.11 "Reading Lines with Continuation Characters" by
    Alex Martelli, Python Cookbook, 2nd edition.)
    """
    pending = []
    for line in physical_lines:
        stripped = line.rstrip()
        if stripped.endswith('\\'):
            # Continuation: buffer the text minus the trailing backslash.
            pending.append(stripped[:-1])
        else:
            # Terminator: emit the buffered pieces plus this line.
            pending.append(line)
            yield joiner(pending)
            pending = []
    if pending:
        # Input ended in the middle of a continuation; flush what we have.
        yield joiner(pending)
class LogicalLines(object):
    """Wrapper exposing logical_lines() through a readlines()-style
    call on a file object, so callers can consume all backslash-joined
    logical lines at once.
    """
    def __init__(self, fileobj):
        self.fileobj = fileobj

    def readlines(self):
        """Return every logical line of the wrapped file as a list."""
        return list(logical_lines(self.fileobj))
class UniqueList(UserList):
    """A list that lazily prunes duplicate entries.

    Mutating operations merely mark the contents dirty; the actual
    de-duplication (uniquer_hashables, first occurrence wins) is
    deferred until the next read-style access, so bulk building stays
    cheap.  Elements must therefore be hashable.
    """
    def __init__(self, seq = []):
        UserList.__init__(self, seq)
        self.unique = True
    def __make_unique(self):
        # Prune duplicates on demand; cheap no-op when already clean.
        if not self.unique:
            self.data = uniquer_hashables(self.data)
            self.unique = True
    # Read-style accessors de-duplicate before delegating.
    def __lt__(self, other):
        self.__make_unique()
        return UserList.__lt__(self, other)
    def __le__(self, other):
        self.__make_unique()
        return UserList.__le__(self, other)
    def __eq__(self, other):
        self.__make_unique()
        return UserList.__eq__(self, other)
    def __ne__(self, other):
        self.__make_unique()
        return UserList.__ne__(self, other)
    def __gt__(self, other):
        self.__make_unique()
        return UserList.__gt__(self, other)
    def __ge__(self, other):
        self.__make_unique()
        return UserList.__ge__(self, other)
    def __cmp__(self, other):
        # Python 2 only; ignored by Python 3.
        self.__make_unique()
        return UserList.__cmp__(self, other)
    def __len__(self):
        self.__make_unique()
        return UserList.__len__(self)
    def __getitem__(self, i):
        self.__make_unique()
        return UserList.__getitem__(self, i)
    def __setitem__(self, i, item):
        UserList.__setitem__(self, i, item)
        self.unique = False
    def __getslice__(self, i, j):
        # Python 2 only slicing protocol.
        self.__make_unique()
        return UserList.__getslice__(self, i, j)
    def __setslice__(self, i, j, other):
        # Python 2 only slicing protocol.
        UserList.__setslice__(self, i, j, other)
        self.unique = False
    # Operations that can introduce duplicates just mark the result dirty.
    def __add__(self, other):
        result = UserList.__add__(self, other)
        result.unique = False
        return result
    def __radd__(self, other):
        result = UserList.__radd__(self, other)
        result.unique = False
        return result
    def __iadd__(self, other):
        result = UserList.__iadd__(self, other)
        result.unique = False
        return result
    def __mul__(self, other):
        result = UserList.__mul__(self, other)
        result.unique = False
        return result
    def __rmul__(self, other):
        result = UserList.__rmul__(self, other)
        result.unique = False
        return result
    def __imul__(self, other):
        result = UserList.__imul__(self, other)
        result.unique = False
        return result
    def append(self, item):
        UserList.append(self, item)
        self.unique = False
    def insert(self, i, item):
        # BUG FIX: previously declared as insert(self, i) and forwarded
        # only the index, so every call raised TypeError; now matches
        # list.insert(i, item).
        UserList.insert(self, i, item)
        self.unique = False
    def count(self, item):
        self.__make_unique()
        return UserList.count(self, item)
    def index(self, item):
        self.__make_unique()
        return UserList.index(self, item)
    def reverse(self):
        self.__make_unique()
        UserList.reverse(self)
    def sort(self, *args, **kwds):
        self.__make_unique()
        return UserList.sort(self, *args, **kwds)
    def extend(self, other):
        UserList.extend(self, other)
        self.unique = False
class Unbuffered(object):
    """
    A proxy class that wraps a file object, flushing after every write,
    and delegating everything else to the wrapped object.
    """
    def __init__(self, file):
        self.file = file
        self.softspace = 0  ## backward compatibility; not supported in Py3k
    def write(self, arg):
        try:
            self.file.write(arg)
            self.file.flush()
        except IOError:
            # Stdout might be connected to a pipe that has been closed
            # by now. The most likely reason for the pipe being closed
            # is that the user has pressed Ctrl-C, in which case SCons
            # is currently shutting down. We therefore ignore IOError
            # here so that SCons can continue and shut down properly,
            # ensuring the .sconsign is correctly written before exit.
            pass
    def __getattr__(self, attr):
        # Everything except write() falls through to the wrapped file.
        return getattr(self.file, attr)
def make_path_relative(path):
    """Convert an absolute path name into a relative one.

    The drive (if any) and all leading separators are stripped;
    already-relative input is returned unchanged.
    """
    if not os.path.isabs(path):
        return path
    import re
    drive, tail = os.path.splitdrive(path)
    if drive:
        tail = tail[1:]
    else:
        # No drive letter: peel off every leading slash at once.
        tail = re.compile("/*(.*)").findall(tail)[0]
    assert not os.path.isabs(tail), tail
    return tail
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(obj, function, name=None):
    """
    Adds either a bound method to an instance or a plain function to a
    class (where it becomes a method on attribute access).  If name is
    omitted the name of the specified function is used by default.

    Example:
        a = A()
        def f(self, x, y):
            self.z = x + y
        AddMethod(A, f, "add")
        a.add(2, 4)
        print(a.z)
        AddMethod(a, lambda self, i: self.l[i], "listIndex")
        print(a.listIndex(5))
    """
    if name is None:
        # BUG FIX: function.func_name is Python-2-only and raises
        # AttributeError on Python 3; __name__ works on both.
        name = function.__name__
    else:
        function = RenameFunction(function, name)
    if hasattr(obj, '__class__') and obj.__class__ is not type:
        # "obj" is an instance, so it gets a bound method.
        # BUG FIX: Python 3's MethodType takes (function, instance);
        # the old three-argument form raised TypeError.
        setattr(obj, name, MethodType(function, obj))
    else:
        # "obj" is a class; attaching the plain function is the modern
        # equivalent of creating an unbound method — it binds on access.
        setattr(obj, name, function)
def RenameFunction(function, name):
    """
    Returns a function identical to the specified function, but with
    the specified name.

    Note: free variables (closures) are not carried over to the copy.
    """
    # BUG FIX: func_code/func_globals/func_defaults are Python-2-only
    # spellings; __code__/__globals__/__defaults__ work on 2.6+ and 3.x.
    return FunctionType(function.__code__,
                        function.__globals__,
                        name,
                        function.__defaults__)
# MD5 signature support.  The fallback versions below are used only
# when hashlib (or its md5 constructor) is unavailable; otherwise they
# are shadowed by the real implementations further down.
md5 = False

def MD5signature(s):
    """Fallback: no hashing available, use the string form of s itself."""
    return str(s)

def MD5filesignature(fname, chunksize=65536):
    """Fallback: no hashing available, use the raw file contents."""
    # BUG FIX: use a with-statement so the file is closed even if
    # read() raises.
    with open(fname, "rb") as f:
        return f.read()

try:
    import hashlib
except ImportError:
    pass
else:
    if hasattr(hashlib, 'md5'):
        md5 = True

        def MD5signature(s):
            """Return the hex MD5 digest of str(s)."""
            m = hashlib.md5()
            # BUG FIX: hashlib requires bytes on Python 3; passing
            # str(s) directly raised TypeError.  Encoding is a no-op
            # for the ASCII signature strings this is normally fed.
            m.update(str(s).encode('utf-8'))
            return m.hexdigest()

        def MD5filesignature(fname, chunksize=65536):
            """Return the hex MD5 digest of the file's contents, read
            in chunksize blocks."""
            m = hashlib.md5()
            with open(fname, "rb") as f:
                while True:
                    blck = f.read(chunksize)
                    if not blck:
                        break
                    # BUG FIX: blck is already bytes; the old
                    # m.update(str(blck)) corrupted digests on
                    # Python 3 (and raised TypeError anyway).
                    m.update(blck)
            return m.hexdigest()
def MD5collect(signatures):
    """
    Collects a list of signatures into an aggregate signature.

    signatures - a list of signatures
    returns - the aggregate signature
    """
    # A single signature is already its own aggregate.
    if len(signatures) == 1:
        return signatures[0]
    return MD5signature(', '.join(signatures))
def silent_intern(x):
    """
    Perform sys.intern() on the passed argument and return the result.
    If the input is ineligible for interning (not an internable string
    type) the original argument is returned and no exception is thrown.
    """
    try:
        return sys.intern(x)
    except TypeError:
        return x
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
#TODO??? class Null(object):
class Null(object):
    """ Null objects always and reliably "do nothing."

    One singleton exists per class: subclasses each get their own
    instance, because vars(cls) only sees the class's own __dict__.
    (Dinu C. Gherman, Python Cookbook 2nd ed., recipe 6.17.)
    """
    def __new__(cls, *args, **kwargs):
        if '_instance' not in vars(cls):
            # BUG FIX: object.__new__() must be called with the class
            # only; forwarding *args/**kwargs raised TypeError on
            # Python 3.
            cls._instance = super(Null, cls).__new__(cls)
        return cls._instance
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, *args, **kwargs):
        # Calling a Null yields the same Null.
        return self
    def __repr__(self):
        return "Null(0x%08X)" % id(self)
    def __bool__(self):
        # BUG FIX: only the Python-2 __nonzero__ hook existed, so Null
        # was truthy on Python 3; it is always falsy.
        return False
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __getattr__(self, name):
        return self
    def __setattr__(self, name, value):
        # Silently discard attribute assignment.
        return self
    def __delattr__(self, name):
        # Silently discard attribute deletion.
        return self
class NullSeq(Null):
    """A Null that additionally behaves as an empty sequence: zero
    length, empty iteration, and all item access/assignment/deletion
    collapsing back to the singleton itself."""
    def __len__(self):
        return 0
    def __iter__(self):
        return iter(())
    def __getitem__(self, i):
        return self
    def __delitem__(self, i):
        return self
    def __setitem__(self, i, v):
        return self
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "53060be9341e4a65e43fa232b1ce491f",
"timestamp": "",
"source": "github",
"line_count": 1523,
"max_line_length": 98,
"avg_line_length": 33.0059093893631,
"alnum_prop": 0.6007201400493356,
"repo_name": "EmanueleCannizzaro/scons",
"id": "2370a9c35f29691ce28d39328ffbd42b675f4b6b",
"size": "50268",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
}
|
import os
import pytest
from conda_build import api
from .utils import thisdir
@pytest.fixture()
def recipe():
    """Path to the bundled 'go-package' test recipe directory."""
    return os.path.join(thisdir, 'test-recipes', 'go-package')
@pytest.mark.sanity
def test_recipe_build(recipe, testing_config, testing_workdir, monkeypatch):
    """Smoke-test that the go-package recipe builds end to end."""
    # These variables are defined solely for testing purposes,
    # so they can be checked within build scripts
    testing_config.activate = True
    monkeypatch.setenv("CONDA_TEST_VAR", "conda_test")
    monkeypatch.setenv("CONDA_TEST_VAR_2", "conda_test_2")
    api.build(recipe, config=testing_config)
|
{
"content_hash": "45a4434655118a7701f95b7b7ea40641",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 27.80952380952381,
"alnum_prop": 0.7328767123287672,
"repo_name": "pelson/conda-build",
"id": "3f4b3ad0bb2b99916336fe93e8275ef4ac0fdddc",
"size": "584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_api_build_go_package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
ircd.configure
~~~~~~~~~~~~~~
Command-line configuration script for XinIRCd.
"""
import yaml
from ircd.constants import *
# Runtime option store, populated by read_config()/setopt().
OPTIONS = {}
# Built-in defaults offered by the interactive configure() wizard.
# NOTE(review): `os` is not imported in this module's visible import
# lines; presumably it arrives via the `ircd.constants` star import —
# verify.
DEFAULTS = {
    "MODULE_DIR": os.path.abspath(
        os.path.dirname(__file__) + os.sep + "modules"),
    "MAX_CLIENT": 1024,
    "NICK_LENGTH": 32,
    "CHAN_LENGTH": 64,
    "MAX_CHANNEL": 20,
    "MOTD_FILE": os.path.abspath("motd.txt"),
    "PORTS": ["6667"]
}
def getopt(key, default=None):
    """Return the configured value for *key*, or *default* when unset."""
    return OPTIONS.get(key, default)
def setopt(key, value, persist=False):
    """Set option *key* to *value* in the in-memory store.

    persist=True is meant to write the option back to the config file
    but is not implemented yet.
    """
    OPTIONS[key] = value
    if persist:
        # TODO persist
        pass
def read_config():
    """Load CONFIG_FILE (YAML) into the module-global OPTIONS dict."""
    global OPTIONS
    # BUG FIX: the file object returned by open() was never closed.
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from tagged input; switch to
    # yaml.safe_load if config files never rely on custom tags.
    with open(CONFIG_FILE, "r") as config_file:
        OPTIONS = yaml.load(config_file)
    # TODO config validation
    # TODO make sure bind is a list of host/port pairs
def configure():
    """Interactive first-run wizard: prompt for core settings and write
    them to CONFIG_FILE as YAML."""
    # Start from the documented defaults; the prompts below override them.
    conf = dict(DEFAULTS.items())
    print(YELLOW + "Welcome to the XinIRCd Configuration!" + NORMAL)
    print()
    print("In what directory should the modules loaded from?")
    conf["MODULE_DIR"] = input("~ [" + GREEN + DEFAULTS["MODULE_DIR"] + NORMAL
                               + "] ") or DEFAULTS["MODULE_DIR"]
    print()
    if not os.path.exists(conf["MODULE_DIR"]):
        print("The module directory doesn't exist. Create it? (y/n)")
        if input("~ [" + GREEN + "y" + NORMAL + "] ").lower() != "n":
            os.makedirs(conf["MODULE_DIR"])
        print()
    print("Server name? (required for SSL, put FQDN)")
    conf["SERVER_NAME"] = input("~ ")
    print()
    # Re-prompt until a non-empty server name is supplied.
    while not conf["SERVER_NAME"]:
        print(RED + "Please enter a valid server name." + NORMAL)
        conf["SERVER_NAME"] = input("~ ")
        print()
    print("Maximum number of clients at any time? (" + GREEN +
          "1-" + str(DEFAULTS["MAX_CLIENT"]) + NORMAL + ")")
    conf["MAX_CLIENT"] = input(
        "~ [" + GREEN + str(DEFAULTS["MAX_CLIENT"]) + NORMAL + "] ") or \
        str(DEFAULTS["MAX_CLIENT"])
    print()
    # Validate: digits only and no larger than the built-in ceiling.
    while not conf["MAX_CLIENT"].isdigit() or int(conf["MAX_CLIENT"]) > \
            DEFAULTS["MAX_CLIENT"]:
        print(RED + "Please enter a valid number between " +
              GREEN + "1-" + str(DEFAULTS["MAX_CLIENT"]) + NORMAL)
        conf["MAX_CLIENT"] = input(
            "~ [" + GREEN + str(DEFAULTS["MAX_CLIENT"]) + NORMAL + "] ") or \
            str(DEFAULTS["MAX_CLIENT"])
        print()
    conf["MAX_CLIENT"] = int(conf["MAX_CLIENT"])
    print("Maximum length of nicknames?")
    conf["NICK_LENGTH"] = input(
        "~ [" + GREEN + str(DEFAULTS["NICK_LENGTH"]) + NORMAL + "] ") or \
        str(DEFAULTS["NICK_LENGTH"])
    conf["NICK_LENGTH"] = int(conf["NICK_LENGTH"])
    print()
    print("Maximum length of channel names?")
    conf["CHAN_LENGTH"] = input(
        "~ [" + GREEN + str(DEFAULTS["CHAN_LENGTH"]) + NORMAL + "] ") or \
        str(DEFAULTS["CHAN_LENGTH"])
    conf["CHAN_LENGTH"] = int(conf["CHAN_LENGTH"])
    print()
    print("Maximum number of channels a user can join?")
    conf["MAX_CHANNEL"] = input(
        "~ [" + GREEN + str(DEFAULTS["MAX_CHANNEL"]) + NORMAL + "] ") or \
        str(DEFAULTS["MAX_CHANNEL"])
    conf["MAX_CHANNEL"] = int(conf["MAX_CHANNEL"])
    print()
    # Persist the answers; anything else is hand-edited afterwards.
    with open(CONFIG_FILE, "w") as out:
        yaml.dump(conf, out, default_flow_style=False)
    print("DONE!")
    print("*** " + CYAN + "Remember to edit your configuration files!" +
          NORMAL + " ***")
    print()
if __name__ == "__main__":
    # Run the interactive wizard when executed as a script.
    configure()
|
{
"content_hash": "91990f83a785df24c50f722d9f793a6e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 30.068376068376068,
"alnum_prop": 0.5409323479249574,
"repo_name": "failedxyz/xinircd",
"id": "e817c8172ea8730b1588ce5517a2b9b095f74c84",
"size": "3542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ircd/config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33398"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
}
|
from wtforms import (DateField, DecimalField, IntegerField, TextField,
validators, FileField)
from finance.forms import BaseForm, Form
from finance.models.account import Account
class DateLocaleField(DateField):
    """DateField variant that tolerates 'Z'-suffixed ISO timestamps.

    Submitted values longer than a plain date that end in 'Z' (e.g.
    '2014-01-02T00:00:00Z') are truncated to their first ten
    characters — the date portion — before normal DateField parsing.
    """
    def process_formdata(self, valuelist):
        cleaned = [
            value[:10] if len(value) > 10 and value.endswith('Z') else value
            for value in valuelist
        ]
        return super(DateLocaleField, self).process_formdata(cleaned)
def validate_account(form, field):
    """WTForms validator: resolve field.data to an Account record.

    On success the loaded record is attached as ``field.account``;
    on any lookup failure a ValidationError is raised.
    """
    try:
        field.account = Account.query.get(field.data)
    # BUG FIX: was a bare ``except:``, which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        raise validators.ValidationError(
            "{0} is an invalid account".format(field.name)
        )
    # NOTE(review): if Account.query.get returns None for an unknown id
    # (SQLAlchemy-style) this validator silently accepts it — confirm
    # whether a None result should also raise ValidationError.
class TransactionForm(BaseForm):
    """Form for creating or updating a double-entry transaction."""
    # debit/credit carry account ids; validate_account resolves them
    # and attaches the loaded record to the field.
    debit = IntegerField('Debit', [validators.Required(), validate_account])
    credit = IntegerField('Credit', [validators.Required(), validate_account])
    amount = DecimalField(
        'Amount',
        [validators.NumberRange(min=0), validators.Required()]
    )
    summary = TextField(
        'Summary',
        [validators.Length(min=3, max=50), validators.Required()]
    )
    date = DateLocaleField('Date', [validators.Required()])
    description = TextField('Description', [validators.Length(min=0, max=250)])
class TransactionsImportForm(Form):
    """Form for bulk-importing transactions from an uploaded CSV file."""
    main_account_id = IntegerField('Account',
                                   [validators.Required(), validate_account])
    # BUG FIX: the pattern was u'\.csv$', where '\.' is an invalid
    # escape sequence (SyntaxWarning on modern Python); a raw string
    # expresses the same regex unambiguously.
    transactions_file = FileField(u"Transaction File",
                                  [validators.regexp(r'\.csv$')])
|
{
"content_hash": "1b3574c20aa6455c4dd62d660fa7e871",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 35.145833333333336,
"alnum_prop": 0.6354475400118553,
"repo_name": "reinbach/finance",
"id": "9bbca49bc3c5763472f0753fdc318958f39eb461",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/finance/forms/transaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "537"
},
{
"name": "HTML",
"bytes": "12718"
},
{
"name": "JavaScript",
"bytes": "42149"
},
{
"name": "Python",
"bytes": "94442"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "512"
}
],
"symlink_target": ""
}
|
"""
Test suite for VMware VMDK driver volumeops module.
"""
import ddt
import mock
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import vim_util
from cinder import test
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
from cinder.volume.drivers.vmware import volumeops
@ddt.ddt
class VolumeOpsTestCase(test.TestCase):
"""Unit tests for volumeops module."""
MAX_OBJECTS = 100
    def setUp(self):
        """Create a mock vSphere session and the VMwareVolumeOps under test."""
        super(VolumeOpsTestCase, self).setUp()
        self.session = mock.MagicMock()
        self.vops = volumeops.VMwareVolumeOps(self.session, self.MAX_OBJECTS)
def test_split_datastore_path(self):
test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx'
(datastore, folder, file_name) = volumeops.split_datastore_path(test1)
self.assertEqual('datastore1', datastore)
self.assertEqual('myfolder/mysubfolder/', folder)
self.assertEqual('myvm.vmx', file_name)
test2 = '[datastore2 ] myfolder/myvm.vmdk'
(datastore, folder, file_name) = volumeops.split_datastore_path(test2)
self.assertEqual('datastore2', datastore)
self.assertEqual('myfolder/', folder)
self.assertEqual('myvm.vmdk', file_name)
test3 = 'myfolder/myvm.vmdk'
self.assertRaises(IndexError, volumeops.split_datastore_path, test3)
def vm(self, val):
"""Create a mock vm in retrieve result format."""
vm = mock.MagicMock()
prop = mock.Mock(spec=object)
prop.val = val
vm.propSet = [prop]
return vm
def test_get_backing(self):
name = 'mock-backing'
# Test no result
self.session.invoke_api.return_value = None
result = self.vops.get_backing(name)
self.assertIsNone(result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
# Test single result
vm = self.vm(name)
vm.obj = mock.sentinel.vm_obj
retrieve_result = mock.Mock(spec=object)
retrieve_result.objects = [vm]
self.session.invoke_api.return_value = retrieve_result
self.vops.cancel_retrieval = mock.Mock(spec=object)
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.cancel_retrieval.assert_called_once_with(retrieve_result)
# Test multiple results
retrieve_result2 = mock.Mock(spec=object)
retrieve_result2.objects = [vm('1'), vm('2'), vm('3')]
self.session.invoke_api.return_value = retrieve_result2
self.vops.continue_retrieval = mock.Mock(spec=object)
self.vops.continue_retrieval.return_value = retrieve_result
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.continue_retrieval.assert_called_once_with(retrieve_result2)
self.vops.cancel_retrieval.assert_called_with(retrieve_result)
def test_delete_backing(self):
backing = mock.sentinel.backing
task = mock.sentinel.task
self.session.invoke_api.return_value = task
self.vops.delete_backing(backing)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"Destroy_Task",
backing)
self.session.wait_for_task(task)
    def test_get_host(self):
        """get_host reads the 'runtime.host' property of the instance."""
        instance = mock.sentinel.instance
        host = mock.sentinel.host
        self.session.invoke_api.return_value = host
        result = self.vops.get_host(instance)
        self.assertEqual(host, result)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'get_object_property',
                                                        self.session.vim,
                                                        instance,
                                                        'runtime.host')
def _host_runtime_info(
self, connection_state='connected', in_maintenance=False):
return mock.Mock(connectionState=connection_state,
inMaintenanceMode=in_maintenance)
    def test_get_hosts(self):
        """get_hosts retrieves all HostSystem objects in one query."""
        hosts = mock.sentinel.hosts
        self.session.invoke_api.return_value = hosts
        result = self.vops.get_hosts()
        self.assertEqual(hosts, result)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'get_objects',
                                                        self.session.vim,
                                                        'HostSystem',
                                                        self.MAX_OBJECTS)
    def test_continue_retrieval(self):
        """continue_retrieval delegates to vim_util.continue_retrieval."""
        retrieve_result = mock.sentinel.retrieve_result
        self.session.invoke_api.return_value = retrieve_result
        result = self.vops.continue_retrieval(retrieve_result)
        self.assertEqual(retrieve_result, result)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'continue_retrieval',
                                                        self.session.vim,
                                                        retrieve_result)
    def test_cancel_retrieval(self):
        """cancel_retrieval delegates to vim_util and returns None."""
        retrieve_result = mock.sentinel.retrieve_result
        self.session.invoke_api.return_value = retrieve_result
        result = self.vops.cancel_retrieval(retrieve_result)
        self.assertIsNone(result)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'cancel_retrieval',
                                                        self.session.vim,
                                                        retrieve_result)
    def test_is_usable(self):
        """_is_usable needs readWrite access plus mounted/accessible flags.

        Per the assertions below, a missing ``mounted`` attribute counts
        as mounted, while a missing ``accessible`` attribute counts as
        not accessible.
        """
        mount_info = mock.Mock(spec=object)
        mount_info.accessMode = "readWrite"
        mount_info.mounted = True
        mount_info.accessible = True
        self.assertTrue(self.vops._is_usable(mount_info))
        # Absent 'mounted' still counts as usable.
        del mount_info.mounted
        self.assertTrue(self.vops._is_usable(mount_info))
        # Anything other than readWrite access is unusable.
        mount_info.accessMode = "readonly"
        self.assertFalse(self.vops._is_usable(mount_info))
        mount_info.accessMode = "readWrite"
        mount_info.mounted = False
        self.assertFalse(self.vops._is_usable(mount_info))
        mount_info.mounted = True
        mount_info.accessible = False
        self.assertFalse(self.vops._is_usable(mount_info))
        # Absent 'accessible' counts as unusable.
        del mount_info.accessible
        self.assertFalse(self.vops._is_usable(mount_info))
    def _create_host_mounts(self, access_mode, host, set_accessible=True,
                            is_accessible=True, mounted=True):
        """Create host mount value of datastore with single mount info.

        :param access_mode: string specifying the read/write permission
        :param host: managed object reference of the connected
                     host
        :param set_accessible: specify whether accessible property
                               should be set
        :param is_accessible: boolean specifying whether the datastore
                              is accessible to host
        :param mounted: boolean specifying whether the datastore is
                        mounted on the host
        :return: list of host mount info
        """
        mntInfo = mock.Mock(spec=object)
        mntInfo.accessMode = access_mode
        if set_accessible:
            mntInfo.accessible = is_accessible
        else:
            # Simulate the 'accessible' property being entirely absent.
            del mntInfo.accessible
        mntInfo.mounted = mounted
        host_mount = mock.Mock(spec=object)
        host_mount.key = host
        host_mount.mountInfo = mntInfo
        host_mounts = mock.Mock(spec=object)
        host_mounts.DatastoreHostMount = [host_mount]
        return host_mounts
def test_get_connected_hosts(self):
    """get_connected_hosts lists hosts with usable mounts of a datastore."""
    with mock.patch.object(self.vops, 'get_summary') as get_summary:
        datastore = mock.sentinel.datastore
        summary = mock.Mock(spec=object)
        get_summary.return_value = summary
        # An inaccessible datastore has no connected hosts.
        summary.accessible = False
        hosts = self.vops.get_connected_hosts(datastore)
        self.assertEqual([], hosts)
        # Accessible datastore with a single read/write mount.
        summary.accessible = True
        host = mock.Mock(spec=object)
        host.value = mock.sentinel.host
        host_mounts = self._create_host_mounts("readWrite", host)
        self.session.invoke_api.return_value = host_mounts
        hosts = self.vops.get_connected_hosts(datastore)
        self.assertEqual([mock.sentinel.host], hosts)
        self.session.invoke_api.assert_called_once_with(
            vim_util,
            'get_object_property',
            self.session.vim,
            datastore,
            'host')
        # Missing DatastoreHostMount property yields an empty host list.
        del host_mounts.DatastoreHostMount
        hosts = self.vops.get_connected_hosts(datastore)
        self.assertEqual([], hosts)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'get_connected_hosts')
def test_is_datastore_accessible(self, get_connected_hosts):
    """A host present in the connected-host list is accessible."""
    connected = [mock.sentinel.host_1, mock.sentinel.host_2]
    get_connected_hosts.return_value = connected
    datastore = mock.sentinel.datastore
    host = mock.Mock(value=mock.sentinel.host_1)
    self.assertTrue(self.vops.is_datastore_accessible(datastore, host))
    get_connected_hosts.assert_called_once_with(datastore)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'get_connected_hosts')
def test_is_datastore_accessible_with_inaccessible(self,
                                                   get_connected_hosts):
    """A host absent from the connected-host list is not accessible."""
    get_connected_hosts.return_value = [mock.sentinel.host_1]
    datastore = mock.sentinel.datastore
    other_host = mock.Mock(value=mock.sentinel.host_2)
    self.assertFalse(
        self.vops.is_datastore_accessible(datastore, other_host))
    get_connected_hosts.assert_called_once_with(datastore)
def test_get_parent(self):
    """_get_parent returns the object itself or walks 'parent' properties."""
    # Not recursive: the child already has the requested type.
    child = mock.Mock(spec=object)
    child._type = 'Parent'
    ret = self.vops._get_parent(child, 'Parent')
    self.assertEqual(child, ret)
    # Recursive: the 'parent' property is fetched until the type matches.
    parent = mock.Mock(spec=object)
    parent._type = 'Parent'
    child = mock.Mock(spec=object)
    child._type = 'Child'
    self.session.invoke_api.return_value = parent
    ret = self.vops._get_parent(child, 'Parent')
    self.assertEqual(parent, ret)
    self.session.invoke_api.assert_called_with(vim_util,
                                               'get_object_property',
                                               self.session.vim, child,
                                               'parent')
def test_get_dc(self):
    """get_dc walks up the inventory tree to the Datacenter ancestor."""
    # Set up hierarchy of objects: o2 -> o1 -> dc.
    dc = mock.Mock(spec=object)
    dc._type = 'Datacenter'
    o1 = mock.Mock(spec=object)
    o1._type = 'mockType1'
    o1.parent = dc
    o2 = mock.Mock(spec=object)
    o2._type = 'mockType2'
    o2.parent = o1
    # Mock out invoke_api behaviour to fetch the parent property.
    def mock_invoke_api(vim_util, method, vim, the_object, arg):
        return the_object.parent
    self.session.invoke_api.side_effect = mock_invoke_api
    ret = self.vops.get_dc(o2)
    self.assertEqual(dc, ret)
    # Clear side effects so later tests see a clean session mock.
    self.session.invoke_api.side_effect = None
def test_get_vmfolder(self):
    """get_vmfolder reads the 'vmFolder' property of the datacenter."""
    self.session.invoke_api.return_value = mock.sentinel.ret
    self.assertEqual(mock.sentinel.ret,
                     self.vops.get_vmfolder(mock.sentinel.dc))
    self.session.invoke_api.assert_called_once_with(
        vim_util, 'get_object_property', self.session.vim,
        mock.sentinel.dc, 'vmFolder')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'get_entity_name')
def test_get_child_folder(self, get_entity_name):
    """_get_child_folder matches a child Folder by (URL-encoded) name."""
    child_entity_1 = mock.Mock(_type='Folder')
    child_entity_2 = mock.Mock(_type='foo')  # non-Folder entity: skipped
    child_entity_3 = mock.Mock(_type='Folder')
    prop_val = mock.Mock(ManagedObjectReference=[child_entity_1,
                                                 child_entity_2,
                                                 child_entity_3])
    self.session.invoke_api.return_value = prop_val
    # '%2f' in the stored entity name matches '/' in the requested name.
    get_entity_name.side_effect = ['bar', '%2fcinder-volumes']
    parent_folder = mock.sentinel.parent_folder
    child_name = '/cinder-volumes'
    ret = self.vops._get_child_folder(parent_folder, child_name)
    self.assertEqual(child_entity_3, ret)
    self.session.invoke_api.assert_called_once_with(
        vim_util, 'get_object_property', self.session.vim, parent_folder,
        'childEntity')
    get_entity_name.assert_has_calls([mock.call(child_entity_1),
                                      mock.call(child_entity_3)])
def test_create_folder(self):
    """create_folder invokes CreateFolder and returns the new folder."""
    new_folder = mock.sentinel.folder
    self.session.invoke_api.return_value = new_folder
    parent = mock.sentinel.parent_folder
    name = mock.sentinel.child_folder_name
    self.assertEqual(new_folder, self.vops.create_folder(parent, name))
    self.session.invoke_api.assert_called_once_with(
        self.session.vim, 'CreateFolder', parent, name=name)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_child_folder')
def test_create_folder_with_duplicate_name(self, get_child_folder):
    """On DuplicateName, create_folder returns the existing child folder."""
    self.session.invoke_api.side_effect = exceptions.DuplicateName
    folder = mock.sentinel.folder
    get_child_folder.return_value = folder
    parent_folder = mock.sentinel.parent_folder
    child_folder_name = mock.sentinel.child_folder_name
    ret = self.vops.create_folder(parent_folder, child_folder_name)
    self.assertEqual(folder, ret)
    self.session.invoke_api.assert_called_once_with(
        self.session.vim, 'CreateFolder', parent_folder,
        name=child_folder_name)
    get_child_folder.assert_called_once_with(parent_folder,
                                             child_folder_name)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'get_vmfolder')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'create_folder')
def test_create_vm_inventory_folder(self, create_folder, get_vmfolder):
    """create_vm_inventory_folder builds nested folders and caches them."""
    vm_folder_1 = mock.sentinel.vm_folder_1
    get_vmfolder.return_value = vm_folder_1
    folder_1a = mock.sentinel.folder_1a
    folder_1b = mock.sentinel.folder_1b
    create_folder.side_effect = [folder_1a, folder_1b]
    datacenter_1 = mock.Mock(value='dc-1')
    path_comp = ['a', 'b']
    ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp)
    self.assertEqual(folder_1b, ret)
    get_vmfolder.assert_called_once_with(datacenter_1)
    exp_calls = [mock.call(vm_folder_1, 'a'), mock.call(folder_1a, 'b')]
    self.assertEqual(exp_calls, create_folder.call_args_list)
    # The folder cache is keyed by '/<dc value>/<path components...>'.
    exp_cache = {'/dc-1': vm_folder_1,
                 '/dc-1/a': folder_1a,
                 '/dc-1/a/b': folder_1b}
    self.assertEqual(exp_cache, self.vops._folder_cache)
    # Test cache: cached 'a' is reused, only 'c' is newly created.
    get_vmfolder.reset_mock()
    create_folder.reset_mock()
    folder_1c = mock.sentinel.folder_1c
    create_folder.side_effect = [folder_1c]
    path_comp = ['a', 'c']
    ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp)
    self.assertEqual(folder_1c, ret)
    self.assertFalse(get_vmfolder.called)
    exp_calls = [mock.call(folder_1a, 'c')]
    self.assertEqual(exp_calls, create_folder.call_args_list)
    exp_cache = {'/dc-1': vm_folder_1,
                 '/dc-1/a': folder_1a,
                 '/dc-1/a/b': folder_1b,
                 '/dc-1/a/c': folder_1c}
    self.assertEqual(exp_cache, self.vops._folder_cache)
    # Test cache with different datacenter: no cross-datacenter reuse.
    get_vmfolder.reset_mock()
    create_folder.reset_mock()
    vm_folder_2 = mock.sentinel.vm_folder_2
    get_vmfolder.return_value = vm_folder_2
    folder_2a = mock.sentinel.folder_2a
    folder_2b = mock.sentinel.folder_2b
    create_folder.side_effect = [folder_2a, folder_2b]
    datacenter_2 = mock.Mock(value='dc-2')
    path_comp = ['a', 'b']
    ret = self.vops.create_vm_inventory_folder(datacenter_2, path_comp)
    self.assertEqual(folder_2b, ret)
    get_vmfolder.assert_called_once_with(datacenter_2)
    exp_calls = [mock.call(vm_folder_2, 'a'), mock.call(folder_2a, 'b')]
    self.assertEqual(exp_calls, create_folder.call_args_list)
    exp_cache = {'/dc-1': vm_folder_1,
                 '/dc-1/a': folder_1a,
                 '/dc-1/a/b': folder_1b,
                 '/dc-1/a/c': folder_1c,
                 '/dc-2': vm_folder_2,
                 '/dc-2/a': folder_2a,
                 '/dc-2/a/b': folder_2b
                 }
    self.assertEqual(exp_cache, self.vops._folder_cache)
def test_create_disk_backing_thin(self):
    """'thin' disk backing sets thinProvisioned=True on the factory object."""
    backing = mock.Mock()
    # Simulate a suds-style object that lacks the 'eagerlyScrub' attribute.
    del backing.eagerlyScrub
    cf = self.session.vim.client.factory
    cf.create.return_value = backing
    disk_type = 'thin'
    ret = self.vops._create_disk_backing(disk_type, None)
    self.assertEqual(backing, ret)
    self.assertIsInstance(ret.thinProvisioned, bool)
    self.assertTrue(ret.thinProvisioned)
    self.assertEqual('', ret.fileName)
    self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_thick(self):
    """'thick' disk backing sets neither thinProvisioned nor eagerlyScrub."""
    backing = mock.Mock()
    # Simulate a suds-style object lacking both optional attributes.
    del backing.eagerlyScrub
    del backing.thinProvisioned
    cf = self.session.vim.client.factory
    cf.create.return_value = backing
    disk_type = 'thick'
    ret = self.vops._create_disk_backing(disk_type, None)
    self.assertEqual(backing, ret)
    self.assertEqual('', ret.fileName)
    self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_eager_zeroed_thick(self):
    """'eagerZeroedThick' disk backing sets eagerlyScrub=True."""
    backing = mock.Mock()
    # Simulate a suds-style object lacking the 'thinProvisioned' attribute.
    del backing.thinProvisioned
    cf = self.session.vim.client.factory
    cf.create.return_value = backing
    disk_type = 'eagerZeroedThick'
    ret = self.vops._create_disk_backing(disk_type, None)
    self.assertEqual(backing, ret)
    self.assertIsInstance(ret.eagerlyScrub, bool)
    self.assertTrue(ret.eagerlyScrub)
    self.assertEqual('', ret.fileName)
    self.assertEqual('persistent', ret.diskMode)
def test_create_virtual_disk_config_spec(self):
    """Disk config spec: 'add'/'create' ops with sized, profiled device."""
    cf = self.session.vim.client.factory
    cf.create.side_effect = lambda *args: mock.Mock()
    size_kb = units.Ki
    controller_key = 200
    disk_type = 'thick'
    profile_id = mock.sentinel.profile_id
    spec = self.vops._create_virtual_disk_config_spec(size_kb,
                                                      disk_type,
                                                      controller_key,
                                                      profile_id,
                                                      None)
    cf.create.side_effect = None
    self.assertEqual('add', spec.operation)
    self.assertEqual('create', spec.fileOperation)
    device = spec.device
    self.assertEqual(size_kb, device.capacityInKB)
    # NOTE(review): -101 presumably is a placeholder key for a device new
    # in this reconfigure request — confirm against volumeops.
    self.assertEqual(-101, device.key)
    self.assertEqual(0, device.unitNumber)
    self.assertEqual(controller_key, device.controllerKey)
    backing = device.backing
    self.assertEqual('', backing.fileName)
    self.assertEqual('persistent', backing.diskMode)
    disk_profiles = spec.profile
    self.assertEqual(1, len(disk_profiles))
    self.assertEqual(profile_id, disk_profiles[0].profileId)
def test_create_specs_for_ide_disk_add(self):
    """IDE disk add: a single spec using controller key 200."""
    factory = self.session.vim.client.factory
    factory.create.side_effect = lambda *args: mock.Mock()
    size_kb = 1
    disk_type = 'thin'
    adapter_type = 'ide'
    profile_id = mock.sentinel.profile_id
    ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
                                               adapter_type, profile_id)
    factory.create.side_effect = None
    self.assertEqual(1, len(ret))
    # Requested 1 KB but units.Ki expected — presumably the size is rounded
    # up to a minimum; confirm in volumeops._create_specs_for_disk_add.
    self.assertEqual(units.Ki, ret[0].device.capacityInKB)
    self.assertEqual(200, ret[0].device.controllerKey)
    expected = [mock.call.create('ns0:VirtualDeviceConfigSpec'),
                mock.call.create('ns0:VirtualDisk'),
                mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')]
    factory.create.assert_has_calls(expected, any_order=True)
def test_create_specs_for_scsi_disk_add(self):
    """SCSI disk add: controller spec plus disk spec are both returned."""
    factory = self.session.vim.client.factory
    factory.create.side_effect = lambda *args: mock.Mock()
    size_kb = 2 * units.Ki
    disk_type = 'thin'
    adapter_type = 'lsiLogicsas'
    profile_id = mock.sentinel.profile_id
    ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
                                               adapter_type, profile_id)
    factory.create.side_effect = None
    self.assertEqual(2, len(ret))
    # ret[1] is the controller spec; ret[0] is the disk spec.
    self.assertEqual('noSharing', ret[1].device.sharedBus)
    self.assertEqual(size_kb, ret[0].device.capacityInKB)
    expected = [mock.call.create('ns0:VirtualLsiLogicSASController'),
                mock.call.create('ns0:VirtualDeviceConfigSpec'),
                mock.call.create('ns0:VirtualDisk'),
                mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'),
                mock.call.create('ns0:VirtualDeviceConfigSpec')]
    factory.create.assert_has_calls(expected, any_order=True)
def test_get_create_spec_disk_less(self):
    """Disk-less create spec: name, datastore path, profile, extraConfig."""
    factory = self.session.vim.client.factory
    factory.create.side_effect = lambda *args: mock.Mock()
    name = mock.sentinel.name
    ds_name = mock.sentinel.ds_name
    profile_id = mock.sentinel.profile_id
    option_key = mock.sentinel.key
    option_value = mock.sentinel.value
    extra_config = {option_key: option_value,
                    volumeops.BACKING_UUID_KEY: mock.sentinel.uuid}
    ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id,
                                               extra_config)
    factory.create.side_effect = None
    self.assertEqual(name, ret.name)
    # The backing UUID is pulled out of extra_config into instanceUuid.
    self.assertEqual(mock.sentinel.uuid, ret.instanceUuid)
    self.assertEqual('[%s]' % ds_name, ret.files.vmPathName)
    self.assertEqual("vmx-08", ret.version)
    self.assertEqual(profile_id, ret.vmProfile[0].profileId)
    # Only the remaining option ends up in extraConfig.
    self.assertEqual(1, len(ret.extraConfig))
    self.assertEqual(option_key, ret.extraConfig[0].key)
    self.assertEqual(option_value, ret.extraConfig[0].value)
    expected = [mock.call.create('ns0:VirtualMachineFileInfo'),
                mock.call.create('ns0:VirtualMachineConfigSpec'),
                mock.call.create('ns0:VirtualMachineDefinedProfileSpec'),
                mock.call.create('ns0:OptionValue')]
    factory.create.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_create_spec_disk_less')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_create_specs_for_disk_add')
def test_get_create_spec(self, create_specs_for_disk_add,
                         get_create_spec_disk_less):
    """get_create_spec composes the disk-less spec with disk-add specs."""
    name = 'vol-1'
    size_kb = 1024
    disk_type = 'thin'
    ds_name = 'nfs-1'
    profile_id = mock.sentinel.profile_id
    adapter_type = 'busLogic'
    extra_config = mock.sentinel.extra_config
    self.vops.get_create_spec(name, size_kb, disk_type, ds_name,
                              profile_id, adapter_type, extra_config)
    get_create_spec_disk_less.assert_called_once_with(
        name, ds_name, profileId=profile_id, extra_config=extra_config)
    create_specs_for_disk_add.assert_called_once_with(
        size_kb, disk_type, adapter_type, profile_id)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            'get_create_spec')
def test_create_backing(self, get_create_spec):
    """create_backing runs CreateVM_Task and returns the task result."""
    create_spec = mock.sentinel.create_spec
    get_create_spec.return_value = create_spec
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    task_info = mock.Mock(spec=object)
    task_info.result = mock.sentinel.result
    self.session.wait_for_task.return_value = task_info
    name = 'backing_name'
    size_kb = mock.sentinel.size_kb
    disk_type = mock.sentinel.disk_type
    adapter_type = mock.sentinel.adapter_type
    folder = mock.sentinel.folder
    resource_pool = mock.sentinel.resource_pool
    host = mock.sentinel.host
    ds_name = mock.sentinel.ds_name
    profile_id = mock.sentinel.profile_id
    extra_config = mock.sentinel.extra_config
    ret = self.vops.create_backing(name, size_kb, disk_type, folder,
                                   resource_pool, host, ds_name,
                                   profile_id, adapter_type, extra_config)
    self.assertEqual(mock.sentinel.result, ret)
    get_create_spec.assert_called_once_with(
        name, size_kb, disk_type, ds_name, profile_id=profile_id,
        adapter_type=adapter_type, extra_config=extra_config)
    self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                    'CreateVM_Task',
                                                    folder,
                                                    config=create_spec,
                                                    pool=resource_pool,
                                                    host=host)
    self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_create_spec_disk_less')
def test_create_backing_disk_less(self, get_create_spec_disk_less):
    """create_backing_disk_less creates a VM with no disk attached."""
    create_spec = mock.sentinel.create_spec
    get_create_spec_disk_less.return_value = create_spec
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    task_info = mock.Mock(spec=object)
    task_info.result = mock.sentinel.result
    self.session.wait_for_task.return_value = task_info
    name = 'backing_name'
    folder = mock.sentinel.folder
    resource_pool = mock.sentinel.resource_pool
    host = mock.sentinel.host
    ds_name = mock.sentinel.ds_name
    profile_id = mock.sentinel.profile_id
    extra_config = mock.sentinel.extra_config
    ret = self.vops.create_backing_disk_less(name, folder, resource_pool,
                                             host, ds_name, profile_id,
                                             extra_config)
    self.assertEqual(mock.sentinel.result, ret)
    get_create_spec_disk_less.assert_called_once_with(
        name, ds_name, profileId=profile_id, extra_config=extra_config)
    self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                    'CreateVM_Task',
                                                    folder,
                                                    config=create_spec,
                                                    pool=resource_pool,
                                                    host=host)
    self.session.wait_for_task.assert_called_once_with(task)
def test_get_datastore(self):
    """get_datastore returns the first datastore of the backing VM."""
    backing = mock.sentinel.backing
    prop = mock.Mock(spec=object)
    prop.ManagedObjectReference = [mock.sentinel.ds]
    self.session.invoke_api.return_value = prop
    self.assertEqual(mock.sentinel.ds, self.vops.get_datastore(backing))
    self.session.invoke_api.assert_called_once_with(
        vim_util, 'get_object_property', self.session.vim, backing,
        'datastore')
def test_get_summary(self):
    """get_summary reads the 'summary' property of the datastore."""
    ds = mock.sentinel.datastore
    expected = mock.sentinel.summary
    self.session.invoke_api.return_value = expected
    self.assertEqual(expected, self.vops.get_summary(ds))
    self.session.invoke_api.assert_called_once_with(
        vim_util, 'get_object_property', self.session.vim, ds, 'summary')
def test_get_relocate_spec(self):
    """Relocate spec carries datastore/pool/host and optional disk locator."""
    # Flag toggled between the two sub-cases; read by the closure below.
    delete_disk_attribute = True
    def _create_side_effect(type):
        # Emulate suds factory objects, some of which lack optional attrs.
        obj = mock.Mock()
        if type == "ns0:VirtualDiskFlatVer2BackingInfo":
            del obj.eagerlyScrub
        elif (type == "ns0:VirtualMachineRelocateSpec" and
              delete_disk_attribute):
            del obj.disk
        else:
            pass
        return obj
    factory = self.session.vim.client.factory
    factory.create.side_effect = _create_side_effect
    datastore = mock.sentinel.datastore
    resource_pool = mock.sentinel.resource_pool
    host = mock.sentinel.host
    disk_move_type = mock.sentinel.disk_move_type
    ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
                                       disk_move_type)
    self.assertEqual(datastore, ret.datastore)
    self.assertEqual(resource_pool, ret.pool)
    self.assertEqual(host, ret.host)
    self.assertEqual(disk_move_type, ret.diskMoveType)
    # Test with disk locator.
    delete_disk_attribute = False
    disk_type = 'thin'
    disk_device = mock.Mock()
    ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
                                       disk_move_type, disk_type,
                                       disk_device)
    factory.create.side_effect = None
    self.assertEqual(datastore, ret.datastore)
    self.assertEqual(resource_pool, ret.pool)
    self.assertEqual(host, ret.host)
    self.assertEqual(disk_move_type, ret.diskMoveType)
    self.assertIsInstance(ret.disk, list)
    self.assertEqual(1, len(ret.disk))
    disk_locator = ret.disk[0]
    self.assertEqual(datastore, disk_locator.datastore)
    self.assertEqual(disk_device.key, disk_locator.diskId)
    backing = disk_locator.diskBackingInfo
    self.assertIsInstance(backing.thinProvisioned, bool)
    self.assertTrue(backing.thinProvisioned)
    self.assertEqual('', backing.fileName)
    self.assertEqual('persistent', backing.diskMode)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_disk_device')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_relocate_spec')
def test_relocate_backing(self, get_relocate_spec, get_disk_device):
    """relocate_backing builds a relocate spec and runs RelocateVM_Task."""
    disk_device = mock.sentinel.disk_device
    get_disk_device.return_value = disk_device
    spec = mock.sentinel.relocate_spec
    get_relocate_spec.return_value = spec
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    backing = mock.sentinel.backing
    datastore = mock.sentinel.datastore
    resource_pool = mock.sentinel.resource_pool
    host = mock.sentinel.host
    disk_type = mock.sentinel.disk_type
    self.vops.relocate_backing(backing, datastore, resource_pool, host,
                               disk_type)
    # Verify calls
    disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
    get_disk_device.assert_called_once_with(backing)
    get_relocate_spec.assert_called_once_with(datastore, resource_pool,
                                              host, disk_move_type,
                                              disk_type, disk_device)
    self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                    'RelocateVM_Task',
                                                    backing,
                                                    spec=spec)
    self.session.wait_for_task.assert_called_once_with(task)
def test_move_backing_to_folder(self):
    """move_backing_to_folder triggers MoveIntoFolder_Task and waits."""
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    backing = mock.sentinel.backing
    dest_folder = mock.sentinel.folder
    self.vops.move_backing_to_folder(backing, dest_folder)
    self.session.invoke_api.assert_called_once_with(
        self.session.vim, 'MoveIntoFolder_Task', dest_folder,
        list=[backing])
    self.session.wait_for_task.assert_called_once_with(task)
def test_create_snapshot_operation(self):
    """create_snapshot runs CreateSnapshot_Task (memory=False) and waits."""
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    task_info = mock.Mock(spec=object)
    task_info.result = mock.sentinel.result
    self.session.wait_for_task.return_value = task_info
    backing = mock.sentinel.backing
    name = mock.sentinel.name
    desc = mock.sentinel.description
    quiesce = True
    ret = self.vops.create_snapshot(backing, name, desc, quiesce)
    self.assertEqual(mock.sentinel.result, ret)
    self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                    'CreateSnapshot_Task',
                                                    backing, name=name,
                                                    description=desc,
                                                    memory=False,
                                                    quiesce=quiesce)
    self.session.wait_for_task.assert_called_once_with(task)
def test_get_snapshot_from_tree(self):
    """_get_snapshot_from_tree searches a snapshot tree node by name."""
    volops = volumeops.VMwareVolumeOps
    name = mock.sentinel.name
    # Test snapshot == 'None'
    ret = volops._get_snapshot_from_tree(name, None)
    self.assertIsNone(ret)
    # Test root == snapshot
    snapshot = mock.sentinel.snapshot
    node = mock.Mock(spec=object)
    node.name = name
    node.snapshot = snapshot
    ret = volops._get_snapshot_from_tree(name, node)
    self.assertEqual(snapshot, ret)
    # Test root.childSnapshotList == None
    root = mock.Mock(spec=object)
    root.name = 'root'
    del root.childSnapshotList
    ret = volops._get_snapshot_from_tree(name, root)
    self.assertIsNone(ret)
    # Test root.child == snapshot
    root.childSnapshotList = [node]
    ret = volops._get_snapshot_from_tree(name, root)
    self.assertEqual(snapshot, ret)
def test_get_snapshot(self):
    """get_snapshot reads the VM's snapshot tree and looks up by name."""
    # build out the root snapshot tree
    snapshot_name = mock.sentinel.snapshot_name
    snapshot = mock.sentinel.snapshot
    root = mock.Mock(spec=object)
    root.name = 'root'
    node = mock.Mock(spec=object)
    node.name = snapshot_name
    node.snapshot = snapshot
    root.childSnapshotList = [node]
    # Test rootSnapshotList is not None
    snapshot_tree = mock.Mock(spec=object)
    snapshot_tree.rootSnapshotList = [root]
    self.session.invoke_api.return_value = snapshot_tree
    backing = mock.sentinel.backing
    ret = self.vops.get_snapshot(backing, snapshot_name)
    self.assertEqual(snapshot, ret)
    self.session.invoke_api.assert_called_with(vim_util,
                                               'get_object_property',
                                               self.session.vim,
                                               backing,
                                               'snapshot')
    # Test rootSnapshotList == None
    snapshot_tree.rootSnapshotList = None
    ret = self.vops.get_snapshot(backing, snapshot_name)
    self.assertIsNone(ret)
    self.session.invoke_api.assert_called_with(vim_util,
                                               'get_object_property',
                                               self.session.vim,
                                               backing,
                                               'snapshot')
def test_snapshot_exists(self):
    """snapshot_exists is True only with a non-empty rootSnapshotList."""
    backing = mock.sentinel.backing
    invoke_api = self.session.invoke_api
    # No 'snapshot' property at all.
    invoke_api.return_value = None
    self.assertFalse(self.vops.snapshot_exists(backing))
    invoke_api.assert_called_once_with(vim_util,
                                       'get_object_property',
                                       self.session.vim,
                                       backing,
                                       'snapshot')
    snapshot = mock.Mock()
    invoke_api.return_value = snapshot
    # Property present but with an empty root list.
    snapshot.rootSnapshotList = None
    self.assertFalse(self.vops.snapshot_exists(backing))
    snapshot.rootSnapshotList = [mock.Mock()]
    self.assertTrue(self.vops.snapshot_exists(backing))
def test_delete_snapshot(self):
    """delete_snapshot: no-op for unknown names, else RemoveSnapshot_Task."""
    backing = mock.sentinel.backing
    snapshot_name = mock.sentinel.snapshot_name
    # Test snapshot is None
    with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
        get_snapshot.return_value = None
        self.vops.delete_snapshot(backing, snapshot_name)
        get_snapshot.assert_called_once_with(backing, snapshot_name)
    # Test snapshot is not None
    snapshot = mock.sentinel.snapshot
    task = mock.sentinel.task
    invoke_api = self.session.invoke_api
    invoke_api.return_value = task
    with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
        get_snapshot.return_value = snapshot
        self.vops.delete_snapshot(backing, snapshot_name)
        get_snapshot.assert_called_with(backing, snapshot_name)
        # Child snapshots are preserved when removing a single snapshot.
        invoke_api.assert_called_once_with(self.session.vim,
                                           'RemoveSnapshot_Task',
                                           snapshot, removeChildren=False)
        self.session.wait_for_task.assert_called_once_with(task)
def test_get_folder(self):
    """_get_folder delegates to _get_parent with type 'Folder'."""
    expected = mock.sentinel.folder
    backing = mock.sentinel.backing
    with mock.patch.object(self.vops, '_get_parent') as get_parent:
        get_parent.return_value = expected
        self.assertEqual(expected, self.vops._get_folder(backing))
        get_parent.assert_called_once_with(backing, 'Folder')
def _verify_extra_config(self, option_values, key, value):
    """Assert option_values holds exactly one entry with (key, value)."""
    self.assertEqual(1, len(option_values))
    option = option_values[0]
    self.assertEqual(key, option.key)
    self.assertEqual(value, option.value)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_relocate_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_disk_device')
def test_get_clone_spec(self, get_disk_device, get_relocate_spec):
    """Clone spec: relocate info, powered-off non-template, extraConfig."""
    factory = self.session.vim.client.factory
    factory.create.side_effect = lambda *args: mock.Mock()
    relocate_spec = mock.sentinel.relocate_spec
    get_relocate_spec.return_value = relocate_spec
    # Test with empty disk type.
    datastore = mock.sentinel.datastore
    disk_move_type = mock.sentinel.disk_move_type
    snapshot = mock.sentinel.snapshot
    disk_type = None
    backing = mock.sentinel.backing
    host = mock.sentinel.host
    rp = mock.sentinel.rp
    key = mock.sentinel.key
    value = mock.sentinel.value
    extra_config = {key: value,
                    volumeops.BACKING_UUID_KEY: mock.sentinel.uuid}
    ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
                                    backing, disk_type, host, rp,
                                    extra_config)
    self.assertEqual(relocate_spec, ret.location)
    self.assertFalse(ret.powerOn)
    self.assertFalse(ret.template)
    self.assertEqual(snapshot, ret.snapshot)
    # The backing UUID is extracted into the config's instanceUuid.
    self.assertEqual(mock.sentinel.uuid, ret.config.instanceUuid)
    get_relocate_spec.assert_called_once_with(datastore, rp, host,
                                              disk_move_type, disk_type,
                                              None)
    self._verify_extra_config(ret.config.extraConfig, key, value)
    # Test with non-empty disk type.
    disk_device = mock.sentinel.disk_device
    get_disk_device.return_value = disk_device
    disk_type = 'thin'
    ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
                                    backing, disk_type, host, rp,
                                    extra_config)
    factory.create.side_effect = None
    self.assertEqual(relocate_spec, ret.location)
    self.assertFalse(ret.powerOn)
    self.assertFalse(ret.template)
    self.assertEqual(snapshot, ret.snapshot)
    get_disk_device.assert_called_once_with(backing)
    get_relocate_spec.assert_called_with(datastore, rp, host,
                                         disk_move_type, disk_type,
                                         disk_device)
    self._verify_extra_config(ret.config.extraConfig, key, value)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_folder')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_clone_spec')
def _test_clone_backing(
        self, clone_type, folder, get_clone_spec, get_folder):
    """Helper: verify clone_backing for a clone type and target folder."""
    backing_folder = mock.sentinel.backing_folder
    get_folder.return_value = backing_folder
    clone_spec = mock.sentinel.clone_spec
    get_clone_spec.return_value = clone_spec
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    clone = mock.sentinel.clone
    self.session.wait_for_task.return_value = mock.Mock(result=clone)
    name = mock.sentinel.name
    backing = mock.sentinel.backing
    snapshot = mock.sentinel.snapshot
    datastore = mock.sentinel.datastore
    disk_type = mock.sentinel.disk_type
    host = mock.sentinel.host
    resource_pool = mock.sentinel.resource_pool
    extra_config = mock.sentinel.extra_config
    ret = self.vops.clone_backing(
        name, backing, snapshot, clone_type, datastore,
        disk_type=disk_type, host=host, resource_pool=resource_pool,
        extra_config=extra_config, folder=folder)
    # An explicit folder skips looking up the backing's parent folder.
    if folder:
        self.assertFalse(get_folder.called)
    else:
        get_folder.assert_called_once_with(backing)
    # The expected disk move type depends on the clone type.
    if clone_type == 'linked':
        exp_disk_move_type = 'createNewChildDiskBacking'
    else:
        exp_disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
    get_clone_spec.assert_called_once_with(
        datastore, exp_disk_move_type, snapshot, backing, disk_type,
        host=host, resource_pool=resource_pool, extra_config=extra_config)
    exp_folder = folder if folder else backing_folder
    self.session.invoke_api.assert_called_once_with(
        self.session.vim, 'CloneVM_Task', backing, folder=exp_folder,
        name=name, spec=clone_spec)
    self.session.wait_for_task.assert_called_once_with(task)
    self.assertEqual(clone, ret)
@ddt.data('linked', 'full')
def test_clone_backing(self, clone_type):
    """Run the clone test for each clone type with an explicit folder."""
    self._test_clone_backing(clone_type, mock.sentinel.folder)
def test_clone_backing_with_empty_folder(self):
    """Run the clone test without a folder (parent-folder lookup path)."""
    self._test_clone_backing('linked', None)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_create_specs_for_disk_add')
def test_attach_disk_to_backing(self, create_spec):
    """attach_disk_to_backing issues ReconfigVM_Task with disk-add specs."""
    reconfig_spec = mock.Mock()
    self.session.vim.client.factory.create.return_value = reconfig_spec
    disk_add_config_specs = mock.Mock()
    create_spec.return_value = disk_add_config_specs
    task = mock.Mock()
    self.session.invoke_api.return_value = task
    backing = mock.Mock()
    size_in_kb = units.Ki
    disk_type = "thin"
    adapter_type = "ide"
    profile_id = mock.sentinel.profile_id
    vmdk_ds_file_path = mock.sentinel.vmdk_ds_file_path
    self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type,
                                     adapter_type, profile_id,
                                     vmdk_ds_file_path)
    self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange)
    create_spec.assert_called_once_with(
        size_in_kb, disk_type, adapter_type, profile_id,
        vmdk_ds_file_path=vmdk_ds_file_path)
    self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                    "ReconfigVM_Task",
                                                    backing,
                                                    spec=reconfig_spec)
    self.session.wait_for_task.assert_called_once_with(task)
def test_create_spec_for_disk_remove(self):
    """_create_spec_for_disk_remove builds a 'remove' device config spec."""
    spec = mock.Mock()
    factory = self.session.vim.client.factory
    factory.create.return_value = spec
    device = mock.sentinel.disk_device
    self.vops._create_spec_for_disk_remove(device)
    factory.create.assert_called_once_with('ns0:VirtualDeviceConfigSpec')
    self.assertEqual('remove', spec.operation)
    self.assertEqual(device, spec.device)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_create_spec_for_disk_remove')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_reconfigure_backing')
def test_detach_disk_from_backing(self, reconfigure_backing, create_spec):
    """detach_disk_from_backing reconfigures the VM with a remove spec."""
    disk_spec = mock.sentinel.disk_spec
    create_spec.return_value = disk_spec
    reconfig_spec = mock.Mock()
    self.session.vim.client.factory.create.return_value = reconfig_spec
    backing = mock.sentinel.backing
    disk_device = mock.sentinel.disk_device
    self.vops.detach_disk_from_backing(backing, disk_device)
    create_spec.assert_called_once_with(disk_device)
    self.session.vim.client.factory.create.assert_called_once_with(
        'ns0:VirtualMachineConfigSpec')
    self.assertEqual([disk_spec], reconfig_spec.deviceChange)
    reconfigure_backing.assert_called_once_with(backing, reconfig_spec)
def test_rename_backing(self):
    """rename_backing issues Rename_Task and waits for completion."""
    task = mock.sentinel.task
    self.session.invoke_api.return_value = task
    backing = mock.sentinel.backing
    new_name = mock.sentinel.new_name
    self.vops.rename_backing(backing, new_name)
    self.session.invoke_api.assert_called_once_with(
        self.session.vim, "Rename_Task", backing, newName=new_name)
    self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_disk_device')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_reconfigure_backing')
def test_update_backing_disk_uuid(self, reconfigure_backing,
                                  get_disk_device):
    """update_backing_disk_uuid edits the disk backing UUID via reconfig."""
    disk_spec = mock.Mock()
    reconfig_spec = mock.Mock()
    # First factory.create call yields the device spec, second the VM spec.
    self.session.vim.client.factory.create.side_effect = [disk_spec,
                                                          reconfig_spec]
    disk_device = mock.Mock()
    get_disk_device.return_value = disk_device
    self.vops.update_backing_disk_uuid(mock.sentinel.backing,
                                       mock.sentinel.disk_uuid)
    get_disk_device.assert_called_once_with(mock.sentinel.backing)
    self.assertEqual(mock.sentinel.disk_uuid, disk_device.backing.uuid)
    self.assertEqual('edit', disk_spec.operation)
    self.assertEqual(disk_device, disk_spec.device)
    self.assertEqual([disk_spec], reconfig_spec.deviceChange)
    reconfigure_backing.assert_called_once_with(mock.sentinel.backing,
                                                reconfig_spec)
    exp_factory_create_calls = [mock.call('ns0:VirtualDeviceConfigSpec'),
                                mock.call('ns0:VirtualMachineConfigSpec')]
    self.assertEqual(exp_factory_create_calls,
                     self.session.vim.client.factory.create.call_args_list)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_get_extra_config_option_values')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
            '_reconfigure_backing')
def test_update_backing_extra_config(self,
                                     reconfigure_backing,
                                     get_extra_config_option_values):
    """update_backing_extra_config splits out the UUID and reconfigures."""
    reconfig_spec = mock.Mock()
    self.session.vim.client.factory.create.return_value = reconfig_spec
    option_values = mock.sentinel.option_values
    get_extra_config_option_values.return_value = option_values
    backing = mock.sentinel.backing
    option_key = mock.sentinel.key
    option_value = mock.sentinel.value
    extra_config = {option_key: option_value,
                    volumeops.BACKING_UUID_KEY: mock.sentinel.uuid}
    self.vops.update_backing_extra_config(backing, extra_config)
    # BACKING_UUID_KEY is consumed for instanceUuid, not passed through.
    get_extra_config_option_values.assert_called_once_with(
        {option_key: option_value})
    self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid)
    self.assertEqual(option_values, reconfig_spec.extraConfig)
    reconfigure_backing.assert_called_once_with(backing, reconfig_spec)
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                '_reconfigure_backing')
    def test_update_backing_uuid(self, reconfigure_backing):
        """Verify the backing's instanceUuid is set via a reconfigure."""
        reconfig_spec = mock.Mock()
        self.session.vim.client.factory.create.return_value = reconfig_spec
        backing = mock.sentinel.backing
        uuid = mock.sentinel.uuid
        self.vops.update_backing_uuid(backing, uuid)
        self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid)
        reconfigure_backing.assert_called_once_with(backing, reconfig_spec)
    def test_change_backing_profile(self):
        """Verify ReconfigVM_Task for both empty and non-empty profiles."""
        # Test change to empty profile.
        reconfig_spec = mock.Mock()
        empty_profile_spec = mock.sentinel.empty_profile_spec
        self.session.vim.client.factory.create.side_effect = [
            reconfig_spec, empty_profile_spec]
        task = mock.sentinel.task
        self.session.invoke_api.return_value = task
        backing = mock.sentinel.backing
        unique_profile_id = mock.sentinel.unique_profile_id
        profile_id = mock.Mock(uniqueId=unique_profile_id)
        self.vops.change_backing_profile(backing, profile_id)
        self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile)
        self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                        "ReconfigVM_Task",
                                                        backing,
                                                        spec=reconfig_spec)
        self.session.wait_for_task.assert_called_once_with(task)
        # Test change to non-empty profile.
        profile_spec = mock.Mock()
        self.session.vim.client.factory.create.side_effect = [
            reconfig_spec, profile_spec]
        self.session.invoke_api.reset_mock()
        self.session.wait_for_task.reset_mock()
        self.vops.change_backing_profile(backing, profile_id)
        self.assertEqual([profile_spec], reconfig_spec.vmProfile)
        self.assertEqual(unique_profile_id,
                         reconfig_spec.vmProfile[0].profileId)
        self.session.invoke_api.assert_called_once_with(self.session.vim,
                                                        "ReconfigVM_Task",
                                                        backing,
                                                        spec=reconfig_spec)
        self.session.wait_for_task.assert_called_once_with(task)
        # Clear side effects so later tests see a pristine factory mock.
        self.session.vim.client.factory.create.side_effect = None
    def test_delete_file(self):
        """Verify delete_file issues DeleteDatastoreFile_Task and waits."""
        file_mgr = mock.sentinel.file_manager
        self.session.vim.service_content.fileManager = file_mgr
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        # Test delete file
        file_path = mock.sentinel.file_path
        datacenter = mock.sentinel.datacenter
        self.vops.delete_file(file_path, datacenter)
        # verify calls
        invoke_api.assert_called_once_with(self.session.vim,
                                           'DeleteDatastoreFile_Task',
                                           file_mgr,
                                           name=file_path,
                                           datacenter=datacenter)
        self.session.wait_for_task.assert_called_once_with(task)
    def test_create_datastore_folder(self):
        """Verify folder creation maps to a MakeDirectory API call."""
        file_manager = mock.sentinel.file_manager
        self.session.vim.service_content.fileManager = file_manager
        invoke_api = self.session.invoke_api
        ds_name = "nfs"
        folder_path = "test/"
        datacenter = mock.sentinel.datacenter
        self.vops.create_datastore_folder(ds_name, folder_path, datacenter)
        # The datastore-relative path is rendered as "[<ds>] <folder>".
        invoke_api.assert_called_once_with(self.session.vim,
                                           'MakeDirectory',
                                           file_manager,
                                           name="[nfs] test/",
                                           datacenter=datacenter)
    def test_create_datastore_folder_with_existing_folder(self):
        """Verify FileAlreadyExistsException from MakeDirectory is ignored."""
        file_manager = mock.sentinel.file_manager
        self.session.vim.service_content.fileManager = file_manager
        invoke_api = self.session.invoke_api
        invoke_api.side_effect = exceptions.FileAlreadyExistsException
        ds_name = "nfs"
        folder_path = "test/"
        datacenter = mock.sentinel.datacenter
        # Must not raise despite the exception from invoke_api.
        self.vops.create_datastore_folder(ds_name, folder_path, datacenter)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'MakeDirectory',
                                           file_manager,
                                           name="[nfs] test/",
                                           datacenter=datacenter)
        invoke_api.side_effect = None
    def test_create_datastore_folder_with_invoke_api_error(self):
        """Verify other VimFaultExceptions from MakeDirectory propagate."""
        file_manager = mock.sentinel.file_manager
        self.session.vim.service_content.fileManager = file_manager
        invoke_api = self.session.invoke_api
        invoke_api.side_effect = exceptions.VimFaultException(
            ["FileFault"], "error")
        ds_name = "nfs"
        folder_path = "test/"
        datacenter = mock.sentinel.datacenter
        self.assertRaises(exceptions.VimFaultException,
                          self.vops.create_datastore_folder,
                          ds_name,
                          folder_path,
                          datacenter)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'MakeDirectory',
                                           file_manager,
                                           name="[nfs] test/",
                                           datacenter=datacenter)
        invoke_api.side_effect = None
    def test_get_path_name(self):
        """Verify vmPathName is read from the 'config.files' property."""
        path = mock.Mock(spec=object)
        path_name = mock.sentinel.vm_path_name
        path.vmPathName = path_name
        invoke_api = self.session.invoke_api
        invoke_api.return_value = path
        backing = mock.sentinel.backing
        ret = self.vops.get_path_name(backing)
        self.assertEqual(path_name, ret)
        invoke_api.assert_called_once_with(vim_util, 'get_object_property',
                                           self.session.vim, backing,
                                           'config.files')
    def test_get_entity_name(self):
        """Verify the entity's 'name' property is fetched and returned."""
        entity_name = mock.sentinel.entity_name
        invoke_api = self.session.invoke_api
        invoke_api.return_value = entity_name
        entity = mock.sentinel.entity
        ret = self.vops.get_entity_name(entity)
        self.assertEqual(entity_name, ret)
        invoke_api.assert_called_once_with(vim_util, 'get_object_property',
                                           self.session.vim, entity, 'name')
    def test_get_vmdk_path(self):
        """Verify vmdk path extraction from the backing's disk device."""
        # Setup hardware_devices for test
        device = mock.Mock()
        device.__class__.__name__ = 'VirtualDisk'
        backing = mock.Mock()
        backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo'
        backing.fileName = mock.sentinel.vmdk_path
        device.backing = backing
        invoke_api = self.session.invoke_api
        invoke_api.return_value = [device]
        # Test get_vmdk_path
        ret = self.vops.get_vmdk_path(backing)
        self.assertEqual(mock.sentinel.vmdk_path, ret)
        invoke_api.assert_called_once_with(vim_util, 'get_object_property',
                                           self.session.vim, backing,
                                           'config.hardware.device')
        # A non-flat backing class name must trip the assertion inside
        # get_vmdk_path (the leading space keeps the name from matching).
        backing.__class__.__name__ = ' VirtualDiskSparseVer2BackingInfo'
        self.assertRaises(AssertionError, self.vops.get_vmdk_path, backing)
        # Test with no disk device.
        invoke_api.return_value = []
        self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
                          self.vops.get_vmdk_path,
                          backing)
    def test_get_disk_size(self):
        """Verify disk size in bytes is derived from capacityInKB."""
        # Test with valid disk device.
        device = mock.Mock()
        device.__class__.__name__ = 'VirtualDisk'
        disk_size_bytes = 1024
        device.capacityInKB = disk_size_bytes / units.Ki
        invoke_api = self.session.invoke_api
        invoke_api.return_value = [device]
        self.assertEqual(disk_size_bytes,
                         self.vops.get_disk_size(mock.sentinel.backing))
        # Test with no disk device.
        invoke_api.return_value = []
        self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
                          self.vops.get_disk_size,
                          mock.sentinel.backing)
    def test_create_virtual_disk(self):
        """Verify spec population and CreateVirtualDisk_Task invocation."""
        task = mock.Mock()
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        spec = mock.Mock()
        factory = self.session.vim.client.factory
        factory.create.return_value = spec
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        dc_ref = mock.Mock()
        vmdk_ds_file_path = mock.Mock()
        size_in_kb = 1024
        adapter_type = 'ide'
        disk_type = 'thick'
        self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb,
                                      adapter_type, disk_type)
        # 'thick' must map to the preallocated virtual disk type.
        self.assertEqual(volumeops.VirtualDiskAdapterType.IDE,
                         spec.adapterType)
        self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED, spec.diskType)
        self.assertEqual(size_in_kb, spec.capacityKb)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'CreateVirtualDisk_Task',
                                           disk_mgr,
                                           name=vmdk_ds_file_path,
                                           datacenter=dc_ref,
                                           spec=spec)
        self.session.wait_for_task.assert_called_once_with(task)
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                'create_virtual_disk')
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                'delete_file')
    def test_create_flat_extent_virtual_disk_descriptor(self, delete_file,
                                                        create_virtual_disk):
        """Verify descriptor creation: disk created, flat extent deleted."""
        dc_ref = mock.Mock()
        path = mock.Mock()
        size_in_kb = 1024
        adapter_type = 'ide'
        disk_type = 'thick'
        self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref,
                                                             path,
                                                             size_in_kb,
                                                             adapter_type,
                                                             disk_type)
        create_virtual_disk.assert_called_once_with(
            dc_ref, path.get_descriptor_ds_file_path(), size_in_kb,
            adapter_type, disk_type)
        delete_file.assert_called_once_with(
            path.get_flat_extent_ds_file_path(), dc_ref)
    def test_copy_vmdk_file(self):
        """Verify CopyVirtualDisk_Task call with explicit dest datacenter."""
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        src_dc_ref = mock.sentinel.src_dc_ref
        src_vmdk_file_path = mock.sentinel.src_vmdk_file_path
        dest_dc_ref = mock.sentinel.dest_dc_ref
        dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path
        self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path,
                                 dest_vmdk_file_path, dest_dc_ref)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'CopyVirtualDisk_Task',
                                           disk_mgr,
                                           sourceName=src_vmdk_file_path,
                                           sourceDatacenter=src_dc_ref,
                                           destName=dest_vmdk_file_path,
                                           destDatacenter=dest_dc_ref,
                                           force=True)
        self.session.wait_for_task.assert_called_once_with(task)
    def test_copy_vmdk_file_with_default_dest_datacenter(self):
        """Verify dest datacenter defaults to the source datacenter."""
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        src_dc_ref = mock.sentinel.src_dc_ref
        src_vmdk_file_path = mock.sentinel.src_vmdk_file_path
        dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path
        self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path,
                                 dest_vmdk_file_path)
        # destDatacenter falls back to the source datacenter ref.
        invoke_api.assert_called_once_with(self.session.vim,
                                           'CopyVirtualDisk_Task',
                                           disk_mgr,
                                           sourceName=src_vmdk_file_path,
                                           sourceDatacenter=src_dc_ref,
                                           destName=dest_vmdk_file_path,
                                           destDatacenter=src_dc_ref,
                                           force=True)
        self.session.wait_for_task.assert_called_once_with(task)
    def test_move_vmdk_file(self):
        """Verify MoveVirtualDisk_Task invocation and task wait."""
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        src_dc_ref = mock.sentinel.src_dc_ref
        src_vmdk_file_path = mock.sentinel.src_vmdk_file_path
        dest_dc_ref = mock.sentinel.dest_dc_ref
        dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path
        self.vops.move_vmdk_file(src_dc_ref,
                                 src_vmdk_file_path,
                                 dest_vmdk_file_path,
                                 dest_dc_ref=dest_dc_ref)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'MoveVirtualDisk_Task',
                                           disk_mgr,
                                           sourceName=src_vmdk_file_path,
                                           sourceDatacenter=src_dc_ref,
                                           destName=dest_vmdk_file_path,
                                           destDatacenter=dest_dc_ref,
                                           force=True)
        self.session.wait_for_task.assert_called_once_with(task)
    def test_delete_vmdk_file(self):
        """Verify DeleteVirtualDisk_Task invocation and task wait."""
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        # dc_ref/vmdk_file are auto-created attributes of the session mock.
        dc_ref = self.session.dc_ref
        vmdk_file_path = self.session.vmdk_file
        self.vops.delete_vmdk_file(vmdk_file_path, dc_ref)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'DeleteVirtualDisk_Task',
                                           disk_mgr,
                                           name=vmdk_file_path,
                                           datacenter=dc_ref)
        self.session.wait_for_task.assert_called_once_with(task)
    @mock.patch('oslo_vmware.pbm.get_profiles_by_ids')
    @mock.patch('oslo_vmware.pbm.get_profiles')
    def test_get_profile(self, get_profiles, get_profiles_by_ids):
        """Verify profile name resolution through the PBM helper calls."""
        profile_ids = [mock.sentinel.profile_id]
        get_profiles.return_value = profile_ids
        profile_name = mock.sentinel.profile_name
        profile = mock.Mock()
        profile.name = profile_name
        get_profiles_by_ids.return_value = [profile]
        backing = mock.sentinel.backing
        self.assertEqual(profile_name, self.vops.get_profile(backing))
        get_profiles.assert_called_once_with(self.session, backing)
        get_profiles_by_ids.assert_called_once_with(self.session, profile_ids)
    @mock.patch('oslo_vmware.pbm.get_profiles_by_ids')
    @mock.patch('oslo_vmware.pbm.get_profiles')
    def test_get_profile_with_no_profile(self, get_profiles,
                                         get_profiles_by_ids):
        """Verify None is returned (and no ID lookup) with no profiles."""
        get_profiles.return_value = []
        backing = mock.sentinel.backing
        self.assertIsNone(self.vops.get_profile(backing))
        get_profiles.assert_called_once_with(self.session, backing)
        self.assertFalse(get_profiles_by_ids.called)
    def test_extend_virtual_disk(self):
        """Test volumeops.extend_virtual_disk."""
        task = mock.sentinel.task
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        fake_size = 5
        # newCapacityKb is in KB; size is presumably GB (GB * 2**20 == KB)
        # -- confirm against extend_virtual_disk's contract.
        fake_size_in_kb = fake_size * units.Mi
        fake_name = 'fake_volume_0000000001'
        fake_dc = mock.sentinel.datacenter
        self.vops.extend_virtual_disk(fake_size,
                                      fake_name, fake_dc)
        invoke_api.assert_called_once_with(self.session.vim,
                                           "ExtendVirtualDisk_Task",
                                           disk_mgr,
                                           name=fake_name,
                                           datacenter=fake_dc,
                                           newCapacityKb=fake_size_in_kb,
                                           eagerZero=False)
        self.session.wait_for_task.assert_called_once_with(task)
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                '_get_all_clusters')
    def test_get_cluster_refs(self, get_all_clusters):
        """Verify only the requested cluster names are returned."""
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        clusters = {"cls_1": cls_1, "cls_2": cls_2}
        get_all_clusters.return_value = clusters
        self.assertEqual({"cls_2": cls_2},
                         self.vops.get_cluster_refs(["cls_2"]))
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                '_get_all_clusters')
    def test_get_cluster_refs_with_invalid_cluster(self, get_all_clusters):
        """Verify an unknown cluster name raises ClusterNotFoundException."""
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        clusters = {"cls_1": cls_1, "cls_2": cls_2}
        get_all_clusters.return_value = clusters
        self.assertRaises(vmdk_exceptions.ClusterNotFoundException,
                          self.vops.get_cluster_refs,
                          ["cls_1", "cls_3"])
    def test_get_cluster_hosts(self):
        """Verify host refs are extracted from the cluster 'host' property."""
        host_1 = mock.sentinel.host_1
        host_2 = mock.sentinel.host_2
        hosts = mock.Mock(ManagedObjectReference=[host_1, host_2])
        self.session.invoke_api.return_value = hosts
        cluster = mock.sentinel.cluster
        ret = self.vops.get_cluster_hosts(cluster)
        self.assertEqual([host_1, host_2], ret)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'get_object_property',
                                                        self.session.vim,
                                                        cluster,
                                                        'host')
    def test_get_cluster_hosts_with_no_host(self):
        """Verify an empty list is returned when the cluster has no hosts."""
        self.session.invoke_api.return_value = None
        cluster = mock.sentinel.cluster
        ret = self.vops.get_cluster_hosts(cluster)
        self.assertEqual([], ret)
        self.session.invoke_api.assert_called_once_with(vim_util,
                                                        'get_object_property',
                                                        self.session.vim,
                                                        cluster,
                                                        'host')
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                'continue_retrieval', return_value=None)
    def test_get_all_clusters(self, continue_retrieval):
        """Verify the name->moref map built from retrieved cluster objects."""
        prop_1 = mock.Mock(val='test_cluster_1')
        cls_1 = mock.Mock(propSet=[prop_1], obj=mock.sentinel.mor_1)
        prop_2 = mock.Mock(val='/test_cluster_2')
        cls_2 = mock.Mock(propSet=[prop_2], obj=mock.sentinel.mor_2)
        retrieve_result = mock.Mock(objects=[cls_1, cls_2])
        self.session.invoke_api.return_value = retrieve_result
        ret = self.vops._get_all_clusters()
        # Names are taken verbatim from the property value, slashes included.
        exp = {'test_cluster_1': mock.sentinel.mor_1,
               '/test_cluster_2': mock.sentinel.mor_2}
        self.assertEqual(exp, ret)
        self.session.invoke_api.assert_called_once_with(
            vim_util, 'get_objects', self.session.vim,
            'ClusterComputeResource', self.MAX_OBJECTS)
        continue_retrieval.assert_called_once_with(retrieve_result)
    def test_get_entity_by_inventory_path(self):
        """Verify FindByInventoryPath is called on the search index."""
        self.session.invoke_api.return_value = mock.sentinel.ref
        path = mock.sentinel.path
        ret = self.vops.get_entity_by_inventory_path(path)
        self.assertEqual(mock.sentinel.ref, ret)
        self.session.invoke_api.assert_called_once_with(
            self.session.vim,
            "FindByInventoryPath",
            self.session.vim.service_content.searchIndex,
            inventoryPath=path)
    def test_get_disk_devices(self):
        """Verify only VirtualDisk devices are kept from the device list."""
        disk_device = mock.Mock()
        disk_device.__class__.__name__ = 'VirtualDisk'
        controller_device = mock.Mock()
        controller_device.__class__.__name__ = 'VirtualLSILogicController'
        devices = mock.Mock()
        devices.__class__.__name__ = "ArrayOfVirtualDevice"
        devices.VirtualDevice = [disk_device, controller_device]
        self.session.invoke_api.return_value = devices
        vm = mock.sentinel.vm
        # The controller device must be filtered out.
        self.assertEqual([disk_device], self.vops._get_disk_devices(vm))
        self.session.invoke_api.assert_called_once_with(
            vim_util, 'get_object_property', self.session.vim,
            vm, 'config.hardware.device')
def _create_disk_device(self, file_name):
backing = mock.Mock(fileName=file_name)
backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo'
return mock.Mock(backing=backing)
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                '_get_disk_devices')
    def test_get_disk_device(self, get_disk_devices):
        """Verify the device whose backing matches the vmdk path wins."""
        dev_1 = self._create_disk_device('[ds1] foo/foo.vmdk')
        dev_2 = self._create_disk_device('[ds1] foo/foo_1.vmdk')
        get_disk_devices.return_value = [dev_1, dev_2]
        vm = mock.sentinel.vm
        self.assertEqual(dev_2,
                         self.vops.get_disk_device(vm, '[ds1] foo/foo_1.vmdk'))
        get_disk_devices.assert_called_once_with(vm)
class VirtualDiskPathTest(test.TestCase):
    """Unit tests for VirtualDiskPath."""
    def setUp(self):
        super(VirtualDiskPathTest, self).setUp()
        self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk")
    def test_get_datastore_file_path(self):
        actual = self._path.get_datastore_file_path("nfs", "A/B/disk.vmdk")
        self.assertEqual("[nfs] A/B/disk.vmdk", actual)
    def test_get_descriptor_file_path(self):
        actual = self._path.get_descriptor_file_path()
        self.assertEqual("A/B/disk.vmdk", actual)
    def test_get_descriptor_ds_file_path(self):
        actual = self._path.get_descriptor_ds_file_path()
        self.assertEqual("[nfs] A/B/disk.vmdk", actual)
class FlatExtentVirtualDiskPathTest(test.TestCase):
    """Unit tests for FlatExtentVirtualDiskPath."""
    def setUp(self):
        super(FlatExtentVirtualDiskPathTest, self).setUp()
        self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk")
    def test_get_flat_extent_file_path(self):
        actual = self._path.get_flat_extent_file_path()
        self.assertEqual("A/B/disk-flat.vmdk", actual)
    def test_get_flat_extent_ds_file_path(self):
        actual = self._path.get_flat_extent_ds_file_path()
        self.assertEqual("[nfs] A/B/disk-flat.vmdk", actual)
class VirtualDiskTypeTest(test.TestCase):
    """Unit tests for VirtualDiskType."""
    def test_is_valid(self):
        for disk_type in ("thick", "thin", "eagerZeroedThick"):
            self.assertTrue(volumeops.VirtualDiskType.is_valid(disk_type))
        self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated"))
    def test_validate(self):
        # Valid types must not raise.
        for disk_type in ("thick", "thin", "eagerZeroedThick"):
            volumeops.VirtualDiskType.validate(disk_type)
        self.assertRaises(vmdk_exceptions.InvalidDiskTypeException,
                          volumeops.VirtualDiskType.validate,
                          "preallocated")
    def test_get_virtual_disk_type(self):
        # (cinder disk type, expected vmdk disk type)
        mapping = (("thick", "preallocated"),
                   ("thin", "thin"),
                   ("eagerZeroedThick", "eagerZeroedThick"))
        for api_type, vmdk_type in mapping:
            self.assertEqual(
                vmdk_type,
                volumeops.VirtualDiskType.get_virtual_disk_type(api_type))
        self.assertRaises(vmdk_exceptions.InvalidDiskTypeException,
                          volumeops.VirtualDiskType.get_virtual_disk_type,
                          "preallocated")
class VirtualDiskAdapterTypeTest(test.TestCase):
    """Unit tests for VirtualDiskAdapterType."""
    def test_is_valid(self):
        for adapter_type in ("lsiLogic", "busLogic", "lsiLogicsas",
                             "paraVirtual", "ide"):
            self.assertTrue(
                volumeops.VirtualDiskAdapterType.is_valid(adapter_type))
        self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi"))
    def test_validate(self):
        # Valid types must not raise.
        for adapter_type in ("lsiLogic", "busLogic", "lsiLogicsas",
                             "paraVirtual", "ide"):
            volumeops.VirtualDiskAdapterType.validate(adapter_type)
        self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
                          volumeops.VirtualDiskAdapterType.validate,
                          "pvscsi")
    def test_get_adapter_type(self):
        # SAS and paravirtual controllers map back to plain lsiLogic.
        mapping = (("lsiLogic", "lsiLogic"),
                   ("busLogic", "busLogic"),
                   ("lsiLogicsas", "lsiLogic"),
                   ("paraVirtual", "lsiLogic"),
                   ("ide", "ide"))
        for given, expected in mapping:
            self.assertEqual(
                expected,
                volumeops.VirtualDiskAdapterType.get_adapter_type(given))
        self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
                          volumeops.VirtualDiskAdapterType.get_adapter_type,
                          "pvscsi")
class ControllerTypeTest(test.TestCase):
    """Unit tests for ControllerType."""
    def test_get_controller_type(self):
        mapping = (('lsiLogic', volumeops.ControllerType.LSI_LOGIC),
                   ('busLogic', volumeops.ControllerType.BUS_LOGIC),
                   ('lsiLogicsas', volumeops.ControllerType.LSI_LOGIC_SAS),
                   ('paraVirtual', volumeops.ControllerType.PARA_VIRTUAL),
                   ('ide', volumeops.ControllerType.IDE))
        for adapter_type, controller_type in mapping:
            self.assertEqual(
                controller_type,
                volumeops.ControllerType.get_controller_type(adapter_type))
        self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
                          volumeops.ControllerType.get_controller_type,
                          'invalid_type')
    def test_is_scsi_controller(self):
        scsi_types = (volumeops.ControllerType.LSI_LOGIC,
                      volumeops.ControllerType.BUS_LOGIC,
                      volumeops.ControllerType.LSI_LOGIC_SAS,
                      volumeops.ControllerType.PARA_VIRTUAL)
        for controller_type in scsi_types:
            self.assertTrue(
                volumeops.ControllerType.is_scsi_controller(controller_type))
        # IDE is the only non-SCSI controller type.
        self.assertFalse(volumeops.ControllerType.is_scsi_controller(
            volumeops.ControllerType.IDE))
|
{
"content_hash": "f961dc2e464550e3433f386c2754f11f",
"timestamp": "",
"source": "github",
"line_count": 1830,
"max_line_length": 79,
"avg_line_length": 44.94808743169399,
"alnum_prop": 0.5667011123943834,
"repo_name": "ge0rgi/cinder",
"id": "f5dedfcdb5612ddf22c89aae840f7c5124b8f1c3",
"size": "82887",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.manifold` module implements data embedding techniques.
"""
from .locally_linear import locally_linear_embedding, LocallyLinearEmbedding
from .isomap import Isomap
from .mds import MDS
|
{
"content_hash": "ffae3a8a486dd210adf5ec81ff64d820",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7971014492753623,
"repo_name": "sgenoud/scikit-learn",
"id": "7ae011739adf46649ec2d487f5e3c7e70a4a9347",
"size": "207",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/manifold/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7396960"
},
{
"name": "C++",
"bytes": "408753"
},
{
"name": "JavaScript",
"bytes": "4736"
},
{
"name": "Objective-C",
"bytes": "4595"
},
{
"name": "Python",
"bytes": "3013862"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``xpad`` property of ``surface.colorbar``."""

    def __init__(self, plotly_name="xpad", parent_name="surface.colorbar", **kwargs):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "ed9225f9f2820e5b0bcde2c66b101132",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 36.25,
"alnum_prop": 0.6022988505747127,
"repo_name": "plotly/plotly.py",
"id": "234b9cb0a70d67857ee78541e8c9bffdbf15132d",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/surface/colorbar/_xpad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import pg
import random
# Scene tuning parameters for the star field demo.
SPEED = 250          # scroll factor: multiplies self.t in Window.draw()
COUNT = 10000        # number of star planes created in Window.setup()
FIELD_SIZE = 2000    # x/y spread of star positions
FIELD_DEPTH = 2500   # z depth of one repeating field section
class Window(pg.Window):
    """Endless scrolling star field rendered from COUNT translated planes."""
    def setup(self):
        # Scatter small planes through the field volume and collect all of
        # their vertex positions into one vertex buffer.
        star_shape = pg.Plane((0, 0, 0), (0, 0, 1), 0.5, False)
        positions = []
        for _ in xrange(COUNT):
            offset = (
                (random.random() - 0.5) * FIELD_SIZE,
                (random.random() - 0.5) * FIELD_SIZE,
                random.random() * FIELD_DEPTH,
            )
            positions.extend((pg.Matrix().translate(offset) * star_shape).positions)
        self.context = pg.Context(pg.SolidColorProgram())
        self.context.position = pg.VertexBuffer(positions)
    def draw(self):
        self.clear()
        # Draw three copies of the field so stars wrap seamlessly as the
        # camera travels.
        for layer in xrange(-1, 2):
            depth = layer * FIELD_DEPTH + (-self.t * SPEED) % FIELD_DEPTH
            self.context.matrix = (
                pg.Matrix()
                .translate((0, 0, -depth))
                .perspective(65, self.aspect, 1, 1000))
            self.context.camera_position = (0, 0, depth)
            self.context.draw()
if __name__ == "__main__":
    # Hand the window class to pg's main loop.
    pg.run(Window)
|
{
"content_hash": "af8e3675c8f4b211655d515b39965a5c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 32.53125,
"alnum_prop": 0.5369836695485111,
"repo_name": "fogleman/pg",
"id": "7bfdaf75412933f65581e0d43e103612f497ec7d",
"size": "1041",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/starfield.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214724"
}
],
"symlink_target": ""
}
|
import os
import os.path
import fs
from .setup import *
def test_listdirs():
    """fs.listdirs must yield exactly the sub-directories of TEST_DIR."""
    dirs = [ os.path.join(TEST_DIR,f) for f in os.listdir(TEST_DIR)
        if os.path.isdir(os.path.join(TEST_DIR,f)) ]
    fs_dirs = list( fs.listdirs(TEST_DIR) )
    # BUG FIX: the original used "len(dirs) is len(fs_dirs)", which tests
    # int object *identity* -- only reliable for CPython's small-int cache.
    # Value equality is the correct check.
    assert len(dirs) == len(fs_dirs)
    assert sorted(dirs) == sorted(fs_dirs)
|
{
"content_hash": "cc79e2936bbb8e7a2926539d6155d455",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 21.866666666666667,
"alnum_prop": 0.6463414634146342,
"repo_name": "chaosmail/python-fs",
"id": "df7664eb5e1ccdd7a0f62d8e46a009f5481711eb",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs/tests/test_listdirs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26382"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
import os, stat, string, sys, time
from xml.sax import make_parser, handler
class AccessorDefHandler ( handler.ContentHandler ):
indexCount = { "field" : 1, \
"repetition" : 2, \
"component" : 2, \
"subcomponent" : 3 }
    # Constructor: initialize per-segment state. File handles stay None
    # until a <segment> element opens them in startElement().
    def __init__( self ):
        self.segmentId_ = ""
        self.headerFileName_ = ""
        self.headerFile_ = None
        self.sourceFileName_ = ""
        self.sourceFile_ = None
        self.comment_ = ""
        self.tabLength_ = 4
    # SAX callback: announce the start of accessor generation.
    def startDocument( self ):
        print "Creating accessors for segments:\n"
    # Handler for the "startElement" SAX event. A <segment> opens the
    # generated header/source pair; field-like elements emit one accessor
    # declaration and one definition each.
    def startElement( self, elementType, attrs ):
        elementType = string.lower( elementType )
        if elementType == "segment":
            self.segmentId_ = string.lower( attrs["id"] )
            self.comment_ = attrs["comment"]
            self.headerPath_ = "include/hl7parser/"
            self.headerFileName_ = "seg_" + self.segmentId_ + ".h"
            self.createHeader()
            self.sourcePath_ = "src/"
            self.sourceFileName_ = "seg_" + self.segmentId_ + ".c"
            self.createSource()
            print string.upper( self.segmentId_ ) + ": " + self.headerFileName_ + " / " + self.sourceFileName_
        elif elementType == "field" \
             or elementType == "repetition" \
             or elementType == "component" \
             or elementType == "subcomponent":
            name = ""
            index = ""
            dataType = "string"
            length = ""
            for attrName in attrs.keys():
                attrValue = attrs[attrName]
                if attrName == "name":
                    name = attrValue
                elif attrName == "index":
                    # 'index' is a sequence of numbers separated by dots ('.').
                    indexList = string.split( attrValue, "." )
                elif attrName == "data_type":
                    dataType = attrValue
                elif attrName == "length":
                    length = attrValue
                else:
                    print "WARNING: Unknown attribute " + attrName + " in " + elementType + " " + name + "."
            # NOTE(review): if an element lacks an "index" attribute,
            # indexList is unbound below; if it lacks "length", int("")
            # raises ValueError. Presumably the input XML always supplies
            # both -- confirm against the accessor definition files.
            elementLength = int( length )
            self.writeAccessorDecl( elementType, name, indexList, dataType, elementLength )
            self.writeAccessorDef( elementType, name, indexList, dataType, elementLength )
    # Handler for the "endElement" SAX event: closing a </segment> finalizes
    # and closes both generated files.
    def endElement( self, elementType ):
        elementType = string.lower( elementType )
        if elementType == "segment":
            self.closeHeader( );
            self.closeSource();
    # Creates the header file for the accessors of the HL7 segment and
    # writes the include guard plus the fixed preamble.
    def createHeader( self ):
        self.headerFile_ = os.open( self.headerPath_ + self.headerFileName_, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, \
                                    stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP )
        self.writeHeaderGuardBegin()
        self.writeFileTitle( self.headerFile_, self.headerFileName_ )
        self.writeHeaderBegin()
    # Closes the header file for the accessors of the HL7 segment, writing
    # the trailing boilerplate and the include-guard #endif first.
    def closeHeader( self ):
        self.writeHeaderEnd()
        self.writeHeaderGuardEnd()
        os.close( self.headerFile_ )
        self.headerFile_ = None
    # Creates the source file for the accessors of the HL7 segment and
    # writes the fixed preamble.
    def createSource( self ):
        self.sourceFile_ = os.open( self.sourcePath_ + self.sourceFileName_, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, \
                                    stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP )
        self.writeFileTitle( self.sourceFile_, self.sourceFileName_ )
        self.writeSourceBegin()
    # Closes the source file for the accessors of the HL7 segment.
    def closeSource( self ):
        self.writeSourceEnd()
        os.close( self.sourceFile_ )
        self.sourceFile_ = None
    # Emits the opening "#ifndef/#define" include guard
    # (HL7PARSER_SEG_<ID>_H) at the top of the generated header.
    def writeHeaderGuardBegin( self ):
        headerId = "HL7PARSER_SEG_" + string.upper( self.segmentId_ ) + "_H"
        os.write( self.headerFile_, \
                  "#ifndef " + headerId + "\n" \
                  "#define " + headerId + "\n\n" )
def writeHeaderGuardEnd( self ):
headerId = "HL7_SEG_" + string.upper( self.segmentId_ ) + "_H"
os.write( self.headerFile_, \
"\n" \
"#endif /* " + headerId + " */\n" )
    # Writes the doxygen file banner (including the autogeneration warning)
    # to the given file descriptor.
    def writeFileTitle( self, file, fileName ):
        today = time.localtime( time.time() )
        os.write( file, "/**\n" \
                        "* \\file " + fileName + "\n" \
                        "*\n" \
                        "* " + self.comment_ + " segment (" + string.upper( self.segmentId_ ) + ") accessors for the HL7 parser.\n" \
                        "*\n" \
                        "* \\internal\n" \
                        "* Copyright (c) " + str( today.tm_year ) + " Juan Jose Comellas <juanjo@comellas.org>\n" \
                        "*\n" \
                        "* \\warning DO NOT MODIFY THIS FILE.\n" \
                        "*\n"
                        "* Autogenerated by the " + sys.argv[0] + " script on " + time.ctime( time.time() ) + "\n" \
                        "*/\n\n" )
    # Writes the fixed header preamble: includes, BEGIN_C_DECL() and the
    # prototype section banner.
    def writeHeaderBegin( self ):
        os.write( self.headerFile_,
                  "/* ------------------------------------------------------------------------\n" \
                  "   Headers\n" \
                  "   ------------------------------------------------------------------------ */\n\n" \
                  "#include <hl7parser/config.h>\n" \
                  "#include <hl7parser/element.h>\n" \
                  "#include <hl7parser/export.h>\n" \
                  "#include <hl7parser/segment.h>\n\n" \
                  "BEGIN_C_DECL()\n\n" \
                  "/* ------------------------------------------------------------------------\n" \
                  "   Function prototypes\n" \
                  "   ------------------------------------------------------------------------ */\n\n" )
    # Writes the END_C_DECL() trailer of the generated header.
    def writeHeaderEnd( self ):
        os.write( self.headerFile_, \
                  "\n" \
                  "END_C_DECL()\n" )
    # Writes the fixed source preamble: includes (plus the generated
    # header) and BEGIN_C_DECL().
    def writeSourceBegin( self):
        os.write( self.sourceFile_,
                  "/* ------------------------------------------------------------------------\n"
                  "   Headers\n"
                  "   ------------------------------------------------------------------------ */\n\n"
                  "#include <hl7parser/config.h>\n"
                  "#include <hl7parser/element.h>\n"
                  "#include <hl7parser/export.h>\n"
                  "#include <hl7parser/segment.h>\n"
                  "#include <hl7parser/" + self.headerFileName_ + ">\n\n"
                  "BEGIN_C_DECL()\n\n" )
    # Writes the END_C_DECL() trailer of the generated source file.
    def writeSourceEnd( self ):
        os.write( self.sourceFile_, \
                  "END_C_DECL()\n" )
    # Writes the accessor declaration to the header file: one getter plus a
    # generic and a typed setter for the element.
    def writeAccessorDecl( self, elementType, name, indexList, dataType, length ):
        # Select C types and naming suffixes based on the HL7 data type;
        # anything other than integer/date is treated as a string element.
        if dataType == "integer":
            elementGetType = "int "
            elementSetType = "int "
            typedSetterSuffix = "_int"
            typedSetterDesc = "an integer"
            paramName = "value"
        elif dataType == "date":
            elementGetType = "time_t "
            elementSetType = "time_t "
            typedSetterSuffix = "_time"
            typedSetterDesc = "a time_t"
            paramName = "value"
        else:
            elementGetType = "HL7_Element *"
            elementSetType = "char *"
            typedSetterSuffix = "_str"
            typedSetterDesc = "a string"
            paramName = "element"
        # NOTE(review): paramName is assigned but never used in this method.
        # Human-readable coordinate like " (PID.4.1) " built from the
        # 0-based index list (+1 for display).
        elementCoord = " (" + string.upper( self.segmentId_ )
        for i in indexList:
            elementCoord += "."
            elementCoord += str( int( i ) + 1 )
        elementCoord += ") "
        accessorDecl = "/**\n" \
                       "* Get the " + name + elementCoord + elementType + ".\n" \
                       "*/\n" \
                       "HL7_EXPORT " + elementGetType + "hl7_" + self.segmentId_ + "_" + name + "( HL7_Segment *segment );\n"
        accessorDecl += "/**\n" \
                        "* Set the " + name + elementCoord + elementType + ".\n" \
                        "*/\n" + \
                        "HL7_EXPORT int hl7_" + self.segmentId_ + "_set_" + name + "( HL7_Segment *segment, HL7_Element *element );\n"
        accessorDecl += "/**\n" \
                        "* Set the " + name + elementCoord + elementType + " as " + typedSetterDesc + ".\n" \
                        "*/\n" + \
                        "HL7_EXPORT int hl7_" + self.segmentId_ + "_set_" + name + typedSetterSuffix + "( HL7_Segment *segment, const " + elementSetType + "value );\n"
        os.write( self.headerFile_, accessorDecl )
# Writes a standard accessor definition to the source file.
# Emits the C bodies of the getter, the raw HL7_Element setter, and the
# typed convenience setter for one segment element.
def writeAccessorDef( self, elementType, name, indexList, dataType, length ):
    # Per-data-type C glue: how to unwrap the element on get and how to
    # build a temporary HL7_Element from a typed value on set.
    if dataType == "string":
        elementGetType = "HL7_Element *"
        elementGetPrefix = ""
        elementGetSuffix = ""
        elementSetType = "char *"
        elementSetPrefix = "hl7_element_copy_str( "
        elementSetSuffix = ", segment->allocator )"
        typedSetterSuffix = "_str"
    elif dataType == "integer":
        elementGetType = "int "
        elementGetPrefix = "hl7_element_int( "
        elementGetSuffix = " )"
        elementSetType = "int "
        elementSetPrefix = "hl7_element_set_int( "
        elementSetSuffix = ", segment->allocator )"
        typedSetterSuffix = "_int"
    elif dataType == "date":
        elementGetType = "time_t "
        elementGetPrefix = "hl7_element_date( "
        elementGetSuffix = " )"
        elementSetType = "time_t "
        elementSetPrefix = "hl7_element_set_date( "
        # Include time?  A field length >= 12 holds at least YYYYMMDDHHMM.
        elementSetSuffix = ", "
        if length >= 12:
            elementSetSuffix += "true"
        else:
            elementSetSuffix += "false"
        # Include seconds?  A field length >= 14 holds YYYYMMDDHHMMSS.
        elementSetSuffix += ", "
        if length >= 14:
            elementSetSuffix += "true"
        else:
            elementSetSuffix += "false"
        elementSetSuffix += ", segment->allocator )"
        typedSetterSuffix = "_time"
    else:
        # Unknown data type in the XML definition: abort generation.
        print "ERROR: Unknown data type \"" + dataType + "\" in segment " + string.upper( self.segmentId_ ) + ", " + elementType + " " + name + "."
        sys.exit( 1 )
    # "" or "_rep" depending on whether an extra repetition index is present.
    segmentAccessorSuffix = self.accessorSuffix( elementType, name, indexList, dataType, length )
    # C argument list fragment for the element coordinates.
    indexStr = ""
    for i in indexList:
        indexStr += ", " + i
    indent = self.tab( 1 )
    # Getter body: delegate to the generic hl7_segment_* accessor.
    accessorDef = "/* ------------------------------------------------------------------------ */\n" \
                  "HL7_EXPORT " + elementGetType + "hl7_" + self.segmentId_ + "_" + name + "( HL7_Segment *segment )\n" \
                  + "{\n" \
                  + indent + "return " + elementGetPrefix + "hl7_segment_" + elementType + segmentAccessorSuffix + \
                  "( segment" + indexStr + " )" + elementGetSuffix + ";\n}\n\n"
    # Raw setter body.
    accessorDef += "/* ------------------------------------------------------------------------ */\n" \
                   "HL7_EXPORT int hl7_" + self.segmentId_ + "_set_" + name + "( HL7_Segment *segment, HL7_Element *element )\n" \
                   + "{\n" \
                   + indent + "return hl7_segment_set_" + elementType + segmentAccessorSuffix \
                   + "( segment" + indexStr + ", element );\n}\n\n"
    # Typed setter body: build a temporary element, then store it.
    accessorDef += "/* ------------------------------------------------------------------------ */\n" \
                   "HL7_EXPORT int hl7_" + self.segmentId_ + "_set_" + name + typedSetterSuffix + "( HL7_Segment *segment, const " + elementSetType + "value )\n" \
                   + "{\n" \
                   + indent + "int rc;\n" \
                   + indent + "HL7_Element element;\n\n" \
                   + indent + "rc = " + elementSetPrefix + "&element, value" + elementSetSuffix + ";\n\n" \
                   + indent + "return ( rc == 0 ? hl7_segment_set_" + elementType + segmentAccessorSuffix \
                   + "( segment" + indexStr + ", &element ) : rc );\n}\n\n"
    os.write( self.sourceFile_, accessorDef )
def accessorSuffix( self, elementType, name, indexList, dataType, length ):
    # Return "" or "_rep": when the element carries exactly one more index
    # than expected for its element type, the extra index addresses a
    # repetition and the "_rep" accessor variant must be generated.  Any
    # other mismatch is reported but tolerated.
    # (dataType and length are accepted for signature symmetry with the
    # write* methods; they are not used here.)
    suffix = ""
    # dict.has_key is Python 2 only; this generator script targets Python 2.
    if self.indexCount.has_key( elementType ):
        correctIndexCount = self.indexCount[elementType]
        if correctIndexCount != len( indexList ):
            if correctIndexCount + 1 == len( indexList ):
                suffix = "_rep"
            else:
                print "WARNING: segment " + string.upper( self.segmentId_ ) + ", " + elementType + " " + name + " has " \
                      + str( len( indexList ) ) + " elements and should have " + str( correctIndexCount )
    return suffix
def tab( self, level ):
    # Indentation whitespace for the given nesting level, using the
    # instance's configured tab width (self.tabLength_ spaces per level).
    return ' ' * ( self.tabLength_ * level )
def usage():
    # Print the command-line usage message and exit with status 1.
    # (Python 2 print statement; this script targets Python 2.)
    print "\nUsage: python " + sys.argv[0] + " <file name>\n" \
          "\n" \
          "Generates the source files for the HL7 segment accessor functions\n" \
          "described in the XML file passed as argument.\n\n"
    sys.exit( 1 )
# --- script entry point ----------------------------------------------------
# Parse the segment-definition XML file named on the command line and let
# AccessorDefHandler emit the generated C accessor header/source files.
try:
    xmlFile = sys.argv[1]
except:
    # No file name argument given: show usage and exit.
    usage()
parser = make_parser()
parser.setContentHandler( AccessorDefHandler() )
try:
    parser.parse( xmlFile )
except IOError, e:
    # Input file missing/unreadable (Python 2 except syntax).
    print "\nERROR: " + xmlFile + ": " + str( e )
except saxlib.SAXException, e:
    # Malformed XML in the segment definition file.
    print "\nERROR: " + str( e )
|
{
"content_hash": "700e5d6d5cae7471b3c5c70565fa3ef8",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 169,
"avg_line_length": 40.27450980392157,
"alnum_prop": 0.46028654889414383,
"repo_name": "jcomellas/hl7parser",
"id": "efc251518c585c87e7ee04943e32e14284380983",
"size": "14378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hl7segdef.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "438292"
},
{
"name": "C++",
"bytes": "700"
},
{
"name": "IDL",
"bytes": "7379"
},
{
"name": "Python",
"bytes": "14378"
}
],
"symlink_target": ""
}
|
from bson import ObjectId
from django.views.decorators.csrf import csrf_exempt
import pymongo
from PenBlog.func import *
__author__ = 'quanix'
def show_all(request):
    """Admin view: list all categories ordered by their 'Order' field."""
    db = connect_mongodb_database(request)
    info = db.infos.find_one()  # NOTE(review): fetched but never used — confirm before removing
    categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
    for category in categories:
        # Expose the ObjectId as a plain string for template URLs.
        category['Id'] = str(category['_id'])
    return render_admin_and_back(request, 'show-categories.html', {
        'page':u'分类',
        'categories':categories,
        'selection':'categories',
    })
@csrf_exempt
def new(request):
    """Admin view: create a new category (GET renders the form, POST saves)."""
    db = connect_mongodb_database(request)
    # Plain GET: render an empty category edit form.
    if request.method == 'GET':
        return render_admin_and_back(request, 'edit-category.html', {
            'page':u'新分类',
        })
    elif request.method == 'POST':
        d = request.POST
        # Fall back to order 0 when the order field is left blank.
        order = int(d['category-order']) if d['category-order'] else 0
        update = {
            'Title':d['category-title'],
            'Description':d['category-description'],
            'Order': order,
        }
        # Insert the new category document.
        db.categories.insert(update)
        # Re-number all categories so 'Order' values stay dense (0..n-1).
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
        return redirect(request, '新建分类成功', 'admin/show-categories/')
def update_category_of_articles(coll, old_cat, new_cat):
    """Rename a category inside every article document of *coll*.

    old_cat/new_cat are category documents; only their 'Title' fields are
    consulted.  Articles whose 'Categories' list contains the old title get
    that entry replaced with the new title.  A no-op when the title is
    unchanged.
    """
    old_title = old_cat['Title']
    new_title = new_cat['Title']
    if old_title == new_title:
        return
    for article in coll.find({'Categories': old_title}):
        categories = article['Categories']
        # Swap the first occurrence in place, then persist the list.
        categories[categories.index(old_title)] = new_title
        coll.update({'Id': article['Id']}, {'$set': {'Categories': categories}})
@csrf_exempt
def edit(request, objectId):
    """Admin view: edit a category (GET renders the form, POST applies it)."""
    db = connect_mongodb_database(request)
    id = ObjectId(objectId)
    # Plain GET: render the form pre-filled with the category.
    if request.method == 'GET':
        category = db.categories.find_one({'_id':id})
        return render_admin_and_back(request, 'edit-category.html', {
            'page':u'编辑分类',
            'category': category,
        })
    elif request.method == 'POST':
        d = request.POST
        # Fall back to order 0 when the order field is left blank.
        order = int(d['category-order']) if d['category-order'] else 0
        update = {
            'Title':d['category-title'],
            'Description':d['category-description'],
            'Order':order,
        }
        # Fetch all categories, ordered.
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        # Locate the category being edited (Python 2: filter returns a list).
        category = filter(lambda i: i['_id'] == id, categories)[0]
        db.categories.update(category, {'$set': update})
        # Move it to the requested position, then re-number densely.
        categories.remove(category)
        categories.insert(order, category)
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
        # Propagate a possible rename to all (visible and hidden) articles;
        # `category` still holds the pre-update title, `update` the new one.
        update_category_of_articles(db.articles, category, update)
        update_category_of_articles(db.hidden_articles, category, update)
        return redirect(request, '编辑分类成功', 'admin/show-categories/')
@csrf_exempt
def delete(request, objectId):
    """Admin view: delete a category and re-number the remaining ones."""
    db = connect_mongodb_database(request)
    id = ObjectId(objectId)
    if request.method == 'GET':
        db.categories.remove({'_id': id})
        # Fetch the remaining categories, ordered ...
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        # ... and re-number so 'Order' values stay dense (0..n-1).
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
        return redirect(request, '删除分类成功', 'admin/show-categories/')
|
{
"content_hash": "dd743a12c7d498c7326f7306a76b0ffb",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 82,
"avg_line_length": 29.061068702290076,
"alnum_prop": 0.5763068032571579,
"repo_name": "quanix/PenBlog",
"id": "67568d8adb1d99412e8008baa4be853ea8c3a7e7",
"size": "4050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PenBlog/admin/category.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "375983"
},
{
"name": "HTML",
"bytes": "157973"
},
{
"name": "JavaScript",
"bytes": "886645"
},
{
"name": "Python",
"bytes": "46915"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import collections
import collections.abc
import operator
from functools import reduce

import sympy as sym
import z3

import utils as U
from utils import print
def sympy2z3(sympy_exprs):
    """
    Convert sympy expressions to z3 expressions.

    Parameters
    ----------
    sympy_exprs : iterable of sym.Expr

    Returns
    -------
    (sym2Z3_varmap, z3_exprs) : dict mapping every sympy free symbol seen
        to the z3.Real variable created for it, and the list of translated
        z3 expressions (one per input expression).

    Notes
    ------
    collections.Iterable was removed in Python 3.10; collections.abc is the
    supported location for the ABCs.
    """
    assert(isinstance(sympy_exprs, collections.abc.Iterable))
    z3_exprs = []
    sym2Z3_varmap = {}
    for expr in sympy_exprs:
        assert(isinstance(expr, sym.Expr))
        sympy_vars = expr.free_symbols
        # One shared z3 Real per distinct sympy symbol.
        # NOTE(review): assumes U.dict_unique_add tolerates a symbol that
        # appears in several expressions — confirm its duplicate semantics.
        for v in sympy_vars:
            U.dict_unique_add(sym2Z3_varmap, v, z3.Real(str(v)))
        t = Sympy2z3(sym2Z3_varmap)
        z3_expr = t.visit(expr)
        z3_exprs.append(z3_expr)
    return sym2Z3_varmap, z3_exprs
class Sympy2z3(object):
    """
    Visitor that translates a sympy expression tree into a z3 expression.

    ``visit`` derives a method name ``visit_<ClassName>`` from the node's
    class and dispatches to it; unsupported node types fall through to
    ``generic_visit``, which raises.  Symbols are resolved through the
    ``sym2Z3_varmap`` supplied at construction time.

    Fix over the previous revision: ``visit_Mul``/``visit_Add`` used the
    bare name ``reduce``, which is a builtin only on Python 2; this module
    is Python 3 (``from utils import print``), so ``functools.reduce`` is
    imported at module level.
    """

    def __init__(self, sym2Z3_varmap):
        # sym2Z3_varmap: dict mapping sympy Symbol -> z3 variable.
        self.sym2Z3_varmap = sym2Z3_varmap
        return

    def visit(self, node):
        """Dispatch *node* to the visitor named after its class."""
        # str(node.__class__) looks like "<class 'sympy.core.mul.Mul'>";
        # strip the wrapper and keep the trailing class name.
        class_str = str(node.__class__).strip("<>'")
        class_name = class_str[class_str.rfind('.')+1:]
        method = 'visit_' + class_name
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        raise RuntimeError('no visitor for sympy node type: %r' % type(node))

    def visit_Symbol(self, node):
        assert(isinstance(node, sym.Symbol))
        # Symbols map to the pre-created z3 variables.
        return self.sym2Z3_varmap[node]

    # Numeric leaves are converted to Python floats so z3 builds Real
    # arithmetic.  (sympy dispatches to the concrete subclasses below;
    # visit_Number itself is kept for completeness.)
    def visit_Number(self, node):
        assert(isinstance(node, sym.Number))
        return float(node)

    def visit_Integer(self, node):
        assert(isinstance(node, sym.Integer))
        return float(node)

    def visit_Float(self, node):
        assert(isinstance(node, sym.Float))
        return float(node)

    def visit_NegativeOne(self, node):
        return float(-1)

    def visit_Zero(self, node):
        return float(0)

    def visit_Mul(self, node):
        assert(isinstance(node, sym.Mul))
        visited_terms = (self.visit(terms) for terms in node.args)
        return reduce(operator.mul, visited_terms)

    def visit_Add(self, node):
        assert(isinstance(node, sym.Add))
        visited_terms = (self.visit(terms) for terms in node.args)
        return reduce(operator.add, visited_terms)

    def visit_Pow(self, node):
        assert(isinstance(node, sym.Pow))
        return self.visit(node.args[0]) ** self.visit(node.args[1])

    def visit_LessThan(self, node):
        assert(isinstance(node, sym.LessThan))
        return self.visit(node.args[0]) <= self.visit(node.args[1])
|
{
"content_hash": "ebef8455e2613e215f6cfb6f93d879ae",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 77,
"avg_line_length": 30.55944055944056,
"alnum_prop": 0.6160183066361556,
"repo_name": "zutshi/S3CAMR",
"id": "834d18cce3443c726ba6dec57f4805be57fa58ec",
"size": "4370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sympy2z3/sympy2z3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2970"
},
{
"name": "MATLAB",
"bytes": "14618"
},
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "528716"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
from ftw import ruleset, errors
import pytest
def test_output():
    """ruleset.Output argument validation."""
    # Empty spec and a spec with only an unusable value are rejected.
    with pytest.raises(errors.TestError) as excinfo:
        ruleset.Output({})
    assert(excinfo.value.args[0].startswith('Need at least'))
    with pytest.raises(errors.TestError) as excinfo:
        ruleset.Output({'status': 'derp'})
    assert(excinfo.value.args[0].startswith('Need at least'))
    # Wrong value type for log_contains raises TypeError.
    with pytest.raises(TypeError) as excinfo:
        ruleset.Output({'log_contains': 10})
    # Valid specs: a single integer status or a list of integer statuses.
    ruleset.Output({'status': 200})
    ruleset.Output({'status': [100, 200]})
    # Mixed-type status lists are rejected.
    with pytest.raises(errors.TestError) as excinfo:
        ruleset.Output({'status': [100, 'derp']})
    assert(excinfo.value.args[0].startswith('Non integers found'))
def test_input():
    """ruleset.Input defaults and keyword construction."""
    # Default URI is '/'.
    input_1 = ruleset.Input()
    assert(input_1.uri == '/')
    # Headers passed via **kwargs are stored as provided.
    headers = {'Host': 'domain.com', 'User-Agent': 'Zack'}
    dictionary = {}
    dictionary['headers'] = headers
    input_2 = ruleset.Input(**dictionary)
    assert(len(input_2.headers.keys()) == 2)
    # Unknown keyword arguments are rejected.
    dictionary_2 = {'random_key': 'bar'}
    with pytest.raises(TypeError):
        ruleset.Input(**dictionary_2)
def test_testobj():
    """ruleset.Test requires a 'test_title' key; minimal stages are accepted."""
    with pytest.raises(KeyError) as excinfo:
        ruleset.Test({}, {})
    assert 'test_title' in str(excinfo.value)
    # A minimal valid test description constructs without error.
    stages_dict = {'test_title': 1, 'stages': [{'stage':
                   {'output': {'log_contains': 'foo'}, 'input': {}}}]}
    ruleset.Test(stages_dict, {})
def test_ruleset():
    """ruleset.Ruleset rejects an empty description dict."""
    with pytest.raises(KeyError):
        ruleset.Ruleset({})
|
{
"content_hash": "ef0481010a9701fb78e629a75ecf6a43",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 33.15555555555556,
"alnum_prop": 0.6260053619302949,
"repo_name": "CRS-support/ftw",
"id": "d2d503585eb81a02a04b816cf87d09d20d9b0069",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_ruleset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73544"
}
],
"symlink_target": ""
}
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SubtitleV30Rc1(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type.
    swagger_types = {
        'value': 'str'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'value': 'value'
    }

    def __init__(self, value=None):  # noqa: E501
        """SubtitleV30Rc1 - a model defined in Swagger"""  # noqa: E501
        self._value = None
        self.discriminator = None
        # Assign through the property so any setter logic applies.
        if value is not None:
            self.value = value

    @property
    def value(self):
        """Gets the value of this SubtitleV30Rc1.  # noqa: E501


        :return: The value of this SubtitleV30Rc1.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this SubtitleV30Rc1.


        :param value: The value of this SubtitleV30Rc1.  # noqa: E501
        :type: str
        """
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated guard for models that subclass dict (not the case here).
        if issubclass(SubtitleV30Rc1, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SubtitleV30Rc1):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "5609470a3dde7a48a86d8fa529e262bf",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 119,
"avg_line_length": 27.871559633027523,
"alnum_prop": 0.5375246872942725,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "f8a7e349d85f87d57b9f03a6d090abb9d8e9f680",
"size": "3055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/subtitle_v30_rc1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
"""
Flask application for serving poolvr.
"""
import os
import logging
import json
import shutil
import subprocess
from copy import deepcopy
from flask import Flask, request, Markup
from jinja2 import Environment, FileSystemLoader
_logger = logging.getLogger(__name__)
_here = os.path.dirname(os.path.abspath(__file__))

# npm package metadata; the version string is surfaced in the rendered page.
# Use a context manager so the file handle is closed (the previous revision
# leaked it via a bare open()).
with open('package.json') as _package_file:
    PACKAGE = json.loads(_package_file.read())

STATIC_FOLDER = _here
TEMPLATE_FOLDER = STATIC_FOLDER
DIST_OUTPUT_DIR = os.path.join(_here, 'dist')

# Most recent commit hashes, newest first; left empty when git is
# unavailable (e.g. running from a source tarball).
GIT_REVS = []
try:
    completed_proc = subprocess.run(['git', 'rev-list', '--max-count=4', 'HEAD'],
                                    stdout=subprocess.PIPE, check=True,
                                    universal_newlines=True)
    for line in completed_proc.stdout.splitlines():
        GIT_REVS.append(line)
except Exception as err:
    # Logger.warning is the supported spelling (Logger.warn is deprecated).
    _logger.warning('could not obtain git info:\n%s' % err)

app = Flask(__name__,
            static_folder=STATIC_FOLDER,
            static_url_path='',
            template_folder=TEMPLATE_FOLDER)

env = Environment(loader=FileSystemLoader(TEMPLATE_FOLDER))
template = env.get_template('poolvr_template.html')

# Defaults serialized into the page; overridable per request via URL params.
WebVRConfig = {
    "FORCE_ENABLE_VR": False,
    "K_FILTER": 0.98,
    "PREDICTION_TIME_S": 0.010,
    "TOUCH_PANNER_DISABLED": False,
    "YAW_ONLY": False,
    "MOUSE_KEYBOARD_CONTROLS_DISABLED": False,
    "KEYBOARD_CONTROLS_DISABLED": True
}

INCH2METER = 0.0254

# Default poolvr client configuration (table geometry in meters).
POOLVR = {
    'version': PACKAGE['version'],
    'config': {
        'gravity'             : 9.81,
        'useBasicMaterials'   : True,
        'useShadowMap'        : False,
        'useSpotLight'        : True,
        'usePointLight'       : False,
        'useTextGeomLogger'   : True,
        'L_table'             : 2.3368,
        'H_table'             : 0.77,
        'ball_diameter'       : 2.25 * INCH2METER,
        'soundVolume'         : 0.0,
        'toolOptions': {
            'tipShape'     : 'Cylinder',
            'numSegments'  : 8,
            'toolRadius'   : 0.009, #0.01325 / 2,
            'tipRadius'    : 0.009, #0.01325 / 2,
            'toolLength'   : 0.37,
            'tipLength'    : 0.37,
            'toolMass'     : 0.54,
            'offset'       : [0, 0, 0.37 / 2],
            'interactionPlaneOpacity': 0.22,
            'useImplicitCylinder'    : True
        }
    }
}
def get_webvr_config():
    """
    Constructs WebVRConfig dict based on request url parameters.

    Only query parameters whose names match existing WebVRConfig keys are
    honored; 'true'/'false' strings become booleans, other values are
    coerced to float when possible, else left as strings.
    """
    config = deepcopy(WebVRConfig)
    # Keep only query params that name known config keys.
    args = dict({k: v for k, v in request.args.items()
                 if k in config})
    for k, v in args.items():
        if v == 'false':
            args[k] = False
        elif v == 'true':
            args[k] = True
        elif not (v is False or v is True or v is None):
            # NOTE(review): v comes from request.args and is always a string
            # here, so the identity checks never exclude anything; this
            # branch just attempts numeric coercion (cf. get_poolvr_config).
            try:
                args[k] = float(v)
            except Exception as err:
                _logger.warning(err)
    config.update(args)
    return config
def get_poolvr_config():
    """
    Constructs poolvr config dict based on request url parameters.

    Mirrors get_webvr_config: only known POOLVR['config'] keys are
    overridable; strings are coerced to bool/float where possible.
    Returns a dict with 'config' and 'version' keys.
    """
    config = deepcopy(POOLVR['config'])
    # Keep only query params that name known config keys.
    args = dict({k: v for k, v in request.args.items()
                 if k in config})
    for k, v in args.items():
        if v == 'false':
            args[k] = False
        elif v == 'true':
            args[k] = True
        elif not (v is None):
            # Attempt numeric coercion; leave the string value on failure.
            try:
                args[k] = float(v)
            except Exception as err:
                _logger.warning(err)
    config.update(args)
    return {'config': config,
            'version': POOLVR['version']}
def render_poolvr_template(webvr_config=None, poolvr_config=None):
    """Render the poolvr HTML page from the jinja2 template.

    Both arguments default to the module-level WebVRConfig / POOLVR dicts.
    The configs, plus the exported pool-table scene from pool_table, are
    serialized into an inline <script> block consumed by the client app.
    """
    import pool_table
    if webvr_config is None:
        webvr_config = WebVRConfig
    if poolvr_config is None:
        poolvr_config = POOLVR
    # Footer linking the two most recent commits.  Require at least two
    # revisions: the previous truthiness-only guard raised IndexError on
    # GIT_REVS[1] when `git rev-list` returned a single revision (fresh or
    # shallow repository).
    if len(GIT_REVS) >= 2:
        version_content = Markup(r"""
<table>
  <tr>
    <td>
      <a href="https://github.com/jzitelli/poolvr/commit/{0}">current commit ({3})</a>
    </td>
  </tr>
  <tr>
    <td>
      <a href="https://github.com/jzitelli/poolvr/commit/{1}">previous commit ({4})</a>
    </td>
  </tr>
</table>
""".format(GIT_REVS[0], GIT_REVS[1], poolvr_config['version'], '%s...' % GIT_REVS[0][:6], '%s...' % GIT_REVS[1][:6]))
    else:
        version_content = None
    return template.render(config={'DEBUG': app.debug},
                           json_config=Markup(r"""<script>
var WebVRConfig = %s;
var POOLVR = %s;
var THREEPY_SCENE = %s;
</script>""" % (json.dumps(webvr_config, indent=2),
                json.dumps(poolvr_config, indent=2),
                json.dumps(pool_table.pool_hall(**poolvr_config['config']).export()))),
                           version=poolvr_config['version'],
                           version_content=version_content)
@app.route('/')
def poolvr():
    """
    Serves the poolvr app HTML.
    """
    # Build per-request configs from URL query parameters, then render.
    webvr_config = get_webvr_config()
    poolvr_config = get_poolvr_config()
    return render_poolvr_template(webvr_config=webvr_config, poolvr_config=poolvr_config)
def main():
    """Start the Flask development server on port 5000 (all interfaces)."""
    # Quiet werkzeug's per-request logging.
    werkzeug_logger = logging.getLogger('werkzeug')
    werkzeug_logger.setLevel(logging.WARNING)
    # werkzeug_logger.disabled = True
    _logger.info("""
***********
p o o l v r
*************************
{0}
*******************************
STARTING FLASK APP!!!!!!!!!!!!!
*******************************
{0}
*************************
p o o l v r
***********
""".format(POOLVR['version']))
    PORT = 5000
    # _logger.debug("app.config =\n%s" % '\n'.join(['  %s: %s' % (k, str(v))
    #                                               for k, v in sorted(app.config.items(),
    #                                                                  key=lambda i: i[0])]))
    _logger.info("""
GO TO:

  http://127.0.0.1:%d

""" % PORT)
    app.run(host='0.0.0.0', port=PORT)
def make_dist():
    """Assemble a self-contained distributable copy of the app in dist/.

    Renders the page once to static HTML, then copies the build output,
    static resources and the required subset of npm dependencies.  The
    output directory is wiped first.
    """
    _logger.info('building distributable version, output directory: "%s"...', DIST_OUTPUT_DIR)
    shutil.rmtree(DIST_OUTPUT_DIR, ignore_errors=True)
    shutil.copytree('build', os.path.join(DIST_OUTPUT_DIR, 'build'))
    # Render the page to a static HTML file (no server needed in the dist).
    html_path = os.path.join(DIST_OUTPUT_DIR, 'poolvr.html')
    with open(html_path, 'w') as f:
        f.write(render_poolvr_template())
    # copy resources:
    shutil.copy('poolvr.css', DIST_OUTPUT_DIR)
    shutil.copy('favicon.ico', DIST_OUTPUT_DIR)
    shutil.copytree('fonts', os.path.join(DIST_OUTPUT_DIR, 'fonts'))
    shutil.copytree('images', os.path.join(DIST_OUTPUT_DIR, 'images'))
    shutil.copytree('sounds', os.path.join(DIST_OUTPUT_DIR, 'sounds'))
    # copy npm dependencies (only the runtime files, not whole packages):
    shutil.copytree(os.path.join('node_modules', 'cannon', 'build'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'cannon', 'build'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'leapjs'))
    shutil.copy(os.path.join('node_modules', 'leapjs', 'leap-0.6.4.min.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'leapjs'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'build'))
    shutil.copy(os.path.join('node_modules', 'three', 'build', 'three.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'build'))
    shutil.copy(os.path.join('node_modules', 'three', 'build', 'three.min.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'build'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'controls'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'effects'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'objects'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'loaders'))
    shutil.copy(os.path.join('node_modules', 'three', 'examples', 'js', 'controls', 'VRControls.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'controls'))
    shutil.copy(os.path.join('node_modules', 'three', 'examples', 'js', 'effects', 'VREffect.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'effects'))
    shutil.copy(os.path.join('node_modules', 'three', 'examples', 'js', 'objects', 'ShadowMesh.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'objects'))
    shutil.copy(os.path.join('node_modules', 'three', 'examples', 'js', 'loaders', 'OBJLoader.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'js', 'loaders'))
    shutil.copytree(os.path.join('node_modules', 'three', 'examples', 'models'), os.path.join(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'models')))
    shutil.copytree(os.path.join('node_modules', 'three', 'examples', 'textures'), os.path.join(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three', 'examples', 'textures')))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three.py'))
    shutil.copytree(os.path.join('node_modules', 'three.py', 'js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'three.py', 'js'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'stats.js', 'build'))
    shutil.copy(os.path.join('node_modules', 'stats.js', 'build', 'stats.min.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'stats.js', 'build'))
    os.makedirs(os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'webvr-polyfill', 'build'))
    shutil.copy(os.path.join('node_modules', 'webvr-polyfill', 'build', 'webvr-polyfill.js'), os.path.join(DIST_OUTPUT_DIR, 'node_modules', 'webvr-polyfill', 'build'))
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--release', help='release (non-debug) mode', action='store_true')
    parser.add_argument('-v', '--verbose', help='enable verbose logging to stdout', action='store_true')
    parser.add_argument('--dist', help='build distributable version', action='store_true')
    args = parser.parse_args()
    if args.verbose:
        # NOTE(review): the log level is chosen from app.debug *before*
        # app.debug is set to True on the following line, so the level
        # follows Flask's default debug flag here — confirm this ordering
        # is intended.
        logging.basicConfig(level=(logging.DEBUG if app.debug else logging.INFO),
                            format="%(asctime)s %(levelname)s %(name)s %(funcName)s %(lineno)d: %(message)s")
        app.debug = True
    if args.release:
        app.debug = False
    # --dist builds the static distributable; otherwise start the dev server.
    if args.dist:
        make_dist()
    else:
        main()
|
{
"content_hash": "5350d45fd60f5ad825f94854788a0578",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 187,
"avg_line_length": 38.64905660377359,
"alnum_prop": 0.5727396992774849,
"repo_name": "jzitelli/poolvr",
"id": "4e86a8680a494cc65fc6ba872875c23332f12dc7",
"size": "10242",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "poolvr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2572"
},
{
"name": "HTML",
"bytes": "3526"
},
{
"name": "JavaScript",
"bytes": "108948"
},
{
"name": "Python",
"bytes": "21496"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.