text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import time
import uuid
import sublime_plugin
# time.time() of the most recently dispatched touch event; used by
# event_handler to debounce rapid selection changes.
TOUCH_EVENT_TIME = None
# Maps view.id() -> {handler_id: [region, callback]} for synchronous handlers.
TOUCH_EVENT_HANDLERS = {}
# Same structure, consumed from the async selection-modified event.
TOUCH_EVENT_HANDLERS_ASYNC = {}
def add_event_handler(view, region, handler=None, handler_id=None, HANDLERS=TOUCH_EVENT_HANDLERS):
    """Register *handler* to fire for touches inside *region* of *view*.

    Returns the handler id (a fresh uuid4 when none was supplied).
    """
    if handler_id is None:
        handler_id = uuid.uuid4()
    view_handlers = HANDLERS.setdefault(view.id(), {})
    view_handlers[handler_id] = [region, handler]
    return handler_id
def add_event_handler_async(view, region, handler, handler_id=None):
    # Same as add_event_handler, but registered in the async table
    # (dispatched from on_selection_modified_async).
    return add_event_handler(view, region, handler, handler_id, TOUCH_EVENT_HANDLERS_ASYNC)
def add_event_handlers(view, regions, handlers, handler_ids=None, HANDLERS=TOUCH_EVENT_HANDLERS):
    """Register one handler per region; returns the list of handler ids.

    handler_ids may be a (possibly shorter) list of existing ids to reuse;
    missing entries are padded with None so fresh ids get generated.
    """
    handler_ids = [] if handler_ids is None else handler_ids
    # Bug fix: the original iterated range(regions), which raises TypeError
    # for any list argument; we need range(len(regions)).
    for i in range(len(regions)):
        if len(handler_ids) == i:
            handler_ids.append(None)
        handler_ids[i] = add_event_handler(view, regions[i], handlers[i], handler_ids[i], HANDLERS)
    return handler_ids
def add_event_handlers_async(view, regions, handlers, handler_ids=None):
    # Bulk registration against the async handler table.
    return add_event_handlers(view, regions, handlers, handler_ids, TOUCH_EVENT_HANDLERS_ASYNC)
def remove_event_handler(view, handler_id, HANDLERS=TOUCH_EVENT_HANDLERS):
    """Unregister a single handler; silently ignores unknown ids/views."""
    try:
        del HANDLERS[view.id()][handler_id]
    except KeyError:
        pass
    return handler_id
def remove_event_handler_async(view, handler_id):
    # Async-table variant of remove_event_handler.
    return remove_event_handler(view, handler_id, TOUCH_EVENT_HANDLERS_ASYNC)
def remove_event_handlers(view, HANDLERS=TOUCH_EVENT_HANDLERS):
    """Drop every handler registered for *view*.

    Returns the removed handler ids, or None when the view had none.
    """
    removed = HANDLERS.pop(view.id(), None)
    if removed is not None:
        return removed.keys()
def remove_event_handlers_async(view):
    # Async-table variant of remove_event_handlers.
    return remove_event_handlers(view, TOUCH_EVENT_HANDLERS_ASYNC)
def event_handler(view, HANDLERS=TOUCH_EVENT_HANDLERS):
    """Dispatch a "touch" for the view's current selection.

    Calls every registered handler whose region contains the caret, but
    only when the selection is a single empty region and at least 0.2s
    have elapsed since the previous dispatch (a simple debounce against
    rapid selection-change events).
    """
    global TOUCH_EVENT_TIME
    # Idiom fix: `not x in d` -> `x not in d`; also look the table up once.
    view_handlers = HANDLERS.get(view.id())
    if view_handlers is None:
        return None
    regions = view.sel()
    event_time = time.time()
    if len(regions) == 1 and regions[0].empty() and (
            TOUCH_EVENT_TIME is None or
            TOUCH_EVENT_TIME < event_time - 0.2):
        point = regions[0].begin()
        TOUCH_EVENT_TIME = event_time
        # Snapshot the items: handlers may register/unregister handlers
        # while we iterate.
        for handler_id, (region, handler) in list(view_handlers.items()):
            if region.contains(point):
                handler(handler_id, view, region, point)
class LiveEventListener(sublime_plugin.EventListener):
    """Routes Sublime Text selection events to the registered touch handlers."""

    def on_selection_modified(self, view):
        # Synchronous dispatch against the default handler table.
        event_handler(view)

    def on_selection_modified_async(self, view):
        # Dispatch against the async handler table.
        event_handler(view, TOUCH_EVENT_HANDLERS_ASYNC)

    def on_close(self, view):
        # Drop all handlers for a closed view so the tables don't leak entries.
        remove_event_handlers(view)
        remove_event_handlers_async(view)
|
{
"content_hash": "7e82d7bb4a541fbaf3f75bf7f99b0331",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 99,
"avg_line_length": 31.602272727272727,
"alnum_prop": 0.678173318950018,
"repo_name": "sligodave/sublime_games",
"id": "ba13f2c4ab6acd888f3b1c434161840bc00c27ac",
"size": "2782",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "touch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25348"
}
],
"symlink_target": ""
}
|
import vcr
class MockSerializer(object):
def __init__(self):
self.serialize_count = 0
self.deserialize_count = 0
self.load_args = None
def deserialize(self, cassette_string):
self.serialize_count += 1
self.cassette_string = cassette_string
return {'interactions': []}
def serialize(self, cassette_dict):
self.deserialize_count += 1
return ""
def test_registered_serializer(tmpdir):
ms = MockSerializer()
my_vcr = vcr.VCR()
my_vcr.register_serializer('mock', ms)
tmpdir.join('test.mock').write('test_data')
with my_vcr.use_cassette(str(tmpdir.join('test.mock')), serializer='mock'):
# Serializer deserialized once
assert ms.serialize_count == 1
# and serialized the test data string
assert ms.cassette_string == 'test_data'
# and hasn't serialized yet
assert ms.deserialize_count == 0
assert ms.serialize_count == 1
|
{
"content_hash": "399d52d488bd249050533fb05487d788",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 29.393939393939394,
"alnum_prop": 0.6288659793814433,
"repo_name": "poussik/vcrpy",
"id": "734f301ee2c56237ce7d3cad4d72316f1ca9b9a5",
"size": "970",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/test_register_serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "230968"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
}
|
import sys, struct
from test import support
from test.support import import_fresh_module
import types
import unittest
# Import the accelerated C implementation fresh (bypassing any cached pure-
# Python module) so these tests exercise _elementtree. cET is None when the
# accelerator is unavailable; the test classes below skip in that case.
cET = import_fresh_module('xml.etree.ElementTree',
                          fresh=['_elementtree'])
cET_alias = import_fresh_module('xml.etree.cElementTree',
                                fresh=['_elementtree', 'xml.etree'])
@unittest.skipUnless(cET, 'requires _elementtree')
class MiscTests(unittest.TestCase):
    """Assorted regression tests specific to the C accelerator."""

    # Issue #8651.
    @support.bigmemtest(size=support._2G + 100, memuse=1, dry_run=False)
    def test_length_overflow(self, size):
        # Feeding more than 2 GiB must raise OverflowError, not crash.
        data = b'x' * size
        parser = cET.XMLParser()
        try:
            self.assertRaises(OverflowError, parser.feed, data)
        finally:
            data = None  # release the huge buffer promptly

    def test_del_attribute(self):
        # The C Element's core attributes cannot be deleted: every `del`
        # must raise AttributeError and leave the previous value intact.
        element = cET.Element('tag')
        element.tag = 'TAG'
        with self.assertRaises(AttributeError):
            del element.tag
        self.assertEqual(element.tag, 'TAG')
        with self.assertRaises(AttributeError):
            del element.text
        self.assertIsNone(element.text)
        element.text = 'TEXT'
        with self.assertRaises(AttributeError):
            del element.text
        self.assertEqual(element.text, 'TEXT')
        with self.assertRaises(AttributeError):
            del element.tail
        self.assertIsNone(element.tail)
        element.tail = 'TAIL'
        with self.assertRaises(AttributeError):
            del element.tail
        self.assertEqual(element.tail, 'TAIL')
        with self.assertRaises(AttributeError):
            del element.attrib
        self.assertEqual(element.attrib, {})
        element.attrib = {'A': 'B', 'C': 'D'}
        with self.assertRaises(AttributeError):
            del element.attrib
        self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})

    def test_trashcan(self):
        # If this test fails, it will most likely die via segfault.
        # Builds a 200000-deep element chain to exercise the trashcan
        # mechanism during deallocation.
        e = root = cET.Element('root')
        for i in range(200000):
            e = cET.SubElement(e, 'x')
        del e
        del root
        support.gc_collect()
@unittest.skipUnless(cET, 'requires _elementtree')
class TestAliasWorking(unittest.TestCase):
    """Check that the deprecated cElementTree alias module is still alive."""

    def test_alias_working(self):
        element = cET_alias.Element('foo')
        self.assertEqual(element.tag, 'foo')
@unittest.skipUnless(cET, 'requires _elementtree')
@support.cpython_only
class TestAcceleratorImported(unittest.TestCase):
    """Verify the C accelerator, not the Python fallback, was imported."""

    def test_correct_import_cET(self):
        # SubElement is a function so it retains _elementtree as its module.
        self.assertEqual(cET.SubElement.__module__, '_elementtree')

    def test_correct_import_cET_alias(self):
        self.assertEqual(cET_alias.SubElement.__module__, '_elementtree')

    def test_parser_comes_from_C(self):
        # The type of methods defined in Python code is types.FunctionType,
        # while the type of methods defined inside _elementtree is
        # <class 'wrapper_descriptor'>
        self.assertNotIsInstance(cET.Element.__init__, types.FunctionType)
@unittest.skipUnless(cET, 'requires _elementtree')
@support.cpython_only
class SizeofTest(unittest.TestCase):
    """Pin the in-memory size of accelerated Element objects."""

    def setUp(self):
        # Base object size: the C Element struct holds 5 pointer fields.
        self.elementsize = support.calcobjsize('5P')
        # extra: the lazily-allocated "extra" struct (attrib dict, child
        # array bookkeeping) — layout per _elementtree.c.
        self.extra = struct.calcsize('PnnP4P')

    check_sizeof = support.check_sizeof

    def test_element(self):
        e = cET.Element('a')
        self.check_sizeof(e, self.elementsize)

    def test_element_with_attrib(self):
        e = cET.Element('a', href='about:')
        self.check_sizeof(e, self.elementsize + self.extra)

    def test_element_with_children(self):
        e = cET.Element('a')
        for i in range(5):
            cET.SubElement(e, 'span')
        # should have space for 8 children now
        self.check_sizeof(e, self.elementsize + self.extra +
                          struct.calcsize('8P'))
def test_main():
    """Run the C-specific cases, then the shared suite against cET."""
    from test import test_xml_etree, test_xml_etree_c

    # Run the tests specific to the C implementation.
    c_only_cases = (
        MiscTests,
        TestAliasWorking,
        TestAcceleratorImported,
        SizeofTest,
    )
    support.run_unittest(*c_only_cases)

    # Run the same test suite as the Python module, parameterized on cET.
    test_xml_etree.test_main(module=cET)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
|
{
"content_hash": "62c76a6db23859e50aa8d0c54e0e4582",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 76,
"avg_line_length": 32.36296296296296,
"alnum_prop": 0.6280613412680247,
"repo_name": "batermj/algorithm-challenger",
"id": "bfced1225e0faeccde684ea1f85725b6e5ad9506",
"size": "4403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_xml_etree_c.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ConstructionConfig(AppConfig):
    """Django application configuration for the ``construction`` app."""

    name = 'construction'
|
{
"content_hash": "ef4d5f91b2e363ae94195099081c0980",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 20,
"alnum_prop": 0.7714285714285715,
"repo_name": "j-windsor/thebeau",
"id": "1523c1d20ea895deb2816dd34edc063df4413e45",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "construction/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86781"
},
{
"name": "HTML",
"bytes": "15007"
},
{
"name": "JavaScript",
"bytes": "16209"
},
{
"name": "Python",
"bytes": "15005"
}
],
"symlink_target": ""
}
|
from fjord.base.tests import TestCase
from fjord.search.models import Record
from fjord.search.tests import RecordFactory
class TestRecord(TestCase):
    def test_mark(self):
        """Test marking as fail/success."""
        def status_counts():
            # (new, fail, success) record counts, queried fresh each time.
            return tuple(
                Record.objects.filter(status=status).count()
                for status in (Record.STATUS_NEW,
                               Record.STATUS_FAIL,
                               Record.STATUS_SUCCESS))

        r = RecordFactory()
        assert status_counts() == (1, 0, 0)

        r.mark_fail('Errorz!')
        assert status_counts() == (0, 1, 0)

        r.mark_success()
        assert status_counts() == (0, 0, 1)
|
{
"content_hash": "644450e6679be6a241f9beb782802945",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 43.78260869565217,
"alnum_prop": 0.6852035749751738,
"repo_name": "hoosteeno/fjord",
"id": "e3749bdddd3ddc07393e0f914e8366d6253fede5",
"size": "1007",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fjord/search/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "161316"
},
{
"name": "HTML",
"bytes": "143979"
},
{
"name": "JavaScript",
"bytes": "304890"
},
{
"name": "Python",
"bytes": "901824"
},
{
"name": "Shell",
"bytes": "11314"
},
{
"name": "Smarty",
"bytes": "691"
}
],
"symlink_target": ""
}
|
from django import forms
from usuarios.models import Usuario
from corporaciones.models import Corporacion
from votantes.models import Votante
"""
Este formulario se encuentran los datos para registrar el votante
"""
class FormularioRegistroVotante(forms.Form):
    """Form collecting the data needed to register a voter (votante)."""

    cedula_usuario = forms.IntegerField(
        widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí la cedula de usuario', 'min': '1', 'required': 'true'}))
    nombre_usuario = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el nombre del usuario', 'required': 'true'}))
    apellido_usuario = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el apellido del usuario', 'required': 'true'}))
    email = forms.EmailField(
        widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí su correo electronico', 'required': 'true'}))
    # Typo fix in the user-facing placeholder: 'códido' -> 'código'.
    codigo_estudiante = forms.IntegerField(
        required=False, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el código de estudiante', 'min': '1', 'required': 'false'}))
    plan_estudiante = forms.ModelChoiceField(queryset=Corporacion.objects.filter(facultad__isnull=False), required=False, empty_label=None)

    def usuariovotante_existe(self):
        """Validate that no voter with this cedula is already registered.

        Raises forms.ValidationError when a matching voter exists;
        otherwise returns the cleaned cedula.
        """
        diccionario_limpio = self.cleaned_data
        cedula = diccionario_limpio.get('cedula_usuario')
        # NOTE(review): `Votante.usuario.objects.get(...)` looks wrong — on a
        # model class that attribute is a field descriptor with no manager.
        # Presumably this should query through the relation, e.g.
        # Votante.objects.get(usuario__username=cedula); also note that
        # .get() raises DoesNotExist rather than returning None. Confirm
        # against the Votante model before changing further.
        votante = Votante.usuario.objects.get(username=cedula)
        if votante is not None:
            # Bug fix: was `raise self.ValidationError(...)` — forms.Form has
            # no ValidationError attribute, so that line raised
            # AttributeError instead of the intended validation error.
            raise forms.ValidationError("El votante ya existe")
        return cedula
class FormularioEditarVotante(forms.Form):
    """Form for editing an already-registered voter."""

    cedula_usuario = forms.IntegerField(
        widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí la cedula de usuario', 'min': '1', 'required': 'true'}))
    nombre_usuario = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el nombre del usuario', 'required': 'true'}))
    apellido_usuario = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el apellido del usuario', 'required': 'true'}))
    esta_activo = forms.BooleanField(initial=True, required=False,
                                     widget=forms.CheckboxInput(attrs={'class': 'form-checkbox form-icon'}))
    email = forms.EmailField(
        widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí su correo electronico', 'required': 'true'}))
    # Typo fix in the user-facing placeholder: 'códido' -> 'código'.
    codigo_estudiante = forms.IntegerField(
        required=False, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Escriba aquí el código de estudiante', 'min': '1', 'required': 'false'}))
    plan_estudiante = forms.ModelChoiceField(queryset=Corporacion.objects.filter(facultad__isnull=False), required=False, empty_label=None)
class FormularioCargar(forms.Form):
    """Form for uploading a CSV file (bulk voter load)."""

    file = forms.FileField(label='Seleccionar un archivo' , widget=forms.FileInput(attrs={'accept':".csv"}))
|
{
"content_hash": "ea76e8c93535749f6ff55d68fa01c9a1",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 169,
"avg_line_length": 48.80952380952381,
"alnum_prop": 0.7079674796747968,
"repo_name": "Jorgesolis1989/SIVORE",
"id": "25387407a8bbc6a1ba393be78e61c825f4dafc24",
"size": "3087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "votantes/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "653531"
},
{
"name": "CoffeeScript",
"bytes": "14351"
},
{
"name": "HTML",
"bytes": "4583180"
},
{
"name": "JavaScript",
"bytes": "4954274"
},
{
"name": "Makefile",
"bytes": "285"
},
{
"name": "PHP",
"bytes": "30096"
},
{
"name": "Python",
"bytes": "171284"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
}
|
import urllib2
from xml.dom import minidom
import xml.etree.ElementTree as ET
from cinder.volume.drivers.violin.vxg.core.error import *
class XGRequest(object):
    """Request to XML gateway."""

    def __init__(self, type="query", nodes=None, action=None, event=None,
                 flat=False, values_only=False):
        """Build a gateway request.

        Arguments:
            type        -- request kind: "query", "set", "action" or "event"
            nodes       -- list of node objects to include (default: empty)
            action      -- action name; required iff type == "action"
            event       -- event name; required iff type == "event"
            flat        -- request flat formatting
            values_only -- values-only output; requires flat=True

        Raises:
            TypeError on any inconsistent combination of arguments.
        """
        if type not in ["query", "set", "action", "event"]:
            raise TypeError("Unknown request type %s." % (type))
        self.type = type
        # Bug fix: the default was a shared mutable list (nodes=[]) — every
        # request created without an explicit nodes argument aliased the
        # same list object, so mutations leaked between instances.
        self.nodes = [] if nodes is None else nodes
        if values_only and not flat:
            raise TypeError("values_only requires flat = True")
        self.flat = flat
        self.values_only = values_only
        if type == "action" and action is None:
            raise TypeError("Missing action name for action request.")
        if type != "action" and action is not None:
            raise TypeError("Action name specified for non-action request.")
        self.action = action
        if type == "event" and event is None:
            raise TypeError("Missing event name for event request.")
        if type != "event" and event is not None:
            raise TypeError("Event name specified for non-event request.")
        self.event = event

    def __repr__(self):
        return ('<XGRequest type:%s action:%s nodes:%r>'
                % (self.type, self.action, self.nodes))

    def to_xml(self, pretty_print=True):
        """Return an XML document describing this XGRequest.

        Arguments:
            pretty_print -- Get a properly formatted XML doc as opposed
                            to a single-line string with XML tags (bool)

        Returns:
            This request object as an XML string.
        """
        root = ET.Element('xg-request')
        req = ET.SubElement(root, '%s-request' % (self.type,))
        if self.action is not None:
            action = ET.SubElement(req, 'action-name')
            action.text = self.action
        if self.event is not None:
            event = ET.SubElement(req, 'event-name')
            event.text = self.event
        if len(self.nodes) > 0:
            nodes = ET.SubElement(req, 'nodes')
            for n in self.nodes:
                nodes.append(n.as_element_tree(self.type))
        if pretty_print:
            return self._pretty_print(root)
        else:
            return ET.tostring(root)

    def _pretty_print(self, node):
        """Return a properly formatted XML document with newlines and
        spaces.

        Arguments:
            node -- An instance of xml.etree.Element

        Returns:
            A properly formatted XML document.
        """
        reparsed = minidom.parseString(ET.tostring(node))
        return self._tighten_xml(reparsed.toprettyxml('  ', "\n", 'UTF-8'))

    def _tighten_xml(self, xml):
        """Tighten the value and close tags to the opening tag in an
        XML document.

        The XML gateway will not be able to process a document that is
        formatted like so:

            <tag>
              tagValue
            </tag>

        Unfortunately, this is how toprettyxml() outputs the XML. So the
        purpose of this function is to turn the above into this:

            <tag>tagValue</tag>

        Arguments:
            xml -- XML output from the toprettyxml() function

        Returns:
            A properly formatted XML document.
        """
        newxml = []
        prevLeadingSpaces = 0
        leadingSpaces = 0
        ascended = False
        for line in xml.split('\n'):
            leadingSpaces = len(line) - len(line.lstrip())
            if leadingSpaces > prevLeadingSpaces:
                # Increase in indent, just append
                newxml.append(line)
                ascended = True
            elif leadingSpaces < prevLeadingSpaces:
                if ascended:
                    # Single close tag, merge lines
                    value = newxml.pop().lstrip()
                    newxml[-1] += value + line.lstrip()
                else:
                    # Multiple closing tags, so just append
                    newxml.append(line)
                ascended = False
            else:
                # Same indent, just append
                newxml.append(line)
            prevLeadingSpaces = leadingSpaces
        return '\n'.join(newxml)
class XGQuery(XGRequest):
    """Class for XML Gateway queries."""

    def __init__(self, nodes=None, flat=False, values_only=False):
        # Bug fix: avoid the shared mutable [] default argument.
        super(XGQuery, self).__init__('query',
                                      [] if nodes is None else nodes,
                                      flat=flat, values_only=values_only)
class XGAction(XGRequest):
    """Class for XML Gateway actions."""

    def __init__(self, action, nodes=None, flat=False, values_only=False):
        # Bug fix: avoid the shared mutable [] default argument.
        super(XGAction, self).__init__('action',
                                       [] if nodes is None else nodes,
                                       action,
                                       flat=flat, values_only=values_only)
class XGEvent(XGRequest):
    """Class for XML Gateway events (not yet implemented)."""

    def __init__(self, *args, **kwargs):
        # NotImplementedError is the idiomatic signal for an unimplemented
        # feature; it subclasses Exception, so callers that caught the old
        # generic Exception still work.
        raise NotImplementedError("Not yet implemented.")
class XGSet(XGRequest):
    """Class for XML Gateway set operations."""

    def __init__(self, nodes=None, flat=False, values_only=False):
        # Bug fix: avoid the shared mutable [] default argument.
        super(XGSet, self).__init__('set',
                                    [] if nodes is None else nodes,
                                    flat=flat, values_only=values_only)
class BasicJsonRequest(urllib2.Request):
    """A basic JSON request.

    Certain JSON requests need this type of request, but for the most part,
    this class exists to be subclassed.
    """

    # Headers merged into every request of this type; subclasses override.
    _mixins = {'X-Requested-With': 'XMLHttpRequest'}

    def __init__(self, *args, **kwargs):
        # urllib2.Request(url, data, headers, ...): when headers were passed
        # positionally (third argument), merge the mixin headers into that
        # dict; otherwise merge into the 'headers' keyword, creating it if
        # necessary.
        if len(args) > 2:
            args[2].update(self._mixins)
        else:
            kwargs.setdefault('headers', {})
            kwargs['headers'].update(self._mixins)
        urllib2.Request.__init__(self, *args, **kwargs)
class RESTRequest(BasicJsonRequest):
    """A core request type for JSON sessions.

    Adds a JSON Content-Type header on top of BasicJsonRequest's mixins.
    """

    _mixins = {'X-Requested-With': 'XMLHttpRequest',
               'Content-Type': 'application/json'}
class GetRequest(RESTRequest):
    """A core request type for JSON sessions.

    Implements HTTP GET requests.
    """

    def get_method(self):
        # Force the HTTP verb regardless of whether a body is present.
        return 'GET'
class PostRequest(RESTRequest):
    """A core request type for JSON sessions.

    Implements HTTP POST requests (urllib2's default when data is given,
    so no get_method override is needed).
    """
    pass
class PutRequest(RESTRequest):
    """A core request type for JSON sessions.

    Implements HTTP PUT requests.
    """

    def get_method(self):
        return 'PUT'
class DeleteRequest(RESTRequest):
    """A core request type for JSON sessions.

    Implements HTTP DELETE requests.
    """

    def get_method(self):
        return 'DELETE'
|
{
"content_hash": "b686eaf9dff271b6682f57f3de5880f9",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 76,
"avg_line_length": 28.113445378151262,
"alnum_prop": 0.566432521297265,
"repo_name": "rlucio/cinder-violin-driver-icehouse",
"id": "6080bf34b385988b5b01e9ed20ba3d49a707e925",
"size": "7394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/violin/vxg/core/request.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "348064"
}
],
"symlink_target": ""
}
|
"""Defines the public namespace for SQL expression constructs.
Prior to version 0.9, this module contained all of "elements", "dml",
"default_comparator" and "selectable". The module was broken up
and most "factory" functions were moved to be grouped with their associated
class.
"""
# Public names re-exported by this namespace module.
# NOTE(review): 'Any' and 'All' are listed here but no names with that exact
# capitalization are defined in this module (only the any_/all_ factories
# below) — a star-import would fail on them; verify before relying on
# `from ... import *`.
__all__ = [
    'Alias', 'Any', 'All', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
    'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
    'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
    'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
    'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
    'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
    'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
    'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
    'table', 'text',
    'tuple_', 'type_coerce', 'union', 'union_all', 'update', 'within_group']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement, Function
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
BindParameter, CollectionAggregate, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
Grouping, WithinGroup, not_, \
collate, literal_column, between,\
literal, outparam, type_coerce, ClauseList, FunctionFilter
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
from .base import ColumnCollection, Generative, Executable, \
PARSE_AUTOCOMMIT
from .selectable import Alias, Join, Select, Selectable, TableClause, \
CompoundSelect, CTE, FromClause, FromGrouping, SelectBase, \
alias, GenerativeSelect, \
subquery, HasPrefixes, HasSuffixes, Exists, ScalarSelect, TextAsFrom
from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# factory functions - these pull class-bound constructors and classmethods
# from SQL elements and selectables into public functions. This allows
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
# Boolean / comparison constructs.
all_ = public_factory(CollectionAggregate._create_all, ".expression.all_")
any_ = public_factory(CollectionAggregate._create_any, ".expression.any_")
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
# Core column-level constructs.
bindparam = public_factory(BindParameter, ".expression.bindparam")
select = public_factory(Select, ".expression.select")
text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
within_group = public_factory(WithinGroup, ".expression.within_group")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
extract = public_factory(Extract, ".expression.extract")
tuple_ = public_factory(Tuple, ".expression.tuple_")
# Set operations on SELECT statements.
except_ = public_factory(CompoundSelect._create_except, ".expression.except_")
except_all = public_factory(
    CompoundSelect._create_except_all, ".expression.except_all")
intersect = public_factory(
    CompoundSelect._create_intersect, ".expression.intersect")
intersect_all = public_factory(
    CompoundSelect._create_intersect_all, ".expression.intersect_all")
union = public_factory(CompoundSelect._create_union, ".expression.union")
union_all = public_factory(
    CompoundSelect._create_union_all, ".expression.union_all")
exists = public_factory(Exists, ".expression.exists")
# Ordering modifiers.
nullsfirst = public_factory(
    UnaryExpression._create_nullsfirst, ".expression.nullsfirst")
nullslast = public_factory(
    UnaryExpression._create_nullslast, ".expression.nullslast")
asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(
    UnaryExpression._create_distinct, ".expression.distinct")
# Literal singletons.
true = public_factory(True_._instance, ".expression.true")
false = public_factory(False_._instance, ".expression.false")
null = public_factory(Null._instance, ".expression.null")
# FROM-clause and DML constructs.
join = public_factory(Join._create_join, ".expression.join")
outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin")
insert = public_factory(Insert, ".expression.insert")
update = public_factory(Update, ".expression.update")
delete = public_factory(Delete, ".expression.delete")
funcfilter = public_factory(
    FunctionFilter, ".expression.funcfilter")
# internal functions still being called from tests and the ORM,
# these might be better off in some other namespace
from .base import _from_objects
from .elements import _literal_as_text, _clause_element_as_expr,\
_is_column, _labeled, _only_column_elements, _string_or_unprintable, \
_truncated_label, _clone, _cloned_difference, _cloned_intersection,\
_column_as_key, _literal_as_binds, _select_iterables, \
_corresponding_column_or_error, _literal_as_label_reference, \
_expression_literal_as_text
from .selectable import _interpret_as_from
# old names for compatibility
# These single-underscore aliases predate the 0.9 module split; kept so
# external code importing the private names keeps working — do not remove.
_Executable = Executable
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
|
{
"content_hash": "e3ad3f5302a2ebb667ee35bc545ee287",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 80,
"avg_line_length": 44.21052631578947,
"alnum_prop": 0.7409863945578231,
"repo_name": "davidjb/sqlalchemy",
"id": "79d25a39ebafc6be39288d6ab1e89d9672d74769",
"size": "6118",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/sql/expression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8724555"
}
],
"symlink_target": ""
}
|
'''
Control Apache Traffic Server
=============================
.. versionadded:: 2015.8.0
'''
def __virtual__():
    '''
    Only load if the Traffic Server module is available in __salt__
    '''
    if 'trafficserver.set_var' in __salt__:
        return 'trafficserver'
    return False
def bounce_cluster(name):
    '''
    Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server
    shuts down and immediately restarts Traffic Server, node-by-node.

    .. code-block:: yaml

        bounce_ats_cluster:
          trafficserver.bounce_cluster
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Bouncing cluster'
        return ret

    __salt__['trafficserver.bounce_cluster']()
    ret.update(result=True, comment='Bounced cluster')
    return ret
def bounce_local(name, drain=False):
    '''
    Bounce Traffic Server on the local node. Bouncing Traffic Server shuts
    down and immediately restarts the Traffic Server node.

    This option modifies the behavior of traffic_line -b and traffic_line -L
    such that traffic_server is not shut down until the number of active
    client connections drops to the number given by the
    proxy.config.restart.active_client_threshold configuration variable.

    .. code-block:: yaml

        bounce_ats_local:
          trafficserver.bounce_local

        bounce_ats_local:
          trafficserver.bounce_local
          - drain: True
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Bouncing local node'
        return ret

    if drain:
        __salt__['trafficserver.bounce_local'](drain=True)
        comment = 'Bounced local node with drain option'
    else:
        __salt__['trafficserver.bounce_local']()
        comment = 'Bounced local node'
    ret.update(result=True, comment=comment)
    return ret
def clear_cluster(name):
    '''
    Clears accumulated statistics on all nodes in the cluster.

    .. code-block:: yaml

        clear_ats_cluster:
          trafficserver.clear_cluster
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Clearing cluster statistics'
        return ret

    __salt__['trafficserver.clear_cluster']()
    ret.update(result=True, comment='Cleared cluster statistics')
    return ret
def clear_node(name):
    '''
    Clears accumulated statistics on the local node.

    .. code-block:: yaml

        clear_ats_node:
          trafficserver.clear_node
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Clearing local node statistics'
        return ret

    __salt__['trafficserver.clear_node']()
    ret.update(result=True, comment='Cleared local node statistics')
    return ret
def restart_cluster(name):
    '''
    Restart the traffic_manager process and the traffic_server process on all
    the nodes in a cluster.

    .. code-block:: bash

        restart_ats_cluster:
          trafficserver.restart_cluster
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Restarting cluster'
        return ret

    __salt__['trafficserver.restart_cluster']()
    ret.update(result=True, comment='Restarted cluster')
    return ret
def restart_local(name, drain=False):
    '''
    Restart the traffic_manager and traffic_server processes on the local
    node.

    This option modifies the behavior of traffic_line -b and traffic_line -L
    such that traffic_server is not shut down until the number of active
    client connections drops to the number given by the
    proxy.config.restart.active_client_threshold configuration variable.

    .. code-block:: yaml

        restart_ats_local:
          trafficserver.restart_local

        restart_ats_local_drain:
          trafficserver.restart_local
          - drain: True
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'Restarting local node'
        return ret

    if drain:
        __salt__['trafficserver.restart_local'](drain=True)
        comment = 'Restarted local node with drain option'
    else:
        __salt__['trafficserver.restart_local']()
        comment = 'Restarted local node'
    ret.update(result=True, comment=comment)
    return ret
def set_var(name, value):
    '''
    Set a Traffic Server configuration variable value.

    .. code-block:: yaml

        proxy.config.proxy_name:
          trafficserver.set_var:
            - value: cdn.site.domain.tld

        OR

        traffic_server_setting:
          trafficserver.set_var:
            - name: proxy.config.proxy_name
            - value: cdn.site.domain.tld
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None,
                    comment='Configuring {0} to {1}'.format(name, value))

    __salt__['trafficserver.set_var'](name, value)
    return dict(ret, result=True,
                comment='Configured {0} to {1}'.format(name, value))
def shutdown(name):
    '''
    Shut down Traffic Server on the local node.

    .. code-block:: yaml

        shutdown_ats:
          trafficserver.shutdown
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None, comment='Shutting down local node')

    __salt__['trafficserver.shutdown']()
    return dict(ret, result=True, comment='Shutdown local node')
def startup(name):
    '''
    Start Traffic Server on the local node.

    .. code-block:: yaml

        startup_ats:
          trafficserver.startup
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Test mode: describe the pending action; result stays None.
    if __opts__['test']:
        ret['comment'] = 'Starting up local node'
        return ret

    __salt__['trafficserver.startup']()

    ret['result'] = True
    # Fix: the success comment previously repeated the in-progress message
    # ('Starting up local node'); use past tense like every sibling state
    # (Cleared/Restarted/Zeroed/Refreshed/...).
    ret['comment'] = 'Started up local node'
    return ret
def refresh(name):
    '''
    Initiate a Traffic Server configuration file reread. Use this to apply
    configuration file changes to the running process. The timestamp of the
    last reconfiguration event (seconds since epoch) is published in the
    proxy.node.config.reconfigure_time metric.

    .. code-block:: yaml

        refresh_ats:
          trafficserver.refresh
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None,
                    comment='Refreshing local node configuration')

    __salt__['trafficserver.refresh']()
    return dict(ret, result=True,
                comment='Refreshed local node configuration')
def zero_cluster(name):
    '''
    Reset performance statistics to zero across the cluster.

    .. code-block:: yaml

        zero_ats_cluster:
          trafficserver.zero_cluster
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None, comment='Zeroing cluster statistics')

    __salt__['trafficserver.zero_cluster']()
    return dict(ret, result=True, comment='Zeroed cluster statistics')
def zero_node(name):
    '''
    Reset performance statistics to zero on the local node.

    .. code-block:: yaml

        zero_ats_node:
          trafficserver.zero_node
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None, comment='Zeroing local node statistics')

    __salt__['trafficserver.zero_node']()
    return dict(ret, result=True, comment='Zeroed local node statistics')
def offline(name, path):
    '''
    Mark a cache storage device as offline.

    The storage is identified by ``path``, which must exactly match a path
    specified in storage.config. The storage is removed from the cache and
    requests that would have used it are redirected to other storage — the
    same effect as a disk failure. This does not persist across restarts of
    the traffic_server process.

    .. code-block:: yaml

        offline_ats_path:
          trafficserver.offline:
            - path: /path/to/cache
    '''
    ret = {'name': name, 'changes': {}}

    if __opts__['test']:
        return dict(ret, result=None,
                    comment='Setting {0} to offline'.format(path))

    __salt__['trafficserver.offline'](path)
    return dict(ret, result=True, comment='Set {0} as offline'.format(path))
|
{
"content_hash": "ecbe86d601def0a71b1785e86b98d556",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 79,
"avg_line_length": 23.9515306122449,
"alnum_prop": 0.5659814676749387,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "9588dda0038ae8a53fb3c559ecd76a8c9d6e05de",
"size": "9413",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/states/trafficserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
from google.maps.routing import gapic_version as package_version
# Re-export the generated gapic version as this package's __version__.
__version__ = package_version.__version__
from google.maps.routing_v2.services.routes.async_client import RoutesAsyncClient
from google.maps.routing_v2.services.routes.client import RoutesClient
from google.maps.routing_v2.types.fallback_info import (
FallbackInfo,
FallbackReason,
FallbackRoutingMode,
)
from google.maps.routing_v2.types.location import Location
from google.maps.routing_v2.types.maneuver import Maneuver
from google.maps.routing_v2.types.navigation_instruction import NavigationInstruction
from google.maps.routing_v2.types.polyline import (
Polyline,
PolylineEncoding,
PolylineQuality,
)
from google.maps.routing_v2.types.route import (
Route,
RouteLeg,
RouteLegStep,
RouteLegStepTravelAdvisory,
RouteLegTravelAdvisory,
RouteTravelAdvisory,
)
from google.maps.routing_v2.types.route_label import RouteLabel
from google.maps.routing_v2.types.route_modifiers import RouteModifiers
from google.maps.routing_v2.types.route_travel_mode import RouteTravelMode
from google.maps.routing_v2.types.routes_service import (
ComputeRouteMatrixRequest,
ComputeRoutesRequest,
ComputeRoutesResponse,
RouteMatrixDestination,
RouteMatrixElement,
RouteMatrixElementCondition,
RouteMatrixOrigin,
)
from google.maps.routing_v2.types.routing_preference import RoutingPreference
from google.maps.routing_v2.types.speed_reading_interval import SpeedReadingInterval
from google.maps.routing_v2.types.toll_info import TollInfo
from google.maps.routing_v2.types.toll_passes import TollPass
from google.maps.routing_v2.types.units import Units
from google.maps.routing_v2.types.vehicle_emission_type import VehicleEmissionType
from google.maps.routing_v2.types.vehicle_info import VehicleInfo
from google.maps.routing_v2.types.waypoint import Waypoint
# Public API of google.maps.routing: every name re-exported above from the
# versioned google.maps.routing_v2 package.
__all__ = (
    "RoutesClient",
    "RoutesAsyncClient",
    "FallbackInfo",
    "FallbackReason",
    "FallbackRoutingMode",
    "Location",
    "Maneuver",
    "NavigationInstruction",
    "Polyline",
    "PolylineEncoding",
    "PolylineQuality",
    "Route",
    "RouteLeg",
    "RouteLegStep",
    "RouteLegStepTravelAdvisory",
    "RouteLegTravelAdvisory",
    "RouteTravelAdvisory",
    "RouteLabel",
    "RouteModifiers",
    "RouteTravelMode",
    "ComputeRouteMatrixRequest",
    "ComputeRoutesRequest",
    "ComputeRoutesResponse",
    "RouteMatrixDestination",
    "RouteMatrixElement",
    "RouteMatrixOrigin",
    "RouteMatrixElementCondition",
    "RoutingPreference",
    "SpeedReadingInterval",
    "TollInfo",
    "TollPass",
    "Units",
    "VehicleEmissionType",
    "VehicleInfo",
    "Waypoint",
)
|
{
"content_hash": "fbfdf901b866d207ed35fe547dc84070",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 85,
"avg_line_length": 31.511627906976745,
"alnum_prop": 0.7627306273062731,
"repo_name": "googleapis/google-cloud-python",
"id": "3525247e0abdcf1fe285887904a69ec06d1c2400",
"size": "3310",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-maps-routing/google/maps/routing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
}
|
"""Plugin for TV4 Play, swedish TV channel TV4's streaming service."""
import re
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, RTMPStream
ASSET_URL = "http://prima.tv4play.se/api/web/asset/{0}/play"
SWF_URL = "http://www.tv4play.se/flash/tv4video.swf"

# Matches TV4 Play program pages and Fotbollskanalen video pages, capturing
# the numeric video id from either the video_id or videoid query parameter.
# Fix: the dots in the domain names were unescaped and therefore matched any
# character; escape them so only the intended hosts match.
_url_re = re.compile(r"""
    http(s)?://(www\.)?
    (?:
        tv4play\.se/program/[^\?/]+|
        fotbollskanalen\.se/video
    )
    .+(video_id|videoid)=(?P<video_id>\d+)
""", re.VERBOSE)
# Validation schema for the asset playback XML: collects every <item> under
# <items> and turns each item's child elements into a {tag: text} dict that
# must contain at least 'base', 'bitrate' (coerced to int) and 'url'.
_asset_schema = validate.Schema(
    validate.xml_findall("items/item"),
    [
        validate.all(
            validate.xml_findall("*"),
            validate.map(lambda e: (e.tag, e.text)),
            validate.transform(dict),
            {
                "base": validate.text,
                "bitrate": validate.all(
                    validate.text, validate.transform(int)
                ),
                "url": validate.text
            }
        )
    ]
)
class TV4Play(Plugin):
    """Streamlink plugin for TV4 Play and Fotbollskanalen video pages."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_streams(self):
        """Fetch the asset list for the page's video id and build streams."""
        video_id = _url_re.match(self.url).group("video_id")
        res = http.get(ASSET_URL.format(video_id))
        assets = http.xml(res, schema=_asset_schema)

        streams = {}
        for asset in assets:
            asset_url = asset["url"]
            if urlparse(asset_url).path.endswith(".f4m"):
                # HDS manifest: may expand into several quality variants.
                streams.update(
                    HDSStream.parse_manifest(self.session, asset_url,
                                             pvswf=SWF_URL)
                )
            elif asset["base"].startswith("rtmp"):
                # RTMP asset: keyed by bitrate, e.g. "1200k".
                name = "{0}k".format(asset["bitrate"])
                streams[name] = RTMPStream(self.session, {
                    "rtmp": asset["base"],
                    "playpath": asset_url,
                    "live": True
                })
        return streams
__plugin__ = TV4Play
|
{
"content_hash": "a94fd930ca47d20d9c29039b9ebd5a3d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 27.835616438356166,
"alnum_prop": 0.5285433070866141,
"repo_name": "mmetak/streamlink",
"id": "c6bed2f5eda9602141799c161df0304c5f44cc34",
"size": "2032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/tv4play.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "932019"
},
{
"name": "Shell",
"bytes": "16668"
}
],
"symlink_target": ""
}
|
"""Class to run the redmapper randoms
"""
|
{
"content_hash": "3c2ff468e04b7829274b6c46232f2c63",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 37,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6744186046511628,
"repo_name": "erykoff/redmapper",
"id": "f81aa4843052037e47fc87f07ee9aee0bda582e8",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "redmapper/run_randoms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35922"
},
{
"name": "Dockerfile",
"bytes": "1872"
},
{
"name": "Python",
"bytes": "971787"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Probe(object):
    """
    Swagger model for a Kubernetes probe (health check performed against a
    container).

    NOTE: This class is auto generated by the swagger code generator program.
    Manual changes here: type-safe __eq__/__ne__ and removal of the `six`
    dependency in to_dict().
    """

    def __init__(self, _exec=None, failure_threshold=None, http_get=None,
                 initial_delay_seconds=None, period_seconds=None,
                 success_threshold=None, tcp_socket=None, timeout_seconds=None):
        """
        V1Probe - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> swagger type name (drives to_dict()).
        self.swagger_types = {
            '_exec': 'V1ExecAction',
            'failure_threshold': 'int',
            'http_get': 'V1HTTPGetAction',
            'initial_delay_seconds': 'int',
            'period_seconds': 'int',
            'success_threshold': 'int',
            'tcp_socket': 'V1TCPSocketAction',
            'timeout_seconds': 'int'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            '_exec': 'exec',
            'failure_threshold': 'failureThreshold',
            'http_get': 'httpGet',
            'initial_delay_seconds': 'initialDelaySeconds',
            'period_seconds': 'periodSeconds',
            'success_threshold': 'successThreshold',
            'tcp_socket': 'tcpSocket',
            'timeout_seconds': 'timeoutSeconds'
        }
        # '_exec' is stored name-mangled (__exec) because the plain name
        # '_exec' is taken by the property below.
        self.__exec = _exec
        self._failure_threshold = failure_threshold
        self._http_get = http_get
        self._initial_delay_seconds = initial_delay_seconds
        self._period_seconds = period_seconds
        self._success_threshold = success_threshold
        self._tcp_socket = tcp_socket
        self._timeout_seconds = timeout_seconds

    @property
    def _exec(self):
        """
        Gets the _exec of this V1Probe.
        One and only one of the following should be specified. Exec specifies the action to take.

        :return: The _exec of this V1Probe.
        :rtype: V1ExecAction
        """
        return self.__exec

    @_exec.setter
    def _exec(self, _exec):
        """
        Sets the _exec of this V1Probe.

        :param _exec: The _exec of this V1Probe.
        :type: V1ExecAction
        """
        self.__exec = _exec

    @property
    def failure_threshold(self):
        """
        Gets the failure_threshold of this V1Probe.
        Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.

        :return: The failure_threshold of this V1Probe.
        :rtype: int
        """
        return self._failure_threshold

    @failure_threshold.setter
    def failure_threshold(self, failure_threshold):
        """
        Sets the failure_threshold of this V1Probe.

        :param failure_threshold: The failure_threshold of this V1Probe.
        :type: int
        """
        self._failure_threshold = failure_threshold

    @property
    def http_get(self):
        """
        Gets the http_get of this V1Probe.
        HTTPGet specifies the http request to perform.

        :return: The http_get of this V1Probe.
        :rtype: V1HTTPGetAction
        """
        return self._http_get

    @http_get.setter
    def http_get(self, http_get):
        """
        Sets the http_get of this V1Probe.

        :param http_get: The http_get of this V1Probe.
        :type: V1HTTPGetAction
        """
        self._http_get = http_get

    @property
    def initial_delay_seconds(self):
        """
        Gets the initial_delay_seconds of this V1Probe.
        Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes

        :return: The initial_delay_seconds of this V1Probe.
        :rtype: int
        """
        return self._initial_delay_seconds

    @initial_delay_seconds.setter
    def initial_delay_seconds(self, initial_delay_seconds):
        """
        Sets the initial_delay_seconds of this V1Probe.

        :param initial_delay_seconds: The initial_delay_seconds of this V1Probe.
        :type: int
        """
        self._initial_delay_seconds = initial_delay_seconds

    @property
    def period_seconds(self):
        """
        Gets the period_seconds of this V1Probe.
        How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.

        :return: The period_seconds of this V1Probe.
        :rtype: int
        """
        return self._period_seconds

    @period_seconds.setter
    def period_seconds(self, period_seconds):
        """
        Sets the period_seconds of this V1Probe.

        :param period_seconds: The period_seconds of this V1Probe.
        :type: int
        """
        self._period_seconds = period_seconds

    @property
    def success_threshold(self):
        """
        Gets the success_threshold of this V1Probe.
        Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.

        :return: The success_threshold of this V1Probe.
        :rtype: int
        """
        return self._success_threshold

    @success_threshold.setter
    def success_threshold(self, success_threshold):
        """
        Sets the success_threshold of this V1Probe.

        :param success_threshold: The success_threshold of this V1Probe.
        :type: int
        """
        self._success_threshold = success_threshold

    @property
    def tcp_socket(self):
        """
        Gets the tcp_socket of this V1Probe.
        TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported

        :return: The tcp_socket of this V1Probe.
        :rtype: V1TCPSocketAction
        """
        return self._tcp_socket

    @tcp_socket.setter
    def tcp_socket(self, tcp_socket):
        """
        Sets the tcp_socket of this V1Probe.

        :param tcp_socket: The tcp_socket of this V1Probe.
        :type: V1TCPSocketAction
        """
        self._tcp_socket = tcp_socket

    @property
    def timeout_seconds(self):
        """
        Gets the timeout_seconds of this V1Probe.
        Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes

        :return: The timeout_seconds of this V1Probe.
        :rtype: int
        """
        return self._timeout_seconds

    @timeout_seconds.setter
    def timeout_seconds(self, timeout_seconds):
        """
        Sets the timeout_seconds of this V1Probe.

        :param timeout_seconds: The timeout_seconds of this V1Probe.
        :type: int
        """
        self._timeout_seconds = timeout_seconds

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        swagger models, lists and dicts.
        """
        result = {}
        # Plain iteration replaces six.iteritems; works on py2 and py3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fix: the original compared __dict__ unconditionally, which raised
        AttributeError for objects without a __dict__ (e.g. ints, strings).
        Returning NotImplemented lets Python fall back to identity.
        """
        if not isinstance(other, V1Probe):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
|
{
"content_hash": "2986ec58fedf1e7953b2bba803a3a5c9",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 186,
"avg_line_length": 32.40816326530612,
"alnum_prop": 0.5990764063811923,
"repo_name": "skuda/client-python",
"id": "74a4748eba9c3fa946305d5506ccb14b1079a3a7",
"size": "9545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_probe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
import sys
import asyncio
import random
from telepot import glance
import telepot.aio
import telepot.aio.helper
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from telepot.aio.delegate import (
per_chat_id, per_callback_query_origin, create_open, pave_event_space)
"""
$ python3.5 quiza.py <token>
Send a chat message to the bot. It will give you a math quiz. Stay silent for
10 seconds to end the quiz.
It handles callback query by their origins. All callback query originated from
the same chat message will be handled by the same `CallbackQueryOriginHandler`.
"""
class QuizStarter(telepot.aio.helper.ChatHandler):
    """Replies to any chat message with a START button, then closes itself
    so a Quizzer (keyed on the callback-query origin) can take over."""

    def __init__(self, *args, **kwargs):
        super(QuizStarter, self).__init__(*args, **kwargs)

    async def on_chat_message(self, msg):
        content_type, chat_type, chat_id = glance(msg)
        keyboard = InlineKeyboardMarkup(inline_keyboard=[[
            InlineKeyboardButton(text='START', callback_data='start'),
        ]])
        await self.sender.sendMessage('Press START to do some math ...',
                                      reply_markup=keyboard)
        # Hand over: the button press is routed to a Quizzer instance.
        self.close()
class Quizzer(telepot.aio.helper.CallbackQueryOriginHandler):
    """Runs one math quiz, keyed on the originating callback-query message.

    Keeps a running right/wrong score and edits the quiz message in place
    with each new question; the quiz ends when the handler idles out.
    """
    def __init__(self, *args, **kwargs):
        super(Quizzer, self).__init__(*args, **kwargs)
        # Score buckets keyed by correctness: True -> right, False -> wrong.
        self._score = {True: 0, False: 0}
        # Correct answer of the question currently shown (None before first).
        self._answer = None
    async def _show_next_question(self):
        """Edit the origin message to show a new question; return its answer."""
        x = random.randint(1,50)
        y = random.randint(1,50)
        sign, op = random.choice([('+', lambda a,b: a+b),
                                  ('-', lambda a,b: a-b),
                                  ('x', lambda a,b: a*b)])
        answer = op(x,y)
        question = '%d %s %d = ?' % (x, sign, y)
        # Four random decoys plus the real answer, sorted so the correct
        # choice's position gives nothing away.
        choices = sorted(list(map(random.randint, [-49]*4, [2500]*4)) + [answer])
        await self.editor.editMessageText(question,
            reply_markup=InlineKeyboardMarkup(
                inline_keyboard=[
                    list(map(lambda c: InlineKeyboardButton(text=str(c), callback_data=str(c)), choices))
                ]
            )
        )
        return answer
    async def on_callback_query(self, msg):
        """Score the previous answer (unless this is START), then ask again."""
        query_id, from_id, query_data = glance(msg, flavor='callback_query')
        if query_data != 'start':
            # Bumps the True bucket on a correct press, False otherwise.
            self._score[self._answer == int(query_data)] += 1
        self._answer = await self._show_next_question()
    async def on__idle(self, event):
        """On idle timeout: replace the question with the final score and stop."""
        text = '%d out of %d' % (self._score[True], self._score[True]+self._score[False])
        await self.editor.editMessageText(text, reply_markup=None)
        self.close()
# Bot token is taken from the command line: python3.5 quiza.py <token>
TOKEN = sys.argv[1]
bot = telepot.aio.DelegatorBot(TOKEN, [
    # One QuizStarter per chat id; closes itself after sending the button
    # (or after 3 idle seconds).
    pave_event_space()(
        per_chat_id(), create_open, QuizStarter, timeout=3),
    # One Quizzer per callback-query origin (i.e. per quiz message);
    # 10 seconds of silence ends the quiz via on__idle.
    pave_event_space()(
        per_callback_query_origin(), create_open, Quizzer, timeout=10),
])
loop = asyncio.get_event_loop()
loop.create_task(bot.message_loop())
print('Listening ...')
loop.run_forever()
|
{
"content_hash": "4dfc275c29a32cfffa9684bf30dadf94",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 105,
"avg_line_length": 33.831460674157306,
"alnum_prop": 0.603786117568914,
"repo_name": "mpunkenhofer/irc-telegram-bot",
"id": "0c55492efba333aa55c23e3e91a33b1f938bf475",
"size": "3011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telepot/examples/callback/quiza.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8068"
},
{
"name": "Python",
"bytes": "364472"
}
],
"symlink_target": ""
}
|
from .base import Config
from django.utils.functional import SimpleLazyObject
__version__ = '1.0'
# On Django versions that ship the app-config framework (django.apps),
# registration is deferred to ConstanceConfig; presumably that app config
# sets up the lazy `config` object — verify in constance.apps.
try:
    from django.apps import AppConfig  # noqa
except ImportError:
    # Older Django without django.apps: expose the lazy config object here.
    config = SimpleLazyObject(Config)
else:
    default_app_config = 'constance.apps.ConstanceConfig'
|
{
"content_hash": "77b62c631eeac45f80c71192b064eb26",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.7481751824817519,
"repo_name": "metalpriest/django-constance",
"id": "3c35abb7806319fd1088faad4ddb7853cc531158",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constance/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3385"
},
{
"name": "Python",
"bytes": "36993"
}
],
"symlink_target": ""
}
|
import product
import website
import sale_order
|
{
"content_hash": "570ddacf9ab1b72ae5b17c52cbba9763",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 17,
"avg_line_length": 16,
"alnum_prop": 0.8541666666666666,
"repo_name": "vileopratama/vitech",
"id": "ed23b1cd13311c1f9672b3dcf05aa31704127960",
"size": "48",
"binary": false,
"copies": "422",
"ref": "refs/heads/master",
"path": "src/addons/website_event_sale/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the swtopics_message table."""
    def forwards(self, orm):
        # Adding model 'Message'
        db.create_table('swtopics_message', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('text', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('swtopics', ['Message'])
    def backwards(self, orm):
        # Deleting model 'Message'
        db.delete_table('swtopics_message')
    # Frozen ORM snapshot used by South when replaying this migration.
    models = {
        'swtopics.message': {
            'Meta': {'object_name': 'Message'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        }
    }
    complete_apps = ['swtopics']
|
{
"content_hash": "1ae739c775c06ca8a23efc0164a76c41",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 85,
"avg_line_length": 28,
"alnum_prop": 0.5714285714285714,
"repo_name": "snswa/swsites",
"id": "e90667c3496329c21eb0e29af1211ef26d48e8ce",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swtopics/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "338722"
},
{
"name": "Python",
"bytes": "301670"
},
{
"name": "Shell",
"bytes": "2618"
}
],
"symlink_target": ""
}
|
import unittest
from rfdevices.protocol import BasebandValue, Protocol, PulseOrder
class TestProtocol(unittest.TestCase):
    """Unit tests for Protocol construction and code preparation."""

    @staticmethod
    def get_test_protocol(repeat: int=None):
        """Build a fixed test Protocol; pass `repeat` through only if given."""
        extra = {'repeat': repeat} if repeat is not None else {}
        return Protocol(pulse_length=1,
                        pulse_order=PulseOrder.LowHigh,
                        sync=BasebandValue(low=1, high=2),
                        zero=BasebandValue(low=3, high=4),
                        one=BasebandValue(low=5, high=6),
                        message_length=16,
                        **extra)

    def test_init_no_repeat_specified(self):
        proto = self.get_test_protocol()
        self.assertEqual(proto.pulse_length, 1)
        self.assertIs(proto.pulse_order, PulseOrder.LowHigh)
        self.assertEqual(proto.sync.low, 1)
        self.assertEqual(proto.sync.high, 2)
        self.assertEqual(proto.zero.low, 3)
        self.assertEqual(proto.zero.high, 4)
        self.assertEqual(proto.one.low, 5)
        self.assertEqual(proto.one.high, 6)
        self.assertEqual(proto.message_length, 16)
        # Default repeat count when the argument is omitted.
        self.assertEqual(proto.repeat, 1)

    def test_init_custom_repeat(self):
        self.assertEqual(self.get_test_protocol(101).repeat, 101)

    def test_prepare_code(self):
        proto = self.get_test_protocol()
        binary = proto.prepare_code(100)
        self.assertEqual(len(binary), proto.message_length)
        # 100 zero-padded to 16 binary digits.
        self.assertEqual(binary, '0000000001100100')
|
{
"content_hash": "e49f558162e4d2147768d7cb7e19029b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 36.06666666666667,
"alnum_prop": 0.618607516943931,
"repo_name": "milas/rfdevices",
"id": "e6d7db0061dc708bedd657a59ed45152603a8687",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_protocol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "108792"
}
],
"symlink_target": ""
}
|
"""Stores all of the different word classes
The base Word class is inherited by all of the other classes. It contains
the logic to store the string of the word in particular, as well of the index
of where that word appeared in the user's command.
Any instance of any of these classes can be referred to as a word object,
however, an action word object can be simplified as just an action object.
"""
class Word:
    """A generic word recognized in the user's command.

    Attributes:
        word (str): The recognized word itself.
        index (int): Position of the word within the user's command.
    """

    def __init__(self, word, index):
        self.word, self.index = word, index
class Action(Word):
    """An action word; collects the direct objects it acts upon."""

    def __init__(self, word, index):
        super().__init__(word, index)
        # Fix: the original assigned a throwaway local `direct_objects`,
        # so instances never actually got the attribute.
        self.direct_objects = []
class Object(Word):
    """A noun-like word; carries its adjectives and an object type."""

    def __init__(self, word, index, object_type):
        super().__init__(word, index)
        self.adjectives = []          # list of Adjective word objects
        self.type = object_type

    def get_fullname(self):
        """Return the word prefixed by its adjectives, space separated."""
        if not self.adjectives:
            return self.word
        prefix = "".join(adj.word + " " for adj in self.adjectives)
        return prefix + self.word
class Adjective(Word):
    """An adjective word object; modifies an Object's full name."""

    def __init__(self, word, index):
        super().__init__(word, index)
class Preposition(Word):
    """A preposition word object."""

    def __init__(self, word, index):
        super().__init__(word, index)
class Pronoun(Word):
    """A pronoun word object."""

    def __init__(self, word, index):
        super().__init__(word, index)
|
{
"content_hash": "ee7b56af7e22836003609aedd23731d7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 83,
"avg_line_length": 28.50632911392405,
"alnum_prop": 0.5404085257548845,
"repo_name": "huntermalm/SevenStories",
"id": "d80ef13ee70fecfff6531fc940503de63a3715ac",
"size": "2252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SevenStories/wordtypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42007"
}
],
"symlink_target": ""
}
|
from . import bestfriend
|
{
"content_hash": "7acadce410ba7ca498f4a7bcb71b1738",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 24,
"alnum_prop": 0.8333333333333334,
"repo_name": "budihartono/odoo_addons",
"id": "2f179d76d303ee3bf0ac1820f935c56c7b39b74b",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puspabeauty/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10397"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('todo', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='item',
name='created_date',
field=models.DateField(auto_now=True),
),
migrations.AlterField(
model_name='item',
name='priority',
field=models.PositiveIntegerField(),
),
]
|
{
"content_hash": "12201737a7caa2eb8b3d13ba3348bcf3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 22.565217391304348,
"alnum_prop": 0.558766859344894,
"repo_name": "carlosedb/django-todo",
"id": "a346e88d4b7b31cfd4f32f55ac42e849b2608ee3",
"size": "543",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "todo/migrations/0002_auto_20150614_2339.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4914"
},
{
"name": "HTML",
"bytes": "15309"
},
{
"name": "JavaScript",
"bytes": "16664"
},
{
"name": "Python",
"bytes": "30358"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
class AccountHead(models.Model):
    """One node in the chart of accounts.

    Heads appear to reference each other via integer codes
    (parent_head_code / head_code) rather than a ForeignKey —
    NOTE(review): confirm code uniqueness is enforced elsewhere.
    """
    HEAD_TYPES = (
        ("ast", 'asset'),
        ("lib", 'liability'),
        ("oe", 'owners equity'),
        ("exp", 'expense'),
        ("inc", 'income'),
    )
    # PROTECT: a user owning account heads cannot be deleted.
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    parent_head_code = models.IntegerField(null=True, blank=True)
    name = models.TextField()
    # Accounting category from HEAD_TYPES; may be blank.
    type = models.CharField(choices=HEAD_TYPES, max_length=5, blank=True)
    head_code = models.IntegerField()
    ledger_head_code = models.CharField(max_length=255, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        db_table = 'accounting_account_head'
class Transaction(models.Model):
    """A voucher header; its debit/credit legs live in TransactionDetails."""
    STATUS_TYPES = (
        (0, 'disabled'),
        (1, 'entered'),
        (2, 'processing'),
        (3, 'processed'),
    )
    VOUCHER_TYPES = (
        (1, 'Purchase'),
        (2, 'Expense'),
        (3, 'Journal'),
        (4, 'Contra'),
        (5, 'Payment'),
        (6, 'Receipt'),
        (7, 'Debit note'),
        (8, 'Credit note'),
    )
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    transaction_date = models.DateField(null=True, blank=True)
    # Self-reference to another Transaction — NOTE(review): the linking
    # semantics (e.g. adjustment/reversal) are not visible here; confirm.
    transaction_ref = models.ForeignKey('Transaction', null=True, blank=True, on_delete=models.PROTECT)
    voucher_type = models.SmallIntegerField(choices=VOUCHER_TYPES)
    voucher_number = models.CharField(max_length=255, blank=True)
    voucher_status = models.SmallIntegerField(choices=STATUS_TYPES)
    description = models.TextField(null=True,blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): unlike the sibling models there is no Meta.db_table here,
    # so Django uses the default table name — confirm that is intentional.
class TransactionDetails(models.Model):
    """A single debit or credit line belonging to a Transaction."""
    # Which side of the ledger this line posts to.
    TRANS_POSITION = (
        ("dr", 'debit'),
        ("cr", 'credit'),
    )
    transaction = models.ForeignKey('Transaction', on_delete=models.PROTECT)
    account_head = models.ForeignKey('AccountHead', on_delete=models.PROTECT, related_name="head_name")
    position = models.CharField(choices=TRANS_POSITION, max_length=5)
    # NOTE(review): monetary amount stored as free text rather than a
    # DecimalField -- confirm this is intentional.
    amount = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        db_table = 'accounting_transaction_details'
class DashboardMeta(models.Model):
    """Per-user key/value store (presumably for dashboard settings, per the table name)."""
    meta_key = models.TextField()
    meta_value = models.TextField()
    user = models.ForeignKey(User,on_delete=models.PROTECT)
    class Meta:
        db_table = 'accounting_dashboard_meta'
|
{
"content_hash": "21cfde2423e8941d972e91a11f6dad64",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 103,
"avg_line_length": 31.871794871794872,
"alnum_prop": 0.6427996781979083,
"repo_name": "pyprism/MoneyBag",
"id": "0e604ba5c504704d2642042710a1b2176b1ed331",
"size": "2486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "accounting/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "209884"
},
{
"name": "HTML",
"bytes": "87848"
},
{
"name": "JavaScript",
"bytes": "1077459"
},
{
"name": "Python",
"bytes": "81217"
}
],
"symlink_target": ""
}
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase, ET
class Error(ElementBase):
    """
    XMPP stanzas of type 'error' should include an <error> stanza that
    describes the nature of the error and how it should be handled.
    Use the 'XEP-0086: Error Condition Mappings' plugin to include error
    codes used in older XMPP versions.
    Example error stanza:
        <error type="cancel" code="404">
          <item-not-found xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
          <text xmlns="urn:ietf:params:xml:ns:xmpp-stanzas">
            The item was not found.
          </text>
        </error>
    Stanza Interface:
        code      -- The error code used in older XMPP versions.
        condition -- The name of the condition element.
        text      -- Human readable description of the error.
        type      -- Error type indicating how the error should be handled.
    Attributes:
        conditions   -- The set of allowable error condition elements.
        condition_ns -- The namespace for the condition element.
        types        -- A set of values indicating how the error
                        should be treated.
    Methods:
        setup         -- Overrides ElementBase.setup.
        get_condition -- Retrieve the name of the condition element.
        set_condition -- Add a condition element.
        del_condition -- Remove the condition element.
        get_text      -- Retrieve the contents of the <text> element.
        set_text      -- Set the contents of the <text> element.
        del_text      -- Remove the <text> element.
    """
    namespace = 'jabber:client'
    name = 'error'
    plugin_attrib = 'error'
    interfaces = set(('code', 'condition', 'text', 'type',
                      'gone', 'redirect', 'by'))
    sub_interfaces = set(('text',))
    plugin_attrib_map = {}
    plugin_tag_map = {}
    # Standard stanza error condition element names (defined in the
    # urn:ietf:params:xml:ns:xmpp-stanzas namespace, see condition_ns below).
    conditions = set(('bad-request', 'conflict', 'feature-not-implemented',
                      'forbidden', 'gone', 'internal-server-error',
                      'item-not-found', 'jid-malformed', 'not-acceptable',
                      'not-allowed', 'not-authorized', 'payment-required',
                      'recipient-unavailable', 'redirect',
                      'registration-required', 'remote-server-not-found',
                      'remote-server-timeout', 'resource-constraint',
                      'service-unavailable', 'subscription-required',
                      'undefined-condition', 'unexpected-request'))
    condition_ns = 'urn:ietf:params:xml:ns:xmpp-stanzas'
    types = set(('cancel', 'continue', 'modify', 'auth', 'wait'))
    def setup(self, xml=None):
        """
        Populate the stanza object using an optional XML object.
        Overrides ElementBase.setup.
        Sets a default error type and condition, and changes the
        parent stanza's type to 'error'.
        Arguments:
            xml -- Use an existing XML object for the stanza's values.
        """
        if ElementBase.setup(self, xml):
            # If we had to generate XML then set default values.
            self['type'] = 'cancel'
            self['condition'] = 'feature-not-implemented'
        if self.parent is not None:
            self.parent()['type'] = 'error'
    def get_condition(self):
        """Return the condition element's name."""
        # Scan direct children for an element in the conditions namespace
        # whose local name is one of the known conditions.
        for child in self.xml:
            if "{%s}" % self.condition_ns in child.tag:
                cond = child.tag.split('}', 1)[-1]
                if cond in self.conditions:
                    return cond
        return ''
    def set_condition(self, value):
        """
        Set the tag name of the condition element.
        Unknown condition names are silently ignored.
        Arguments:
            value -- The tag name of the condition element.
        """
        if value in self.conditions:
            # Replace any existing condition element with the new one.
            del self['condition']
            self.xml.append(ET.Element("{%s}%s" % (self.condition_ns, value)))
        return self
    def del_condition(self):
        """Remove the condition element."""
        for child in self.xml:
            if "{%s}" % self.condition_ns in child.tag:
                tag = child.tag.split('}', 1)[-1]
                if tag in self.conditions:
                    self.xml.remove(child)
        return self
    def get_text(self):
        """Retrieve the contents of the <text> element."""
        return self._get_sub_text('{%s}text' % self.condition_ns)
    def set_text(self, value):
        """
        Set the contents of the <text> element.
        Arguments:
            value -- The new contents for the <text> element.
        """
        self._set_sub_text('{%s}text' % self.condition_ns, text=value)
        return self
    def del_text(self):
        """Remove the <text> element."""
        self._del_sub('{%s}text' % self.condition_ns)
        return self
    def get_gone(self):
        """Return the text content of the <gone> condition ('' when absent)."""
        return self._get_sub_text('{%s}gone' % self.condition_ns, '')
    def get_redirect(self):
        """Return the text content of the <redirect> condition ('' when absent)."""
        return self._get_sub_text('{%s}redirect' % self.condition_ns, '')
    def set_gone(self, value):
        """Set a <gone> condition with *value* as its text.

        A falsy value instead clears an existing 'gone' condition.
        NOTE(review): returns None on the falsy path (asymmetric with the
        truthy branch, which returns the _set_sub_text result).
        """
        if value:
            del self['condition']
            return self._set_sub_text('{%s}gone' % self.condition_ns, value)
        elif self['condition'] == 'gone':
            del self['condition']
    def set_redirect(self, value):
        """Set a <redirect> condition with *value* as its text.

        A falsy value instead clears an existing 'redirect' condition.
        """
        if value:
            del self['condition']
            ns = self.condition_ns
            return self._set_sub_text('{%s}redirect' % ns, value)
        elif self['condition'] == 'redirect':
            del self['condition']
    def del_gone(self):
        """Remove the <gone> condition element."""
        self._del_sub('{%s}gone' % self.condition_ns)
    def del_redirect(self):
        """Remove the <redirect> condition element."""
        self._del_sub('{%s}redirect' % self.condition_ns)
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility,
# so legacy camelCase callers keep working against the new methods.
Error.getCondition = Error.get_condition
Error.setCondition = Error.set_condition
Error.delCondition = Error.del_condition
Error.getText = Error.get_text
Error.setText = Error.set_text
Error.delText = Error.del_text
|
{
"content_hash": "3cefd5c143b32f9cda15d73188d952e9",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 78,
"avg_line_length": 35.672413793103445,
"alnum_prop": 0.5798292250684711,
"repo_name": "isandlaTech/cohorte-devtools",
"id": "56558ba814a747b4c29622494e3ed6c884c6f556",
"size": "6207",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "qualifier/deploy/cohorte-home/repo/sleekxmpp/stanza/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151318"
},
{
"name": "HTML",
"bytes": "113064"
},
{
"name": "Java",
"bytes": "172793"
},
{
"name": "JavaScript",
"bytes": "2165497"
},
{
"name": "Python",
"bytes": "13926564"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
}
|
"""The YAWT extension package.
This file contains utility classes in use by various extensions in YAWT
"""
import os
import jsonpickle
from flask import g, request
from yawt.utils import load_file, save_file, abs_state_folder, cfg,\
single_dict_var, ReprMixin, EqMixin, fullname, content_folder
class Plugin(object):
    """Base class for YAWT plugins.

    Stores the associated app (if any) and calls init_app() on it,
    following the usual Flask extension pattern.
    """

    def __init__(self, app=None):
        """Remember *app* and initialize against it when one is provided."""
        self.app = app
        if app is None:
            return
        self.init_app(app)

    def init_app(self, app):
        """Default hook: subclasses override this; the base does nothing."""
class BranchedVisitor(Plugin):
    """Visitor plugin that partitions the article space by root folder.

    One processor (built by *processor_factory*) is kept per configured
    root, and each processor only ever sees the articles and file changes
    that fall under its own root.
    """

    def __init__(self, roots_cfg, processor_factory, app=None):
        super(BranchedVisitor, self).__init__(app)
        self.roots_cfg = roots_cfg
        self.processor_factory = processor_factory
        self.processors = {}

    def _roots(self):
        # Fall back to the single "everything" root when nothing is configured.
        return cfg(self.roots_cfg) or ['']

    def on_pre_walk(self):
        """Lazily build one processor per root and start each one's walk."""
        for branch in self._roots():
            proc = self.processor_factory(branch)
            self.processors[branch] = proc
            proc.on_pre_walk()

    def on_visit_article(self, article):
        """Forward the article only to processors whose root contains it."""
        for branch in self._roots():
            if article.info.under(branch):
                self.processors[branch].on_visit_article(article)

    def on_post_walk(self):
        """Finish the walk on every processor."""
        for branch in self._roots():
            self.processors[branch].on_post_walk()

    def on_files_changed(self, changed):
        """Split the changeset by root; hand each slice to a fresh processor."""
        per_branch = {branch: changed.filter(os.path.join(content_folder(),
                                                          branch))
                      for branch in self._roots()}
        for branch, subset in per_branch.items():
            self.processor_factory(branch).on_files_changed(subset)
class ArticleProcessor(object):
    """Walk-protocol participant expecting only articles under self.root.

    All walk hooks are no-ops here; subclasses override the ones they need.
    """

    def __init__(self, root=None):
        self.root = root

    def on_pre_walk(self):
        """Hook run before the walk starts; no-op by default."""

    def on_visit_article(self, article):
        """Hook run for each visited article; no-op by default."""

    def unvisit(self, name):
        """Hook to undo a previous visit of *name*; no-op by default."""

    def on_post_walk(self):
        """Hook run after the walk finishes; no-op by default."""

    def on_files_changed(self, changed):
        """Translate a repository changeset into unvisit/visit calls."""
        changed = changed.content_changes().normalize()
        # Deleted and modified files first lose their previously-indexed state.
        for repofile in changed.deleted + changed.modified:
            article_name = fullname(repofile)
            if article_name:
                self.unvisit(article_name)
        # Modified and added files are then (re)visited.
        touched = changed.modified + changed.added
        for article in g.site.fetch_articles_by_repofiles(touched):
            self.on_visit_article(article)
        self.on_post_walk()
class SummaryProcessor(ArticleProcessor):
    """A special kind of ArticleProcessor which will keep track of a summary
    file (a jsonpickle'd python object) in a _state subfolder matching the
    root supplied.
    Subclasses will typically have to implement _init_summary(),
    on_visit_article() and unvisit().
    """
    def __init__(self, root, plugin_name, summary_file):
        super(SummaryProcessor, self).__init__(root)
        self.plugin_name = plugin_name    # used as a subfolder of the state dir
        self.summary_file = summary_file  # filename of the persisted summary
        self.summary = None               # in-memory summary; set by subclass/_load_summary
    def on_pre_walk(self):
        # A full walk starts from a fresh summary.
        self._init_summary()
    def _init_summary(self):
        # Subclass hook: create a new, empty summary object.
        raise NotImplementedError()
    def on_post_walk(self):
        # Persist whatever the walk accumulated.
        self._save_summary()
    def on_files_changed(self, changed):
        # Incremental update: start from the previously persisted summary.
        self._load_summary()
        super(SummaryProcessor, self).on_files_changed(changed)
    def _load_summary(self):
        # Deserialize the jsonpickle'd summary from disk.
        self.summary = jsonpickle.decode(load_file(self._abs_summary_file()))
    def _save_summary(self):
        self.summary = self.summary  # no-op guard removed; keep behavior below
    def _abs_summary_file(self):
        # <state folder>/<plugin name>/<root>/<summary file>
        path = os.path.join(abs_state_folder(),
                            self.plugin_name,
                            self.root,
                            self.summary_file)
        return path
    @staticmethod
    def context_processor(summaryfile_cfg, bases_cfg, varname):
        """Return a single value dictionary containing the summary file which
        currently applies"""
        summary_file = cfg(summaryfile_cfg)
        bases = cfg(bases_cfg) or ['']
        for base in bases:
            # First configured base that prefixes the request path wins.
            if request.path.startswith('/'+base):
                path = os.path.join(abs_state_folder(), base, summary_file)
                loaded_file = load_file(path)
                return single_dict_var(varname, jsonpickle.decode(loaded_file))
        return {}
def _split_category(category):
(head, rest) = category, ''
if '/' in category:
(head, rest) = category.split('/', 1)
return (head, rest)
class HierarchyCount(ReprMixin, EqMixin):
    """Class which can process paths to count a 'hierarchy'. You pass in
    something like blah/foo.bar and we will count at each level.
    """
    def __init__(self, **kwargs):
        # category: label of this node; count: entries at or below this node;
        # children: HierarchyCount nodes for the next level down.
        self.category = kwargs.get('category', '')
        self.count = kwargs.get('count', 0)
        self.children = kwargs.get('children', [])
    def add(self, hierarchy):
        """Pass in something like 'blah/foo/hello' and we'll keep track of a
        tree where each node of the tree is an element in the hierarchy,
        keeping track of the counts below it.
        """
        self.count += 1
        if hierarchy:
            (head, rest) = _split_category(hierarchy)
            # Find an existing child for this head, or lazily create one.
            next_node = None
            for child in self.children:
                if child.category == head:
                    next_node = child
            if next_node is None:
                next_node = HierarchyCount()
                next_node.category = head
                self.children.append(next_node)
            # Recurse with the remainder of the path.
            next_node.add(rest)
    def remove(self, hierarchy):
        """Pass in something like 'blah/foo/hello' and we'll decrement the
        count at each node of the tree along that path, pruning any child
        whose count drops to zero."""
        self.count -= 1
        if hierarchy:
            (head, rest) = _split_category(hierarchy)
            for child in self.children:
                if child.category == head:
                    child.remove(rest)
                    break
            # Drop children that no longer count anything.
            self.children = [child for child in self.children
                             if child.count > 0]
    def child(self, category):
        """Return node matching category"""
        for child in self.children:
            if child.category == category:
                return child
        return None
    def sort(self, reverse=False):
        """Recursively sort the children of this tree"""
        if self.children:
            for child in self.children:
                child.sort(reverse)
            self.children.sort(key=lambda c: c.category, reverse=reverse)
|
{
"content_hash": "dd536002ff2c6c936f75d28fb2e47a7e",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 86,
"avg_line_length": 33.822429906542055,
"alnum_prop": 0.5991986736667587,
"repo_name": "drivet/yawt",
"id": "28b381040b3bbfde1ae430ee3cc92f584a84d96a",
"size": "7238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yawtext/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "192131"
},
{
"name": "Shell",
"bytes": "1034"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .core import (
get_stations,
get_station_data,
get_station_parameters,
)
|
{
"content_hash": "49ee70b9ce7aa005e618cf3cf45aa5a0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.6137931034482759,
"repo_name": "ocefpaf/ulmo",
"id": "9f8c3fe76b4059939e72e989352be6d862cd2718",
"size": "145",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ulmo/usace/rivergages/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "ColdFusion",
"bytes": "578001"
},
{
"name": "HTML",
"bytes": "160237"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "316815"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the 'linecolor' property of layout.scene.yaxis.

    Auto-generated plotly validator boilerplate; validation itself is
    performed by the ColorValidator base class.
    """
    def __init__(
        self, plotly_name="linecolor", parent_name="layout.scene.yaxis", **kwargs
    ):
        super(LinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
{
"content_hash": "c80fdaf9572f717f0e45f1b665eac420",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 33.714285714285715,
"alnum_prop": 0.5995762711864406,
"repo_name": "plotly/python-api",
"id": "a08a2df324ad3f1dd3b4fbcb33e46bdc8a32e6b6",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/yaxis/_linecolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import mock
from ironic.common import exception
from ironic.drivers import base as driver_base
from ironic.tests import base
class FakeVendorInterface(driver_base.VendorInterface):
    """Minimal VendorInterface exposing passthru methods with known outcomes."""
    def get_properties(self):
        pass
    @driver_base.passthru(['POST'])
    def noexception(self):
        # Passthru that succeeds and returns a value.
        return "Fake"
    @driver_base.passthru(['POST'])
    def ironicexception(self):
        # Passthru that raises an Ironic-specific exception.
        raise exception.IronicException("Fake!")
    @driver_base.passthru(['POST'])
    def normalexception(self):
        # Passthru that raises a generic (non-Ironic) exception.
        raise Exception("Fake!")
    def validate(self, task, **kwargs):
        pass
    def driver_validate(self, **kwargs):
        pass
class PassthruDecoratorTestCase(base.TestCase):
    """Tests for the exception handling of the @driver_base.passthru decorator."""
    def setUp(self):
        super(PassthruDecoratorTestCase, self).setUp()
        self.fvi = FakeVendorInterface()
        # Replace the module-level logger with a mock so calls can be asserted.
        driver_base.LOG = mock.Mock()
    def test_passthru_noexception(self):
        # A successful passthru returns its value unchanged.
        result = self.fvi.noexception()
        self.assertEqual("Fake", result)
    def test_passthru_ironicexception(self):
        # IronicExceptions propagate unchanged, and the failure is logged.
        self.assertRaises(exception.IronicException,
                          self.fvi.ironicexception, mock.ANY)
        driver_base.LOG.exception.assert_called_with(
            mock.ANY, 'ironicexception')
    def test_passthru_nonironicexception(self):
        # Arbitrary exceptions are re-raised as VendorPassthruException.
        self.assertRaises(exception.VendorPassthruException,
                          self.fvi.normalexception, mock.ANY)
        driver_base.LOG.exception.assert_called_with(
            mock.ANY, 'normalexception')
|
{
"content_hash": "549117f52a96f58b7106c0bb542a6d2e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 60,
"avg_line_length": 28.153846153846153,
"alnum_prop": 0.671448087431694,
"repo_name": "Tehsmash/ironic",
"id": "1f4f35f139c907b889c9bfaceea5f2502d416ae2",
"size": "2099",
"binary": false,
"copies": "2",
"ref": "refs/heads/staging/kiloplus",
"path": "ironic/tests/drivers/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2250030"
}
],
"symlink_target": ""
}
|
"""Pasta enables AST-based transformations on python source code."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pasta.base import annotate
from pasta.base import ast_utils
from pasta.base import codegen
def parse(src):
    """Parse *src* and return its AST, annotated with formatting details."""
    tree = ast_utils.parse(src)
    annotate.AstAnnotator(src).visit(tree)
    return tree
def dump(tree):
    """Regenerate source text from a (possibly modified) annotated AST."""
    return codegen.to_str(tree)
|
{
"content_hash": "c303c45c46c7ca5dda933292d6eed16b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 31.275862068965516,
"alnum_prop": 0.7574421168687983,
"repo_name": "google/pasta",
"id": "69ed8575d58c45892d71748deafefdaf4db8100c",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pasta/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191244"
}
],
"symlink_target": ""
}
|
# Flask/SQLAlchemy settings for the "sup" app.
DEBUG = True  # NOTE(review): development value -- disable in production.
SECRET_KEY = 'super-secret-key'  # NOTE(review): placeholder committed to source control; must be overridden for any real deployment.
SQLALCHEMY_DATABASE_URI = "postgresql://localhost/sup"  # local Postgres, database "sup"
|
{
"content_hash": "096c4764aa82492c85355f83c5eae6ed",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 25.25,
"alnum_prop": 0.7425742574257426,
"repo_name": "dfm/sup",
"id": "f28abd6d2739d23ddaeb0c4647ef2453a1871834",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sup/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3233"
},
{
"name": "HTML",
"bytes": "6204"
},
{
"name": "Python",
"bytes": "14593"
}
],
"symlink_target": ""
}
|
"""
Implementation of an OpenFlow flow table
"""
from libopenflow_01 import *
from pox.lib.revent import *
from pox.core import core
import time
import math
# FlowTable Entries:
# match - ofp_match (13-tuple)
# counters - hash from name -> count. May be stale
# actions - ordered list of ofp_action_*s to apply for matching packets
class TableEntry (object):
  """
  Models a flow table entry, with a match, actions, and options/flags/counters.
  Note: The current time can either be specified explicitly with the optional
  'now' parameter or is taken from time.time()
  """
  def __init__ (self, priority=OFP_DEFAULT_PRIORITY, cookie=0, idle_timeout=0,
                hard_timeout=0, flags=0, match=None, actions=None,
                buffer_id=None, now=None):
    """
    Initialize table entry.
    match/actions default to None and are replaced with a fresh ofp_match()
    and a fresh list here.  (The previous defaults -- match=ofp_match() and
    actions=[] -- were evaluated once at definition time, so every entry
    created via the defaults shared the *same* match object and action list:
    the classic Python mutable-default-argument bug.)
    """
    if now is None: now = time.time()
    if match is None: match = ofp_match()
    if actions is None: actions = []
    self.created = now                # Creation time; basis for hard timeout
    self.last_touched = self.created  # Last packet hit; basis for idle timeout
    self.byte_count = 0
    self.packet_count = 0
    self.priority = priority
    self.cookie = cookie
    self.idle_timeout = idle_timeout  # Seconds without traffic before expiry (0 = never)
    self.hard_timeout = hard_timeout  # Seconds after creation before expiry (0 = never)
    self.flags = flags
    self.match = match
    self.actions = actions
    self.buffer_id = buffer_id
  @staticmethod
  def from_flow_mod (flow_mod):
    """Create a TableEntry mirroring the fields of an ofp_flow_mod message."""
    return TableEntry(priority=flow_mod.priority,
                      cookie=flow_mod.cookie,
                      idle_timeout=flow_mod.idle_timeout,
                      hard_timeout=flow_mod.hard_timeout,
                      flags=flow_mod.flags,
                      match=flow_mod.match,
                      actions=flow_mod.actions,
                      buffer_id=flow_mod.buffer_id)
  def to_flow_mod (self, flags=None, **kw):
    """Convert this entry back into an ofp_flow_mod (flags may be overridden)."""
    if flags is None: flags = self.flags
    return ofp_flow_mod(priority=self.priority,
                        cookie=self.cookie,
                        match=self.match,
                        idle_timeout=self.idle_timeout,
                        hard_timeout=self.hard_timeout,
                        actions=self.actions,
                        buffer_id=self.buffer_id,
                        flags=flags, **kw)
  @property
  def effective_priority (self):
    """
    Exact matches effectively have an "infinite" priority
    ((1<<16)+1 sorts above any real 16-bit OpenFlow priority)
    """
    return self.priority if self.match.is_wildcarded else (1<<16) + 1
  def is_matched_by (self, match, priority=None, strict=False, out_port=None):
    """
    Tests whether a given match object matches this entry
    Used for, e.g., flow_mod updates
    If out_port is any value besides None, the flow entry must contain an
    output action to the specified port.
    In strict mode the match and priority must be exactly equal; otherwise
    a wildcard-aware containment check is used.
    """
    match_a = lambda a: isinstance(a, ofp_action_output) and a.port == out_port
    port_matches = (out_port is None) or any(match_a(a) for a in self.actions)
    if strict:
      return port_matches and self.match == match and self.priority == priority
    else:
      return port_matches and match.matches_with_wildcards(self.match)
  def touch_packet (self, byte_count, now=None):
    """
    Updates information of this entry based on encountering a packet.
    Updates both the cumulative given byte counts of packets encountered and
    the expiration timer.
    """
    if now is None: now = time.time()
    self.byte_count += byte_count
    self.packet_count += 1
    self.last_touched = now
  def is_idle_timed_out (self, now=None):
    """True if an idle timeout is set and has elapsed since the last hit."""
    if now is None: now = time.time()
    if self.idle_timeout > 0:
      if (now - self.last_touched) > self.idle_timeout:
        return True
    return False
  def is_hard_timed_out (self, now=None):
    """True if a hard timeout is set and has elapsed since creation."""
    if now is None: now = time.time()
    if self.hard_timeout > 0:
      if (now - self.created) > self.hard_timeout:
        return True
    return False
  def is_expired (self, now=None):
    """
    Tests whether this flow entry is expired due to its idle or hard timeout
    """
    if now is None: now = time.time()
    return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)
  def __str__ (self):
    return type(self).__name__ + "\n " + self.show()
  def __repr__ (self):
    return "TableEntry(" + self.show() + ")"
  def show (self):
    """Render the entry's fields as a single descriptive string."""
    outstr = ''
    outstr += "priority=%s, " % self.priority
    outstr += "cookie=%x, " % self.cookie
    outstr += "idle_timeout=%d, " % self.idle_timeout
    outstr += "hard_timeout=%d, " % self.hard_timeout
    outstr += "match=%s, " % self.match
    outstr += "actions=%s, " % repr(self.actions)
    outstr += "buffer_id=%s" % str(self.buffer_id)
    return outstr
  def flow_stats (self, now=None):
    """Build an ofp_flow_stats reply body describing this entry."""
    if now is None: now = time.time()
    # modf() splits the entry's age into fractional and whole seconds.
    dur_nsec,dur_sec = math.modf(now - self.created)
    return ofp_flow_stats(match=self.match,
                          duration_sec=int(dur_sec),
                          duration_nsec=int(dur_nsec * 1e9),
                          priority=self.priority,
                          idle_timeout=self.idle_timeout,
                          hard_timeout=self.hard_timeout,
                          cookie=self.cookie,
                          packet_count=self.packet_count,
                          byte_count=self.byte_count,
                          actions=self.actions)
  def to_flow_removed (self, now=None, reason=None):
    """Build an ofp_flow_removed message describing this entry's removal."""
    #TODO: Rename flow_stats to to_flow_stats and refactor?
    if now is None: now = time.time()
    dur_nsec,dur_sec = math.modf(now - self.created)
    fr = ofp_flow_removed()
    fr.match = self.match
    fr.cookie = self.cookie
    fr.priority = self.priority
    fr.reason = reason
    fr.duration_sec = int(dur_sec)
    fr.duration_nsec = int(dur_nsec * 1e9)
    fr.idle_timeout = self.idle_timeout
    fr.hard_timeout = self.hard_timeout
    fr.packet_count = self.packet_count
    fr.byte_count = self.byte_count
    return fr
class FlowTableModification (Event):
  """
  Event raised by FlowTable when entries are added or removed.
  """
  def __init__ (self, added=None, removed=None, reason=None):
    """
    added/removed default to None instead of [] so that the default lists
    are not shared between every event instance (mutable default arguments
    are evaluated once at definition time).
    """
    Event.__init__(self)
    self.added = [] if added is None else added
    self.removed = [] if removed is None else removed
    # Reason for modification.
    # Presently, this is only used for removals and is either one of OFPRR_x,
    # or None if it does not correlate to any of the items in the spec.
    self.reason = reason
class FlowTable (EventMixin):
  """
  General model of a flow table.
  Maintains an ordered list of flow entries, and finds matching entries for
  packets and other entries. Supports expiration of flows.
  """
  _eventMixin_events = set([FlowTableModification])
  def __init__ (self):
    EventMixin.__init__(self)
    # Table is a list of TableEntry sorted by descending effective_priority.
    self._table = []
  def _dirty (self):
    """
    Call when table changes; base implementation is a no-op hook.
    """
    pass
  @property
  def entries (self):
    # The raw (sorted) entry list; treat as read-only from outside.
    return self._table
  def __len__ (self):
    return len(self._table)
  def add_entry (self, entry):
    assert isinstance(entry, TableEntry)
    #self._table.append(entry)
    #self._table.sort(key=lambda e: e.effective_priority, reverse=True)
    # Use binary search to insert at correct place
    # This is faster even for modest table sizes, and way, way faster
    # as the tables grow larger.
    # The search finds the leftmost slot whose priority is <= the new
    # entry's, so a new entry lands *before* existing equal-priority ones.
    priority = entry.effective_priority
    table = self._table
    low = 0
    high = len(table)
    while low < high:
      middle = (low + high) // 2
      if priority >= table[middle].effective_priority:
        high = middle
        continue
      low = middle + 1
    table.insert(low, entry)
    self._dirty()
    self.raiseEvent(FlowTableModification(added=[entry]))
  def remove_entry (self, entry, reason=None):
    assert isinstance(entry, TableEntry)
    self._table.remove(entry)
    self._dirty()
    self.raiseEvent(FlowTableModification(removed=[entry], reason=reason))
  def matching_entries (self, match, priority=0, strict=False, out_port=None):
    # All entries matched by 'match' (see TableEntry.is_matched_by).
    entry_match = lambda e: e.is_matched_by(match, priority, strict, out_port)
    return [ entry for entry in self._table if entry_match(entry) ]
  def flow_stats (self, match, out_port=None, now=None):
    # Per-flow statistics for every entry matching 'match'/'out_port'.
    mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
    return [ e.flow_stats(now) for e in mc_es ]
  def aggregate_stats (self, match, out_port=None):
    # Packet/byte/flow totals across every entry matching 'match'/'out_port'.
    mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
    packet_count = 0
    byte_count = 0
    flow_count = 0
    for entry in mc_es:
      packet_count += entry.packet_count
      byte_count += entry.byte_count
      flow_count += 1
    return ofp_aggregate_stats(packet_count=packet_count,
                               byte_count=byte_count,
                               flow_count=flow_count)
  def _remove_specific_entries (self, flows, reason=None):
    #for entry in flows:
    #  self._table.remove(entry)
    #self._table = [entry for entry in self._table if entry not in flows]
    if not flows: return
    self._dirty()
    # Single pass with set membership; stops early once all are removed.
    remove_flows = set(flows)
    i = 0
    while i < len(self._table):
      entry = self._table[i]
      if entry in remove_flows:
        del self._table[i]
        remove_flows.remove(entry)
        if not remove_flows: break
      else:
        i += 1
    assert len(remove_flows) == 0
    self.raiseEvent(FlowTableModification(removed=flows, reason=reason))
  def remove_expired_entries (self, now=None):
    # Collect idle- and hard-timed-out entries separately so each batch is
    # reported with the corresponding OFPRR_* removal reason.
    idle = []
    hard = []
    if now is None: now = time.time()
    for entry in self._table:
      if entry.is_idle_timed_out(now):
        idle.append(entry)
      elif entry.is_hard_timed_out(now):
        hard.append(entry)
    self._remove_specific_entries(idle, OFPRR_IDLE_TIMEOUT)
    self._remove_specific_entries(hard, OFPRR_HARD_TIMEOUT)
  def remove_matching_entries (self, match, priority=0, strict=False,
                               out_port=None, reason=None):
    # Remove (and return) every entry matched by 'match'.
    remove_flows = self.matching_entries(match, priority, strict, out_port)
    self._remove_specific_entries(remove_flows, reason=reason)
    return remove_flows
  def entry_for_packet (self, packet, in_port):
    """
    Finds the flow table entry that matches the given packet.
    Returns the highest priority flow table entry that matches the given packet
    on the given in_port, or None if no matching entry is found.
    """
    packet_match = ofp_match.from_packet(packet, in_port, spec_frags = True)
    # Table is sorted by descending priority, so the first hit wins.
    for entry in self._table:
      if entry.match.matches_with_wildcards(packet_match,
                                            consider_other_wildcards=False):
        return entry
    return None
  def check_for_overlapping_entry (self, in_entry):
    # FELIPE TOMM - TCC
    # This function checks whether an identical flow already exists.
    # Will this be my base function?
    log = core.getLogger()
    log.debug("Debug TCC - Flow Table: Iniciando analise de conflito")
    """
    Tests if the input entry overlaps with another entry in this table.
    Returns true if there is an overlap, false otherwise. Since the table is
    sorted, there is only a need to check a certain portion of it.
    """
    #NOTE: Assumes that entries are sorted by decreasing effective_priority
    #NOTE: Ambiguous whether matching should be based on effective_priority
    #      or the regular priority. Doing it based on effective_priority
    #      since that's what actually affects packet matching.
    #NOTE: We could improve performance by doing a binary search to find the
    #      right priority entries.
    priority = in_entry.effective_priority
    for e in self._table:
      if e.effective_priority < priority:
        break
      elif e.effective_priority > priority:
        continue
      else:
        # Only same-priority entries can overlap; check both directions.
        if e.is_matched_by(in_entry.match) or in_entry.is_matched_by(e.match):
          print ("Debug TCC - Flow Table: O Flow: ja existe.")
          return True
    return False
|
{
"content_hash": "bf52da99d71bb6b45a2ff00eefb63719",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 79,
"avg_line_length": 33.69740634005764,
"alnum_prop": 0.6263576498759942,
"repo_name": "felipetomm/POX-Django",
"id": "4aff52dfb013d865678539377a03d26c662c312d",
"size": "12280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pox/openflow/flow_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18383"
},
{
"name": "JavaScript",
"bytes": "9532"
},
{
"name": "Python",
"bytes": "1091184"
},
{
"name": "Shell",
"bytes": "20300"
}
],
"symlink_target": ""
}
|
import sys
import time
import logging
from datetime import timedelta, datetime
import elasticsearch
# This solves https://github.com/elasticsearch/curator/issues/12
try:
    from logging import NullHandler
except ImportError:
    # logging.NullHandler only exists on Python 2.7+; provide a no-op
    # fallback handler for older interpreters.
    from logging import Handler
    class NullHandler(Handler):
        def emit(self, record):
            pass
__version__ = '1.0.1-dev'
# Elasticsearch versions supported
# NOTE(review): the code that compares against these bounds is outside this
# chunk -- confirm whether max is inclusive or exclusive.
version_max = (2, 0, 0)
version_min = (1, 0, 0)
logger = logging.getLogger(__name__)
# Built-in defaults for the command-line options; make_parser() feeds these
# to argparse/optparse as each option's 'default' value.
DEFAULT_ARGS = {
    'host': 'localhost',
    'url_prefix': '',
    'port': 9200,
    'ssl': False,
    'timeout': 30,
    'prefix': 'logstash-',
    'separator': '.',
    'curation_style': 'time',
    'time_unit': 'days',
    'max_num_segments': 2,
    'dry_run': False,
    'debug': False,
    'log_level': 'INFO',
    'show_indices': False
}
def make_parser():
    """ Creates an ArgumentParser to parse the command line options. """
    help_desc = 'Curator for Elasticsearch indices. Can delete (by space or time), close, disable bloom filters and optimize (forceMerge) your indices.'
    # Prefer argparse; on very old Pythons fall back to optparse and shim the
    # small slice of the argparse API that the rest of this function uses.
    try:
        import argparse
        parser = argparse.ArgumentParser(description=help_desc)
        parser.add_argument('-v', '--version', action='version', version='%(prog)s '+__version__)
    except ImportError:
        import optparse
        parser = optparse.OptionParser(description=help_desc, version='%prog '+ __version__)
        # optparse's parse_args returns (options, args); keep only options so
        # callers can treat both parsers identically.
        parser.parse_args_orig = parser.parse_args
        parser.parse_args = lambda: parser.parse_args_orig()[0]
        parser.add_argument = parser.add_option
    # Connection options
    parser.add_argument('--host', help='Elasticsearch host. Default: localhost', default=DEFAULT_ARGS['host'])
    parser.add_argument('--url_prefix', help='Elasticsearch http url prefix. Default: none', default=DEFAULT_ARGS['url_prefix'])
    parser.add_argument('--port', help='Elasticsearch port. Default: 9200', default=DEFAULT_ARGS['port'], type=int)
    parser.add_argument('--ssl', help='Connect to Elasticsearch through SSL. Default: false', action='store_true', default=DEFAULT_ARGS['ssl'])
    parser.add_argument('-t', '--timeout', help='Elasticsearch timeout. Default: 30', default=DEFAULT_ARGS['timeout'], type=int)
    # Index-selection options
    parser.add_argument('-p', '--prefix', help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-', default=DEFAULT_ARGS['prefix'])
    parser.add_argument('-s', '--separator', help='Time unit separator. Default: .', default=DEFAULT_ARGS['separator'])
    parser.add_argument('-C', '--curation-style', dest='curation_style', action='store', help='Curate indices by [time, space] Default: time', default=DEFAULT_ARGS['curation_style'], type=str)
    parser.add_argument('-T', '--time-unit', dest='time_unit', action='store', help='Unit of time to reckon by: [days, hours] Default: days', default=DEFAULT_ARGS['time_unit'], type=str)
    # Operation options (validated for mutual consistency in validate_args)
    parser.add_argument('-d', '--delete', dest='delete_older', action='store', help='Delete indices older than n TIME_UNITs.', type=int)
    parser.add_argument('-c', '--close', dest='close_older', action='store', help='Close indices older than n TIME_UNITs.', type=int)
    parser.add_argument('-b', '--bloom', dest='bloom_older', action='store', help='Disable bloom filter for indices older than n TIME_UNITs.', type=int)
    parser.add_argument('-g', '--disk-space', dest='disk_space', action='store', help='Delete indices beyond n GIGABYTES.', type=float)
    # NOTE(review): -r/--require takes an integer age (TIME_UNITs; see main()),
    # but its help text shows "Ex. tag=ssd", which belongs to --required_rule.
    parser.add_argument('-r', '--require', help='Update indices required routing allocation rules. Ex. tag=ssd', type=int)
    parser.add_argument('--required_rule', help='Index routing allocation rule to require. Ex. tag=ssd', type=str)
    parser.add_argument('--max_num_segments', action='store', help='Maximum number of segments, post-optimize. Default: 2', type=int, default=DEFAULT_ARGS['max_num_segments'])
    parser.add_argument('-o', '--optimize', action='store', help='Optimize (Lucene forceMerge) indices older than n TIME_UNITs. Must increase timeout to stay connected throughout optimize operation, recommend no less than 3600.', type=int)
    # Behavior / logging options
    parser.add_argument('-n', '--dry-run', action='store_true', help='If true, does not perform any changes to the Elasticsearch indices.', default=DEFAULT_ARGS['dry_run'])
    parser.add_argument('-D', '--debug', dest='debug', action='store_true', help='Debug mode', default=DEFAULT_ARGS['debug'])
    parser.add_argument('-ll', '--loglevel', dest='log_level', action='store', help='Log level', default=DEFAULT_ARGS['log_level'], type=str)
    parser.add_argument('-l', '--logfile', dest='log_file', help='log file', type=str)
    parser.add_argument('--show-indices', dest='show_indices', action='store_true', help='Show indices matching prefix', default=DEFAULT_ARGS['show_indices'])
    return parser
def validate_args(myargs):
    """Validate that arguments aren't stomping on each other or conflicting.

    :param myargs: parsed namespace produced by make_parser().parse_args()
    :return: True when the combination is valid, otherwise a list of
        human-readable error message strings (contract relied on by main()).
    """
    success = True
    messages = []
    if myargs.curation_style == 'time':
        # At least one time-based operation must be requested.
        if not myargs.delete_older and not myargs.close_older and not myargs.bloom_older and not myargs.optimize and not myargs.require:
            success = False
            messages.append('Must specify at least one of --delete, --close, --bloom, --optimize or --require')
        if ((myargs.delete_older and myargs.delete_older < 1) or
            (myargs.close_older and myargs.close_older < 1) or
            (myargs.bloom_older and myargs.bloom_older < 1) or
            (myargs.optimize and myargs.optimize < 1)):
            success = False
            messages.append('Values for --delete, --close, --bloom or --optimize must be > 0')
        if myargs.time_unit != 'days' and myargs.time_unit != 'hours':
            success = False
            messages.append('Values for --time-unit must be either "days" or "hours"')
        if myargs.disk_space:
            success = False
            messages.append('Cannot specify --disk-space and --curation-style "time"')
        if myargs.optimize and myargs.timeout < 300:
            success = False
            messages.append('Timeout should be much higher for optimize transactions, recommend no less than 3600 seconds')
    else: # Curation-style is 'space'
        if (myargs.delete_older or myargs.close_older or myargs.bloom_older or myargs.optimize):
            success = False
            messages.append('Cannot specify --curation-style "space" and any of --delete, --close, --bloom or --optimize')
        # BUG FIX: the original evaluated `myargs.disk_space < 0` even when
        # --disk-space was omitted (None), which raises TypeError on Python 3.
        # A missing value is just as invalid as a non-positive one.
        if not myargs.disk_space or myargs.disk_space <= 0:
            success = False
            messages.append('Value for --disk-space must be greater than 0')
    if success:
        return True
    else:
        return messages
def get_index_time(index_timestamp, separator='.'):
    """Parse an index timestamp into a datetime.

    :param index_timestamp: string shaped like YYYY.MM.DD or YYYY.MM.DD.HH
        (with *separator* in place of the dots)
    :param separator: character joining the timestamp fields
    :return: datetime the timestamp represents
    :raises ValueError: if neither format matches
    """
    hourly_fmt = separator.join(('%Y', '%m', '%d', '%H'))
    daily_fmt = separator.join(('%Y', '%m', '%d'))
    try:
        return datetime.strptime(index_timestamp, hourly_fmt)
    except ValueError:
        return datetime.strptime(index_timestamp, daily_fmt)
def get_indices(client, prefix='logstash-'):
    """Return a sorted list of index names matching *prefix* (open and closed)."""
    settings = client.indices.get_settings(
        index=prefix + '*', params={'expand_wildcards': 'closed'})
    return sorted(settings.keys())
def get_version(client):
    """Return the Elasticsearch server version as a tuple of ints, e.g. (1, 4, 2)."""
    number = client.info()['version']['number']
    return tuple(int(part) for part in number.split('.'))
def find_expired_indices(client, time_unit, unit_count, separator='.', prefix='logstash-', utc_now=None):
    """ Generator that yields expired indices.

    :param client: Elasticsearch client instance
    :param time_unit: 'days' or 'hours' -- selects daily vs hourly indices
    :param unit_count: age threshold in TIME_UNITs
    :param separator: character separating timestamp fields in index names
    :param prefix: only index names starting with this prefix are considered
    :param utc_now: override of "now" for tests; defaults to datetime.utcnow()
    :return: Yields tuples on the format ``(index_name, expired_by)`` where index_name
        is the name of the expired index and expired_by is the interval (timedelta) that the
        index was expired by.
    """
    # time-injection for test purposes only
    utc_now = utc_now if utc_now else datetime.utcnow()
    # reset to start of the period to be sure we are not retiring a human by mistake
    utc_now = utc_now.replace(minute=0, second=0, microsecond=0)
    if time_unit == 'hours':
        required_parts = 4
    else:
        required_parts = 3
        # daily indices: also truncate to midnight
        utc_now = utc_now.replace(hour=0)
    # (unit_count - 1): the current, still-filling period counts as one unit.
    cutoff = utc_now - timedelta(**{time_unit: (unit_count - 1)})
    index_list = get_indices(client, prefix)
    for index_name in index_list:
        unprefixed_index_name = index_name[len(prefix):]
        # find the timestamp parts (i.e ['2011', '01', '05'] from '2011.01.05') using the configured separator
        parts = unprefixed_index_name.split(separator)
        # verify we have a valid cutoff - hours for 4-part indices, days for 3-part
        if len(parts) != required_parts:
            logger.debug('Skipping {0} because it is of a type (hourly or daily) that I\'m not asked to evaluate.'.format(index_name))
            continue
        try:
            index_time = get_index_time(unprefixed_index_name, separator=separator)
        except ValueError:
            logger.error('Could not find a valid timestamp from the index: {0}'.format(index_name))
            continue
        # if the index is older than the cutoff
        if index_time < cutoff:
            yield index_name, cutoff-index_time
        else:
            logger.info('{0} is {1} above the cutoff.'.format(index_name, index_time-cutoff))
def find_overusage_indices(client, disk_space_to_keep, separator='.', prefix='logstash-'):
    """ Generator that yields indices exceeding the disk-space budget.

    Indices are walked in reverse name order (newest timestamped names first);
    once the accumulated primary size passes *disk_space_to_keep* gigabytes,
    every further (older) index is yielded for deletion.

    :return: Yields tuples on the format ``(index_name, 0)`` where index_name
        is the name of the expired index. The second element is only here for
        compatiblity reasons.
    """
    limit_bytes = disk_space_to_keep * 2**30
    status = client.indices.status(index=prefix + '*')
    # Sorting (name, size) tuples reversed orders by name descending, i.e.
    # newest timestamped indices first -- they get first claim on the budget.
    sized = [(name, info['index']['primary_size_in_bytes'])
             for name, info in status['indices'].items()]
    sized.sort(reverse=True)
    used = 0.0
    for name, size in sized:
        used += size
        if used > limit_bytes:
            yield name, 0
        else:
            logger.info('skipping {0}, disk usage is {1:.3f} GB and disk limit is {2:.3f} GB.'.format(name, used/2**30, limit_bytes/2**30))
def index_closed(client, index_name):
    """Return True if *index_name* is currently in the 'close' state."""
    cluster_state = client.cluster.state(
        index=index_name,
        metric='metadata',
    )
    index_state = cluster_state['metadata']['indices'][index_name]['state']
    return index_state == 'close'
def _close_index(client, index_name, **kwargs):
    """Close *index_name*; return True (signals a skip) when already closed."""
    if index_closed(client, index_name):
        logger.info('Skipping index {0}: Already closed.'.format(index_name))
        return True
    client.indices.close(index=index_name)
def _delete_index(client, index_name, **kwargs):
    """Delete *index_name*. Returns None so index_loop logs a success line."""
    client.indices.delete(index=index_name)
def _optimize_index(client, index_name, max_num_segments=2, **kwargs):
    """forceMerge *index_name* down to *max_num_segments* segments per shard.

    Returns True (signals a skip to index_loop) when the index is closed or
    already at/below the target segment count.
    """
    if index_closed(client, index_name): # Don't try to optimize a closed index
        logger.info('Skipping index {0}: Already closed.'.format(index_name))
        return True
    else:
        shards, segmentcount = get_segmentcount(client, index_name)
        logger.debug('Index {0} has {1} shards and {2} segments total.'.format(index_name, shards, segmentcount))
        # Only optimize when the index exceeds the per-shard segment budget.
        if segmentcount > (shards * max_num_segments):
            logger.info('Optimizing index {0} to {1} segments per shard. Please wait...'.format(index_name, max_num_segments))
            client.indices.optimize(index=index_name, max_num_segments=max_num_segments)
        else:
            logger.info('Skipping index {0}: Already optimized.'.format(index_name))
            return True
def _bloom_index(client, index_name, **kwargs):
    """Disable bloom filter loading for *index_name*; return True (skip) if closed."""
    if index_closed(client, index_name): # Don't try to disable bloom filter on a closed index. It will re-open them
        logger.info('Skipping index {0}: Already closed.'.format(index_name))
        return True
    else:
        client.indices.put_settings(index=index_name, body='index.codec.bloom.load=false')
def _require_index(client, index_name, attr, **kwargs):
    """Apply an index.routing.allocation rule to *index_name*.

    *attr* is a 'key=value' string (from --required_rule). Returns True
    (signals a skip) when the index is closed.
    """
    pieces = attr.split('=')
    key, value = pieces[0], pieces[1]
    if index_closed(client, index_name):
        logger.info('Skipping index {0}: Already closed.'.format(index_name))
        return True
    logger.info('Updating index setting index.routing.allocation.{0}={1}'.format(key, value))
    client.indices.put_settings(
        index=index_name,
        body='index.routing.allocation.{0}={1}'.format(key, value))
# Dispatch table: operation name -> (implementation callable, log vocabulary).
# index_loop() uses 'op', 'verbed' and 'gerund' to phrase its log messages.
OP_MAP = {
    'close': (_close_index, {'op': 'close', 'verbed': 'closed', 'gerund': 'Closing'}),
    'delete': (_delete_index, {'op': 'delete', 'verbed': 'deleted', 'gerund': 'Deleting'}),
    'optimize': (_optimize_index, {'op': 'optimize', 'verbed': 'optimized', 'gerund': 'Optimizing'}),
    'bloom': (_bloom_index, {'op': 'disable bloom filter for', 'verbed': 'bloom filter disabled', 'gerund': 'Disabling bloom filter for'}),
    'require': (_require_index, {'op': 'update require allocation rules for', 'verbed':'index routing allocation updated', 'gerund': 'Updating required index routing allocation rules for'}),
}
def index_loop(client, operation, expired_indices, dry_run=False, by_space=False, **kwargs):
    """Apply *operation* (a key of OP_MAP) to every index in *expired_indices*.

    :param expired_indices: iterable of (index_name, expiration) tuples
    :param dry_run: only log what would be done, change nothing
    :param by_space: use space-based wording in log messages
    Extra kwargs are forwarded to the operation callable; a truthy return
    from the callable means the index was skipped.
    """
    op, words = OP_MAP[operation]
    for index_name, expiration in expired_indices:
        if dry_run and not by_space:
            logger.info('Would have attempted {0} index {1} because it is {2} older than the calculated cutoff.'.format(words['gerund'].lower(), index_name, expiration))
            continue
        elif dry_run and by_space:
            logger.info('Would have attempted {0} index {1} due to space constraints.'.format(words['gerund'].lower(), index_name))
            continue
        if not by_space:
            logger.info('Attempting to {0} index {1} because it is {2} older than cutoff.'.format(words['op'], index_name, expiration))
        else:
            logger.info('Attempting {0} index {1} due to space constraints.'.format(words['gerund'].lower(), index_name))
        skipped = op(client, index_name, **kwargs)
        if skipped:
            continue
        # if no error was raised and we got here that means the operation succeeded
        logger.info('{0}: Successfully {1}.'.format(index_name, words['verbed']))
    logger.info('{0} index operations completed.'.format(words['op'].upper()))
def get_segmentcount(client, index_name):
    """Return (total_shards, total_segments) for *index_name*.

    Counts every shard copy (primaries and replicas) and sums their search
    segments.
    """
    shard_map = client.indices.segments(index=index_name)['indices'][index_name]['shards']
    total_shards = 0
    total_segments = 0
    for copies in shard_map.values():
        for copy in copies:
            total_segments += copy['num_search_segments']
            total_shards += 1
    return total_shards, total_segments
def main():
    """CLI entry point.

    Parses and validates arguments, connects to Elasticsearch, verifies the
    server version, then runs each requested curation operation in a fixed
    order: delete-by-space, delete-by-time, close, bloom, optimize, require.
    """
    start = time.time()
    parser = make_parser()
    arguments = parser.parse_args()
    # Do not log and force dry-run if we opt to show indices.
    if arguments.show_indices:
        arguments.log_file = '/dev/null'
        arguments.dry_run = True
    # Setup logging
    if arguments.debug:
        numeric_log_level = logging.DEBUG
    else:
        numeric_log_level = getattr(logging, arguments.log_level.upper(), None)
        if not isinstance(numeric_log_level, int):
            # BUG FIX: the original referenced an undefined name 'loglevel'
            # here, raising NameError instead of the intended ValueError.
            raise ValueError('Invalid log level: %s' % arguments.log_level)
    logging.basicConfig(level=numeric_log_level,
                        format='%(asctime)s.%(msecs)03d %(levelname)-9s %(funcName)22s:%(lineno)-4d %(message)s',
                        datefmt="%Y-%m-%dT%H:%M:%S",
                        stream=open(arguments.log_file, 'a') if arguments.log_file else sys.stderr)
    logging.info("Job starting...")
    # Setting up NullHandler to handle nested elasticsearch.trace Logger instance in elasticsearch python client
    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())
    if arguments.show_indices:
        pass # Skip checking args if we're only showing indices
    else:
        check_args = validate_args(arguments) # Returns either True or a list of errors
        if check_args is not True:
            logger.error('Malformed arguments: {0}'.format(';'.join(check_args)))
            parser.print_help()
            return
    client = elasticsearch.Elasticsearch(host=arguments.host, port=arguments.port, url_prefix=arguments.url_prefix, timeout=arguments.timeout, use_ssl=arguments.ssl)
    # Refuse to run against server versions outside the supported range.
    version_number = get_version(client)
    logger.debug('Detected Elasticsearch version {0}'.format(".".join(map(str,version_number))))
    if version_number >= version_max or version_number < version_min:
        print('Expected Elasticsearch version range > {0} < {1}'.format(".".join(map(str,version_min)),".".join(map(str,version_max))))
        print('ERROR: Incompatible with version {0} of Elasticsearch. Exiting.'.format(".".join(map(str,version_number))))
        sys.exit(1)
    # Show indices then exit
    if arguments.show_indices:
        for index_name in get_indices(client, arguments.prefix):
            print('{0}'.format(index_name))
        sys.exit(0)
    # Delete by space first
    if arguments.disk_space:
        logger.info('Deleting indices by disk usage over {0} gigabytes'.format(arguments.disk_space))
        expired_indices = find_overusage_indices(client, arguments.disk_space, arguments.separator, arguments.prefix)
        index_loop(client, 'delete', expired_indices, arguments.dry_run, by_space=True)
    # Delete by time
    if arguments.delete_older:
        logger.info('Deleting indices older than {0} {1}...'.format(arguments.delete_older, arguments.time_unit))
        expired_indices = find_expired_indices(client, time_unit=arguments.time_unit, unit_count=arguments.delete_older, separator=arguments.separator, prefix=arguments.prefix)
        index_loop(client, 'delete', expired_indices, arguments.dry_run)
    # Close by time
    if arguments.close_older:
        logger.info('Closing indices older than {0} {1}...'.format(arguments.close_older, arguments.time_unit))
        expired_indices = find_expired_indices(client, time_unit=arguments.time_unit, unit_count=arguments.close_older, separator=arguments.separator, prefix=arguments.prefix)
        index_loop(client, 'close', expired_indices, arguments.dry_run)
    # Disable bloom filter by time
    if arguments.bloom_older:
        logger.info('Disabling bloom filter on indices older than {0} {1}...'.format(arguments.bloom_older, arguments.time_unit))
        expired_indices = find_expired_indices(client, time_unit=arguments.time_unit, unit_count=arguments.bloom_older, separator=arguments.separator, prefix=arguments.prefix)
        index_loop(client, 'bloom', expired_indices, arguments.dry_run)
    # Optimize index
    if arguments.optimize:
        logger.info('Optimizing indices older than {0} {1}...'.format(arguments.optimize, arguments.time_unit))
        expired_indices = find_expired_indices(client, time_unit=arguments.time_unit, unit_count=arguments.optimize, separator=arguments.separator, prefix=arguments.prefix)
        index_loop(client, 'optimize', expired_indices, arguments.dry_run, max_num_segments=arguments.max_num_segments)
    # Required routing rules
    if arguments.require:
        logger.info('Updating required routing allocation rules on indices older than {0} {1}...'.format(arguments.require, arguments.time_unit))
        expired_indices = find_expired_indices(client, time_unit=arguments.time_unit, unit_count=arguments.require, separator=arguments.separator, prefix=arguments.prefix)
        index_loop(client, 'require', expired_indices, arguments.dry_run, attr=arguments.required_rule)
    logger.info('Done in {0}.'.format(timedelta(seconds=time.time()-start)))
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "1e1e8193f1f23cb765b0d4a1be461951",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 240,
"avg_line_length": 50.0498753117207,
"alnum_prop": 0.6610861983059293,
"repo_name": "waja/elasticsearch-curator",
"id": "fdcf75819e4740ad16f34c84d83c866ba17c953d",
"size": "21458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "curator/curator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35807"
}
],
"symlink_target": ""
}
|
def getPeaks(vs, ts, threshV=1):
    """Detect rising-edge threshold crossings in a voltage trace.

    Walks the voltage samples in order and records the time of each sample
    where the voltage first rises above *threshV*; the detector re-arms once
    the voltage drops back below the threshold.

    :param vs: voltage samples
    :param ts: time stamps, index-aligned with *vs*
    :param threshV: detection threshold
    :return: list of time points where crossings occurred
    """
    peak_times = []
    armed = True
    for idx, volt in enumerate(vs):
        stamp = ts[idx]
        if volt > threshV and armed:
            armed = False
            peak_times.append(stamp)
        if volt < threshV:
            armed = True
    return peak_times
def instHR(pTs, instT=0):
    """Compute the instantaneous heart rate around a chosen time point.

    Takes the peak time points produced by getPeaks (len > 1) and a desired
    instantaneous timepoint. The timepoint is snapped to the surrounding
    pair of peaks and the rate is 60 / (inter-peak interval).

    :param pTs: time points associated with peaks (needs at least 2)
    :param instT: user selected time point to calculate heart rate
    :return: instantaneous BPM [float], or None when fewer than 2 peaks exist
    """
    try:
        prevT = pTs[0]
        if instT < pTs[1]:  # before the second peak: use the first interval
            instT = pTs[1]
        elif instT >= pTs[-1]:  # after the last peak: use the last interval
            instT = pTs[-1]
            prevT = pTs[-2]
        else:
            # find the peak pair straddling instT
            lastT = pTs[0]
            for peakT in pTs:
                if lastT < instT <= peakT:
                    instT = peakT
                    prevT = lastT
                    break
                lastT = peakT
        # compute instantaneous BPM using previous time and current time
        timeDiff = instT - prevT
        return 60.0 / timeDiff
    except IndexError:
        # BUG FIX: the original used a bare `except:`, which also swallowed
        # unrelated errors (e.g. ZeroDivisionError, TypeError) and printed
        # this misleading message for them. Only the too-few-peaks case is
        # handled here; other errors now propagate.
        print("ERROR: length of pTs must be greater than 1.")
if __name__ == '__main__':
    # Manual smoke test with a synthetic sawtooth signal (0..3 repeating).
    # mV, time = readFile(test1.csv)
    mV = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
    time = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    PeakTs = getPeaks(mV, time, 2)
    print(PeakTs)
    print(instHR([6, 10, 13], 3))
    print(instHR(PeakTs, 11))
    # No peaks at all: instHR prints an error and returns None.
    print(instHR(getPeaks([0, 0, 0], [0, 1, 2]), 1))
|
{
"content_hash": "53814d29c58b33cbe4b075f78ea05d90",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 32.47142857142857,
"alnum_prop": 0.5714914210294765,
"repo_name": "qidizhai/bme590hrm",
"id": "a7eb99cb4ddd2c5db8a445cde4eb6aab15d9dce9",
"size": "2273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instHR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8821"
}
],
"symlink_target": ""
}
|
import discord
from discord.ext import commands
from cogs.utils.checks import embed_perms, cmd_prefix_len
'''Module for the info command.'''
class Userinfo:
    """Cog providing the userinfo command group and its avi subcommand."""

    def __init__(self, bot):
        self.bot = bot

    def _find_user(self, ctx, name):
        """Resolve *name* (mention, member name, or numeric id) to a user.

        Lookup order: first message mention, then guild member by name, then
        guild member by id, then any known user by id. Returns None when no
        match is found. NOTE: like the original inline code, int(name) raises
        ValueError for a non-numeric, non-member name.
        """
        try:
            return ctx.message.mentions[0]
        except IndexError:
            user = ctx.guild.get_member_named(name)
            if not user:
                user = ctx.guild.get_member(int(name))
            if not user:
                user = self.bot.get_user(int(name))
            return user

    @staticmethod
    def _avatar_url(user):
        """Return the user's avatar url, keeping the animated (a_) original."""
        if user.avatar_url_as(static_format='png')[54:].startswith('a_'):
            return user.avatar_url.rsplit("?", 1)[0]
        return user.avatar_url_as(static_format='png')

    @commands.group(invoke_without_command=True, aliases=['user', 'uinfo', 'info', 'ui'])
    async def userinfo(self, ctx, *, name=""):
        """Get user info. Ex: [p]info @user"""
        if ctx.invoked_subcommand is None:
            pre = cmd_prefix_len()  # kept from original; currently unused
            if name:
                user = self._find_user(ctx, name)
                if not user:
                    await ctx.send(self.bot.bot_prefix + 'Could not find user.')
                    return
            else:
                user = ctx.message.author
            avi = self._avatar_url(user)
            # Member-only details (role/voice/status) exist only in a guild context.
            if isinstance(user, discord.Member):
                role = user.top_role.name
                if role == "@everyone":
                    role = "N/A"
                voice_state = None if not user.voice else user.voice.channel
            if embed_perms(ctx.message):
                em = discord.Embed(timestamp=ctx.message.created_at, colour=0x708DD0)
                em.add_field(name='User ID', value=user.id, inline=True)
                if isinstance(user, discord.Member):
                    em.add_field(name='Nick', value=user.nick, inline=True)
                    em.add_field(name='Status', value=user.status, inline=True)
                    em.add_field(name='In Voice', value=voice_state, inline=True)
                    em.add_field(name='Game', value=user.activity, inline=True)
                    em.add_field(name='Highest Role', value=role, inline=True)
                em.add_field(name='Account Created', value=user.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'))
                if isinstance(user, discord.Member):
                    em.add_field(name='Join Date', value=user.joined_at.__format__('%A, %d. %B %Y @ %H:%M:%S'))
                em.set_thumbnail(url=avi)
                em.set_author(name=user, icon_url='https://i.imgur.com/RHagTDg.png')
                await ctx.send(embed=em)
            else:
                # No embed permission: fall back to a plain code-block message.
                if isinstance(user, discord.Member):
                    msg = '**User Info:** ```User ID: %s\nNick: %s\nStatus: %s\nIn Voice: %s\nGame: %s\nHighest Role: %s\nAccount Created: %s\nJoin Date: %s\nAvatar url:%s```' % (user.id, user.nick, user.status, voice_state, user.activity, role, user.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'), user.joined_at.__format__('%A, %d. %B %Y @ %H:%M:%S'), avi)
                else:
                    msg = '**User Info:** ```User ID: %s\nAccount Created: %s\nAvatar url:%s```' % (user.id, user.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'), avi)
                await ctx.send(self.bot.bot_prefix + msg)
            await ctx.message.delete()

    @userinfo.command()
    async def avi(self, ctx, txt: str = None):
        """View bigger version of user's avatar. Ex: [p]info avi @user"""
        if txt:
            user = self._find_user(ctx, txt)
            if not user:
                await ctx.send(self.bot.bot_prefix + 'Could not find user.')
                return
        else:
            user = ctx.message.author
        avi = self._avatar_url(user)
        if embed_perms(ctx.message):
            em = discord.Embed(colour=0x708DD0)
            em.set_image(url=avi)
            await ctx.send(embed=em)
        else:
            await ctx.send(self.bot.bot_prefix + avi)
        await ctx.message.delete()
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(Userinfo(bot))
|
{
"content_hash": "07196a69c2a718562fd6535cbaf2275e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 361,
"avg_line_length": 45.53061224489796,
"alnum_prop": 0.5186015239802779,
"repo_name": "appu1232/Selfbot-for-Discord",
"id": "7e5c6362b1e83b1935fd476cb4b3181547ac1845",
"size": "4462",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cogs/userinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "Python",
"bytes": "116575"
}
],
"symlink_target": ""
}
|
import itertools
import jmespath
from c7n.exceptions import PolicyExecutionError, PolicyValidationError
from c7n import utils
from .core import Action
class ModifyVpcSecurityGroupsAction(Action):
    """Common action for modifying security groups on a vpc attached resources.

    Security groups for add or remove can be specified via group id or
    name. Group removal also supports symbolic names such as
    'matched', 'network-location' or 'all'. 'matched' uses the
    annotations/output of the 'security-group' filter.
    'network-location' uses the annotations of the
    'network-location' interface filter for `SecurityGroupMismatch`.

    Note a vpc attached resource requires at least one security group,
    this action will use the sg specified in `isolation-group` to ensure
    resources always have at least one security-group.

    type: modify-security-groups
    add: []
    remove: [] | matched | network-location
    isolation-group: sg-xyz
    """
    schema_alias = True
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['modify-security-groups']},
            'add': {'oneOf': [
                {'type': 'string'},
                {'type': 'array', 'items': {
                    'type': 'string'}}]},
            'remove': {'oneOf': [
                {'type': 'array', 'items': {
                    'type': 'string'}},
                {'enum': [
                    'matched', 'network-location', 'all',
                    {'type': 'string'}]}]},
            'isolation-group': {'oneOf': [
                {'type': 'string'},
                {'type': 'array', 'items': {
                    'type': 'string'}}]}},
        'anyOf': [
            {'required': ['isolation-group', 'remove', 'type']},
            {'required': ['add', 'remove', 'type']},
            {'required': ['add', 'type']}]
    }

    # Symbolic group names that are resolved per-resource, not via the API.
    SYMBOLIC_SGS = {'all', 'matched', 'network-location'}

    # jmespath expressions, compiled lazily in validate().
    sg_expr = None
    vpc_expr = None

    def validate(self):
        """Validate policy configuration and pre-compile jmespath expressions."""
        sg_filter = self.manager.filter_registry.get('security-group')
        if not sg_filter or not sg_filter.RelatedIdsExpression:
            raise PolicyValidationError(self._format_error((
                "policy:{policy} resource:{resource_type} does "
                "not support {action_type} action")))
        if self.get_action_group_names():
            # Group *names* need a vpc context to resolve to ids.
            vpc_filter = self.manager.filter_registry.get('vpc')
            if not vpc_filter or not vpc_filter.RelatedIdsExpression:
                raise PolicyValidationError(self._format_error((
                    "policy:{policy} resource:{resource_type} does not support "
                    "security-group names only ids in action:{action_type}")))
            self.vpc_expr = jmespath.compile(vpc_filter.RelatedIdsExpression)
        if self.sg_expr is None:
            self.sg_expr = jmespath.compile(
                self.manager.filter_registry.get('security-group').RelatedIdsExpression)
        # 'remove: all' would strip every group; demand a fallback.
        if 'all' in self._get_array('remove') and not self._get_array('isolation-group'):
            raise PolicyValidationError(self._format_error((
                "policy:{policy} use of action:{action_type} with "
                "remove: all requires specifying isolation-group")))
        return self

    def get_group_names(self, groups):
        """Return only the plain names from *groups* (skip sg- ids and symbols)."""
        names = []
        for g in groups:
            if g.startswith('sg-'):
                continue
            elif g in self.SYMBOLIC_SGS:
                continue
            names.append(g)
        return names

    def get_action_group_names(self):
        """Return all the security group names configured in this action."""
        return self.get_group_names(
            list(itertools.chain(
                *[self._get_array('add'),
                  self._get_array('remove'),
                  self._get_array('isolation-group')])))

    def _format_error(self, msg, **kw):
        """Interpolate policy/resource/action context into an error message."""
        return msg.format(
            policy=self.manager.ctx.policy.name,
            resource_type=self.manager.type,
            action_type=self.type,
            **kw)

    def _get_array(self, k):
        """Return self.data[k] normalized to a list (a bare string becomes [s])."""
        v = self.data.get(k, [])
        if isinstance(v, (str, bytes)):
            return [v]
        return v

    def get_groups_by_names(self, names):
        """Resolve security names to security groups resources."""
        if not names:
            return []
        client = utils.local_session(
            self.manager.session_factory).client('ec2')
        sgs = self.manager.retry(
            client.describe_security_groups,
            Filters=[{
                'Name': 'group-name', 'Values': names}]).get(
                    'SecurityGroups', [])
        # Every requested name must resolve, else fail loudly.
        unresolved = set(names)
        for s in sgs:
            if s['GroupName'] in unresolved:
                unresolved.remove(s['GroupName'])
        if unresolved:
            raise PolicyExecutionError(self._format_error(
                "policy:{policy} security groups not found "
                "requested: {names}, found: {groups}",
                names=list(unresolved), groups=[g['GroupId'] for g in sgs]))
        return sgs

    def resolve_group_names(self, r, target_group_ids, groups):
        """Resolve any security group names to the corresponding group ids

        With the context of a given network attached resource.
        """
        names = self.get_group_names(target_group_ids)
        if not names:
            return target_group_ids
        target_group_ids = list(target_group_ids)
        vpc_id = self.vpc_expr.search(r)
        if not vpc_id:
            raise PolicyExecutionError(self._format_error(
                "policy:{policy} non vpc attached resource used "
                "with modify-security-group: {resource_id}",
                resource_id=r[self.manager.resource_type.id]))
        for n in names:
            # BUG FIX: reset per name. Previously `found` was initialized once
            # before this loop, so a match for an earlier name leaked into
            # later iterations and an unresolvable second name silently reused
            # the previous name's GroupId instead of raising.
            found = False
            for g in groups:
                if g['GroupName'] == n and g['VpcId'] == vpc_id:
                    found = g['GroupId']
            if not found:
                raise PolicyExecutionError(self._format_error((
                    "policy:{policy} could not resolve sg:{name} for "
                    "resource:{resource_id} in vpc:{vpc}"),
                    name=n,
                    resource_id=r[self.manager.resource_type.id], vpc=vpc_id))
            target_group_ids.remove(n)
            target_group_ids.append(found)
        return target_group_ids

    def resolve_remove_symbols(self, r, target_group_ids, rgroups):
        """Resolve the resources security groups that need be modified.

        Specifically handles symbolic names that match annotations from policy filters
        for groups being removed.
        """
        if 'matched' in target_group_ids:
            return r.get('c7n:matched-security-groups', ())
        elif 'network-location' in target_group_ids:
            for reason in r.get('c7n:NetworkLocation', ()):
                if reason['reason'] == 'SecurityGroupMismatch':
                    return list(reason['security-groups'])
        elif 'all' in target_group_ids:
            return rgroups
        return target_group_ids

    def get_groups(self, resources):
        """Return lists of security groups to set on each resource

        For each input resource, parse the various add/remove/isolation-
        group policies for 'modify-security-groups' to find the resulting
        set of VPC security groups to attach to that resource.

        Returns a list of lists containing the resulting VPC security groups
        that should end up on each resource passed in.

        :param resources: List of resources containing VPC Security Groups
        :return: List of lists of security groups per resource
        """
        resolved_groups = self.get_groups_by_names(self.get_action_group_names())
        return_groups = []
        for idx, r in enumerate(resources):
            rgroups = self.sg_expr.search(r) or []
            add_groups = self.resolve_group_names(
                r, self._get_array('add'), resolved_groups)
            remove_groups = self.resolve_remove_symbols(
                r,
                self.resolve_group_names(
                    r, self._get_array('remove'), resolved_groups),
                rgroups)
            isolation_groups = self.resolve_group_names(
                r, self._get_array('isolation-group'), resolved_groups)
            for g in remove_groups:
                if g in rgroups:
                    rgroups.remove(g)
            for g in add_groups:
                if g not in rgroups:
                    rgroups.append(g)
            # A vpc attached resource needs at least one group; fall back to
            # the configured isolation group(s) when everything was removed.
            if not rgroups:
                rgroups = list(isolation_groups)
            return_groups.append(rgroups)
        return return_groups
|
{
"content_hash": "f70c5af3d1010226a8ce8f8ae95e5b7e",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 89,
"avg_line_length": 38.91189427312775,
"alnum_prop": 0.5651534020151704,
"repo_name": "thisisshi/cloud-custodian",
"id": "815b34baf6b73dc1cda8a352a053802507d6e307",
"size": "8912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c7n/actions/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "62085"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6684814"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
from django.db import models
from cms.models.fields import PlaceholderField
from managers import ChunkManager
class Chunk(models.Model):
    """A named, taggable CMS placeholder chunk, ordered by priority."""
    name = models.CharField(max_length=200)
    tags = models.CharField(max_length=200)
    code = PlaceholderField('chunk_placeholder', related_name="chunks")
    priority = models.IntegerField()

    objects = ChunkManager()

    def __unicode__(self):
        return "Tags: %s. Priority: %s" % (self.tags, self.priority)

    def has_tag(self, tag):
        """Return True when *tag* appears in the comma-separated tags field."""
        return tag in [piece.strip() for piece in str(self.tags).split(",")]
# Create your models here.
|
{
"content_hash": "03725f6fc3c26df07460c4d9393814c6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.6833602584814217,
"repo_name": "devartis/django-cms-chunks",
"id": "73f59d8cb6c1344528ad843a681f5db7e0807a6b",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms_chunks/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14118"
}
],
"symlink_target": ""
}
|
import pymysql.cursors
from fixture.orm import ORMFixture
#
# connection = pymysql.connect(host="127.0.0.1", database="addressbook", user="root", password="")
#
# try:
# cursor = connection.cursor()
# cursor.execute("select * from group_list")
# for row in cursor.fetchall():
# print(row)
# finally:
# connection.close()
db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
try:
    # Fetch and display every contact, then the total count.
    contacts = db.get_contact_list()
    for contact in contacts:
        print(contact)
    print(len(contacts))
finally:
    # db.destroy()
    pass
|
{
"content_hash": "9d7ab0bd41805df20b8769bb41b8c304",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 98,
"avg_line_length": 24.17391304347826,
"alnum_prop": 0.6276978417266187,
"repo_name": "zr4x/pythonTests",
"id": "3adb01c644f9cf80c5bd8e0259bff1098197851c",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_db_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34679"
}
],
"symlink_target": ""
}
|
# Copyright 2017 Tate M. Walker
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def animate():
    """Show a console spinner until the module-global `done` flag is set."""
    spinner = itertools.cycle(['|', '/', '-', '\\'])
    while not done:
        sys.stdout.write('\rloading ' + next(spinner))
        sys.stdout.flush()
        time.sleep(0.1)
    print(chr(27) + "[2J")
    sys.stdout.write('\rDone!')
def AvFill(ra, dec):
    """Return the A_V extinction value (Schlafly & Finkbeiner) for ra/dec."""
    position = coordinates.SkyCoord(
        " ".join([ra.to_string(), dec.to_string()]), frame='fk5')
    dust_table = IrsaDust.get_extinction_table(position, show_progress=False)
    # table.show_in_browser('chrome')
    # Row index 2 of the table holds the V-band A_SandF value.
    return dust_table['A_SandF'][2]
def getLink(name):
    """Query NED's object search for *name* and return the parsed HTML soup."""
    url = "https://ned.ipac.caltech.edu/cgi-bin/objsearch?"
    params = {'objname': name,
              'extend': 'no',
              'hconst': '73',
              'omegam': '0.27',
              'omegav': '0.73',
              'corr_z': '1',
              'out_csys': 'Equatorial',
              'out_equinox': 'J2000.0',
              'obj_sort': "RA or Longitude",
              'of': 'pre_text',
              'zv_breaker': '30000.0',
              'list_limit': '5',
              'img_stamp': 'YES'}
    response = requests.get(url, params=params)
    from bs4 import BeautifulSoup
    return BeautifulSoup(response.content, 'html.parser')
def Morphology(name):
    """Return the NED morphology classification string, or None if absent."""
    try:
        anchor = getLink(name).find("a", attrs={'name': 'BasicData_0'})
        pre = anchor.next_sibling.next_sibling.next_sibling.find("pre")
        raw = list(pre.children)[2]
        # Strip everything up to the fourth ': ' and trailing whitespace.
        return raw.split(": ", 4)[4].rstrip()
    except (IndexError, AttributeError):
        # Page layout differed or the field was missing.
        return None
def Redshift(name):
    """Return the NED redshift as a string, or None if unavailable."""
    try:
        anchor = getLink(name).find("a", attrs={'name': 'BasicData_0'})
        pre = anchor.next_sibling.next_sibling.next_sibling.find("pre")
        node = list(pre.children)[1]
        # The value sits between the first '>' and the following '<'.
        value = str(node).split(">", 1)[1]
        return value.split("<", 1)[0]
    except (IndexError, AttributeError):
        return None
def LatLong(name):
    """Return [galactic longitude, latitude] strings from NED, or [None, None]."""
    try:
        anchor = getLink(name).find("a", attrs={'name': 'Positions_0'})
        pre = anchor.next_sibling.next_sibling.find("pre")
        raw = list(pre.children)[4]
        # Text after 'Galactic ' holds 'longitude latitude ...'.
        galactic = raw.split("Galactic ", 1)[1].lstrip()
        fields = galactic.split()
        return [str(fields[0]), str(fields[1])]
    except (IndexError, AttributeError):
        return [None, None]
def scrapeValues(name):
    """Return [Helio, Helio_err, VGS, VGS_err] velocities from NED, or Nones."""
    soup = getLink(name)
    # ---------- velocities ----------
    try:
        anchor = soup.find("a", attrs={'name': 'DerivedValues_0'})
        pre = anchor.next_sibling.next_sibling.next_sibling.find("pre")
        children = list(pre.children)
        helio_line = children[2].lstrip('\n')
        vgs_line = children[16].lstrip('\n')
        # Pull out the integer value/uncertainty pairs from each line.
        helio_vals = [int(tok) for tok in helio_line.split() if tok.isdigit()]
        vgs_vals = [int(tok) for tok in vgs_line.split() if tok.isdigit()]
        return [helio_vals[0], helio_vals[1], vgs_vals[0], vgs_vals[1]]
    except (IndexError, AttributeError):
        return [None, None, None, None]
from astropy import units as u
from astropy import coordinates
from astroquery.ned import Ned
from astroquery.irsa_dust import IrsaDust
from astropy.coordinates import Angle,ICRS,SkyCoord
from astropy.coordinates.name_resolve import NameResolveError
import math
import os.path
import sys
import itertools
import threading
import time
import requests
gals = []
start_coord = []
# Read the target galaxy names, one per line.
with open("input.txt") as inp:
    gals = inp.read().splitlines()
# Keep only names that resolve to sky coordinates; drop the rest.
# Iterate a copy (gals[:]) because gals is mutated inside the loop.
for name in gals[:]:
    try:
        start_coord.append(SkyCoord.from_name(name, frame='icrs'))
    except NameResolveError:
        gals.remove(name)
print(gals)
done = False
print(chr(27) + "[2J")  # clear the terminal
threader = threading.Thread(target=animate)
threader.start()
write_file = 'scriptData.csv'
with open(write_file, 'w') as output:
    output.write("Name, Ra, Dec, Lat, Long, Av, Morph, Red, Helio, Error, VGS, Error, \n")
# gals and start_coord are parallel lists; iterate them together instead of
# indexing with range(len(...)).
for gal, coord in zip(gals, start_coord):
    ra = Angle(coord.ra.hour, unit=u.hour)
    dec = coord.dec
    Av = AvFill(ra, dec)
    redshift = Redshift(gal)
    vels = scrapeValues(gal)
    morphology = Morphology(gal)
    coords = LatLong(gal)
    # BUG FIX: str() every field before joining.  The scrapers return None on
    # failure (e.g. LatLong -> [None, None]) and the original
    # "coords[0] + ','" concatenation raised TypeError mid-run.
    row = [gal, ra.to_string(), dec.to_string(), coords[0], coords[1], Av,
           morphology, redshift, vels[0], vels[1], vels[2], vels[3]]
    with open(write_file, 'a') as output:
        output.write(','.join(str(field) for field in row) + '\n')
    print(gal + " Done!")
# Stop the spinner thread.
done = True
|
{
"content_hash": "392ee673913e329b071c61082e27a9a5",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 259,
"avg_line_length": 31.61392405063291,
"alnum_prop": 0.6592592592592592,
"repo_name": "TateWalker/Galactic-Data",
"id": "b33851b66368a65a9163d8fa8632d6f35fd9bc5c",
"size": "4995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quickData.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31384"
}
],
"symlink_target": ""
}
|
import pathlib
import setuptools
import ghb
here = pathlib.Path(__file__).parent.resolve()
# The README doubles as the long description shown on PyPI.
long_description = (here / "README.md").read_text(encoding="utf-8")
setuptools.setup(
    name="ghb",
    version=ghb.__version__,
    description="A collection of useful GitHub commands",
    long_description=long_description,
    license="MIT",
    author="Keith Smiley",
    author_email="keithbsmiley@gmail.com",
    url="https://github.com/keith/ghb",
    packages=["ghb", "ghb.helpers"],
    install_requires=["requests==2.27.1"],
    entry_points={"console_scripts": ["ghb=ghb.__main__:main"]},
    python_requires=">=3.8",
)
|
{
"content_hash": "5fd1229925d812f65747ca06ed590947",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 27.304347826086957,
"alnum_prop": 0.6624203821656051,
"repo_name": "keith/ghb",
"id": "0003d46aca28bf0060ff15755821612d8f1a6d77",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40507"
},
{
"name": "Shell",
"bytes": "1936"
}
],
"symlink_target": ""
}
|
from setup import tc, rm, get_sandbox_path
def test_load_csv_with_missing_values_infer_schema(tc):
    """Import a CSV with missing values, letting the importer infer the schema."""
    frame = tc.frame.import_csv("../datasets/missing_values.csv",
                                header=False, infer_schema=True)
    # Five data rows are expected.
    assert(5 == frame.count())
    expected_rows = [['1', 2, None, 4, 5, None],
                     ['1', 2, 3, None, None, 2.5],
                     ['2', 1, 3, 4, 5, None],
                     ['dog', 20, 30, 40, 50, 60.5],
                     ['', None, 13, 14, 15, 16.5]]
    assert(expected_rows == frame.take(frame.count()))
    # Columns are auto-named C0..C5 with inferred types.
    assert(frame.schema == [('C0', str), ('C1', int), ('C2', int),
                            ('C3', int), ('C4', int), ('C5', float)])
def test_load_csv_with_missing_values_custom_schema(tc):
    """Import a CSV with missing values using an explicitly supplied schema."""
    schema = [("a", str), ("b", int), ("c", float), ("d", int), ("e", int), ("f", float)]
    frame = tc.frame.import_csv("../datasets/missing_values.csv", schema=schema)
    # Five data rows are expected.
    assert(5 == frame.count())
    expected_rows = [['1', 2, None, 4, 5, None],
                     ['1', 2, 3.0, None, None, 2.5],
                     ['2', 1, 3.0, 4, 5, None],
                     ['dog', 20, 30.0, 40, 50, 60.5],
                     ['', None, 13.0, 14, 15, 16.5]]
    assert(expected_rows == frame.take(frame.count()))
    # The frame must report back exactly the schema we supplied.
    assert(frame.schema == schema)
def test_missing_values_add_column(tc):
    """Replace missing values with zero via add_columns."""
    # Create frame with missing values using upload rows
    schema = [('a', int)]
    data = [[1], [4], [None], [None], [10], [None]]
    frame = tc.frame.create(data, schema)
    # Check that frame was correctly created.
    # BUG FIX: the original used assert(6, frame.count()) — that asserts a
    # two-element tuple, which is always truthy, so the check never ran.
    assert(6 == frame.count())
    assert(data == frame.take(frame.count()))

    # Define function that replaces missing values with zero
    def noneToZero(x):
        if x is None:
            return 0
        else:
            return x

    # Use add columns to create a new column that replaces missing values with 0.
    frame.add_columns(lambda row: noneToZero(row['a']), ('a_corrected', int))
    expected = [[1], [4], [0], [0], [10], [0]]
    # BUG FIX: assert(expected, ...) was also an always-true tuple assert.
    assert(expected == frame.take(frame.count(), columns='a_corrected'))
def test_missing_values_drop_rows(tc):
    """Drop rows whose value is missing (None)."""
    # Create frame with missing values using upload rows
    schema = [('a', int)]
    data = [[1], [4], [None], [None], [10], [None]]
    frame = tc.frame.create(data, schema)
    # Check that frame was correctly created
    assert(6 == frame.count())
    assert(data == frame.take(frame.count()))
    # Check that we can drop rows with missing values.
    # Idiom fix: compare to None with 'is', not '=='.
    frame.drop_rows(lambda row: row['a'] is None)
    expected = [[1], [4], [10]]
    # BUG FIX: assert(expected, ...) asserted a non-empty tuple (always true);
    # compare the taken rows against the expectation explicitly.
    assert(expected == frame.take(frame.count(), columns='a'))
def test_missing_values_with_frame_create_infer_schema(tc):
    """Frame creation should infer a single int column despite missing values."""
    rows = [[1], [4], [None], [None], [10], [None]]
    frame = tc.frame.create(rows)
    # One column, inferred as int.
    assert(len(frame.schema) == 1)
    assert(frame.schema[0][1] == int)
|
{
"content_hash": "9a7db533cde68d1e4878c37817704ec8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 103,
"avg_line_length": 38.08641975308642,
"alnum_prop": 0.5507293354943273,
"repo_name": "dmsuehir/spark-tk",
"id": "ba73d27eec80d4cfa159b8cbb939cc8fe760bf44",
"size": "3790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integration-tests/tests/test_frame_missing_values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11509"
},
{
"name": "Python",
"bytes": "1453671"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "1495386"
},
{
"name": "Shell",
"bytes": "24621"
}
],
"symlink_target": ""
}
|
"""Quotas for instances, volumes, and floating ips."""
from nova import db
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
# Quota limit flags.  Limits are per project unless the help text says
# otherwise; a value of -1 means unlimited (mapped to None by
# _get_default_quotas below).
flags.DEFINE_integer('quota_instances', 10,
                     'number of instances allowed per project')
flags.DEFINE_integer('quota_cores', 20,
                     'number of instance cores allowed per project')
flags.DEFINE_integer('quota_ram', 50 * 1024,
                     'megabytes of instance ram allowed per project')
flags.DEFINE_integer('quota_volumes', 10,
                     'number of volumes allowed per project')
flags.DEFINE_integer('quota_gigabytes', 1000,
                     'number of volume gigabytes allowed per project')
flags.DEFINE_integer('quota_floating_ips', 10,
                     'number of floating ips allowed per project')
flags.DEFINE_integer('quota_metadata_items', 128,
                     'number of metadata items allowed per instance')
flags.DEFINE_integer('quota_max_injected_files', 5,
                     'number of injected files allowed')
flags.DEFINE_integer('quota_max_injected_file_content_bytes', 10 * 1024,
                     'number of bytes allowed per injected file')
flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
                     'number of bytes allowed per injected file path')
def _get_default_quotas():
    """Return the default quota mapping built from the quota flags."""
    flag_values = {
        'instances': FLAGS.quota_instances,
        'cores': FLAGS.quota_cores,
        'ram': FLAGS.quota_ram,
        'volumes': FLAGS.quota_volumes,
        'gigabytes': FLAGS.quota_gigabytes,
        'floating_ips': FLAGS.quota_floating_ips,
        'metadata_items': FLAGS.quota_metadata_items,
        'injected_files': FLAGS.quota_max_injected_files,
        'injected_file_content_bytes':
            FLAGS.quota_max_injected_file_content_bytes,
    }
    # -1 in the quota flags means unlimited, represented here as None.
    return dict((name, None if limit == -1 else limit)
                for name, limit in flag_values.items())
def get_project_quotas(context, project_id):
    """Return the effective quotas for a project: defaults overridden by DB."""
    quotas = _get_default_quotas()
    overrides = db.quota_get_all_by_project(context, project_id)
    for resource in quotas:
        if resource in overrides:
            quotas[resource] = overrides[resource]
    return quotas
def _get_request_allotment(requested, used, quota):
if quota is None:
return requested
return quota - used
def allowed_instances(context, requested_instances, instance_type):
    """Check quota and return min(requested_instances, allowed_instances)."""
    project_id = context.project_id
    context = context.elevated()
    # Translate the instance request into core and RAM demands.
    cores_wanted = requested_instances * instance_type['vcpus']
    ram_wanted = requested_instances * instance_type['memory_mb']
    used_instances, used_cores, used_ram = db.instance_data_get_for_project(
        context, project_id)
    quota = get_project_quotas(context, project_id)
    instance_headroom = _get_request_allotment(requested_instances,
                                               used_instances,
                                               quota['instances'])
    core_headroom = _get_request_allotment(cores_wanted, used_cores,
                                           quota['cores'])
    ram_headroom = _get_request_allotment(ram_wanted, used_ram, quota['ram'])
    # The binding constraint is whichever resource runs out first.
    instance_headroom = min(instance_headroom,
                            core_headroom // instance_type['vcpus'],
                            ram_headroom // instance_type['memory_mb'])
    return min(requested_instances, instance_headroom)
def allowed_volumes(context, requested_volumes, size):
    """Check quota and return min(requested_volumes, allowed_volumes)."""
    project_id = context.project_id
    context = context.elevated()
    size = int(size)
    gigabytes_wanted = requested_volumes * size
    used_volumes, used_gigabytes = db.volume_data_get_for_project(
        context, project_id)
    quota = get_project_quotas(context, project_id)
    volume_headroom = _get_request_allotment(
        requested_volumes, used_volumes, quota['volumes'])
    gigabyte_headroom = _get_request_allotment(
        gigabytes_wanted, used_gigabytes, quota['gigabytes'])
    # Avoid dividing by zero when a zero-sized volume is requested.
    if size != 0:
        volume_headroom = min(volume_headroom,
                              int(gigabyte_headroom // size))
    return min(requested_volumes, volume_headroom)
def allowed_floating_ips(context, requested_floating_ips):
    """Check quota and return min(requested, allowed) floating ips."""
    project_id = context.project_id
    context = context.elevated()
    used_ips = db.floating_ip_count_by_project(context, project_id)
    quota = get_project_quotas(context, project_id)
    ip_headroom = _get_request_allotment(
        requested_floating_ips, used_ips, quota['floating_ips'])
    return min(requested_floating_ips, ip_headroom)
def _calculate_simple_quota(context, resource, requested):
    """Check quota for resource; return min(requested, allowed).

    'Simple' quotas have no usage tracking, so used is always 0.
    """
    project_quotas = get_project_quotas(context, context.project_id)
    allotment = _get_request_allotment(requested, 0, project_quotas[resource])
    return min(requested, allotment)
def allowed_metadata_items(context, requested_metadata_items):
    """Return the number of metadata items allowed."""
    return _calculate_simple_quota(
        context, 'metadata_items', requested_metadata_items)


def allowed_injected_files(context, requested_injected_files):
    """Return the number of injected files allowed."""
    return _calculate_simple_quota(
        context, 'injected_files', requested_injected_files)


def allowed_injected_file_content_bytes(context, requested_bytes):
    """Return the number of bytes allowed per injected file content."""
    return _calculate_simple_quota(
        context, 'injected_file_content_bytes', requested_bytes)


def allowed_injected_file_path_bytes(context):
    """Return the number of bytes allowed in an injected file path."""
    # Path length is a flat flag-driven limit, not tracked per project.
    return FLAGS.quota_max_injected_file_path_bytes


class QuotaError(exception.ApiError):
    """Quota Exceeded."""
|
{
"content_hash": "b4e80cc9d47e9964b512e364bd204fc4",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 42.443708609271525,
"alnum_prop": 0.6275550007801529,
"repo_name": "salv-orlando/MyRepo",
"id": "771477747e0002ac3a4601318dceef9944b69248",
"size": "7186",
"binary": false,
"copies": "4",
"ref": "refs/heads/bp/xenapi-security-groups",
"path": "nova/quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4477933"
},
{
"name": "Shell",
"bytes": "34174"
}
],
"symlink_target": ""
}
|
import six
from six import text_type
import time
import warnings
from mwclient.util import parse_timestamp
import mwclient.listing
import mwclient.errors
class Page(object):
    """A page (or other title) on a MediaWiki site.

    Wraps the API 'info' properties for the page and exposes read, edit,
    move, delete and property-listing operations through the owning Site.
    """

    def __init__(self, site, name, info=None, extra_properties=None):
        """Look up `name` (title, page id, or another Page) on `site`.

        If `info` is given, it is used instead of querying the API.
        `extra_properties` maps extra prop names to their API parameters.
        """
        # Copy-construct when another Page instance is passed as the name.
        if type(name) is type(self):
            return self.__dict__.update(name.__dict__)
        self.site = site
        self.name = name
        self.section = None

        if not info:
            if extra_properties:
                prop = 'info|' + '|'.join(six.iterkeys(extra_properties))
                extra_props = []
                [extra_props.extend(extra_prop) for extra_prop in six.itervalues(extra_properties)]
            else:
                prop = 'info'
                extra_props = ()

            # An int name is treated as a page id, anything else as a title.
            if type(name) is int:
                info = self.site.api('query', prop=prop, pageids=name,
                                     inprop='protection', *extra_props)
            else:
                info = self.site.api('query', prop=prop, titles=name,
                                     inprop='protection', *extra_props)
            info = six.next(six.itervalues(info['query']['pages']))
        self._info = info

        self.namespace = info.get('ns', 0)
        self.name = info.get('title', u'')
        if self.namespace:
            self.page_title = self.strip_namespace(self.name)
        else:
            self.page_title = self.name

        self.touched = parse_timestamp(info.get('touched'))
        self.revision = info.get('lastrevid', 0)
        self.exists = 'missing' not in info
        self.length = info.get('length')
        self.protection = dict([(i['type'], (i['level'], i['expiry'])) for i in info.get('protection', ()) if i])
        self.redirect = 'redirect' in info
        self.pageid = info.get('pageid', None)
        self.contentmodel = info.get('contentmodel', None)
        self.pagelanguage = info.get('pagelanguage', None)
        self.restrictiontypes = info.get('restrictiontypes', None)

        # Timestamps used for edit-conflict detection in save().
        self.last_rev_time = None
        self.edit_time = None

    def redirects_to(self):
        """ Returns the redirect target page, or None if the page is not a redirect page."""
        info = self.site.api('query', prop='pageprops', titles=self.name, redirects='')['query']
        if 'redirects' in info:
            for page in info['redirects']:
                if page['from'] == self.name:
                    return Page(self.site, page['to'])
            return None
        else:
            return None

    def resolve_redirect(self):
        """ Returns the redirect target page, or the current page if it's not a redirect page."""
        target_page = self.redirects_to()
        if target_page is None:
            return self
        else:
            return target_page

    def __repr__(self):
        return "<Page object '%s' for %s>" % (self.name.encode('utf-8'), self.site)

    def __unicode__(self):
        return self.name

    @staticmethod
    def strip_namespace(title):
        """Return `title` with its leading 'Namespace:' prefix removed."""
        if title[0] == ':':
            title = title[1:]
        return title[title.find(':') + 1:]

    @staticmethod
    def normalize_title(title):
        """Normalize a title: trim, capitalize first letter, spaces -> underscores."""
        # TODO: Make site dependent
        title = title.strip()
        if title[0] == ':':
            title = title[1:]
        title = title[0].upper() + title[1:]
        title = title.replace(' ', '_')
        return title

    def can(self, action):
        """Return True if the logged-in user may perform `action` on this page."""
        level = self.protection.get(action, (action, ))[0]
        if level == 'sysop':
            level = 'editprotected'
        return level in self.site.rights

    def get_token(self, type, force=False):
        return self.site.get_token(type, force, title=self.name)

    def get_expanded(self):
        """Deprecated. Use page.text(expandtemplates=True) instead"""
        warnings.warn("page.get_expanded() was deprecated in mwclient 0.7.0 and will be removed in 0.8.0, use page.text(expandtemplates=True) instead.",
                      category=DeprecationWarning, stacklevel=2)
        return self.text(expandtemplates=True)

    def edit(self, *args, **kwargs):
        """Deprecated. Use page.text() instead"""
        warnings.warn("page.edit() was deprecated in mwclient 0.7.0 and will be removed in 0.8.0, please use page.text() instead.",
                      category=DeprecationWarning, stacklevel=2)
        return self.text(*args, **kwargs)

    def text(self, section=None, expandtemplates=False):
        """
        Returns the current wikitext of the page, or of a specific section.
        If the page does not exist, an empty string is returned.
        :Arguments:
        - `section` : numbered section or `None` to get the whole page (default: `None`)
        - `expandtemplates` : set to `True` to expand templates (default: `False`)
        """
        if not self.can('read'):
            raise mwclient.errors.InsufficientPermission(self)
        if not self.exists:
            return u''
        if section is not None:
            section = text_type(section)
        revs = self.revisions(prop='content|timestamp', limit=1, section=section, expandtemplates=expandtemplates)
        try:
            # BUG FIX: was revs.next(), which is Python-2-only; the builtin
            # next() works on both Python 2 and 3 iterators (this module
            # otherwise uses six for 2/3 compatibility).
            rev = next(revs)
            text = rev['*']
            self.section = section
            self.last_rev_time = rev['timestamp']
        except StopIteration:
            text = u''
            self.section = None
            self.last_rev_time = None
        if not expandtemplates:
            self.edit_time = time.gmtime()
        return text

    def save(self, text, summary=u'', minor=False, bot=True, section=None, **kwargs):
        """
        Update the text of a section or the whole page by performing an edit operation.
        """
        if not self.site.logged_in and self.site.force_login:
            # Should we really check for this?
            raise mwclient.errors.LoginError(self.site, 'By default, mwclient protects you from ' +
                                             'accidentally editing without being logged in. If you ' +
                                             'actually want to edit without logging in, you can set ' +
                                             'force_login on the Site object to False.')
        if self.site.blocked:
            raise mwclient.errors.UserBlocked(self.site.blocked)
        if not self.can('edit'):
            raise mwclient.errors.ProtectedPageError(self)

        if self.section is not None and section is None:
            warnings.warn('From mwclient version 0.8.0, the `save()` method will no longer ' +
                          'implicitly use the `section` parameter from the last `text()` or ' +
                          '`edit()` call. Please pass the `section` parameter explicitly to ' +
                          'the save() method to save only a single section.',
                          category=DeprecationWarning, stacklevel=2)
            section = self.section

        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if minor:
            data['minor'] = '1'
        if not minor:
            data['notminor'] = '1'
        # basetimestamp/starttimestamp let the server detect edit conflicts.
        if self.last_rev_time:
            data['basetimestamp'] = time.strftime('%Y%m%d%H%M%S', self.last_rev_time)
        if self.edit_time:
            data['starttimestamp'] = time.strftime('%Y%m%d%H%M%S', self.edit_time)
        if bot:
            data['bot'] = '1'
        if section:
            data['section'] = section
        data.update(kwargs)

        def do_edit():
            result = self.site.api('edit', title=self.name, text=text,
                                   summary=summary, token=self.get_token('edit'),
                                   **data)
            if result['edit'].get('result').lower() == 'failure':
                raise mwclient.errors.EditError(self, result['edit'])
            return result

        try:
            result = do_edit()
        except mwclient.errors.APIError as e:
            if e.code == 'badtoken':
                # Retry, but only once to avoid an infinite loop
                self.get_token('edit', force=True)
                try:
                    result = do_edit()
                except mwclient.errors.APIError as e:
                    self.handle_edit_error(e, summary)
            else:
                self.handle_edit_error(e, summary)

        # 'newtimestamp' is not included if no change was made
        if 'newtimestamp' in result['edit'].keys():
            self.last_rev_time = parse_timestamp(result['edit'].get('newtimestamp'))
        return result['edit']

    def handle_edit_error(self, e, summary):
        """Translate API edit errors into mwclient exceptions; re-raise others."""
        if e.code == 'editconflict':
            raise mwclient.errors.EditError(self, summary, e.info)
        elif e.code in ('protectedtitle', 'cantcreate', 'cantcreate-anon', 'noimageredirect-anon',
                        'noimageredirect', 'noedit-anon', 'noedit'):
            raise mwclient.errors.ProtectedPageError(self, e.code, e.info)
        else:
            raise

    def move(self, new_title, reason='', move_talk=True, no_redirect=False):
        """Move (rename) page to new_title.
        If user account is an administrator, specify no_direct as True to not
        leave a redirect.
        If user does not have permission to move page, an InsufficientPermission
        exception is raised.
        """
        if not self.can('move'):
            raise mwclient.errors.InsufficientPermission(self)
        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if move_talk:
            data['movetalk'] = '1'
        if no_redirect:
            data['noredirect'] = '1'
        result = self.site.api('move', ('from', self.name), to=new_title,
                               token=self.get_token('move'), reason=reason, **data)
        return result['move']

    def delete(self, reason='', watch=False, unwatch=False, oldimage=False):
        """Delete page.
        If user does not have permission to delete page, an InsufficientPermission
        exception is raised.
        """
        if not self.can('delete'):
            raise mwclient.errors.InsufficientPermission(self)
        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if watch:
            data['watch'] = '1'
        if unwatch:
            data['unwatch'] = '1'
        if oldimage:
            data['oldimage'] = oldimage
        result = self.site.api('delete', title=self.name,
                               token=self.get_token('delete'),
                               reason=reason, **data)
        return result['delete']

    def purge(self):
        """Purge server-side cache of page. This will re-render templates and other
        dynamic content.
        """
        self.site.raw_index('purge', title=self.name)

    # def watch: requires 1.14

    # Properties
    def backlinks(self, namespace=None, filterredir='all', redirect=False, limit=None, generator=True):
        """List pages that link to this page."""
        prefix = mwclient.listing.List.get_prefix('bl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace, filterredir=filterredir))
        if redirect:
            kwargs['%sredirect' % prefix] = '1'
        kwargs[prefix + 'title'] = self.name
        return mwclient.listing.List.get_list(generator)(self.site, 'backlinks', 'bl', limit=limit, return_values='title', **kwargs)

    def categories(self, generator=True):
        """List the categories this page belongs to."""
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'categories', 'cl')
        else:
            # TODO: return sortkey if wanted
            return mwclient.listing.PageProperty(self, 'categories', 'cl', return_values='title')

    def embeddedin(self, namespace=None, filterredir='all', redirect=False, limit=None, generator=True):
        """List pages that transclude this page."""
        prefix = mwclient.listing.List.get_prefix('ei', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace, filterredir=filterredir))
        if redirect:
            kwargs['%sredirect' % prefix] = '1'
        kwargs[prefix + 'title'] = self.name
        return mwclient.listing.List.get_list(generator)(self.site, 'embeddedin', 'ei', limit=limit, return_values='title', **kwargs)

    def extlinks(self):
        """List external links on this page."""
        return mwclient.listing.PageProperty(self, 'extlinks', 'el', return_values='*')

    def images(self, generator=True):
        """List files/images embedded in this page."""
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'images', '')
        else:
            return mwclient.listing.PageProperty(self, 'images', '', return_values='title')

    def iwlinks(self):
        """List interwiki links on this page."""
        return mwclient.listing.PageProperty(self, 'iwlinks', 'iw', return_values=('prefix', '*'))

    def langlinks(self, **kwargs):
        """List interlanguage links on this page."""
        return mwclient.listing.PageProperty(self, 'langlinks', 'll', return_values=('lang', '*'), **kwargs)

    def links(self, namespace=None, generator=True, redirects=False):
        """List internal wikilinks contained in this page."""
        prefix = mwclient.listing.List.get_prefix('pl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace))
        if redirects:
            kwargs['redirects'] = '1'
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'links', 'pl', **kwargs)
        else:
            return mwclient.listing.PageProperty(self, 'links', 'pl', return_values='title', **kwargs)

    def revisions(self, startid=None, endid=None, start=None, end=None,
                  dir='older', user=None, excludeuser=None, limit=50,
                  prop='ids|timestamp|flags|comment|user', expandtemplates=False, section=None):
        """Iterate over this page's revisions."""
        kwargs = dict(mwclient.listing.List.generate_kwargs('rv', startid=startid, endid=endid, start=start,
                                                            end=end, user=user, excludeuser=excludeuser))
        kwargs['rvdir'] = dir
        kwargs['rvprop'] = prop
        if expandtemplates:
            kwargs['rvexpandtemplates'] = '1'
        if section is not None:
            kwargs['rvsection'] = section
        return mwclient.listing.RevisionsIterator(self, 'revisions', 'rv', limit=limit, **kwargs)

    def templates(self, namespace=None, generator=True):
        """List templates used on this page."""
        prefix = mwclient.listing.List.get_prefix('tl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace))
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'templates', prefix, **kwargs)
        else:
            return mwclient.listing.PageProperty(self, 'templates', prefix, return_values='title', **kwargs)
|
{
"content_hash": "79e29b11ec26a63c7a0519c41b625726",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 152,
"avg_line_length": 40.78888888888889,
"alnum_prop": 0.5765459002996459,
"repo_name": "semplea/characters-meta",
"id": "ea54f3a80da1e00914c79bd6efbcfe6e14c3bde8",
"size": "14684",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/mwclient/mwclient/page.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27805"
},
{
"name": "Jupyter Notebook",
"bytes": "504755"
},
{
"name": "Makefile",
"bytes": "7789"
},
{
"name": "Perl",
"bytes": "39921"
},
{
"name": "Python",
"bytes": "525979"
},
{
"name": "Shell",
"bytes": "24663"
},
{
"name": "TeX",
"bytes": "141827"
}
],
"symlink_target": ""
}
|
import pytest
import mock
from android_build_system.run import pre_check
class TestPreCheck():
    """Tests for android_build_system.run.pre_check.

    Every environment check is patched with mock.Mock so no real build
    tooling needs to be present when the tests run.
    """
    # All four checks report success -> pre_check() must return normally.
    @mock.patch("android_build_system.pre_checks.env_check.CmdCheck.check",
                new=mock.Mock(return_value=True))
    @mock.patch("android_build_system.pre_checks.env_check.EnvCheck.check",
                new=mock.Mock(return_value=True))
    @mock.patch("android_build_system.pre_checks.env_check.AAPTCheck.check",
                new=mock.Mock(return_value=True))
    @mock.patch("android_build_system.pre_checks.env_check.ZIPALIGNCheck.check",
                new=mock.Mock(return_value=True))
    def test_all_passed(self):
        """pre_check() completes without exiting when every check passes."""
        pre_check()

    # One check (EnvCheck) fails -> pre_check() is expected to exit.
    @mock.patch("android_build_system.pre_checks.env_check.EnvCheck.check",
                new=mock.Mock(return_value=False))
    @mock.patch("android_build_system.pre_checks.env_check.AAPTCheck.check",
                new=mock.Mock(return_value=True))
    @mock.patch("android_build_system.pre_checks.env_check.ZIPALIGNCheck.check",
                new=mock.Mock(return_value=True))
    @mock.patch("android_build_system.pre_checks.env_check.CmdCheck.check",
                new=mock.Mock(return_value=True))
    def test_failed(self):
        """pre_check() raises SystemExit when any single check fails."""
        with pytest.raises(SystemExit):
            pre_check()
|
{
"content_hash": "898d91dad9807bcb33e1a27bb55a540d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 42.1,
"alnum_prop": 0.6603325415676959,
"repo_name": "letter113/android-build-system",
"id": "e8cdd44e93a55ee574ebe4b46e3e1a70dea83663",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "android_build_system/tests/test_run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1539"
},
{
"name": "Python",
"bytes": "19987"
}
],
"symlink_target": ""
}
|
"""ovirt-host-setup apache plugin."""
from otopi import util
from . import core
from . import misc
from . import selinux
from . import ssl
@util.export
def createPlugins(context):
    """Instantiate every apache setup plugin against the given otopi context."""
    for plugin_module in (core, misc, selinux, ssl):
        plugin_module.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
|
{
"content_hash": "3ad902829085ec1e172be45afcfac161",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 39,
"avg_line_length": 17.238095238095237,
"alnum_prop": 0.7320441988950276,
"repo_name": "eayun/ovirt-engine",
"id": "07f7d08d7ea7851fe53b77e207ab63030b1dd653",
"size": "997",
"binary": false,
"copies": "10",
"ref": "refs/heads/eayunos-4.2",
"path": "packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-common/apache/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69586"
},
{
"name": "HTML",
"bytes": "16218"
},
{
"name": "Java",
"bytes": "35074047"
},
{
"name": "JavaScript",
"bytes": "69948"
},
{
"name": "Makefile",
"bytes": "24723"
},
{
"name": "PLSQL",
"bytes": "1101"
},
{
"name": "PLpgSQL",
"bytes": "796728"
},
{
"name": "Python",
"bytes": "970860"
},
{
"name": "Roff",
"bytes": "10764"
},
{
"name": "Shell",
"bytes": "163853"
},
{
"name": "XSLT",
"bytes": "54683"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import hiyapyco
import yaml
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.exceptions import NotInProject
from cumulusci.core.exceptions import ProjectConfigNotFound
class YamlProjectConfig(BaseProjectConfig):
    """Project configuration assembled from layered YAML files.

    Merge order, lowest to highest precedence: global config, global local
    config, the project's ``cumulusci.yml``, the project's local config,
    then any YAML text passed via the ``additional_yaml`` keyword argument.
    """
    config_filename = 'cumulusci.yml'

    def __init__(self, *args, **kwargs):
        # Initialize the dictionaries for the individual configs
        self.config_project = {}
        self.config_project_local = {}
        self.config_additional_yaml = {}
        # optionally pass in a kwarg named 'additional_yaml' that will
        # be added to the YAML merge stack.
        self.additional_yaml = None
        if 'additional_yaml' in kwargs:
            self.additional_yaml = kwargs.pop('additional_yaml')
        super(YamlProjectConfig, self).__init__(*args, **kwargs)

    @property
    def config_project_local_path(self):
        """Path to the local project config file, or None if it does not exist."""
        path = os.path.join(
            self.project_local_dir,
            self.config_filename,
        )
        if os.path.isfile(path):
            return path

    def _load_config(self):
        """ Loads the configuration for the project """
        # Verify that we're in a project
        repo_root = self.repo_root
        if not repo_root:
            raise NotInProject(
                'No repository found in current path. You must be inside a repository to initialize the project configuration')
        # Verify that the project's root has a config file
        if not self.config_project_path:
            raise ProjectConfigNotFound(
                'The file {} was not found in the repo root: {}'.format(
                    self.config_filename,
                    repo_root
                )
            )
        # Start the merged yaml config from the global and global local configs
        merge_yaml = [self.global_config_obj.config_global_path]
        if self.global_config_obj.config_global_local_path:
            merge_yaml.append(self.global_config_obj.config_global_local_path)
        # Load the project's yaml config file.
        # safe_load: config files contain only plain YAML, and yaml.load
        # without an explicit Loader both allows arbitrary object
        # construction and raises TypeError on PyYAML >= 6.0.
        with open(self.config_project_path, 'r') as f_config:
            project_config = yaml.safe_load(f_config)
        if project_config:
            self.config_project.update(project_config)
        merge_yaml.append(self.config_project_path)
        # Load the local project yaml config file if it exists
        if self.config_project_local_path:
            with open(self.config_project_local_path, 'r') as f_local_config:
                local_config = yaml.safe_load(f_local_config)
            if local_config:
                self.config_project_local.update(local_config)
            merge_yaml.append(self.config_project_local_path)
        # merge in any additional yaml that was passed along
        if self.additional_yaml:
            additional_yaml_config = yaml.safe_load(self.additional_yaml)
            if additional_yaml_config:
                self.config_additional_yaml.update(additional_yaml_config)
            merge_yaml.append(self.additional_yaml)
        self.config = hiyapyco.load(
            *merge_yaml,
            method=hiyapyco.METHOD_MERGE,
            loglevel='INFO'
        )
|
{
"content_hash": "0c9ff60ce3af44d9e58708e5afb0bf1f",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 128,
"avg_line_length": 37.406976744186046,
"alnum_prop": 0.6216972334473112,
"repo_name": "e02d96ec16/CumulusCI",
"id": "d31e319457e9545302f8db4f2b4669bc03d9a18f",
"size": "3217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/core/config/YamlProjectConfig.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "641697"
},
{
"name": "RobotFramework",
"bytes": "9270"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
}
|
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder as c_plugin
from heat.engine.clients.os import keystone as k_plugin
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
quota_template = '''
heat_template_version: newton
description: Sample cinder quota heat template
resources:
my_quota:
type: OS::Cinder::Quota
properties:
project: demo
gigabytes: 5
snapshots: 2
volumes: 3
'''
class CinderQuotaTest(common.HeatTestCase):
    """Tests for the OS::Cinder::Quota resource.

    Covers property validation, the create/update/delete handlers, and the
    usage checks that reject quota values below current consumption.
    """
    def setUp(self):
        super(CinderQuotaTest, self).setUp()
        self.ctx = utils.dummy_context()
        self.patchobject(c_plugin.CinderClientPlugin, 'has_extension',
                         return_value=True)
        self.patchobject(k_plugin.KeystoneClientPlugin, 'get_project_id',
                         return_value='some_project_id')
        tpl = template_format.parse(quota_template)
        self.stack = parser.Stack(
            self.ctx, 'cinder_quota_test_stack',
            template.Template(tpl)
        )
        self.my_quota = self.stack['my_quota']
        # Replace the cinder client with mocks so no real API calls happen.
        cinder = mock.MagicMock()
        self.cinderclient = mock.MagicMock()
        self.my_quota.client = cinder
        cinder.return_value = self.cinderclient
        self.quotas = self.cinderclient.quotas
        self.quota_set = mock.MagicMock()
        self.quotas.update.return_value = self.quota_set
        self.quotas.delete.return_value = self.quota_set
        # Fake existing usage: two snapshots and three volumes, size 1 each,
        # exactly matching the template's snapshot/volume quotas.
        class FakeVolumeOrSnapshot(object):
            def __init__(self, size=1):
                self.size = size
        self.fv = FakeVolumeOrSnapshot
        f_v = self.fv()
        self.volume_snapshots = self.cinderclient.volume_snapshots
        self.volume_snapshots.list.return_value = [f_v, f_v]
        self.volumes = self.cinderclient.volumes
        self.volumes.list.return_value = [f_v, f_v, f_v]
        self.err_msg = ("Invalid quota %(property)s value(s): %(value)s. "
                        "Can not be less than the current usage value(s): "
                        "%(total)s.")
    def _test_validate(self, resource, error_msg):
        # Helper: validation must fail and the message must contain error_msg.
        exc = self.assertRaises(exception.StackValidationFailed,
                                resource.validate)
        self.assertIn(error_msg, six.text_type(exc))
    def _test_invalid_property(self, prop_name):
        # Helper: values below -1 are rejected by the property schema.
        my_quota = self.stack['my_quota']
        props = self.stack.t.t['resources']['my_quota']['properties'].copy()
        props[prop_name] = -2
        my_quota.t = my_quota.t.freeze(properties=props)
        my_quota.reparse()
        error_msg = ('Property error: resources.my_quota.properties.%s:'
                     ' -2 is out of range (min: -1, max: None)' % prop_name)
        self._test_validate(my_quota, error_msg)
    def test_invalid_gigabytes(self):
        self._test_invalid_property('gigabytes')
    def test_invalid_snapshots(self):
        self._test_invalid_property('snapshots')
    def test_invalid_volumes(self):
        self._test_invalid_property('volumes')
    def test_miss_all_quotas(self):
        # At least one of the three quota properties must be present.
        my_quota = self.stack['my_quota']
        props = self.stack.t.t['resources']['my_quota']['properties'].copy()
        del props['gigabytes'], props['snapshots'], props['volumes']
        my_quota.t = my_quota.t.freeze(properties=props)
        my_quota.reparse()
        msg = ('At least one of the following properties must be specified: '
               'gigabytes, snapshots, volumes.')
        self.assertRaisesRegex(exception.PropertyUnspecifiedError, msg,
                               my_quota.validate)
    def test_quota_handle_create(self):
        self.my_quota.physical_resource_name = mock.MagicMock(
            return_value='some_resource_id')
        self.my_quota.reparse()
        self.my_quota.handle_create()
        self.quotas.update.assert_called_once_with(
            'some_project_id',
            gigabytes=5,
            snapshots=2,
            volumes=3
        )
        self.assertEqual('some_resource_id', self.my_quota.resource_id)
    def test_quota_handle_update(self):
        # Update sends only the properties present in the new definition.
        tmpl_diff = mock.MagicMock()
        prop_diff = mock.MagicMock()
        props = {'project': 'some_project_id', 'gigabytes': 6,
                 'volumes': 4}
        json_snippet = rsrc_defn.ResourceDefinition(
            self.my_quota.name,
            'OS::Cinder::Quota',
            properties=props)
        self.my_quota.reparse()
        self.my_quota.handle_update(json_snippet, tmpl_diff, prop_diff)
        self.quotas.update.assert_called_once_with(
            'some_project_id',
            gigabytes=6,
            volumes=4
        )
    def test_quota_handle_delete(self):
        self.my_quota.reparse()
        self.my_quota.handle_delete()
        self.quotas.delete.assert_called_once_with('some_project_id')
    def test_quota_with_invalid_gigabytes(self):
        # Usage now totals 6 GB, above the requested gigabytes quota of 5.
        fake_v = self.fv(2)
        self.volumes.list.return_value = [fake_v, fake_v]
        self.my_quota.physical_resource_name = mock.MagicMock(
            return_value='some_resource_id')
        self.my_quota.reparse()
        err = self.assertRaises(ValueError, self.my_quota.handle_create)
        self.assertEqual(
            self.err_msg % {'property': 'gigabytes', 'value': 5, 'total': 6},
            six.text_type(err))
    def test_quota_with_invalid_volumes(self):
        # Four existing volumes exceed the requested volumes quota of 3.
        fake_v = self.fv(0)
        self.volumes.list.return_value = [fake_v, fake_v, fake_v, fake_v]
        self.my_quota.physical_resource_name = mock.MagicMock(
            return_value='some_resource_id')
        self.my_quota.reparse()
        err = self.assertRaises(ValueError, self.my_quota.handle_create)
        self.assertEqual(
            self.err_msg % {'property': 'volumes', 'value': 3, 'total': 4},
            six.text_type(err))
    def test_quota_with_invalid_snapshots(self):
        # Four existing snapshots exceed the requested snapshots quota of 2.
        fake_v = self.fv(0)
        self.volume_snapshots.list.return_value = [fake_v, fake_v, fake_v,
                                                   fake_v]
        self.my_quota.physical_resource_name = mock.MagicMock(
            return_value='some_resource_id')
        self.my_quota.reparse()
        err = self.assertRaises(ValueError, self.my_quota.handle_create)
        self.assertEqual(
            self.err_msg % {'property': 'snapshots', 'value': 2, 'total': 4},
            six.text_type(err))
    def _test_quota_with_unlimited_value(self, prop_name):
        # Helper: a value of -1 (unlimited) must bypass the usage check.
        my_quota = self.stack['my_quota']
        props = self.stack.t.t['resources']['my_quota']['properties'].copy()
        props[prop_name] = -1
        my_quota.t = my_quota.t.freeze(properties=props)
        my_quota.reparse()
        my_quota.handle_create()
        kwargs = {'gigabytes': 5, 'snapshots': 2, 'volumes': 3}
        kwargs[prop_name] = -1
        self.quotas.update.assert_called_once_with('some_project_id', **kwargs)
    def test_quota_with_unlimited_gigabytes(self):
        self._test_quota_with_unlimited_value('gigabytes')
    def test_quota_with_unlimited_snapshots(self):
        self._test_quota_with_unlimited_value('snapshots')
    def test_quota_with_unlimited_volumes(self):
        self._test_quota_with_unlimited_value('volumes')
|
{
"content_hash": "b2f1a30cb5cd9e1e656da1d11e1ee870",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 38.705263157894734,
"alnum_prop": 0.608784335055752,
"repo_name": "noironetworks/heat",
"id": "62b3f9cbbb2cb7ee10e37d25c4e71ef05b83d1d5",
"size": "7928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/cinder/test_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
}
|
import os
def get_package_data():
    """Return the package-data mapping for the svo_fps test suite.

    Maps the test package name to the glob patterns of its data files.
    """
    data_globs = [os.path.join('data', '*.xml')]
    return {'astroquery.svo_fps.tests': data_globs}
|
{
"content_hash": "5d3a0bee1a9531a96777538a55e776a6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.635036496350365,
"repo_name": "imbasimba/astroquery",
"id": "21968ea2a2c2e765cb1e1bdd6dbcaf128c40e623",
"size": "203",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astroquery/svo_fps/tests/setup_package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "493404"
},
{
"name": "Python",
"bytes": "2852847"
}
],
"symlink_target": ""
}
|
from .base import PageObject
class IndexPage(PageObject):
    """Page object for the PyPI front page."""

    path = "/"

    def is_browser_on_page(self):
        """True when the browser shows the PyPI index page title."""
        expected_title = "PyPI - the Python Package Index · Warehouse"
        return self.browser.title == expected_title
|
{
"content_hash": "e0c57aeb76d3884825624568297fb089",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 20.8,
"alnum_prop": 0.625,
"repo_name": "alex/warehouse",
"id": "13a00ef6b7d4e525106bbf8f14254757b9997e1f",
"size": "750",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/functional/pages/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "135448"
},
{
"name": "HTML",
"bytes": "95233"
},
{
"name": "JavaScript",
"bytes": "27705"
},
{
"name": "Makefile",
"bytes": "5817"
},
{
"name": "Mako",
"bytes": "1505"
},
{
"name": "Perl",
"bytes": "15498"
},
{
"name": "Python",
"bytes": "909699"
},
{
"name": "Shell",
"bytes": "4504"
}
],
"symlink_target": ""
}
|
from ...Deep.strings import StringCompiler
def strset(commands, data, node):
    """Compile the built-in strset function (set a character of a string)."""
    args = node.args.elements
    # Compile the value and index arguments first, stripping the type
    # marker after each, then the string argument itself.
    for position in (2, 1):
        args[position].compile_vm(commands, data)
        commands.clean_type()
    string_type = args[0].compile_vm(commands, data)
    commands.clean_type()
    StringCompiler.strset(commands, data, string_type)
|
{
"content_hash": "6f71d3d26bd5806b76d619c98087ecc0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 37.916666666666664,
"alnum_prop": 0.7142857142857143,
"repo_name": "PetukhovVictor/compiler",
"id": "b9aa0eddcaa6677bbb531ae578ca961a0f585d1e",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Compiler/VM/Codegen/statements/strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16410"
},
{
"name": "Python",
"bytes": "239647"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
}
|
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a flat extra_specs dictionary."""
    def construct(self):
        root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
        return xmlutil.MasterTemplate(root, 1)
class ExtraSpecTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single extra spec key/value pair."""
    def construct(self):
        selector = xmlutil.Selector(xmlutil.get_items, 0)
        elem = xmlutil.TemplateElement('extra_spec', selector=selector)
        elem.set('key', 0)
        elem.text = 1
        return xmlutil.MasterTemplate(elem, 1)
class FlavorExtraSpecsController(object):
    """The flavor extra specs API controller for the OpenStack API."""
    ALIAS = 'flavor-extra-specs'
    def __init__(self, *args, **kwargs):
        super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
        self.authorize = extensions.extension_authorizer('compute',
                                                         'v3:' + self.ALIAS)
    def _get_extra_specs(self, context, flavor_id):
        """Return the flavor's extra specs wrapped for the API response."""
        extra_specs = db.instance_type_extra_specs_get(context, flavor_id)
        return dict(extra_specs=extra_specs)
    def _check_body(self, body):
        """Reject requests that carry no body at all."""
        if body is None or body == "":
            expl = _('No Request Body')
            raise webob.exc.HTTPBadRequest(explanation=expl)
    @wsgi.serializers(xml=ExtraSpecsTemplate)
    def index(self, req, flavor_id):
        """Returns the list of extra specs for a given flavor."""
        context = req.environ['nova.context']
        self.authorize(context, action='index')
        return self._get_extra_specs(context, flavor_id)
    @wsgi.serializers(xml=ExtraSpecsTemplate)
    @wsgi.response(201)
    def create(self, req, flavor_id, body):
        """Create the extra specs given in the body for a flavor."""
        context = req.environ['nova.context']
        self.authorize(context, action='create')
        self._check_body(body)
        specs = body.get('extra_specs', {})
        # isinstance rather than an exact type comparison so that dict
        # subclasses deserialized from the request are also accepted.
        if not specs or not isinstance(specs, dict):
            raise webob.exc.HTTPBadRequest(_('No or bad extra_specs provided'))
        try:
            db.instance_type_extra_specs_update_or_create(context, flavor_id,
                                                          specs)
        except db_exc.DBDuplicateEntry as error:
            raise webob.exc.HTTPBadRequest(explanation=error.format_message())
        return body
    @wsgi.serializers(xml=ExtraSpecTemplate)
    def update(self, req, flavor_id, id, body):
        """Update a single extra spec; the body must contain only that key."""
        context = req.environ['nova.context']
        self.authorize(context, action='update')
        self._check_body(body)
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        if len(body) > 1:
            expl = _('Request body contains too many items')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        try:
            db.instance_type_extra_specs_update_or_create(context, flavor_id,
                                                          body)
        except db_exc.DBDuplicateEntry as error:
            raise webob.exc.HTTPBadRequest(explanation=error.format_message())
        return body
    @wsgi.serializers(xml=ExtraSpecTemplate)
    def show(self, req, flavor_id, id):
        """Return a single extra spec item."""
        context = req.environ['nova.context']
        self.authorize(context, action='show')
        try:
            extra_spec = db.instance_type_extra_specs_get_item(context,
                                                               flavor_id, id)
            return extra_spec
        except exception.InstanceTypeExtraSpecsNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
    @wsgi.response(204)
    @extensions.expected_errors(404)
    def delete(self, req, flavor_id, id):
        """Deletes an existing extra spec."""
        context = req.environ['nova.context']
        self.authorize(context, action='delete')
        try:
            db.instance_type_extra_specs_delete(context, flavor_id, id)
        except exception.InstanceTypeExtraSpecsNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FlavorsExtraSpecs(extensions.V3APIExtensionBase):
    """Flavors Extension."""
    name = 'FlavorsExtraSpecs'
    alias = FlavorExtraSpecsController.ALIAS
    namespace = "http://docs.openstack.org/compute/core/%s/v3" % alias
    version = 1
    def get_resources(self):
        """Expose the extra-specs controller as a child of flavors."""
        parent_spec = dict(member_name='flavor', collection_name='flavors')
        resource = extensions.ResourceExtension(
            self.alias,
            FlavorExtraSpecsController(),
            parent=parent_spec)
        return [resource]
    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
|
{
"content_hash": "38aa50fe248591014de58dfe32b81a6d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 39.48412698412698,
"alnum_prop": 0.6263316582914573,
"repo_name": "SUSE-Cloud/nova",
"id": "9ced7d037fca75be39f5fbf04d382c2f42135414",
"size": "5656",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable/havana",
"path": "nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13441452"
},
{
"name": "Shell",
"bytes": "20579"
}
],
"symlink_target": ""
}
|
import os
import unittest
import shutil
from django.conf import settings
from django.utils.safestring import mark_safe
from django_webpack.exceptions import BundlingError
from django_react import ReactComponent, ReactBundle, render_component
from django_react.exceptions import (
RenderingError, PropSerializationError, ReactComponentCalledDirectly, ReactComponentMissingSource,
SourceFileNotFound,
)
from django_react.settings import RENDERER
class HelloWorld(ReactComponent):
    """Fixture: JSX component used across the rendering tests."""
    source = 'components/HelloWorld.jsx'
class HelloWorldJS(ReactComponent):
    """Fixture: plain-JS variant of the HelloWorld component."""
    source = 'components/HelloWorld.js'
class ErrorThrowingComponent(ReactComponent):
    """Fixture: component whose source throws at render time."""
    source = 'components/ErrorThrowingComponent.jsx'
class SyntaxErrorComponent(ReactComponent):
    """Fixture: component whose source contains a syntax error."""
    source = 'components/SyntaxErrorComponent.jsx'
class TestDjangoReact(unittest.TestCase):
    """Functional tests for ReactComponent rendering, bundling and errors."""
    def tearDown(self):
        # Remove any bundles written to STATIC_ROOT during a test.
        if os.path.exists(settings.STATIC_ROOT):
            shutil.rmtree(settings.STATIC_ROOT)
    def test_react_component_cannot_be_called_directly(self):
        self.assertRaises(ReactComponentCalledDirectly, ReactComponent)
    def test_react_component_requires_source_attribute(self):
        # Subclasses must define either `source` or a valid `path_to_source`.
        class ComponentMissingSourceAttribute(ReactComponent):
            pass
        self.assertRaises(ReactComponentMissingSource, ComponentMissingSourceAttribute)
        class ComponentWithNonExistentSource(ReactComponent):
            source = 'some/missing/file.js'
        self.assertRaises(ReactComponentMissingSource, ComponentWithNonExistentSource)
        class ComponentWithNonExistentPathToSource(ReactComponent):
            path_to_source = '/some/missing/file.js'
        component = ComponentWithNonExistentPathToSource()
        self.assertRaises(SourceFileNotFound, component.render_to_static_markup)
    def test_can_render_a_react_component_in_jsx(self):
        component = HelloWorld()
        rendered = component.render_to_static_markup()
        expected = component.render_container(
            content=mark_safe('<span>Hello </span>')
        )
        self.assertEqual(rendered, expected)
    def test_can_render_a_react_component_in_js(self):
        component = HelloWorldJS()
        rendered = component.render_to_static_markup()
        expected = component.render_container(
            content=mark_safe('<span>Hello </span>')
        )
        self.assertEqual(rendered, expected)
    def test_can_render_a_react_component_with_props(self):
        component = HelloWorld(text='world!')
        rendered = component.render_to_static_markup()
        expected = component.render_container(
            content=mark_safe('<span>Hello world!</span>')
        )
        self.assertEqual(rendered, expected)
    def test_can_render_a_react_component_container(self):
        component = HelloWorld()
        rendered = component.render_container()
        self.assertEqual(rendered, '<div id="{0}" class="{1}"></div>'.format(
            component.get_container_id(),
            component.get_container_class_name(),
        ))
    def test_can_render_a_react_source_element(self):
        # The bundled filename carries a hash, so only check the fixed parts.
        component = HelloWorld()
        rendered = component.render_source()
        self.assertTrue(
            rendered.startswith('<script src="/static/components/HelloWorld-')
        )
        self.assertTrue(
            rendered.endswith('.js"></script>')
        )
    def test_can_override_a_components_source_url_generation(self):
        class TestComponent(HelloWorld):
            def get_url_to_source(self):
                return 'some/fake/file.js'
        component = TestComponent()
        rendered = component.render_source()
        self.assertEqual(
            rendered,
            '<script src="some/fake/file.js"></script>'
        )
    def test_component_js_rendering_errors_raise_an_exception(self):
        component = ErrorThrowingComponent()
        self.assertRaises(RenderingError, component.render_to_static_markup)
        self.assertRaises(RenderingError, component.render_to_string)
    def test_components_with_syntax_errors_raise_exceptions(self):
        component = SyntaxErrorComponent()
        self.assertRaises(RenderingError, component.render_to_static_markup)
        self.assertRaises(RenderingError, component.render_to_string)
        self.assertRaises(BundlingError, component.render_source)
    def test_unserializable_props_raise_an_exception(self):
        # `id` (a builtin function) cannot be serialized to JSON props.
        component = HelloWorld(text=id)
        self.assertRaises(PropSerializationError, component.get_serialized_props)
    def test_components_have_a_react_bundle(self):
        self.assertEqual(ReactComponent.bundle, ReactBundle)
    def test_render_component_has_similar_output_to_react_component_render_methods(self):
        component = HelloWorld()
        rendered = render_component(
            path_to_source=component.get_path_to_source(),
            to_static_markup=True
        )
        expected = component.render_to_static_markup(wrap=False)
        self.assertEqual(rendered, expected)
    def test_path_to_source_can_be_specified(self):
        # An absolute path_to_source is equivalent to a relative `source`.
        class ComponentWithPathToSource(ReactComponent):
            path_to_source = os.path.join(
                os.path.dirname(__file__),
                'components/HelloWorld.jsx'
            )
        component = ComponentWithPathToSource()
        self.assertEqual(component.path_to_source, component.get_path_to_source())
        self.assertEqual(
            component.render_to_static_markup(wrap=False),
            HelloWorld().render_to_static_markup(wrap=False)
        )
|
{
"content_hash": "9121178053cb8ef21eaf709cef73b204",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 102,
"avg_line_length": 38.56944444444444,
"alnum_prop": 0.6863521786100109,
"repo_name": "fanscribed/django-react",
"id": "03b67d07fd0bb7f62ceac53796136715d96943c2",
"size": "5554",
"binary": false,
"copies": "1",
"ref": "refs/heads/fs",
"path": "tests/test_functionality.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2765"
},
{
"name": "Python",
"bytes": "24318"
}
],
"symlink_target": ""
}
|
import ConfigLoader
import ConfigParser
'''
Loads values from config file
'''
config_file_parser = ConfigParser.ConfigParser()
config_file_parser.readfp(open(r'' + ConfigLoader.CONFIG_FILE))

# ******************** BASIC SETTINGS ***************
CENTRAL_WALLET = config_file_parser.get('Basic', 'CENTRAL_WALLET')
COINDAEMON_TRUSTED_HOST = config_file_parser.get('Basic', 'COINDAEMON_TRUSTED_HOST')
COINDAEMON_TRUSTED_PORT = config_file_parser.getint('Basic', 'COINDAEMON_TRUSTED_PORT')
COINDAEMON_TRUSTED_USER = config_file_parser.get('Basic', 'COINDAEMON_TRUSTED_USER')
COINDAEMON_TRUSTED_PASSWORD = config_file_parser.get('Basic', 'COINDAEMON_TRUSTED_PASSWORD')
COINDAEMON_ALGO = config_file_parser.get('Basic', 'COINDAEMON_ALGO')
COINDAEMON_Reward = config_file_parser.get('Basic', 'COINDAEMON_Reward')
COINDAEMON_TX_MSG = config_file_parser.getboolean('Basic', 'COINDAEMON_TX_MSG')
SCRYPTJANE_NAME = config_file_parser.get('Basic', 'SCRYPTJANE_NAME')
Tx_Message = config_file_parser.get('Basic', 'Tx_Message')
# Masternode Payments - Default to False if not set.
# Catch only "option absent or malformed" errors instead of a bare except,
# so genuine failures (e.g. I/O problems) are not silently swallowed.
try:
    MASTERNODE_PAYMENTS = config_file_parser.getboolean('Basic', 'MASTERNODE_PAYMENTS')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, ValueError):
    MASTERNODE_PAYMENTS = False
# ******************** GENERAL SETTINGS ***************
STRATUM_MINING_PROCESS_NAME = config_file_parser.get('General', 'STRATUM_MINING_PROCESS_NAME')
DEBUG = config_file_parser.getboolean('General', 'DEBUG')
LOGDIR = config_file_parser.get('General', 'LOGDIR')
LOGFILE = config_file_parser.get('General', 'LOGFILE')
LOGLEVEL = config_file_parser.get('General', 'LOGLEVEL')
LOG_ROTATION = config_file_parser.getboolean('General', 'LOG_ROTATION')
LOG_SIZE = config_file_parser.getint('General', 'LOG_SIZE')
LOG_RETENTION = config_file_parser.getint('General', 'LOG_RETENTION')
# ******************** SERVICE SETTINGS *********************
THREAD_POOL_SIZE = config_file_parser.getint('Service', 'THREAD_POOL_SIZE')
HOSTNAME = config_file_parser.get('Service', 'HOSTNAME')
LISTEN_SOCKET_TRANSPORT = config_file_parser.getint('Service', 'LISTEN_SOCKET_TRANSPORT')
PASSWORD_SALT = config_file_parser.get('Service', 'PASSWORD_SALT')
ADMIN_PASSWORD = config_file_parser.get('Service', 'ADMIN_PASSWORD')
ADMIN_RESTRICT_INTERFACE = config_file_parser.get('Service', 'ADMIN_RESTRICT_INTERFACE')
# Transports other than the raw socket are disabled by default.
ENABLE_EXAMPLE_SERVICE = True
LISTEN_HTTP_TRANSPORT = None
LISTEN_HTTPS_TRANSPORT = None
LISTEN_WS_TRANSPORT = None
LISTEN_WSS_TRANSPORT = None
IRC_NICK = None
# ******************** Database *********************
DATABASE_DRIVER = 'mysql'  # Options: none, sqlite, postgresql or mysql
DATABASE_EXTEND = True  # False = pushpool db layout, True = pushpool + extra columns
DB_SQLITE_FILE = 'pooldb.sqlite'
DB_PGSQL_HOST = 'localhost'
DB_PGSQL_DBNAME = 'pooldb'
DB_PGSQL_USER = 'pooldb'
DB_PGSQL_PASS = '**empty**'
DB_PGSQL_SCHEMA = 'public'
# MySQL
DB_MYSQL_HOST = config_file_parser.get('Database', 'DB_MYSQL_HOST')
DB_MYSQL_DBNAME = config_file_parser.get('Database', 'DB_MYSQL_DBNAME')
DB_MYSQL_USER = config_file_parser.get('Database', 'DB_MYSQL_USER')
DB_MYSQL_PASS = config_file_parser.get('Database', 'DB_MYSQL_PASS')
DB_MYSQL_PORT = config_file_parser.getint('Database', 'DB_MYSQL_PORT')
# ******************** Adv. DB Settings *********************
# Don't change these unless you know what you are doing
DB_LOADER_CHECKTIME = 15  # How often we check to see if we should run the loader
DB_LOADER_REC_MIN = 10  # Min Records before the bulk loader fires
DB_LOADER_REC_MAX = 50  # Max Records the bulk loader will commit at a time
DB_LOADER_FORCE_TIME = 300  # How often the cache should be flushed into the DB regardless of size.
DB_STATS_AVG_TIME = 300  # When using the DATABASE_EXTEND option, average speed over X sec # Note: this is also how often it updates
DB_USERCACHE_TIME = 600  # How long the usercache is good for before we refresh
DB_STATS_ENABLE = False  # Decides whether or not this process is responisble for updating pool statistics.
# When the share queue gets large, more threads will be swaned up to this amount
# Set to 1 to disable
try:
    DB_MAX_IMPORT_THREADS = config_file_parser.getint('Advanced', 'DB_MAX_IMPORT_THREADS')
    if DB_MAX_IMPORT_THREADS < 1:
        DB_MAX_IMPORT_THREADS = 1
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, ValueError):
    DB_MAX_IMPORT_THREADS = 3
# ******************** Adv. Pool Settings *********************
USERS_AUTOADD = True  # Automatically add users to db when they connect.
USERS_CHECK_PASSWORD = False  # Check the workers password? (Many pools don't)
COINBASE_EXTRAS = '/TidePool/'  # Extra Descriptive String to incorporate in solved blocks
ALLOW_NONLOCAL_WALLET = False  # Allow valid, but NON-Local wallet's
INSTANCE_ID = 31  # Used for extranonce and needs to be 0-31
NTIME_AGE = 7200  # Not a clue what this is for... :P (Sometimes is 1000)
# ******************** Pool Settings *********************
PREVHASH_REFRESH_INTERVAL = config_file_parser.getint('Pool', 'PREVHASH_REFRESH_INTERVAL')
MERKLE_REFRESH_INTERVAL = config_file_parser.getint('Pool', 'MERKLE_REFRESH_INTERVAL')
# ******************** Pool Difficulty Settings *********************
VDIFF_X2_TYPE = False  # powers of 2 e.g. 2,4,8,16,32,64,128,256,512,1024 (BROKEN)
USE_COINDAEMON_DIFF = False  # Set the maximum difficulty to the litecoin difficulty.
DIFF_UPDATE_FREQUENCY = 86400  # Update the litecoin difficulty once a day for the VARDIFF maximum
ALLOW_EXTERNAL_DIFFICULTY = config_file_parser.getboolean('Pool', 'EXTERNAL_DIFFICULTY')  # Allows direct difficulty updating via database
VDIFF_FLOAT = config_file_parser.getboolean('Pool', 'VDIFF_FLOAT')
VARIABLE_DIFF = config_file_parser.getboolean('Pool', 'VARIABLE_DIFF')
if VDIFF_FLOAT:
    # Floating Point Difficulty
    POOL_TARGET = config_file_parser.getfloat('Pool', 'POOL_TARGET')
    VDIFF_MIN_TARGET = config_file_parser.getfloat('Pool', 'VDIFF_MIN_TARGET')
    VDIFF_MAX_TARGET = config_file_parser.getfloat('Pool', 'VDIFF_MAX_TARGET')
    VDIFF_MIN_CHANGE = config_file_parser.getfloat('Pool', 'VDIFF_MIN_CHANGE')
else:
    # Integer
    POOL_TARGET = config_file_parser.getint('Pool', 'POOL_TARGET')
    VDIFF_MIN_TARGET = config_file_parser.getint('Pool', 'VDIFF_MIN_TARGET')
    VDIFF_MAX_TARGET = config_file_parser.getint('Pool', 'VDIFF_MAX_TARGET')
    VDIFF_MIN_CHANGE = config_file_parser.getint('Pool', 'VDIFF_MIN_CHANGE')
VDIFF_TARGET_TIME = config_file_parser.getint('Pool', 'VDIFF_TARGET_TIME')
VDIFF_RETARGET_TIME = config_file_parser.getint('Pool', 'VDIFF_RETARGET_TIME')
VDIFF_VARIANCE_PERCENT = config_file_parser.getint('Pool', 'VDIFF_VARIANCE_PERCENT')
#### Advanced Option #####
SOLUTION_BLOCK_HASH = config_file_parser.getboolean('Advanced', 'SOLUTION_BLOCK_HASH')
BLOCK_CHECK_ALGO_HASH = config_file_parser.getboolean('Advanced', 'BLOCK_CHECK_ALGO_HASH')
REJECT_STALE_SHARES = config_file_parser.getboolean('Advanced', 'REJECT_STALE_SHARES')
# Defaults to 20% if no value is set
try:
    MASTERNODE_PERCENT = config_file_parser.getint('Advanced', 'MASTERNODE_PERCENT')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, ValueError):
    MASTERNODE_PERCENT = 20
# Defaults to Yes if no value is set
try:
    SAVE_SHARES = config_file_parser.getboolean('Advanced', 'SAVE_SHARES')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, ValueError):
    SAVE_SHARES = True
# ******************** Stats Settings *********************
BASIC_STATS = False  # Enable basic stats page. This has stats for ALL users. (Unessesary)
BASIC_STATS_PORT = None  # Port to listen on
# ******************** Getwork Proxy Settings *********************
# DISABLED
# This enables a copy of slush's getwork proxy for old clients
# It will also auto-redirect new clients to the stratum interface
# so you can point ALL clients to: http://<yourserver>:<GW_PORT>
GW_ENABLE = False  # Enable the Proxy (If enabled you MUST run update_submodules)
GW_PORT = None  # Getwork Proxy Port
GW_DISABLE_MIDSTATE = False  # Disable midstate's (Faster but breaks some clients)
GW_SEND_REAL_TARGET = False  # Propigate >1 difficulty to Clients (breaks some clients)
# ******************** Archival Settings *********************
# Broken
ARCHIVE_SHARES = False  # Use share archiving?
ARCHIVE_DELAY = 86400  # Seconds after finding a share to archive all previous shares
ARCHIVE_MODE = 'file'  # Do we archive to a file (file) , or to a database table (db)
# Archive file options
ARCHIVE_FILE = 'archives/share_archive'  # Name of the archive file ( .csv extension will be appended)
ARCHIVE_FILE_APPEND_TIME = True  # Append the Date/Time to the end of the filename (must be true for bzip2 compress)
ARCHIVE_FILE_COMPRESS = 'none'  # Method to compress file (none,gzip,bzip2)
# ******************** Worker Ban Options *********************
# Unneeded
ENABLE_WORKER_BANNING = False  # enable/disable temporary worker banning
WORKER_CACHE_TIME = 600  # How long the worker stats cache is good before we check and refresh
WORKER_BAN_TIME = 300  # How long we temporarily ban worker
INVALID_SHARES_PERCENT = 50  # Allow average invalid shares vary this % before we ban
# ******************** E-Mail Notification Settings *********************
NOTIFY_EMAIL_TO = config_file_parser.get('Email', 'NOTIFY_EMAIL_TO')
NOTIFY_EMAIL_TO_DEADMINER = ''  # not used
NOTIFY_EMAIL_FROM = config_file_parser.get('Email', 'NOTIFY_EMAIL_FROM')
NOTIFY_EMAIL_SERVER = config_file_parser.get('Email', 'NOTIFY_EMAIL_SERVER')
NOTIFY_EMAIL_USERNAME = config_file_parser.get('Email', 'NOTIFY_EMAIL_USERNAME')
NOTIFY_EMAIL_PASSWORD = config_file_parser.get('Email', 'NOTIFY_EMAIL_PASSWORD')
NOTIFY_EMAIL_USETLS = config_file_parser.getboolean('Email', 'NOTIFY_EMAIL_USETLS')
#### Memcache ####
# Not Used
MEMCACHE_ENABLE = False
# Memcache is optional. Enter the settings below
MEMCACHE_HOST = "localhost"  # hostname or IP that runs memcached
MEMCACHE_PORT = 11211  # Port
MEMCACHE_TIMEOUT = 900  # Key timeout
MEMCACHE_PREFIX = "stratum_"  # Prefix for keys
# ******************** Admin settings *********************
# If ADMIN_PORT is set, you can issue commands to that port to interact with
# the system for things such as user management. It's a JSON interface following
# REST principles, so '/users' returns a list of users, '/users/1' or '/users/username'
# returns a single user. POSTs are done to lists (so /users), PUTs are done to
# items (so /users/1)
ADMIN_PORT = None  # Port for JSON admin commands, None to disable
|
{
"content_hash": "cdd58868745878d9d03ed6f5321fb949",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 138,
"avg_line_length": 48.595238095238095,
"alnum_prop": 0.7038706516413523,
"repo_name": "tuaris/TidePool",
"id": "e366dcf6a23089411f6cf2a46d2f704750d87f6a",
"size": "10205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "208135"
},
{
"name": "C",
"bytes": "8461481"
},
{
"name": "C++",
"bytes": "99163"
},
{
"name": "Makefile",
"bytes": "11057"
},
{
"name": "Python",
"bytes": "1059203"
},
{
"name": "Shell",
"bytes": "4838"
}
],
"symlink_target": ""
}
|
from sqlalchemy import event as sqla_event
from flask.signals import Namespace
from .models import (
Organization,
Team,
User,
UserEmail,
UserEmailClaim,
UserPhone,
UserPhoneClaim,
)
lastuser_signals = Namespace()

# Model lifecycle signals, one new/edited/deleted triple per model.
model_user_new = lastuser_signals.signal('model-user-new')
model_user_edited = lastuser_signals.signal('model-user-edited')
model_user_deleted = lastuser_signals.signal('model-user-deleted')

model_org_new = lastuser_signals.signal('model-org-new')
model_org_edited = lastuser_signals.signal('model-org-edited')
model_org_deleted = lastuser_signals.signal('model-org-deleted')

model_team_new = lastuser_signals.signal('model-team-new')
model_team_edited = lastuser_signals.signal('model-team-edited')
model_team_deleted = lastuser_signals.signal('model-team-deleted')

model_useremail_new = lastuser_signals.signal('model-useremail-new')
model_useremail_edited = lastuser_signals.signal('model-useremail-edited')
model_useremail_deleted = lastuser_signals.signal('model-useremail-deleted')

# BUG FIX: the four groups below previously reused the 'model-useremail-*'
# signal names. blinker's Namespace.signal() returns the *same* signal object
# for the same name, so e.g. model_userphone_new was an alias of
# model_useremail_new and subscribers received cross-model notifications.
# Each group now has its own distinct name.
model_useremailclaim_new = lastuser_signals.signal('model-useremailclaim-new')
model_useremailclaim_edited = lastuser_signals.signal('model-useremailclaim-edited')
model_useremailclaim_deleted = lastuser_signals.signal('model-useremailclaim-deleted')

model_userphone_new = lastuser_signals.signal('model-userphone-new')
model_userphone_edited = lastuser_signals.signal('model-userphone-edited')
model_userphone_deleted = lastuser_signals.signal('model-userphone-deleted')

model_userphoneclaim_new = lastuser_signals.signal('model-userphoneclaim-new')
model_userphoneclaim_edited = lastuser_signals.signal('model-userphoneclaim-edited')
model_userphoneclaim_deleted = lastuser_signals.signal('model-userphoneclaim-deleted')

resource_access_granted = lastuser_signals.signal('resource-access-granted')

# Higher level signals
user_login = lastuser_signals.signal('user-login')
user_registered = lastuser_signals.signal('user-registered')
user_data_changed = lastuser_signals.signal('user-data-changed')
org_data_changed = lastuser_signals.signal('org-data-changed')
team_data_changed = lastuser_signals.signal('team-data-changed')
session_revoked = lastuser_signals.signal('session-revoked')
# SQLAlchemy mapper-event bridges: each after_insert/after_update/after_delete
# hook forwards the affected model instance into the corresponding blinker
# signal, so application code can subscribe to model changes without touching
# SQLAlchemy directly. The (mapper, connection, target) signature is fixed by
# SQLAlchemy's mapper-event API; only `target` (the model instance) is used.
@sqla_event.listens_for(User, 'after_insert')
def _user_new(mapper, connection, target):
    model_user_new.send(target)
@sqla_event.listens_for(User, 'after_update')
def _user_edited(mapper, connection, target):
    model_user_edited.send(target)
@sqla_event.listens_for(User, 'after_delete')
def _user_deleted(mapper, connection, target):
    model_user_deleted.send(target)
# Organization lifecycle.
@sqla_event.listens_for(Organization, 'after_insert')
def _org_new(mapper, connection, target):
    model_org_new.send(target)
@sqla_event.listens_for(Organization, 'after_update')
def _org_edited(mapper, connection, target):
    model_org_edited.send(target)
@sqla_event.listens_for(Organization, 'after_delete')
def _org_deleted(mapper, connection, target):
    model_org_deleted.send(target)
# Team lifecycle.
@sqla_event.listens_for(Team, 'after_insert')
def _team_new(mapper, connection, target):
    model_team_new.send(target)
@sqla_event.listens_for(Team, 'after_update')
def _team_edited(mapper, connection, target):
    model_team_edited.send(target)
@sqla_event.listens_for(Team, 'after_delete')
def _team_deleted(mapper, connection, target):
    model_team_deleted.send(target)
# UserEmail lifecycle.
@sqla_event.listens_for(UserEmail, 'after_insert')
def _useremail_new(mapper, connection, target):
    model_useremail_new.send(target)
@sqla_event.listens_for(UserEmail, 'after_update')
def _useremail_edited(mapper, connection, target):
    model_useremail_edited.send(target)
@sqla_event.listens_for(UserEmail, 'after_delete')
def _useremail_deleted(mapper, connection, target):
    model_useremail_deleted.send(target)
# UserEmailClaim lifecycle.
@sqla_event.listens_for(UserEmailClaim, 'after_insert')
def _useremailclaim_new(mapper, connection, target):
    model_useremailclaim_new.send(target)
@sqla_event.listens_for(UserEmailClaim, 'after_update')
def _useremailclaim_edited(mapper, connection, target):
    model_useremailclaim_edited.send(target)
@sqla_event.listens_for(UserEmailClaim, 'after_delete')
def _useremailclaim_deleted(mapper, connection, target):
    model_useremailclaim_deleted.send(target)
# UserPhone lifecycle.
@sqla_event.listens_for(UserPhone, 'after_insert')
def _userphone_new(mapper, connection, target):
    model_userphone_new.send(target)
@sqla_event.listens_for(UserPhone, 'after_update')
def _userphone_edited(mapper, connection, target):
    model_userphone_edited.send(target)
@sqla_event.listens_for(UserPhone, 'after_delete')
def _userphone_deleted(mapper, connection, target):
    model_userphone_deleted.send(target)
# UserPhoneClaim lifecycle.
@sqla_event.listens_for(UserPhoneClaim, 'after_insert')
def _userphoneclaim_new(mapper, connection, target):
    model_userphoneclaim_new.send(target)
@sqla_event.listens_for(UserPhoneClaim, 'after_update')
def _userphoneclaim_edited(mapper, connection, target):
    model_userphoneclaim_edited.send(target)
@sqla_event.listens_for(UserPhoneClaim, 'after_delete')
def _userphoneclaim_deleted(mapper, connection, target):
    model_userphoneclaim_deleted.send(target)
|
{
"content_hash": "05188915f4ce8083362d0baea5b84ecc",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 81,
"avg_line_length": 32.721518987341774,
"alnum_prop": 0.7665377176015474,
"repo_name": "hasgeek/lastuser",
"id": "bf27dd153f5b224f51a5c5dd50622fee7631552c",
"size": "5195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lastuser_core/signals.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2937"
},
{
"name": "Gherkin",
"bytes": "841"
},
{
"name": "HTML",
"bytes": "49341"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "545882"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "1251"
}
],
"symlink_target": ""
}
|
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.

Data record for one term of the Chinese sexagenary (ganzhi) cycle.
'''
NAME='wushen41'  # internal identifier (matches the module name)
SPELL='wùshēn'  # pinyin romanization
CN='戊申'  # Chinese characters: heavenly stem wu + earthly branch shen
SEQ='45'  # position in the cycle — presumably 1-based within the 60 terms; TODO confirm vs the '41' suffix in NAME
if __name__=='__main__':
    pass
|
{
"content_hash": "edafec2362f5a5533f1f4537fe1c84fe",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 14.76923076923077,
"alnum_prop": 0.671875,
"repo_name": "sinotradition/sinoera",
"id": "777ceda8e8d960f73b3c8aeadd6bb26d1c299772",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sinoera/ganzhi/wushen41.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74484"
}
],
"symlink_target": ""
}
|
from glob import glob
from PIL import Image
from os import rename
import sqlite3 as db
# Refresh the local media database: scan res/<folder>/<file> for images and
# register each readable one in the sqlite `media` table.
con = db.connect('wolfe.db')
# Create the media table if it does not exist.
con.execute("""CREATE TABLE IF NOT EXISTS "media" (
	`id`	INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
	`tg_id`	TEXT,
	`path`	INTEGER NOT NULL UNIQUE,
	`filetype`	TEXT NOT NULL,
	`approved`	INTEGER NOT NULL DEFAULT 1,
	`type`	TEXT NOT NULL DEFAULT 'yiff',
	`check`	INTEGER NOT NULL DEFAULT 1
	);""")
con.commit()
RES_FOLDER = 'res/'
data = []
count = 0
# For every file in every subdirectory of res/, try to open it and add it to
# the database if it can be opened.
for f in glob('%s*/*.*' % RES_FOLDER):
    fn = f[len(RES_FOLDER):]
    folder, file_name = fn.split('/')
    file_name, file_type = file_name.rsplit('.', 1)
    if file_type not in ['jpg', 'jpeg', 'png', 'gif']:
        # BUG FIX: this message previously sat outside the skip branch, so it
        # never labelled the actually-skipped files. Report the skip, then
        # move on.
        print('Skipping %s' % fn)
        continue
    print(folder, file_name, file_type)
    try:
        # Opened purely to validate that PIL can read the file; close the
        # handle promptly instead of leaking it until GC.
        im = Image.open(f)
        im.close()
    except Exception:  # narrowed from a bare `except:` so Ctrl-C still works
        print('Can\'t open %s' % fn)
        rename(f, '%s_broken' % f)
        continue
    data += [(fn, file_type)]
    count += 1
# One batched insert; IGNORE keeps already-registered paths (UNIQUE) intact.
con.executemany("INSERT OR IGNORE INTO media (path, filetype) VALUES (?,?)", data)
con.commit()
print('%s images' % count)
|
{
"content_hash": "33ba00f47acfec2c8580b3ae5c6f3553",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 109,
"avg_line_length": 23.137254901960784,
"alnum_prop": 0.6610169491525424,
"repo_name": "TNTINC/WolfeBot",
"id": "b25e23f0d7301caecd6b238b037243954e8c31f4",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediarefresh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11079"
}
],
"symlink_target": ""
}
|
import cPickle as pickle
from smart import *
import sys, os
import copy
import re
# Unique sentinel distinguishing "no value supplied" from a legitimate None.
NOTHING = object()
class SysConfig(object):
"""System configuration class.
It has three different kinds of opition maps, regarding the
persistence and priority that maps are queried.
hard - Options are persistent.
soft - Options are not persistent, and have a higher priority
than persistent options.
weak - Options are not persistent, and have a lower priority
than persistent options.
"""
def __init__(self, root=()):
self._hardmap = {}
self._softmap = {}
self._weakmap = {}
self._readonly = False
self._modified = False
self._config = self
def getReadOnly(self):
return self._readonly
def setReadOnly(self, flag):
self._readonly = flag
def getModified(self):
return self._modified
def resetModified(self):
self._modified = False
def assertWritable(self):
if self._readonly:
raise Error, _("Configuration is in readonly mode.")
def load(self, filepath):
filepath = os.path.expanduser(filepath)
if not os.path.isfile(filepath):
raise Error, _("File not found: %s") % filepath
if os.path.getsize(filepath) == 0:
return
file = open(filepath)
self._hardmap.clear()
try:
self._hardmap.update(pickle.load(file))
except:
filepathold = filepath+".old"
if (os.path.isfile(filepathold) and
os.path.getsize(filepathold) > 0):
iface.warning(_("Broken configuration file at %s") % filepath)
iface.warning(_("Trying backup at %s") % filepathold)
file.close()
file = open(filepathold)
try:
self._hardmap.update(pickle.load(file))
except:
raise Error, _("Broken configuration file at %s") % \
filepathold
else:
raise Error, _("Broken configuration file at %s") % \
filepath
file.close()
def save(self, filepath):
filepath = os.path.expanduser(filepath)
if os.path.isfile(filepath):
os.rename(filepath, filepath+".old")
dirname = os.path.dirname(filepath)
if not os.path.isdir(dirname):
os.makedirs(dirname)
file = open(filepath, "w")
pickle.dump(self._hardmap, file, 2)
file.close()
def _traverse(self, obj, path, default=NOTHING, setvalue=NOTHING):
queue = list(path)
marker = NOTHING
newobj = obj
while queue:
obj = newobj
elem = queue.pop(0)
if type(obj) is dict:
newobj = obj.get(elem, marker)
elif type(obj) in (tuple, list):
if type(elem) is int:
try:
newobj = obj[elem]
except IndexError:
newobj = marker
elif elem in obj:
newobj = elem
else:
newobj = marker
else:
if queue:
path = path[:-len(queue)]
raise Error, "Can't traverse %s (%s): %s" % \
(type(obj), pathTupleToString(path), str(obj))
if newobj is marker:
break
if newobj is not marker:
if setvalue is not marker:
newobj = obj[elem] = setvalue
else:
if setvalue is marker:
newobj = default
else:
while True:
if len(queue) > 0:
if type(queue[0]) is int:
newvalue = []
else:
newvalue = {}
else:
newvalue = setvalue
if type(obj) is dict:
newobj = obj[elem] = newvalue
elif type(obj) is list and type(elem) is int:
lenobj = len(obj)
if lenobj <= elem:
obj.append(None)
elem = lenobj
elif elem < 0 and abs(elem) > lenobj:
obj.insert(0, None)
elem = 0
newobj = obj[elem] = newvalue
else:
raise Error, "Can't traverse %s with %s" % \
(type(obj), type(elem))
if not queue:
break
obj = newobj
elem = queue.pop(0)
return newobj
def _getvalue(self, path, soft=False, hard=False, weak=False):
if type(path) is str:
path = pathStringToTuple(path)
marker = NOTHING
if soft:
value = self._traverse(self._softmap, path, marker)
elif hard:
value = self._traverse(self._hardmap, path, marker)
elif weak:
value = self._traverse(self._weakmap, path, marker)
else:
value = self._traverse(self._softmap, path, marker)
if value is marker:
value = self._traverse(self._hardmap, path, marker)
if value is marker:
value = self._traverse(self._weakmap, path, marker)
return value
def has(self, path, value=NOTHING, soft=False, hard=False, weak=False):
obj = self._getvalue(path, soft, hard, weak)
marker = NOTHING
if obj is marker:
return False
elif value is marker:
return True
elif type(obj) in (dict, list):
return value in obj
else:
raise Error, "Can't check %s for containment" % type(obj)
def keys(self, path, soft=False, hard=False, weak=False):
value = self._getvalue(path, soft, hard, weak)
if value is NOTHING:
return []
if type(value) is dict:
return value.keys()
elif type(value) is list:
return range(len(value))
else:
raise Error, "Can't return keys for %s" % type(value)
def get(self, path, default=None, soft=False, hard=False, weak=False):
value = self._getvalue(path, soft, hard, weak)
if value is NOTHING:
return default
if type(value) in (dict, list):
return copy.deepcopy(value)
return value
def set(self, path, value, soft=False, weak=False):
assert path
if type(path) is str:
path = pathStringToTuple(path)
if soft:
map = self._softmap
elif weak:
map = self._weakmap
else:
self.assertWritable()
self._modified = True
map = self._hardmap
self._traverse(map, path, setvalue=value)
def add(self, path, value, unique=False, soft=False, weak=False):
assert path
if type(path) is str:
path = pathStringToTuple(path)
if soft:
map = self._softmap
elif weak:
map = self._weakmap
else:
self.assertWritable()
self._modified = True
map = self._hardmap
if unique:
current = self._traverse(map, path)
if type(current) is list and value in current:
return
path = path+(sys.maxint,)
self._traverse(map, path, setvalue=value)
def remove(self, path, value=NOTHING, soft=False, weak=False):
assert path
if type(path) is str:
path = pathStringToTuple(path)
if soft:
map = self._softmap
elif weak:
map = self._weakmap
else:
self.assertWritable()
self._modified = True
map = self._hardmap
marker = NOTHING
while path:
if value is marker:
obj = self._traverse(map, path[:-1])
elem = path[-1]
else:
obj = self._traverse(map, path)
elem = value
result = False
if obj is marker:
pass
elif type(obj) is dict:
if elem in obj:
del obj[elem]
result = True
elif type(obj) is list:
if value is marker and type(elem) is int:
try:
del obj[elem]
result = True
except IndexError:
pass
elif elem in obj:
obj[:] = [x for x in obj if x != elem]
result = True
else:
raise Error, "Can't remove %s from %s" % \
(`elem`, type(obj))
if not obj:
if value is not marker:
value = marker
else:
path = path[:-1]
else:
break
return result
def move(self, oldpath, newpath, soft=False, weak=False):
if type(oldpath) is str:
oldpath = pathStringToTuple(oldpath)
if type(newpath) is str:
newpath = pathStringToTuple(newpath)
result = False
marker = NOTHING
value = self._getvalue(oldpath, soft, not (soft or weak), weak)
if value is not marker:
self.remove(oldpath, soft=soft, weak=weak)
self.set(newpath, value, weak, soft)
result = True
return result
# Splits a dotted path string on "[N]" index tokens and on unescaped dots.
SPLITPATH = re.compile(r"(\[-?\d+\])|(?<!\\)\.").split
def pathStringToTuple(path):
    """Parse a dotted path string like "a.b[0].c" into a tuple of elements.

    String elements are dict keys (backslash-escaped dots become literal
    dots); "[N]" tokens become int list indexes. Inverse of
    pathTupleToString().
    """
    # Fast path: no separators at all, the whole string is one element.
    if "." not in path and "[" not in path:
        return (path,)
    result = []
    tokens = SPLITPATH(path)
    for token in tokens:
        # re.split yields '' pieces and None for the non-capturing
        # alternative; `if token` filters both out.
        if token:
            if token[0] == "[" and token[-1] == "]":
                try:
                    result.append(int(token[1:-1]))
                except ValueError:
                    raise Error, "Invalid path index: %s" % token
            else:
                result.append(token.replace(r"\.", "."))
    return tuple(result)
def pathTupleToString(path):
    """Render a path tuple back into its dotted-string form.

    Inverse of pathStringToTuple(): string elements are joined with dots
    (literal dots escaped with a backslash); int elements become "[N]"
    index suffixes attached to the preceding element.
    """
    result = []
    for elem in path:
        if type(elem) is int:
            # BUG FIX: an index at the very start of the path (e.g. (0, 'a'))
            # previously raised IndexError on result[-1]; emit it as its own
            # leading token instead, which pathStringToTuple round-trips.
            if result:
                result[-1] += "[%d]" % elem
            else:
                result.append("[%d]" % elem)
        else:
            result.append(str(elem).replace(".", "\\."))
    return ".".join(result)
# vim:ts=4:sw=4:et
|
{
"content_hash": "d2628867d87857a161c5b60d4b136973",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 78,
"avg_line_length": 33.16459627329193,
"alnum_prop": 0.4811311920591816,
"repo_name": "blackPantherOS/packagemanagement",
"id": "23d061ee69877934812fa2f4f2f762ebbd5e30c9",
"size": "11554",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "smartpm/smart/sysconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "605504"
},
{
"name": "C++",
"bytes": "65879"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "HTML",
"bytes": "17187"
},
{
"name": "M4",
"bytes": "170666"
},
{
"name": "Makefile",
"bytes": "5031"
},
{
"name": "Perl",
"bytes": "311801"
},
{
"name": "Prolog",
"bytes": "5458"
},
{
"name": "Python",
"bytes": "2250512"
},
{
"name": "Roff",
"bytes": "1805"
},
{
"name": "Shell",
"bytes": "283804"
},
{
"name": "XSLT",
"bytes": "312"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
# Flask blueprint for the search feature; registered by the application factory.
searcher = Blueprint('searcher', __name__)
# Imported after the blueprint exists so the views module can import
# `searcher` without a circular-import error. NOTE(review): this is a
# Python 2 implicit relative import; Py3 would need `from . import views`.
import views
|
{
"content_hash": "aaab6d99180aa3129c42ac5f26b105fb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 17.2,
"alnum_prop": 0.7441860465116279,
"repo_name": "stultus/PeARS",
"id": "6c18bb1d7a19e83f00e80905af4c1f87118bb8fc",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "pears/searcher/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8236"
},
{
"name": "HTML",
"bytes": "4793"
},
{
"name": "Python",
"bytes": "71427"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
__author__ = 'Greg Albrecht <gba@splunk.com>, C. Scott Andreas <s@boundary.com>, and Clint Sharp'
__copyright__ = 'Copyright 2013 Boundary and Splunk, Inc.'
__license__ = 'Apache License 2.0'
import os
import csv
import time
import json
import base64
import urllib2
import logging
import ConfigParser
logger = logging.getLogger(__name__)
class Boundary():
    """Pulls application/meter topology data from the Boundary REST API
    and writes it out as CSV lookup files (for consumption by Splunk,
    judging by the $SPLUNK_HOME handling)."""
    def __init__(self, config_file):
        # Read [boundary] settings from an ini-style config file.
        config = ConfigParser.ConfigParser()
        config.readfp(open(config_file))
        logging.basicConfig(level=logging.INFO)
        # API config
        api_key = config.get('boundary', 'api_key')
        self.org_id = config.get('boundary', 'org_id')
        self.api_base = config.get('boundary', 'api_base')
        # HTTP Basic auth header built from "<api_key>:" (empty password);
        # [:-1] strips encodestring's trailing newline.
        self.auth = "Basic %s" % base64.encodestring('%s:' % (api_key))[:-1]
        # Output config
        self.app_map_output = config.get('boundary', 'app_map_output')
        self.host_to_app_map_output = config.get('boundary', 'host_to_app_map_output')
        self.meter_info_output = config.get('boundary', 'meter_info_output')
        self.app_to_app_output = config.get('boundary', 'app_to_app_output')
        # Expand $SPLUNK_HOME placeholders when running inside Splunk.
        if 'SPLUNK_HOME' in os.environ:
            self.app_map_output = self.app_map_output.replace('$SPLUNK_HOME', os.environ['SPLUNK_HOME'])
            self.host_to_app_map_output = self.host_to_app_map_output.replace('$SPLUNK_HOME', os.environ['SPLUNK_HOME'])
            self.meter_info_output = self.meter_info_output.replace('$SPLUNK_HOME', os.environ['SPLUNK_HOME'])
            self.app_to_app_output = self.app_to_app_output.replace('$SPLUNK_HOME', os.environ['SPLUNK_HOME'])
    # Fetch data from an API endpoint with auth and return the parsed JSON.
    def get(self, url):
        logger.info("Fetching %s" % url)
        req = urllib2.Request(self.api_base + url)
        req.add_header("Authorization", self.auth)
        return json.loads(urllib2.urlopen(req).read())
    # Load data from the Boundary Meters API
    def get_meters(self):
        return self.get("/%s/meters" % self.org_id)
    # Load data from the Boundary Applications API
    def get_applications(self):
        return self.get("/%s/applications" % self.org_id)
    def get_convo_graph(self):
        """Fetch the app-to-app conversation graph state."""
        return self.get("/%s/query_state/conversation_graph" % self.org_id)
    def get_saved_search(self, saved_search):
        """Fetch up to 100 results for a saved search; None when missing."""
        try:
            return self.get("/%s/searches/%s/results?rows=100" % (self.org_id, saved_search))
        except urllib2.HTTPError, e:
            logger.warn("Unable to find saved search %s" % saved_search)
            return None
    # Builds a map of hosts => apps on them.
    def build_host_to_app_map(self, apps, meter_info):
        # meter_info maps int obs_domain_id -> meter hostname (built in run()).
        hosts = {}
        for app in apps:
            # An app's members come either from an explicit meter list or
            # from a saved search over entities.
            app_nodes = app['flowProfile']['filter'].get('meters')
            saved_search = app['flowProfile']['filter'].get('saved_search')
            named_app_nodes = []
            if (app_nodes):
                for node in app_nodes:
                    if (node in meter_info):
                        named_app_nodes.append(meter_info[node])
                    else:
                        logger.debug("Couldn't find node %s in meter list", node)
            if (saved_search):
                entities = self.get_saved_search(saved_search)
                if (entities is not None and entities.get("entities")):
                    for entity in entities.get("entities"):
                        named_app_nodes.append(meter_info[int(entity.get("body").get("obs_domain_id"))])
                logger.debug("Entities for %s: %s", saved_search, repr(entities))
            for host in named_app_nodes:
                # Emit both the FQDN and the short hostname, '*'-suffixed —
                # presumably for Splunk wildcard lookup matching; TODO confirm.
                host_no_domain = host.split('.')[0] + '*'
                host = host + '*'
                apps_for_host = hosts.get(host, [])
                apps_for_host.append(app['name'])
                hosts[host] = apps_for_host
                hosts[host_no_domain] = apps_for_host
        return hosts
    # Materalize a list of applications into the list of servers hosting them.
    def build_app_map(self, apps, meter_info):
        app_map = {}
        for app in apps:
            # Same member-resolution logic as build_host_to_app_map above.
            app_nodes = app['flowProfile']['filter'].get('meters')
            saved_search = app['flowProfile']['filter'].get('saved_search')
            named_app_nodes = []
            if (app_nodes):
                for node in app_nodes:
                    if (node in meter_info):
                        named_app_nodes.append(meter_info[node])
                    else:
                        logger.debug("Couldn't find node %s in meter list", node)
            if (saved_search):
                entities = self.get_saved_search(saved_search)
                if (entities is not None and entities.get("entities")):
                    for entity in entities.get("entities"):
                        named_app_nodes.append(meter_info[int(entity.get("body").get("obs_domain_id"))])
                logger.debug("Entities for %s: %s", saved_search, repr(entities))
            app_map[app['name']] = {'nodes': named_app_nodes, 'id': app['flowProfile']['id']}
        return app_map
    # Write meter info to a CSV file
    def write_meter_info(self, meter_info):
        # NOTE(review): meter_info here is the raw meters list from the API,
        # not the id->name dict used by the build_* methods above.
        # NOTE(review): the open() handle passed to csv.writer is never
        # closed explicitly; relies on GC/interpreter exit (same in the
        # other write_* methods).
        logger.info("Writing meter info to %s" % self.meter_info_output)
        c = csv.writer(open(self.meter_info_output, 'wb'))
        c.writerow(['host', 'obs_dom_id', 'meter_id', 'export_ip', 'os', 'tags'])
        for m in meter_info:
            # One row for the FQDN and one for the short hostname.
            c.writerow([m['name']+'*', m['obs_domain_id'], m['id'], m.get('export_address'), \
                m.get('os_distribution_name'), ','.join(m['tags'])])
            c.writerow([m['name'].split('.')[0]+'*', m['obs_domain_id'], m['id'], m.get('export_address'), \
                m.get('os_distribution_name'), ','.join(m['tags'])])
    # Write the app map to a CSV file
    def write_app_map(self, app_map):
        logger.info("Writing app topology to %s" % self.app_map_output)
        c = csv.writer(open(self.app_map_output, 'wb'))
        c.writerow(['app_name', 'conversation_id', 'hosts'])
        for app_name, info_dict in app_map.iteritems():
            c.writerow([app_name, info_dict.get('id'), ','.join(info_dict.get('nodes'))])
    # Write the host to app map to a CSV file
    def write_host_to_app_map(self, app_map):
        logger.info("Writing host to app topology to %s" % self.host_to_app_map_output)
        c = csv.writer(open(self.host_to_app_map_output, 'wb'))
        c.writerow(['host', 'app_names'])
        for hostname, app_list in app_map.iteritems():
            c.writerow([hostname, ','.join(app_list)])
    def write_app_to_app(self, apps, convo_graph):
        """Write per-conversation traffic observations to a CSV file."""
        logger.info("Writing app-to-app data to %s" % self.app_to_app_output)
        c = csv.writer(open(self.app_to_app_output, 'wb'))
        c.writerow(['ts', 'client_app', 'server_app', 'ingress_bytes', 'ingress_packets', \
            'egress_bytes', 'egress_packets', 'rtt', 'handshake_rtt', 'out_of_order', 'retransmits'])
        for obs in convo_graph['observations']:
            c.writerow([time.time(), obs['client'], obs['server'], obs['traffic']['ingressOctets'], \
                obs['traffic']['ingressPackets'], obs['traffic']['egressOctets'], \
                obs['traffic']['egressPackets'], obs['traffic']['appRttUsec'], \
                obs['traffic']['handshakeRttUsec'], obs['traffic']['outOfOrder'], \
                obs['traffic']['retransmits']])
    # Kick things off!
    def run(self):
        """Fetch everything from the API and write all four CSV outputs."""
        apps = self.get_applications()
        meters = self.get_meters()
        convo_graph = self.get_convo_graph()
        # Index meters by observation domain id for the build_* helpers.
        meter_info = {}
        for m in meters:
            meter_info[int(m['obs_domain_id'])] = m['name']
        app_map = self.build_app_map(apps, meter_info)
        host_to_app_map = self.build_host_to_app_map(apps, meter_info)
        self.write_app_map(app_map)
        self.write_host_to_app_map(host_to_app_map)
        self.write_meter_info(meters)
        self.write_app_to_app(apps, convo_graph)
        logger.info('Done!')
if __name__ == '__main__':
    # Resolve the config path: inside Splunk use the app's local config,
    # otherwise fall back to a relative path for standalone runs.
    if 'SPLUNK_HOME' in os.environ:
        path = os.environ['SPLUNK_HOME']+'/etc/apps/boundary/local/boundary.conf'
    else:
        path = '../local/boundary.conf'
    Boundary(path).run()
|
{
"content_hash": "6b52200ab36daa13f42177a9a76fb458",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 120,
"avg_line_length": 42.57948717948718,
"alnum_prop": 0.570757557509334,
"repo_name": "boundary/boundary-event-plugins",
"id": "eda4d3f8a16dbef27fd9a3184d100d7ea321138b",
"size": "8910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splunk/bin/fetch.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15844"
},
{
"name": "Java",
"bytes": "7239"
},
{
"name": "Python",
"bytes": "45831"
},
{
"name": "Ruby",
"bytes": "32476"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the csyllabusapi app.

    Creates the CourseFaculty and CourseUniversity join tables, renames
    Course.winsum to Course.level, adds Course.keywords / Course.url, and
    wires up the foreign keys for the new join tables. Do not edit applied
    migrations by hand.
    """
    dependencies = [
        ('csyllabusapi', '0014_courseresult'),
    ]
    operations = [
        migrations.CreateModel(
            name='CourseFaculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(editable=False)),
            ],
        ),
        migrations.CreateModel(
            name='CourseUniversity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(editable=False)),
            ],
        ),
        migrations.RenameField(
            model_name='course',
            old_name='winsum',
            new_name='level',
        ),
        migrations.AddField(
            model_name='course',
            name='keywords',
            field=models.TextField(default=' '),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='course',
            name='url',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='courseuniversity',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='csyllabusapi.Course'),
        ),
        migrations.AddField(
            model_name='courseuniversity',
            name='university',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='csyllabusapi.University'),
        ),
        migrations.AddField(
            model_name='coursefaculty',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='csyllabusapi.Course'),
        ),
        migrations.AddField(
            model_name='coursefaculty',
            name='faculty',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='csyllabusapi.Faculty'),
        ),
    ]
|
{
"content_hash": "ca23bf414d7ef90885a6abec1f7456ee",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 114,
"avg_line_length": 34.71875,
"alnum_prop": 0.5675067506750675,
"repo_name": "CSyllabus/webapp",
"id": "b852dca71ac8b62213e7743ba11defaf1543014c",
"size": "2295",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "backend/apps/csyllabusapi/migrations/0015_auto_20171209_2340.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "946"
},
{
"name": "CSS",
"bytes": "231277"
},
{
"name": "HTML",
"bytes": "95429"
},
{
"name": "JavaScript",
"bytes": "913374"
},
{
"name": "PHP",
"bytes": "2280"
},
{
"name": "Python",
"bytes": "313702"
},
{
"name": "Shell",
"bytes": "1341"
},
{
"name": "TypeScript",
"bytes": "235488"
}
],
"symlink_target": ""
}
|
# Definition for a singly-linked list node.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    """LeetCode 109: convert a sorted linked list to a height-balanced BST."""

    def sortedListToBST(self, head):
        """
        :type head: ListNode
        :rtype: TreeNode
        """
        # Flatten to an array once, then build the tree recursively.
        return self.sortedArrayToBST(self.sortedListToArray(head))

    def sortedListToArray(self, head):
        """Collect the list's values, in order, into a Python list."""
        values = []
        node = head
        while node is not None:
            values.append(node.val)
            node = node.next
        return values

    def sortedArrayToBST(self, nums):
        """Build a balanced BST from a sorted array (None when empty)."""
        return self.helper(nums, 0, len(nums) - 1) if nums else None

    def helper(self, nums, lo, hi):
        """Recursively root each subtree at the middle of nums[lo:hi+1]."""
        if lo > hi:
            return None
        mid = (lo + hi) // 2
        root = TreeNode(nums[mid])
        root.left = self.helper(nums, lo, mid - 1)
        root.right = self.helper(nums, mid + 1, hi)
        return root
|
{
"content_hash": "db03b5d7a2e9da3291767bccbcc9a82b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 51,
"avg_line_length": 26.023809523809526,
"alnum_prop": 0.5397987191216834,
"repo_name": "rx2130/Leetcode",
"id": "c2cba4787c566da695d435d3e751f5da23147b03",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/109 Convert Sorted List to Binary Search Tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277012"
}
],
"symlink_target": ""
}
|
"""
sentry.db.models.fields.node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import collections
import logging
import six
import warnings
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete
from south.modelsinspector import add_introspection_rules
from sentry.utils.cache import memoize
from sentry.utils.compat import pickle
from sentry.utils.strings import decompress, compress
from .gzippeddict import GzippedDictField
__all__ = ('NodeField',)
logger = logging.getLogger('sentry')
# Raised (in DEBUG) when node data is accessed before being fetched/bound.
class NodeUnpopulated(Exception):
    pass
# Raised by NodeData.bind_data when the stored ref does not match the
# expected ref for the current ref_version.
class NodeIntegrityFailure(Exception):
    pass
class NodeData(collections.MutableMapping):
    """Mutable-mapping proxy for payload data kept in the external nodestore.

    Holds either already-loaded data (``_node_data``) or just a node ``id``
    whose payload is lazily fetched from ``sentry.app.nodestore`` on first
    access through the memoized ``data`` property.
    """
    def __init__(self, field, id, data=None):
        self.field = field  # owning NodeField instance
        self.id = id  # nodestore key; may be None for not-yet-saved data
        self.ref = None
        # ref version is used to discredit a previous ref
        # (this does not mean the Event is mutable, it just removes ref checking
        # in the case of something changing on the data model)
        self.ref_version = None
        self._node_data = data
    # MutableMapping protocol: all dict-style access goes through `data`,
    # which triggers the lazy nodestore load when needed.
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __delitem__(self, key):
        del self.data[key]
    def __iter__(self):
        return iter(self.data)
    def __len__(self):
        return len(self.data)
    def __repr__(self):
        cls_name = type(self).__name__
        if self._node_data:
            return '<%s: id=%s data=%r>' % (
                cls_name, self.id, repr(self._node_data))
        return '<%s: id=%s>' % (cls_name, self.id,)
    def get_ref(self, instance):
        """Compute the integrity ref for `instance` via the field's ref_func."""
        ref_func = self.field.ref_func
        if not ref_func:
            return
        return ref_func(instance)
    @memoize
    def data(self):
        """Return the payload, lazily loading it from nodestore by id."""
        from sentry.app import nodestore
        if self._node_data is not None:
            return self._node_data
        elif self.id:
            # Accessing unbound data is an error in DEBUG, a warning (plus an
            # implicit nodestore fetch) otherwise.
            if settings.DEBUG:
                raise NodeUnpopulated('You should populate node data before accessing it.')
            else:
                warnings.warn('You should populate node data before accessing it.')
            self.bind_data(nodestore.get(self.id) or {})
            return self._node_data
        return {}
    def bind_data(self, data, ref=None):
        """Attach loaded payload, verifying the stored '_ref' if applicable.

        Raises NodeIntegrityFailure when the stored ref disagrees with the
        expected `ref` for the field's current ref_version.
        """
        self.ref = data.pop('_ref', ref)
        self.ref_version = data.pop('_ref_version', None)
        if self.ref_version == self.field.ref_version and ref is not None and self.ref != ref:
            raise NodeIntegrityFailure('Node reference for %s is invalid: %s != %s' % (
                self.id, ref, self.ref,
            ))
        self._node_data = data
    def bind_ref(self, instance):
        """Stamp the payload with the ref and ref_version for `instance`."""
        ref = self.get_ref(instance)
        if ref:
            self.data['_ref'] = ref
            self.data['_ref_version'] = self.field.ref_version
@six.add_metaclass(models.SubfieldBase)
class NodeField(GzippedDictField):
    """
    Similar to the gzippedictfield except that it stores a reference
    to an external node.

    The database column holds only a compressed pickle of {'node_id': ...};
    the actual payload lives in sentry.app.nodestore and is exposed to the
    model as a NodeData mapping.
    """
    def __init__(self, *args, **kwargs):
        # ref_func computes an integrity reference from the model instance;
        # ref_version lets the model discredit previously written refs.
        self.ref_func = kwargs.pop('ref_func', None)
        self.ref_version = kwargs.pop('ref_version', None)
        super(NodeField, self).__init__(*args, **kwargs)
    def contribute_to_class(self, cls, name):
        super(NodeField, self).contribute_to_class(cls, name)
        # Delete the external node whenever its owning row is deleted.
        post_delete.connect(
            self.on_delete,
            sender=self.model,
            weak=False)
    def on_delete(self, instance, **kwargs):
        """post_delete receiver: remove the referenced node from nodestore."""
        from sentry.app import nodestore
        value = getattr(instance, self.name)
        if not value.id:
            return
        nodestore.delete(value.id)
    def to_python(self, value):
        """Deserialize the DB value (compressed pickle) into a NodeData."""
        if isinstance(value, six.string_types) and value:
            try:
                value = pickle.loads(decompress(value))
            except Exception as e:
                # Corrupt/undecodable payloads degrade to an empty dict.
                logger.exception(e)
                value = {}
        elif not value:
            value = {}
        if 'node_id' in value:
            # Stored form: only a pointer into nodestore; payload loads lazily.
            node_id = value.pop('node_id')
            data = None
        else:
            node_id = None
            data = value
        return NodeData(self, node_id, data)
    def get_prep_value(self, value):
        """Persist the payload to nodestore; store just the node_id in SQL."""
        from sentry.app import nodestore
        if not value and self.null:
            # save ourselves some storage
            return None
        # TODO(dcramer): we should probably do this more intelligently
        # and manually
        if not value.id:
            value.id = nodestore.create(value.data)
        else:
            nodestore.set(value.id, value.data)
        return compress(pickle.dumps({
            'node_id': value.id
        }))
add_introspection_rules([], ["^sentry\.db\.models\.fields\.node\.NodeField"])
|
{
"content_hash": "71b09d120aeb3a9c1650f34eaf498439",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 94,
"avg_line_length": 28.141242937853107,
"alnum_prop": 0.5894398715117446,
"repo_name": "fotinakis/sentry",
"id": "fc43e6f8872e3b4344495462acd4450e911a4b9c",
"size": "4981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/db/models/fields/node.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222885"
},
{
"name": "HTML",
"bytes": "282398"
},
{
"name": "JavaScript",
"bytes": "927323"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5812"
},
{
"name": "Python",
"bytes": "11654397"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
import functools
import os.path
import astropy.io.registry as io_registry
from astropy.table import Table
from astropy.utils.misc import NOT_OVERWRITING_MSG
__all__ = ["PANDAS_FMTS"]

# Astropy users normally expect to not have an index, so default to turn
# off writing the index. This structure allows for astropy-specific
# customization of all options.
PANDAS_FMTS = {
    "csv": {"read": {}, "write": {"index": False}},
    "fwf": {"read": {}},  # No writer
    "html": {"read": {}, "write": {"index": False}},
    "json": {"read": {}, "write": {}},
}

# Prefix distinguishing pandas-backed formats in the astropy I/O registry.
PANDAS_PREFIX = "pandas."

# Imports for reading HTML
# Lazy-import state for optional HTML parser backends; populated once by
# import_html_libs() on first use.
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def import_html_libs():
    """Try importing dependencies for reading HTML.

    This is copied from pandas.io.html
    """
    # import things we need
    # but make this done on a first use basis

    global _IMPORTS
    if _IMPORTS:
        # Already attempted once; the availability flags are final.
        return

    global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
    # The "as" aliases deliberately rebind the module-level flags
    # declared global above — this is an assignment, not a plain import.
    from astropy.utils.compat.optional_deps import HAS_BS4 as _HAS_BS4
    from astropy.utils.compat.optional_deps import HAS_HTML5LIB as _HAS_HTML5LIB
    from astropy.utils.compat.optional_deps import HAS_LXML as _HAS_LXML

    _IMPORTS = True
def _pandas_read(fmt, filespec, **kwargs):
    """Provide io Table connector to read table using pandas."""
    try:
        import pandas
    except ImportError:
        raise ImportError("pandas must be installed to use pandas table reader")

    # Strip the "pandas." prefix to recover the native pandas format name.
    pandas_fmt = fmt[len(PANDAS_PREFIX) :]
    read_func = getattr(pandas, "read_" + pandas_fmt)

    # Astropy defaults first; user-supplied options win.
    read_kwargs = dict(PANDAS_FMTS[pandas_fmt]["read"], **kwargs)

    # Special case: pandas defaults to HTML lxml for reading, but does not
    # attempt to fall back to bs4 + html5lib. So do that now for convenience
    # if the user has not specifically selected a flavor. If things go wrong
    # the pandas exception with instruction to install a library will come up.
    if pandas_fmt == "html" and "flavor" not in kwargs:
        import_html_libs()
        if not _HAS_LXML and _HAS_HTML5LIB and _HAS_BS4:
            read_kwargs["flavor"] = "bs4"

    frame = read_func(filespec, **read_kwargs)

    # read_html returns a list of tables; keep only the first one.
    if pandas_fmt == "html":
        frame = frame[0]

    return Table.from_pandas(frame)
def _pandas_write(fmt, tbl, filespec, overwrite=False, **kwargs):
    """Provide io Table connector to write table using pandas."""
    pandas_fmt = fmt[len(PANDAS_PREFIX) :]  # strip the "pandas." prefix

    # Astropy defaults first; user-supplied options win.
    write_kwargs = dict(PANDAS_FMTS[pandas_fmt]["write"], **kwargs)

    frame = tbl.to_pandas()
    writer = getattr(frame, "to_" + pandas_fmt)

    if not overwrite:
        try:  # filespec is not always a path-like
            exists = os.path.exists(filespec)
        except TypeError:  # skip invalid arguments
            pass
        else:
            if exists:  # only error if file already exists
                raise OSError(NOT_OVERWRITING_MSG.format(filespec))

    return writer(filespec, **write_kwargs)
# Import-time side effect: register a unified reader and/or writer with
# astropy's I/O registry for every pandas-backed format declared above.
for pandas_fmt, defaults in PANDAS_FMTS.items():
    fmt = PANDAS_PREFIX + pandas_fmt  # Full format specifier
    if "read" in defaults:
        func = functools.partial(_pandas_read, fmt)
        io_registry.register_reader(fmt, Table, func)
    if "write" in defaults:
        func = functools.partial(_pandas_write, fmt)
        io_registry.register_writer(fmt, Table, func)
|
{
"content_hash": "f485850b2bf35b9f47755019ab1b5a49",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 82,
"avg_line_length": 31.69298245614035,
"alnum_prop": 0.657625242181013,
"repo_name": "mhvk/astropy",
"id": "f6867b8570be41efdf85d89f0cd5b68d823ac297",
"size": "3752",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/io/misc/pandas/connect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import copy
import logging
import random
import time
import kafka.common as Errors
from kafka.common import BrokerMetadata
from .future import Future
log = logging.getLogger(__name__)
class ClusterMetadata(object):
    """Client-side cache of Kafka cluster metadata.

    Tracks brokers, per-topic partition leadership and group
    coordinators. Callers poll ttl() to learn when the cache should be
    refreshed and use request_update() to obtain a Future resolved by
    the next update attempt.
    """

    DEFAULT_CONFIG = {
        'retry_backoff_ms': 100,         # minimum gap between refresh attempts
        'metadata_max_age_ms': 300000,   # force a refresh after this age
    }

    def __init__(self, **configs):
        self._brokers = {}       # node_id -> BrokerMetadata
        self._partitions = {}    # topic -> {partition -> leader node_id}
        self._groups = {}        # group name -> coordinator node_id (-1 on error)
        self._version = 0        # bumped on every successful update
        self._last_refresh_ms = 0
        self._last_successful_refresh_ms = 0
        self._need_update = False
        self._future = None      # Future resolved by the next update attempt
        self._listeners = set()  # callbacks invoked after each update

        # Accept only configuration keys declared in DEFAULT_CONFIG;
        # anything else in **configs is silently ignored.
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs[key]

    def brokers(self):
        """Return the set of known BrokerMetadata."""
        return set(self._brokers.values())

    def broker_metadata(self, broker_id):
        """Return BrokerMetadata for broker_id, or None if unknown."""
        return self._brokers.get(broker_id)

    def partitions_for_topic(self, topic):
        """Return the set of partition ids for topic, or None if unknown."""
        if topic not in self._partitions:
            return None
        return set(self._partitions[topic].keys())

    def leader_for_partition(self, partition):
        """Return the leader node_id for a partition, or None if unknown."""
        if partition.topic not in self._partitions:
            return None
        return self._partitions[partition.topic].get(partition.partition)

    def coordinator_for_group(self, group):
        """Return the coordinator node_id for group (may be -1 after a
        failed lookup), or None if never resolved."""
        return self._groups.get(group)

    def ttl(self):
        """Milliseconds until metadata should be refreshed"""
        now = time.time() * 1000
        if self._need_update:
            ttl = 0  # an update has been explicitly requested
        else:
            metadata_age = now - self._last_successful_refresh_ms
            ttl = self.config['metadata_max_age_ms'] - metadata_age

        # Never allow a refresh sooner than the retry backoff permits,
        # even when an update was requested.
        retry_age = now - self._last_refresh_ms
        next_retry = self.config['retry_backoff_ms'] - retry_age

        return max(ttl, next_retry, 0)

    def request_update(self):
        """
        Flags metadata for update, return Future()

        Actual update must be handled separately. This method will only
        change the reported ttl()
        """
        self._need_update = True
        # Reuse an in-flight Future so concurrent requesters share one result.
        if not self._future or self._future.is_done:
            self._future = Future()
        return self._future

    def topics(self):
        """Return the set of known topic names."""
        return set(self._partitions.keys())

    def failed_update(self, exception):
        """Record a failed refresh: fail any waiter and advance the
        refresh timestamp so retry backoff applies."""
        if self._future:
            self._future.failure(exception)
            self._future = None
        # Only _last_refresh_ms moves; the successful timestamp is untouched.
        self._last_refresh_ms = time.time() * 1000

    def update_metadata(self, metadata):
        """Merge a MetadataResponse into the cache, resolve any waiting
        Future, bump the version, and notify listeners."""
        # In the common case where we ask for a single topic and get back an
        # error, we should fail the future
        if len(metadata.topics) == 1 and metadata.topics[0][0] != 0:
            error_code, topic, _ = metadata.topics[0]
            error = Errors.for_code(error_code)(topic)
            return self.failed_update(error)

        if not metadata.brokers:
            log.warning("No broker metadata found in MetadataResponse")

        for node_id, host, port in metadata.brokers:
            self._brokers.update({
                node_id: BrokerMetadata(node_id, host, port)
            })

        # Drop any UnknownTopic, InvalidTopic, and TopicAuthorizationFailed
        # but retain LeaderNotAvailable because it means topic is initializing
        self._partitions = {}

        for error_code, topic, partitions in metadata.topics:
            error_type = Errors.for_code(error_code)
            if error_type is Errors.NoError:
                self._partitions[topic] = {}
                for _, partition, leader, _, _ in partitions:
                    self._partitions[topic][partition] = leader
            elif error_type is Errors.LeaderNotAvailableError:
                log.error("Topic %s is not available during auto-create"
                          " initialization", topic)
            elif error_type is Errors.UnknownTopicOrPartitionError:
                log.error("Topic %s not found in cluster metadata", topic)
            elif error_type is Errors.TopicAuthorizationFailedError:
                log.error("Topic %s is not authorized for this client", topic)
            elif error_type is Errors.InvalidTopicError:
                log.error("'%s' is not a valid topic name", topic)
            else:
                log.error("Error fetching metadata for topic %s: %s",
                          topic, error_type)

        # Resolve any waiter created by request_update().
        if self._future:
            self._future.success(self)
        self._future = None
        self._need_update = False
        self._version += 1
        now = time.time() * 1000
        self._last_refresh_ms = now
        self._last_successful_refresh_ms = now
        log.debug("Updated cluster metadata version %d to %s",
                  self._version, self)

        # Notify registered listeners of the new metadata.
        for listener in self._listeners:
            listener(self)

    def add_listener(self, listener):
        """Add a callback function to be called on each metadata update"""
        self._listeners.add(listener)

    def remove_listener(self, listener):
        """Remove a previously added listener callback"""
        self._listeners.remove(listener)

    def add_group_coordinator(self, group, response):
        """Update with metadata for a group coordinator

        group: name of group from GroupCoordinatorRequest
        response: GroupCoordinatorResponse

        returns True if metadata is updated, False on error
        """
        log.debug("Updating coordinator for %s: %s", group, response)
        error_type = Errors.for_code(response.error_code)
        if error_type is not Errors.NoError:
            log.error("GroupCoordinatorResponse error: %s", error_type)
            # -1 is a sentinel recording the failed lookup.
            self._groups[group] = -1
            return False

        node_id = response.coordinator_id
        coordinator = BrokerMetadata(
            response.coordinator_id,
            response.host,
            response.port)

        # Assume that group coordinators are just brokers
        # (this is true now, but could diverge in future)
        if node_id not in self._brokers:
            self._brokers[node_id] = coordinator

        # If this happens, either brokers have moved without
        # changing IDs, or our assumption above is wrong
        elif coordinator != self._brokers[node_id]:
            log.error("GroupCoordinator metadata conflicts with existing"
                      " broker metadata. Coordinator: %s, Broker: %s",
                      coordinator, self._brokers[node_id])
            # NOTE(review): the mapping is still recorded on conflict,
            # even though False is returned.
            self._groups[group] = node_id
            return False

        log.info("Group coordinator for %s is %s", group, coordinator)
        self._groups[group] = node_id
        return True

    def __str__(self):
        return 'Cluster(brokers: %d, topics: %d, groups: %d)' % \
               (len(self._brokers), len(self._partitions), len(self._groups))
|
{
"content_hash": "0d2a30d34e8099607e9155fa37bbfd96",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 78,
"avg_line_length": 35.66839378238342,
"alnum_prop": 0.5964555490993608,
"repo_name": "gamechanger/kafka-python",
"id": "1cdc8dd40f3aacda43875fca1bada897a201b169",
"size": "6884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "559844"
},
{
"name": "Shell",
"bytes": "2646"
}
],
"symlink_target": ""
}
|
from tasks.meta import OBSTable, OBSColumn, current_session, GEOM_REF
from tasks.base_tasks import ColumnsTask, GeoFile2TempTableTask, RepoFileUnzipTask, TableTask
from tasks.util import shell
from tasks.poi import POIColumns
from tasks.us.ny.nyc.columns import NYCColumns
from tasks.us.ny.nyc.tags import NYCTags
from lib.timespan import get_timespan
from luigi import Parameter, WrapperTask
from collections import OrderedDict
import os
RELEASES = ['17', '16', '15']
class DownloadUnzipMapPLUTO(RepoFileUnzipTask):
    """Download and unzip one borough's MapPLUTO archive for a release."""

    borough = Parameter()
    release = Parameter()

    # NYC DCP open-data download endpoint template.
    URL = 'http://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/{borough}_mappluto_{release}.zip'

    def get_url(self):
        """Return the fully-substituted download URL for this task."""
        params = {'borough': self.borough, 'release': self.release}
        return self.URL.format(**params)
class MapPLUTOTmpTable(GeoFile2TempTableTask):
    """Load one borough's MapPLUTO shapefile into a temporary table."""

    borough = Parameter()
    release = Parameter()

    def requires(self):
        return DownloadUnzipMapPLUTO(borough=self.borough, release=self.release)

    def input_files(self):
        """Path to the borough shapefile inside the unzipped archive."""
        shp_name = '{}MapPLUTO.shp'.format(self.borough.upper())
        return os.path.join(self.input().path, shp_name)
class MapPLUTOColumns(ColumnsTask):
    """Declare OBSColumn metadata for every MapPLUTO attribute.

    This is pure metadata (names, descriptions, category code books) —
    no data is loaded here. Descriptions come from the NYC DCP MapPLUTO
    data dictionary; typos inside description strings are preserved as
    published.
    """

    def requires(self):
        return {
            'tags': NYCTags(),
        }

    def version(self):
        # Bump to force downstream re-generation of column metadata.
        return 5

    def columns(self):
        """Return an OrderedDict of column key -> OBSColumn metadata."""
        # NOTE(review): input_ appears unused in this method — tags()
        # receives its own input_ argument; confirm before removing.
        input_ = self.input()
        cols = OrderedDict([
            # --- Zoning districts and overlays ---
            ("zonedist1", OBSColumn(
                type="Text",
                name='Zoning District 1',
                weight=1,
                description='''The zoning district classification of the tax
                lot. If the tax lot is divided by a zoning boundary line,
                ZoneDist1 represents the primary zoning district classification
                occupying the greates t percentage of the tax lot's area.
                Properties under the jurisdiction of NYC Department of Parks
                and Recreation are coded PARK. Properties under the
                jurisdiction of NYS Office of Parks, Recreation, and Historic
                Preservation are coded PARKNY. DROP LOT is a designation that
                City Planning devised to identify tax lots that no longer exist
                in the DCP version of the Digital Tax Map but have not yet been
                removed from the Department of Finance RPAD File. RPAD retains
                tax lots that have been dropped, due to merger, reapportionment
                or conversion to condominium, until the end of the City's
                Fiscal Year. To avoid confusion DROP LOT was created to
                identify these lots.''')),
            ("zonedist2", OBSColumn(
                type="Text",
                name='Zoning District 2',
                weight=1,
                description='''If the tax lot is divided by zoning boundary
                lines, ZoneDist2 represents the primary zoning classification
                occupying the second greatest percentage of the tax lot's area.
                If the tax lot is not divided by a zoning boundary line, the
                field is blank.''')),
            ("zonedist3", OBSColumn(
                type="Text",
                weight=1,
                name='Zoning District 3',
                description='''If the tax lot is divided by zoning boundary
                lines, ZoneDist3 represents the primary zoning classification
                occupying the third greatest percentage of the tax lot's area.
                If the tax lot is not divided by a zoning boundary line, the
                field is blank.''')),
            ("zonedist4", OBSColumn(
                type="Text",
                weight=1,
                name='Zoning District 4',
                description='''If the tax lot is divided by zoning boundary
                lines, ZoneDist4 represents the primary zoning classification
                occupying the fourth greatest percentage of the tax lot's area.
                If the tax lot is not divided by a zoning boundary line, the
                field is blank.''')),
            ("overlay1", OBSColumn(
                type="Text",
                weight=1,
                name='Primary Commercial overlay',
                description='''The commercial overlay assigned to the tax lot.'''
            )),
            ("overlay2", OBSColumn(
                type="Text",
                weight=1,
                name='Secondary Commercial overlay',
                description="""A commercial overlay associated with the tax lot. """,
            )),
            ("spdist1", OBSColumn(
                type="text",
                weight=1,
                name="Primary special purpose district",
                description="The special purpose district assigned to the tax lot.")),
            ("spdist2", OBSColumn(
                type="text",
                weight=1,
                name="Secondary special purpose district",
                description="The special purpose district assigned to the tax lot.")),
            ("spdist3", OBSColumn(
                type="text",
                weight=1,
                name="Third special purpose district",
                description="The special purpose district assigned to the tax lot.")),
            ("ltdheight", OBSColumn(
                type="text",
                weight=1,
                name="Limited Height district",
                description="")),
            ("splitzone", OBSColumn(
                type="text",
                weight=1,
                name="Is the tax lot split by a zoning boundary",
                description="""A code indicating whether the tax lot is split by
                one or more zoning boundary lines.""")),
            # --- Building classification (full DOF code book) ---
            ("bldgclass", OBSColumn(
                type="text",
                weight=1,
                name="Building class",
                description="""A code describing the major use of structures on
                the tax lot.""",
                extra={
                    "categories": {
                        'A0': '1 family: Cape Cod',
                        'A1': '1 family: Two Stories Detached (Small or'
                        'Moderate Size, With or Without Attic)',
                        'A2': '1 family: One Story (Permanent Living Quarters)',
                        'A3': '1 family: Large Suburban Residence',
                        'A4': '1 family: City Residence',
                        'A5': '1 family: Attached or Semi-Detached',
                        'A6': '1 family: Summer Cottages',
                        'A7': '1 family: Mansion Type or Town House',
                        'A8': '1 family: Bungalow Colony/Land Coop Owned',
                        'A9': '1 family: Miscellaneous',
                        'B1': '2 family: Brick',
                        'B2': '2 family: Frame',
                        'B3': '2 family: Converted From One Family',
                        'B9': '2 family: Miscellaneous',
                        'C0': 'Walk-up apt: Three Families',
                        'C1': 'Walk-up apt: Over Six Families Without Stores',
                        'C2': 'Walk-up apt: Five to Six Families',
                        'C3': 'Walk-up apt: Four Families',
                        'C4': 'Walk-up apt: Old Law T enements',
                        'C5': 'Walk-up apt: Converted Dwelling or Rooming House',
                        'C6': 'Walk-up apt: Cooperative',
                        'C7': 'Walk-up apt: Over Six Families With Stores',
                        'C8': 'Walk-up apt: Co-Op Conversion From Loft/Warehouse',
                        'C9': 'Walk-up apt: Garden Apartments',
                        'CM': 'Walk-up apt: Mobile Homes/Trailer Parks',
                        'D0': 'Elevator apt: Co-op Conversion from Loft/Warehouse',
                        'D1': 'Elevator apt: Semi-fireproof (Without Stores)',
                        'D2': 'Elevator apt: Artists in Residence',
                        'D3': 'Elevator apt: Fireproof (Without Stores)',
                        'D4': 'Elevator apt: Cooperatives (Other Than Condominiums)',
                        'D5': 'Elevator apt: Converted',
                        'D6': 'Elevator apt: Fireproof With Stores',
                        'D7': 'Elevator apt: Semi-Fireproof With Stores',
                        'D8': 'Elevator apt: Luxury Type',
                        'D9': 'Elevator apt: Miscellaneous',
                        'E1': 'Warehouse: Fireproof',
                        'E2': "Warehouse: Contractor's Warehouse",
                        'E3': 'Warehouse: Semi-Fireproof',
                        'E4': 'Warehouse: Frame, Metal',
                        'E7': 'Warehouse: Warehouse, Self Storage',
                        'E9': 'Warehouse: Miscellaneous',
                        'F1': 'Industrial: Heavy Manufacturing - Fireproof',
                        'F2': 'Industrial: Special Construction - Fireproof',
                        'F4': 'Industrial: Semi-Fireproof',
                        'F5': 'Industrial: Light Manufacturing',
                        'F8': 'Industrial: Tank Farms',
                        'F9': 'Industrial: Miscellaneous',
                        'G0': 'Garage/gas: Residential Tax Class 1 Garage',
                        'G1': 'Garage/gas: All Parking Garages',
                        'G2': 'Garage/gas: Auto Body/Collision or Auto Repair',
                        'G3': 'Garage/gas: Gas Station with Retail Store',
                        'G4': 'Garage/gas: Gas Station with Service/Auto Repair',
                        'G5': 'Garage/gas: Gas Station only with/without Small Kiosk',
                        'G6': 'Garage/gas: Licensed Parking Lot',
                        'G7': 'Garage/gas: Unlicensed Parking Lot',
                        'G8': 'Garage/gas: Car Sales/Rental with Showroom',
                        'G9': 'Garage/gas: Miscellaneous Garage or Gas Station',
                        'GU': 'Garage/gas: Car Sales/Rental without Showroom',
                        'GW': 'Garage/gas: Car Wash or Lubritorium Facility',
                        'H1': 'Hotel: Luxury Type',
                        'H2': 'Hotel: Full Service Hotel',
                        'H3': 'Hotel: Limited Service - Many Affiliated with National Chain',
                        'H4': 'Hotel: Motels',
                        'H5': 'Hotel: Private Club, Luxury Type',
                        'H6': 'Hotel: Apartment Hotels',
                        'H7': 'Hotel: Apartment Hotels-Co-op Owned',
                        'H8': 'Hotel: Dormitories',
                        'H9': 'Hotel: Miscellaneous',
                        'HB': 'Hotel: Boutique 10-100 Rooms, with Luxury '
                        'Facilities, Themed, Stylish, with Full Service '
                        'Accommodations',
                        'HH': 'Hotel: Hostel-Bed Rental in Dorm Like Setting '
                        'with Shared Rooms & Bathrooms',
                        'HR': 'Hotel: SRO- 1 or 2 People Housed in Individual '
                        'Rooms in Multiple Dwelling Affordable Housing',
                        'HS': 'Hotel: Extended Stay/Suite Amenities Similar to '
                        'Apt., Typically Charge Weekly Rates & Less '
                        'Expensive than Full Service Hotel',
                        'I1': 'Health: Hospitals, Sanitariums, Mental Institutions',
                        'I2': 'Health: Infirmary',
                        'I3': 'Health: Dispensary',
                        'I4': 'Health: Staff Facilities',
                        'I5': 'Health: Health Center, Child Center, Clinic',
                        'I6': 'Health: Nursing Home',
                        'I7': 'Health: Adult Care Facility',
                        'I9': 'Health: Miscellaneous',
                        'J1': 'Theatre: Art Type (Seating Capacity under 400 Seats)',
                        'J2': 'Theatre: Art Type (Seating Capacity Over 400 Seats)',
                        'J3': 'Theatre: Motion Picture Theatre with Balcony',
                        'J4': 'Theatre: Legitimate Theatres (Theatre Sole Use of Building)',
                        'J5': 'Theatre: Theatre in Mixed Use Building',
                        'J6': 'Theatre: T.V. Studios',
                        'J7': 'Theatre: Off-Broadway Type',
                        'J8': 'Theatre: Multiplex Picture Theatre',
                        'J9': 'Theatre: Miscellaneous',
                        'K1': 'Store: One Story Retail Building',
                        'K2': 'Store: Multi-Story Retail Building',
                        'K3': 'Store: Multi-Story Department Store',
                        'K4': 'Store: Predominant Retail with Other Uses',
                        'K5': 'Store: Stand Alone Food Establishment',
                        'K6': 'Store: Shopping Centers With or Without Parking',
                        'K7': 'Store: Banking Facilities with or Without Parking',
                        'K8': 'Store: Big Box Retail Not Affixed & Standing On '
                        'Own Lot with Parking',
                        'K9': 'Store: Miscellaneous',
                        'L1': 'Loft: Over Eight Stores (Mid-Manhattan Type)',
                        'L2': 'Loft: Fireproof and Storage Type (Without Stores)',
                        'L3': 'Loft: Semi-Fireproof',
                        'L8': 'Loft: With Retail Stores Other Than Type 1',
                        'L9': 'Loft: Miscellaneous',
                        'M1': 'Religious: Church, Synagogue, Chapel',
                        'M2': 'Religious: Mission House (non-Residential)',
                        'M3': 'Religious: Parsonage, Rectory',
                        'M4': 'Religious: Convents',
                        'M9': 'Religious: Miscellaneous',
                        'N1': 'Group home: Asylums',
                        'N2': 'Group home: Homes for Indigent Children, Aged, Homeless',
                        'N3': 'Group home: Orphanages',
                        'N4': 'Group home: Detention House For Wayward Girls',
                        'N9': 'Group home: Miscellaneous',
                        'O1': 'Office Only - 1 Story',
                        'O2': 'Office Only - 2-6 Stories',
                        'O3': 'Office Only - 7-19 Stories',
                        'O4': 'Office Only or Office with Comm - 20 Stories or More',
                        'O5': 'Office with Comm - 1 to 6 Stories',
                        'O6': 'Office with Comm - 7 to 19 Stories',
                        'O7': 'Office: Professional Buildings/Stand Alone Funeral Homes',
                        'O8': 'Office with Apartments Only (No Comm)',
                        'O9': 'Office: Miscellaneous and Old Style Bank Bldgs',
                        'P1': 'Cultural: Concert Halls',
                        'P2': 'Cultural: Lodge Rooms',
                        'P3': 'Cultural: YWCA, YMCA, YWHA, YMHA, PAL',
                        'P4': 'Cultural: Beach Club',
                        'P5': 'Cultural: Community Center',
                        'P6': 'Cultural: Amusement Place, Bathhouse, Boat House',
                        'P7': 'Cultural: Museum',
                        'P8': 'Cultural: Library',
                        'P9': 'Cultural: Miscellaneous',
                        'Q0': 'Recreation: Open Space',
                        'Q1': 'Recreation: Parks/Recreation Facilities',
                        'Q2': 'Recreation: Playground',
                        'Q3': 'Recreation: Outdoor Pool',
                        'Q4': 'Recreation: Beach',
                        'Q5': 'Recreation: Golf Course',
                        'Q6': 'Recreation: Stadium, Race Track, Baseball Field',
                        'Q7': 'Recreation: Tennis Court',
                        'Q8': 'Recreation: Marina, Yacht Club',
                        'Q9': 'Recreation: Miscellaneous',
                        'R0': 'Condo Billing Lot',
                        'R1': 'Condo: Residential Unit in 2-10 Unit Bldg',
                        'R2': 'Condo: Residential Unit in Walk-Up Bldg',
                        'R3': 'Condo: Residential Unit in 1-3 Story Bldg',
                        'R4': 'Condo: Residential Unit in Elevator Bldg',
                        'R5': 'Condo: Miscellaneous Commercial',
                        'R6': 'Condo: Residential Unit of 1-3 Unit Bldg-Orig Class 1',
                        'R7': 'Condo: Commercial Unit of 1-3 Units Bldg- Orig Class 1',
                        'R8': 'Condo: Commercial Unit of 2-10 Unit Bldg',
                        'R9': 'Condo: Co-op within a Condominium',
                        'RA': 'Condo: Cultural, Medical, Educational, etc.',
                        'RB': 'Condo: Office Space',
                        'RC': 'Condo: Commercial Building (Mixed Commercial '
                        'Condo Building Classification Codes)',
                        'RD': 'Condo: Residential Building (Mixed Residential '
                        'Condo Building Classification Codes)',
                        'RG': 'Condo: Indoor Parking',
                        'RH': 'Condo: Hotel/Boatel',
                        'RI': 'Condo: Mixed Warehouse/Factory/Industrial & Commercial',
                        'RK': 'Condo: Retail Space',
                        'RM': 'Condo: Mixed Residential & Commercial Building '
                        '(Mixed Residential & Commercial)',
                        'RP': 'Condo: Outdoor Parking',
                        'RR': 'Condo: Condominium Rentals',
                        'RS': 'Condo: Non-Business Storage Space',
                        'RT': 'Condo: Terraces/Gardens/Cabanas',
                        'RW': 'Condo: Warehouse/Factory/Industrial',
                        'RX': 'Condo: Mixed Residential, Commercial & Industrial',
                        'RZ': 'Condo: Mixed Residential & Warehouse',
                        'S0': 'Mixed residence: Primarily One Family with Two Stores or Offices',
                        'S1': 'Mixed residence: Primarily One Family with One Store or Office',
                        'S2': 'Mixed residence: Primarily Two Family with One Store or Office',
                        'S3': 'Mixed residence: Primarily Three Family with One Store or Office',
                        'S4': 'Mixed residence: Primarily Four Family with One Store or Office',
                        'S5': 'Mixed residence: Primarily Five to Six Family '
                        'with One Store or Office',
                        'S9': 'Mixed residence: Single or Multiple Dwelling with Stores or Offices',
                        'T1': 'Transportation: Airport, Air Field, Terminal',
                        'T2': 'Transportation: Pier, Dock, Bulkhead',
                        'T9': 'Transportation: Miscellaneous',
                        'U0': 'Utility: Company Land and Building',
                        'U1': 'Utility: Bridge, Tunnel, Highway',
                        'U2': 'Utility: Gas or Electric Utility',
                        'U3': 'Utility: Ceiling Railroad',
                        'U4': 'Utility: Telephone Utility',
                        'U5': 'Utility: Communications Facilities Other Than Telephone',
                        'U6': 'Utility: Railroad - Private Ownership',
                        'U7': 'Utility: Transportation - Public Ownership',
                        'U8': 'Utility: Revocable Consent',
                        'U9': 'Utility: Miscellaneous',
                        'V0': 'Vacant: Zoned Residential; Not Manhattan',
                        'V1': 'Vacant: Zoned Commercial or Manhattan Residential',
                        'V2': 'Vacant: Zoned Commercial Adjacent to Class 1 '
                        'Dwelling; Not Manhattan',
                        'V3': 'Vacant: Zoned Primarily Residential; Not Manhattan',
                        'V4': 'Vacant: Police or Fire Department',
                        'V5': 'Vacant: School Site or Yard',
                        'V6': 'Vacant: Library, Hospital or Museum',
                        'V7': 'Vacant: Port Authority of NY and NJ',
                        'V8': 'Vacant: New York State & U.S. Government',
                        'V9': 'Vacant: Miscellaneous',
                        'W1': 'Educational: Public Elementary, Junior or Senior High',
                        'W2': 'Educational: Parochial School, Yeshiva',
                        'W3': 'Educational: School or Academy',
                        'W4': 'Educational: Training School',
                        'W5': 'Educational: City University',
                        'W6': 'Educational: Other College and University',
                        'W7': 'Educational: Theological Seminary',
                        'W8': 'Educational: Other Private School',
                        'W9': 'Educational: Miscellaneous',
                        'Y1': 'Government: Fire Department',
                        'Y2': 'Government: Police Department',
                        'Y3': 'Government: Prison, Jail, House of Detention',
                        'Y4': 'Government: Military and Naval Installation',
                        'Y5': 'Government: Department of Real Estate',
                        'Y6': 'Government: Department of Sanitation',
                        'Y7': 'Government: Department of Ports and',
                        'Y8': 'Government: Department of Public Works',
                        'Y9': 'Government: Department of Environmental Protection',
                        'Z0': 'Misc: Tennis Court, Pool, Shed, etc.',
                        'Z1': 'Misc: Court House',
                        'Z2': 'Misc: Public Parking Area',
                        'Z3': 'Misc: Post Office',
                        'Z4': 'Misc: Foreign Government',
                        'Z5': 'Misc: United Nations',
                        'Z7': 'Misc: Easement',
                        'Z8': 'Misc: Cemetery',
                        'Z9': 'Misc: Other',
                    }
                })
             ),
            # --- Land use category code book ---
            ("landuse", OBSColumn(
                type="text",
                weight=1,
                name="Land use category",
                description="""A code for the tax lot's land use category,
                modified for display of parks, New York City Department of
                Parks and Recreation properties and New York State Office of
                Parks, Recreation and H istoric Preservation properties in the
                appropriate category on land use maps.""",
                extra={
                    "categories": {
                        '01': 'One & Two Family Buildings',
                        '02': 'Multi - Family Walk - Up Buildings',
                        '03': 'Multi - Family Elevator Buildings',
                        '04': 'Mixed Residential & Commercial Buildings',
                        '05': 'Commercial & Office Buildings',
                        '06': 'Industrial & Manufacturing',
                        '07': 'Transportation & Utility',
                        '08': 'Public Facilities & Institutions',
                        '09': 'Open Space & Outdoor Recreation',
                        '10': 'Parking Facilities',
                        '11': 'Vacant Land',
                    }
                }
            )),
            # --- Ownership, area and unit measures ---
            ("easements", OBSColumn(
                type="text",
                weight=1,
                name="Easements",
                description="""The number of easements on the tax lot. If the
                number is zero, the tax lot has no easement""")),
            ("ownertype", OBSColumn(
                type="text",
                weight=1,
                name="Ownership Type",
                description="""A code indicating type of ownership for the tax lot.""")),
            ("ownername", OBSColumn(
                type="text",
                weight=1,
                name="Owner name",
                description="""The name of the owner of the tax lot (from RPAD).""")),
            ("lotarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Lot Area",
                description=""" """)),
            ("bldgarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Building Area",
                description=""" """)),
            ("comarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Commercial Area",
                description=""" """)),
            ("resarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Residential Area",
                description=""" """)),
            ("officearea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Office Area",
                description=""" """)),
            ("retailarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Retail Area",
                description=""" """)
             ),
            ("garagearea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Garage Area",
                description=""" """)
             ),
            ("strgearea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Storage Area",
                description=""" """)
             ),
            ("factryarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Factory Area",
                description=""" """)
             ),
            ("otherarea", OBSColumn(
                type="Numeric",
                weight=1,
                name="Other Area",
                description=""" """)
             ),
            ("areasource", OBSColumn(
                type="Text",
                weight=1,
                name="Area source",
                description=""" """)
             ),
            ("numbldgs", OBSColumn(
                type="Numeric",
                weight=1,
                name="Number of buildings",
                description=""" """)
             ),
            ("numfloors", OBSColumn(
                type="Numeric",
                weight=1,
                name="Floors",
                description=""" """)
             ),
            ("unitsres", OBSColumn(
                type="Numeric",
                weight=1,
                name="Residential units",
                description=""" """)
             ),
            ("unitstotal", OBSColumn(
                type="Numeric",
                weight=1,
                name="Total units",
                description=""" """)
             ),
            ("lotfront", OBSColumn(
                type="Numeric",
                weight=1,
                name="Lot front",
                description=""" """)
             ),
            ("lotdepth", OBSColumn(
                type="Numeric",
                weight=1,
                name="Lot depth",
                description=""" """)
             ),
            ("bldgfront", OBSColumn(
                type="Numeric",
                weight=1,
                name="Building Front",
                description=""" """)
             ),
            ("bldgdepth", OBSColumn(
                type="Numeric",
                weight=1,
                name="Building depth",
                description=""" """)
             ),
            # --- Lot shape / condition codes ---
            ("ext", OBSColumn(
                type="Text",
                weight=1,
                name="Has extension",
                description="""A code identifying whether there is an extension or free
                standing structure on the lot which is not the primary structure""")
             ),
            ("proxcode", OBSColumn(
                type="Text",
                weight=1,
                name="Proximity code",
                description="""The physical relationship of the building to neighboring
                buildings.""")
             ),
            ("irrlotcode", OBSColumn(
                type="Text",
                weight=1,
                name="Irregularly shaped code",
                description="""A code indicating whether th e tax lot is irregularly shaped""")
             ),
            ("lottype", OBSColumn(
                type="Text",
                weight=1,
                name="Lot type code",
                description="""A code indicating the location of the tax lot to
                another tax lot and/or the water.""")
             ),
            ("bsmtcode", OBSColumn(
                type="Text",
                weight=1,
                name="Basement code",
                description="""A code describing the basement type/grade.""")
             ),
            # --- Assessed / exempt values ---
            ("assessland", OBSColumn(
                type="Numeric",
                weight=1,
                name="Assessed land value",
                description="""The tentative assessed land value for the upcoming fiscal year""")
             ),
            ("assesstot", OBSColumn(
                type="Numeric",
                weight=1,
                name="Assessed total value",
                description="""The tentative assessed total value for the upcoming fiscal year """)
             ),
            ("exemptland", OBSColumn(
                type="Numeric",
                weight=1,
                name="Exempt land value",
                description=""" """)
             ),
            ("exempttot", OBSColumn(
                type="Numeric",
                weight=1,
                name="Exempt total value",
                description=""" """)
             ),
            # --- History, landmarks and FAR limits ---
            ("yearbuilt", OBSColumn(
                type="Numeric",
                weight=1,
                name="Year built",
                description=""" """)
             ),
            ("yearalter1", OBSColumn(
                type="Numeric",
                weight=1,
                name="Year of most recent alteration",
                description=""" """)
             ),
            ("yearalter2", OBSColumn(
                type="Numeric",
                weight=1,
                name="Year of second most recent alteration",
                description=""" """)
             ),
            ("histdist", OBSColumn(
                type="Text",
                weight=1,
                name="Historical district",
                description=""" """)
             ),
            ("landmark", OBSColumn(
                type="Text",
                weight=1,
                name="Name of landmark",
                description=""" """)
             ),
            ("builtfar", OBSColumn(
                type="Numeric",
                weight=1,
                name="Built floor area ratio (FAR)",
                description=""" """)
             ),
            ("residfar", OBSColumn(
                type="Numeric",
                weight=1,
                name="Maximum allowable residential floor area ratio (FAR)",
                description=""" """)
             ),
            ("commfar", OBSColumn(
                type="Numeric",
                weight=1,
                name="Maximum allowable commercial floor area ratio (FAR)",
                description=""" """)
             ),
            ("facilfar", OBSColumn(
                type="Numeric",
                weight=1,
                name="Maximum allowable facilities floor area ratio (FAR)",
                description=""" """)
             ),
            # --- Identifiers, coordinates and map references ---
            ("condono", OBSColumn(
                type="Numeric",
                weight=1,
                name="Condo number",
                description=""" """)
             ),
            ("xcoord", OBSColumn(
                type="Numeric",
                name="X Coordinate",
                description=""" """)
             ),
            ("ycoord", OBSColumn(
                type="Numeric",
                name="Y coordinate",
                description=""" """)
             ),
            ("zonemap", OBSColumn(
                type="Text",
                weight=1,
                name="Zoning map number",
                description=""" """)
             ),
            ("zmcode", OBSColumn(
                type="Text",
                weight=1,
                name="Zoning map border code",
                description=""" """)
             ),
            ("sanborn", OBSColumn(
                type="Text",
                weight=1,
                name="Sanborn map number",
                description=""" """)
             ),
            ("taxmap", OBSColumn(
                type="Text",
                weight=1,
                name="Department of Finance tax map volume number",
                description=""" """)
             ),
            ("edesignum", OBSColumn(
                type="text",
                weight=1,
                name="E-Designation number",
                description="")),
            ("appbbl", OBSColumn(
                type="text",
                weight=1,
                name="Pre-apportionment BBL",
                description="""The originating Borough, Tax Block and Tax Lot
                from the apportionment prior to the merge, split or property's
                conversion to a condominium. The Apportionment BBL is only
                available for mergers, splits and conversions since 1984""")),
            ("appdate", OBSColumn(
                type="text",
                weight=1,
                name="Apportionment date",
                description="The date of the Apportionment in the format MM/DD/YYYY.")),
            ("plutomapid", OBSColumn(
                type="Numeric",
                weight=1,
                name="PLUTO Map ID",
                description="")),
            ("version", OBSColumn(
                type="text",
                weight=1,
                name="MapPLUTO version number",
                description="")),
            ("mappluto_f", OBSColumn(
                type="text",
                weight=1,
                name="MapPLUTO Flag",
                description="""The Department of Finance's DTM handles
                condominium lots differently from many other MapPLUTO sources.
                The DTM Tax Lot Polygon feature class uses the base borough
                - block - lot (BBL) as the unique identifier of a parcel
                currently occupied by a condominium. The Department of City
                Planning and some of the other data sources for MapPLUTO use
                the billing bbl for condominiums. Therefore, in creating
                MapPLUTO from DTM, DCP has had to reassign the billing bbl as
                the primary key for condominium tax parcels. In most cases,
                there is one to one relationship between the DTM's base bbl and
                MapPLUTO's billing bbl. In some cases, further processing has
                been necessary. In a very few cases, non - condominium tax
                lots have also been modified. All of these cases are identified
                in the MapPluto Flag field. The data type for MapPLUTO flag is
                a number, each number represents how the base bbl is
                reassigned.""")),
            #("shape_leng", OBSColumn(
            #    type="NUmeric",
            #    name="",
            #    description="")),
            #("shape_area", OBSColumn(
            #    type="Numeric",
            #    name="",
            #    description="")),
        ])
        return cols

    def tags(self, input_, col_key, col):
        """Apply the shared NYC section tag to every column."""
        return input_['tags']['nyc']
class MapPLUTO(TableTask):
    """Union of the five per-borough MapPLUTO tmp tables into one OBS table,
    uniquely indexed by BBL (borough/block/lot)."""

    # PLUTO release identifier; prefixed with '20' to build the timespan
    # (see table_timespan), so presumably e.g. '16v2' -> '2016v2' -- TODO confirm.
    release = Parameter()

    def version(self):
        # Task version; bump to force a rebuild of the output table.
        return 3

    # TODO: https://github.com/CartoDB/bigmetadata/issues/435
    def targets(self):
        """Declare the single output table; its geometry column is a geom ref."""
        return {
            OBSTable(id='.'.join([self.schema(), self.name()])): GEOM_REF,
        }

    def requires(self):
        """One tmp table per borough plus the three column-definition tasks."""
        data = {}
        for borough in ('bx', 'bk', 'mn', 'qn', 'si'):
            data[borough] = MapPLUTOTmpTable(borough=borough, release=self.release)
        return {
            'data': data,
            'pluto_columns': MapPLUTOColumns(),
            'nyc_columns': NYCColumns(),
            'poi_columns': POIColumns(),
        }

    def columns(self):
        """Map output column names to column targets drawn from the inputs.

        NOTE: entry order matters -- populate() inserts by position via
        ``SELECT {incols}``, so this ordering must match the source tables.
        """
        input_ = self.input()
        poi = input_['poi_columns']
        nyc = input_['nyc_columns']
        pluto = input_['pluto_columns']
        return OrderedDict([
            ("borough", nyc["borough"]),
            ("block", nyc["block"]),
            ("lot", nyc["lot"]),
            ("cd", nyc["cd"]),
            ("ct2010", nyc["ct2010"]),
            ("cb2010", nyc["cb2010"]),
            ("schooldist", nyc["schooldist"]),
            ("council", nyc["council"]),
            ("zipcode", poi["postal_code"]),
            ("firecomp", nyc["firecomp"]),
            ("policeprct", nyc["policeprct"]),
            ("healtharea", nyc["healtharea"]),
            ("sanitboro", nyc["sanitboro"]),
            ("sanitdistr", nyc["sanitdistr"]),
            ("sanitsub", nyc["sanitsub"]),
            ("address", poi["address"]),
            ("zonedist1", pluto["zonedist1"]),
            ("zonedist2", pluto["zonedist2"]),
            ("zonedist3", pluto["zonedist3"]),
            ("zonedist4", pluto["zonedist4"]),
            ("overlay1", pluto["overlay1"]),
            ("overlay2", pluto["overlay2"]),
            ("spdist1", pluto["spdist1"]),
            ("spdist2", pluto["spdist2"]),
            ("spdist3", pluto["spdist3"]),
            ("ltdheight", pluto["ltdheight"]),
            ("splitzone", pluto["splitzone"]),
            ("bldgclass", pluto["bldgclass"]),
            ("landuse", pluto["landuse"]),
            ("easements", pluto["easements"]),
            ("ownertype", pluto["ownertype"]),
            ("ownername", pluto["ownername"]),
            ("lotarea", pluto["lotarea"]),
            ("bldgarea", pluto["bldgarea"]),
            ("comarea", pluto["comarea"]),
            ("resarea", pluto["resarea"]),
            ("officearea", pluto["officearea"]),
            ("retailarea", pluto["retailarea"]),
            ("garagearea", pluto["garagearea"]),
            ("strgearea", pluto["strgearea"]),
            ("factryarea", pluto["factryarea"]),
            ("otherarea", pluto["otherarea"]),
            ("areasource", pluto["areasource"]),
            ("numbldgs", pluto["numbldgs"]),
            ("numfloors", pluto["numfloors"]),
            ("unitsres", pluto["unitsres"]),
            ("unitstotal", pluto["unitstotal"]),
            ("lotfront", pluto["lotfront"]),
            ("lotdepth", pluto["lotdepth"]),
            ("bldgfront", pluto["bldgfront"]),
            ("bldgdepth", pluto["bldgdepth"]),
            ("ext", pluto["ext"]),
            ("proxcode", pluto["proxcode"]),
            ("irrlotcode", pluto["irrlotcode"]),
            ("lottype", pluto["lottype"]),
            ("bsmtcode", pluto["bsmtcode"]),
            ("assessland", pluto["assessland"]),
            ("assesstot", pluto["assesstot"]),
            ("exemptland", pluto["exemptland"]),
            ("exempttot", pluto["exempttot"]),
            ("yearbuilt", pluto["yearbuilt"]),
            ("yearalter1", pluto["yearalter1"]),
            ("yearalter2", pluto["yearalter2"]),
            ("histdist", pluto["histdist"]),
            ("landmark", pluto["landmark"]),
            ("builtfar", pluto["builtfar"]),
            ("residfar", pluto["residfar"]),
            ("commfar", pluto["commfar"]),
            ("facilfar", pluto["facilfar"]),
            ("borocode", nyc["borocode"]),
            ("bbl", nyc["bbl"]),
            ("condono", pluto["condono"]),
            ("tract2010", nyc["tract2010"]),
            ("xcoord", pluto["xcoord"]),
            ("ycoord", pluto["ycoord"]),
            ("zonemap", pluto["zonemap"]),
            ("zmcode", pluto["zmcode"]),
            ("sanborn", pluto["sanborn"]),
            ("taxmap", pluto["taxmap"]),
            ("edesignum", pluto["edesignum"]),
            ("appbbl", pluto["appbbl"]),
            ("appdate", pluto["appdate"]),
            ("plutomapid", pluto["plutomapid"]),
            ("version", pluto["version"]),
            ("mappluto_f", pluto["mappluto_f"]),
            #("shape_leng", pluto["shape_leng"]),
            #("shape_area", pluto["shape_area"]),
            ("wkb_geometry", nyc["parcel"]),
        ])

    def table_timespan(self):
        # Release strings omit the century ('20' prefix added here).
        return get_timespan('20' + self.release)

    def populate(self):
        """Insert every borough's rows into the output, casting each source
        column to its declared OBS column type, then enforce BBL uniqueness."""
        input_ = self.input()
        session = current_session()
        # Positional cast list; order must match columns() exactly.
        incols = ['"{}"::{}'.format(colname, col.get(session).type)
                  for colname, col in self.columns().items()]
        for borough, data in self.input()['data'].items():
            session.execute('''
                INSERT INTO {output}
                SELECT {incols} FROM {intable}
            '''.format(
                output=self.output().table,
                intable=data.table,
                incols=', '.join(incols)
            ))
        # BBL is the parcel primary key; duplicates would indicate a bad merge.
        session.execute('''create unique index on {output} (bbl)'''.format(
            output=self.output().table
        ))
class MapPLUTOAll(WrapperTask):
    """Wrapper task that fans MapPLUTO out over every known release."""

    def requires(self):
        """Yield one MapPLUTO dependency per entry in RELEASES."""
        return (MapPLUTO(release=rel) for rel in RELEASES)
|
{
"content_hash": "3d4f350f1bff2c7163b80412e196cbe8",
"timestamp": "",
"source": "github",
"line_count": 896,
"max_line_length": 113,
"avg_line_length": 46.338169642857146,
"alnum_prop": 0.46060839615597676,
"repo_name": "CartoDB/bigmetadata",
"id": "5aaebd3a069760704aa8079fb7d556b287ad9ba2",
"size": "41519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/us/ny/nyc/dcp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143"
},
{
"name": "Dockerfile",
"bytes": "2305"
},
{
"name": "HTML",
"bytes": "19058"
},
{
"name": "JavaScript",
"bytes": "5864"
},
{
"name": "Makefile",
"bytes": "27552"
},
{
"name": "PLpgSQL",
"bytes": "32699"
},
{
"name": "Python",
"bytes": "2967442"
},
{
"name": "Shell",
"bytes": "11590"
}
],
"symlink_target": ""
}
|
import asyncio
import builtins
import io
import logging
import pytest
from mitmproxy.addons import termlog
from mitmproxy.test import taddons
from mitmproxy.utils import vt_codes
@pytest.fixture(autouse=True)
def ensure_cleanup():
    """After each test, verify no TermLogHandler was left on the root logger."""
    yield
    leftovers = [
        h for h in logging.root.handlers
        if isinstance(h, termlog.TermLogHandler)
    ]
    assert not leftovers
async def test_delayed_teardown():
    """done() must defer handler removal until the event loop runs once."""
    t = termlog.TermLog()
    t.done()
    # Removal is scheduled, not immediate: handler still installed here.
    assert t.logger in logging.root.handlers
    # One trip through the event loop lets the scheduled teardown run.
    await asyncio.sleep(0)
    assert t.logger not in logging.root.handlers
def test_output(capsys):
    """termlog_verbosity filters records below the threshold from terminal output."""
    logging.getLogger().setLevel(logging.DEBUG)
    addon = termlog.TermLog()
    with taddons.context(addon) as ctx:
        ctx.options.termlog_verbosity = "info"
        ctx.configure(addon)
        logging.info("one")
        logging.debug("two")
        logging.warning("three")
        logging.error("four")
    captured = capsys.readouterr()
    # "two" is DEBUG, below the configured "info" threshold.
    for visible in ("one", "three", "four"):
        assert visible in captured.out
    assert "two" not in captured.out
    addon.done()
async def test_styling(monkeypatch) -> None:
    """Warnings are wrapped in ANSI color codes when VT codes are supported."""
    # Force color support regardless of the actual stream's capabilities.
    monkeypatch.setattr(vt_codes, "ensure_supported", lambda _: True)
    f = io.StringIO()
    t = termlog.TermLog(out=f)
    with taddons.context(t) as tctx:
        tctx.configure(t)
        logging.warning("hello")
    # \x1b[33m ... \x1b[0m: color code, message, then reset.
    assert "\x1b[33mhello\x1b[0m" in f.getvalue()
    t.done()
async def test_cannot_print(monkeypatch) -> None:
    """If stdout is unusable (print raises OSError), the addon exits with code 1."""
    def broken_print(*args, **kwargs):
        raise OSError

    monkeypatch.setattr(builtins, "print", broken_print)
    addon = termlog.TermLog()
    with taddons.context(addon) as tctx:
        tctx.configure(addon)
        with pytest.raises(SystemExit) as exc_info:
            logging.info("Should not log this, but raise instead")
        assert exc_info.value.args[0] == 1
    addon.done()
|
{
"content_hash": "f97ae4cdc99c78e065feb7e68d304da5",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 69,
"avg_line_length": 23.906666666666666,
"alnum_prop": 0.6525376464026771,
"repo_name": "mitmproxy/mitmproxy",
"id": "62573e218cf288fac458a30ecc985b4ce4489a75",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/mitmproxy/addons/test_termlog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2374404"
},
{
"name": "Shell",
"bytes": "3013"
},
{
"name": "TypeScript",
"bytes": "279000"
}
],
"symlink_target": ""
}
|
import os
import unittest
from mako import util
from test import eq_
class UtilTest(unittest.TestCase):
    """Tests for mako.util buffer and file helpers."""

    def test_fast_buffer_write(self):
        # Successive writes are concatenated in order.
        buf = util.FastEncodingBuffer()
        buf.write("string a ")
        buf.write("string b")
        eq_(buf.getvalue(), "string a string b")

    def test_fast_buffer_truncate(self):
        # truncate() discards everything written so far.
        buf = util.FastEncodingBuffer()
        buf.write("string a ")
        buf.write("string b")
        buf.truncate()
        buf.write("string c ")
        buf.write("string d")
        eq_(buf.getvalue(), "string c string d")

    def test_fast_buffer_encoded(self):
        # With an encoding set, getvalue() returns encoded bytes.
        s = u"drôl m’a rée « S’il"
        buf = util.FastEncodingBuffer(encoding='utf-8')
        buf.write(s[0:10])
        buf.write(s[10:])
        # FIX: removed unused local `q = buf.getvalue()`.
        eq_(buf.getvalue(), s.encode('utf-8'))

    def test_read_file(self):
        fn = os.path.join(os.path.dirname(__file__), 'test_util.py')
        data = util.read_file(fn, 'rb')
        # FIX: failUnless is a deprecated alias; assertTrue is the
        # supported spelling (available since Python 2.7).
        self.assertTrue('test_util' in str(data))  # str() for py3k

    def test_load_module(self):
        fn = os.path.join(os.path.dirname(__file__), 'test_util.py')
        module = util.load_module('mako.template', fn)
        import mako.template
        self.assertEqual(module, mako.template)
|
{
"content_hash": "f8e5ab6943bf2ae7cebd9fa6cea54bd0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.5886524822695035,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "5c4f95fde6e22b9e4029ac5198d727b1ddc27378",
"size": "1301",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Mako-0.7.2/test/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
from scinet3.modellist import (DocumentList, KeywordList)
from collections import Counter
@contextmanager
def evaluation_manager(desired_docs, desired_kws, session):
evaluator = GoalBasedEvaluator()
evaluator.setGoal(desired_docs, desired_kws)
yield
eval_results = evaluator.evaluate(session.recom_docs, session.recom_kws)
print "Evaluation results:"
print "for docs:"
print eval_results[0] #closeness of docs
print "for kws:"
print eval_results[1] #closeness of kws
displayed_docs = set([doc
for doc_list in session.recom_docs
for doc in doc_list])
target_doc_occurrences = filter(lambda doc: doc in evaluator.desired_docs,
displayed_docs)
print "%d / %d precision" %(len(target_doc_occurrences), len(displayed_docs))
print "%d / %d recall" %(len(target_doc_occurrences), len(evaluator.desired_docs))
displayed_kws = set([kw
for kw_list in session.recom_kws
for kw in kw_list])
print displayed_kws
target_kw_occurrences = filter(lambda kw: kw in evaluator.desired_kws,
displayed_kws)
print "precision: %d / %d = %f" %(len(target_kw_occurrences), len(displayed_kws),
len(target_kw_occurrences) / float(len(displayed_kws)))
print "recall: %d / %d = %f" %(len(target_kw_occurrences), len(evaluator.desired_kws),
len(target_kw_occurrences) / len(evaluator.desired_kws))
class GoalBasedEvaluator(object):
    """Scores recommendation history by its similarity to a fixed goal
    (a desired DocumentList and KeywordList)."""

    def setGoal(self, docs, kws):
        """Record the target documents and keywords.

        docs: DocumentList, the documents desirable
        kws: KeywordList, the keywords desirable
        """
        assert type(docs) is DocumentList, "docs should be DocumentList, but is %r" %docs
        assert type(kws) is KeywordList, "kws should be KeywordList, but is %r" %kws
        assert len(docs) > 0, "target_docs shouldn't be empty"
        assert len(kws) > 0, "target_kws shouldn't be empty"
        self.desired_docs = docs
        self.desired_kws = kws

    def evaluate(self, recom_doc_history, recom_kw_history):
        """Score each iteration of recommendations against the goal.

        Param:
            recom_doc_history: list of DocumentList, documents recommended
                per iteration, from newest to oldest
            recom_kw_history: list of KeywordList, keywords recommended
                per iteration, from newest to oldest
        Return:
            (doc_scores, kw_scores): two lists of floats, one similarity
            score per iteration, newest first.
        """
        doc_scores = [self.desired_docs.similarity_to(docs)
                      for docs in recom_doc_history]
        kw_scores = [self.desired_kws.similarity_to(kws)
                     for kws in recom_kw_history]
        return (doc_scores, kw_scores)
|
{
"content_hash": "0dd2bcc574ab9e7dcb45f6513583b2d4",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 118,
"avg_line_length": 37.607142857142854,
"alnum_prop": 0.6096866096866097,
"repo_name": "xiaohan2012/rl-search",
"id": "fce813e0d90e1ce7aee860338fd7c6262767b4a1",
"size": "3257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/ir_eval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272"
},
{
"name": "Java",
"bytes": "2014"
},
{
"name": "JavaScript",
"bytes": "36144"
},
{
"name": "Python",
"bytes": "175298"
},
{
"name": "Shell",
"bytes": "576"
},
{
"name": "TeX",
"bytes": "11128"
}
],
"symlink_target": ""
}
|
import logging
from django.core.management.base import NoArgsCommand
import amo
from mkt.constants.regions import WORLDWIDE
log = logging.getLogger('z.task')
class Command(NoArgsCommand):
    help = 'Migrate free apps without a world AER to enable_new_regions=True.'

    def handle_noargs(self, *args, **options):
        """Flip enable_new_regions on for free apps with no worldwide exclusion."""
        # Imported here to avoid an import error at module load time.
        from mkt.webapps.models import AddonExcludedRegion as AER, Webapp

        # Apps excluded worldwide have opted out of new regions; skip them.
        opted_out = (AER.objects.filter(region=WORLDWIDE.id)
                     .values_list('addon', flat=True))
        free_apps = (Webapp.objects.filter(premium_type=amo.ADDON_FREE)
                     .exclude(id__in=opted_out))
        for app in free_apps.iterator():
            log.info('[App %s] Updated to have '
                     'enable_new_regions=True' % app.pk)
            app.update(enable_new_regions=True)
|
{
"content_hash": "89dcabb2de613a9e493a1f14f7fb415e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 34.75,
"alnum_prop": 0.6310380267214799,
"repo_name": "Joergen/zamboni",
"id": "92e5f30574bcfa04f3c8e2fcad82a289bcf9d010",
"size": "973",
"binary": false,
"copies": "1",
"ref": "refs/heads/uge43",
"path": "mkt/developers/management/commands/migrate_free_apps_without_worldwide_aer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "608838"
},
{
"name": "JavaScript",
"bytes": "1750529"
},
{
"name": "Perl",
"bytes": "565"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6063534"
},
{
"name": "Ruby",
"bytes": "1865"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
import collections
from neutron_lib import constants as lib_const
from neutron_lib.plugins.ml2 import ovs_constants as ovs_consts
from neutron_lib.services.logapi import constants as log_const
from os_ken.base import app_manager
from os_ken.lib.packet import packet
from oslo_config import cfg
from oslo_log import formatters
from oslo_log import handlers
from oslo_log import log as logging
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.services.logapi.agent import log_extension as log_ext
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.drivers.openvswitch import log_oskenapp
LOG = logging.getLogger(__name__)
OVS_FW_TO_LOG_TABLES = {
ovs_consts.RULES_EGRESS_TABLE: ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
ovs_consts.RULES_INGRESS_TABLE: ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
}
FIELDS_TO_REMOVE = ['priority', 'actions', 'dl_type',
'reg_port', 'reg_remote_group']
REMOTE_RULE_PRIORITY = 70
def setup_logging():
    """Attach a network-log handler to LOG: watched file, journal, or syslog.

    Preference order: a configured local log file wins; otherwise the
    systemd journal if enabled; otherwise plain syslog.
    """
    log_file = cfg.CONF.network_log.local_output_log_base
    if not log_file:
        if cfg.CONF.use_journal:
            LOG.logger.addHandler(handlers.OSJournalHandler())
        else:
            LOG.logger.addHandler(handlers.OSSysLogHandler())
        return

    # pylint: disable=import-outside-toplevel
    from logging import handlers as watch_handler
    file_handler = watch_handler.WatchedFileHandler(log_file)
    level = logging.DEBUG if cfg.CONF.debug else logging.INFO
    file_handler.setLevel(level)
    LOG.logger.addHandler(file_handler)
    file_handler.setFormatter(
        formatters.ContextFormatter(
            fmt=cfg.CONF.logging_default_format_string,
            datefmt=cfg.CONF.log_date_format))
def find_deleted_sg_rules(old_port, new_ports):
    """Return old_port's security-group rules missing from its updated twin.

    Scans new_ports for the port sharing old_port's id; on the first match,
    collects every rule of old_port that the updated port no longer carries.
    Returns an empty list when the port is absent from new_ports.
    """
    for candidate in new_ports:
        if candidate.id != old_port.id:
            continue
        return [rule for rule in old_port.secgroup_rules
                if rule not in candidate.secgroup_rules]
    return []
class Cookie(object):
    """Bookkeeping for one OVS flow cookie: a (port, action) pair, its
    project, and the set of log objects that still reference it."""

    def __init__(self, cookie_id, port, action, project):
        self.id = cookie_id
        self.port = port
        self.action = action
        self.project = project
        self.log_object_refs = set()

    def __eq__(self, other):
        # Cookie identity is (id, action, port); project is not compared.
        return ((self.id, self.action, self.port) ==
                (other.id, other.action, other.port))

    def __hash__(self):
        return hash(self.id)

    def add_log_obj_ref(self, log_id):
        """Register a log object as referencing this cookie."""
        self.log_object_refs.add(log_id)

    def remove_log_obj_ref(self, log_id):
        """Drop a log object reference; silently ignores unknown ids."""
        self.log_object_refs.discard(log_id)

    @property
    def is_empty(self):
        """True once no log object references this cookie any more."""
        return not self.log_object_refs
class OFPortLog(object):
    """Snapshot of a port's logging-relevant state: OF port number,
    normalized security-group rules, and the log event kind."""

    def __init__(self, port, ovs_port, log_event):
        self.id = port['port_id']
        self.ofport = ovs_port.ofport
        self.secgroup_rules = [
            self._update_rule(rule)
            for rule in port['security_group_rules']
        ]
        # event can be ALL, DROP and ACCEPT
        self.event = log_event

    def _update_rule(self, rule):
        """Normalize the rule's protocol to a numeric IP protocol in place."""
        protocol = rule.get('protocol')
        if protocol is None:
            return rule
        if not isinstance(protocol, int) and protocol.isdigit():
            rule['protocol'] = int(protocol)
        elif (rule.get('ethertype') == lib_const.IPv6 and
                protocol == lib_const.PROTO_NAME_ICMP):
            # 'icmp' on an IPv6 rule really means ICMPv6.
            rule['protocol'] = lib_const.PROTO_NUM_IPV6_ICMP
        else:
            rule['protocol'] = lib_const.IP_PROTOCOL_MAP.get(
                protocol, protocol)
        return rule
class OVSFirewallLoggingDriver(log_ext.LoggingDriver):
    """Security-group logging driver for the OVS firewall.

    Installs flows whose 'controller' action sends the first packet of a
    logged flow to this agent; packet-ins are matched back to a Cookie
    (port, action, referencing log objects) and emitted via LOG.
    """

    SUPPORTED_LOGGING_TYPES = ['security_group']
    REQUIRED_PROTOCOLS = [
        ovs_consts.OPENFLOW13,
        ovs_consts.OPENFLOW14,
    ]

    def __init__(self, agent_api):
        integration_bridge = agent_api.request_int_br()
        self.int_br = self.initialize_bridge(integration_bridge)
        # When True, flow add/delete calls are batched until defer_apply_off.
        self._deferred = False
        # log_id -> set of OFPortLog. NOTE(review): default_factory is dict,
        # but _create_security_group_log always replaces the value with a
        # set() before use -- the dict default appears to be unused.
        self.log_ports = collections.defaultdict(dict)
        self.cookies_table = set()
        self.cookie_ids_to_delete = set()
        self.conj_id_map = ovsfw.ConjIdMap(self.int_br.br)

    def initialize(self, resource_rpc, **kwargs):
        """Hook up the RPC client, logging output, and the OSKen packet-in app."""
        self.resource_rpc = resource_rpc
        setup_logging()
        self.start_logapp()

    @staticmethod
    def initialize_bridge(bridge):
        """Enable required OF protocols and packet-in rate limits; return a
        deferred (batching) wrapper around the bridge."""
        bridge.add_protocols(*OVSFirewallLoggingDriver.REQUIRED_PROTOCOLS)
        # set rate limit and burst limit for controller
        bridge.set_controller_rate_limit(cfg.CONF.network_log.rate_limit)
        bridge.set_controller_burst_limit(cfg.CONF.network_log.burst_limit)
        return bridge.deferred(full_ordered=True)

    def start_logapp(self):
        """Start the OSKen app and register ourselves for packet-in events."""
        app_mgr = app_manager.AppManager.get_instance()
        self.log_app = app_mgr.instantiate(log_oskenapp.OVSLogOSKenApp)
        self.log_app.start()
        self.log_app.register_packet_in_handler(self.packet_in_handler)

    def packet_in_handler(self, ev):
        """Emit a log line for a packet-in, resolved via its flow cookie."""
        msg = ev.msg
        cookie_id = msg.cookie
        pkt = packet.Packet(msg.data)
        try:
            cookie_entry = self._get_cookie_by_id(cookie_id)
            LOG.info("action=%s project_id=%s log_resource_ids=%s vm_port=%s "
                     "pkt=%s", cookie_entry.action, cookie_entry.project,
                     list(cookie_entry.log_object_refs),
                     cookie_entry.port, pkt)
        except log_exc.CookieNotFound:
            # Can happen for packets in flight while flows are being removed.
            LOG.warning("Unknown cookie=%s packet_in pkt=%s", cookie_id, pkt)

    def defer_apply_on(self):
        """Begin batching flow changes."""
        self._deferred = True

    def defer_apply_off(self):
        """Flush batched flow changes, then release retired cookie ids."""
        if self._deferred:
            self.int_br.apply_flows()
            self._cleanup_cookies()
            self._deferred = False

    def _get_cookie(self, port_id, action):
        """Return the cookie for (port_id, action), or None if absent."""
        for cookie in self.cookies_table:
            if cookie.port == port_id and cookie.action == action:
                return cookie

    def _get_cookies_by_port(self, port_id):
        """Return every cookie registered for the given port."""
        cookies_list = []
        for cookie in self.cookies_table:
            if cookie.port == port_id:
                cookies_list.append(cookie)
        return cookies_list

    def _get_cookie_by_id(self, cookie_id):
        """Resolve a cookie by id; raises CookieNotFound if unknown."""
        for cookie in self.cookies_table:
            # String comparison: packet-in delivers the cookie as an int.
            if str(cookie.id) == str(cookie_id):
                return cookie
        raise log_exc.CookieNotFound(cookie_id=cookie_id)

    def _cleanup_cookies(self):
        """Release cookie ids scheduled for deletion back to the bridge."""
        cookie_ids = self.cookie_ids_to_delete
        self.cookie_ids_to_delete = set()
        for cookie_id in cookie_ids:
            self.int_br.br.unset_cookie(cookie_id)

    def generate_cookie(self, port_id, action, log_id, project_id):
        """Return the cookie id for (port, action), creating one on demand
        and recording log_id as a referent."""
        cookie = self._get_cookie(port_id, action)
        if not cookie:
            cookie_id = self.int_br.br.request_cookie()
            cookie = Cookie(cookie_id=cookie_id, port=port_id,
                            action=action, project=project_id)
            self.cookies_table.add(cookie)
        cookie.add_log_obj_ref(log_id)
        return cookie.id

    def _schedule_cookie_deletion(self, cookie):
        """Forget the cookie now; its id is released on the next flush."""
        # discard a cookie object
        self.cookies_table.remove(cookie)
        # schedule to cleanup cookie_ids later
        self.cookie_ids_to_delete.add(cookie.id)

    def start_logging(self, context, **kwargs):
        """Begin logging for a port or for a batch of log resources."""
        LOG.debug("start logging: %s", str(kwargs))
        for resource_type in self.SUPPORTED_LOGGING_TYPES:
            # handle port updated, agent restarted
            if 'port_id' in kwargs:
                self._handle_logging('_create', context,
                                     resource_type, **kwargs)
            else:
                self._handle_log_resources_by_type(
                    '_create', context, resource_type, **kwargs)

    def stop_logging(self, context, **kwargs):
        """Stop logging for a port or for a batch of log resources."""
        LOG.debug("stop logging: %s", str(kwargs))
        for resource_type in self.SUPPORTED_LOGGING_TYPES:
            # handle port deleted
            if 'port_id' in kwargs:
                self._handle_logging('_delete', context,
                                     resource_type, **kwargs)
            else:
                self._handle_log_resources_by_type(
                    '_delete', context, resource_type, **kwargs)

    def _handle_log_resources_by_type(
            self, action, context, resource_type, **kwargs):
        """Filter log_resources down to resource_type, then dispatch."""
        log_resources = []
        for log_obj in kwargs.get('log_resources', []):
            if log_obj['resource_type'] == resource_type:
                log_resources.append(log_obj)
        if log_resources:
            self._handle_logging(
                action, context, resource_type, log_resources=log_resources)

    def _handle_logging(self, action, context, resource_type, **kwargs):
        # Dispatch to e.g. _create_security_group_log / _delete_security_group_log.
        handler_name = "%s_%s_log" % (action, resource_type)
        handler = getattr(self, handler_name)
        handler(context, **kwargs)

    def create_ofport_log(self, port, log_id, log_event):
        """Record an OFPortLog for the port if it is wired into the bridge."""
        port_id = port['port_id']
        ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
        if ovs_port:
            of_port_log = OFPortLog(port, ovs_port, log_event)
            self.log_ports[log_id].add(of_port_log)

    def _create_security_group_log(self, context, **kwargs):
        """Fetch log info (per port or per log resource) and install flows,
        removing flows for any rules that have since been deleted."""
        port_id = kwargs.get('port_id')
        log_resources = kwargs.get('log_resources')
        logs_info = []
        if port_id:
            # try to clean port flows log for port updated/create event
            self._cleanup_port_flows_log(port_id)
            logs_info = self.resource_rpc.get_sg_log_info_for_port(
                context,
                resource_type=log_const.SECURITY_GROUP,
                port_id=port_id)
        elif log_resources:
            logs_info = self.resource_rpc.get_sg_log_info_for_log_resources(
                context,
                resource_type=log_const.SECURITY_GROUP,
                log_resources=log_resources)
        for log_info in logs_info:
            log_id = log_info['id']
            # Keep the previous snapshot to diff deleted SG rules against.
            old_ofport_logs = self.log_ports.get(log_id, [])
            ports = log_info.get('ports_log')
            self.log_ports[log_id] = set()
            for port in ports:
                self.create_ofport_log(port, log_id, log_info.get('event'))
            # try to clean flows log if sg_rules are deleted
            for port in old_ofport_logs:
                del_rules = find_deleted_sg_rules(
                    port, self.log_ports[log_id])
                if del_rules:
                    self._delete_sg_rules_flow_log(port, del_rules)
            for port_log in self.log_ports[log_id]:
                self.add_flows_from_rules(port_log, log_info)

    def _cleanup_port_flows_log(self, port_id):
        """Remove all logging flows for a port and retire its cookies."""
        cookies_list = self._get_cookies_by_port(port_id)
        for cookie in cookies_list:
            if cookie.action == log_const.ACCEPT_EVENT:
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
            if cookie.action == log_const.DROP_EVENT:
                self._delete_flows(
                    table=ovs_consts.DROPPED_TRAFFIC_TABLE,
                    cookie=cookie.id)
            self._schedule_cookie_deletion(cookie)

    def _delete_security_group_log(self, context, **kwargs):
        """Remove logging flows on port deletion or log-resource deletion."""
        # port deleted event
        port_id = kwargs.get('port_id')
        if port_id:
            self._cleanup_port_flows_log(port_id)
        # log resources deleted events
        for log_resource in kwargs.get('log_resources', []):
            log_id = log_resource.get('id')
            of_port_logs = self.log_ports.get(log_id, [])
            for of_port_log in of_port_logs:
                self.delete_port_flows_log(of_port_log, log_id)

    def _log_accept_flow(self, **flow):
        """Install an accept-logging flow in the table paired with the rule's."""
        # log first accepted packet
        flow['table'] = OVS_FW_TO_LOG_TABLES[flow['table']]
        flow['actions'] = 'controller'
        # forward egress accepted packet and log
        if flow['table'] == ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE:
            flow['actions'] = 'resubmit(,%d),controller' % (
                ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
        self._add_flow(**flow)

    def _add_flow(self, **kwargs):
        """Add a flow, honoring deferred batching; normalizes register
        fields and hex-formats a numeric dl_type."""
        dl_type = kwargs.get('dl_type')
        ovsfw.create_reg_numbers(kwargs)
        if isinstance(dl_type, int):
            kwargs['dl_type'] = "0x{:04x}".format(dl_type)
        LOG.debug("Add flow firewall log %s", str(kwargs))
        if self._deferred:
            self.int_br.add_flow(**kwargs)
        else:
            self.int_br.br.add_flow(**kwargs)

    def _delete_flows(self, **kwargs):
        """Delete flows, honoring deferred batching."""
        ovsfw.create_reg_numbers(kwargs)
        if self._deferred:
            self.int_br.delete_flows(**kwargs)
        else:
            self.int_br.br.delete_flows(**kwargs)

    def _log_drop_packet(self, port, log_id, project_id):
        """Install the flow that sends dropped packets to the controller."""
        cookie = self.generate_cookie(port.id, log_const.DROP_EVENT,
                                      log_id, project_id)
        self._add_flow(
            cookie=cookie,
            table=ovs_consts.DROPPED_TRAFFIC_TABLE,
            priority=53,
            reg_port=port.ofport,
            actions='controller'
        )

    def create_rules_generator_for_port(self, port):
        """Yield the port's security-group rules one at a time."""
        for rule in port.secgroup_rules:
            yield rule

    def _create_conj_flows_log(self, remote_rule, port):
        """Build the flow template logging a remote-group/address-group rule."""
        ethertype = remote_rule['ethertype']
        direction = remote_rule['direction']
        remote_sg_id = remote_rule.get('remote_group_id')
        remote_ag_id = remote_rule.get('remote_address_group_id')
        secgroup_id = remote_rule['security_group_id']
        # we only want to log first accept packet, that means a packet with
        # ct_state=+new-est, reg_remote_group=conj_id + 1 will be logged
        flow_template = {
            'priority': REMOTE_RULE_PRIORITY,
            'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
            'reg_port': port.ofport,
            'reg_remote_group': self.conj_id_map.get_conj_id(
                secgroup_id, remote_sg_id or remote_ag_id,
                direction, ethertype) + 1,
        }
        if direction == lib_const.INGRESS_DIRECTION:
            flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
        elif direction == lib_const.EGRESS_DIRECTION:
            flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
        return [flow_template]

    def _log_accept_packet(self, port, log_id, project_id):
        """Install accept-logging flows for every SG rule on the port."""
        cookie = self.generate_cookie(port.id, log_const.ACCEPT_EVENT,
                                      log_id, project_id)
        for rule in self.create_rules_generator_for_port(port):
            if 'remote_group_id' in rule or 'remote_address_group_id' in rule:
                flows = self._create_conj_flows_log(rule, port)
            else:
                flows = rules.create_flows_from_rule_and_port(rule, port)
            for flow in flows:
                flow['cookie'] = cookie
                self._log_accept_flow(**flow)

    def add_flows_from_rules(self, port, log_info):
        """Install the flows matching the log resource's event kind."""
        # log event can be ACCEPT or DROP or ALL(both ACCEPT and DROP)
        event = log_info['event']
        project_id = log_info['project_id']
        log_id = log_info['id']
        if event == log_const.ACCEPT_EVENT:
            self._log_accept_packet(port, log_id, project_id)
        elif event == log_const.DROP_EVENT:
            self._log_drop_packet(port, log_id, project_id)
        else:
            self._log_drop_packet(port, log_id, project_id)
            self._log_accept_packet(port, log_id, project_id)

    def _delete_accept_flows_log(self, port, log_id):
        """Drop log_id's reference to the accept cookie; remove flows and
        retire the cookie once nothing references it."""
        cookie = self._get_cookie(port.id, log_const.ACCEPT_EVENT)
        if cookie:
            cookie.remove_log_obj_ref(log_id)
            if cookie.is_empty:
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._schedule_cookie_deletion(cookie)

    def _delete_drop_flows_log(self, port, log_id):
        """Drop log_id's reference to the drop cookie; remove flows and
        retire the cookie once nothing references it."""
        cookie = self._get_cookie(port.id, log_const.DROP_EVENT)
        if cookie:
            cookie.remove_log_obj_ref(log_id)
            if cookie.is_empty:
                self._delete_flows(table=ovs_consts.DROPPED_TRAFFIC_TABLE,
                                   cookie=cookie.id)
                self._schedule_cookie_deletion(cookie)

    def delete_port_flows_log(self, port, log_id):
        """Delete all flows log for given port and log_id"""
        event = port.event
        if event == log_const.ACCEPT_EVENT:
            self._delete_accept_flows_log(port, log_id)
        elif event == log_const.DROP_EVENT:
            self._delete_drop_flows_log(port, log_id)
        else:
            self._delete_accept_flows_log(port, log_id)
            self._delete_drop_flows_log(port, log_id)

    def _delete_sg_rules_flow_log(self, port, del_rules):
        """Delete accept-logging flows for SG rules removed from the port."""
        cookie = self._get_cookie(port.id, log_const.ACCEPT_EVENT)
        if not cookie:
            return
        for rule in del_rules:
            if 'remote_group_id' in rule or 'remote_address_group_id' in rule:
                flows = self._create_conj_flows_log(rule, port)
            else:
                flows = rules.create_flows_from_rule_and_port(rule, port)
            for flow in flows:
                # Deletion matches on table+cookie only; strip add-only fields.
                for kw in FIELDS_TO_REMOVE:
                    flow.pop(kw, None)
                flow['table'] = OVS_FW_TO_LOG_TABLES[flow['table']]
                flow['cookie'] = cookie.id
                self._delete_flows(**flow)
|
{
"content_hash": "930db5d134483fe4915ac85e77f34f19",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 78,
"avg_line_length": 39.377969762419006,
"alnum_prop": 0.5857832382623958,
"repo_name": "openstack/neutron",
"id": "ff87f252db0e62030b3d59d9ded2a8b58aa93a54",
"size": "18867",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
}
|
from pyroute2.common import AddrPool
class TestAddrPool(object):
    """Unit tests for pyroute2.common.AddrPool.

    FIX: the exception-expecting tests previously used
    ``try: ... except KeyError: pass`` with no ``else`` clause, so they
    passed silently even when the expected KeyError was never raised.
    Each now fails explicitly if the exception does not occur.
    """

    def test_alloc_aligned(self):
        ap = AddrPool(minaddr=1, maxaddr=1024)
        for i in range(1024):
            ap.alloc()
        # Pool exhausted: the next alloc must raise.
        try:
            ap.alloc()
        except KeyError:
            pass
        else:
            raise AssertionError('alloc() on exhausted pool must raise KeyError')

    def test_alloc_odd(self):
        ap = AddrPool(minaddr=1, maxaddr=1020)
        for i in range(1020):
            ap.alloc()
        # Pool exhausted: the next alloc must raise.
        try:
            ap.alloc()
        except KeyError:
            pass
        else:
            raise AssertionError('alloc() on exhausted pool must raise KeyError')

    def test_reverse(self):
        # In reverse mode, addresses are handed out from the top down.
        ap = AddrPool(minaddr=1, maxaddr=1024, reverse=True)
        for i in range(512):
            assert ap.alloc() > ap.alloc()

    def test_free(self):
        ap = AddrPool(minaddr=1, maxaddr=1024)
        f = ap.alloc()
        ap.free(f)

    def test_free_fail(self):
        ap = AddrPool(minaddr=1, maxaddr=1024)
        # 0 is outside the pool; free() must reject it.
        try:
            ap.free(0)
        except KeyError:
            pass
        else:
            raise AssertionError('free() of unallocated address must raise KeyError')

    def test_free_reverse_fail(self):
        ap = AddrPool(minaddr=1, maxaddr=1024, reverse=True)
        try:
            ap.free(0)
        except KeyError:
            pass
        else:
            raise AssertionError('free() of unallocated address must raise KeyError')

    def test_locate(self):
        ap = AddrPool()
        f = ap.alloc()
        base1, bit1, is_allocated1 = ap.locate(f)
        base2, bit2, is_allocated2 = ap.locate(f + 1)
        assert base1 == base2
        assert bit2 == bit1 + 1
        assert is_allocated1
        assert not is_allocated2
        assert ap.allocated == 1

    def test_setaddr_allocated(self):
        ap = AddrPool()
        f = ap.alloc()
        base, bit, is_allocated = ap.locate(f + 1)
        assert not is_allocated
        assert ap.allocated == 1
        # Manually mark the neighbor as allocated, then release it again.
        ap.setaddr(f + 1, 'allocated')
        base, bit, is_allocated = ap.locate(f + 1)
        assert is_allocated
        assert ap.allocated == 2
        ap.free(f + 1)
        base, bit, is_allocated = ap.locate(f + 1)
        assert not is_allocated
        assert ap.allocated == 1

    def test_setaddr_free(self):
        ap = AddrPool()
        f = ap.alloc()
        base, bit, is_allocated = ap.locate(f + 1)
        assert not is_allocated
        assert ap.allocated == 1
        # setaddr(..., 'free') on an already-free address is a no-op.
        ap.setaddr(f + 1, 'free')
        base, bit, is_allocated = ap.locate(f + 1)
        assert not is_allocated
        assert ap.allocated == 1
        # Freeing via setaddr means a subsequent free() must fail.
        ap.setaddr(f, 'free')
        base, bit, is_allocated = ap.locate(f)
        assert not is_allocated
        assert ap.allocated == 0
        try:
            ap.free(f)
        except KeyError:
            pass
        else:
            raise AssertionError('double free must raise KeyError')
|
{
"content_hash": "ee52baa194e7b4c40a25feb3ae71f8f3",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 60,
"avg_line_length": 24.339805825242717,
"alnum_prop": 0.5273234942161946,
"repo_name": "nazarewk/pyroute2",
"id": "356bd31a5ccd675b839afea87ac22d57dbdcb37f",
"size": "2507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/general/test_addrpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "413"
},
{
"name": "C",
"bytes": "4259"
},
{
"name": "Makefile",
"bytes": "4163"
},
{
"name": "Python",
"bytes": "607649"
},
{
"name": "Shell",
"bytes": "1914"
}
],
"symlink_target": ""
}
|
# NOTE(review): Python 2 example script (uses ``print`` statements and the
# py2-era "pattern" library); run it with a Python 2 interpreter.
import os, sys; sys.path.insert(0, os.path.join("..", ".."))
import time
from pattern.vector import Document, Model, KNN
from pattern.db import Datasheet

# Long documents contain lots of words.
# Models with lots of long documents can become slow,
# because calculating cosine similarity then takes a long time.
# Latent Semantic Analysis (LSA) is a statistical machine learning method,
# based on a matrix calculation called "singular value decomposition" (SVD).
# It discovers semantically related words across documents.
# It groups related words into "concepts" .
# It then creates a concept vector for each document.
# This reduces the amount of data to work with (for example when clustering),
# and filters out noise, so that semantically related words come out stronger.

# We'll use the Pang & Lee corpus of movie reviews, included in the testing suite.
# Take 250 positive reviews and 250 negative reviews:
data = os.path.join("..","..","test", "corpora", "polarity-en-pang&lee.csv")
data = Datasheet.load(data)
data = data[:250] + data[-250:]

# Build a model of movie reviews.
# Each document consists of the top 40 words in the movie review.
documents = []
for score, review in data:
    # type=True for positive reviews (score > 0), False for negative ones;
    # this label is what the classifier below learns to predict.
    document = Document(review, stopwords=False, top=40, type=int(score) > 0)
    documents.append(document)
m = Model(documents)

print "number of documents:", len(m)
print "number of features:", len(m.vector)
print "number of features (average):", sum(len(d.terms) for d in m.documents) / float(len(m))
print

# 6,337 different features may be too slow for some algorithms (e.g., hierarchical clustering).
# We'll reduce the document vectors to 10 concepts.

# Let's test how our model performs as a classifier.
# A document can have a label (or type, or class).
# For example, in the movie reviews corpus,
# there are positive reviews (score > 0) and negative reviews (score < 0).
# A classifier uses a model as "training" data
# to predict the label (type/class) of unlabeled documents.
# In this case, it can predict whether a new movie review is positive or negative.
# The details are not that important right now, just observe the accuracy.
# Naturally, we want accuracy to stay the same after LSA reduction,
# and hopefully decrease the time needed to run.
t = time.time()
print "accuracy:", KNN.test(m, folds=10)[-1]
print "time:", time.time() - t
print

# Reduce the documents to vectors of 10 concepts (= 1/4 of 40 features).
print "LSA reduction..."
print
m.reduce(10)

t = time.time()
print "accuracy:", KNN.test(m, folds=10)[-1]
print "time:", time.time() - t
print

# Accuracy is about the same, but the performance is better: 2x-3x faster,
# because each document is now a "10-word summary" of the original review.

# Let's take a closer look at the concepts.
# The concept vector for the first document:
print m.lsa.vectors[m[0].id]
print

# It is a dictionary of concept id's (instead of features).
# This is is not very helpful.
# But we can look up the features "bundled" in each concept:
print len(m.lsa.concepts[0])

# That's a lot of words.
# In fact, all features in the model have a score for one of the ten concepts.
# To make it clearer, let's generate 100 concepts (i.e., semantic categories),
# and then examine the features with the highest score for a concept:
# (reset the cached LSA first, then re-reduce with more concepts)
m.lsa = None
m.reduce(100)

for feature, weight in m.lsa.concepts[2].items(): # concept id=2
    if abs(weight) > 0.1:
        print feature

# Concept 2 = "truman", "ventura", "ace", "carrey", ... Obviously about Jim Carrey movies.
# Concept 23 = "star", "wars", "jedi", "vader", "luke", "effects", ...
# Not all concepts are equally easy to interpret,
# but the technique can be useful to discover synonym sets.
|
{
"content_hash": "b919bb73393d17bdf4c0a7fecb3a849e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 95,
"avg_line_length": 37.5959595959596,
"alnum_prop": 0.722729715206878,
"repo_name": "EricSchles/pattern",
"id": "19cfbabf7ff17217cf90589e938f29e280a4d4f7",
"size": "3722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/05-vector/03-lsa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "89255"
},
{
"name": "HTML",
"bytes": "606"
},
{
"name": "JavaScript",
"bytes": "212643"
},
{
"name": "Makefile",
"bytes": "92"
},
{
"name": "Python",
"bytes": "2053820"
}
],
"symlink_target": ""
}
|
from datetime import date
from django.forms.models import model_to_dict
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
from remo.reports import ACTIVITY_CAMPAIGN
from remo.reports.forms import NGReportForm
from remo.reports.models import NGReport
from remo.reports.tests import (ActivityFactory, CampaignFactory,
NGReportFactory)
class NGReportFormTests(RemoTestCase):
    """Validation tests for NGReportForm."""

    def test_base(self):
        """A fully populated form validates and saves every field."""
        user = UserFactory.create()
        activity = ActivityFactory.create()
        campaign = CampaignFactory.create()
        functional_area = FunctionalAreaFactory.create()
        data = {
            'report_date': '25 March 2012',
            'activity': activity.id,
            'campaign': campaign.id,
            'longitude': 44.33,
            'latitude': 55.66,
            'location': 'world',
            'link': 'https://example.com',
            'link_description': 'Test link.',
            'activity_description': 'Test activity',
            'functional_areas': functional_area.id,
        }
        form = NGReportForm(data, instance=NGReport(user=user))
        ok_(form.is_valid())
        db_obj = form.save()
        eq_(db_obj.report_date, date(2012, 3, 25))
        eq_(db_obj.activity, activity)
        # The campaign was submitted but never asserted before.
        eq_(db_obj.campaign, campaign)
        eq_(db_obj.longitude, 44.33)
        eq_(db_obj.latitude, 55.66)
        eq_(db_obj.location, 'world')
        eq_(db_obj.link, 'https://example.com')
        eq_(db_obj.link_description, 'Test link.')
        # BUG FIX: this line used to end with a stray trailing comma,
        # turning the statement into a throwaway one-element tuple.
        eq_(db_obj.activity_description, 'Test activity')
        eq_(db_obj.functional_areas.all().count(), 1)
        eq_(db_obj.functional_areas.all()[0], functional_area)
        eq_(db_obj.mentor, user.userprofile.mentor)

    def test_report_date_in_future(self):
        """A report date in the future is rejected."""
        data = {
            'report_date': '25 March 3000',
        }
        form = NGReportForm(data)
        ok_(not form.is_valid())
        ok_('report_date' in form.errors)
        # BUG FIX: ok_() only checks truthiness (its second argument is
        # merely the assertion message), so the expected error text was
        # never actually compared; use eq_() against the first error.
        eq_(form.errors['report_date'][0],
            'Report date cannot be in the future.')

    def test_campain_activity_without_campaign(self):
        # NOTE(review): "campain" is a typo for "campaign" in the method
        # name; kept as-is so selecting the test by name keeps working.
        """A campaign-type activity without a campaign is invalid."""
        activity = ActivityFactory.create(name=ACTIVITY_CAMPAIGN)
        data = {
            'activity': activity.id
        }
        form = NGReportForm(data)
        ok_(not form.is_valid())
        ok_('campaign' in form.errors)
        # BUG FIX: same ok_() misuse as above; compare the message.
        eq_(form.errors['campaign'][0],
            'Please select an option from the list.')
class InactiveCategoriesTest(RemoTestCase):
    def test_edit_event(self):
        """Edit NGReport with inactive categories."""
        activity = ActivityFactory.create()
        campaign = CampaignFactory.create()
        active_area = FunctionalAreaFactory.create()
        inactive_areas = FunctionalAreaFactory.create_batch(2, active=False)

        # Start from a report whose functional areas are all inactive.
        existing_report = NGReportFactory.create(
            activity=activity, campaign=campaign,
            functional_areas=inactive_areas)

        # Re-submit the report, swapping the areas for a single active one.
        form_data = model_to_dict(existing_report)
        form_data['functional_areas'] = active_area.id
        form = NGReportForm(form_data, instance=existing_report)
        ok_(form.is_valid())
        saved_report = form.save()

        # Only the newly selected, active area remains attached.
        ok_(active_area in saved_report.functional_areas.all())
        eq_(saved_report.functional_areas.all().count(), 1)
|
{
"content_hash": "427f830c2537101e8d573bf16b34600d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 77,
"avg_line_length": 37.87356321839081,
"alnum_prop": 0.6100151745068285,
"repo_name": "mozilla/remo",
"id": "9e5ad2d9e13733b1691b2274ef247762d2e3c869",
"size": "3295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remo/reports/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "228359"
},
{
"name": "HTML",
"bytes": "325696"
},
{
"name": "JavaScript",
"bytes": "288713"
},
{
"name": "Python",
"bytes": "763657"
},
{
"name": "Shell",
"bytes": "648"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
import math
class THAssistant:
    """Texas Hold'em tournament assistant.

    Computes blind structures, starting-stack sizes, per-denomination chip
    distributions and prize-pool payout tables.  ``players``, ``prizepool``
    and ``payable`` are only required by the payout methods
    (uniform/geometric/lognormal/exponential via prizeDist); the chip and
    blind helpers work without them.
    """

    def __init__(self, players=None, prizepool=None, payable=None):
        if players and prizepool and payable:
            self.players = int(players)
            self.prizepool = int(prizepool)
            self.payablePlayers = int(payable)
        # Name -> payout-curve method.  Defined unconditionally so that
        # prizeDist() reports "invalid name" instead of crashing with an
        # AttributeError when tournament figures were not supplied.
        self.distFuncs = {"uniform": self.uniform,
                          "geometric": self.geometric,
                          "lognormal": self.lognormal,
                          "exponential": self.exponential}

    def blindsStructure(self, startingStack, hours=None):
        """Return ``(levels, period)``.

        ``levels`` is a list of (small_blind, big_blind) tuples that double
        every level until the big blind reaches the starting stack.
        ``period`` is the blind period in whole minutes: the standard 20
        minutes when ``hours`` is absent/zero, otherwise chosen so the
        levels span roughly ``hours`` hours.
        """
        bb = int(1.0 / 50.0 * startingStack)   # first big blind = 2% of stack
        sb = int(float(bb) / 2.0)
        table = [(sb, bb)]
        if hours is None or hours == 0:
            ## Standard blind period time
            blindPeriod = 20
        else:
            ## Number of doubling levels until bb reaches the stack size,
            ## then split the requested duration evenly across them.
            numLevels = math.log(float(startingStack) / float(bb), 2)
            blindPeriod = float(hours) * 60.0 / float(numLevels)
        ## Generate doubling blind levels
        while bb <= startingStack:
            sb = bb
            bb *= 2
            table.append((sb, bb))
        return table, int(blindPeriod)

    def stackCount(self, bigBlind, chipValueArr):
        """Return a starting stack of ~100 big blinds, rounded to the
        nearest amount payable with the smallest chip denomination."""
        ## Assume the stack should be 100 big blinds
        stackCount = 100 * int(bigBlind)
        minVal = min(chipValueArr)
        ## If the stack is not divisible by the smallest chip then set it to
        ## the closest number divisible by the smallest chip.
        ## BUG FIX: the condition was inverted (``if not stackCount % minVal``),
        ## so the rounding branch only ran for already-divisible stacks.
        if stackCount % minVal:
            highVal = (int(stackCount / minVal) + 1) * minVal
            lowVal = int(stackCount / minVal) * minVal
            # Pick whichever multiple is closer; ties go to the lower one.
            if abs(stackCount - highVal) < abs(stackCount - lowVal):
                stackCount = highVal
            else:
                stackCount = lowVal
        return stackCount

    def chipDist(self, stackSize, chipValueArr):
        """Split ``stackSize`` (a chip-value total) across the denominations
        in ``chipValueArr`` (assumed ascending); returns a sorted list of
        ``[denomination, total_value]`` pairs."""
        table = {}
        ## Initial estimate: roughly half of the *remaining* value goes to
        ## each denomination, largest chips first, rounded down to a whole
        ## number of chips.
        reversedChips = chipValueArr[::-1]
        for i, chip in enumerate(reversedChips):
            val = int(stackSize * (1.0 / (2.0 ** (i + 1))))
            val -= val % chip
            table[chip] = val
            stackSize -= val
        ## Spread the remainder greedily, one chip per denomination per pass.
        while stackSize > 0:
            distributed = False
            for chip in table:
                if chip <= stackSize:
                    table[chip] += chip
                    stackSize -= chip
                    distributed = True
            if not distributed:
                # BUG FIX: the remainder is smaller than every chip and can
                # never be handed out; the original looped here forever.
                break
        ## Convert table into a sorted array of [denomination, value] pairs.
        finalTable = []
        for chip in sorted(table.keys()):
            finalTable.append([chip, table[chip]])
        return finalTable

    def prizeDist(self, dist):
        """Look up a payout curve by name and return its payout table."""
        if isinstance(dist, str):
            if dist in self.distFuncs:
                return self.distFuncs[dist]()
            else:
                print("ValueError: dist not a valid value")
                # BUG FIX: the old message listed options that do not
                # exist; show the actual dispatch-table keys.
                print("Please choose [uniform, geometric, lognormal, exponential]")
        else:
            print("TypeError: dist is not a string")

    def uniform(self):
        ## All the same, prizepool/payable; first place absorbs the
        ## integer remainder so the payouts sum to the prizepool exactly.
        ## BUG FIX: was true division, which produced float payouts while
        ## every other curve returns whole-number amounts.
        val = self.prizepool // self.payablePlayers
        remaining = self.prizepool
        table = []
        for p in range(self.payablePlayers):
            table.append(val)
            remaining -= val
        table[0] += remaining
        return table

    def geometric(self):
        ## y = 1/(2^position); remainder goes to first place.
        remaining = self.prizepool
        table = []
        ## BUG FIX: was Python-2-only ``xrange``; the rest of the file is
        ## Python 3 (print() calls), so this raised NameError.
        for p in range(self.payablePlayers):
            val = int(self.prizepool * (1.0 / (2.0 ** (p + 1))))
            table.append(val)
            remaining -= val
        table[0] += remaining
        return table

    def lognormal(self):
        ## y = (1/x(scale)sqrt(2pi))e^((-ln(x-location)^2)/2scale^2)
        ## Remainder goes to first place, as in the other curves.
        remaining = self.prizepool
        table = []
        scale = 0.6
        location = 0.999
        for p in range(self.payablePlayers):
            val = int(self.prizepool * ((1 / ((p + 1) * scale * math.sqrt(2 * math.pi))) * math.exp(-(math.log((p + 1) - location) ** 2) / (scale * math.sqrt(2)))))
            table.append(val)
            remaining -= val
        table[0] += remaining
        return table

    def exponential(self):
        ## y = rate * e ^ (-rate * x); remainder goes to first place.
        remaining = self.prizepool
        table = []
        rateParam = 0.7
        for p in range(self.payablePlayers):
            val = int(self.prizepool * rateParam * math.exp(-rateParam * (p + 1)))
            table.append(val)
            remaining -= val
        table[0] += remaining
        return table
|
{
"content_hash": "16479ff6a4e8efe506fd666a23ae6d7d",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 150,
"avg_line_length": 29.67948717948718,
"alnum_prop": 0.5323974082073434,
"repo_name": "tolo137/TourneyHelper",
"id": "ac5ff0f9213b61729db79572b69ecfdecce498ff",
"size": "4788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "THAssistant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7351"
}
],
"symlink_target": ""
}
|
"""
Scrapy signals
These signals are documented in docs/topics/signals.rst. Please don't add new
signals here without documenting them there.
"""
engine_started = object()
engine_stopped = object()
spider_opened = object()
spider_idle = object()
spider_closed = object()
spider_error = object()
request_scheduled = object()
request_dropped = object()
request_reached_downloader = object()
request_left_downloader = object()
response_received = object()
response_downloaded = object()
item_scraped = object()
item_dropped = object()
item_error = object()
# for backward compatibility
stats_spider_opened = spider_opened
stats_spider_closing = spider_closed
stats_spider_closed = spider_closed
item_passed = item_scraped
request_received = request_scheduled
|
{
"content_hash": "be162814179416eb934f9ae1ba34e078",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 24.548387096774192,
"alnum_prop": 0.7608409986859396,
"repo_name": "eLRuLL/scrapy",
"id": "cd7ed7fb167866f898fb6626e6bccd131146caf2",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "1518287"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
}
|
import logging
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.test import attr
LOG = logging.getLogger(__name__)
class StacksTestJSON(base.BaseOrchestrationTest):
    """Smoke tests against a single non-empty Heat stack (JSON interface).

    setUpClass creates one stack from the inline CFN template below; every
    test inspects that shared stack.  All tests are read-only except
    test_suspend_resume_stack, which restores the stack to a running state
    before finishing.
    """
    _interface = 'json'

    # NOTE(review): indentation of this template was reconstructed from
    # the YAML structure; it is runtime data, so verify it matches the
    # deployed template before relying on it byte-for-byte.
    template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
  Template which creates some simple resources
Parameters:
  trigger:
    Type: String
    Default: not_yet
Resources:
  fluffy:
    Type: AWS::AutoScaling::LaunchConfiguration
    Metadata:
      kittens:
      - Tom
      - Stinky
    Properties:
      ImageId: not_used
      InstanceType: not_used
      UserData:
        Fn::Replace:
        - variable_a: {Ref: trigger}
          variable_b: bee
        - |
          A == variable_a
          B == variable_b
Outputs:
  fluffy:
    Description: "fluffies irc nick"
    Value:
      Fn::Replace:
      - nick: {Ref: fluffy}
      - |
        #nick
"""

    @classmethod
    def setUpClass(cls):
        super(StacksTestJSON, cls).setUpClass()
        cls.client = cls.orchestration_client
        cls.stack_name = data_utils.rand_name('heat')

        # create the stack and wait for it to come up
        cls.stack_identifier = cls.create_stack(
            cls.stack_name,
            cls.template,
            parameters={
                'trigger': 'start'
            })
        cls.stack_id = cls.stack_identifier.split('/')[1]
        cls.resource_name = 'fluffy'
        cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
        cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')

    def assert_fields_in_dict(self, obj, *fields):
        """Assert that every name in *fields* is a key of dict *obj*."""
        for field in fields:
            self.assertIn(field, obj)

    @attr(type='gate')
    def test_stack_list(self):
        """Created stack should be on the list of existing stacks."""
        resp, stacks = self.client.list_stacks()
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(stacks, list)
        # List comprehension instead of map(): re-iterable and py3-safe.
        stacks_names = [stack['stack_name'] for stack in stacks]
        self.assertIn(self.stack_name, stacks_names)

    @attr(type='gate')
    def test_stack_show(self):
        """Getting details about created stack should be possible."""
        resp, stack = self.client.get_stack(self.stack_name)
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(stack, dict)
        self.assert_fields_in_dict(stack, 'stack_name', 'id', 'links',
                                   'parameters', 'outputs', 'disable_rollback',
                                   'stack_status_reason', 'stack_status',
                                   'creation_time', 'updated_time',
                                   'capabilities', 'notification_topics',
                                   'timeout_mins', 'template_description')
        self.assert_fields_in_dict(stack['parameters'], 'AWS::StackId',
                                   'trigger', 'AWS::Region', 'AWS::StackName')
        self.assertEqual(True, stack['disable_rollback'],
                         'disable_rollback should default to True')
        self.assertEqual(self.stack_name, stack['stack_name'])
        self.assertEqual(self.stack_id, stack['id'])
        self.assertEqual('fluffy', stack['outputs'][0]['output_key'])

    @attr(type='gate')
    def test_suspend_resume_stack(self):
        """suspend and resume a stack."""
        resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
        self.assertEqual('200', resp['status'])
        self.client.wait_for_stack_status(self.stack_identifier,
                                          'SUSPEND_COMPLETE')
        resp, resume_stack = self.client.resume_stack(self.stack_identifier)
        self.assertEqual('200', resp['status'])
        self.client.wait_for_stack_status(self.stack_identifier,
                                          'RESUME_COMPLETE')

    @attr(type='gate')
    def test_list_resources(self):
        """Getting list of created resources for the stack should be possible.
        """
        resp, resources = self.client.list_resources(self.stack_identifier)
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(resources, list)
        for res in resources:
            self.assert_fields_in_dict(res, 'logical_resource_id',
                                       'resource_type', 'resource_status',
                                       'updated_time')
        # List comprehensions instead of map(): py3-safe.
        resources_names = [resource['logical_resource_id']
                           for resource in resources]
        self.assertIn(self.resource_name, resources_names)
        resources_types = [resource['resource_type'] for resource in resources]
        self.assertIn(self.resource_type, resources_types)

    @attr(type='gate')
    def test_show_resource(self):
        """Getting details about created resource should be possible."""
        resp, resource = self.client.get_resource(self.stack_identifier,
                                                  self.resource_name)
        # Consistency fix: every other test asserts the response status.
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(resource, dict)
        self.assert_fields_in_dict(resource, 'resource_name', 'description',
                                   'links', 'logical_resource_id',
                                   'resource_status', 'updated_time',
                                   'required_by', 'resource_status_reason',
                                   'physical_resource_id', 'resource_type')
        self.assertEqual(self.resource_name, resource['logical_resource_id'])
        self.assertEqual(self.resource_type, resource['resource_type'])

    @attr(type='gate')
    def test_resource_metadata(self):
        """Getting metadata for created resource should be possible."""
        resp, metadata = self.client.show_resource_metadata(
            self.stack_identifier,
            self.resource_name)
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(metadata, dict)
        self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))

    @attr(type='gate')
    def test_list_events(self):
        """Getting list of created events for the stack should be possible."""
        resp, events = self.client.list_events(self.stack_identifier)
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(events, list)
        for event in events:
            self.assert_fields_in_dict(event, 'logical_resource_id', 'id',
                                       'resource_status_reason',
                                       'resource_status', 'event_time')
        # BUG FIX: map() returns a one-shot iterator on Python 3, so the
        # second assertIn below would always fail on an exhausted iterator;
        # build a real list instead.
        resource_statuses = [event['resource_status'] for event in events]
        self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
        self.assertIn('CREATE_COMPLETE', resource_statuses)

    @attr(type='gate')
    def test_show_event(self):
        """Getting details about existing event should be possible."""
        resp, events = self.client.list_resource_events(self.stack_identifier,
                                                        self.resource_name)
        self.assertNotEqual([], events)
        # Inspect the oldest event for the resource.
        events.sort(key=lambda event: event['event_time'])
        event_id = events[0]['id']
        resp, event = self.client.show_event(self.stack_identifier,
                                             self.resource_name, event_id)
        self.assertEqual('200', resp['status'])
        self.assertIsInstance(event, dict)
        self.assert_fields_in_dict(event, 'resource_name', 'event_time',
                                   'links', 'logical_resource_id',
                                   'resource_status', 'resource_status_reason',
                                   'physical_resource_id', 'id',
                                   'resource_properties', 'resource_type')
        self.assertEqual(self.resource_name, event['resource_name'])
        self.assertEqual('state changed', event['resource_status_reason'])
        self.assertEqual(self.resource_name, event['logical_resource_id'])
|
{
"content_hash": "f332014ca4fae310a5dde13897c2a4bd",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 42.13157894736842,
"alnum_prop": 0.572891942535915,
"repo_name": "ntymtsiv/tempest",
"id": "11d01f76a53321f0c15a7710a5f8aec2e8125630",
"size": "8578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/orchestration/stacks/test_non_empty_stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
}
|
import abc
from typing import Any, Dict, Union
import numpy as np
import scipy.sparse
from autosklearn.pipeline.components.data_preprocessing.feature_type import (
FeatTypeSplit,
)
class AbstractDataManager(metaclass=abc.ABCMeta):
    """Base class holding a named dataset: raw arrays, metadata and the
    feature-type / encoder state used by the preprocessing pipeline.

    NOTE(review): the original declared ``__metaclass__ = abc.ABCMeta``,
    which is the Python 2 spelling and is inert on Python 3; the metaclass
    is now passed explicitly (the class has no abstract methods, so it
    remains instantiable either way).
    """

    def __init__(self, name: str):
        # Subset name -> array (e.g. train/test splits) and free-form
        # metadata about the dataset.
        self._data: Dict = {}
        self._info: Dict = {}
        self._name = name

    @property
    def name(self) -> str:
        """Name of the dataset."""
        return self._name

    @property
    def data(self) -> Dict[str, np.ndarray]:
        """Mapping of subset name to its array (dense or scipy-sparse)."""
        return self._data

    @property
    def info(self) -> Dict[str, Any]:
        """Free-form metadata about the dataset."""
        return self._info

    @property
    def feat_type(self) -> Dict[Union[str, int], str]:
        # Raises AttributeError until the setter has been used.
        return self._feat_type

    @feat_type.setter
    def feat_type(self, value: Dict[Union[str, int], str]) -> None:
        self._feat_type = value

    @property
    def encoder(self) -> "FeatTypeSplit":
        # Forward reference: FeatTypeSplit is only needed for typing here.
        return self._encoder

    @encoder.setter
    def encoder(self, value: "FeatTypeSplit") -> None:
        # BUG FIX: the setter was annotated as returning FeatTypeSplit,
        # but property setters (including this one) return None.
        self._encoder = value

    def __repr__(self) -> str:
        return "DataManager : " + self.name

    def __str__(self) -> str:
        """Multi-line dump of info, per-subset shapes/dtypes and feat_type."""
        val = "DataManager : " + self.name + "\ninfo:\n"
        for item in self.info:
            val = val + "\t" + item + " = " + str(self.info[item]) + "\n"
        val = val + "data:\n"
        for subset in self.data:
            val = val + "\t%s = %s %s %s\n" % (
                subset,
                type(self.data[subset]),
                str(self.data[subset].shape),
                str(self.data[subset].dtype),
            )
            if isinstance(self.data[subset], scipy.sparse.spmatrix):
                # Fraction of stored entries relative to the full matrix.
                val = val + "\tdensity: %f\n" % (
                    float(len(self.data[subset].data))
                    / self.data[subset].shape[0]
                    / self.data[subset].shape[1]
                )
        val = val + "feat_type:\t" + str(self.feat_type) + "\n"
        return val
|
{
"content_hash": "d78739ceda211cbccec5689d150fe0ac",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.5314083080040527,
"repo_name": "automl/auto-sklearn",
"id": "0837d59ad09dfd66788d565e1e1b92e479fd82a3",
"size": "1974",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "autosklearn/data/abstract_data_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "950"
},
{
"name": "Makefile",
"bytes": "3513"
},
{
"name": "Python",
"bytes": "2008151"
},
{
"name": "Shell",
"bytes": "4744"
}
],
"symlink_target": ""
}
|
# Placeholder zone table: SONEL zones are intentionally unimplemented.
zones = {
    "ZONES HAVE NOT BEEN IMPLEMENTED FOR SONEL": {
        "Seriously, don't use this zone.",
    },
}
|
{
"content_hash": "dc5a530a25d4441895f64a1398767622",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 58,
"avg_line_length": 28.75,
"alnum_prop": 0.6608695652173913,
"repo_name": "gragas/gpsbro",
"id": "4ba8e951400a93afa16d7c91c4c03f95c5b6a6a3",
"size": "115",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gpsbro2/gpsbro/sonel/zones.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132"
},
{
"name": "HTML",
"bytes": "1302"
},
{
"name": "Python",
"bytes": "57826"
}
],
"symlink_target": ""
}
|
"""Sphinx build configuration for the Flask-JWT documentation.

NOTE(review): written against a Sphinx version of its era; some options
below (e.g. ``exclude_trees``, ``html_use_modindex``) are deprecated in
modern Sphinx — confirm before upgrading.
"""
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Flask-JWT'
copyright = u'2014, Matt Wright'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# 'flask_small' is a custom theme shipped in the _themes directory.
html_theme = 'flask_small'
#html_theme = 'default'
html_theme_options = {
    'index_logo': False,
    'github_fork': 'mattupstate/flask-jwt'
}

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'flask-jwtdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE(review): author here ('Dan Jacob') differs from the copyright holder
# above ('Matt Wright') — likely copied from another project's conf.py.
latex_documents = [
    ('index', 'flask-jwt.tex', u'flask-jwt Documentation',
     u'Dan Jacob', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Cross-reference targets for the Python and Flask documentation.
intersphinx_mapping = {'http://docs.python.org/': None,
                       'http://flask.pocoo.org/docs/': None}
|
{
"content_hash": "697ae6c14f2dff7c80dde1be7c98c2ab",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 84,
"avg_line_length": 32.86842105263158,
"alnum_prop": 0.7055244195356285,
"repo_name": "avilaton/flask-jwt",
"id": "cd41762f12b3de86250ad1bc1cacc370f97ef914",
"size": "6665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29833"
}
],
"symlink_target": ""
}
|
import synapse
from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse import events
from twisted.internet import reactor, defer
from twisted.web.resource import Resource
from daemonize import Daemonize
import sys
import logging
import gc
logger = logging.getLogger("synapse.app.pusher")
class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    """Datastore for the pusher worker.

    Combines the slaved (replication-fed) stores with the handful of
    DataStore methods the pusher needs to write, re-bound here as plain
    functions via ``__func__`` so they execute against this store.
    """
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )
    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )
    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )
    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )
    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )
    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )
    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )
    # Taken from the class __dict__ rather than as a bound attribute —
    # presumably to keep the raw descriptor (e.g. a cache wrapper) intact;
    # TODO(review): confirm against RoomMemberStore's definition.
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )
class PusherServer(HomeServer):
    """HomeServer variant that runs only the push-notification ("pusher")
    worker, feeding off the main synapse process via HTTP replication."""

    def get_db_conn(self, run_new_connection=True):
        """Open a raw database connection (optionally running the engine's
        per-connection initialisation)."""
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)
        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        """Create the worker's datastore."""
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        """Ask the main process (over the replication HTTP API) to delete a
        pusher. Returns the Deferred from the HTTP client."""
        http_client = self.get_simple_http_client()
        replication_url = self.config.worker_replication_url
        url = replication_url + "/remove_pushers"
        return http_client.post_json_get_json(url, {
            "remove": [{
                "app_id": app_id,
                "push_key": push_key,
                "user_id": user_id,
            }]
        })

    def _listen_http(self, listener_config):
        """Bind the HTTP resources (currently only /metrics) described by a
        single listener config block on each configured address."""
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
        root_resource = create_resource_tree(resources, Resource())
        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )
        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self, listeners):
        """Start every configured listener ("http" or "manhole")."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]
                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        """Long-poll the main process's replication endpoint forever,
        applying each batch to the local store and poking the pushers."""
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()

        def stop_pusher(user_id, app_id, pushkey):
            # Remove the pusher from the pool (if present) and stop it.
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

        @defer.inlineCallbacks
        def poke_pushers(results):
            # Reconcile pusher additions/deletions, then notify the pool of
            # new events and receipts.
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    # Row layout inferred from the slices below
                    # (row[1:4] == user_id, app_id, pushkey) — TODO(review):
                    # confirm against the replication stream schema.
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream and stream["rows"]:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
                    min_stream_id, max_stream_id
                )

            stream = results.get("receipts")
            if stream and stream["rows"]:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(
                    min_stream_id, max_stream_id, affected_room_ids
                )

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                # FIX: poke_pushers is an inlineCallbacks function; without
                # the yield its Deferred was dropped, so failures inside it
                # skipped this except block and surfaced as unhandled
                # Deferred errors instead of being logged and retried.
                yield poke_pushers(result)
            except Exception:
                # Narrowed from a bare ``except:`` so generator/interrupt
                # control-flow exceptions are not swallowed here.
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)
def start(config_options):
    """Parse config, build the PusherServer, start it (optionally as a
    daemon) and return the server instance.

    FIX: previously returned None implicitly even though the __main__ block
    does ``ps = start(sys.argv[1:])``; now returns the PusherServer.
    """
    try:
        config = HomeServerConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.pusher"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    # Refuse to run if the main process is also going to run the pushers —
    # two processes driving the same pushers would duplicate notifications.
    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``start_pushers: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.start_pushers = True

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # Blocks until the reactor stops.
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start_services():
        # Kick off replication and the pusher pool once the reactor is up.
        # (Renamed from ``start`` to avoid shadowing this function.)
        ps.replicate()
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start_services)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-pusher",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    return ps
if __name__ == '__main__':
    # Run inside a LoggingContext so log lines are attributed to "main".
    with LoggingContext("main"):
        ps = start(sys.argv[1:])
|
{
"content_hash": "0bbcb39e9baab7cb4813a02eb7c53071",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 81,
"avg_line_length": 33.80546075085324,
"alnum_prop": 0.5811206461383139,
"repo_name": "TribeMedia/synapse",
"id": "073f2c248900b08a96e4c58655c5142634c1b9f8",
"size": "10530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/app/pusher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from models import Bookmarklet, Vote, Category, Comment
# Expose each bookmarklet model in the Django admin with default options.
for model in (Bookmarklet, Vote, Category, Comment):
    admin.site.register(model)
|
{
"content_hash": "f89bf95b7a82aa2f6dff28033889570d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.8309178743961353,
"repo_name": "assertnotnull/bookmarklets",
"id": "161108676ffedbd410def3b24e715941264107a7",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookmarklets/bookmarkletsapp/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23775"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "33989"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
import requests
# GET with an explicit Content-Type header (charset preserved verbatim);
# no request body is sent.
headers = {
    'Content-Type': 'text/xml;charset=UTF-8',
}

response = requests.get('http://postman-echo.com/get', headers=headers)
|
{
"content_hash": "92ac14aa93fcdd355dab341d187c0fa0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 21.428571428571427,
"alnum_prop": 0.6933333333333334,
"repo_name": "NickCarneiro/curlconverter",
"id": "be1f3fc324de51108b4eeb04b452ec9d17c5df37",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixtures/python/get_with_header_without_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dart",
"bytes": "11466"
},
{
"name": "Elixir",
"bytes": "21751"
},
{
"name": "Go",
"bytes": "5544"
},
{
"name": "JavaScript",
"bytes": "85105"
},
{
"name": "MATLAB",
"bytes": "35550"
},
{
"name": "PHP",
"bytes": "4145"
},
{
"name": "Python",
"bytes": "21395"
},
{
"name": "R",
"bytes": "18920"
},
{
"name": "Rust",
"bytes": "3284"
},
{
"name": "Shell",
"bytes": "129"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Runtime dependencies, pinned to known-good versions.
requires = [
    'Werkzeug==0.8.3',
    'Fabric==1.5.1',
]

# Package metadata; running this module invokes setuptools directly.
setup(
    name='Finance Web Application',
    version='1.0',
    description='Website tracking accounts',
    author='Greg Reinbach',
    author_email='greg@reinbach.com',
    url='https://github.com/reinbach/finance',
    install_requires=requires,
)
|
{
"content_hash": "f9f4c7c11ec74d5818491ee1c4abe016",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 46,
"avg_line_length": 21.375,
"alnum_prop": 0.6578947368421053,
"repo_name": "reinbach/finance",
"id": "d4d7dd9c925e4b8f608e7dc32de8b4ff26b39cb1",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "537"
},
{
"name": "HTML",
"bytes": "12718"
},
{
"name": "JavaScript",
"bytes": "42149"
},
{
"name": "Python",
"bytes": "94442"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "512"
}
],
"symlink_target": ""
}
|
import argparse
import babel.messages.catalog as catalog
import babel.messages.pofile as pofile
# NOTE: This implementation has been superseded by the pseudo_translate
# management command, and will be removed in Queens (13.0) when run_tests.sh
# is also removed.
def translate(segment):
    """Return a pseudo-translated copy of *segment*.

    The result is wrapped in brackets with marker characters and padded
    with '~' so that layout problems with expanded translations show up.
    """
    # When the id starts with a newline the mo compiler enforces that the
    # translated message must also start with a newline; carry it through.
    prefix = u"\n" if segment.startswith('\n') else u""
    orig_size = len(segment)
    # Expansion factors follow IBM's globalization guideline
    # (http://www-01.ibm.com/software/globalization/guidelines/a3.html):
    # shorter messages grow proportionally more.
    thresholds = ((20, 1), (30, 0.8), (50, 0.6), (70, 0.4))
    multiplier = next((m for limit, m in thresholds if orig_size < limit), 0.3)
    padding = "~" * int(max(0, (orig_size * multiplier) - 10))
    return u"{0}[~{1}~您好яшçあ{2}]".format(prefix, segment, padding)
def main():
    """Command-line entry point: pseudo-translate a POT file into a PO file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('pot_filename', type=argparse.FileType('r'))
    parser.add_argument('po_filename', type=argparse.FileType('w'))
    parser.add_argument('locale')
    args = parser.parse_args()

    # Load the message template.
    template = pofile.read_po(args.pot_filename, ignore_obsolete=True)

    # Build the output catalog for the requested locale.
    translated = catalog.Catalog(locale=args.locale,
                                 last_translator="pseudo.py",
                                 charset="utf-8")
    plural_count = translated.num_plurals

    # Pseudo-translate every message; plural messages get one string per
    # plural form, each tagged with its index.
    for message in template:
        if message.pluralizable:
            message.string = [translate(u"{}:{}".format(idx, message.id[0]))
                              for idx in range(plural_count)]
        else:
            message.string = translate(message.id)
        translated[message.id] = message

    # Write "translated" PO file.
    pofile.write_po(args.po_filename, translated, ignore_obsolete=True)
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "cdae8711da7a7f745979b5742254dbd4",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 32.26865671641791,
"alnum_prop": 0.6262719703977798,
"repo_name": "kogotko/carburetor",
"id": "4a5ef1a0dfa86e46c21d5320352e13a725aa061f",
"size": "2778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/pseudo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9097503"
},
{
"name": "HTML",
"bytes": "1650202"
},
{
"name": "JavaScript",
"bytes": "4712562"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "5086985"
},
{
"name": "Shell",
"bytes": "18571"
}
],
"symlink_target": ""
}
|
def _import():
    """Import sibling AST node classes lazily and return them as a dict.

    Deferring these imports to call time avoids circular-import problems
    between the ``type`` and ``expr`` subpackages; ``locals()`` returns a
    name -> class mapping of exactly the names imported below (a function
    docstring is not a local, so it does not appear in the result).
    """
    from .classorinterfacetype import ClassOrInterfaceType
    from .primitivetype import PrimitiveType
    from ..expr.integerliteralexpr import IntegerLiteralExpr
    from ..expr.nameexpr import NameExpr
    from ..expr.annotationexpr import AnnotationExpr
    return locals()
|
{
"content_hash": "36062149edd13dbb60a74f5de12446d6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.7736486486486487,
"repo_name": "plum-umd/java-sketch",
"id": "a59b170e2c48e172bb2d8863adc9fea0f491bc74",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jskparser/ast/type/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "44034"
},
{
"name": "Java",
"bytes": "5042035"
},
{
"name": "Makefile",
"bytes": "215"
},
{
"name": "Perl",
"bytes": "495"
},
{
"name": "Python",
"bytes": "600201"
},
{
"name": "Shell",
"bytes": "46731"
}
],
"symlink_target": ""
}
|
from io import StringIO
from mock import patch
from twisted.python.log import startLoggingWithObserver, removeObserver
from twisted.trial import unittest
from scrapyrt.log import ScrapyrtFileLogObserver, msg
@patch('twisted.python.log.FileLogObserver.emit')
class TestLogObserver(unittest.TestCase):
    """Tests for ScrapyrtFileLogObserver's event filtering.

    The class-level decorator patches FileLogObserver.emit, so every test
    method receives the mock as its ``emit_mock`` argument.
    """
    def setUp(self):
        # Install our observer on twisted's global log (stdout untouched).
        self.file = StringIO()
        self.log_observer = ScrapyrtFileLogObserver(self.file)
        startLoggingWithObserver(self.log_observer.emit, setStdout=False)
        self.event_dict = {'system': 'scrapyrt', 'message': 'blah'}
    def tearDown(self):
        removeObserver(self.log_observer.emit)
    def test_emit_called(self, emit_mock):
        """Ordinary scrapyrt events reach the underlying observer."""
        self.log_observer.emit(self.event_dict)
        self.assertTrue(emit_mock.called)
    def test_scrapy_filtering(self, emit_mock):
        """Events from the 'scrapy' system are filtered out."""
        self.event_dict['system'] = 'scrapy'
        self.log_observer.emit(self.event_dict)
        self.assertFalse(emit_mock.called)
    def test_log_start_messages_filtering(self, emit_mock):
        """'Log opened.' is dropped for HTTPChannel but kept for others."""
        self.event_dict['system'] = 'HTTPChannel'
        self.event_dict['message'] = 'Log opened.'
        self.log_observer.emit(self.event_dict)
        self.assertFalse(emit_mock.called)
        self.event_dict['system'] = 'other'
        self.log_observer.emit(self.event_dict)
        self.assertTrue(emit_mock.called)
    def test_unicode_message(self, emit_mock):
        # NOTE(review): asserts the message arrives UTF-8 *encoded* (bytes)
        # while self.file is a StringIO — looks like py2-era behaviour;
        # confirm this still holds under py3.
        original_message = u'Привет, мир!'
        msg(original_message)
        transformed_message = emit_mock.call_args[0][1]['message'][0]
        self.assertEqual(transformed_message, original_message.encode('utf-8'))
|
{
"content_hash": "cad122982357a5d243bdc65acee2c4c4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 35.82222222222222,
"alnum_prop": 0.6873449131513648,
"repo_name": "pawelmhm/scrapyrt",
"id": "149ba0919eafea3ab54f26f0329b0b3f772562b5",
"size": "1645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_log_observer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "819"
},
{
"name": "HTML",
"bytes": "471"
},
{
"name": "Python",
"bytes": "104615"
}
],
"symlink_target": ""
}
|
import configparser
import logging
import logging.handlers
import optparse
import os
import pwd
import random
import re
import signal
import subprocess
import sys
import time
import urllib.error
import urllib.parse
import urllib.request
def ping(host):
    """Return True if *host* answers a single ICMP echo request.

    All of the child's standard streams are discarded; only the exit code
    matters (0 means at least one reply was received).
    """
    # subprocess.DEVNULL replaces the manual os.open(os.devnull)/os.close
    # bookkeeping of the original — no fd to leak if call() raises.
    ping_cmd = ["ping", "-q", "-c1", host]
    ping_ret = subprocess.call(ping_cmd,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL,
                               stdin=subprocess.DEVNULL)
    return ping_ret == 0
class ConnectionMonitor:
    """Decides whether the machine currently has internet connectivity."""
    PING_HOSTS = ["www.google.com", "www.yahoo.com",
                  "www.facebook.com", "www.bing.com",
                  "www.youtube.com", "www.stackoverflow.com"]
    def __init__(self, ping=ping):
        # Injectable ping function (defaults to the module-level ping).
        self.ping = ping
    def connected(self):
        """Ping well-known hosts in random order; True if any answers."""
        random.shuffle(self.PING_HOSTS)
        return any(self.ping(host) for host in self.PING_HOSTS)
class ExternalIpLookupService:
    """Wrapper around an external IP lookup service. Can be configured to
    enforce a minimal delay between each queries."""
    # Over-simplified regular expression to match IP address.
    ip_regex = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")

    def __init__(self, name, url, min_delay_between_query=0):
        self.name = name
        self.url = url
        self.min_delay_between_query = min_delay_between_query
        self.last_query_time = 0
        self.logger = logging.getLogger("ipupdate.ExternalIpLookupService")

    def ready(self):
        """True when enough time has elapsed since the last query."""
        elapsed = int(time.time()) - self.last_query_time
        return self.min_delay_between_query < elapsed

    def query(self):
        """Fetch the service URL and return the first IP found, or None."""
        if not self.ready():
            return None
        self.logger.info("Querying %s." % self.name)
        self.last_query_time = int(time.time())
        try:
            response = urllib.request.urlopen(self.url, None, 30)
            body = response.read().decode("utf-8")
        except urllib.error.HTTPError as err:
            # HTTPError must be caught before its parent URLError.
            self.logger.error("HTTPError (%s) querying %s." % (err.code, self.name))
            return None
        except urllib.error.URLError as err:
            self.logger.error("URLError (%s) querying %s." % (err.reason, self.name))
            return None
        match = self.ip_regex.search(body)
        if match is None:
            self.logger.warning("No IP address in %s response." % self.name)
            return None
        ip = match.group(0)
        self.logger.info("%s" % ip)
        return ip
class ExternalIpLookupServicePool:
    """Holds several ExternalIpLookupService instances so the watchdog can
    query more often while, hopefully, increasing the overall success
    rate."""

    def __init__(self, services):
        self.services = services
        self.logger = logging.getLogger("ipupdate.ExternalIpLookupServicePool")

    def add(self, service):
        self.services.append(service)

    def ready(self):
        """True when at least one service is ready to handle a request."""
        return any(service.ready() for service in self.services)

    def query(self):
        """Return the first IP any service yields, or None if all fail."""
        for service in self.services:
            ip = service.query()
            if ip is not None:
                return ip
        self.logger.warning("Can't determine the external IP address.")
        return None
class DynResponseParser:
    """Handles the parsing of the responses returned by the Dyn DNS API."""

    # A lone lowercase token (optionally "!"-prefixed) is a system-wide error.
    _dyn_error_regex = re.compile(r"^(!?[a-z0-9]+)$")
    # Per-host lines look like "<status> <dotted-quad ip>".
    _host_error_regex = re.compile(r"^([a-z]+) (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$")

    def parse(self, response, hostnames):
        """Return (system_error_or_None, {hostname: (status, ip)})."""
        system_error = self._parse_dyn_error(response)
        if system_error:
            return system_error, {}
        return None, self._parse_parts(response, hostnames)

    def _parse_dyn_error(self, response):
        match = self._dyn_error_regex.match(response)
        return match.group(0) if match else None

    def _parse_parts(self, response, hostnames):
        lines = response.split('\n')
        if len(lines) != len(hostnames):
            raise Exception("Reponse/hostnames length mismatch!")
        return {hostname: self._parse_part(line)
                for hostname, line in zip(hostnames, lines)}

    def _parse_part(self, part):
        matches = self._host_error_regex.match(part)
        if not matches:
            raise Exception("Could not parse host status.")
        return matches.group(1), matches.group(2)
class DynService:
    """Simple wrapper around urllib to perform GET request against the Dyn
    DNS REST API."""

    base_url = "https://members.dyndns.org/"
    update_url = "https://members.dyndns.org/nic/update"

    class IgnoreError500(urllib.request.BaseHandler):
        """Pass HTTP 500 responses through instead of raising.

        Dyn DNS reports the 911/dnserr return codes with a 500 status;
        suppressing urllib's default exception lets callers read the Dyn
        return code from the response body.
        """
        def http_error_500(self, request, response, code, msg, hdrs):
            return response

    def __init__(self, username, password, response_parser):
        self.logger = logging.getLogger("ipupdate.DynService")
        self.response_parser = response_parser
        # Pre-authenticated opener: basic auth against the Dyn base URL.
        credentials = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        credentials.add_password(user=username, passwd=password,
                                 realm=None, uri=self.base_url)
        basic_auth = urllib.request.HTTPBasicAuthHandler(credentials)
        self.opener = urllib.request.build_opener(basic_auth,
                                                  self.IgnoreError500)

    def send_update(self, ip, hostnames):
        """GET the update endpoint and return the parsed (error, statuses)."""
        query = urllib.parse.urlencode({
            "hostname": ",".join(hostnames),
            "myip": ip
        })
        request_url = "%s?%s" % (self.update_url, query)
        self.logger.debug("Send request: '%s'" % request_url)
        raw = self.opener.open(request_url).read().decode("utf-8").strip()
        self.logger.debug("Got response: '%s'" % raw)
        return self.response_parser.parse(raw, hostnames)
# API details can be found at http://dyn.com/support/developers/api/
class DynUpdater:
    """Drives one full DNS update: submits the new IP for every hostname
    and interprets the response."""

    # Human-readable text for Dyn's system-wide error codes.
    system_errors = {
        "badauth": "Bad authorization (username or password).",
        "!donator": "The offline setting was set and the user is not a donator.",
        "badagent": "The user agent was not sent or HTTP method is not permitted.",
        "911": "There is a problem or scheduled maintenance on Dyn DNS.",
        "dnserr": "DNS error encountered."
    }
    ok_codes = ["good", "nochg"]

    def __init__(self, hostnames, dns_service):
        self.logger = logging.getLogger("ipupdate.DynUpdater")
        self.hostnames = hostnames
        self.dns_service = dns_service

    def update(self, ip):
        """Return True when every hostname was updated successfully."""
        try:
            return self._do_update(ip)
        except urllib.error.HTTPError as err:
            self.logger.error("HTTPError (%s)." % err.code)
        except urllib.error.URLError as err:
            self.logger.error("URLError (%s)." % err.reason)
        return False

    def _do_update(self, ip):
        err, statuses = self.dns_service.send_update(ip, self.hostnames)
        if err:
            msg = self.system_errors.get(err, "Unknown error code.")
            self.logger.error("%s: %s" % (err, msg))
            return False
        for hostname, status in statuses.items():
            self.logger.info("%s %s %s" % (hostname, status[0], status[1]))
        return all(status[0] in self.ok_codes
                   for status in statuses.values())
class IpWatchDog:
    """Periodically queries the external IP address and, when it changes,
    pushes the new address through the DNS updater."""

    def __init__(self, connection_monitor, ip_lookup_service,
                 dns_updater, check_interval):
        self.logger = logging.getLogger("ipupdate.IpWatchDog")
        self.connection_monitor = connection_monitor
        self.ip_lookup_service = ip_lookup_service
        self.dns_updater = dns_updater
        self.check_interval = check_interval
        # Last IP successfully pushed to DNS; None until the first success.
        self.current_ip = None

    def watch(self):
        """Run update() forever, sleeping check_interval seconds between
        iterations."""
        while True:
            self.update()
            time.sleep(self.check_interval)

    def update(self):
        """One guarded update cycle; exceptions are logged, never raised."""
        try:
            self._do_update()
        except Exception:
            self.logger.exception("Unhandled exception during update.")

    def _do_update(self):
        if not self.connection_monitor.connected():
            self.logger.warning("No connection, skipping update.")
            return
        ip = self.ip_lookup_service.query()
        if ip is None or ip == self.current_ip:
            # Lookup failed or nothing changed; try again next cycle.
            return
        self.logger.info("Updating external IP address to %s." % ip)
        if not self.dns_updater.update(ip):
            self.logger.info("Failed to update external IP address.")
            return
        self.logger.info("Updated external IP address to %s." % ip)
        self.current_ip = ip
def daemonize():
    """Detach from the controlling terminal (classic double fork) and drop
    privileges to the dedicated "ipupdate" user.

    Exits the process with status 1 if any of the system calls fail.
    """
    try:
        # First fork: parent exits so we are not a process-group leader,
        # which allows setsid() to succeed.
        if os.fork() != 0:
            os._exit(0)
        os.setsid()
        # Second fork: ensure the daemon can never reacquire a controlling
        # terminal.
        if os.fork() != 0:
            os._exit(0)
        os.chdir("/")
        # Always turn off group and other write bit when creating new files.
        os.umask(0o022)
        # Change current user. Notice that the call order is important since
        # after the setuid() call, the effective UID isn't 0 any more while
        # calling setgid() requires root privileges.
        pwd_entry = pwd.getpwnam("ipupdate")
        os.setgid(pwd_entry.pw_gid)
        os.setuid(pwd_entry.pw_uid)
    except OSError as err:
        print("OSError(%s, %s)." % (err.strerror, err.errno), file=sys.stderr)
        exit(1)
def configure_logging(options):
    """Wire the "ipupdate" logger to a size-rotated log file.

    When running as a daemon, also point stdin/stdout/stderr at /dev/null
    so stray prints cannot hit a detached terminal.
    """
    msgfmt = "%(asctime)s (%(levelname)s) %(name)s: %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"
    formatter = logging.Formatter(fmt=msgfmt, datefmt=datefmt)
    RotatingFileHandler = logging.handlers.RotatingFileHandler
    rotating_file_handler = RotatingFileHandler(options.log_file,
                                                maxBytes=options.log_size,
                                                backupCount=options.log_num)
    rotating_file_handler.setFormatter(formatter)
    # Map the textual log level from the command line to a logging constant.
    levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL
    }
    logger = logging.getLogger("ipupdate")
    logger.setLevel(levels[options.log_level])
    logger.addHandler(rotating_file_handler)
    if options.daemonize:
        # Redirect the standard streams to /dev/null, then close the spare fd.
        fd = os.open(os.devnull, os.O_RDWR)
        os.dup2(fd, sys.stdin.fileno())
        os.dup2(fd, sys.stdout.fileno())
        os.dup2(fd, sys.stderr.fileno())
        os.close(fd)
# The init process will send a SIGTERM signal at shutdown.
def sigterm_handler(signum, frame):
    """Log the shutdown, flush/close logging handlers, and exit cleanly."""
    logging.getLogger("ipupdate").info("SIGTERM received, shutting down.")
    logging.shutdown()
    exit(0)
def configure_signals():
    """Install the SIGTERM handler (used when running as a daemon)."""
    signal.signal(signal.SIGTERM, sigterm_handler)
def parse_args():
    """Parse command-line arguments and return the optparse options object.

    Exits with a usage error when the positional CONFIG_FILE argument is
    missing or does not name an existing file.
    """
    usage = "usage: %prog [options] CONFIG_FILE"
    argparser = optparse.OptionParser(usage=usage)
    argparser.add_option("-d", "--daemon",
                         action="store_true",
                         default=False,
                         dest="daemonize",
                         help="run as daemon")
    argparser.add_option("-l",
                         action="store",
                         choices=["debug", "info", "warning",
                                  "error", "critical"],
                         default="info",
                         dest="log_level",
                         help="log level (debug, info, warning, error, critical)")
    # options contains all the optional arguments
    # args contains all the positional arguments
    (options, args) = argparser.parse_args()
    if len(args) != 1:
        argparser.error("no configuration file")
    options.config_filename = args[0]
    if not os.path.isfile(options.config_filename):
        argparser.error("configuration file doesn't exist")
    return options
def parse_config(options):
    """Read the config file named by options.config_filename and attach its
    values to *options*; returns the same options object.

    Exits with status 1 (after printing to stderr) on any configparser
    error — logging is not configured yet at this point.
    """
    # FIX: SafeConfigParser is a deprecated alias removed in Python 3.12,
    # and ConfigParser defaults must be strings — non-string defaults break
    # interpolation/get(); getint() converts the string defaults back.
    config = configparser.ConfigParser({
        "check_interval": str(60 * 5),
        "log_file": "ipupdate.log",
        "log_size": str(1024 * 128),
        "log_num": "5"
    })
    try:
        config.read(options.config_filename)
        options.log_file = config.get("logging", "log_file")
        options.log_size = config.getint("logging", "log_size")
        options.log_num = config.getint("logging", "log_num")
        options.username = config.get("account", "username")
        options.password = config.get("account", "password")
        options.check_interval = config.getint("configuration", "check_interval")
        options.hosts = config.get("configuration", "hosts").split(",")
    except configparser.Error as err:
        # Logging isn't enabled yet.
        print(err, file=sys.stderr)
        exit(1)
    return options
def main():
    """Entry point: parse args and config, optionally daemonize, configure
    logging, then watch the external IP forever."""
    options = parse_args()
    parse_config(options)
    if options.daemonize:
        configure_signals()
        daemonize()
    # Make sure to configure logging after the process has been daemonized,
    # otherwise, the log files will be opened with the wrong permissions.
    configure_logging(options)
    response_parser = DynResponseParser()
    dns_service = DynService(options.username, options.password, response_parser)
    dns_updater = DynUpdater(options.hosts, dns_service)
    # Staggered minimum delays avoid hammering any single provider.
    ip_lookup_service_pool = ExternalIpLookupServicePool([
        ExternalIpLookupService("ipappspot", "http://ip.appspot.com/", 60),
        ExternalIpLookupService("easydns", "http://support.easydns.com/utils/get_ip.php", 360),
        ExternalIpLookupService("dyndns", "http://checkip.dyndns.org/", 720),
        ExternalIpLookupService("ifconfig", "http://ifconfig.me/ip", 300),
        ExternalIpLookupService("icanhazip", "http://icanhazip.com/", 300),
        ExternalIpLookupService("dnsomatic", "http://myip.dnsomatic.com/", 300)
    ])
    logging.getLogger("ipupdate").info("Started.")
    # watch() loops forever (until KeyboardInterrupt or SIGTERM).
    watchdog = IpWatchDog(ConnectionMonitor(), ip_lookup_service_pool,
                          dns_updater, options.check_interval)
    watchdog.watch()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop the watcher without a traceback.
        pass
|
{
"content_hash": "0aa05d7816da63fcd9b5106b315e4623",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 95,
"avg_line_length": 34.433408577878104,
"alnum_prop": 0.6075127835321883,
"repo_name": "MathieuTurcotte/ipupdate",
"id": "ce599e44b53b6d1f74cb729d658c0cbc962a12eb",
"size": "15351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipupdate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24104"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
import urllib.request
import gzip
import struct
def download_data(url, force_download=True):
    """Download *url* into ./data/ and return the local file path.

    With force_download=True (the default) the file is re-fetched even when
    it already exists locally.
    """
    if not os.path.isdir('data'):
        os.makedirs('data')
    fname = os.path.join('data', url.split("/")[-1])
    if force_download or not os.path.exists(fname):
        urllib.request.urlretrieve(url, fname)
    return fname
def read_data(label_url, image_url):
    """Download and parse an MNIST-style gzipped label/image file pair.

    Returns (label, image): label is an int8 vector, image a uint8 array of
    shape (len(label), rows, cols).
    """
    with gzip.open(download_data(label_url)) as flbl:
        # Header: magic number and item count as big-endian uint32s.
        magic, num = struct.unpack(">II", flbl.read(8))
        # FIX: np.fromstring is deprecated for binary data; frombuffer is
        # the supported replacement (note: it returns a read-only view).
        label = np.frombuffer(flbl.read(), dtype=np.int8)
    with gzip.open(download_data(image_url), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image)
|
{
"content_hash": "35571d857ab71fe78c56bbbf8aad9463",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 32.44,
"alnum_prop": 0.654747225647349,
"repo_name": "boofhead/sghnt_example",
"id": "359f13284930f29d102936a01e2ca5876257b709",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "read_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18494"
},
{
"name": "Python",
"bytes": "14835"
}
],
"symlink_target": ""
}
|
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class InverseSquareRootLRScheduleConfig(FairseqDataclass):
    """Options for the inverse-square-root learning-rate schedule."""
    warmup_updates: int = field(
        default=4000,
        metadata={"help": "warmup the learning rate linearly for the first N updates"},
    )
    # -1 is a sentinel: the scheduler replaces it with 0 (or the target lr
    # when there is no warmup) in InverseSquareRootSchedule.__init__.
    warmup_init_lr: float = field(
        default=-1,
        metadata={
            "help": "initial learning rate during warmup phase; default is cfg.lr"
        },
    )
    # Interpolated from the top-level optimization config at parse time.
    lr: List[float] = II("optimization.lr")
@register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig)
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.
    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (``--warmup-init-lr``) until the configured
    learning rate (``--lr``). Thereafter we decay proportional to the number of
    updates, with a decay factor set to align with the configured learning rate.
    During warmup::
      lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
      lr = lrs[update_num]
    After warmup::
      decay_factor = cfg.lr * sqrt(cfg.warmup_updates)
      lr = decay_factor / sqrt(update_num)
    """
    def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer):
        super().__init__(cfg, optimizer)
        # A multi-valued lr means a fixed per-epoch schedule, which this
        # scheduler cannot honour.
        if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
            raise ValueError(
                "Cannot use a fixed learning rate schedule with inverse_sqrt."
                " Consider --lr-scheduler=fixed instead."
            )
        warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
        if cfg.warmup_init_lr < 0:
            # Sentinel default (-1): warm up from 0, or skip warmup entirely
            # by starting at the target lr when warmup_updates == 0.
            cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
        # linearly warmup for the first cfg.warmup_updates
        self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
        # then, decay prop. to the inverse square root of the update number
        self.decay_factor = warmup_end_lr * cfg.warmup_updates**0.5
        # initial learning rate
        self.lr = cfg.warmup_init_lr
        self.optimizer.set_lr(self.lr)
    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()
    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.cfg.warmup_updates:
            # Linear warmup phase.
            self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
        else:
            # Inverse-square-root decay phase.
            self.lr = self.decay_factor * num_updates**-0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
|
{
"content_hash": "9df458a07511fb7adf44c48c641d311c",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 87,
"avg_line_length": 38.125,
"alnum_prop": 0.6616393442622951,
"repo_name": "pytorch/fairseq",
"id": "987c905a23d50342dd7e809e0eddb5a6df2ebe90",
"size": "3228",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/optim/lr_scheduler/inverse_square_root_schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
}
|
__doc__ = """Security Proxy for OGC Services like WPS."""
__author__ = """Carsten Ehbrecht"""
__email__ = 'ehbrecht@dkrz.de'
__version__ = '0.7.0'
|
{
"content_hash": "e387ec07d9f4800fb15d6acd80a50eea",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 57,
"avg_line_length": 36.75,
"alnum_prop": 0.5918367346938775,
"repo_name": "bird-house/twitcher",
"id": "895fe895fb39060f38495190ff492b4005d87980",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitcher/__version__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1280"
},
{
"name": "Jupyter Notebook",
"bytes": "18058"
},
{
"name": "Makefile",
"bytes": "7005"
},
{
"name": "Mako",
"bytes": "492"
},
{
"name": "Python",
"bytes": "131658"
}
],
"symlink_target": ""
}
|
import importlib
import sys
import warnings
from inspect import isabstract
from unittest import TestCase, mock
import pytest
from parameterized import parameterized
from airflow.models.baseoperator import BaseOperator
from tests.deprecated_classes import ALL, RENAMED_ALL
class TestDeprecations(TestCase):
    """Verify that renamed/deprecated classes still import, emit
    DeprecationWarnings pointing at their replacements, and subclass them."""
    @staticmethod
    def assert_warning(msg: str, warnings):
        # NOTE: `warnings` here is the recorded-warnings list, not the stdlib module
        error = f"Text '{msg}' not in warnings"
        assert any(msg in str(w) for w in warnings), error
    def assert_is_subclass(self, clazz, other):
        assert issubclass(clazz, other), f"{clazz} is not subclass of {other}"
    def assert_proper_import(self, old_resource, new_resource):
        # Importing the old module path must warn and mention the new module path.
        new_path, _, _ = new_resource.rpartition(".")
        old_path, _, _ = old_resource.rpartition(".")
        with pytest.warns(DeprecationWarning) as warnings:
            # Reload to see deprecation warning each time
            importlib.reload(importlib.import_module(old_path))
        self.assert_warning(new_path, warnings)
    def skip_test_with_mssql_in_py38(self, path_a="", path_b=""):
        # the mssql package cannot be imported on Python >= 3.8
        py_38 = sys.version_info >= (3, 8)
        if py_38:
            if "mssql" in path_a or "mssql" in path_b:
                raise self.skipTest("Mssql package not available when Python >= 3.8.")
    @staticmethod
    def get_class_from_path(path_to_class, parent=False):
        """
        Import and return the class at the given dotted path.

        :param path_to_class: the path to the class
        :param parent: indicates if "path_to_class" arg is super class
        """
        path, _, class_name = path_to_class.rpartition(".")
        module = importlib.import_module(path)
        class_ = getattr(module, class_name)
        if isabstract(class_) and not parent:
            # Abstract classes cannot be instantiated; build a concrete
            # subclass whose abstract methods are MagicMocks.
            class_name = f"Mock({class_.__name__})"
            attributes = {a: mock.MagicMock() for a in class_.__abstractmethods__}
            new_class = type(class_name, (class_,), attributes)
            return new_class
        return class_
    @parameterized.expand(RENAMED_ALL)
    def test_is_class_deprecated(self, new_module, old_module):
        self.skip_test_with_mssql_in_py38(new_module, old_module)
        deprecation_warning_msg = "This class is deprecated."
        with pytest.warns(DeprecationWarning, match=deprecation_warning_msg) as warnings:
            old_module_class = self.get_class_from_path(old_module)
            # discard warnings emitted at import time; only the warning from
            # instantiation below is asserted
            warnings.clear()
            with mock.patch(f"{new_module}.__init__") as init_mock:
                init_mock.return_value = None
                klass = old_module_class()
                if isinstance(klass, BaseOperator):
                    # In case of operators we are validating that proper stacklevel
                    # is used (=3)
                    assert len(warnings) >= 1
                    # For nicer error reporting from pytest, create a static
                    # list of filenames
                    files = [warning.filename for warning in warnings]
                    assert __file__ in files, old_module
            init_mock.assert_called_once()
    @parameterized.expand(ALL)
    def test_is_subclass(self, parent_class_path, sub_class_path):
        self.skip_test_with_mssql_in_py38(parent_class_path, sub_class_path)
        with mock.patch(f"{parent_class_path}.__init__"), warnings.catch_warnings(record=True):
            parent_class_path = self.get_class_from_path(parent_class_path, parent=True)
            sub_class_path = self.get_class_from_path(sub_class_path)
            self.assert_is_subclass(sub_class_path, parent_class_path)
    @parameterized.expand(ALL)
    def test_warning_on_import(self, new_path, old_path):
        self.skip_test_with_mssql_in_py38(new_path, old_path)
        self.assert_proper_import(old_path, new_path)
    def test_no_redirect_to_deprecated_classes(self):
        """
        When we have the following items:
        new_A, old_B
        old_B, old_C
        This will tell us to use new_A instead of old_B.
        """
        all_classes_by_old = {old: new for new, old in ALL}
        for new, old in ALL:
            # Using if statement allows us to create a developer-friendly message only when we need it.
            # Otherwise, it wouldn't always be possible - KeyError
            if new in all_classes_by_old:
                raise AssertionError(
                    f'Deprecation "{old}" to "{new}" is incorrect. '
                    f'Please use \"{all_classes_by_old[new]}\" instead of "{old}".'
                )
|
{
"content_hash": "3f8c0482b9afbd0ec6945b8d900bdd88",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 103,
"avg_line_length": 41.91588785046729,
"alnum_prop": 0.6115942028985507,
"repo_name": "mistercrunch/airflow",
"id": "c519b7a3df97dabe858b48674733d28982d73989",
"size": "5273",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/always/test_deprecations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary import event_multiplexer
def _AddEvents(path):
  """Create an empty fake events file under *path* and return its full path."""
  if not gfile.IsDirectory(path):
    gfile.MakeDirs(path)
  events_path = os.path.join(path, 'hypothetical.tfevents.out')
  # opening for write creates the (empty) file; its contents never matter
  with gfile.GFile(events_path, 'w'):
    return events_path
def _CreateCleanDirectory(path):
  """Ensure *path* exists as a brand-new, empty directory."""
  already_exists = gfile.IsDirectory(path)
  if already_exists:
    gfile.DeleteRecursively(path)
  gfile.MkDir(path)
class _FakeAccumulator(object):
  """Test double for EventAccumulator: lookups echo '<path>/<tag>' strings.

  The five per-category lookup methods previously repeated the same
  membership-check-then-format body verbatim; they now share _TagValues.
  """

  def __init__(self, path):
    self._path = path
    self.reload_called = False

  def Tags(self):
    # Fixed tag inventory used by all lookup methods below.
    return {event_accumulator.IMAGES: ['im1', 'im2'],
            event_accumulator.AUDIO: ['snd1', 'snd2'],
            event_accumulator.HISTOGRAMS: ['hst1', 'hst2'],
            event_accumulator.COMPRESSED_HISTOGRAMS: ['cmphst1', 'cmphst2'],
            event_accumulator.SCALARS: ['sv1', 'sv2']}

  def FirstEventTimestamp(self):
    return 0

  def _TagValues(self, category, tag_name):
    # Shared body for the per-category lookups: raise KeyError for unknown
    # tags (mirroring the real accumulator), otherwise return a marker value.
    if tag_name not in self.Tags()[category]:
      raise KeyError
    return ['%s/%s' % (self._path, tag_name)]

  def Scalars(self, tag_name):
    return self._TagValues(event_accumulator.SCALARS, tag_name)

  def Histograms(self, tag_name):
    return self._TagValues(event_accumulator.HISTOGRAMS, tag_name)

  def CompressedHistograms(self, tag_name):
    return self._TagValues(event_accumulator.COMPRESSED_HISTOGRAMS, tag_name)

  def Images(self, tag_name):
    return self._TagValues(event_accumulator.IMAGES, tag_name)

  def Audio(self, tag_name):
    return self._TagValues(event_accumulator.AUDIO, tag_name)

  def Reload(self):
    self.reload_called = True
# pylint: disable=unused-argument
def _GetFakeAccumulator(
    path,
    size_guidance=None,
    compression_bps=None,
    purge_orphaned_data=None):
  """Factory matching EventAccumulator's signature; extra kwargs are ignored."""
  return _FakeAccumulator(path)
# pylint: enable=unused-argument
class EventMultiplexerTest(test_util.TensorFlowTestCase):
  """Tests for EventMultiplexer, using _FakeAccumulator in place of the
  real EventAccumulator (patched in setUp)."""
  def setUp(self):
    super(EventMultiplexerTest, self).setUp()
    # swap in the fake accumulator factory for every test in this class
    event_accumulator.EventAccumulator = _GetFakeAccumulator
  def testEmptyLoader(self):
    x = event_multiplexer.EventMultiplexer()
    self.assertEqual(x.Runs(), {})
  def testRunNamesRespected(self):
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'run2'])
    self.assertEqual(x._GetAccumulator('run1')._path, 'path1')
    self.assertEqual(x._GetAccumulator('run2')._path, 'path2')
  def testReload(self):
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertFalse(x._GetAccumulator('run1').reload_called)
    self.assertFalse(x._GetAccumulator('run2').reload_called)
    x.Reload()
    self.assertTrue(x._GetAccumulator('run1').reload_called)
    self.assertTrue(x._GetAccumulator('run2').reload_called)
  def testScalars(self):
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    run1_actual = x.Scalars('run1', 'sv1')
    run1_expected = ['path1/sv1']
    self.assertEqual(run1_expected, run1_actual)
  def testExceptions(self):
    # note: arguments are (run, tag); 'sv1' is not a known run
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    with self.assertRaises(KeyError):
      x.Scalars('sv1', 'xxx')
  def testInitialization(self):
    x = event_multiplexer.EventMultiplexer()
    self.assertEqual(x.Runs(), {})
    x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'})
    self.assertItemsEqual(x.Runs(), ['run1', 'run2'])
    self.assertEqual(x._GetAccumulator('run1')._path, 'path1')
    self.assertEqual(x._GetAccumulator('run2')._path, 'path2')
  def testAddRunsFromDirectory(self):
    # runs only appear once a directory actually contains an events file
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    fakedir = join(tmpdir, 'fake_accumulator_directory')
    realdir = join(tmpdir, 'real_accumulator_directory')
    self.assertEqual(x.Runs(), {})
    x.AddRunsFromDirectory(fakedir)
    self.assertEqual(x.Runs(), {}, 'loading fakedir had no effect')
    _CreateCleanDirectory(realdir)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'loading empty directory had no effect')
    path1 = join(realdir, 'path1')
    gfile.MkDir(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'creating empty subdirectory had no effect')
    _AddEvents(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1'], 'loaded run: path1')
    loader1 = x._GetAccumulator('path1')
    self.assertEqual(loader1._path, path1, 'has the correct path')
    path2 = join(realdir, 'path2')
    _AddEvents(path2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2'])
    # re-adding the same directory must not rebuild existing accumulators
    self.assertEqual(
        x._GetAccumulator('path1'), loader1, 'loader1 not regenerated')
    path2_2 = join(path2, 'path2')
    _AddEvents(path2_2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2', 'path2/path2'])
    self.assertEqual(
        x._GetAccumulator('path2/path2')._path, path2_2, 'loader2 path correct')
  def testAddRunsFromDirectoryThatContainsEvents(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    self.assertEqual(x.Runs(), {})
    _AddEvents(realdir)
    x.AddRunsFromDirectory(realdir)
    # events directly in the root directory are exposed as run '.'
    self.assertItemsEqual(x.Runs(), ['.'])
    subdir = join(realdir, 'subdir')
    _AddEvents(subdir)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['.', 'subdir'])
  def testAddRunsFromDirectoryWithRunNames(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    self.assertEqual(x.Runs(), {})
    _AddEvents(realdir)
    x.AddRunsFromDirectory(realdir, 'foo')
    # the explicit name prefixes every discovered run
    self.assertItemsEqual(x.Runs(), ['foo/.'])
    subdir = join(realdir, 'subdir')
    _AddEvents(subdir)
    x.AddRunsFromDirectory(realdir, 'foo')
    self.assertItemsEqual(x.Runs(), ['foo/.', 'foo/subdir'])
  def testAddRunsFromDirectoryWalksTree(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    realdir = join(tmpdir, 'event_containing_directory')
    _CreateCleanDirectory(realdir)
    _AddEvents(realdir)
    sub = join(realdir, 'subdirectory')
    sub1 = join(sub, '1')
    sub2 = join(sub, '2')
    sub1_1 = join(sub1, '1')
    _AddEvents(sub1)
    _AddEvents(sub2)
    _AddEvents(sub1_1)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['.', 'subdirectory/1', 'subdirectory/2',
                                     'subdirectory/1/1'])
  def testAddRunsFromDirectoryThrowsException(self):
    # passing a file (not a directory) must be rejected
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    filepath = _AddEvents(tmpdir)
    with self.assertRaises(ValueError):
      x.AddRunsFromDirectory(filepath)
  def testAddRun(self):
    x = event_multiplexer.EventMultiplexer()
    x.AddRun('run1_path', 'run1')
    run1 = x._GetAccumulator('run1')
    self.assertEqual(sorted(x.Runs().keys()), ['run1'])
    self.assertEqual(run1._path, 'run1_path')
    x.AddRun('run1_path', 'run1')
    self.assertEqual(run1, x._GetAccumulator('run1'), 'loader not recreated')
    # same run name with a new path replaces the accumulator
    x.AddRun('run2_path', 'run1')
    new_run1 = x._GetAccumulator('run1')
    self.assertEqual(new_run1._path, 'run2_path')
    self.assertNotEqual(run1, new_run1)
    # with no explicit name, the path doubles as the run name
    x.AddRun('runName3')
    self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'runName3'])
    self.assertEqual(x._GetAccumulator('runName3')._path, 'runName3')
  def testAddRunMaintainsLoading(self):
    # runs added after a Reload() are reloaded immediately
    x = event_multiplexer.EventMultiplexer()
    x.Reload()
    x.AddRun('run1')
    x.AddRun('run2')
    self.assertTrue(x._GetAccumulator('run1').reload_called)
    self.assertTrue(x._GetAccumulator('run2').reload_called)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  googletest.main()
|
{
"content_hash": "e3b1a726d52861fe91b2431108c39ce6",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 80,
"avg_line_length": 32.28136882129277,
"alnum_prop": 0.676678445229682,
"repo_name": "EvenStrangest/tensorflow",
"id": "bf8851aa52c7640bde50f1666e4677d9806b8deb",
"size": "9180",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/summary/event_multiplexer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156263"
},
{
"name": "C++",
"bytes": "9372687"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "784316"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1533241"
},
{
"name": "Makefile",
"bytes": "11364"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "112557"
},
{
"name": "Python",
"bytes": "6949434"
},
{
"name": "Shell",
"bytes": "196466"
},
{
"name": "TypeScript",
"bytes": "411503"
}
],
"symlink_target": ""
}
|
import pytz
from datetime import datetime
from mock import mock, patch
import sys
from .base import TestCase
from django.conf import settings
from graphite.render.datalib import TimeSeries, fetchData, _merge_results, prefetchData
from graphite.util import timebounds
from six.moves import range
class TimeSeriesTest(TestCase):
    """Tests for TimeSeries construction, tag parsing, equality and
    consolidation (average/sum/max/min/first/last + xFilesFactor)."""
    def test_TimeSeries_init_no_args(self):
        # the TypeError message differs between Python 2 and 3
        if sys.version_info[0] >= 3:
            msg = '__init__\(\) missing 5 required positional arguments'
        else:
            msg = '__init__\(\) takes at least 6 arguments \(1 given\)'
        with self.assertRaisesRegexp(TypeError, msg):
            TimeSeries()
    def test_TimeSeries_init_string_values(self):
        # a string values argument is treated as a sequence of characters
        series = TimeSeries("collectd.test-db.load.value", 0, 2, 1, "ab")
        expected = TimeSeries("collectd.test-db.load.value", 0, 2, 1, ["a","b"])
        self.assertEqual(series, expected)
    def test_TimeSeries_init_tag_parse(self):
        series = TimeSeries("collectd.test-db.load.value;tag=value", 0, 2, 1, [1, 2])
        self.assertEqual(series.tags, {'name': 'collectd.test-db.load.value', 'tag': 'value'})
    def test_TimeSeries_init_tag_parse_fail(self):
        # an unparseable tag suffix leaves the full name (incl. ';') as-is
        series = TimeSeries("collectd.test-db.load.value;", 0, 2, 1, [1, 2])
        self.assertEqual(series.tags, {'name': 'collectd.test-db.load.value;'})
    def test_TimeSeries_equal_list(self):
        # a TimeSeries never compares equal to a plain list
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        with self.assertRaises(AssertionError):
            self.assertEqual(values, series)
    def test_TimeSeries_equal_list_color(self):
        values = list(range(0,100))
        series1 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series1.color = 'white'
        series2 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series2.color = 'white'
        self.assertEqual(series1, series2)
    def test_TimeSeries_equal_list_color_bad(self):
        # equality takes the optional .color attribute into account
        values = list(range(0,100))
        series1 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series2 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series2.color = 'white'
        with self.assertRaises(AssertionError):
            self.assertEqual(series1, series2)
    def test_TimeSeries_equal_list_color_bad2(self):
        values = list(range(0,100))
        series1 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series2 = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        series1.color = 'white'
        with self.assertRaises(AssertionError):
            self.assertEqual(series1, series2)
    def test_TimeSeries_getInfo(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        self.assertEqual(series.getInfo(), {
            'name': 'collectd.test-db.load.value',
            'values': values,
            'start': 0,
            'step': 1,
            'end': len(values),
            'pathExpression': 'collectd.test-db.load.value',
            'valuesPerPoint': 1,
            'consolidationFunc': 'average',
            'xFilesFactor': 0,
        })
    def test_TimeSeries_consolidate(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values)
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
    def test_TimeSeries_iterate(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, len(values), 1, values)
        for i, val in enumerate(series):
            self.assertEqual(val, values[i])
    def test_TimeSeries_iterate_valuesPerPoint_2_none_values(self):
        values = [None, None, None, None, None]
        series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values)
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, [None, None, None])
        values = [None, None, None, None, None, 1, 2, 3, 4]
        series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values, xFilesFactor=0.1)
        self.assertEqual(series.valuesPerPoint, 1)
        self.assertEqual(series.xFilesFactor, 0.1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, [None, None, 1, 2.5, 4])
        self.assertEqual(list(series), list(expected))
        # raising xFilesFactor past the fraction of non-None values in a
        # bucket turns that bucket into None
        series.xFilesFactor = 0.5
        self.assertEqual(list(series), list(expected))
        series.xFilesFactor = 0.500001
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, [None, None, None, 2.5, None])
        self.assertEqual(list(series), list(expected))
        series.xFilesFactor = 1
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_avg(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values)
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, [0.5, 2.5, 4.5, 6.5, 8.5, 10.5, 12.5, 14.5, 16.5, 18.5, 20.5, 22.5, 24.5, 26.5, 28.5, 30.5, 32.5, 34.5, 36.5, 38.5, 40.5, 42.5, 44.5, 46.5, 48.5, 50.5, 52.5, 54.5, 56.5, 58.5, 60.5, 62.5, 64.5, 66.5, 68.5, 70.5, 72.5, 74.5, 76.5, 78.5, 80.5, 82.5, 84.5, 86.5, 88.5, 90.5, 92.5, 94.5, 96.5, 98.5])
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        # the final partial bucket averages to its only/last members
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, map(float, list(range(1, 100, 3)) + [99]))
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_sum(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='sum')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(1,200,4)))
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(3,300,9)) + [99])
        self.assertEqual(list(series), list(expected))
        # the last bucket has only 1 of 3 values; 1/3 < 0.4 -> None
        series.xFilesFactor = 0.4
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(3,300,9)) + [None])
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_max(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='max')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(1,100,2)))
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(2,100,3)) + [99])
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_min(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='min')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(0,100,2)))
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(0,100,3)))
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_first(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='first')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(0,100,2)))
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(0,100,3)))
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_last(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='last')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(1,100,2)))
        self.assertEqual(list(series), list(expected))
        series.consolidate(3)
        self.assertEqual(series.valuesPerPoint, 3)
        expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(2,100,3)) + [99])
        self.assertEqual(list(series), list(expected))
    def test_TimeSeries_iterate_valuesPerPoint_2_invalid(self):
        values = list(range(0,100))
        series = TimeSeries("collectd.test-db.load.value", 0, 5, 1, values, consolidate='bogus')
        self.assertEqual(series.valuesPerPoint, 1)
        series.consolidate(2)
        self.assertEqual(series.valuesPerPoint, 2)
        # the invalid function is only detected lazily, on iteration
        with self.assertRaisesRegexp(Exception, "Invalid consolidation function: 'bogus'"):
            result = list(series)
class DatalibFunctionTest(TestCase):
    """Tests for datalib helpers: _merge_results, fetchData and prefetchData."""
    # BUG FIX: the original default was the mutable literal ``data=[]``, which
    # is created once at definition time and shared by every call; use the
    # ``None`` sentinel and build a fresh list per call instead.
    def _build_requestContext(self, startTime=datetime(1970, 1, 1, 0, 0, 0, 0, pytz.timezone(settings.TIME_ZONE)), endTime=datetime(1970, 1, 1, 0, 59, 0, 0, pytz.timezone(settings.TIME_ZONE)), data=None, tzinfo=pytz.utc):
        """
        Helper method to create request contexts
        Args:
            startTime: datetime
            endTime: datetime
            data: list
        Returns:
        """
        if data is None:
            data = []
        return {
            'template': {},
            'args': ({}, {}),
            'startTime': startTime,
            'endTime': endTime,
            'localOnly': False,
            'data': data,
            'tzinfo': tzinfo
        }
    def test__merge_results(self):
        # overlapping partial results are merged into one complete series
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        timeInfo = [startTime, endTime, 60]
        result_queue = [
            [pathExpr, [timeInfo, [0,1,2,3,4,None,None,None,None,None]]],
            [pathExpr, [timeInfo, [None,None,None,None,None,5,6,7,8,9]]],
            [pathExpr, [timeInfo, [None,None,None,None,None,None,None,7,8,9]]],
            [pathExpr, [timeInfo, [0,1,2,3,4,None,None,7,8,9]]]
        ]
        seriesList = {}
        requestContext = self._build_requestContext(startTime, endTime)
        results = _merge_results(pathExpr, startTime, endTime, result_queue, seriesList, requestContext)
        expectedResults = [
            TimeSeries("collectd.test-db.load.value", startTime, endTime, 60, [0,1,2,3,4,5,6,7,8,9]),
        ]
        self.assertEqual(results, expectedResults)
    @mock.patch('graphite.logger.log.debug')
    def test__merge_results_no_results(self, log_debug):
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        timeInfo = [startTime, endTime, 60]
        result_queue = [
            [pathExpr, None],
        ]
        seriesList = {}
        requestContext = self._build_requestContext(startTime, endTime)
        results = _merge_results(pathExpr, startTime, endTime, result_queue, seriesList, requestContext)
        expectedResults = []
        self.assertEqual(results, expectedResults)
        log_debug.assert_called_with("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (pathExpr, startTime, endTime))
    @mock.patch('graphite.logger.log.exception')
    def test__merge_results_bad_results(self, log_exception):
        # a malformed (timeInfo, values) payload is logged and re-raised
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        timeInfo = [startTime, endTime, 60]
        result_queue = [
            [pathExpr, ['invalid input']],
        ]
        seriesList = {}
        requestContext = self._build_requestContext(startTime, endTime)
        with self.assertRaises(Exception):
            _merge_results(pathExpr, startTime, endTime, result_queue, seriesList, requestContext)
        log_exception.assert_called_with("could not parse timeInfo/values from metric '%s': %s" % (pathExpr, 'need more than 1 value to unpack'))
    def test__merge_results_multiple_series(self):
        # pre-existing entries in seriesList are preserved alongside the merge
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        timeInfo = [startTime, endTime, 60]
        result_queue = [
            [pathExpr, [timeInfo, [0,1,2,3,4,None,None,None,None,None]]],
            [pathExpr, [timeInfo, [None,None,None,None,None,5,6,7,8,9]]],
            [pathExpr, [timeInfo, [None,None,None,None,None,None,None,7,8,9]]],
            [pathExpr, [timeInfo, [0,1,2,3,4,None,None,7,8,9]]]
        ]
        seriesList = {
            'collectd.test-db.cpu.value': TimeSeries("collectd.test-db.cpu.value", startTime, endTime, 60, [0,1,2,3,4,5,6,7,8,9])
        }
        requestContext = self._build_requestContext(startTime, endTime)
        results = _merge_results(pathExpr, startTime, endTime, result_queue, seriesList, requestContext)
        expectedResults = [
            TimeSeries("collectd.test-db.cpu.value", startTime, endTime, 60, [0,1,2,3,4,5,6,7,8,9]),
            TimeSeries("collectd.test-db.load.value", startTime, endTime, 60, [0,1,2,3,4,5,6,7,8,9]),
        ]
        self.assertEqual(results, expectedResults)
    def test__merge_results_no_remote_store_merge_results(self):
        # with merging disabled, the most complete single result wins
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        timeInfo = [startTime, endTime, 60]
        result_queue = [
            [pathExpr, [timeInfo, [0,1,2,3,4,None,None,None,None,None]]],
            [pathExpr, [timeInfo, [None,None,None,3,4,5,6,7,8,9]]],
            [pathExpr, [timeInfo, [None,None,None,None,None,None,None,7,8,9]]]
        ]
        seriesList = {}
        requestContext = self._build_requestContext(startTime, endTime)
        with self.settings(REMOTE_STORE_MERGE_RESULTS=False):
            results = _merge_results(pathExpr, startTime, endTime, result_queue, seriesList, requestContext)
            expectedResults = [
                TimeSeries("collectd.test-db.load.value", startTime, endTime, 60, [None,None,None,3,4,5,6,7,8,9]),
            ]
            self.assertEqual(results, expectedResults)
    def test_fetchData(self):
        pathExpr = 'collectd.test-db.load.value'
        startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
        endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
        requestContext = self._build_requestContext(startTime, endTime)
        requestContext['now'] = endTime
        requestContext['forwardHeaders'] = None
        results = fetchData(requestContext, pathExpr)
        expectedResults = []
        self.assertEqual(results, expectedResults)
    def test_prefetchData(self):
        # STORE.finders has no non-local finders
        results = prefetchData({}, [])
        self.assertEqual(results, None)
        # STORE.fetch returns list with None value
        with patch('graphite.render.datalib.STORE.fetch', lambda *_: [None]):
            startTime = datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE))
            endTime = datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
            now = datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE))
            requestContext = {
                'startTime': startTime,
                'endTime': endTime,
                'now': now,
                'prefetched': {
                    'somekey': 'somedata',
                },
            }
            prefetchData(requestContext, ['test'])
            self.assertEqual(requestContext['prefetched'][timebounds(requestContext)], {})
|
{
"content_hash": "b4e883e4ebf47c6694e6c698569920ee",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 364,
"avg_line_length": 43.7005076142132,
"alnum_prop": 0.636194679986061,
"repo_name": "DanCech/graphite-web",
"id": "df1767401e4915b52760263923c8faaf1988dc23",
"size": "17218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/tests/test_render_datalib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "150191"
},
{
"name": "HTML",
"bytes": "21521"
},
{
"name": "JavaScript",
"bytes": "1690375"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "1234658"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
}
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
#from django.test import LiveServerTestCase
#from django.test import TestCase
from django.utils import unittest
from selenium.webdriver.firefox.webdriver import WebDriver
from models import *
import requests
from requests.auth import HTTPBasicAuth
import os
from django.conf import settings
from django.contrib.auth.models import User
import simplejson
from django.test.client import RequestFactory
from views import (
ProjectViewSet,
MachineViewSet,
VLanViewSet,
IfaceViewSet,
ExcludedIPRangeViewSet)
from rest_framework.reverse import reverse
from django.test import TestCase
from django.db import IntegrityError
from django.core.exceptions import ImproperlyConfigured
import ipaddr
def add_iface_for_machine(machine, vlan):
    """Attach a new interface on *vlan* to *machine* and return it."""
    iface = machine.add_iface(vlan)
    return iface
def add_project(name, code):
    """Create, persist and return a Project with the given name and code."""
    project = Project(name=name, code=code)
    project.save()
    return project
def add_machine(project, dns_zone, env, role, os, mtype, virtual=True, dmz_located=False):
    """Create and persist a ``Machine``.

    The hostname is left empty so that the model layer can derive it
    (auto-naming is controlled by the machine type).
    """
    machine = Machine(
        hostname="",
        project=project,
        dns_zone=dns_zone,
        environment=env,
        role=role,
        operating_system=os,
        mtype=mtype,
        virtual=virtual,
        dmz_located=dmz_located,
    )
    machine.save()
    return machine
def create_iface_creation_request(user, machine, vlan, ip=None):
    """Build an authenticated POST request for the iface-list endpoint.

    *machine* is the deserialized JSON dict of a machine (its ``"id"`` key
    is used to build the machine-detail URL).  *ip* may be None to let the
    server pick an address.
    """
    requestfactory = RequestFactory()
    data = simplejson.dumps({
        "vlan": reverse("vlan-detail", args=[vlan.pk, ]),
        "ip": ip,
        "machines": [reverse("machine-detail", args=[machine["id"], ], ), ],
    })
    request = requestfactory.post(
        reverse("iface-list"),
        data=data,
        content_type="application/json", )
    # RequestFactory does not run middleware, so user/session are set by hand.
    request.user = user
    request.session = {}
    print data  # Python 2 debug output
    return request
def create_machine_creation_request(user, project, dns_zone, env, role, os, hostname=None, mtype=None):
    """Build an authenticated POST request for the machine-list endpoint.

    *project* and *mtype* may be None: both are serialized as an empty
    string in that case.  (Previously a None *mtype* — despite being the
    declared default — raised AttributeError on ``mtype.pk``.)
    """
    requestfactory = RequestFactory()
    project = reverse(
        "project-detail",
        args=[project.pk, ]) if project is not None else ""
    # Mirror the project handling instead of crashing on the default value.
    mtype_url = reverse(
        "mtype-detail",
        args=[mtype.pk, ]) if mtype is not None else ""
    data = simplejson.dumps({
        "project": project,
        "role": reverse("role-detail", args=[role.pk, ]),
        "operating_system": reverse(
            "operatingsystem-detail",
            args=[os.pk, ]),
        "environment": reverse(
            "environment-detail",
            args=[env.pk, ]),
        "dns_zone": reverse("dnszone-detail", args=[dns_zone.pk, ]),
        "mtype": mtype_url,
        "hostname": hostname,
    })
    request = requestfactory.post(
        reverse("machine-list"),
        data=data,
        content_type="application/json", )
    # RequestFactory does not run middleware, so user/session are set by hand.
    request.user = user
    request.session = {}
    return request
def add_vlan(name, tag, ip, gw, mask=24):
    """Create and persist a ``VLan`` (network *ip*, gateway *gw*, CIDR *mask*)."""
    vlan = VLan(name=name, tag=tag, ip=ip, gw=gw, mask=mask)
    vlan.save()
    return vlan
def add_os(code, descr):
    """Create and persist an ``OperatingSystem``."""
    operating_system = OperatingSystem(code=code, description=descr)
    operating_system.save()
    return operating_system
def add_role(code, descr):
    """Create and persist a ``Role``."""
    role = Role(code=code, description=descr)
    role.save()
    return role
def add_env(code, descr):
    """Create and persist an ``Environment``."""
    environment = Environment(code=code, description=descr)
    environment.save()
    return environment
def add_dnszone(name):
    """Create and persist a ``DNSZone`` called *name*."""
    zone = DNSZone(name=name)
    zone.save()
    return zone
def add_mtype(name, auto_name, has_serial):
    """Create and persist a machine type (``MType``)."""
    machine_type = MType(
        name=name,
        auto_name=auto_name,
        has_serial=has_serial,
    )
    machine_type.save()
    return machine_type
def add_superuser(name, password):
    """Create and persist a Django superuser.

    The password is hashed with ``set_password()``; the original assigned
    the raw string to ``User.password``, storing it in plain text (and
    making authentication against it impossible).
    """
    user = User(username=name, is_superuser=True)
    user.set_password(password)
    user.save()
    return user
def create_excluded_ip_range_request(user, first, last, vlan):
    """Build an authenticated POST request for the excludediprange-list endpoint.

    *first*/*last* are the string boundaries of the range to exclude on *vlan*.
    """
    requestfactory = RequestFactory()
    data = simplejson.dumps({
        "vlan": reverse("vlan-detail", args=[vlan.pk, ]),
        "first": first,
        "last": last,
    })
    request = requestfactory.post(
        reverse("excludediprange-list"),
        data=data,
        content_type="application/json", )
    # RequestFactory does not run middleware, so user/session are set by hand.
    request.user = user
    request.session = {}
    print data  # Python 2 debug output
    return request
class MachineTest(TestCase):
def setUp(self, ):
    """Create the DNS zone, environments, roles, OSes, machine types,
    vlans, projects and superuser shared by every test in this case."""
    self.factory = RequestFactory()
    self.z_ib = add_dnszone(".ib")
    self.env_pro = add_env("PRO", "production")
    self.env_pre = add_env("PRE", "preproduction")
    self.env_des = add_env("DES", "development")
    self.role_bd = add_role("BD", "base de datos")
    # The database role is the only one that requires a backup vlan.
    self.role_bd.needs_backup_vlan = True
    self.role_bd.save()
    self.role_sa = add_role("SA", "Servidor aplicaciones")
    self.role_sw = add_role("SW", "Servidor Web")
    self.role_ut = add_role("UT", "Utils server")
    self.role_ce = add_role("CE", "Conmutador Electronico")
    self.role_bal = add_role("BL", "Load Balancer")
    self.os_lin = add_os("L", "GNU/Linux")
    self.os_win = add_os("W", "Windows")
    self.mtype_server = add_mtype(
        "server",
        auto_name=True,
        has_serial=True)
    self.mtype_router = add_mtype(
        "router",
        auto_name=False,
        has_serial=False)
    self.mtype_server_standalone = add_mtype(
        "server_standalone",
        auto_name=True,
        has_serial=False)
    self.vlan_man1 = add_vlan(
        "MAN1",
        200,
        "172.21.200.0",
        "172.21.200.1",
        23,
    )
    self.vlan_patio = add_vlan(
        "PATIO",
        229,
        "172.21.228.0",
        "172.21.228.1",
        23,
    )
    self.project_ibcom = add_project("IBC", "IBC")
    self.project_crm = add_project("CRM", "CRM")
    try:
        self.user = User.objects.get(username="raton")
    except User.DoesNotExist:
        # Narrowed from a bare ``except:`` which silently swallowed any
        # database error, not just the missing-user case.
        self.user = add_superuser("raton", "r")
def test_find_ip_exclusion(self, ):
    """Iface.excluded_in_ranges() must return every range containing the IP."""
    ExcludedIPRange.objects.all().delete()
    eir = ExcludedIPRange(
        first="172.21.229.1",
        last="172.21.229.10",
        vlan=self.vlan_patio,
    )
    eir.save()
    self.assertEquals(
        Iface.excluded_in_ranges("172.21.229.5"),
        [eir, ],
        "Not properly finding Excluded IP Ranges for IP 172.21.229.5, should: %s" % eir,
    )
    # Overlapping second range: .9 falls inside both.
    eir1 = ExcludedIPRange(
        first="172.21.229.8",
        last="172.21.229.12",
        vlan=self.vlan_patio,
    )
    eir1.save()
    self.assertEquals(
        Iface.excluded_in_ranges("172.21.229.9"),
        [eir, eir1, ],
        "Not properly finding Excluded IP Ranges for IP 172.21.229.9, should: %s" % [eir, eir1, ],
    )
    # Fixed: the original message applied ``%`` to a string with no
    # placeholder, which raised TypeError while building the message.
    self.assertEquals(
        Iface.excluded_in_ranges("172.21.210.9"),
        [],
        "Not properly finding Excluded IP Ranges for IP 172.21.210.9, should be empty",
    )
def test_excluded_ip_ranges(self, ):
    """Exercise the excluded-ip-range REST endpoint: boundary validation,
    creation, and whether iface creation honours the excluded ranges."""
    #create an ip range who's first IP is not suitable for the assigned vlan
    request = create_excluded_ip_range_request(
        self.user,
        "172.21.200.10",
        "172.21.229.10",
        self.vlan_patio)
    func = ExcludedIPRangeViewSet.as_view({"post": "create"})
    self.assertRaises(
        ipaddr.AddressValueError,
        func, request,
        "creating an excluded ip range, not properly checking if first ip is valid for assigned vlan")
    #create an ip range who's second IP is not suitable for the assigned vlan
    request = create_excluded_ip_range_request(
        self.user,
        "172.21.229.10",
        "172.21.200.10",
        self.vlan_patio)
    self.assertRaises(
        ipaddr.AddressValueError,
        func, request,
        "creating an excluded ip range, not properly checking if second ip is valid for assigned vlan")
    #create an ip range correctly for vlan addressing with one IP
    request = create_excluded_ip_range_request(
        self.user,
        "172.21.228.10",
        "172.21.228.10",
        self.vlan_patio)
    response = (ExcludedIPRangeViewSet.as_view({"post": "create"})(request)).render()
    eir = simplejson.loads(response.content)  # parsed but only status is asserted
    self.assertEquals(
        201,
        response.status_code,
        "Is not properly creating excluded_ip_ranges (single IP)")
    #create an ip range correctly for vlan addressing with more than one IP
    request = create_excluded_ip_range_request(
        self.user,
        "172.21.228.1",
        "172.21.228.8",
        self.vlan_patio)
    response = (ExcludedIPRangeViewSet.as_view({"post": "create"})(request)).render()
    eir = simplejson.loads(response.content)
    self.assertEquals(
        201,
        response.status_code,
        "Is not properly creating excluded_ip_ranges (more than one IP)")
    #does it respect excluded ranges at iface creation??
    # With .1-.8 and .10 excluded on PATIO, the first auto-assigned IP
    # is expected to be .9 and the second .11.
    request = create_machine_creation_request(
        self.user,
        None,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server)
    machine = simplejson.loads(
        (MachineViewSet.as_view({"post": "create"})(request)).render().content)
    request = create_iface_creation_request(
        self.user,
        machine,
        self.vlan_patio)
    print request
    response = IfaceViewSet.as_view({"post": "create"})(request)
    iface = simplejson.loads(response.render().content)
    print iface
    self.assertEquals(
        iface["ip"],
        "172.21.228.9",
        "Creating an iface is not looking at created excluded_ip_ranges")
    request = create_iface_creation_request(
        self.user,
        machine,
        self.vlan_patio)
    response = IfaceViewSet.as_view({"post": "create"})(request)
    iface = simplejson.loads(response.render().content)
    print iface
    self.assertEquals(
        iface["ip"],
        "172.21.228.11",
        "Creating an iface is not looking at created excluded_ip_ranges")
def off_test_many_machines_for_one_iface(self, ):
#TODO write test_many_machines_for_one_iface
request = create_machine_creation_request(
self.user,
None,
self.z_ib,
self.env_pro,
self.role_bd,
self.os_lin,
mtype=self.mtype_server)
machine1 = simplejson.loads(
(MachineViewSet.as_view({"post": "create"})(request)).render().content)
request = create_machine_creation_request(
self.user,
None,
self.z_ib,
self.env_pro,
self.role_bd,
self.os_lin,
mtype=self.mtype_server)
machine2 = simplejson.loads(
(MachineViewSet.as_view({"post": "create"})(request)).render().content)
request = create_iface_creation_request(
self.user,
machine1,
self.vlan_man1)
response = IfaceViewSet.as_view({"post": "create"})(request)
iface = simplejson.loads(response.render().content)
requestfactory = RequestFactory()
data = simplejson.dumps({
"machine": reverse("iface-detail", args=[machine1["id"], ]),
})
request = requestfactory.post(
reverse("iface-list"),
data=data,
content_type="application/json", )
request.user = user
request.session = {}
print data
def test_interface_rest(self, ):
    """Create ifaces through the REST endpoint: with an explicit IP,
    a second iface on the same machine/vlan, and with a server-chosen IP."""
    #regular iface creation with IP
    request = create_machine_creation_request(
        self.user,
        None,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server)
    machine = simplejson.loads(
        (MachineViewSet.as_view({"post": "create"})(request)).render().content)
    request = create_iface_creation_request(
        self.user,
        machine,
        self.vlan_man1,
        "6.6.6.6",)
    response = IfaceViewSet.as_view({"post": "create"})(request)
    response.render()
    self.assertEquals(
        201,
        response.status_code,
        "Not properly creating iface. (should: 201; does: %s) %s" %
        (response.status_code, response.content))
    # The user-supplied IP must be kept verbatim.
    self.assertEquals(
        simplejson.loads(response.content)["ip"],
        "6.6.6.6",
        "Not properly creating ifaces when IP is assigned by user")
    # creating the same vlan for the same machine, should create it
    request = create_iface_creation_request(
        self.user,
        machine,
        self.vlan_man1)
    response = IfaceViewSet.as_view({"post": "create"})(request)
    self.assertEquals(
        201,
        response.status_code,
        "Not properly creating second iface on same machine and vlan. (should: 201; does: %s): %s" %
        (response.status_code, response.render().content))
    #regular iface creation without IP
    request = create_iface_creation_request(
        self.user,
        machine,
        self.vlan_man1)
    response = IfaceViewSet.as_view({"post": "create"})(request)
    response.render()
    self.assertEquals(
        201,
        response.status_code,
        "Not properly creating iface. (should: 201; does: %s) %s" %
        (response.status_code, response.content))
def test_conflicting_ip(self, ):
    """VLan.get_ip() must skip addresses registered as ConflictingIP."""
    Iface.objects.all().delete()
    ExcludedIPRange.objects.filter(vlan=self.vlan_man1).delete()
    # Register 172.21.200.1 as conflicting; the next candidate is .2.
    ConflictingIP(ip="172.21.200.1").save()
    ip = self.vlan_man1.get_ip()
    self.assertEquals(
        ip,
        "172.21.200.2",
        "Not properly assigning an IP when there is a conflicting one. (should: 172.21.200.2, does: %s)" % ip
    )
def test_find_vlan_for_ip(self, ):
    """Iface.find_vlan() maps an IP to the vlan whose network contains it,
    or returns None when no vlan matches."""
    VLan.objects.all().delete()
    vlan_servicio = add_vlan(
        "SERVICIO",
        100,
        "172.21.100.0",
        "172.21.100.1",
        24,
    )
    vlan = Iface.find_vlan("192.168.1.1")
    self.assertIsNone(
        vlan,
        # Message fixed: it used to read "because no 192.168.1.1 does not belong".
        "Should return None because 192.168.1.1 does not belong to any vlan, returned: %s" % vlan
    )
    self.assertEqual(
        vlan_servicio,
        Iface.find_vlan("172.21.100.3"),
        "Should return %s as 172.21.100.3 belongs to it" % vlan_servicio
    )
def test_vlan_decision(self, ):
    """Walk VLanConfig through every vlan-selection scenario: project vs
    non-project machines, campus vs DMZ, per-environment service/backup
    vlans, vlan exhaustion (NoFreeIPError) and misconfiguration
    (ImproperlyConfigured).  All vlans use /30 masks so exhaustion is
    reached after two ifaces."""
    #cleaning ifaces *******************************
    Iface.objects.all().delete()
    vlan_servicio = add_vlan(
        "SERVICIO",
        100,
        "172.21.100.0",
        "172.21.100.1",
        30,
    )
    vlan_dmz = add_vlan(
        "DMZ",
        101,
        "172.21.101.0",
        "172.21.101.1",
        30,
    )
    vlan_management = add_vlan(
        "MNG",
        102,
        "172.21.102.0",
        "172.21.102.1",
        30,
    )
    vlan_management.management_purpose = True
    vlan_management.save()
    print vlan_management.info
    vlan_project = add_vlan(
        "PROJECT",
        103,
        "172.21.103.0",
        "172.21.103.1",
        30,
    )
    vlan_backup_prod = add_vlan(
        "backup_prod",
        104,
        "172.21.104.0",
        "172.21.104.1",
        30,
    )
    vlan_backup_des = add_vlan(
        "backup_des",
        105,
        "172.21.105.0",
        "172.21.105.1",
        30,
    )
    vlan_pre = add_vlan(
        "pre",
        106,
        "172.21.106.0",
        "172.21.106.1",
        30,)
    # Wire vlans to environments and to one project.
    self.env_des.backup_vlans.add(vlan_backup_des)
    self.env_pre.service_vlans.add(vlan_pre)
    self.env_pre.backup_vlans.add(vlan_backup_des)
    self.env_pro.backup_vlans.add(vlan_backup_prod)
    self.env_pro.service_vlans.add(vlan_servicio)
    project = add_project('Federation Project', 'fed')
    project.service_vlans.add(vlan_project)
    project.dmz = vlan_dmz
    project.save()
    #creating vlan config
    # project machine
    # campus network
    # production
    # role BD
    # should assign vlans: vlan_project, vlan_backup_prod, vlan_management
    m1 = add_machine(
        project,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,
    )
    vc.save()
    print m1
    print vc.vlans.all().order_by('name')
    # NOTE: ordering is by 'name' (case-sensitive), hence MNG < PROJECT < backup_prod.
    self.assertEquals(
        [vlan_management, vlan_project, vlan_backup_prod, ],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_backup_prod, vlan_management, vlan_project")
    # Consume one IP on every selected vlan.
    for v in vc.vlans.all():
        i = Iface(vlan=v)
        i.save()
        i.machines.add(m1)
    #creating second vlan config
    # project machine
    # campus network
    # production
    # role BD
    # should assign vlans: vlan_project, vlan_backup_prod, vlan_management
    m1 = add_machine(
        project,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,
    )
    vc.save()
    self.assertEquals(
        [vlan_management, vlan_project, vlan_backup_prod, ],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_backup_prod, vlan_management, vlan_project")
    for v in vc.vlans.all():
        i = Iface(vlan=v)
        i.save()
        i.machines.add(m1)
    #creating third vlan config
    # project machine
    # campus network
    # production
    # role BD
    # should not find free IPs
    m1 = add_machine(
        project,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,
    )
    func = vc.save
    self.assertRaises(
        VLan.NoFreeIPError,
        func,
        "Should raise NoFeeIPError as there are no free IPs"
    )
    #cleaning ifaces *******************************
    Iface.objects.all().delete()
    #creating vlan config
    # NON project machine
    # campus network
    # production
    # role SA(no backup needed)
    # should assign vlans: vlan_servicio, vlan_management
    m1 = add_machine(
        None,
        self.z_ib,
        self.env_pro,
        self.role_sa,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,
    )
    vc.save()
    self.assertEquals(
        [vlan_management, vlan_servicio, ],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_servicio and vlan_management")
    # Exhaust vlan_servicio (/30 gives two assignable addresses).
    Iface(vlan=vlan_servicio).save()
    Iface(vlan=vlan_servicio).save()
    #creating vlan config
    # NON project machine
    # campus network
    # production
    # role SA(no backup needed)
    # should not find free IPs for vlan_servicio
    m1 = add_machine(
        None,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    func = vc.save
    self.assertRaises(
        VLan.NoFreeIPError,
        func,
        "It should find free IPs for vlan_servicio, but it did")
    #cleaning ifaces *******************************
    Iface.objects.all().delete()
    # Exhaust the production backup vlan instead.
    Iface(vlan=vlan_backup_prod).save()
    Iface(vlan=vlan_backup_prod).save()
    #creating vlan config
    # NON project machine
    # campus network
    # production
    # role SA(no backup needed)
    # should not find free IPs for vlan_backup
    m1 = add_machine(
        None,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    func = vc.save
    self.assertRaises(
        VLan.NoFreeIPError,
        func,
        "It should find free IPs for vlan_backup_prod, but it did")
    #cleaning ifaces *******************************
    Iface.objects.all().delete()
    #creating vlan config
    # NON project machine
    # campus network
    # pre
    # role SA(no backup needed)
    # should not find free IPs for vlan_backup
    m1 = add_machine(
        None,
        self.z_ib,
        self.env_pre,
        self.role_sa,
        self.os_lin,
        self.mtype_server,
        True,
        False)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    vc.save()
    self.assertEquals(
        2,
        vc.vlans.all().count(),
        "Should onkly have 2 vlans, as role needs no backup")
    self.assertEquals(
        [vlan_management, vlan_pre, ],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_pre and vlan_management")
    #creating vlan config
    # project machine
    # DMZ network
    # pro
    # role SA(no backup needed)
    # should assign [vlan_management, vlan_dmz]
    m1 = add_machine(
        project,
        self.z_ib,
        self.env_pro,
        self.role_sa,
        self.os_lin,
        self.mtype_server,
        True,
        True)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    vc.save()
    self.assertEquals(
        2,
        vc.vlans.all().count(),
        "Should onkly have 2 vlans, as role needs no backup")
    self.assertEquals(
        [vlan_dmz, vlan_management, ],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_dmz and vlan_management; does: %s" % vc)
    #creating vlan config
    # NON project machine
    # DMZ network
    # pro
    # role SA(no backup needed)
    # should raise ImproperConfigured Exception because system
    # can't find dmz for non-project machine
    m1 = add_machine(
        None,
        self.z_ib,
        self.env_pro,
        self.role_sa,
        self.os_lin,
        self.mtype_server,
        True,
        True)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    func = vc.save
    self.assertRaises(
        ImproperlyConfigured,
        func,
        "Should raise ImproperlyConfigured because system can't allocate dmz for non-project machine")
    #creating vlan config
    # project machine
    # DMZ network
    # pro
    # role BD
    # should [vlan_dmz, vlan_backup, vlan_management]
    m1 = add_machine(
        project,
        self.z_ib,
        self.env_pro,
        self.role_bd,
        self.os_lin,
        self.mtype_server,
        True,
        True)
    vc = VLanConfig(
        machine=m1,
        needs_backup=True,
        needs_management=True,)
    vc.save()
    self.assertEquals(
        3,
        vc.vlans.all().count(),
        "Should have 3 vlans, as role needs backup")
    self.assertEquals(
        [vlan_dmz, vlan_management, vlan_backup_prod],
        [x for x in vc.vlans.all().order_by('name')],
        "Should have vlan_dmz and vlan_management; does: %s" % vc)
def test_standalone_hostnames_rest(self, ):
    """Hostname auto-naming: cluster servers get a numeric suffix
    (role+project+os+env+N), standalone servers get no suffix, and the
    counter keeps advancing across mixed creations."""
    #creating a regular server for CRM project
    request = create_machine_creation_request(
        self.user,
        self.project_crm,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server,
    )
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    proper_name = ("%s%s%s%s1" % (
        self.role_bd.code,
        self.project_crm.code,
        self.os_lin.code,
        self.env_des.code)).lower()
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating cluster server hostname (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
    #creating an standalone server once already exists a regular server with same properties
    request = create_machine_creation_request(
        self.user,
        self.project_crm,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server_standalone
    )
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    # Standalone machines carry no trailing counter.
    proper_name = ("%s%s%s%s" % (
        self.role_bd.code,
        self.project_crm.code,
        self.os_lin.code,
        self.env_des.code)).lower()
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating standalone server hostname when a cluster server exist for the same properties (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
    #creating the second regular server once already exists a
    #regular server and a standalone server with same properties
    request = create_machine_creation_request(
        self.user,
        self.project_crm,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server
    )
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    proper_name = ("%s%s%s%s2" % (
        self.role_bd.code,
        self.project_crm.code,
        self.os_lin.code,
        self.env_des.code)).lower()
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating hostnames (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
def off_test_no_auto_name(self, ):
    """Disabled (``off_`` prefix): machines without auto-naming must be
    rejected when no hostname is supplied, while explicit hostnames and
    the cluster counter keep working.

    Fixes applied: the original success branch was
    ``assert("... %s" % response.status_code)`` — ``response`` was never
    defined (NameError) and a bare assert on a non-empty string always
    passes; two assert messages were missing their closing parenthesis.
    """
    #creating machine without autoname and not sending hostname
    request = create_machine_creation_request(
        self.user,
        None,
        self.z_ib,
        self.env_pro,
        self.role_ce,
        self.os_lin,
        mtype=self.mtype_router)
    try:
        MachineViewSet.as_view({"post": "create"})(request)
        self.fail(
            "Not properly creating machine with no auto name. "
            "No hostname has been sent, so creation should have failed")
    except IntegrityError:
        pass
    #creating machine that should increment counter
    request = create_machine_creation_request(
        self.user,
        self.project_ibcom,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server)
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    self.assertEquals(
        201,
        response.status_code,
        "Not properly return status after machine creation(should:201;does:%s)" % response.status_code)
    proper_name = ("%s%s%s%s1" % (
        self.role_bd.code,
        self.project_ibcom.code,
        self.os_lin.code,
        self.env_des.code)).lower()
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating hostnames (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
    # creating machine that needs a specified hostname, even though there is
    # a previous cluster server with same properties
    request = create_machine_creation_request(
        self.user,
        self.project_ibcom,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server_standalone,
        hostname="bdibcldes")
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    self.assertEquals(
        201,
        response.status_code,
        "Not properly return status after machine creation(should:201;does:%s)" % response.status_code)
    proper_name = "bdibcldes"
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating hostnames (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
    #creating the other cluster machine
    request = create_machine_creation_request(
        self.user,
        self.project_ibcom,
        self.z_ib,
        self.env_des,
        self.role_bd,
        self.os_lin,
        mtype=self.mtype_server)
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    self.assertEquals(
        201,
        response.status_code,
        "Not properly return status after machine creation(should:201;does:%s)" % response.status_code)
    proper_name = ("%s%s%s%s2" % (
        self.role_bd.code,
        self.project_ibcom.code,
        self.os_lin.code,
        self.env_des.code)).lower()
    machine = simplejson.loads(response.content)
    self.assertEquals(
        proper_name,
        machine["hostname"],
        "Not properly creating hostnames (should: %s; does: %s)" % (
            proper_name,
            machine["hostname"],
        )
    )
def test_alta_router(self, ):
    """A router (machine type without auto-naming) keeps the hostname
    supplied by the user."""
    request = create_machine_creation_request(
        self.user,
        None,
        self.z_ib,
        self.env_pro,
        self.role_ce,
        self.os_lin,
        mtype=self.mtype_router,
        hostname="CE_MAN1"
    )
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    self.assertEquals(
        201,
        response.status_code,
        # Message fixed: the closing parenthesis was missing.
        "Not properly return status after machine creation(should:201;does:%s)" % response.status_code)
    machine = simplejson.loads(response.content)
    self.assertEquals(
        "CE_MAN1",
        machine["hostname"],
        "Not properly creating hostnames (should: %s; does: %s)" % (
            "CE_MAN1",
            machine["hostname"],
        )
    )
def test_alta_balanceador(self, ):
    """A load balancer keeps the hostname supplied by the user."""
    request = create_machine_creation_request(
        self.user,
        None,
        self.z_ib,
        self.env_pro,
        self.role_bal,
        self.os_lin,
        mtype=self.mtype_router,
        hostname="bigip1"
    )
    response = MachineViewSet.as_view({"post": "create"})(request).render()
    self.assertEquals(
        201,
        response.status_code,
        # Message fixed: the closing parenthesis was missing.
        "Not properly return status after load balancer creation(should:201;does:%s)" % response.status_code)
    machine = simplejson.loads(response.content)
    self.assertEquals(
        machine["hostname"],
        "bigip1",
        "Not properly initializing load balancer hostname",
    )
|
{
"content_hash": "d8e1f848d5cb888481d148086cf515da",
"timestamp": "",
"source": "github",
"line_count": 1149,
"max_line_length": 150,
"avg_line_length": 29.186248912097476,
"alnum_prop": 0.5241985984792008,
"repo_name": "jpardobl/naman",
"id": "577af250d9315b5a59db1fe2b5b3617def556524",
"size": "33535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "naman/core/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10679"
},
{
"name": "Python",
"bytes": "159906"
}
],
"symlink_target": ""
}
|
import re
from unittest import TestCase, mock
import pytest
from google.api_core.exceptions import NotFound
from google.cloud.secretmanager_v1.types import AccessSecretVersionResponse
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.google.cloud.secrets.secret_manager import CloudSecretManagerBackend
# Fixture values shared by every test below.
CREDENTIALS = 'test-creds'  # stand-in credentials returned by the mocked helper
KEY_FILE = 'test-file.json'
PROJECT_ID = 'test-project-id'
OVERRIDDEN_PROJECT_ID = 'overridden-test-project-id'
# Secret-name prefixes and separator used to build secret ids.
CONNECTIONS_PREFIX = "test-connections"
VARIABLES_PREFIX = "test-variables"
SEP = '-'
# Connection fixture.
CONN_ID = 'test-postgres'
CONN_URI = 'postgresql://airflow:airflow@host:5432/airflow'
# Variable fixture.
VAR_KEY = 'hello'
VAR_VALUE = 'world'
# Config fixture.
CONFIG_KEY = 'sql_alchemy_conn'
CONFIG_VALUE = 'postgresql://airflow:airflow@host:5432/airflow'
# Dotted module paths patched throughout the tests.
MODULE_NAME = "airflow.providers.google.cloud.secrets.secret_manager"
CLIENT_MODULE_NAME = "airflow.providers.google.cloud._internal_client.secret_manager_client"
class TestCloudSecretManagerBackend(TestCase):
    """Tests for CloudSecretManagerBackend with the Google client and the
    credentials helper mocked out; no network access is performed."""

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_default_valid_and_sep(self, mock_client_callable, mock_get_creds):
        """The default prefix/separator configuration is accepted."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        backend = CloudSecretManagerBackend()
        assert backend._is_valid_prefix_and_sep()

    @parameterized.expand(
        [
            # (label, prefix, sep) — each combination is rejected at init.
            ("colon:", "not:valid", ":"),
            ("slash/", "not/valid", "/"),
            ("space_with_char", "a b", ""),
            ("space_only", "", " "),
        ]
    )
    def test_raise_exception_with_invalid_prefix_sep(self, _, prefix, sep):
        """Invalid prefix/sep combinations raise AirflowException."""
        with pytest.raises(AirflowException):
            CloudSecretManagerBackend(connections_prefix=prefix, sep=sep)

    @parameterized.expand(
        [
            # (label, prefix, sep, expected validity).
            ("dash-", "valid1", "-", True),
            ("underscore_", "isValid", "_", True),
            ("empty_string", "", "", True),
            ("space_prefix", " ", "", False),
            ("space_sep", "", " ", False),
            ("colon:", "not:valid", ":", False),
        ]
    )
    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_is_valid_prefix_and_sep(self, _, prefix, sep, is_valid, mock_client_callable, mock_get_creds):
        """_is_valid_prefix_and_sep() classifies prefix/sep pairs correctly.

        The prefix/sep are set after construction to bypass the validation
        done in __init__.
        """
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        backend = CloudSecretManagerBackend()
        backend.connections_prefix = prefix
        backend.sep = sep
        assert backend._is_valid_prefix_and_sep() == is_valid

    @parameterized.expand(["airflow-connections", "connections", "airflow"])
    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_conn_uri(self, connections_prefix, mock_client_callable, mock_get_creds):
        """get_conn_uri() returns the secret payload and queries the
        "latest" version of the prefixed secret id."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        test_response = AccessSecretVersionResponse()
        test_response.payload.data = CONN_URI.encode("UTF-8")
        mock_client.access_secret_version.return_value = test_response
        secrets_manager_backend = CloudSecretManagerBackend(connections_prefix=connections_prefix)
        secret_id = secrets_manager_backend.build_path(connections_prefix, CONN_ID, SEP)
        returned_uri = secrets_manager_backend.get_conn_uri(conn_id=CONN_ID)
        assert CONN_URI == returned_uri
        mock_client.secret_version_path.assert_called_once_with(PROJECT_ID, secret_id, "latest")

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(MODULE_NAME + ".CloudSecretManagerBackend.get_conn_value")
    def test_get_connection(self, mock_get_value, mock_get_creds):
        """get_connection() wraps the stored URI in a Connection object."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_get_value.return_value = CONN_URI
        conn = CloudSecretManagerBackend().get_connection(conn_id=CONN_ID)
        assert isinstance(conn, Connection)

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_conn_uri_non_existent_key(self, mock_client_callable, mock_get_creds):
        """A missing secret yields None and an ERROR log entry."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        # The requested secret id or secret version does not exist
        mock_client.access_secret_version.side_effect = NotFound('test-msg')
        secrets_manager_backend = CloudSecretManagerBackend(connections_prefix=CONNECTIONS_PREFIX)
        secret_id = secrets_manager_backend.build_path(CONNECTIONS_PREFIX, CONN_ID, SEP)
        with self.assertLogs(secrets_manager_backend.client.log, level="ERROR") as log_output:
            assert secrets_manager_backend.get_conn_uri(conn_id=CONN_ID) is None
            assert secrets_manager_backend.get_connection(conn_id=CONN_ID) is None
            assert re.search(
                f"Google Cloud API Call Error \\(NotFound\\): Secret ID {secret_id} not found",
                log_output.output[0],
            )

    @parameterized.expand(["airflow-variables", "variables", "airflow"])
    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_variable(self, variables_prefix, mock_client_callable, mock_get_creds):
        """get_variable() returns the secret payload for the variable key."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        test_response = AccessSecretVersionResponse()
        test_response.payload.data = VAR_VALUE.encode("UTF-8")
        mock_client.access_secret_version.return_value = test_response
        secrets_manager_backend = CloudSecretManagerBackend(variables_prefix=variables_prefix)
        secret_id = secrets_manager_backend.build_path(variables_prefix, VAR_KEY, SEP)
        returned_uri = secrets_manager_backend.get_variable(VAR_KEY)
        assert VAR_VALUE == returned_uri
        mock_client.secret_version_path.assert_called_once_with(PROJECT_ID, secret_id, "latest")

    @parameterized.expand(["airflow-config", "config", "airflow"])
    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_config(self, config_prefix, mock_client_callable, mock_get_creds):
        """get_config() returns the secret payload for the config key."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        test_response = AccessSecretVersionResponse()
        test_response.payload.data = CONFIG_VALUE.encode("UTF-8")
        mock_client.access_secret_version.return_value = test_response
        secrets_manager_backend = CloudSecretManagerBackend(config_prefix=config_prefix)
        secret_id = secrets_manager_backend.build_path(config_prefix, CONFIG_KEY, SEP)
        returned_val = secrets_manager_backend.get_config(CONFIG_KEY)
        assert CONFIG_VALUE == returned_val
        mock_client.secret_version_path.assert_called_once_with(PROJECT_ID, secret_id, "latest")

    @parameterized.expand(["airflow-variables", "variables", "airflow"])
    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_variable_override_project_id(self, variables_prefix, mock_client_callable, mock_get_creds):
        """An explicit project_id overrides the one from the credentials."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        test_response = AccessSecretVersionResponse()
        test_response.payload.data = VAR_VALUE.encode("UTF-8")
        mock_client.access_secret_version.return_value = test_response
        secrets_manager_backend = CloudSecretManagerBackend(
            variables_prefix=variables_prefix, project_id=OVERRIDDEN_PROJECT_ID
        )
        secret_id = secrets_manager_backend.build_path(variables_prefix, VAR_KEY, SEP)
        returned_uri = secrets_manager_backend.get_variable(VAR_KEY)
        assert VAR_VALUE == returned_uri
        mock_client.secret_version_path.assert_called_once_with(OVERRIDDEN_PROJECT_ID, secret_id, "latest")

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_get_variable_non_existent_key(self, mock_client_callable, mock_get_creds):
        """A missing variable secret yields None and an ERROR log entry."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        # The requested secret id or secret version does not exist
        mock_client.access_secret_version.side_effect = NotFound('test-msg')
        secrets_manager_backend = CloudSecretManagerBackend(variables_prefix=VARIABLES_PREFIX)
        secret_id = secrets_manager_backend.build_path(VARIABLES_PREFIX, VAR_KEY, SEP)
        with self.assertLogs(secrets_manager_backend.client.log, level="ERROR") as log_output:
            assert secrets_manager_backend.get_variable(VAR_KEY) is None
            assert re.search(
                f"Google Cloud API Call Error \\(NotFound\\): Secret ID {secret_id} not found",
                log_output.output[0],
            )

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_connections_prefix_none_value(self, mock_client_callable, mock_get_creds):
        """connections_prefix=None disables connection lookup entirely:
        no validation and no secret fetch happen."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        with mock.patch(MODULE_NAME + '.CloudSecretManagerBackend._get_secret') as mock_get_secret:
            with mock.patch(
                MODULE_NAME + '.CloudSecretManagerBackend._is_valid_prefix_and_sep'
            ) as mock_is_valid_prefix_sep:
                secrets_manager_backend = CloudSecretManagerBackend(connections_prefix=None)
                mock_is_valid_prefix_sep.assert_not_called()
                assert secrets_manager_backend.get_conn_uri(conn_id=CONN_ID) is None
                mock_get_secret.assert_not_called()

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_variables_prefix_none_value(self, mock_client_callable, mock_get_creds):
        """variables_prefix=None disables variable lookup entirely."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        with mock.patch(MODULE_NAME + '.CloudSecretManagerBackend._get_secret') as mock_get_secret:
            secrets_manager_backend = CloudSecretManagerBackend(variables_prefix=None)
            assert secrets_manager_backend.get_variable(VAR_KEY) is None
            mock_get_secret.assert_not_called()

    @mock.patch(MODULE_NAME + ".get_credentials_and_project_id")
    @mock.patch(CLIENT_MODULE_NAME + ".SecretManagerServiceClient")
    def test_config_prefix_none_value(self, mock_client_callable, mock_get_creds):
        """config_prefix=None disables config lookup entirely."""
        mock_get_creds.return_value = CREDENTIALS, PROJECT_ID
        mock_client = mock.MagicMock()
        mock_client_callable.return_value = mock_client
        with mock.patch(MODULE_NAME + '.CloudSecretManagerBackend._get_secret') as mock_get_secret:
            secrets_manager_backend = CloudSecretManagerBackend(config_prefix=None)
            assert secrets_manager_backend.get_config(CONFIG_KEY) is None
            mock_get_secret.assert_not_called()
|
{
"content_hash": "89a8b88c76e1f8c5834296a040ff2f06",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 108,
"avg_line_length": 51.10212765957447,
"alnum_prop": 0.6872345740694479,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "ebe3f4e01af920c3a8f8709864cf1fed4cc73ac5",
"size": "12795",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/secrets/test_secret_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
import json
import plotly
import pytest
from dash import __version__, Dash
from dash import html
from dash.development.base_component import Component
from dash import dcc, Input, Output
# Configure the abstract base Component so it behaves like a concrete
# component class for the tests below: allowed props, serialized type name,
# namespace, and the accepted wildcard-attribute prefixes.
Component._prop_names = ("id", "a", "children", "style")
Component._type = "TestComponent"
Component._namespace = "test_namespace"
Component._valid_wildcard_attributes = ["data-", "aria-"]
def nested_tree():
    """Build a small component tree exercising every `children` variant.

    The tree covers: a children list mixing strings, numbers, None and a
    component; children given as a single bare component; plain-string
    children; and a component with no children at all.

    Returns the root followed by each subcomponent, leaf first.
    """
    leaf = Component(id="0.1.x.x.0", children="string")
    mixed = Component(
        id="0.1.x.x", children=[10, None, "wrap string", leaf, "another string", 4.51]
    )
    # children given as a bare component rather than a list
    single = Component(id="0.1.x", children=mixed)
    wrapper = Component(id="0.1", children=single)
    childless = Component(id="0.0")
    root = Component(id="0", children=[childless, wrapper])
    return root, leaf, mixed, single, wrapper, childless
def test_debc001_init():
    # Constructing with a single known prop must not raise.
    Component(a=3)
def test_debc002_get_item_with_children():
    """Indexing a component by id finds a direct child in a list."""
    child = Component(id="1")
    parent = Component(children=[child])
    assert parent["1"] == child
def test_debc003_get_item_with_children_as_component_instead_of_list():
    """Indexing also works when children is a bare component, not a list."""
    child = Component(id="1")
    parent = Component(id="2", children=child)
    assert parent["1"] == child
def test_debc004_get_item_with_nested_children_one_branch():
    """Lookup by id descends through a single linear branch."""
    leaf = Component(id="1")
    mid = Component(id="2", children=[leaf])
    root = Component(children=[mid])
    assert mid["1"] == leaf
    assert root["2"] == mid
    assert root["1"] == leaf
def test_debc005_get_item_with_nested_children_two_branches():
    """Lookup by id searches across sibling branches."""
    leaf_a = Component(id="1")
    branch_a = Component(id="2", children=[leaf_a])
    leaf_b = Component(id="3")
    branch_b = Component(id="4", children=[leaf_b])
    root = Component(children=[branch_a, branch_b])
    assert branch_a["1"] == leaf_a
    assert branch_b["3"] == leaf_b
    assert root["2"] == branch_a
    assert root["4"] == branch_b
    assert root["1"] == leaf_a
    assert root["3"] == leaf_b
def test_debc006_get_item_with_full_tree():
    """Iteration yields all nested ids; every component is reachable by id."""
    root, *subcomponents = nested_tree()
    assert list(root) == ["0.0", "0.1", "0.1.x", "0.1.x.x", "0.1.x.x.0"]
    # Every component in the tree can be fetched from the root by its id.
    for comp in subcomponents:
        assert root[comp.id] == comp
    # An unknown id raises KeyError.
    with pytest.raises(KeyError):
        root["x"]
def test_debc007_len_with_full_tree():
    # len() counts every descendant item, including bare strings, numbers
    # and None entries inside children lists.
    c = nested_tree()[0]
    assert (
        len(c) == 5 + 5 + 1
    ), "the length of the nested children should match the total of 5 \
    components, 2 strings + 2 numbers + none in c2, and 1 string in c1"
def test_debc008_set_item_anywhere_in_tree():
    """Assigning by id replaces a node at any depth of the tree."""
    keys = ["0.0", "0.1", "0.1.x", "0.1.x.x", "0.1.x.x.0"]
    c = nested_tree()[0]
    # Replace nodes starting from the deepest one.
    for key in reversed(keys):
        replacement = Component(id="new {}".format(key), children="new string")
        c[key] = replacement
        assert c[replacement.id] == replacement
def test_debc009_del_item_full_tree():
    """Deleting by id removes the node; later lookups raise KeyError."""
    c = nested_tree()[0]
    # Delete from the innermost node outwards.
    for key in reversed(list(c)):
        c[key]
        del c[key]
        with pytest.raises(KeyError):
            c[key]
def test_debc010_traverse_full_tree():
    """_traverse walks the tree depth-first, yielding raw children items."""
    c, c1, c2, c3, c4, c5 = nested_tree()
    assert list(c._traverse()) == c.children + [c3] + [c2] + c2.children
def test_debc011_traverse_with_tuples():
    """_traverse accepts tuples for children just like lists."""
    c, c1, c2, c3, c4, c5 = nested_tree()
    c2.children = tuple(c2.children)
    c.children = tuple(c.children)
    expected = list(c.children) + [c3] + [c2] + list(c2.children)
    assert list(c._traverse()) == expected
def test_debc012_to_plotly_json_full_tree():
    """Serializing the full nested tree reproduces every nesting variant.

    The expected JSON mirrors nested_tree(): list children, children as a
    single component, string children, and raw numbers/None kept in place.
    """
    c = nested_tree()[0]
    # (Removed two dead no-op statements that merely read Component._namespace
    # and Component._type without using the results.)
    expected = {
        "type": "TestComponent",
        "namespace": "test_namespace",
        "props": {
            "children": [
                {
                    "type": "TestComponent",
                    "namespace": "test_namespace",
                    "props": {"id": "0.0"},
                },
                {
                    "type": "TestComponent",
                    "namespace": "test_namespace",
                    "props": {
                        "children": {
                            "type": "TestComponent",
                            "namespace": "test_namespace",
                            "props": {
                                "children": {
                                    "type": "TestComponent",
                                    "namespace": "test_namespace",
                                    "props": {
                                        "children": [
                                            10,
                                            None,
                                            "wrap string",
                                            {
                                                "type": "TestComponent",
                                                "namespace": "test_namespace",
                                                "props": {
                                                    "children": "string",
                                                    "id": "0.1.x.x.0",
                                                },
                                            },
                                            "another string",
                                            4.51,
                                        ],
                                        "id": "0.1.x.x",
                                    },
                                },
                                "id": "0.1.x",
                            },
                        },
                        "id": "0.1",
                    },
                },
            ],
            "id": "0",
        },
    }
    # Round-trip through JSON so the comparison is on plain Python values.
    res = json.loads(json.dumps(c.to_plotly_json(), cls=plotly.utils.PlotlyJSONEncoder))
    assert res == expected
def test_debc013_get_item_raises_key_if_id_doesnt_exist():
    """__getitem__ searches children only, never the component's own id."""
    with pytest.raises(KeyError):
        Component()["1"]
    # A component does not match its own id.
    c1 = Component(id="1")
    with pytest.raises(KeyError):
        c1["1"]
    c2 = Component(id="2", children=[c1])
    with pytest.raises(KeyError):
        c2["0"]
    with pytest.raises(KeyError):
        Component(children="string with no id")["0"]
def test_debc014_set_item():
    """Setting an existing id swaps in the new component."""
    old = Component(id="1", children="Hello world")
    parent = Component(id="2", children=old)
    assert parent["1"] == old
    new = Component(id="1", children="Brave new world")
    parent["1"] = new
    assert parent["1"] == new
def test_debc015_set_item_with_children_as_list():
    """Setting by id may replace a child with one that has a different id."""
    child = Component(id="1")
    parent = Component(id="2", children=[child])
    assert parent["1"] == child
    replacement = Component(id="3")
    parent["1"] = replacement
    assert parent["3"] == replacement
def test_debc016_set_item_with_nested_children():
    """Replacement by id works at depth and detaches the old subtree."""
    n1 = Component(id="1")
    n2 = Component(id="2", children=[n1])
    n3 = Component(id="3")
    n4 = Component(id="4", children=[n3])
    n5 = Component(id="5", children=[n2, n4])
    # Same id, equal value, but a distinct object.
    n3b = Component(id="3")
    assert n5["3"] == n3
    assert n5["3"] != "3"
    assert n5["3"] is not n3b
    n5["3"] = n3b
    assert n5["3"] is n3b
    assert n5["3"] is not n3
    # Replacing a subtree root drops its descendants ("1" becomes unreachable).
    n2b = Component(id="2")
    n5["2"] = n2b
    assert n5["4"] is n4
    assert n5["2"] is not n2
    assert n5["2"] is n2b
    with pytest.raises(KeyError):
        n5["1"]
def test_debc017_set_item_raises_key_error():
    """Assigning to an id that is not in the tree raises KeyError."""
    child = Component(id="1")
    parent = Component(id="2", children=[child])
    with pytest.raises(KeyError):
        parent["3"] = Component(id="3")
def test_debc018_del_item_from_list():
    """Deleting by id removes the entry from the children list."""
    first = Component(id="1")
    second = Component(id="2")
    parent = Component(id="3", children=[first, second])
    assert parent["1"] == first
    assert parent["2"] == second
    del parent["2"]
    with pytest.raises(KeyError):
        parent["2"]
    assert parent.children == [first]
    del parent["1"]
    with pytest.raises(KeyError):
        parent["1"]
    assert parent.children == []
def test_debc019_del_item_from_class():
    """Deleting a bare-component child resets children to None."""
    child = Component(id="1")
    parent = Component(id="2", children=child)
    assert parent["1"] == child
    del parent["1"]
    with pytest.raises(KeyError):
        parent["1"]
    assert parent.children is None
def test_debc020_to_plotly_json_without_children():
    """Serialization includes namespace, type and only the props that are set."""
    c = Component(id="a")
    c._prop_names = ("id",)
    c._type = "MyComponent"
    c._namespace = "basic"
    expected = {
        "namespace": "basic",
        "props": {"id": "a"},
        "type": "MyComponent",
    }
    assert c.to_plotly_json() == expected
def test_debc021_to_plotly_json_with_null_arguments():
    """Unset props are omitted, but an explicit None is serialized."""
    implicit = Component(id="a")
    implicit._prop_names = ("id", "style")
    implicit._type = "MyComponent"
    implicit._namespace = "basic"
    assert implicit.to_plotly_json() == {
        "namespace": "basic",
        "props": {"id": "a"},
        "type": "MyComponent",
    }
    explicit = Component(id="a", style=None)
    explicit._prop_names = ("id", "style")
    explicit._type = "MyComponent"
    explicit._namespace = "basic"
    assert explicit.to_plotly_json() == {
        "namespace": "basic",
        "props": {"id": "a", "style": None},
        "type": "MyComponent",
    }
def test_debc022_to_plotly_json_with_children():
    """String children are serialized under props["children"]."""
    c = Component(id="a", children="Hello World")
    c._prop_names = ("id", "children")
    c._type = "MyComponent"
    c._namespace = "basic"
    expected = {
        "namespace": "basic",
        "props": {"id": "a", "children": "Hello World"},
        "type": "MyComponent",
    }
    assert c.to_plotly_json() == expected
def test_debc023_to_plotly_json_with_wildcards():
    """data-*/aria-* wildcard props survive serialization, None included."""
    wildcards = {"aria-expanded": "true", "data-toggle": "toggled", "data-none": None}
    c = Component(id="a", **wildcards)
    c._prop_names = ("id",)
    c._type = "MyComponent"
    c._namespace = "basic"
    expected_props = dict(wildcards, id="a")
    assert c.to_plotly_json() == {
        "namespace": "basic",
        "props": expected_props,
        "type": "MyComponent",
    }
def test_debc024_len():
    """len() counts all descendant items, strings included."""
    cases = [
        (Component(), 0),
        (Component(children="Hello World"), 1),
        (Component(children=Component()), 1),
        (Component(children=[Component(), Component()]), 2),
        (Component(children=[Component(children=Component()), Component()]), 3),
    ]
    for component, expected in cases:
        assert len(component) == expected
def test_debc025_iter():
    """MutableMapping mixin methods are gone; iteration yields nested ids."""
    # The mixin methods from MutableMapping (keys, items, values, get, ...)
    # were dropped on purpose; only __iter__/__getitem__/__contains__ remain,
    # behaving according to our implementation of __iter__.
    c = Component(
        id="1",
        children=[
            Component(id="2", children=[Component(id="3", children=Component(id="4"))]),
            Component(id="5", children=[Component(id="6", children="Hello World")]),
            Component(),
            Component(children="Hello World"),
            Component(children=Component(id="7")),
            Component(children=[Component(id="8")]),
        ],
    )
    for m in ("clear", "get", "items", "keys", "pop", "popitem",
              "setdefault", "update", "values"):
        assert not hasattr(c, m), "should not have method " + m
    keys = ["2", "3", "4", "5", "6", "7", "8"]
    for k in keys:
        # __contains__()
        assert k in c, "should find key " + k
        # __getitem__()
        assert c[k].id == k, "key {} points to the right item".format(k)
    # __iter__() produces exactly the expected keys, no extras
    keys2 = []
    for k in c:
        keys2.append(k)
        assert k in keys, "iteration produces key " + k
    assert len(keys) == len(keys2), "iteration produces no extra keys"
def test_debc026_component_not_children():
    """Passing several children positionally (forgotten []) raises TypeError."""
    children = [Component(id="a"), html.Div(id="b"), "c", 1]
    for _ in range(len(children)):
        # rotate so each item takes each position once
        children = children[1:] + [children[0]]
        # use html.Div because only real components accept positional args
        html.Div(children)
        # a single component is fine as the positional children arg
        html.Div(children[0], id="x")
        with pytest.raises(TypeError):
            # forgetting the `[]` around children passes them as extra args
            html.Div(children[0], children[1], children[2], children[3])
def test_debc027_component_error_message():
    # Unknown keyword on the generic test component: message lists allowed args.
    with pytest.raises(TypeError) as e:
        Component(asdf=True)
    assert str(e.value) == (
        "The `TestComponent` component received an unexpected "
        + "keyword argument: `asdf`\nAllowed arguments: a, children, "
        + "id, style"
    )
    # When an id is supplied, it is included in the error message.
    with pytest.raises(TypeError) as e:
        Component(asdf=True, id="my-component")
    assert str(e.value) == (
        "The `TestComponent` component "
        + 'with the ID "my-component" received an unexpected '
        + "keyword argument: `asdf`\nAllowed arguments: a, children, "
        + "id, style"
    )
    # Real generated components also report the dash version.
    with pytest.raises(TypeError) as e:
        html.Div(asdf=True)
    assert str(e.value) == (
        "The `html.Div` component (version {}) ".format(__version__)
        + "received an unexpected "
        + "keyword argument: `asdf`\n"
        + "Allowed arguments: {}".format(", ".join(sorted(html.Div()._prop_names)))
    )
    with pytest.raises(TypeError) as e:
        html.Div(asdf=True, id="my-component")
    assert str(e.value) == (
        "The `html.Div` component (version {}) ".format(__version__)
        + 'with the ID "my-component" received an unexpected '
        + "keyword argument: `asdf`\n"
        + "Allowed arguments: {}".format(", ".join(sorted(html.Div()._prop_names)))
    )
def test_debc028_set_random_id():
    # Components created without an id get one auto-assigned when used in a
    # callback; an explicit id is never overwritten.
    app = Dash(__name__)
    input1 = dcc.Input(value="Hello Input 1")
    input2 = dcc.Input(value="Hello Input 2")
    output1 = html.Div()
    output2 = html.Div()
    output3 = html.Div(id="output-3")
    app.layout = html.Div([input1, input2, output1, output2, output3])
    @app.callback(Output(output1, "children"), Input(input1, "value"))
    def update(v):
        return f"Input 1 {v}"
    @app.callback(Output(output2, "children"), Input(input2, "value"))
    def update(v):
        return f"Input 2 {v}"
    @app.callback(
        Output(output3, "children"), Input(input1, "value"), Input(input2, "value")
    )
    def update(v1, v2):
        return f"Output 3 - Input 1: {v1}, Input 2: {v2}"
    # Verify the auto-generated IDs are stable
    # NOTE(review): these exact UUIDs imply Dash's id generator is seeded
    # deterministically -- confirm before relying on the literals.
    assert output1.id == "e3e70682-c209-4cac-629f-6fbed82c07cd"
    assert input1.id == "82e2e662-f728-b4fa-4248-5e3a0a5d2f34"
    assert output2.id == "d4713d60-c8a7-0639-eb11-67b367a9c378"
    assert input2.id == "23a7711a-8133-2876-37eb-dcd9e87a1613"
    # we make sure that the if the id is set explicitly, then it is not replaced by random id
    assert output3.id == "output-3"
def test_debc029_random_id_errors():
    # A component with persistence enabled may not get a random id: the id
    # must be stable across sessions, so an explicit one is required.
    app = Dash(__name__)
    input1 = dcc.Input(value="Hello Input 1", persistence=True)
    output1 = html.Div()
    app.layout = html.Div([input1, output1])
    with pytest.raises(RuntimeError) as e:
        @app.callback(Output(output1, "children"), Input(input1, "value"))
        def update(v):
            return f"Input 1 {v}"
    assert "persistence" in e.value.args[0]
    assert "Please assign an explicit ID" in e.value.args[0]
    assert "dash_core_components.Input" in e.value.args[0]
    # Once an explicit id is assigned the callback registers fine.
    input1.id = "explicit"
    # now it works without error
    @app.callback(Output(output1, "children"), Input(input1, "value"))
    def update2(v):
        return f"Input 1 {v}"
def test_debc030_invalid_children_args():
    # dcc.Input takes no children; passing them must raise TypeError.
    with pytest.raises(TypeError):
        dcc.Input(children="invalid children")
|
{
"content_hash": "141bca173b64ad52486e5ab1f27beb32",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 93,
"avg_line_length": 29.75785582255083,
"alnum_prop": 0.5282315671780856,
"repo_name": "plotly/dash",
"id": "cefde04fdec38cf2418b0459d23bef9bf1873bba",
"size": "16099",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/unit/development/test_base_component.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _LE
from sahara.plugins import base as plugin_base
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import job_manager as manager
from sahara.utils import edp
from sahara.utils import proxy as p
# Module-level handles shared by every API function below.
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Ops implementation used to run/cancel/delete jobs; injected at service
# startup via setup_edp_api().
OPS = None
def setup_edp_api(ops):
    """Install the ops implementation used by execute/cancel/delete below."""
    global OPS
    OPS = ops
def get_job_types(**kwargs):
    """Return the job types runnable by this Sahara, with supporting plugins.

    Each result entry is {"name": <job type>, "plugins": [...]}; every
    plugin dict carries a "versions" mapping of plugin version to config
    hints (hints are only populated when requested).

    All values in kwargs are expected to be lists:
      hints   -- ["true"]/["false"]: include per-version config hints
      plugin  -- plugin names to restrict the search to
      type    -- job types to report on (defaults to all known types)
      version -- plugin versions to consider
    """
    hints = kwargs.get("hints", ["false"])[0].lower() == "true"
    plugin_names = kwargs.get("plugin", [])
    all_plugins = plugin_base.PLUGINS.get_plugins()
    if plugin_names:
        # NOTE: materialize the selection -- a lazy filter() iterator would
        # be exhausted after the first job type in the loop below (Python 3).
        plugins = [plg for plg in all_plugins if plg.name in plugin_names]
    else:
        plugins = all_plugins
    job_types = kwargs.get("type", edp.JOB_TYPES_ALL)
    versions = kwargs.get("version", [])
    res = []
    for job_type in job_types:
        job_entry = {"name": job_type,
                     "plugins": []}
        for plugin in plugins:
            # Job types supported by each version of the plugin: a dict of
            # plugin-version string -> list of job types.
            types_for_plugin = plugin.get_edp_job_types(versions)
            # plugin.dict returns a new object, so the plugin itself is not
            # modified below. (Renamed from 'p' to avoid shadowing the
            # module-level proxy import.)
            plugin_dict = plugin.dict
            # Replace the version list with a dict of only the versions that
            # support this job type, each mapped to its config hints.
            plugin_dict["versions"] = {}
            for version, supported_types in six.iteritems(types_for_plugin):
                if job_type in supported_types:
                    if hints:
                        config_hints = plugin.get_edp_config_hints(job_type,
                                                                   version)
                    else:
                        config_hints = {}
                    plugin_dict["versions"][version] = config_hints
            # Include the plugin only if at least one version supports the job.
            if plugin_dict["versions"]:
                job_entry["plugins"].append(plugin_dict)
        if job_entry["plugins"]:
            res.append(job_entry)
    return res
def get_job_config_hints(job_type):
    """Return the config hints for a single job type."""
    return manager.get_job_config_hints(job_type)
def execute_job(job_id, data):
    """Create a job execution for ``job_id`` and hand it off to OPS to run.

    Returns the created job_execution object (status PENDING).
    Raises SaharaException when a required proxy user cannot be created;
    the half-created job execution is destroyed first in that case.
    """
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})
    interface = data.get('interface', {})
    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)
    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': edp.JOB_STATUS_PENDING},
                   'job_configs': configs, 'extra': {},
                   'interface': interface}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)
    context.set_current_job_execution_id(job_execution.id)
    # check to use proxy user
    if p.job_execution_requires_proxy_user(job_execution):
        try:
            p.create_proxy_user_for_job_execution(job_execution)
        except ex.SaharaException as e:
            LOG.error(_LE("Can't run job execution. "
                          "(Reasons: {reason})").format(reason=e))
            conductor.job_execution_destroy(context.ctx(), job_execution)
            # Bare raise preserves the original traceback (unlike `raise e`).
            raise
    OPS.run_edp_job(job_execution.id)
    return job_execution
def get_job_execution_status(id):
    """Return the current status for the job execution ``id``."""
    return manager.get_job_status(id)
def job_execution_list(**kwargs):
    """List job executions, filtered by any conductor-supported kwargs."""
    return conductor.job_execution_get_all(context.ctx(), **kwargs)
def get_job_execution(id):
    """Fetch a single job execution by id."""
    return conductor.job_execution_get(context.ctx(), id)
def cancel_job_execution(id):
    """Request cancellation of a job execution.

    Returns the execution as fetched before the cancel request.
    NOTE(review): OPS.cancel_job_execution appears to act asynchronously,
    so the returned object may not yet reflect the cancel -- confirm.
    """
    context.set_current_job_execution_id(id)
    job_execution = conductor.job_execution_get(context.ctx(), id)
    OPS.cancel_job_execution(id)
    return job_execution
def delete_job_execution(id):
    """Request deletion of a job execution via OPS."""
    context.set_current_job_execution_id(id)
    OPS.delete_job_execution(id)
def get_data_sources(**kwargs):
    """List data sources, filtered by any conductor-supported kwargs."""
    return conductor.data_source_get_all(context.ctx(), **kwargs)
def get_data_source(id):
    """Fetch a single data source by id."""
    return conductor.data_source_get(context.ctx(), id)
def delete_data_source(id):
    """Delete a data source by id."""
    conductor.data_source_destroy(context.ctx(), id)
def register_data_source(values):
    """Create a data source from the given values dict."""
    return conductor.data_source_create(context.ctx(), values)
def data_source_update(id, values):
    """Update an existing data source with the given values dict."""
    return conductor.data_source_update(context.ctx(), id, values)
def get_jobs(**kwargs):
    """List jobs, filtered by any conductor-supported kwargs."""
    return conductor.job_get_all(context.ctx(), **kwargs)
def get_job(id):
    """Fetch a single job by id."""
    return conductor.job_get(context.ctx(), id)
def create_job(values):
    """Create a job from the given values dict."""
    return conductor.job_create(context.ctx(), values)
def delete_job(job_id):
    """Delete a job by id."""
    return conductor.job_destroy(context.ctx(), job_id)
def create_job_binary(values):
    """Create a job binary from the given values dict."""
    return conductor.job_binary_create(context.ctx(), values)
def get_job_binaries(**kwargs):
    """List job binaries, filtered by any conductor-supported kwargs."""
    return conductor.job_binary_get_all(context.ctx(), **kwargs)
def get_job_binary(id):
    """Fetch a single job binary by id."""
    return conductor.job_binary_get(context.ctx(), id)
def update_job_binary(id, values):
    """Update an existing job binary with the given values dict."""
    return conductor.job_binary_update(context.ctx(), id, values)
def delete_job_binary(id):
    """Delete a job binary by id."""
    conductor.job_binary_destroy(context.ctx(), id)
def create_job_binary_internal(values):
    """Create an internal-db job binary from the given values dict."""
    return conductor.job_binary_internal_create(context.ctx(), values)
def get_job_binary_internals(**kwargs):
    """List internal-db job binaries, filtered by conductor kwargs."""
    return conductor.job_binary_internal_get_all(context.ctx(), **kwargs)
def get_job_binary_internal(id):
    """Fetch a single internal-db job binary by id."""
    return conductor.job_binary_internal_get(context.ctx(), id)
def delete_job_binary_internal(id):
    """Delete an internal-db job binary by id."""
    conductor.job_binary_internal_destroy(context.ctx(), id)
def get_job_binary_internal_data(id):
    """Return the raw bytes stored for an internal-db job binary."""
    return conductor.job_binary_internal_get_raw_data(context.ctx(), id)
def get_job_binary_data(id):
    """Resolve a job binary and fetch its raw data via the dispatcher."""
    job_binary = conductor.job_binary_get(context.ctx(), id)
    return dispatch.get_raw_binary(job_binary, with_context=True)
|
{
"content_hash": "1301cdddf4120279e8cd160910dd61d0",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 78,
"avg_line_length": 29.22077922077922,
"alnum_prop": 0.6481481481481481,
"repo_name": "ekasitk/sahara",
"id": "ecfbe85c679600c12c60fac6daa6acd3d18681fb",
"size": "7333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/service/edp/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "19620"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3141724"
},
{
"name": "Shell",
"bytes": "52399"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from datafeed.exchange import Security
__all__ = ['Report', 'Day', 'Minute', 'SecurityList']
class _Struct(object):
    """Base quote record: a Security plus arbitrary attributes from a dict.

    The attribute dict is applied first and ``security`` is set afterwards,
    so a 'security' key in ``adict`` cannot clobber the Security instance.
    """
    def __init__(self, security, adict):
        assert isinstance(security, Security)
        self.__dict__.update(adict)
        self.security = security
    def assert_data(self):
        # Hook for subclasses to validate their payload; no-op here.
        pass
    def __getstate__(self):
        # Exclude '_raw_data' (if present) from the pickled/dict state.
        odict = self.__dict__.copy()
        odict.pop('_raw_data', None)
        return odict
    def __setstate__(self, state):
        self.__dict__.update(state)
    def todict(self):
        """Return a plain-dict view, with the same filtering as pickling."""
        return self.__getstate__()
class Report(_Struct):
    """A single quote report; requires a float 'price' and datetime 'time'."""
    def __init__(self, security, adict):
        assert isinstance(adict['price'], float)
        assert isinstance(adict['time'], datetime)
        super(Report, self).__init__(security, adict)
    def __str__(self):
        return "%s, %s, %s" % (self.security, self.price, self.time)
class Day(_Struct):
    # Marker subclass of _Struct; adds no behavior of its own.
    pass
class Minute(_Struct):
    # Marker subclass of _Struct; adds no behavior of its own.
    pass
class SecurityList(_Struct):
    # Marker subclass of _Struct; adds no behavior of its own.
    pass
|
{
"content_hash": "09f156bb0bd1bdfff111e1ac96d632a6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 68,
"avg_line_length": 21.06122448979592,
"alnum_prop": 0.5872093023255814,
"repo_name": "yinhm/datafeed",
"id": "077289c9f0d17705267576146292c3efb702a18e",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datafeed/quote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "203178"
},
{
"name": "Shell",
"bytes": "3121"
}
],
"symlink_target": ""
}
|
"""myapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Route table: only the Django admin site is exposed.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
|
{
"content_hash": "fbb7bfda04aa12a4de3ceee88384c622",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.285714285714285,
"alnum_prop": 0.699475065616798,
"repo_name": "HolubTom/SpektrumDuhy",
"id": "bdc834f2ffd57011441f43e8d75fdbd83a6015dc",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spektrumduhy/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45820"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "5754"
}
],
"symlink_target": ""
}
|
__all__ = ['ttypes', 'constants', 'Service', 'AdapterService']
|
{
"content_hash": "87e0d1941830a3753a8edafa93eef68f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 62,
"avg_line_length": 63,
"alnum_prop": 0.6190476190476191,
"repo_name": "facebook/fbthrift",
"id": "208c67726cd684cd24aafdc0f73b9ff8d4fcc78b",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/compiler/test/fixtures/adapter/gen-py/module/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
}
|
"""Tests for SparseAdd."""
import timeit
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero out entries of ``x`` below ``thresh`` and return a SparseTensor.

  NOTE: mutates ``x`` in place on purpose -- callers below rely on the
  zeroed array afterwards. Returns (sparse_tensor, number_of_nonzeros).
  """
  x[x < thresh] = 0
  nz = np.where(x)
  indices = np.vstack(nz).astype(index_dtype).T
  values = x[nz]
  st = sparse_tensor.SparseTensor(
      indices=indices, values=values, dense_shape=x.shape)
  return st, len(values)
class SparseAddTest(test.TestCase):
  """Tests sparse_ops.sparse_add on sparse/sparse and sparse/dense pairs."""
  def _randomTensor(self, size, np_dtype, sparse=True):
    # Random n x m tensor; (SparseTensor, nnz) pair when sparse=True.
    n, m = size
    x = np.random.randn(n, m).astype(np_dtype)
    return _sparsify(x) if sparse else x
  def _SparseTensorValue_3x3(self, negate=False):
    # [ 1]
    # [2 ]
    # [3 4]
    # ...or its cwise negation, if `negate`
    ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
    val = np.array([1, 2, 3, 4])
    if negate:
      val = -np.array([1, 2, 3, 4])
    shape = np.array([3, 3])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(val, np.float32), np.array(shape, np.int64))
  def _SparseTensor_3x3(self, negate=False):
    # Same fixture as above, wrapped as a SparseTensor.
    return sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_3x3(negate))
  def _SparseTensor_3x3_v2(self):
    # [ 1]
    # [-1.9 ]
    # [ 3 -4.2]
    ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
    val = np.array([1, -1.9, 3, -4.2])
    shape = np.array([3, 3])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.float32),
        constant_op.constant(shape, dtypes.int64))
  def testAddSelf(self):
    # Adding a tensor to itself doubles the values; accepts both
    # SparseTensor and SparseTensorValue operands in any combination.
    with test_util.force_cpu():
      for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
        for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
          sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
          self.assertAllEqual((3, 3), sp_sum.get_shape())
          sum_out = self.evaluate(sp_sum)
          self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
          self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
          self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
          self.assertAllEqual(sum_out.dense_shape, [3, 3])
  def testAddSelfAndNegation(self):
    # x + (-x) with a small threshold leaves no entries at all.
    with test_util.force_cpu():
      sp_a = self._SparseTensor_3x3()
      sp_b = self._SparseTensor_3x3(negate=True)
      sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
      sum_out = self.evaluate(sp_sum)
      self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
      self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
      self.assertAllEqual(sum_out.values, [])
      self.assertAllEqual(sum_out.dense_shape, [3, 3])
  def testSmallValuesShouldVanish(self):
    # Result entries with |value| < thresh are dropped from the sum.
    with test_util.force_cpu():
      sp_a = self._SparseTensor_3x3()
      sp_b = self._SparseTensor_3x3_v2()
      # sum:
      # [ 2]
      # [.1 ]
      # [ 6 -.2]
      # two values should vanish: |.1| < .21, and |-.2| < .21
      sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
      sum_out = self.evaluate(sp_sum)
      self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
      self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
      self.assertAllEqual(sum_out.values, [2, 6])
      self.assertAllEqual(sum_out.dense_shape, [3, 3])
      # only .1 vanishes
      sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
      sum_out = self.evaluate(sp_sum)
      self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
      self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
      self.assertAllClose(sum_out.values, [2, 6, -.2])
      self.assertAllEqual(sum_out.dense_shape, [3, 3])
  @test_util.run_deprecated_v1
  def testGradients(self):
    # Numeric-vs-symbolic gradient check for sparse + sparse.
    np.random.seed(1618)  # Make it reproducible.
    with self.session(use_gpu=False):
      for n in [10, 31]:
        for m in [4, 17]:
          sp_a, nnz_a = self._randomTensor([n, m], np.float32)
          sp_b, nnz_b = self._randomTensor([n, m], np.float32)
          sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
          nnz_sum = len(self.evaluate(sp_sum.values))
          err = gradient_checker.compute_gradient_error(
              [sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
              (nnz_sum,))
          self.assertLess(err, 1e-3)
  def testAddSparseDense(self):
    # sparse + dense returns a dense result, in either operand order.
    np.random.seed(1618)  # Make it reproducible.
    n, m = np.random.randint(30, size=2)
    for dtype in [np.float32, np.float64, np.int64, np.complex64]:
      for index_dtype in [np.int32, np.int64]:
        rand_vals_np = np.random.randn(n, m).astype(dtype)
        dense_np = np.random.randn(n, m).astype(dtype)
        with test_util.force_cpu():
          sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
          s = self.evaluate(
              sparse_ops.sparse_add(sparse, constant_op.constant(dense_np)))
          # rand_vals_np was zeroed in place by _sparsify, so this matches.
          self.assertAllEqual(dense_np + rand_vals_np, s)
          self.assertTrue(s.dtype == dtype)
          # check commutativity
          s = self.evaluate(
              sparse_ops.sparse_add(constant_op.constant(dense_np), sparse))
          self.assertAllEqual(dense_np + rand_vals_np, s)
          self.assertTrue(s.dtype == dtype)
  @test_util.run_deprecated_v1
  def testSparseTensorDenseAddGradients(self):
    # Gradient check for the sparse + dense path.
    np.random.seed(1618)  # Make it reproducible.
    n, m = np.random.randint(30, size=2)
    rand_vals_np = np.random.randn(n, m).astype(np.float32)
    dense_np = np.random.randn(n, m).astype(np.float32)
    with self.session(use_gpu=False):
      sparse, nnz = _sparsify(rand_vals_np)
      dense = constant_op.constant(dense_np, dtype=dtypes.float32)
      s = sparse_ops.sparse_add(sparse, dense)
      err = gradient_checker.compute_gradient_error([sparse.values, dense],
                                                    [(nnz,), (n, m)], s, (n, m))
      self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testInvalidSparseTensor(self):
    # Out-of-range indices must surface as InvalidArgumentError at runtime.
    with test_util.force_cpu():
      shape = [2, 2]
      val = [0]
      dense = constant_op.constant(np.zeros(shape, dtype=np.int32))
      for bad_idx in [
          [[-1, 0]],  # -1 is invalid.
          [[1, 3]],  # ...so is 3.
      ]:
        sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
        s = sparse_ops.sparse_add(sparse, dense)
        with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                    "invalid index"):
          self.evaluate(s)
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
  """Time dense add (after sparse_to_dense) against sparse_add on n x m data.

  Returns a (s2d_millis, sparse_add_millis) pair of per-iteration latencies.
  """
  np.random.seed(1618)

  with session.Session(graph=ops.Graph()) as sess:
    sparse_np = np.random.rand(n, m).astype(np.float32)
    sp_t, unused_nnz = _sparsify(sparse_np, thresh=sparsity,
                                 index_dtype=np.int32)
    dense_np = np.random.rand(n, m).astype(np.float32)

    s2d = math_ops.add(
        sparse_ops.sparse_tensor_to_dense(sp_t),
        constant_op.constant(dense_np))
    sa = sparse_ops.sparse_add(sp_t, constant_op.constant(dense_np))

    # Warm up both ops before measuring.
    timeit.timeit(lambda: sess.run(s2d), number=3)
    timeit.timeit(lambda: sess.run(sa), number=3)

    s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
    sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)

  # Convert total seconds to per-iteration milliseconds.
  to_millis = 1e3 / num_iters
  return s2d_total * to_millis, sa_total * to_millis
class SparseAddBenchmark(test.Benchmark):
  """Benchmarks sparse_add against add-after-densify over a parameter grid."""

  def benchmarkSparseAddDense(self):
    print("SparseAddDense: add with sparse_to_dense vs. sparse_add")
    print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup")

    for sparsity in [0.99, 0.5, 0.01]:
      for n in [1, 256, 50000]:
        for m in [100, 1000]:
          s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m)
          speedup = s2d_dt / sa_dt
          print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" %
                (sparsity, n, m, s2d_dt, sa_dt, speedup))
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "36695fcd6d01b9190e06c565f920c6db",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 80,
"avg_line_length": 36.19327731092437,
"alnum_prop": 0.6054097980032506,
"repo_name": "Intel-Corporation/tensorflow",
"id": "61ad45fb5e273e2b197cbcf685ffb20d100c12b4",
"size": "9303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/sparse_ops/sparse_add_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
import time
import unittest
import node
import config
import command
# Node ids for the four-node test topology.
LEADER = 1
DUT_ROUTER1 = 2  # Device under test.
ROUTER2 = 3
ROUTER3 = 4
class Cert_5_3_5_RoutingLinkQuality(unittest.TestCase):
    """Thread cert test 5.3.5: route selection adapts to link quality.

    Topology (via whitelists): LEADER links to DUT_ROUTER1 and ROUTER2;
    DUT_ROUTER1 links to all three others; ROUTER3 links only to
    DUT_ROUTER1.  The test degrades the LEADER<->DUT_ROUTER1 link and
    checks that an ICMPv6 echo from ROUTER3 to the LEADER follows the
    lowest-cost path, using a packet sniffer to verify the hop sequence.
    """

    def setUp(self):
        # Instantiate the four simulated/attached nodes (ids 1..4).
        self.nodes = {}
        for i in range(1,5):
            self.nodes[i] = node.Node(i)

        # LEADER: radio links to DUT_ROUTER1 and ROUTER2 only.
        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        # DUT_ROUTER1: links to every other node.  Jitter of 1 presumably
        # makes router promotion timing fast/deterministic — verify against
        # the node API if this test flakes.
        self.nodes[DUT_ROUTER1].set_panid(0xface)
        self.nodes[DUT_ROUTER1].set_mode('rsdn')
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[ROUTER3].get_addr64())
        self.nodes[DUT_ROUTER1].enable_whitelist()
        self.nodes[DUT_ROUTER1].set_router_selection_jitter(1)

        # ROUTER2: the alternate two-hop path between LEADER and DUT_ROUTER1.
        self.nodes[ROUTER2].set_panid(0xface)
        self.nodes[ROUTER2].set_mode('rsdn')
        self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER2].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
        self.nodes[ROUTER2].enable_whitelist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)

        # ROUTER3: the ping source; reachable only through DUT_ROUTER1.
        self.nodes[ROUTER3].set_panid(0xface)
        self.nodes[ROUTER3].set_mode('rsdn')
        self.nodes[ROUTER3].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
        self.nodes[ROUTER3].enable_whitelist()
        self.nodes[ROUTER3].set_router_selection_jitter(1)

        # Sniffer records traffic so the actual forwarding path can be checked.
        self.sniffer = config.create_default_thread_sniffer()
        self.sniffer.start()

    def tearDown(self):
        # Stop the sniffer and power down every node so the next test starts
        # from a clean state.
        self.sniffer.stop()
        del self.sniffer

        for node in list(self.nodes.values()):
            node.stop()
        del self.nodes

    def test(self):
        # 1: Form the network — LEADER first, then the three routers.
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        for router in range(DUT_ROUTER1, ROUTER3 + 1):
            self.nodes[router].start()
        time.sleep(10)
        for router in range(DUT_ROUTER1, ROUTER3 + 1):
            self.assertEqual(self.nodes[router].get_state(), 'router')

        # 2 & 3: With full-quality links the direct ROUTER3->DUT->LEADER
        # path is cheapest.
        leader_rloc = self.nodes[LEADER].get_ip6_address(config.ADDRESS_TYPE.RLOC)
        # Verify the ICMPv6 Echo Request took the least cost path.
        self.assertTrue(self.nodes[ROUTER3].ping(leader_rloc))
        path = [ROUTER3, DUT_ROUTER1, LEADER]
        command.check_icmp_path(self.sniffer, path, self.nodes)

        # 4 & 5: Degrade the LEADER<->DUT link to quality 1 on both ends.
        # NOTE(review): 'LINK_QULITY' is a typo, but it must match the key
        # spelling in config.RSSI — do not "fix" it in this file alone.
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64(), config.RSSI['LINK_QULITY_1'])
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64(), config.RSSI['LINK_QULITY_1'])
        # Presumably 70s allows route cost re-advertisement to settle — verify.
        time.sleep(70)

        # Verify the ICMPv6 Echo Request took the longer path because it cost less.
        self.assertTrue(self.nodes[ROUTER3].ping(leader_rloc))
        path = [ROUTER3, DUT_ROUTER1, ROUTER2, LEADER]
        command.check_icmp_path(self.sniffer, path, self.nodes)

        # 6 & 7: Raise the link back to quality 2, making the path costs tie.
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64(), config.RSSI['LINK_QULITY_2'])
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64(), config.RSSI['LINK_QULITY_2'])
        time.sleep(70)

        # Verify the direct neighbor would be prioritized when there are two paths with the same cost.
        self.assertTrue(self.nodes[ROUTER3].ping(leader_rloc))
        path = [ROUTER3, DUT_ROUTER1, LEADER]
        command.check_icmp_path(self.sniffer, path, self.nodes)

        # 8 & 9: Drop the link to quality 0, forcing the ROUTER2 detour again.
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64(), config.RSSI['LINK_QULITY_0'])
        self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64(), config.RSSI['LINK_QULITY_0'])
        time.sleep(70)

        # Verify the ICMPv6 Echo Request took the longer path.
        leader_rloc = self.nodes[LEADER].get_ip6_address(config.ADDRESS_TYPE.RLOC)
        self.assertTrue(self.nodes[ROUTER3].ping(leader_rloc))
        path = [ROUTER3, DUT_ROUTER1, ROUTER2, LEADER]
        command.check_icmp_path(self.sniffer, path, self.nodes)
# Run the cert test when executed directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "489e46c270a9ed743b4a3ca99846ba5b",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 108,
"avg_line_length": 39.96363636363636,
"alnum_prop": 0.6480891719745223,
"repo_name": "xiaom-GitHub/openthread",
"id": "d4e721a0f5491264e1882d95e46b9053dbc91f49",
"size": "6000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_5_3_05_RoutingLinkQuality.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "10128"
},
{
"name": "C",
"bytes": "572698"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3486464"
},
{
"name": "M4",
"bytes": "48751"
},
{
"name": "Makefile",
"bytes": "82554"
},
{
"name": "Python",
"bytes": "1060869"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "28070"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.