text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import print_function
from ommongo.py3compat import *
from nose.tools import *
from ommongo.session import Session
from ommongo.document import *
from ommongo.fields import *
from test.util import known_failure
# Document Types used in some tests
class TestDoc(Document):
    """Minimal document with a single integer field, shared across tests."""
    int1 = IntField()

    def __repr__(self):
        return 'TestDoc(int1=%d)' % self.int1
# Document Types used in some tests
class TestDoc2(Document):
    """Minimal document with a single string field, shared across tests."""
    sfield = StringField()

    def __repr__(self):
        # Bug fix: the repr previously read 'TestDoc(int1=%s)' — a
        # copy-paste from TestDoc that misreported both class and field.
        return 'TestDoc2(sfield=%s)' % self.sfield
class T(Document, DictDoc):
    """Document with dict-style access (via DictDoc) used by the DictDoc tests.

    Default query sort is ascending on ``i``; ``a`` is stored in the
    database under the alternate key ``aa``.
    """
    config_default_sort = [('i', 1)]
    i = IntField()
    j = IntField(required=False)
    s = StringField(required=False)
    l = IntField(required=False) if False else ListField(IntField(), required=False)
    a = IntField(required=False, db_field='aa')
    index = Index().ascending('i')
class DocA(Document):
    """Document embedding optional TestDoc / TestDoc2 children."""
    test_doc = DocumentField(TestDoc, required=False)
    test_doc2 = DocumentField(TestDoc2, required=False)

    def __eq__(self, other):
        # equal only when the classes match and the embedded int1 values agree
        return (self.__class__ == other.__class__
                and self.test_doc.int1 == other.test_doc.int1)

    def __repr__(self):
        return 'DocA()'
# Tests
def test_setup():
    """nose ``with_setup`` hook: reset the global document-type registry."""
    document_type_registry.clear()
def get_session():
    """Return a Session connected to the throwaway unit-test database."""
    return Session.connect('unit-testing')
def test_default_config():
    """A document with config_default_sort can be saved and queried."""
    session = get_session()
    session.clear_collection(T)
    doc = T(i=4)
    session.save(doc)
    # iterating forces the query (and its default sort) to actually run
    for _ in session.query(T).filter(T.i == 4):
        pass
@raises(BadFieldSpecification)
def test_default_sort_bad_name():
    """A default sort naming an unknown field must be rejected at init."""
    class Foo(Document):
        config_default_sort = [('a', 1)]

    try:
        Foo()
    except BadFieldSpecification as exc:
        # verify the error is about field resolution, then re-raise
        # so the @raises decorator sees it
        assert 'resolve field' in str(exc)
        raise
@raises(BadFieldSpecification)
def test_default_sort_bad_dir():
    """A default sort with an invalid direction value must be rejected."""
    class Foo(Document):
        a = IntField()
        config_default_sort = [('a', 3)]

    try:
        Foo()
    except BadFieldSpecification as exc:
        # verify the error is about the direction, then re-raise for @raises
        assert 'sort direction' in str(exc)
        raise
    # only reached if no exception fired — force a failure in that case
    assert False
def test_basic():
    """Saving a document assigns it a mongo_id."""
    class Doc(Document):
        count = IntField()

    session = get_session()
    doc = Doc(count=0)
    session.save(doc)
    assert doc.mongo_id
def test_basic2():
    """class_name reports the Python name; get_collection_name honours
    config_collection_name."""
    class Doc(Document):
        config_collection_name = 'DocCol'
        count = IntField()
    assert Doc.class_name() == 'Doc', Doc.class_name()
    assert Doc.get_collection_name() == 'DocCol'
def test_mongo_id():
    """With a custom _id field, the class-level mongo_id accessor is None."""
    class Doc(Document):
        i = IntField(_id=True)
    assert Doc.mongo_id is None
def test_update_ops():
    """Setting a nested field produces a dotted $set dirty operation."""
    inner = TestDoc(int1=1)
    outer = DocA(test_doc=inner)
    expected = {'$set': {'test_doc.int1': 1}}
    assert outer.get_dirty_ops() == expected, outer.get_dirty_ops()

    class DocB(Document):
        a = DocumentField(DocA)
        b = IntField()

    # an empty embedded document contributes no dirty ops
    assert DocB(a=DocA()).get_dirty_ops() == {}
def test_delete_field():
    """Deleting a set field removes it; deleting an unset field raises."""
    class Doc(Document):
        a = IntField()
        b = IntField()

    doc = Doc()
    doc.a = 5
    assert doc.a == 5
    del doc.a
    # after deletion, reading the field must raise AttributeError
    try:
        doc.a
        assert False, 'delete attribute a failed'
    except AttributeError:
        pass
    # deleting a field that was never set must also raise
    try:
        del doc.b
        assert False, 'delete attribute b failed'
    except AttributeError:
        pass
def test_inheritance():
    """Polymorphic collections: querying the base class must reconstruct
    the most-derived saved type (InC here)."""
    # classes
    class InA(Document):
        config_extra_fields = 'ignore'
        config_polymorphic = 'type'
        config_polymorphic_collection = True
        a = IntField()
        type = StringField()

    class InB(InA):
        # config_collection_name = 'InA'
        config_polymorphic_identity = 'foo'
        # InB switches the discriminator field for its own subtree to 'type2'
        config_polymorphic = 'type2'
        b = IntField()
        type = StringField(default=config_polymorphic_identity)

    class InC(InB):
        # config_collection_name = 'InA'
        config_polymorphic_identity = 'bar'
        c = IntField()
        # note: defaults 'type2' (InB's discriminator), not 'type'
        type2 = StringField(default=config_polymorphic_identity)

    # clear old data
    s = get_session()
    s.clear_collection(InA)
    b = InC(a=5, b=1, c=0)
    bb = InC(a=4, b=3, c=12)
    s.save(b)
    s.save(bb)
    # every object saved was an InC, so the base query must yield InC
    for obj in s.query(InA).all():
        assert type(obj) == InC, type(obj)
def test_inheritance_queries():
    """Each level of a polymorphic hierarchy queries with a $in filter over
    its own identity plus those of all its subclasses."""
    # classes
    class InA(Document):
        config_extra_fields = 'ignore'
        config_polymorphic = 'type'
        config_polymorphic_collection = True
        config_polymorphic_identity = 'ina'
        type = StringField()

    class InB(InA):
        config_polymorphic_identity = 'inb'
        type = StringField(default=config_polymorphic_identity)

    class InC(InB):
        config_polymorphic_identity = 'inc'
        type = StringField(default=config_polymorphic_identity)

    class InD(InC):
        config_polymorphic_identity = 'ind'
        type = StringField(default=config_polymorphic_identity)

    class InB2(InB):
        config_polymorphic_identity = 'b2'
        type = StringField(default=config_polymorphic_identity)

    s = get_session()
    s.clear_collection(InA)
    # clear old data
    def check(expr, value):
        # the generated query must filter 'type' to exactly these identities
        assert set(expr.query['type']['$in']) == set(value), expr.query
    check(s.query(InA), ['ind', 'b2', 'inb', 'inc', 'ina'])
    check(s.query(InB), ['ind', 'b2', 'inb', 'inc'])
    check(s.query(InC), ['ind', 'inc'])
    check(s.query(InD), ['ind'])
    check(s.query(InB2), ['b2'])
    # exclude subclasses: the filter collapses to a single identity
    assert s.query(InA, exclude_subclasses=True).query['type'] == 'ina'
    assert s.query(InB, exclude_subclasses=True).query['type'] == 'inb'
    assert s.query(InC, exclude_subclasses=True).query['type'] == 'inc'
    assert s.query(InD, exclude_subclasses=True).query['type'] == 'ind'
    assert s.query(InB2, exclude_subclasses=True).query['type'] == 'b2'
def test_exclude_with_normal_class():
    """exclude_subclasses adds no filter when nothing subclasses the doc."""
    class PolyDoc(Document):
        config_polymorphic = True

    session = get_session()
    session.clear_collection(PolyDoc)
    assert session.query(PolyDoc, exclude_subclasses=True).query == {}
@raises(DocumentException)
def bad_extra_fields_param_test():
    """An unrecognised config_extra_fields value must raise at class creation."""
    class BadDoc(Document):
        config_extra_fields = 'blah'
def extra_fields_test():
    """With config_extra_fields='ignore', unknown keys survive a round trip."""
    class BadDoc(Document):
        config_extra_fields = 'ignore'

    doc_with_extra = {'foo' : [1]}
    unwrapped = BadDoc.unwrap(doc_with_extra)
    # the unknown key is preserved on the instance ...
    assert unwrapped.get_extra_fields() == doc_with_extra
    # ... and emitted again on wrap
    assert BadDoc.wrap(unwrapped) == doc_with_extra
@raises(MissingValueException)
def test_required_fields():
    """Wrapping with a required field unset raises MissingValueException."""
    class Doc(Document):
        i = IntField()
    Doc().wrap()
@raises(AttributeError)
def test_missing_fields():
    """Reading an optional field that was never set raises AttributeError."""
    class Doc(Document):
        i = IntField(required=False)
    Doc().i
def test_non_existant_field():
    """Assigning an attribute that is not a declared field is permitted."""
    class Doc(Document):
        i = IntField(required=False)
    Doc().j = 5
def test_default_value():
    """A field's default is visible on a freshly constructed document."""
    class Doc(Document):
        i = IntField(required=False, default=1)
    assert Doc().i == 1
@raises(Exception)
def bad_field_test():
    """Constructing a document with an undeclared field name raises."""
    session = get_session()
    session.clear_collection(TestDoc)
    TestDoc(int1=1, str4='sdasa')
def loading_test():
    """A saved document can be read back with the same field values."""
    session = get_session()
    session.clear_collection(TestDoc)
    saved = TestDoc(int1=123431)
    session.save(saved)
    # grab the first (only) result
    for loaded in session.query(TestDoc):
        break
    assert loaded.int1 == saved.int1
def docfield_not_dirty_test():
    """An update straight after save (no changes) must not error."""
    class SuperDoc(Document):
        int1 = IntField()
        sub = DocumentField(TestDoc)

    session = get_session()
    session.clear_collection(TestDoc, SuperDoc)
    embedded = TestDoc(int1=3)
    parent = SuperDoc(int1=4, sub=embedded)
    session.save(parent)
    session.update(parent)
def docfield_test():
    """An embedded DocumentField round-trips through save and query."""
    class SuperDoc(Document):
        int1 = IntField()
        sub = DocumentField(TestDoc)

    session = get_session()
    session.clear_collection(TestDoc, SuperDoc)
    embedded = TestDoc(int1=3)
    parent = SuperDoc(int1=4, sub=embedded)
    session.save(parent)
    # grab the first (only) result
    for loaded in session.query(SuperDoc):
        break
    assert loaded.int1 == parent.int1
    assert loaded.sub.int1 == embedded.int1
def test_non_ma_property_attribute_error():
    ''' At the time of writing, it was possible for a normal class field to be
        treated as a MA one.  If the instance's field raised an attribute
        error we would try to access the "required" attribute of the class
        level field.  This attribute only exists on MA Field instances,
        though '''
    class Doc(Document):
        i = IntField()
        @property
        def foo(self):
            # deliberately raises so wrap/unwrap must skip this property
            raise AttributeError()
    x = Doc(i=2)
    assert Doc.unwrap(x.wrap()).i == x.i
def test_doc_field_with_alternate_name():
    """db_field stores the value under the alternate key in the DB dict."""
    class Doc(Document):
        i = IntField(db_field='ii')
        def __eq__(self, other):
            return self.i == other.i

    doc = Doc(i=3)
    # wrap emits the alternate key; unwrap reads it back
    assert doc.wrap() == {'ii' : 3}
    assert doc == Doc.unwrap({'ii' : 3})
def test_doc_field():
    """An embedded document survives a wrap/unwrap round trip."""
    inner = TestDoc(int1=0)
    outer = DocA(test_doc=inner)
    wrapped = outer.wrap()
    restored = DocA.unwrap(wrapped)
    assert restored == outer
    # the field also recognises the wrapped form as valid
    assert DocA.test_doc.is_valid_unwrap(wrapped)
@raises(BadValueException)
def wrong_wrap_type_test():
    """Wrapping a field holding the wrong document type raises."""
    doc1 = TestDoc(int1=0)
    wrong = TestDoc2(sfield='a')
    # test_doc expects a TestDoc, not a TestDoc2
    DocA(test_doc=wrong).wrap()
@raises(BadValueException)
def wrong_wrap_type_test2():
    """An invalid inner field value surfaces when wrapping the parent."""
    bad = TestDoc2(sfield=1)  # this is an invalid value
    DocA(test_doc2=bad).wrap()
# def is_valid_unwrap_test_true():
# assert DocA.test_doc.is_valid_unwrap({ 'int1' : 1 }) == True
# def is_valid_unwrap_test_false():
# assert DocA.test_doc2.is_valid_unwrap({ 'int1' : 1 }) == False
@raises(ExtraValueException)
def wrong_unwrap_type_test():
    """An unrecognised key ('testdoc', not 'test_doc') raises on unwrap."""
    DocA.unwrap({ 'test_doc2' : { 'int1' : 1 }, 'testdoc' : {'int1' : 1 } })
@raises(MissingValueException)
def test_upsert_with_required():
    """Upserting a document that lacks a required field ('a') must fail."""
    class D(Document):
        a = IntField()
        c = IntField()
        b = IntField(required=False)

    session = get_session()
    session.clear_collection(D)
    session.update(D(b=4, c=4), id_expression=D.b == 4, upsert=True)
def test_upsert_with_no_changes():
    """Upserting an unchanged, freshly loaded document must not error."""
    class D(Document):
        a = IntField()
        c = IntField()
        b = IntField(required=False)

    session = get_session()
    session.clear_collection(D)
    session.update(D(a=1, b=4, c=4), id_expression=D.b == 4, upsert=True)
    loaded = session.query(D).one()
    session.update(loaded, upsert=True)
def test_deepcopy():
    """copy.deepcopy yields a distinct object with equal field values."""
    import copy
    original = TestDoc(int1=4)
    clone = copy.deepcopy(original)
    assert original is not clone
    assert original.int1 == clone.int1
def test_default_eq():
    """Default equality compares mongo_id, not field values."""
    first = TestDoc(int1=4)
    second = TestDoc(int1=4)
    # without ids assigned, equal field values do not imply equality
    assert not (first == second)
    first.mongo_id = ObjectId()
    second.mongo_id = ObjectId()
    assert first != second
    # identical ids make the documents compare equal
    second.mongo_id = first.mongo_id
    assert first == second
def test_unwrapped_is_not_dirty():
    """Documents loaded from the database start with no dirty ops."""
    class D(Document):
        a = IntField()

    session = get_session()
    session.clear_collection(D)
    session.save(D(a=1))
    loaded = session.query(D).one()
    assert not loaded.get_dirty_ops(), loaded.get_dirty_ops()
def test_update_with_unset():
    """Deleting a field then updating removes it from the stored document."""
    class D(Document, DictDoc):
        a = IntField()
        c = IntField()
        b = IntField(required=False)

    session = get_session()
    session.clear_collection(D)
    doc = D(a=1, b=4, c=4)
    session.update(doc, id_expression=D.b == 4, upsert=True)
    doc = session.query(D).one()
    del doc.c
    session.update(doc)
    # re-fetch and confirm 'c' is really gone from the database copy
    doc = session.query(D).one()
    assert 'c' not in doc
@with_setup(test_setup)
def test_self_reference():
    """A DocumentField may refer to its own class by name string."""
    class D(Document):
        d = DocumentField('D', required=False)
        a = IntField()

    outer = D(d=D(a=5), a=4)
    assert outer.wrap() == { 'd' : { 'a' : 5 }, 'a' : 4 }
    session = get_session()
    session.clear_collection(D)
    session.save(outer)
    loaded = session.query(D).one()
    assert loaded.d.a == outer.d.a
@with_setup(test_setup)
def test_config_full_name():
    """config_full_name registers a document under a namespaced name that
    other documents can reference by string."""
    class E(Document):
        d = DocumentField('ma.D')

    class D(Document):
        config_full_name = 'ma.D'
        a = IntField()

    doc = E(d=D(a=4))
    assert doc.d.a == 4
@with_setup(test_setup)
def test_config_in_list():
    """Namespaced document references resolve inside nested ListFields."""
    class E(Document):
        d = ListField(ListField(DocumentField('ma.D')))

    class D(Document):
        config_full_name = 'ma.D'
        a = IntField()

    doc = E(d=[[D(a=4)]])
    assert doc.wrap() == { 'd' : [[{'a':4}]] }
@with_setup(test_setup)
@raises(BadFieldSpecification)
def test_bad_string_doc():
    """A string reference to an unregistered document type fails on wrap."""
    class D(Document):
        e = DocumentField('E')
    D(e=5).wrap()
@with_setup(test_setup)
@raises(BadFieldSpecification)
def test_namespaces_disabled():
    """With config_namespace=None, string document references cannot resolve."""
    class D(Document):
        config_namespace = None
        e = DocumentField('E')
    D(e=5).wrap()
@with_setup(test_setup)
def test_set_parent_on_subtypes():
    """Container fields (tuple/enum/dict) accept DocumentField children
    without error at class-creation time."""
    class D(Document):
        a = IntField()
    class ParentDoc(Document):
        t = TupleField(DocumentField('D'))
        e = EnumField(DocumentField('D'), D(a=1))
        d = DictField(DocumentField('D'))
# test DictDoc
def test_dictdoc_contains():
    """__contains__ reflects only fields that were retrieved AND set."""
    doc = T(i=1, retrieved_fields=[T.i, T.j])
    # i was retrieved and set; j retrieved but unset; s not retrieved
    assert 'i' in doc
    assert 'j' not in doc
    assert 's' not in doc
    assert 'noexist' not in doc
    assert doc['i'] == 1
def test_dictdoc_set():
    """__setitem__ writes through to the underlying attribute."""
    doc = T(i=1, retrieved_fields=[T.i, T.j])
    assert 'i' in doc
    doc['i'] = 4
    assert doc.i == 4
def test_dictdoc_setdefault():
    """setdefault returns the existing value, or stores and returns the
    default when the field is unset."""
    t = T(i=1, retrieved_fields=[T.i, T.j])
    assert t.setdefault('i', 4) == 1
    assert t.setdefault('j', 3) == 3
def test_set_dict_field():
    """Reassigning a whole DictField value replaces the stored dict."""
    class TestDict(Document):
        data = DictField(AnythingField())

    session = Session.connect('ommongo-unit-testing')
    session.clear_collection(TestDict)
    doc = TestDict()
    doc.data = {"foo": "bar", "baz": "qux"}
    session.save(doc)
    # reload, shrink the dict, and save again
    doc = session.query(TestDict).one()
    doc.data = {"foo": "bar"}
    session.save(doc)
    doc = session.query(TestDict).one()
    assert doc.data == {'foo':'bar'}, doc.data
|
{
"content_hash": "eacadca113006b21de27ac74a41feb20",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 77,
"avg_line_length": 25.466793168880454,
"alnum_prop": 0.5977199910587885,
"repo_name": "bapakode/OmMongo",
"id": "4d257db1f79c01fbc2c1a65012eaa364619195f9",
"size": "13421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_documents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "270963"
}
],
"symlink_target": ""
}
|
"""
Report Table Widget
This creates a composite widget designed to display tabular data.
"""
import Tkinter as tk
import tkFont
import ttk
# ZIH - temp
# Module-level ttk Style instance; the commented configure/layout calls
# below are kept for reference while the widget styling is worked out.
style = ttk.Style()
#style.configure( '.', relief = 'flat', borderwidth = 0 )
#style.layout( 'Treeview', [ ( 'Treeview.treearea', { 'sticky' : 'nswe' } ) ] )
#=============================================================================
class Data( ttk.Treeview ):
    """
    Models the data displayed by a report.
    """

    #=========================================================================
    def __init__( self, master, **kwargs ):
        """
        Initializes a Data object.
        @param master The parent widget
        @param kwargs Keyword arguments passed through to ttk.Treeview
        """

        # initialize the parent Treeview; direct call with **kwargs replaces
        # the deprecated apply() builtin (removed in Python 3)
        ttk.Treeview.__init__( self, master, **kwargs )

        # ZIH - left off setting up columns/headings here

    #=========================================================================
    def append( self, record ):
        """
        Appends a record to the report.
        @param record Sequence whose first element is the item's text label
                      and whose remaining elements fill the extra columns
        """

        self.insert(
            '',                     # empty string signifies root item ID
            'end',                  # "end" = insertion at end of list
            text   = record[ 0 ],   # item text label
            values = record[ 1 : ]  # additional text labels in columns
        )
#=============================================================================
class Report( ttk.Frame ):
    """
    Report container widget
    The report widget is made up of several internal widgets that use a grid
    for layout control.
    The "data" widget is used to manage and display the data in the report.
    There are two scrollbar widgets that are connected to the data widget.
    """

    #=========================================================================
    def __init__( self, master, **kwargs ):
        """
        Initializes a Report object.
        @param master The parent widget
        @param kwargs Keyword arguments passed through to ttk.Frame
        """

        # initialize the parent frame; direct call with **kwargs replaces
        # the deprecated apply() builtin (removed in Python 3)
        ttk.Frame.__init__( self, master, **kwargs )

        # set up resizable layout
        self.columnconfigure( 0, weight = 1 )
        self.rowconfigure( 0, weight = 1 )

        # create the data representation widget
        self.data = ReportData( self )

        # add the data widget to the report
        self.data.grid(
            row    = 0,
            column = 0,
            sticky = ( tk.N + tk.S + tk.E + tk.W )
        )

        # add a vertical scrollbar to the report widget
        self.vertical_scrollbar = ttk.Scrollbar(
            self,
            orient  = 'vertical',
            command = self.data.yview
        )

        # add a horizontal scrollbar to the report widget
        self.horizontal_scrollbar = ttk.Scrollbar(
            self,
            orient  = 'horizontal',
            command = self.data.xview
        )

        # attach the scrollbar inputs to the data widget's outputs
        self.data.configure(
            yscrollcommand = self.vertical_scrollbar.set,
            xscrollcommand = self.horizontal_scrollbar.set
        )

        # lay out the scrollbars
        self.vertical_scrollbar.grid(
            row    = 0,
            column = 1,
            sticky = ( tk.N + tk.S )
        )
        self.horizontal_scrollbar.grid(
            row    = 1,
            column = 0,
            sticky = ( tk.E + tk.W )
        )
#=============================================================================
class ReportColumn( object ):
    """
    Models a column in the report.  There is no visual component to this,
    it's only used to handle data and utility methods.
    """

    #=========================================================================
    def __init__( self, report, label = '', name = None ):
        """
        Initializes a ReportColumn object.
        @param report The Treeview-derived widget that owns this column
                      (it must provide heading() and column())
        @param label  Heading text displayed for the column
        @param name   Internal column identifier; defaults to the label
        """
        self.report = report
        self.label = label
        self.font = tkFont.Font()
        self.width = 0
        if name is None:
            self.name = label
        else:
            self.name = name
        report.heading( self.name, text = self.label )
        ## ZIH - temp - add this to heading setup
        #, command = functools.partial( self._cmd_sort, c )
        self.fitWidth()

    #=========================================================================
    def fitWidth( self, text = None ):
        """
        Grows the column's display width to fit the given text.
        @param text Text to measure; when None, the heading label is
                    measured and the column's minwidth is also set
        """
        ## ZIH -
        # font measurement is off.  error aggregates with more characters.
        #   short strings (2-ish characters) measure smaller than rendered.
        #   long strings (10+ characters) measure larger than rendered.
        #   consider implementing a curved adjustment to sizes based on
        #   number of characters (piecewise linear curve)
        # a global "ui scale" factor could be added to the user's config
        #   that would multiply the end measurement for odd situations
        ## ZIH - suspect Tkinter isn't actually measuring crap... it's
        #   probably making up some generic em width, and multiplying by the
        #   length of the string
        if text is None:
            text_size = self.font.measure( self.label )
            # NOTE(review): minwidth uses the width from *before* the
            # measurement below updates it - confirm this is intended
            self.report.column( self.name, minwidth = ( self.width + 5 ) )
        else:
            text_size = self.font.measure( text )
        # only ever grow the column; never shrink it
        if self.width < text_size:
            self.width = text_size
            self.report.column( self.name, width = ( self.width + 5 ) )
#=============================================================================
class ReportData( ttk.Treeview ):
    """
    Represents the data contained in a report.
    """

    #=========================================================================
    def __init__( self, master, **kwargs ):
        """
        Initializes a ReportData object.
        @param master The parent widget
        @param kwargs Keyword arguments passed through to ttk.Treeview
        """

        # initialize the parent Treeview; direct call with **kwargs replaces
        # the deprecated apply() builtin (removed in Python 3)
        ttk.Treeview.__init__( self, master, **kwargs )

        # start the widget with a minimum height
        self.configure( height = 20 )

        # create storage for the report columns
        self._columns = []

    #=========================================================================
    def append( self, record ):
        """
        Appends a record to the report.
        @param record Sequence of field values; record[ 0 ] becomes the
                      item's text label, the rest fill the extra columns
        """

        # insert the record in the tree view
        self.insert( '', 'end', text = record[ 0 ], values = record[ 1 : ] )

        # check for changes in column sizes
        for index, field in enumerate( record ):
            self._columns[ index ].fitWidth( field )

    #=========================================================================
    def clear( self ):
        """
        Removes all records from the report.
        """
        for record_id in self.get_children():
            self.delete( record_id )

    #=========================================================================
    def set_columns( self, columns ):
        """
        Sets the column names for the report.
        @param columns List of column heading labels
        """

        # discard the existing column handling objects (the old pop/del
        # loop did nothing extra over simple rebinding)
        self._columns = []

        # count the total number of columns to display
        num_columns = len( columns )

        # build reproducible column names for future reference
        names = [ 'col_%02d' % c for c in range( num_columns ) ]

        # set the special column name (always refers to the first column)
        names[ 0 ] = '#0'

        # reassign the "extra" columns list in the control
        self[ 'columns' ] = tuple( names[ 1 : ] )

        # set up each column's display/behavior in the control
        for c in range( num_columns ):
            # create a column handling object (performs widget setup)
            self._columns.append(
                ReportColumn( self, label = columns[ c ], name = names[ c ] )
            )

    #=========================================================================
    def _cmd_sort( self, column_index ):
        """
        Placeholder handler for sorting by a column heading.
        """
        ## ZIH - temp
        # parenthesized form works as a statement in Python 2 and as a
        # function call in Python 3
        print( 'Hey! Sort by column %d' % column_index )
#=============================================================================
if __name__ == "__main__":
    """Manual test harness: shows a bare Data widget with sample rows."""

    # dependencies only needed for testing
    import sys
    import util

    # some test data for testing reports
    test_data = [
        [ 0, 'Baker',   200 ],
        [ 1, 'Charlie', 100 ],
        [ 2, 'Adam',     42 ]
    ]

    # create a simple test application
    app = util.TestApp()
    app.master.title( 'Report Test Application' )
    app.master.geometry( '480x320' )

    # create a bare data listing widget
    bare_data = util.create_wrapped( Data, app, row = 0 )

    # put data in the widget
    for r in test_data:
        bare_data.append( r )

    # create a report, and set up the widget layout
    #rep = util.create_wrapped( Report, app, row = 1 )

    # put data in the report
    #rep.data.set_columns( [ 'A', 'B', 'C' ] )
    #for r in test_data:
    #    rep.data.append( r )

    # run the test application
    app.mainloop()

    # return to the shell
    sys.exit( 0 )
|
{
"content_hash": "1fe810f4d4fd297f5a3ab5f8c632d4eb",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 30.827118644067795,
"alnum_prop": 0.48361557070595995,
"repo_name": "zhester/hzpy",
"id": "c1b09142d58469fdea63953d9f7dec5e6e9e538e",
"size": "9118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/hztk/report.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "37"
},
{
"name": "CSS",
"bytes": "55"
},
{
"name": "HTML",
"bytes": "309"
},
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "484663"
}
],
"symlink_target": ""
}
|
"""
A utility script that purges st2 executions older than certain
timestamp.
*** RISK RISK RISK. You will lose data. Run at your own risk. ***
"""
from __future__ import absolute_import
from datetime import datetime
import six
import pytz
from oslo_config import cfg
from st2common import config
from st2common import log as logging
from st2common.config import do_register_cli_opts
from st2common.script_setup import setup as common_setup
from st2common.script_setup import teardown as common_teardown
from st2common.constants.exit_codes import SUCCESS_EXIT_CODE
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2common.garbage_collection.executions import purge_executions
__all__ = ["main"]
LOG = logging.getLogger(__name__)
def _register_cli_opts():
    """Register the command-line options this script understands."""
    cli_opts = [
        cfg.StrOpt(
            "timestamp",
            default=None,
            help="Will delete execution and liveaction models older than "
            "this UTC timestamp. "
            "Example value: 2015-03-13T19:01:27.255542Z.",
        ),
        cfg.StrOpt(
            "action-ref", default="", help="action-ref to delete executions for."
        ),
        cfg.BoolOpt(
            "purge-incomplete",
            default=False,
            # Fixed help text: typo "succeeeded", missing space after the
            # first sentence, and a stray space before the comma.
            help="Purge all models irrespective of their ``status``. "
            'By default, only executions in completed states such as "succeeded", '
            '"failed", "canceled" and "timed_out" are deleted.',
        ),
    ]
    do_register_cli_opts(cli_opts)
def main():
    """Entry point: set up config/DB, then purge executions older than the
    supplied ``--timestamp``.

    Returns SUCCESS_EXIT_CODE on success, FAILURE_EXIT_CODE otherwise.
    """
    _register_cli_opts()
    common_setup(config=config, setup_db=True, register_mq_exchanges=False)

    # Get config values
    timestamp = cfg.CONF.timestamp
    action_ref = cfg.CONF.action_ref
    purge_incomplete = cfg.CONF.purge_incomplete

    if not timestamp:
        LOG.error("Please supply a timestamp for purging models. Aborting.")
        # Consistency fix: previously returned a bare 1; use the shared
        # failure constant like the exception path below.
        return FAILURE_EXIT_CODE

    # Parse the UTC timestamp and make it timezone-aware.
    timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
    timestamp = timestamp.replace(tzinfo=pytz.UTC)

    try:
        purge_executions(
            logger=LOG,
            timestamp=timestamp,
            action_ref=action_ref,
            purge_incomplete=purge_incomplete,
        )
    except Exception as e:
        LOG.exception(six.text_type(e))
        return FAILURE_EXIT_CODE
    finally:
        # Always tear down DB/config state, even on failure.
        common_teardown()

    return SUCCESS_EXIT_CODE
|
{
"content_hash": "cc51ac27cbfc129a4f42ec494535d4e0",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 85,
"avg_line_length": 29.21951219512195,
"alnum_prop": 0.6456594323873122,
"repo_name": "StackStorm/st2",
"id": "27225d661c96dc838a8422966e7244a1e4c72f8a",
"size": "3025",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/st2common/cmd/purge_executions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron import manager
from neutron.tests.unit import test_db_plugin
from oslo.config import cfg
# Dotted path of the test plugin loaded by the DB-backed test cases below.
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
                   'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case; records whether the loaded plugin supports the
    port-security extension so dependent tests can skip themselves."""

    def setUp(self, plugin=None, ext_mgr=None):
        super(AllowedAddressPairTestCase, self).setUp(plugin)

        # Check if a plugin supports security groups
        plugin_obj = manager.NeutronManager.get_plugin()
        self._skip_port_security = ('port-security' not in
                                    plugin_obj.supported_extension_aliases)
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
                                   db_base_plugin_v2.NeutronDbPluginV2,
                                   addr_pair_db.AllowedAddressPairsMixin):

    """Test plugin that implements necessary calls on create/delete port for
    associating ports with port security and allowed address pairs.
    """

    supported_extension_aliases = ["allowed-address-pairs"]

    def create_port(self, context, port):
        """Create a port and persist any supplied allowed address pairs
        within the same DB transaction."""
        p = port['port']
        with context.session.begin(subtransactions=True):
            neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
                context, port)
            p.update(neutron_db)
            if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
                self._process_create_allowed_address_pairs(
                    context, p,
                    p[addr_pair.ADDRESS_PAIRS])
            else:
                p[addr_pair.ADDRESS_PAIRS] = None
        return port['port']

    def update_port(self, context, id, port):
        """Update a port; when the request adds or clears address pairs,
        replace the stored set wholesale inside one transaction."""
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)

        with context.session.begin(subtransactions=True):
            ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
                context, id, port)
            # copy values over - but not fixed_ips
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])

            if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, ret_port,
                    ret_port[addr_pair.ADDRESS_PAIRS])
        return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
    """Runs the test case against the DB-backed test plugin by default."""

    def setUp(self, plugin=None, ext_mgr=None):
        plugin = plugin or DB_PLUGIN_KLASS
        super(AllowedAddressPairDBTestCase,
              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
def test_create_port_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_true_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=True,
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_false_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=False,
allowed_address_pairs=address_pairs)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_port_bad_mac(self):
address_pairs = [{'mac_address': 'invalid_mac',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_port_bad_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1222'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_missing_ip_field(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_duplicate_mac_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_more_than_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'},
{'mac_address': '00:00:00:00:00:04',
'ip_address': '10.0.0.4'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_equal_to_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'}]
self._create_port_with_address_pairs(address_pairs, 201)
def test_add_address_pairs_on_baned_net(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
with self.network() as network:
network_id = network['network']['id']
cfg.CONF.set_override('limit_network_for_pair',
network_id,
'unitedstack')
res = self._create_port(self.fmt, network_id)
port = self.deserialize(self.fmt, res)
address_pairs = [{'ip_address': '10.0.0.1'}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
self._delete('ports', port['port']['id'])
def test_create_overlap_with_fixed_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.2'}]
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
res = self._create_port(self.fmt, network['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,
'fixed_ips'),
allowed_address_pairs=address_pairs,
fixed_ips=fixed_ips)
self.assertEqual(res.status_int, 201)
port = self.deserialize(self.fmt, res)
self._delete('ports', port['port']['id'])
def test_create_port_extra_args(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1',
'icbb': 'agreed'}]
self._create_port_with_address_pairs(address_pairs, 400)
def _create_port_with_address_pairs(self, address_pairs, ret_code):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, ret_code)
if ret_code == 201:
self._delete('ports', port['port']['id'])
def test_update_add_address_pairs(self):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_update_add_none_and_own_mac_address_pairs(self):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)
mac_address = port['port']['mac_address']
address_pairs = [{'ip_address': '10.0.0.1'},
{'mac_address': mac_address,
'ip_address': '10.0.0.1'}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
self._delete('ports', port['port']['id'])
def test_create_address_gets_port_mac(self):
with self.network() as net:
address_pairs = [{'ip_address': '23.23.23.23'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)['port']
port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
self.assertEqual(port_addr_mac,
port['mac_address'])
self._delete('ports', port['id'])
    def test_update_port_security_off_address_pairs(self):
        """Disabling port security while address pairs exist must 409."""
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            with self.subnet(network=net):
                address_pairs = [{'mac_address': '00:00:00:00:00:01',
                                  'ip_address': '10.0.0.1'}]
                # Create a port with port security on AND an address pair,
                # the combination that must block the later update.
                res = self._create_port(self.fmt, net['network']['id'],
                                        arg_list=('port_security_enabled',
                                        addr_pair.ADDRESS_PAIRS,),
                                        port_security_enabled=True,
                                        allowed_address_pairs=address_pairs)
                port = self.deserialize(self.fmt, res)
                update_port = {'port': {psec.PORTSECURITY: False}}
                # If plugin implements security groups we also need to remove
                # the security group on port.
                plugin_obj = manager.NeutronManager.get_plugin()
                if 'security-groups' in plugin_obj.supported_extension_aliases:
                    update_port['port']['security_groups'] = []
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                # 409 Conflict: allowed address pairs require port security.
                self.assertEqual(res.status_int, 409)
                self._delete('ports', port['port']['id'])
def test_create_port_remove_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
    # Re-run the entire allowed-address-pairs suite over the XML API format.
    fmt = 'xml'
|
{
"content_hash": "f26847068c68ca699b20aae779fa1028",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 49.46254071661238,
"alnum_prop": 0.5193282844912743,
"repo_name": "CingHu/neutron-ustack",
"id": "c5e599a057075f11b7e265b794a1df18bc7cc483",
"size": "15777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_allowedaddresspairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "11544804"
},
{
"name": "Shell",
"bytes": "29485"
}
],
"symlink_target": ""
}
|
"""PollSubmissions API Tests for Version 1.0.
This is a testing template for the generated PollSubmissionsAPI Class.
"""
import unittest
import requests
import secrets
from pycanvas.apis.poll_submissions import PollSubmissionsAPI
from pycanvas.apis.poll_submissions import Pollsubmission
class TestPollSubmissionsAPI(unittest.TestCase):
    """Tests for the PollSubmissionsAPI."""

    def setUp(self):
        """Build a client against the Canvas instance named in secrets."""
        self.client = PollSubmissionsAPI(secrets.instance_address,
                                         secrets.access_token)

    def test_get_single_poll_submission(self):
        """Integration test for the PollSubmissionsAPI.get_single_poll_submission method."""
        # Fill in real identifiers before running this integration test.
        poll_id = None  # Change me!!
        poll_session_id = None  # Change me!!
        id = None  # Change me!!
        response = self.client.get_single_poll_submission(
            id, poll_id, poll_session_id)

    def test_create_single_poll_submission(self):
        """Integration test for the PollSubmissionsAPI.create_single_poll_submission method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass
|
{
"content_hash": "9ac166b8534584ee98d663e607fc9ac1",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 126,
"avg_line_length": 38.43333333333333,
"alnum_prop": 0.7085862966175195,
"repo_name": "PGower/PyCanvas",
"id": "03842a9b470943463d1cea893a56ce11ab49ee7d",
"size": "1153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycanvas/tests/poll_submissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1877053"
}
],
"symlink_target": ""
}
|
import os
from ingenico.connect.sdk.factory import Factory
from ingenico.connect.sdk.domain.sessions.session_request import SessionRequest
class CreateSessionExample(object):
    """Example: create a hosted-checkout session for a merchant."""

    def example(self):
        """Build a SessionRequest carrying a set of tokens and submit it."""
        with self.__get_client() as client:
            body = SessionRequest()
            body.tokens = [
                "126166b16ed04b3ab85fb06da1d7a167",
                "226166b16ed04b3ab85fb06da1d7a167",
                "122c5b4d-dd40-49f0-b7c9-3594212167a9",
                "326166b16ed04b3ab85fb06da1d7a167",
                "426166b16ed04b3ab85fb06da1d7a167",
            ]
            response = client.merchant("merchantId").sessions().create(body)

    def __get_client(self):
        """Create an SDK client from the bundled example configuration.

        The API key id and secret come from the environment, with
        placeholder fallbacks.
        """
        key_id = os.getenv("connect.api.apiKeyId", "someKey")
        secret_key = os.getenv("connect.api.secretApiKey", "someSecret")
        config_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         '../../example_configuration.ini'))
        return Factory.create_client_from_file(
            configuration_file_name=config_path,
            api_key_id=key_id, secret_api_key=secret_key)
|
{
"content_hash": "e2e4cf8d0554fc5a5664d01afdd6e787",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 100,
"avg_line_length": 44.48275862068966,
"alnum_prop": 0.6263565891472869,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "ca6b66cc2c223a3e7c9da910c57eb4f97f1f47ab",
"size": "1417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/merchant/sessions/create_session_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
}
|
"""The tests for the image_processing component."""
from unittest.mock import patch, PropertyMock
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.setup import setup_component
from homeassistant.exceptions import HomeAssistantError
import homeassistant.components.http as http
import homeassistant.components.image_processing as ip
from tests.common import (
get_test_home_assistant, get_test_instance_port, assert_setup_component)
from tests.components.image_processing import common
class TestSetupImageProcessing:
    """Test class for setup image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_setup_component(self):
        """Set up demo platform on image_process component."""
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN,
                            {ip.DOMAIN: {'platform': 'demo'}})

    def test_setup_component_with_service(self):
        """Set up demo platform on image_process component test service."""
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN,
                            {ip.DOMAIN: {'platform': 'demo'}})

        assert self.hass.services.has_service(ip.DOMAIN, 'scan')
class TestImageProcessing:
    """Test class for image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # HTTP is needed so the camera exposes an entity-picture URL.
        setup_component(
            self.hass, http.DOMAIN,
            {http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}})
        config = {
            ip.DOMAIN: {
                'platform': 'test'
            },
            'camera': {
                'platform': 'demo'
            },
        }
        setup_component(self.hass, ip.DOMAIN, config)
        # Absolute URL of the demo camera's still image, scanned below.
        state = self.hass.states.get('camera.demo_camera')
        self.url = "{0}{1}".format(
            self.hass.config.api.base_url,
            state.attributes.get(ATTR_ENTITY_PICTURE))

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    @patch('homeassistant.components.camera.demo.DemoCamera.camera_image',
           autospec=True, return_value=b'Test')
    def test_get_image_from_camera(self, mock_camera):
        """Grab an image from camera entity."""
        self.hass.start()
        common.scan(self.hass, entity_id='image_processing.test')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.test')
        # The test platform stores the raw bytes it was handed.
        assert mock_camera.called
        assert state.state == '1'
        assert state.attributes['image'] == b'Test'

    @patch('homeassistant.components.camera.async_get_image',
           side_effect=HomeAssistantError())
    def test_get_image_without_exists_camera(self, mock_image):
        """Try to get image without exists camera."""
        self.hass.states.remove('camera.demo_camera')
        common.scan(self.hass, entity_id='image_processing.test')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.test')
        # Failure to fetch the image leaves the processing count at 0.
        assert mock_image.called
        assert state.state == '0'
class TestImageProcessingAlpr:
    """Test class for alpr image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        config = {
            ip.DOMAIN: {
                'platform': 'demo'
            },
            'camera': {
                'platform': 'demo'
            },
        }
        # Disable polling so scans only happen when triggered explicitly.
        with patch('homeassistant.components.image_processing.demo.'
                   'DemoImageProcessingAlpr.should_poll',
                   new_callable=PropertyMock(return_value=False)):
            setup_component(self.hass, ip.DOMAIN, config)
        state = self.hass.states.get('camera.demo_camera')
        self.url = "{0}{1}".format(
            self.hass.config.api.base_url,
            state.attributes.get(ATTR_ENTITY_PICTURE))
        # Collect every found_plate event fired during a test.
        self.alpr_events = []

        @callback
        def mock_alpr_event(event):
            """Mock event."""
            self.alpr_events.append(event)

        self.hass.bus.listen('image_processing.found_plate', mock_alpr_event)

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    # NOTE(review): the aioclient_mock argument is a pytest fixture mixed
    # into a unittest-style class — confirm the harness injects it.
    def test_alpr_event_single_call(self, aioclient_mock):
        """Set up and scan a picture and test plates from event."""
        aioclient_mock.get(self.url, content=b'image')
        common.scan(self.hass, entity_id='image_processing.demo_alpr')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.demo_alpr')
        assert len(self.alpr_events) == 4
        assert state.state == 'AC3829'
        event_data = [event.data for event in self.alpr_events if
                      event.data.get('plate') == 'AC3829']
        assert len(event_data) == 1
        assert event_data[0]['plate'] == 'AC3829'
        assert event_data[0]['confidence'] == 98.3
        assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'

    def test_alpr_event_double_call(self, aioclient_mock):
        """Set up and scan a picture and test plates from event."""
        aioclient_mock.get(self.url, content=b'image')
        # Two scans in a row must not fire duplicate events for the
        # same plates (4 events total, not 8).
        common.scan(self.hass, entity_id='image_processing.demo_alpr')
        common.scan(self.hass, entity_id='image_processing.demo_alpr')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.demo_alpr')
        assert len(self.alpr_events) == 4
        assert state.state == 'AC3829'
        event_data = [event.data for event in self.alpr_events if
                      event.data.get('plate') == 'AC3829']
        assert len(event_data) == 1
        assert event_data[0]['plate'] == 'AC3829'
        assert event_data[0]['confidence'] == 98.3
        assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'

    # Raising the confidence threshold to 95 filters the demo plates
    # down to 2 events; the reported per-plate confidence is unchanged.
    @patch('homeassistant.components.image_processing.demo.'
           'DemoImageProcessingAlpr.confidence',
           new_callable=PropertyMock(return_value=95))
    def test_alpr_event_single_call_confidence(self, confidence_mock,
                                               aioclient_mock):
        """Set up and scan a picture and test plates from event."""
        aioclient_mock.get(self.url, content=b'image')
        common.scan(self.hass, entity_id='image_processing.demo_alpr')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.demo_alpr')
        assert len(self.alpr_events) == 2
        assert state.state == 'AC3829'
        event_data = [event.data for event in self.alpr_events if
                      event.data.get('plate') == 'AC3829']
        assert len(event_data) == 1
        assert event_data[0]['plate'] == 'AC3829'
        assert event_data[0]['confidence'] == 98.3
        assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'
class TestImageProcessingFace:
    """Test class for face image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        config = {
            ip.DOMAIN: {
                'platform': 'demo'
            },
            'camera': {
                'platform': 'demo'
            },
        }
        # Disable polling so scans only happen when triggered explicitly.
        with patch('homeassistant.components.image_processing.demo.'
                   'DemoImageProcessingFace.should_poll',
                   new_callable=PropertyMock(return_value=False)):
            setup_component(self.hass, ip.DOMAIN, config)
        state = self.hass.states.get('camera.demo_camera')
        self.url = "{0}{1}".format(
            self.hass.config.api.base_url,
            state.attributes.get(ATTR_ENTITY_PICTURE))
        # Collect every detect_face event fired during a test.
        self.face_events = []

        @callback
        def mock_face_event(event):
            """Mock event."""
            self.face_events.append(event)

        self.hass.bus.listen('image_processing.detect_face', mock_face_event)

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    # NOTE(review): aioclient_mock is a pytest fixture mixed into a
    # unittest-style class — confirm the harness injects it.
    def test_face_event_call(self, aioclient_mock):
        """Set up and scan a picture and test faces from event."""
        aioclient_mock.get(self.url, content=b'image')
        common.scan(self.hass, entity_id='image_processing.demo_face')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.demo_face')
        assert len(self.face_events) == 2
        assert state.state == 'Hans'
        assert state.attributes['total_faces'] == 4
        event_data = [event.data for event in self.face_events if
                      event.data.get('name') == 'Hans']
        assert len(event_data) == 1
        assert event_data[0]['name'] == 'Hans'
        assert event_data[0]['confidence'] == 98.34
        assert event_data[0]['gender'] == 'male'
        assert event_data[0]['entity_id'] == \
            'image_processing.demo_face'

    # With no confidence threshold the state falls back to the face
    # count ('4') instead of a recognized name.
    @patch('homeassistant.components.image_processing.demo.'
           'DemoImageProcessingFace.confidence',
           new_callable=PropertyMock(return_value=None))
    def test_face_event_call_no_confidence(self, mock_config, aioclient_mock):
        """Set up and scan a picture and test faces from event."""
        aioclient_mock.get(self.url, content=b'image')
        common.scan(self.hass, entity_id='image_processing.demo_face')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.demo_face')
        assert len(self.face_events) == 3
        assert state.state == '4'
        assert state.attributes['total_faces'] == 4
        event_data = [event.data for event in self.face_events if
                      event.data.get('name') == 'Hans']
        assert len(event_data) == 1
        assert event_data[0]['name'] == 'Hans'
        assert event_data[0]['confidence'] == 98.34
        assert event_data[0]['gender'] == 'male'
        assert event_data[0]['entity_id'] == \
            'image_processing.demo_face'
|
{
"content_hash": "2c127eba1add201e6e4f9e74bd8b44e3",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 78,
"avg_line_length": 35.11036789297659,
"alnum_prop": 0.5969708515907792,
"repo_name": "nugget/home-assistant",
"id": "7a31b2ffadfc4a858834ff197edefe9288fc5f21",
"size": "10498",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/image_processing/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
}
|
import datetime
import logging
import requests
from .exceptions import RequestsError, InputError
logging.basicConfig()
class Connection(object):
    """HTTP wrapper around the modem's goform web interface.

    Keeps a single requests.Session configured with the headers the
    device's stock web UI sends, and decodes goform responses.
    """

    BASE_URL = 'http://192.168.9.1'

    def __init__(self):
        self._session = requests.Session()
        self._set_headers()

    def get(self, params):
        """Send a GET to the get_cmd endpoint.

        *params* must be a dict of non-empty str keys to str values;
        raises InputError otherwise, RequestsError on transport failure.
        """
        # BUG FIX: this previously logged 'calling post' (messages in
        # get()/post() were swapped).
        logging.getLogger(__name__).debug('calling get')
        logging.getLogger(__name__).debug(params)
        self._validate_dict(params)
        params.update(self._extra_params())
        request = requests.Request('GET', self._get_url(), params=params)
        return self._handle_request(request)

    def post(self, data):
        """Send a POST to the set_cmd endpoint.

        *data* must be a dict of non-empty str keys to str values;
        raises InputError otherwise, RequestsError on transport failure.
        """
        # BUG FIX: this previously logged 'calling get'.
        logging.getLogger(__name__).debug('calling post')
        logging.getLogger(__name__).debug(data)
        self._validate_dict(data)
        data.update(self._extra_params())
        request = requests.Request('POST', self._post_url(), data=data)
        return self._handle_request(request)

    def _get_url(self):
        """URL of the read-only command endpoint."""
        logging.getLogger(__name__).debug('calling _get_url')
        return self.BASE_URL + '/goform/goform_get_cmd_process'

    def _post_url(self):
        """URL of the mutating command endpoint."""
        logging.getLogger(__name__).debug('calling _post_url')
        return self.BASE_URL + '/goform/goform_set_cmd_process'

    def _handle_request(self, request):
        """Prepare and send *request*; return the decoded response body.

        Any requests-level failure (including non-2xx status) is logged
        and re-raised as RequestsError.
        """
        logging.getLogger(__name__).debug('calling handle_request')
        try:
            prepared_request = self._session.prepare_request(request)
            logging.getLogger(__name__).debug(
                'Request %s', prepared_request.url)
            logging.getLogger(__name__).debug(
                'Request Method: %s', prepared_request.method)
            logging.getLogger(__name__).debug(
                'Request Headers: %s', prepared_request.headers)
            logging.getLogger(__name__).debug(
                'Request Body: %s', prepared_request.body)
            # Long read timeout: some modem commands respond very slowly.
            response = self._session.send(
                prepared_request, stream=True, timeout=(3.05, 120))
            response.raise_for_status()
            logging.getLogger(__name__).debug('Response %s', response.text)
            logging.getLogger(__name__).debug(
                'Response Status %s', response.status_code)
            logging.getLogger(__name__).debug(
                'Response Headers %s', response.headers)
            logging.getLogger(__name__).debug(
                'Response Cookies %s', response.cookies.items())
            return self._decode_json(response)
        except requests.RequestException as ex:
            logging.getLogger(__name__).error(ex)
            raise RequestsError(ex)

    def _set_headers(self):
        """Install the default headers the device's own web UI sends."""
        logging.getLogger(__name__).debug('calling _set_headers')
        self._session.headers.update({
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.BASE_URL + '/home.htm',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-GB,en-US;q=0.8,en;q=0.6',
        })

    @staticmethod
    def _decode_json(response):
        """Return response.json(), falling back to the raw text."""
        logging.getLogger(__name__).debug('calling _decode_json')
        try:
            return response.json()
        except ValueError as ex:
            # Incorrectly escaped json is returned in some instances,
            # so it must be handled gracefully
            logging.getLogger(__name__).debug(ex)
            return response.text

    @staticmethod
    def _extra_params():
        """Return the cache-busting '_' parameter (epoch milliseconds)."""
        logging.getLogger(__name__).debug('calling _extra_params')
        delta = datetime.datetime.now() - datetime.datetime(1970, 1, 1)
        milliseconds = delta.total_seconds() * 1000
        params = {
            '_': '{0:13.0f}'.format(milliseconds),
        }
        return params

    @staticmethod
    def _validate_dict(params_or_data):
        """Raise InputError unless input is a dict of non-empty str -> str."""
        logging.getLogger(__name__).debug('calling _validate_dict')
        logging.getLogger(__name__).debug(params_or_data)
        # not isinstance(None, dict) also covers the None case.
        if not isinstance(params_or_data, dict):
            raise InputError('Invalid parameters',
                             params_or_data, type(params_or_data))
        for key, val in params_or_data.items():
            if not isinstance(key, str) or len(key) == 0:
                raise InputError('Invalid parameter key', key, val)
            if not isinstance(val, str):
                raise InputError('Invalid parameter value', key, val)
# Shared connection behind the module-level convenience wrappers below.
# NOTE: instantiated at import time, so importing this module immediately
# creates a requests.Session.
DEFAULT_CONNECTION = Connection()
def get(params):
    # Delegate to the shared connection's GET helper.
    return DEFAULT_CONNECTION.get(params)
def post(params):
    # Delegate to the shared connection's POST helper.
    return DEFAULT_CONNECTION.post(params)
|
{
"content_hash": "90e2fde28c767b910219d26129a7296b",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 36.51968503937008,
"alnum_prop": 0.5937904269081501,
"repo_name": "alzeih/python-vodem-vodafone-K4607-Z",
"id": "1d64d231ac0b7e5c0d52dac8852a57be724084d4",
"size": "4638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vodem/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19346"
},
{
"name": "JavaScript",
"bytes": "444689"
},
{
"name": "Python",
"bytes": "84811"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
# URL routes for the schedules app.
# NOTE(review): views are referenced by dotted-path strings, a style
# removed in Django 1.10 — confirm the project's Django version before
# upgrading.
urlpatterns = [
    url(r'^$', 'schedules.views.index', name='index'),
    url(r'^home$', 'schedules.views.home', name='home'),
    url(r'^login$', 'schedules.views.log_in', name='login'),
    url(r'^logout$', "schedules.views.log_out", name="logout"),
    url(r'^query-users$', 'schedules.views.query_users'),
    url(r'^project/create$', 'schedules.views.create_project'),
    # Per-project routes; keep these before the catch-all 'home' pattern
    # since the slug group (.+?) is non-greedy.
    url(r'^project/s/(?P<project_slug>.+?)/all-members', 'schedules.views.query_project_members'),
    url(r'^project/s/(?P<project_slug>.+?)/remove-member', 'schedules.views.query_remove_member'),
    url(r'^project/s/(?P<project_slug>.+?)/add-member', 'schedules.views.add_member'),
    url(r'^project/s/(?P<project_slug>.+?)/submit$', 'schedules.views.submit_schedule'),
    url(r'^project/s/(?P<project_slug>.+?)/home$', 'schedules.views.project'),
]
|
{
"content_hash": "250e45abca3c4a3c8c67b40d8e2544b1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 98,
"avg_line_length": 50.94117647058823,
"alnum_prop": 0.6466512702078522,
"repo_name": "Shelo/Schedules",
"id": "6468fe4aa0af8a24a8abd258e5b56561e1cbc1b4",
"size": "866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schedules/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20340"
},
{
"name": "HTML",
"bytes": "341641"
},
{
"name": "JavaScript",
"bytes": "2999"
},
{
"name": "Python",
"bytes": "17366"
}
],
"symlink_target": ""
}
|
from logging import Formatter
from logging.handlers import RotatingFileHandler
from celery.utils.log import logging
def sketchy_logger(app):
    """Attach Sketchy's rotating file handler to *app*'s logger.

    The log file path comes from app.config['SKETCHY_LOG_FILE']; files
    rotate at ~10 MB with up to 100 backups, and the logger is set to
    DEBUG level.
    """
    log_format = Formatter('%(asctime)s %(levelname)s: %(message)s '
                           '[in %(pathname)s:%(lineno)d]')
    handler = RotatingFileHandler(app.config.get('SKETCHY_LOG_FILE'),
                                  maxBytes=10000000,
                                  backupCount=100)
    handler.setFormatter(log_format)
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)
|
{
"content_hash": "804c1702ebb54f2d4caaea57ee3ea3e7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6654676258992805,
"repo_name": "Netflix/sketchy",
"id": "0913de0b5345eb789694e2d3e90765e8b53c1b68",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sketchy/loggers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "238224"
},
{
"name": "Python",
"bytes": "59985"
},
{
"name": "Shell",
"bytes": "3058"
}
],
"symlink_target": ""
}
|
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
from designate import exceptions
from designate.tests import fixtures
from designate.tests import TestCase
from designate.worker import processing
CONF = cfg.CONF
class TestProcessingExecutor(TestCase):
    """Unit tests for the worker processing Executor."""

    def setUp(self):
        super(TestProcessingExecutor, self).setUp()
        self.stdlog = fixtures.StandardLogging()
        self.useFixture(cfg_fixture.Config(CONF))
        self.useFixture(self.stdlog)

    def test_default_executor(self):
        """The thread pool size follows the worker 'threads' option."""
        CONF.set_override('threads', 100, 'service:worker')
        self.assertEqual(100, processing.default_executor()._max_workers)

    def test_execute_multiple_tasks(self):
        """Results come back in the same order the tasks were given."""
        def task_one():
            return 1

        def task_two():
            return 2

        task_list = [task_one, task_two, task_one, task_two, task_one]
        executor = processing.Executor()
        self.assertEqual([1, 2, 1, 2, 1], executor.run(task_list))

    def test_execute_single_task(self):
        """A single callable may be passed without wrapping in a list."""
        def task_one():
            return 1

        executor = processing.Executor()
        self.assertEqual(1, executor.run(task_one)[0])

    def test_execute_bad_task(self):
        """A failing task yields None and its error lands in the log."""
        def failed_task():
            raise exceptions.BadAction('Not Great')

        executor = processing.Executor()
        self.assertIsNone(executor.run(failed_task)[0])
        self.assertIn('Not Great', self.stdlog.logger.output)

    def test_executor_name_with_task(self):
        """A task_name attribute is preferred when present."""
        fake_task = mock.NonCallableMock(spec_set=[
            'task_name',
        ])
        fake_task.task_name = 'task_name'
        self.assertEqual('task_name',
                         processing.Executor().task_name(fake_task))

    def test_executor_name_with_func(self):
        """func_name is used when the object has no task_name."""
        fake_task = mock.NonCallableMock(spec_set=[
            'func_name',
        ])
        fake_task.func_name = 'func_name'
        self.assertEqual('func_name',
                         processing.Executor().task_name(fake_task))
|
{
"content_hash": "3462d9a8b12be3c2d49fb6aff06da2ca",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 63,
"avg_line_length": 26.128205128205128,
"alnum_prop": 0.6211972522080471,
"repo_name": "openstack/designate",
"id": "5229b1573e574aec33ccde3a4af79116e6322e6e",
"size": "2684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/tests/unit/workers/test_processing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "71074"
},
{
"name": "Jinja",
"bytes": "2004"
},
{
"name": "Mako",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "2442862"
},
{
"name": "Shell",
"bytes": "46200"
}
],
"symlink_target": ""
}
|
import pysal
from pysal.cg.standalone import get_shared_segments
import numpy as np
from collections import defaultdict
from itertools import combinations
import multiprocessing as mp
# delta to get buckets right
DELTA = 0.000001
# Contiguity criteria codes: shared vertex (queen) vs shared edge (rook).
QUEEN = 1
ROOK = 2
# constants for bucket sizes
BUCK_SM = 8
BUCK_LG = 80
# Shapefiles with fewer polygons than this use the small bucket divisor.
SHP_SMALL = 1000
def bbcommon(bb, bbother):
    """Return 1 when two bounding boxes overlap (or touch), else 0.

    Boxes are (minx, miny, maxx, maxy) sequences, i.e. element 0 is
    west, 1 is south, 2 is east, 3 is north.  The boxes are considered
    common only when they overlap both east-west and north-south.
    """
    disjoint_x = (bbother[2] < bb[0]) or (bbother[0] > bb[2])
    disjoint_y = (bbother[3] < bb[1]) or (bbother[1] > bb[3])
    if disjoint_x or disjoint_y:
        return 0
    return 1
def bin_shapefile(shpFile, wtype='rook', n_cols=10, n_rows=10, buff=1.0001):
    """Spatially bin the polygons of *shpFile* to find potential neighbors.

    Returns a dict with 'n_polygons', 'shapes' (the polygon objects) and
    'potential_neighbors' (polyId -> set of higher-numbered candidates
    whose bounding boxes overlap), or False for non-polygon shapefiles.
    NOTE: wtype, n_cols, n_rows and buff are currently unused.
    """
    shpFileObject = pysal.open(shpFile)
    if shpFileObject.type != pysal.cg.Polygon:
        return False
    shapebox = shpFileObject.bbox # bounding box
    numPoly = len(shpFileObject)
    shapes = [[]] * numPoly
    # bucket size
    if (numPoly < SHP_SMALL):
        bucketmin = numPoly / BUCK_SM + 2
    else:
        bucketmin = numPoly / BUCK_LG + 2
    # bucket length
    lengthx = ((shapebox[2] + DELTA) - shapebox[0]) / bucketmin
    lengthy = ((shapebox[3] + DELTA) - shapebox[1]) / bucketmin
    # initialize buckets
    columns = [set() for i in range(bucketmin)]
    rows = [set() for i in range(bucketmin)]
    minbox = shapebox[:2] * \
        2 # minx,miny,minx,miny
    binWidth = [lengthx, lengthy] * \
        2 # lenx,leny,lenx,leny
    bbcache = {}
    poly2Column = [set() for i in range(numPoly)]
    poly2Row = [set() for i in range(numPoly)]
    # Assign every polygon's bounding box to the column/row buckets it
    # spans; bbcache keeps the boxes for the overlap tests below.
    for i in range(numPoly):
        shpObj = shpFileObject.get(i)
        bbcache[i] = shpObj.bounding_box[:]
        shapes[i] = shpObj
        projBBox = [int((shpObj.bounding_box[:][j] -
                         minbox[j]) / binWidth[j]) for j in xrange(4)]
        for j in range(projBBox[0], projBBox[2] + 1):
            columns[j].add(i)
            poly2Column[i].add(j)
        for j in range(projBBox[1], projBBox[3] + 1):
            rows[j].add(i)
            poly2Row[i].add(j)
    # loop over polygons rather than bins
    w = {}
    for polyId in xrange(numPoly):
        idRows = poly2Row[polyId]
        idCols = poly2Column[polyId]
        rowPotentialNeighbors = set()
        colPotentialNeighbors = set()
        for row in idRows:
            rowPotentialNeighbors = rowPotentialNeighbors.union(rows[row])
        for col in idCols:
            colPotentialNeighbors = colPotentialNeighbors.union(
                columns[col])
        # Candidates must share at least one row AND one column bucket.
        potentialNeighbors = rowPotentialNeighbors.intersection(
            colPotentialNeighbors)
        if polyId not in w:
            w[polyId] = set()
        # Only record the pair once (polyId < j) after a bbox overlap test.
        for j in potentialNeighbors:
            if polyId < j:
                if bbcommon(bbcache[polyId], bbcache[j]):
                    w[polyId].add(j)
    results = {}
    results['n_polygons'] = numPoly
    results['potential_neighbors'] = w
    results['shapes'] = shapes
    return results
def check_joins(potential_neighbors, shapes, weight_type='ROOK',
                polygon_ids = []):
    """Resolve potential neighbors into actual contiguity joins.

    QUEEN joins require a shared vertex, ROOK joins a shared edge.
    Returns a symmetric dict polyId -> set of neighbor ids, or None for
    an unknown weight_type.  NOTE: this module is Python 2 code (xrange,
    print statements).
    """
    w = {}
    weight_type = weight_type.upper()
    if not polygon_ids:
        polygon_ids = xrange(len(shapes))
    if weight_type == 'QUEEN':
        # check for a shared vertex
        vertCache = {}
        for polyId in polygon_ids:
            iVerts = shapes[polyId].vertices
            nbrs = potential_neighbors[polyId]
            if polyId not in vertCache:
                vertCache[polyId] = set(iVerts)
            if polyId not in w:
                w[polyId] = set()
            for j in nbrs:
                join = False
                if j not in vertCache:
                    vertCache[j] = set(shapes[j].vertices)
                common = vertCache[polyId].intersection(vertCache[j])
                if len(common) > 0:
                    join = True
                if join:
                    # Record the join symmetrically.
                    w[polyId].add(j)
                    if j not in w:
                        w[j] = set()
                    w[j].add(polyId)
        return w
    elif weight_type == 'ROOK':
        # check for a shared edge
        edgeCache = {}
        for polyId in polygon_ids:
            if polyId not in edgeCache:
                # Store each edge in both directions so orientation of
                # the shared edge does not matter.
                iEdges ={}
                iVerts = shapes[polyId].vertices
                nv = len(iVerts)
                ne = nv - 1
                for i in range(ne):
                    l = iVerts[i]
                    r = iVerts[i+1]
                    iEdges[(l,r)] = []
                    iEdges[(r,l)] = []
                edgeCache[polyId] = iEdges
            nbrs = potential_neighbors[polyId]
            if polyId not in w:
                w[polyId] = set()
            for j in nbrs:
                join = False
                if j not in edgeCache:
                    jVerts = shapes[j].vertices
                    jEdges = {}
                    nv = len(jVerts)
                    ne = nv - 1
                    for e in range(ne):
                        l = jVerts[e]
                        r = jVerts[e+1]
                        jEdges[(l,r)] = []
                        jEdges[(r,l)] = []
                    edgeCache[j] = jEdges
                # One shared edge is enough; stop at the first match.
                for edge in edgeCache[j]:
                    if edge in edgeCache[polyId]:
                        join = True
                        w[polyId].add(j)
                        if j not in w:
                            w[j] = set()
                        w[j].add(polyId)
                        break
        return w
    else:
        print 'unsupported weight type'
        return None
def pcheck_joins(potential_neighbors, shapes, mdict, x, step, weight_type='ROOK',
                 polygon_ids = []):
    """Parallel worker: resolve joins for the polygon slice [x, x+step).

    QUEEN results are returned as a dict (polyId -> set of neighbors);
    ROOK results are written into the shared manager dict *mdict* so the
    parent process can collect them.  The polygon_ids argument is
    overridden by the x/step slice.
    """
    # Each worker handles a contiguous slice of the polygon ids,
    # clamped to the number of shapes.
    polygon_ids = range(x, x + step)
    if x + step > len(shapes):
        polygon_ids = range(x, len(shapes))
    weight_type = weight_type.upper()
    if not polygon_ids:
        polygon_ids = range(len(shapes))
    if weight_type == 'QUEEN':
        # check for a shared vertex
        # BUG FIX: w was never initialized in this branch (the 'w = {}'
        # line had been commented out), so any QUEEN call raised
        # NameError.
        w = {}
        vertCache = {}
        for polyId in polygon_ids:
            iVerts = shapes[polyId].vertices
            nbrs = potential_neighbors[polyId]
            if polyId not in vertCache:
                vertCache[polyId] = set(iVerts)
            if polyId not in w:
                w[polyId] = set()
            for j in nbrs:
                if j not in vertCache:
                    vertCache[j] = set(shapes[j].vertices)
                common = vertCache[polyId].intersection(vertCache[j])
                if len(common) > 0:
                    # Record the join symmetrically.
                    w[polyId].add(j)
                    if j not in w:
                        w[j] = set()
                    w[j].add(polyId)
        return w
    elif weight_type == 'ROOK':
        # check for a shared edge; edges are cached in both directions
        # so orientation of the shared edge does not matter.
        edgeCache = {}
        for polyId in polygon_ids:
            if polyId not in edgeCache:
                iEdges = {}
                iVerts = shapes[polyId].vertices
                for i in range(len(iVerts) - 1):
                    l = iVerts[i]
                    r = iVerts[i + 1]
                    iEdges[(l, r)] = []
                    iEdges[(r, l)] = []
                edgeCache[polyId] = iEdges
            nbrs = potential_neighbors[polyId]
            for j in nbrs:
                if j not in edgeCache:
                    jVerts = shapes[j].vertices
                    jEdges = {}
                    for e in range(len(jVerts) - 1):
                        l = jVerts[e]
                        r = jVerts[e + 1]
                        jEdges[(l, r)] = []
                        jEdges[(r, l)] = []
                    edgeCache[j] = jEdges
                # One shared edge is enough; stop at the first match.
                for edge in edgeCache[j]:
                    if edge in edgeCache[polyId]:
                        # Manager dict proxies only see re-assignment,
                        # not in-place mutation, hence read/modify/store.
                        d = mdict[polyId]
                        d.add(j)
                        mdict[polyId] = d
                        if j not in mdict:
                            mdict[j] = set()
                        k = mdict[j]
                        k.add(polyId)
                        mdict[j] = k
                        break
    else:
        # print(...) with a single argument behaves identically under
        # Python 2 (parenthesized expression) and Python 3.
        print('unsupported weight type')
        return None
# Benchmark driver (Python 2): compares the reference pysal contiguity
# builder against the refactored serial version and the multiprocessing
# version, checking that all three produce identical weights.
if __name__ == "__main__":
    import time
    #fname = pysal.examples.get_path('10740.shp')
    fname = '100x100.shp' #pysal.examples.get_path('nat.shp')
    t0 = time.time()
    c = pysal.weights.Contiguity.ContiguityWeights(pysal.open(fname), ROOK)
    t1 = time.time()
    print "using " + str(fname)
    print "time elapsed for ... using bins: " + str(t1 - t0)
    # First call warms caches; only the second call is timed.
    res= bin_shapefile(fname)
    t2 = time.time()
    res = bin_shapefile(fname)
    w = check_joins(res['potential_neighbors'], res['shapes'])
    t3 = time.time()
    print 'time refactored prior to parallelization: ', str(t3-t2)
    print w[0]
    print c.w == w
    if c.w != w:
        # Show exactly which polygons disagree with the reference.
        keys = c.w.keys()
        for key in keys:
            if c.w[key] != w[key]:
                print key, c.w[key], w[key]
    t4 = time.time()
    #mp test using Processes
    cores = mp.cpu_count()
    pool = mp.Pool()
    step = len(res['shapes']) / cores
    manager = mp.Manager()
    mdict = manager.dict() #The w
    for x in range(len(res['shapes'])):
        mdict[x] = set()
    # NOTE(review): pool.Process is the pool's internal process class;
    # mp.Process would be the conventional spelling — confirm intent.
    jobs = [pool.Process(target=pcheck_joins, args=(res['potential_neighbors'], res['shapes'],mdict,x,step)) for x in range(0,len(res['shapes']), len(res['shapes'])/cores)]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
    t5 = time.time()
    print "MP using Process: {0}".format(t5-t4)
    print c.w == mdict
    if c.w != mdict:
        keys = c.w.keys()
        for key in keys:
            if c.w[key] != mdict[key]:
                print key, c.w[key], mdict[key]
|
{
"content_hash": "e1e844e462df5ce44b6f7069a338c52a",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 172,
"avg_line_length": 32.21533923303835,
"alnum_prop": 0.48539511033788113,
"repo_name": "pysal/pPysal",
"id": "2042221847fffd917b28256be180ec9966fde594",
"size": "10921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weights/_contW_binning_par.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2559"
},
{
"name": "FORTRAN",
"bytes": "241"
},
{
"name": "HTML",
"bytes": "42412"
},
{
"name": "Python",
"bytes": "429423"
},
{
"name": "Shell",
"bytes": "2658"
}
],
"symlink_target": ""
}
|
from trac.core import Component, implements
from trac.web import ITemplateStreamFilter
from trac.ticket import ITicketManipulator, ITicketChangeListener
from trac.ticket.model import Ticket, TicketSystem, Type
from genshi.filters import Transformer
from genshi.builder import tag
import re
# Extracts every run of digits (ticket ids) from a relation field value.
NUMBERS_RE = re.compile(r'\d+', re.U)
# A well-formed relation field value contains only digits, commas and spaces.
VALID_RELATION = re.compile(r'^[\d, ]+$')
class TicketRelationSystem(Component):
    """Maintain bidirectional relations between tickets.

    Relations are declared under ``[ticket-relation]`` in trac.ini and are
    stored in two custom ticket fields per relation (``<name>_a`` and
    ``<name>_b``).  This component keeps both sides in sync on ticket
    changes/deletions and renders the fields in the ticket view.
    """

    implements(
        #ITemplateProvider,
        ITemplateStreamFilter,
        ITicketChangeListener,
        #ITicketManipulator
    )

    # Lazily-built cache of the relation model (name -> Relation).
    _relations = None
    _relations_ready = False

    def build_relations(self):
        """Build and cache the relation model from config.

        [ticket-relation]
        subticket_relation = bug -> bug
        subticket_relation.type = many -> many
        subticket_relation.label = Parent Tickets -> Child Tickets
        """
        if not self._relations_ready:
            self._relations = {}
            for name in [i for i in self.config['ticket-relation'] if '.' not in i]:
                try:
                    self._relations[name] = Relation(name, self.config)
                except Exception as e:
                    self.log.warning(str(e))
            self.check_and_create_fields(self._relations)
            # BUG FIX: this flag was never set, so the model was rebuilt
            # (and the custom fields re-checked) on every call.
            self._relations_ready = True
        return self._relations

    def check_and_create_fields(self, relations):
        """Create the custom fields backing each relation; save config once."""
        dirty = False
        config = self.config['ticket-custom']
        for relation in relations.values():
            for role in ('a', 'b'):
                field = relation.name + '_' + role
                relation_type = getattr(relation, 'relation_type_' + role)
                if field not in config:
                    # 'one' relations hold a single id, 'many' a comma list.
                    config.set(field, 'text' if relation_type == 'one' else 'textarea')
                    config.set(field + '.label', getattr(relation, 'label_' + role))
                    config.set(field + '.relation_type', relation_type)
                    if relation_type == 'many':
                        config.set(field + '.format', 'summary,status,owner')
                    dirty = True
        if dirty:
            self.config.save()

    def get_relations_id(self, ticket, relation, role):
        """Return the set of related ticket ids (as strings) on *ticket*."""
        return set(NUMBERS_RE.findall(ticket[relation.name + '_' + role] or ''))

    def get_relations(self, ticket, relation, role):
        """Return the related Ticket objects for *ticket* in *role*."""
        id = self.get_relations_id(ticket, relation, role)
        return map(lambda x: Ticket(self.env, x), id)

    # ITicketChangeListener methods
    def ticket_created(self, ticket):
        pass

    def ticket_deleted(self, ticket):
        """Remove references to a deleted ticket from its related tickets."""
        def _relation_changed(relation, role):
            # BUG FIX: get_relations_id is a method of this component, not
            # of Relation -- the original `relation.get_relations_id(...)`
            # raised AttributeError.
            relations = self.get_relations_id(ticket, relation, role)
            for target_ticket in relations:
                xticket = Ticket(self.env, target_ticket)
                if self.remove_relation(xticket, ticket.id, relation.name, self.opposite(role)):
                    xticket.save_changes('', '(#%s %s) %s' % (ticket.id, ticket['summary'], 'Ticket deleted.'))
        for relation in self.build_relations().values():
            if relation.ticket_type_a == ticket['type']:
                _relation_changed(relation, 'a')
            if relation.ticket_type_b == ticket['type']:
                _relation_changed(relation, 'b')

    def ticket_changed(self, ticket, comment, author, old_values):
        """Propagate relation-field edits to the tickets on the other side."""
        def _relation_changed(relation, role):
            field = relation.name + '_' + role
            old_relations = set(NUMBERS_RE.findall(old_values.get(field, '') or ''))
            new_relations = set(NUMBERS_RE.findall(ticket[field] or ''))
            if new_relations == old_relations:
                return
            # remove old relations
            for target_ticket in old_relations - new_relations:
                xticket = Ticket(self.env, target_ticket)
                if self.remove_relation(xticket, ticket.id, relation.name, self.opposite(role)):
                    xticket.save_changes(author, '(#%s %s) %s' % (ticket.id, ticket['summary'], comment))
            # add new relations
            for target_ticket in new_relations - old_relations:
                xticket = Ticket(self.env, target_ticket)
                if self.add_relation(xticket, ticket.id, relation.name, self.opposite(role)):
                    xticket.save_changes(author, '(#%s %s) %s' % (ticket.id, ticket['summary'], comment))
        for relation in self.build_relations().values():
            if relation.ticket_type_a == ticket['type']:
                _relation_changed(relation, 'a')
            if relation.ticket_type_b == ticket['type']:
                _relation_changed(relation, 'b')

    def opposite(self, role):
        """Return the other role: 'a' <-> 'b'."""
        return 'a' if role == 'b' else 'b'

    def remove_relation(self, ticket, id, relation, role):
        """Remove *id* from *ticket*'s field; True if it was present."""
        value = ticket[relation + '_' + role]
        ids = map(unicode.strip, value.split(',')) if value is not None else []
        if str(id) in ids:
            if '' in ids:
                ids.remove('')
            ids.remove(str(id))
            ticket[relation + '_' + role] = ','.join(ids)
            return True
        else:
            return False

    def add_relation(self, ticket, id, relation, role):
        """Add *id* to *ticket*'s field; True if it was not already there."""
        value = ticket[relation + '_' + role]
        ids = map(unicode.strip, value.split(',')) if value is not None else []
        if str(id) not in ids:
            if '' in ids:
                ids.remove('')
            ids.append(str(id))
            ticket[relation + '_' + role] = ','.join(ids)
            return True
        else:
            return False

    # ITicketManipulator methods
    def prepare_ticket(self, req, ticket, fields, actions):
        pass

    def validate_ticket(self, req, ticket):
        """Validate relation fields; return a list of problem messages."""
        def _relation_validate(relation, role):
            # BUG FIX: the default '' was being passed to re.match (as its
            # `pos` argument) instead of to ticket.get.
            value = ticket.get(relation.name + '_' + role, '')
            # An empty field is a valid "no relations" state.
            if value and not VALID_RELATION.match(value):
                return 'Invalid relation.'
            return None
        result = []
        for relation in self.build_relations().values():
            if relation.ticket_type_a == ticket['type']:
                result.append(_relation_validate(relation, 'a'))
            if relation.ticket_type_b == ticket['type']:
                result.append(_relation_validate(relation, 'b'))
        # BUG FIX: the original collected results but returned None.
        return [r for r in result if r is not None]

    # ITemplateStreamFilter methods
    def filter_stream(self, req, method, filename, stream, data):
        """Render relation fields as links/tables on the ticket page."""
        if filename != "ticket.html" and filename != 'ticket_preview.html':
            return stream
        if 'ticket' in data:
            ticket = data['ticket']
            stream |= Transformer('//head').append(tag.style("""
            .relation_table {
                width: 100%;
            }
            .relation_table td {
                border-bottom: dotted 1px #eed;
            }
            """))
            for relation in self.build_relations().values():
                # NOTE(review): `elif` means a self-relation (same ticket
                # type on both sides) only renders its 'a' field -- confirm
                # this is intended.
                if relation.ticket_type_a == ticket['type']:
                    stream = self._generate_html(relation, relation.relation_type_a, 'a', stream, ticket)
                elif relation.ticket_type_b == ticket['type']:
                    stream = self._generate_html(relation, relation.relation_type_b, 'b', stream, ticket)
        return stream

    def _generate_html(self, relation, relation_type, relation_role, stream, ticket):
        """Replace the raw field value with a link (one) or a table (many)."""
        config = self.config['ticket-custom']
        try:
            if relation_type == 'one':
                if ticket[relation.name + '_' + relation_role] is not None:
                    target_ticket = Ticket(self.env, int(ticket[relation.name + '_' + relation_role]))
                    stream |= Transformer('//div[@id="ticket"]//td[@headers="h_%s"]/text()' % (relation.name + '_' + relation_role)) \
                        .replace(tag.a('#%s %s' % (target_ticket.id, target_ticket['summary']), href='/ticket/' + str(target_ticket.id)))
            else:
                if ticket[relation.name + '_' + relation_role] is not None:
                    target_tickets = [Ticket(self.env, int(i)) for i in ticket[relation.name + '_' + relation_role].split(',')]
                    format = map(unicode.strip, config.get(relation.name + '_' + relation_role + '.format').split(','))
                    tbody = tag.tbody()
                    for target_ticket in target_tickets:
                        columns = [tag.td(tag.a('#' + str(target_ticket.id), href='/ticket/' + str(target_ticket.id)))]
                        columns.extend([tag.td(target_ticket[field]) for field in format])
                        tbody.append(tag.tr(*columns))
                    stream |= Transformer('//div[@id="ticket"]//td[@headers="h_%s"]/text()' % (relation.name + '_' + relation_role)) \
                        .replace(tag.table(tbody, class_='relation_table'))
        except Exception as e:
            self.log.error(str(e))
        return stream
class Relation(object):
    """Parsed definition of one [ticket-relation] entry.

    For each role ('a' / 'b') this holds the ticket type, the relation
    type ('one' or 'many') and the custom-field label.
    """
    def __init__(self, name, config):
        self.name = name
        self.ticket_type_a = None
        self.ticket_type_b = None
        self.relation_type_a = None
        self.relation_type_b = None
        self.label_a = None
        self.label_b = None
        try:
            for opt_name, opt_value in config.options('ticket-relation'):
                if opt_name.startswith(name):
                    if opt_name == name:
                        # "<ticket type a> -> <ticket type b>"
                        self.ticket_type_a, self.ticket_type_b = map(unicode.strip, opt_value.split('->'))
                    elif opt_name == name + '.type':
                        # NOTE(review): assigned (b, a) -- the reverse of the
                        # ticket-type line above.  Confirm the swap is
                        # intended before "fixing" it.
                        self.relation_type_b, self.relation_type_a = map(unicode.strip, opt_value.split('->'))
                        # Anything that is not exactly 'one' counts as 'many'.
                        self.relation_type_a = 'one' if self.relation_type_a == 'one' else 'many'
                        self.relation_type_b = 'one' if self.relation_type_b == 'one' else 'many'
                    elif opt_name == name + '.label':
                        # NOTE(review): labels are also assigned swapped
                        # (b, a), matching the .type line above.
                        self.label_b, self.label_a = map(unicode.strip, opt_value.split('->'))
            # All six attributes must have been provided by the config.
            if None in [self.ticket_type_a, self.ticket_type_b, self.relation_type_a, self.relation_type_b, self.label_a, self.label_b]:
                raise Exception()
        except:
            raise Exception('Ticket Relation: %s is not properly configured.' % name)
    def get_ticket_type(self, role):
        # Ticket type for role 'a' or 'b'; None for any other role value.
        if role == 'a':
            return self.ticket_type_a
        elif role == 'b':
            return self.ticket_type_b
        else:
            return None
    def get_label(self, role):
        # Field label for role 'a' or 'b'; None for any other role value.
        if role == 'a':
            return self.label_a
        elif role == 'b':
            return self.label_b
        else:
            return None
|
{
"content_hash": "b9932f21da37384dc2bd1a9ec23f77d6",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 137,
"avg_line_length": 39.15438596491228,
"alnum_prop": 0.5512142665113361,
"repo_name": "CaulyKan/TracTicketRelationPlugin",
"id": "7b0f6e10e5839dd3645e2dfe5ca9a7c0716e62a8",
"size": "11160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ticketrelation/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1022"
},
{
"name": "HTML",
"bytes": "12782"
},
{
"name": "JavaScript",
"bytes": "316006"
},
{
"name": "Python",
"bytes": "37111"
},
{
"name": "Vue",
"bytes": "19570"
}
],
"symlink_target": ""
}
|
import os.path
import sys
from functools import wraps
from django.conf import settings as django_settings
from django.core.management.utils import get_random_secret_key
from fabric.api import (cd, env, prefix, prompt, put, quiet, require, run,
settings, sudo, task)
from fabric.colors import green, yellow
from fabric.contrib import django
# put project directory in path
project_root = os.path.abspath(os.path.dirname(__file__))
sys.path.append(project_root)
# -------------------------------
# SETTINGS VARIABLES
# Please verify each variable below and edit as necessary to match
# your project configuration.
# TODO: externalise to settings to base.py
# so this becomes a generic script without project-specific code.
# The name of the Django app for this project
# Folder that contains wsgi.py
PROJECT_NAME = 'mpol'
# Git repository pointer
REPOSITORY = 'https://github.com/kingsdigitallab/{}-django.git'.format(
PROJECT_NAME)
# Default SSH gateway (may be overridden by FABRIC_GATEWAY below).
env.gateway = 'ssh.kdl.kcl.ac.uk'
# Host names used as deployment targets
env.hosts = ['{}3.kdl.kcl.ac.uk'.format(PROJECT_NAME)]
# Absolute filesystem path to project 'webroot'
env.root_path = '/vol/mpol3/webroot/'
# Absolute filesystem path to project Django root
env.django_root_path = '/vol/mpol3/webroot/'
# Absolute filesystem path to Python virtualenv for this project
env.envs_path = os.path.join(env.root_path, 'envs')
# -------------------------------
# Point fabric.contrib.django at this project's settings module.
django.project(PROJECT_NAME)
# Set FABRIC_GATEWAY = 'username@proxy.x' in local.py
# if you are behind a proxy.
FABRIC_GATEWAY = getattr(django_settings, 'FABRIC_GATEWAY', None)
if FABRIC_GATEWAY:
    env.forward_agent = True
    env.gateway = FABRIC_GATEWAY
# Name of linux user who deploys on the remote server
env.user = django_settings.FABRIC_USER
def server(func):
    """Wraps functions that set environment variables for servers.

    Each decorated task is recorded in ``env.servers`` so later tasks can
    use ``require(..., provided_by=env.servers)``.
    """
    @wraps(func)
    def decorated(*args, **kwargs):
        registry = getattr(env, 'servers', None)
        if registry is None:
            registry = env.servers = []
        registry.append(func)
        return func(*args, **kwargs)
    return decorated
@task
@server
def dev():
    """Select the development server as the deployment target."""
    env.srvr = 'dev'
    set_srvr_vars()
@task
@server
def stg():
    """Select the staging server as the deployment target."""
    env.srvr = 'stg'
    set_srvr_vars()
@task
@server
def liv():
    """Select the live (production) server as the deployment target."""
    env.srvr = 'liv'
    set_srvr_vars()
def set_srvr_vars():
    """Derive per-server paths once ``env.srvr`` has been chosen."""
    # Absolute filesystem path to the django project root
    # Contains manage.py
    env.path = os.path.join(env.root_path, env.srvr, 'django',
                            '{}-django'.format(PROJECT_NAME))
    # Shell snippet that activates this server's virtualenv.
    env.within_virtualenv = 'source {}'.format(
        os.path.join(get_virtual_env_path(), 'bin', 'activate'))
@task
def setup_environment(version=None):
    """One-shot provisioning: virtualenv + clone + checkout + requirements."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    create_virtualenv()
    clone_repo()
    update(version)
    install_requirements()
@task
def create_virtualenv():
    """Create the server's virtualenv unless it already exists."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with quiet():
        env_vpath = get_virtual_env_path()
        # A successful remote `ls` means the directory is already there.
        if run('ls {}'.format(env_vpath)).succeeded:
            print(
                green('virtual environment at [{}] exists'.format(env_vpath)))
            return
    print(yellow('setting up virtual environment in [{}]'.format(env_vpath)))
    run('virtualenv {}'.format(env_vpath))
def get_virtual_env_path():
    '''Returns the absolute path to the python virtualenv for the server
    (dev, stg, liv) we are working on.
    E.g. /vol/mpol3/webroot/envs/dev
    '''
    return os.path.join(env.envs_path, env.srvr)
@task
def clone_repo():
    """Clone the project repository unless a checkout already exists."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with quiet():
        # A .git directory means the checkout is already in place.
        if run('ls {}'.format(os.path.join(env.path, '.git'))).succeeded:
            print(green(('repository at'
                         ' [{}] exists').format(env.path)))
            return
    # Typo fix: was "cloneing".
    print(yellow('cloning repository to [{}]'.format(env.path)))
    run('git clone {} {}'.format(REPOSITORY, env.path))
@task
def install_requirements():
    """Install the server-specific requirements file into the virtualenv."""
    fix_permissions('virtualenv')
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    reqs = 'requirements-{}.txt'.format(env.srvr)
    # NOTE(review): this checks the *local* checkout for the file but
    # installs on the remote host -- assumes both ship the same files.
    # Idiom fix: was `assert` + `except AssertionError`, which silently
    # breaks under `python -O` (asserts are stripped).
    if not os.path.exists(reqs):
        reqs = 'requirements.txt'
    with cd(env.path), prefix(env.within_virtualenv):
        # GN: | cat to prevent shard-shaped progress bar polluting the output
        # Until --no-progress-bar option appears in new pip version
        run('pip install -q --no-cache -U -r {}'.format(reqs))
@task
def reinstall_requirement(which):
    """Force-reinstall a single pip package *which*, without dependencies."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with cd(env.path), prefix(env.within_virtualenv):
        run('pip uninstall {0} && pip install --no-deps {0}'.format(which))
@task
def deploy(version=None):
    """Full deployment: code, deps, settings, DB, static files, cache, reload."""
    update(version)
    install_requirements()
    upload_local_settings()
    own_django_log()
    # fix_permissions()
    migrate()
    collect_static()
    # update_index()
    clear_cache()
    # Touch wsgi.py last so the reloaded app sees all of the above.
    touch_wsgi()
@task
def update(version=None):
    """Check out *version*, or the default branch for the current server.

    dev/local/vagrant track ``develop``; every other server ``master``.
    """
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    if version:
        # try specified version first
        to_version = version
    elif env.srvr in ['local', 'vagrant', 'dev']:
        # `elif` already implies `not version`; the redundant re-test
        # has been removed.
        to_version = 'develop'
    else:
        # else deploy to master branch
        to_version = 'master'
    with cd(env.path), prefix(env.within_virtualenv):
        run('git pull')
        run('git checkout {}'.format(to_version))
@task
def upload_local_settings():
    """Upload settings/local.py and append DB credentials and a secret key.

    Runs only when the remote local.py is missing; an existing file is
    left untouched.  Prompts interactively for the database host/password.
    """
    require('srvr', 'path', provided_by=env.servers)
    with cd(env.path):
        with settings(warn_only=True):
            if run('ls {}/settings/local.py'.format(PROJECT_NAME)).failed:
                db_host = prompt('Database host: ')
                db_pwd = prompt('Database password: ')
                put('{}/settings/local_{}.py'.format(PROJECT_NAME, env.srvr),
                    '{}/settings/local.py'.format(PROJECT_NAME), mode='0664')
                run('echo >> {}/settings/local.py'.format(PROJECT_NAME))
                # Secrets are injected remotely via `echo` so they never
                # live in the repository or the uploaded template.
                run('echo '
                    '"DATABASES[\'default\'][\'PASSWORD\'] = \'{}\'" >>'
                    '{}/settings/local.py'.format(db_pwd, PROJECT_NAME))
                run('echo '
                    '"DATABASES[\'default\'][\'HOST\'] = \'{}\'" >>'
                    '{}/settings/local.py'.format(db_host, PROJECT_NAME))
                run('echo '
                    '"SECRET_KEY = \'{}\'" >>'
                    '{}/settings/local.py'.format(
                        get_random_secret_key(), PROJECT_NAME))
@task
def own_django_log():
    """ make sure logs/django.log is owned by www-data"""
    # GN: why do we need VE for this task?
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with quiet():
        log_path = os.path.join(env.path, 'logs', 'django.log')
        # chown only if the log file already exists on the server.
        if run('ls {}'.format(log_path)).succeeded:
            sudo('chown www-data:kdl-staff {}'.format(log_path))
@task
def fix_permissions(category='static'):
    '''
    Reset the permissions on various paths.
    category: determines which set of paths to work on:
    'static' (default): django static path + general project path
    'virtualenv': fix the virtualenv permissions
    '''
    # GN: why do we need VE?
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    processed = False
    with quiet():
        if category == 'static':
            processed = True
            log_path = os.path.join(env.path, 'logs', 'django.log')
            # Only adjust ACLs once the app has been deployed (the log
            # file is created by the running application).
            if run('ls {}'.format(log_path)).succeeded:
                # Grant www-data and kdl-staff rwx on logs/static, both
                # on existing files (-R) and as defaults (-d) for new ones.
                sudo('setfacl -R -m g:www-data:rwx {0}/logs {0}/static'.
                     format(env.path))
                sudo('setfacl -R -d -m g:www-data:rwx {0}/logs {0}/static'.
                     format(env.path))
                sudo('setfacl -R -m g:kdl-staff:rwx {0}/logs {0}/static'.
                     format(env.path))
                sudo('setfacl -R -d -m g:kdl-staff:rwx {0}/logs {0}/static'.
                     format(env.path))
                sudo('chgrp -Rf kdl-staff {}'.format(env.path))
                sudo('chmod -Rf g+w {}'.format(env.path))
        if category == 'virtualenv':
            path = get_virtual_env_path()
            sudo('chgrp -Rf kdl-staff {}'.format(path))
            sudo('chmod -Rf g+rw {}'.format(path))
            processed = True
        if not processed:
            raise Exception(
                'fix_permission(category="{}"): unrecognised category name.'.
                format(category)
            )
@task
def migrate(app=None):
    """Run Django migrations, optionally restricted to a single *app*."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py migrate {}'.format(app if app else ''))
@task
def collect_static(process=False):
    """Run collectstatic remotely; *process* enables post-processing.

    Skipped on local/vagrant servers.
    """
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    if env.srvr in ['local', 'vagrant']:
        print(yellow('Do not run collect_static on local servers'))
        return
    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py collectstatic {process} --noinput'.format(
            process=('--no-post-process' if not process else '')))
@task
def update_index():
    """Rebuild the search index via the project's update_index command."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py update_index')
@task
def clear_cache():
    """Clear the Django cache; on live also flush Apache Traffic Server."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py clear_cache')
    if env.srvr in ['liv']:
        print(yellow('Live environment detected, clearing cache'))
        # Traffic Server must be stopped before its cache can be cleared.
        sudo('/etc/init.d/trafficserver stop')
        sudo('traffic_server -Cclear')
        sudo('/etc/init.d/trafficserver start')
@task
def touch_wsgi():
    """Touch wsgi.py so the WSGI server reloads the application."""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)
    with cd(os.path.join(env.path, PROJECT_NAME)), \
            prefix(env.within_virtualenv):
        run('touch wsgi.py')
|
{
"content_hash": "04d13833a9f0ff5e916c76800ff9f9cb",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 78,
"avg_line_length": 30.67168674698795,
"alnum_prop": 0.6069920455661396,
"repo_name": "kingsdigitallab/mpol-django",
"id": "e7fe7752c1450887047c143310298d50829d41d4",
"size": "10230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66023"
},
{
"name": "HTML",
"bytes": "66412"
},
{
"name": "JavaScript",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "108791"
},
{
"name": "Shell",
"bytes": "3782"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from typing import Optional, Type
import irctest
from irctest.basecontrollers import BaseServicesController, DirectoryBasedController
import irctest.cases
import irctest.runner
TEMPLATE_CONFIG = """
loadmodule "modules/protocol/{protocol}";
loadmodule "modules/backend/opensex";
loadmodule "modules/crypto/pbkdf2";
loadmodule "modules/nickserv/main";
loadmodule "modules/nickserv/cert";
loadmodule "modules/nickserv/register";
loadmodule "modules/nickserv/verify";
loadmodule "modules/saslserv/authcookie";
#loadmodule "modules/saslserv/ecdh-x25519-challenge";
loadmodule "modules/saslserv/ecdsa-nist256p-challenge";
loadmodule "modules/saslserv/external";
loadmodule "modules/saslserv/plain";
#loadmodule "modules/saslserv/scram";
serverinfo {{
name = "services.example.org";
desc = "Atheme IRC Services";
numeric = "00A";
netname = "testnet";
adminname = "no admin";
adminemail = "no-admin@example.org";
registeremail = "registration@example.org";
auth = none; // Disable email check
}};
general {{
commit_interval = 5;
}};
uplink "My.Little.Server" {{
host = "{server_hostname}";
port = {server_port};
send_password = "password";
receive_password = "password";
}};
saslserv {{
nick = "SaslServ";
}};
"""
class AthemeController(BaseServicesController, DirectoryBasedController):
    """Mixin for server controllers that rely on Atheme"""
    def run(self, protocol: str, server_hostname: str, server_port: int) -> None:
        # Write services.conf from TEMPLATE_CONFIG and start atheme-services
        # linking against the uplink at (server_hostname, server_port).
        self.create_config()
        if protocol == "inspircd3":
            # Atheme's protocol module for InspIRCd 3 is just "inspircd".
            protocol = "inspircd"
        assert protocol in ("bahamut", "inspircd", "charybdis", "unreal4")
        with self.open_file("services.conf") as fd:
            fd.write(
                TEMPLATE_CONFIG.format(
                    protocol=protocol,
                    server_hostname=server_hostname,
                    server_port=server_port,
                )
            )
        assert self.directory
        self.proc = subprocess.Popen(
            [
                "atheme-services",
                "-n",  # don't fork
                "-c",
                os.path.join(self.directory, "services.conf"),
                "-l",
                f"/tmp/services-{server_port}.log",
                "-p",
                os.path.join(self.directory, "services.pid"),
                "-D",
                self.directory,
            ],
            # stdout=subprocess.DEVNULL,
            # stderr=subprocess.DEVNULL,
        )
    def registerUser(
        self,
        case: irctest.cases.BaseServerTestCase,
        username: str,
        password: Optional[str] = None,
    ) -> None:
        # Password-based registration only: a password must be supplied.
        assert password
        if len(password.encode()) > 288:
            # It's hardcoded at compile-time :(
            # https://github.com/atheme/atheme/blob/4fa0e03bd3ce2cb6041a339f308616580c5aac29/include/atheme/constants.h#L51
            raise irctest.runner.NotImplementedByController("Passwords over 288 bytes")
        super().registerUser(case, username, password)
def get_irctest_controller_class() -> Type[AthemeController]:
    """Return the services controller class provided by this module."""
    return AthemeController
|
{
"content_hash": "a122f0b17c0b20c237031903ba34dee1",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 123,
"avg_line_length": 29.495412844036696,
"alnum_prop": 0.6202177293934681,
"repo_name": "ProgVal/irctest",
"id": "6877c9f8a93d6eda35600a527f4bd6ecc3c31b4c",
"size": "3215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irctest/controllers/atheme_services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173307"
}
],
"symlink_target": ""
}
|
"""
Given a sorted linked list, delete all nodes that have duplicate numbers,
leaving only distinct numbers from the original list.
For example,
Given 1->2->3->3->4->4->5, return 1->2->5.
Given 1->1->1->2->3, return 2->3.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Remove every value that appears more than once in the sorted list,
        keeping only values that occur exactly once.

        Rewritten without a dummy head node so the solution no longer
        depends on the externally provided ListNode class (only present
        above as a comment in this file).
        """
        new_head = None   # first node that survives deduplication
        tail = None       # last node appended to the result list
        curr = head
        while curr is not None:
            if curr.next is not None and curr.next.val == curr.val:
                # Skip the entire run of nodes sharing a duplicated value.
                dup_val = curr.val
                while curr is not None and curr.val == dup_val:
                    curr = curr.next
            else:
                # Unique value: link the node onto the result list.
                if tail is None:
                    new_head = curr
                else:
                    tail.next = curr
                tail = curr
                curr = curr.next
        if tail is not None:
            tail.next = None  # sever any trailing removed nodes
        return new_head
|
{
"content_hash": "34424c2d69afc577d9038bf17e544f6b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 28.694444444444443,
"alnum_prop": 0.5353339787028074,
"repo_name": "ufjfeng/leetcode-jf-soln",
"id": "49e923e7706d7868cf867bd47be668e60cb671da",
"size": "1033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/082_remove_duplicates_from_sorted_list_ii.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "426793"
},
{
"name": "SQLPL",
"bytes": "738"
},
{
"name": "Shell",
"bytes": "1518"
}
],
"symlink_target": ""
}
|
from truck import Truck
from utils.setting import configuration
def test():
    """Smoke-test Truck: build one, display it, and print its power."""
    truck = Truck('GMC', configuration)
    truck.display()
    print(truck.power)


if __name__ == '__main__':
    test()
|
{
"content_hash": "4254a60581b11fdc289469b146a4d0d5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 21.11111111111111,
"alnum_prop": 0.6473684210526316,
"repo_name": "vollov/py-lab",
"id": "2cd1bc74b919582231794a2e20312b92ac53e434",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/vehicle/run_truck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22"
},
{
"name": "JavaScript",
"bytes": "685"
},
{
"name": "PLSQL",
"bytes": "6838"
},
{
"name": "Python",
"bytes": "254226"
},
{
"name": "Shell",
"bytes": "734"
},
{
"name": "Smarty",
"bytes": "1829"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
from resource_management import *
def slider():
import params
Directory(params.slider_conf_dir,
create_parents=True
)
slider_client_config = params.config['configurations']['slider-client'] if 'configurations' in params.config and 'slider-client' in params.config['configurations'] else {}
XmlConfig("slider-client.xml",
conf_dir=params.slider_conf_dir,
configurations=slider_client_config
)
File(format("{slider_conf_dir}/slider-env.sh"),
mode=0755,
content=InlineTemplate(params.slider_env_sh_template)
)
"""
Directory(params.storm_slider_conf_dir,
create_parents=True
)
File(format("{storm_slider_conf_dir}/storm-slider-env.sh"),
mode=0755,
content=Template('storm-slider-env.sh.j2')
)
"""
if (params.log4j_props != None):
File(format("{params.slider_conf_dir}/log4j.properties"),
mode=0644,
content=params.log4j_props
)
elif (os.path.exists(format("{params.slider_conf_dir}/log4j.properties"))):
File(format("{params.slider_conf_dir}/log4j.properties"),
mode=0644
)
|
{
"content_hash": "f46ce7442c6e4b8e96a34f2ab371f3de",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 173,
"avg_line_length": 31.933333333333334,
"alnum_prop": 0.7113778705636743,
"repo_name": "alexryndin/ambari",
"id": "076e4511987d47b320d09fa8409afec4bfdcf680",
"size": "1916",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
from .client import AdServiceClient
__all__ = ("AdServiceClient",)
|
{
"content_hash": "17bbd59588f0dac7b65f36dc3c4c6127",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7205882352941176,
"repo_name": "googleads/google-ads-python",
"id": "7880d3db959175ea35c3b48ec931fe1d022bc7ec",
"size": "668",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/ad_service/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
"""
This program is a demonstration of face and object detection using Haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak, James Bowman
Updated: Copyright (c) 2016, Tal Regev.
"""
import sys
import os
from optparse import OptionParser
import rclpy
import sensor_msgs.msg
from cv_bridge import CvBridge
import cv2
import numpy
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = (10, 10)    # minimum possible face size passed to the detector
image_scale = 2        # input frames are downscaled by this factor first
haar_scale = 1.2       # detector pyramid scale factor (see comment above)
min_neighbors = 2      # neighbour count required to accept a detection
haar_flags = 0         # legacy flags parameter for detectMultiScale
def detect_and_draw(imgmsg):
    # Callback for raw sensor_msgs Image messages: detect faces and show
    # the annotated frame.  Relies on module-level `br` (CvBridge) and
    # `cascade` (cv2.CascadeClassifier) being initialised before the
    # first message arrives.
    img = br.imgmsg_to_cv2(imgmsg, "bgr8")
    # allocate temporary images
    new_size = (int(img.shape[1] / image_scale), int(img.shape[0] / image_scale))
    # convert color input image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scale input image for faster processing
    small_img = cv2.resize(gray, new_size, interpolation = cv2.INTER_LINEAR)
    small_img = cv2.equalizeHist(small_img)
    if(cascade):
        faces = cascade.detectMultiScale(small_img, haar_scale, min_neighbors, haar_flags, min_size)
        if faces is not None:
            for (x, y, w, h) in faces:
                # the input to detectMultiScale was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv2.rectangle(img, pt1, pt2, (255, 0, 0), 3, 8, 0)
    cv2.imshow("result", img)
    cv2.waitKey(6)
def compressed_detect_and_draw(compressed_imgmsg):
    # Callback for CompressedImage messages; identical to detect_and_draw
    # apart from the decode call and the window name.  Relies on
    # module-level `br` and `cascade` being initialised beforehand.
    img = br.compressed_imgmsg_to_cv2(compressed_imgmsg, "bgr8")
    # allocate temporary images
    new_size = (int(img.shape[1] / image_scale), int(img.shape[0] / image_scale))
    # convert color input image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scale input image for faster processing
    small_img = cv2.resize(gray, new_size, interpolation = cv2.INTER_LINEAR)
    small_img = cv2.equalizeHist(small_img)
    if(cascade):
        faces = cascade.detectMultiScale(small_img, haar_scale, min_neighbors, haar_flags, min_size)
        if faces is not None:
            for (x, y, w, h) in faces:
                # the input to detectMultiScale was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv2.rectangle(img, pt1, pt2, (255, 0, 0), 3, 8, 0)
    cv2.imshow("compressed_result", img)
    cv2.waitKey(6)
def main(args=None):
    """Parse CLI options, subscribe to raw and compressed image topics, and spin.

    args: command-line argument list; defaults to ``sys.argv``.
    """
    # Bug fix: the subscription callbacks (detect_and_draw and
    # compressed_detect_and_draw) look up ``cascade`` and ``br`` at module
    # scope; without this declaration they were only locals of main() and the
    # callbacks raised NameError when the first message arrived.
    global cascade, br
    if args is None:
        args = sys.argv
    rclpy.init(args=args)
    # TODO add this file in the repository and make it relative to this python script. (not all people will run this from linux)
    haarfile = '/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml'
    parser = OptionParser(usage = "usage: %prog [options]")
    parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = haarfile)
    parser.add_option("-t", "--topic", action="store", dest="topic", type="str", help="Topic to find a face on, default %default", default = '/camera/rgb/image_raw')
    parser.add_option("-s", "--ctopic", action="store", dest="ctopic", type="str", help="Compressed topic to find a face on, default %default", default = '/camera/rgb/image/compressed')
    (options, args) = parser.parse_args()
    cascade = cv2.CascadeClassifier()
    cascade.load(options.cascade)
    br = CvBridge()
    node = rclpy.create_node('rosfacedetect')
    node_logger = node.get_logger()
    # NOTE(review): newer rclpy versions require a qos_profile argument to
    # create_subscription — confirm against the targeted ROS 2 distribution.
    sub_img = node.create_subscription(sensor_msgs.msg.Image, options.topic, detect_and_draw)
    sub_cpimg = node.create_subscription(sensor_msgs.msg.CompressedImage, options.ctopic, compressed_detect_and_draw)
    while rclpy.ok():
        try:
            rclpy.spin_once(node)
        except KeyboardInterrupt:
            node_logger.info("shutting down: keyboard interrupt")
            break
    node_logger.info("test_completed")
    node.destroy_node()
    rclpy.shutdown()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "59da6cf52c0ba52d656b72331b4ba475",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 185,
"avg_line_length": 38.46280991735537,
"alnum_prop": 0.6602922217447357,
"repo_name": "ros-perception/vision_opencv",
"id": "ee0e30f0f8719211d91ca18284b8e7987c710661",
"size": "4654",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "opencv_tests/opencv_tests/rosfacedetect.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6784"
},
{
"name": "C++",
"bytes": "175627"
},
{
"name": "CMake",
"bytes": "8002"
},
{
"name": "Python",
"bytes": "61019"
},
{
"name": "Shell",
"bytes": "2500"
}
],
"symlink_target": ""
}
|
import parlai.core.build_data as build_data
from parlai.core.build_data import DownloadableFile
import os
# Files to download for this task. The url is a Google Drive file id
# (from_google=True); the archive is unzipped after download (zipped=True)
# and verified against the given hashcode.
RESOURCES = [
    DownloadableFile(
        url='1kVr-YsUVFisceiIklvpWEe0kHNSIFtNh',
        file_name='entailment_trees_emnlp2021_data_v3.zip',
        hashcode='fe05a02f181bb3d27fa2f8bafda824f7a988af9df59f848d694458925be7c497',
        zipped=True,
        from_google=True,
    ),
]
def build(opt):
    """Download and unpack the entailment_bank data into the ParlAI datapath.

    No-op if the current version has already been built.
    """
    dpath = os.path.join(opt['datapath'], 'entailment_bank')
    version = '1.0'
    if build_data.built(dpath, version_string=version):
        # Data already present at this version; nothing to do.
        return
    print("[building data: " + dpath + "]")
    if build_data.built(dpath):
        # An older version exists, so remove these outdated files.
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    # Download (and unzip) every resource, then mark the build complete.
    for resource in RESOURCES:
        resource.download_file(dpath)
    build_data.mark_done(dpath, version_string=version)
|
{
"content_hash": "c5608b59668532732005ff390cb55482",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 84,
"avg_line_length": 30.38888888888889,
"alnum_prop": 0.6590493601462523,
"repo_name": "facebookresearch/ParlAI",
"id": "92252c0d1e682207f3cb46fe0976a63cf59891a3",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/tasks/entailment_bank/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
}
|
import filecmp
import os
from typing import Any, Dict, List, Mapping, Optional
from unittest.mock import MagicMock, patch
import orjson
from django.core import mail
from django.test import override_settings
from zulip_bots.custom_exceptions import ConfigValidationError
from zerver.lib.actions import (
do_change_stream_invite_only,
do_deactivate_user,
do_set_realm_property,
)
from zerver.lib.bot_config import ConfigError, get_bot_config
from zerver.lib.bot_lib import get_bot_handler
from zerver.lib.integrations import EMBEDDED_BOTS, WebhookIntegration
from zerver.lib.test_classes import UploadSerializeMixin, ZulipTestCase
from zerver.lib.test_helpers import avatar_disk_path, get_test_image_file, queries_captured
from zerver.models import (
Realm,
Service,
UserProfile,
get_bot_services,
get_realm,
get_stream,
get_user,
is_cross_realm_bot_email,
)
# A test validator
def _check_string(var_name: str, val: object) -> Optional[str]:
if str(val).startswith("_"):
return f'{var_name} starts with a "_" and is hence invalid.'
return None
# Sample integration definition whose single config option is wired to the
# test validator _check_string above.
stripe_sample_config_options = [
    WebhookIntegration(
        "stripe",
        ["financial"],
        display_name="Stripe",
        config_options=[("Stripe API key", "stripe_api_key", _check_string)],
    ),
]
class BotTest(ZulipTestCase, UploadSerializeMixin):
def get_bot_user(self, email: str) -> UserProfile:
realm = get_realm("zulip")
bot = get_user(email, realm)
return bot
def assert_num_bots_equal(self, count: int) -> None:
result = self.client_get("/json/bots")
self.assert_json_success(result)
self.assert_length(result.json()["bots"], count)
def create_bot(self, **extras: Any) -> Dict[str, Any]:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": "1",
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return result.json()
    def test_bot_domain(self) -> None:
        """Bots created in a realm get an email in that realm's bot domain."""
        self.login("hamlet")
        self.create_bot()
        self.assertTrue(UserProfile.objects.filter(email="hambot-bot@zulip.testserver").exists())
        # The other cases are hard to test directly, since we don't allow creating bots from
        # the wrong subdomain, and because 'testserver.example.com' is not a valid domain for the bot's email.
        # So we just test the Realm.get_bot_domain function.
        realm = get_realm("zulip")
        self.assertEqual(realm.get_bot_domain(), "zulip.testserver")
def deactivate_bot(self) -> None:
email = "hambot-bot@zulip.testserver"
result = self.client_delete(f"/json/bots/{self.get_bot_user(email).id}")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
# Invalid username
bot_info = dict(
full_name="My bot name",
short_name="@",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Bad name or username")
self.assert_num_bots_equal(0)
# Empty username
bot_info = dict(
full_name="My bot name",
short_name="",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Bad name or username")
self.assert_num_bots_equal(0)
    @override_settings(FAKE_EMAIL_DOMAIN="invaliddomain", REALM_HOSTS={"zulip": "127.0.0.1"})
    def test_add_bot_with_invalid_fake_email_domain(self) -> None:
        """Bot creation fails cleanly when FAKE_EMAIL_DOMAIN is not usable."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        bot_info = {
            "full_name": "The Bot of Hamlet",
            "short_name": "hambot",
            "bot_type": "1",
        }
        result = self.client_post("/json/bots", bot_info)
        error_message = (
            "Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n"
            + "Please contact your server administrator."
        )
        self.assert_json_error(result, error_message)
        self.assert_num_bots_equal(0)
def test_add_bot_with_no_name(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name="a",
short_name="bot",
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Name too short!")
self.assert_num_bots_equal(0)
    def test_json_users_with_bots(self) -> None:
        """Creating several bots keeps the /json/users query count constant."""
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        self.assert_num_bots_equal(0)
        num_bots = 3
        for i in range(num_bots):
            full_name = f"Bot {i}"
            short_name = f"bot-{i}"
            bot_info = dict(
                full_name=full_name,
                short_name=short_name,
                bot_type=1,
            )
            result = self.client_post("/json/bots", bot_info)
            self.assert_json_success(result)
        self.assert_num_bots_equal(num_bots)
        with queries_captured() as queries:
            users_result = self.client_get("/json/users")
        self.assert_json_success(users_result)
        # Fetching the user list should not scale with the number of bots.
        self.assert_length(queries, 3)
    def test_add_bot(self) -> None:
        """Creating a bot emits a correct realm_bot/add event and lists the bot."""
        hamlet = self.example_user("hamlet")
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=2):
            result = self.create_bot()
        self.assert_num_bots_equal(1)
        email = "hambot-bot@zulip.testserver"
        bot = self.get_bot_user(email)
        (event,) = (e for e in events if e["event"]["type"] == "realm_bot")
        self.assertEqual(result["user_id"], bot.id)
        # The realm_bot event must carry the full bot payload.
        self.assertEqual(
            dict(
                type="realm_bot",
                op="add",
                bot=dict(
                    email="hambot-bot@zulip.testserver",
                    user_id=bot.id,
                    bot_type=bot.bot_type,
                    full_name="The Bot of Hamlet",
                    is_active=True,
                    api_key=result["api_key"],
                    avatar_url=result["avatar_url"],
                    default_sending_stream=None,
                    default_events_register_stream=None,
                    default_all_public_streams=False,
                    services=[],
                    owner_id=hamlet.id,
                ),
            ),
            event["event"],
        )
        users_result = self.client_get("/json/users")
        members = orjson.loads(users_result.content)["members"]
        [bot] = [m for m in members if m["email"] == "hambot-bot@zulip.testserver"]
        self.assertEqual(bot["bot_owner_id"], self.example_user("hamlet").id)
        self.assertEqual(bot["user_id"], self.get_bot_user(email).id)
    @override_settings(FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "127.0.0.1"})
    def test_add_bot_with_fake_email_domain(self) -> None:
        """When the realm host is an IP, FAKE_EMAIL_DOMAIN is used for bot emails."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        email = "hambot-bot@fakedomain.com"
        self.get_bot_user(email)
    @override_settings(EXTERNAL_HOST="example.com")
    def test_add_bot_verify_subdomain_in_email_address(self) -> None:
        """Bot emails include the realm subdomain under EXTERNAL_HOST."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        email = "hambot-bot@zulip.example.com"
        self.get_bot_user(email)
    @override_settings(
        FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "zulip.example.com"}
    )
    def test_add_bot_host_used_as_domain_if_valid(self) -> None:
        """When the realm host is a valid domain, it wins over FAKE_EMAIL_DOMAIN."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        email = "hambot-bot@zulip.example.com"
        self.get_bot_user(email)
    def test_add_bot_with_username_in_use(self) -> None:
        """Duplicate short names and duplicate full names are both rejected."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        # The short_name is used in the email, which we call
        # "Username" for legacy reasons.
        bot_info = dict(
            full_name="whatever",
            short_name="hambot",
        )
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_error(result, "Username already in use")
        dup_full_name = "The Bot of Hamlet"
        bot_info = dict(
            full_name=dup_full_name,
            short_name="whatever",
        )
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_error(result, "Name is already in use!")
    def test_add_bot_with_user_avatar(self) -> None:
        """A bot can be created with an uploaded avatar image."""
        email = "hambot-bot@zulip.testserver"
        realm = get_realm("zulip")
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        with get_test_image_file("img.png") as fp:
            self.create_bot(file=fp)
            profile = get_user(email, realm)
            # Verify the uploaded avatar matches what the server stored on disk.
            self.assertTrue(
                filecmp.cmp(fp.name, os.path.splitext(avatar_disk_path(profile))[0] + ".original")
            )
        self.assert_num_bots_equal(1)
        self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
        self.assertTrue(os.path.exists(avatar_disk_path(profile)))
    def test_add_bot_with_too_many_files(self) -> None:
        """Uploading more than one file during bot creation is rejected."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        with get_test_image_file("img.png") as fp1, get_test_image_file("img.gif") as fp2:
            bot_info = dict(
                full_name="whatever",
                short_name="whatever",
                file1=fp1,
                file2=fp2,
            )
            result = self.client_post("/json/bots", bot_info)
            self.assert_json_error(result, "You may only upload one file at a time")
        self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self) -> None:
email = "hambot-bot@zulip.testserver"
realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_sending_stream"], "Denmark")
profile = get_user(email, realm)
assert profile.default_sending_stream is not None
self.assertEqual(profile.default_sending_stream.name, "Denmark")
def test_add_bot_with_default_sending_stream_not_subscribed(self) -> None:
email = "hambot-bot@zulip.testserver"
realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream="Rome")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_sending_stream"], "Rome")
profile = get_user(email, realm)
assert profile.default_sending_stream is not None
self.assertEqual(profile.default_sending_stream.name, "Rome")
    def test_add_bot_email_address_visibility(self) -> None:
        """Bot emails are not mangled even under restricted visibility."""
        # Test that we don't mangle the email field with
        # email_address_visibility limited to admins
        user = self.example_user("hamlet")
        do_set_realm_property(
            user.realm,
            "email_address_visibility",
            Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
            acting_user=None,
        )
        user.refresh_from_db()
        self.login_user(user)
        self.assert_num_bots_equal(0)
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=2):
            result = self.create_bot()
        self.assert_num_bots_equal(1)
        email = "hambot-bot@zulip.testserver"
        bot = self.get_bot_user(email)
        (event,) = (e for e in events if e["event"]["type"] == "realm_bot")
        self.assertEqual(
            dict(
                type="realm_bot",
                op="add",
                bot=dict(
                    email="hambot-bot@zulip.testserver",
                    user_id=bot.id,
                    bot_type=bot.bot_type,
                    full_name="The Bot of Hamlet",
                    is_active=True,
                    api_key=result["api_key"],
                    avatar_url=result["avatar_url"],
                    default_sending_stream=None,
                    default_events_register_stream=None,
                    default_all_public_streams=False,
                    services=[],
                    owner_id=user.id,
                ),
            ),
            event["event"],
        )
        users_result = self.client_get("/json/users")
        members = orjson.loads(users_result.content)["members"]
        [bot] = [m for m in members if m["email"] == "hambot-bot@zulip.testserver"]
        self.assertEqual(bot["bot_owner_id"], user.id)
        self.assertEqual(bot["user_id"], self.get_bot_user(email).id)
    def test_bot_add_subscription(self) -> None:
        """
        Calling POST /json/users/me/subscriptions should successfully add
        a stream to the list of subscriptions and confirm the right number
        of events are generated.
        When 'principals' has a bot, no notification message event or invitation email
        is sent when add_subscriptions_backend is called in the above API call.
        """
        hamlet = self.example_user("hamlet")
        iago = self.example_user("iago")
        self.login_user(hamlet)
        # Normal user i.e. not a bot.
        request_data = {
            "principals": '["' + iago.email + '"]',
        }
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=3):
            result = self.common_subscribe_to_streams(hamlet, ["Rome"], request_data)
            self.assert_json_success(result)
        msg_event = [e for e in events if e["event"]["type"] == "message"]
        self.assert_length(msg_event, 1)  # Notification message event is sent.
        # Create a bot.
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        # A bot
        bot_request_data = {
            "principals": '["hambot-bot@zulip.testserver"]',
        }
        events_bot: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events_bot, expected_num_events=2):
            result = self.common_subscribe_to_streams(hamlet, ["Rome"], bot_request_data)
            self.assert_json_success(result)
        # No notification message event or invitation email is sent because of bot.
        msg_event = [e for e in events_bot if e["event"]["type"] == "message"]
        self.assert_length(msg_event, 0)
        self.assert_length(events_bot, len(events) - 1)
        # Test runner automatically redirects all sent email to a dummy 'outbox'.
        self.assert_length(mail.outbox, 0)
    def test_add_bot_with_default_sending_stream_private_allowed(self) -> None:
        """A subscriber may set a private stream as a bot's default sending stream."""
        self.login("hamlet")
        user_profile = self.example_user("hamlet")
        stream = get_stream("Denmark", user_profile.realm)
        self.subscribe(user_profile, stream.name)
        do_change_stream_invite_only(stream, True)
        self.assert_num_bots_equal(0)
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=2):
            result = self.create_bot(default_sending_stream="Denmark")
        self.assert_num_bots_equal(1)
        self.assertEqual(result["default_sending_stream"], "Denmark")
        email = "hambot-bot@zulip.testserver"
        realm = get_realm("zulip")
        profile = get_user(email, realm)
        assert profile.default_sending_stream is not None
        self.assertEqual(profile.default_sending_stream.name, "Denmark")
        (event,) = (e for e in events if e["event"]["type"] == "realm_bot")
        self.assertEqual(
            dict(
                type="realm_bot",
                op="add",
                bot=dict(
                    email="hambot-bot@zulip.testserver",
                    user_id=profile.id,
                    full_name="The Bot of Hamlet",
                    bot_type=profile.bot_type,
                    is_active=True,
                    api_key=result["api_key"],
                    avatar_url=result["avatar_url"],
                    default_sending_stream="Denmark",
                    default_events_register_stream=None,
                    default_all_public_streams=False,
                    services=[],
                    owner_id=user_profile.id,
                ),
            ),
            event["event"],
        )
        # Only the owner should be notified (the stream is private).
        self.assertEqual(event["users"], [user_profile.id])
def test_add_bot_with_default_sending_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"default_sending_stream": "Denmark",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_events_register_stream(self) -> None:
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_events_register_stream="Denmark")
self.assert_num_bots_equal(1)
self.assertEqual(result["default_events_register_stream"], "Denmark")
profile = get_user(bot_email, bot_realm)
assert profile.default_events_register_stream is not None
self.assertEqual(profile.default_events_register_stream.name, "Denmark")
    def test_add_bot_with_default_events_register_stream_private_allowed(self) -> None:
        """A subscriber may set a private stream as a bot's events-register stream."""
        self.login("hamlet")
        user_profile = self.example_user("hamlet")
        stream = self.subscribe(user_profile, "Denmark")
        do_change_stream_invite_only(stream, True)
        self.assert_num_bots_equal(0)
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=2):
            result = self.create_bot(default_events_register_stream="Denmark")
        self.assert_num_bots_equal(1)
        self.assertEqual(result["default_events_register_stream"], "Denmark")
        bot_email = "hambot-bot@zulip.testserver"
        bot_realm = get_realm("zulip")
        bot_profile = get_user(bot_email, bot_realm)
        assert bot_profile.default_events_register_stream is not None
        self.assertEqual(bot_profile.default_events_register_stream.name, "Denmark")
        (event,) = (e for e in events if e["event"]["type"] == "realm_bot")
        self.assertEqual(
            dict(
                type="realm_bot",
                op="add",
                bot=dict(
                    email="hambot-bot@zulip.testserver",
                    full_name="The Bot of Hamlet",
                    user_id=bot_profile.id,
                    bot_type=bot_profile.bot_type,
                    is_active=True,
                    api_key=result["api_key"],
                    avatar_url=result["avatar_url"],
                    default_sending_stream=None,
                    default_events_register_stream="Denmark",
                    default_all_public_streams=False,
                    services=[],
                    owner_id=user_profile.id,
                ),
            ),
            event["event"],
        )
        # Only the owner should be notified (the stream is private).
        self.assertEqual(event["users"], [user_profile.id])
def test_add_bot_with_default_events_register_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
self.assert_num_bots_equal(0)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"default_events_register_stream": "Denmark",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_all_public_streams(self) -> None:
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.create_bot(default_all_public_streams=orjson.dumps(True).decode())
self.assert_num_bots_equal(1)
self.assertTrue(result["default_all_public_streams"])
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.default_all_public_streams, True)
    def test_deactivate_bot(self) -> None:
        """A bot can be deactivated; repeating the deactivation is harmless."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        self.deactivate_bot()
        # You can deactivate the same bot twice.
        self.deactivate_bot()
        self.assert_num_bots_equal(0)
    def test_deactivate_bogus_bot(self) -> None:
        """Deleting a nonexistent bot id returns a "No such bot" error."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        invalid_user_id = 1000
        result = self.client_delete(f"/json/bots/{invalid_user_id}")
        self.assert_json_error(result, "No such bot")
        self.assert_num_bots_equal(1)
    def test_deactivate_bot_with_owner_deactivation(self) -> None:
        """Deactivating a bot owner's account also deactivates their bots."""
        user = self.example_user("hamlet")
        self.login_user(user)
        bot_info = {
            "full_name": "The Bot of Hamlet",
            "short_name": "hambot",
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)
        bot_info = {
            "full_name": "The Another Bot of Hamlet",
            "short_name": "hambot-another",
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)
        self.assertEqual(
            UserProfile.objects.filter(is_bot=True, bot_owner=user, is_active=True).count(), 2
        )
        # Deactivating one's own account...
        result = self.client_delete("/json/users/me")
        self.assert_json_success(result)
        user = self.example_user("hamlet")
        self.assertFalse(user.is_active)
        # ...must leave no active bots owned by that user.
        self.login("iago")
        self.assertFalse(
            UserProfile.objects.filter(is_bot=True, bot_owner=user, is_active=True).exists()
        )
    def test_cannot_deactivate_other_realm_bot(self) -> None:
        """Admins cannot deactivate bots that live in another realm."""
        user = self.mit_user("starnine")
        self.login_user(user)
        bot_info = {
            "full_name": "The Bot in zephyr",
            "short_name": "starn-bot",
            "bot_type": "1",
        }
        result = self.client_post("/json/bots", bot_info, subdomain="zephyr")
        self.assert_json_success(result)
        result = self.client_get("/json/bots", subdomain="zephyr")
        bot_email = result.json()["bots"][0]["username"]
        bot = get_user(bot_email, user.realm)
        # Iago is an admin, but in the "zulip" realm, not "zephyr".
        self.login("iago")
        result = self.client_delete(f"/json/bots/{bot.id}")
        self.assert_json_error(result, "No such bot")
    def test_bot_deactivation_attacks(self) -> None:
        """You cannot deactivate somebody else's bot."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        # Have Othello try to deactivate both Hamlet and
        # Hamlet's bot.
        self.login("othello")
        # Cannot deactivate a user as a bot
        result = self.client_delete("/json/bots/{}".format(self.example_user("hamlet").id))
        self.assert_json_error(result, "No such bot")
        email = "hambot-bot@zulip.testserver"
        result = self.client_delete(f"/json/bots/{self.get_bot_user(email).id}")
        self.assert_json_error(result, "Insufficient permission")
        # But we don't actually deactivate the other person's bot.
        self.login("hamlet")
        self.assert_num_bots_equal(1)
        # Cannot deactivate a bot as a user
        result = self.client_delete(f"/json/users/{self.get_bot_user(email).id}")
        self.assert_json_error(result, "No such user")
        self.assert_num_bots_equal(1)
    def test_bot_permissions(self) -> None:
        """Non-owners cannot regenerate keys for or edit another user's bot."""
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        self.create_bot()
        self.assert_num_bots_equal(1)
        # Have Othello try to mess with Hamlet's bots.
        self.login("othello")
        email = "hambot-bot@zulip.testserver"
        result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}/api_key/regenerate")
        self.assert_json_error(result, "Insufficient permission")
        bot_info = {
            "full_name": "Fred",
        }
        result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
        self.assert_json_error(result, "Insufficient permission")
def get_bot(self) -> Dict[str, Any]:
result = self.client_get("/json/bots")
bots = result.json()["bots"]
return bots[0]
def test_update_api_key(self) -> None:
self.login("hamlet")
self.create_bot()
bot = self.get_bot()
old_api_key = bot["api_key"]
email = "hambot-bot@zulip.testserver"
result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}/api_key/regenerate")
self.assert_json_success(result)
new_api_key = result.json()["api_key"]
self.assertNotEqual(old_api_key, new_api_key)
bot = self.get_bot()
self.assertEqual(new_api_key, bot["api_key"])
def test_update_api_key_for_invalid_user(self) -> None:
self.login("hamlet")
invalid_user_id = 1000
result = self.client_post(f"/json/bots/{invalid_user_id}/api_key/regenerate")
self.assert_json_error(result, "No such bot")
def test_add_bot_with_bot_type_default(self) -> None:
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
def test_add_bot_with_bot_type_incoming_webhook(self) -> None:
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
self.login("hamlet")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.INCOMING_WEBHOOK_BOT)
def test_add_bot_with_bot_type_invalid(self) -> None:
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": 7,
}
self.login("hamlet")
self.assert_num_bots_equal(0)
result = self.client_post("/json/bots", bot_info)
self.assert_num_bots_equal(0)
self.assert_json_error(result, "Invalid bot type")
    def test_no_generic_bots_allowed_for_non_admins(self) -> None:
        """Under LIMIT_GENERIC_BOTS, non-admins may create only webhook bots."""
        bot_info = {
            "full_name": "The Bot of Hamlet",
            "short_name": "hambot",
            "bot_type": 1,
        }
        bot_email = "hambot-bot@zulip.testserver"
        bot_realm = get_realm("zulip")
        bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
        bot_realm.save(update_fields=["bot_creation_policy"])
        # A regular user cannot create a generic bot
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        result = self.client_post("/json/bots", bot_info)
        self.assert_num_bots_equal(0)
        self.assert_json_error(result, "Must be an organization administrator")
        # But can create an incoming webhook
        self.assert_num_bots_equal(0)
        self.create_bot(bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
        self.assert_num_bots_equal(1)
        profile = get_user(bot_email, bot_realm)
        self.assertEqual(profile.bot_type, UserProfile.INCOMING_WEBHOOK_BOT)
    def test_no_generic_bot_reactivation_allowed_for_non_admins(self) -> None:
        """Non-admins cannot reactivate generic bots under LIMIT_GENERIC_BOTS."""
        self.login("hamlet")
        self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
        # Tighten the policy only after the bot exists.
        bot_realm = get_realm("zulip")
        bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
        bot_realm.save(update_fields=["bot_creation_policy"])
        bot_email = "hambot-bot@zulip.testserver"
        bot_user = get_user(bot_email, bot_realm)
        do_deactivate_user(bot_user, acting_user=None)
        # A regular user cannot reactivate a generic bot
        self.assert_num_bots_equal(0)
        result = self.client_post(f"/json/users/{bot_user.id}/reactivate")
        self.assert_json_error(result, "Must be an organization administrator")
        self.assert_num_bots_equal(0)
def test_no_generic_bots_allowed_for_admins(self) -> None:
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
bot_realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
bot_realm.save(update_fields=["bot_creation_policy"])
# An administrator can create any type of bot
self.login("iago")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
    def test_no_bots_allowed_for_non_admins(self) -> None:
        """Under ADMINS_ONLY policy, non-admins cannot create any bot type."""
        bot_info = {
            "full_name": "The Bot of Hamlet",
            "short_name": "hambot",
            "bot_type": 1,
        }
        bot_realm = get_realm("zulip")
        bot_realm.bot_creation_policy = Realm.BOT_CREATION_ADMINS_ONLY
        bot_realm.save(update_fields=["bot_creation_policy"])
        # A regular user cannot create a generic bot
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        result = self.client_post("/json/bots", bot_info)
        self.assert_num_bots_equal(0)
        self.assert_json_error(result, "Must be an organization administrator")
        # Also, a regular user cannot create an incoming bot
        bot_info["bot_type"] = 2
        self.login("hamlet")
        self.assert_num_bots_equal(0)
        result = self.client_post("/json/bots", bot_info)
        self.assert_num_bots_equal(0)
        self.assert_json_error(result, "Must be an organization administrator")
def test_no_bots_allowed_for_admins(self) -> None:
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
bot_realm.bot_creation_policy = Realm.BOT_CREATION_ADMINS_ONLY
bot_realm.save(update_fields=["bot_creation_policy"])
# An administrator can create any type of bot
self.login("iago")
self.assert_num_bots_equal(0)
self.create_bot(bot_type=UserProfile.DEFAULT_BOT)
self.assert_num_bots_equal(1)
profile = get_user(bot_email, bot_realm)
self.assertEqual(profile.bot_type, UserProfile.DEFAULT_BOT)
def test_patch_bot_full_name(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"full_name": "Fred",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Fred", result.json()["full_name"])
bot = self.get_bot()
self.assertEqual("Fred", bot["full_name"])
    def test_patch_bot_full_name_in_use(self) -> None:
        """Name collisions are rejected; one's own padded name is a no-op."""
        self.login("hamlet")
        original_name = "The Bot of Hamlet"
        bot_info = {
            "full_name": original_name,
            "short_name": "hambot",
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)
        bot_email = "hambot-bot@zulip.testserver"
        bot = self.get_bot_user(bot_email)
        url = f"/json/bots/{bot.id}"
        # It doesn't matter whether a name is taken by a human
        # or a bot, we can't use it.
        already_taken_name = self.example_user("cordelia").full_name
        bot_info = {
            "full_name": already_taken_name,
        }
        result = self.client_patch(url, bot_info)
        self.assert_json_error(result, "Name is already in use!")
        # We can use our own name (with extra whitespace), and the
        # server should silently do nothing.
        original_name_with_padding = "   " + original_name + " "
        bot_info = {
            "full_name": original_name_with_padding,
        }
        result = self.client_patch(url, bot_info)
        self.assert_json_success(result)
        bot = self.get_bot_user(bot_email)
        self.assertEqual(bot.full_name, original_name)
        # And let's do a sanity check with an actual name change
        # after our various attempts that either failed or did
        # nothing.
        bot_info = {
            "full_name": "Hal",
        }
        result = self.client_patch(url, bot_info)
        self.assert_json_success(result)
        bot = self.get_bot_user(bot_email)
        self.assertEqual(bot.full_name, "Hal")
def test_patch_bot_full_name_non_bot(self) -> None:
self.login("iago")
bot_info = {
"full_name": "Fred",
}
result = self.client_patch("/json/bots/{}".format(self.example_user("hamlet").id), bot_info)
self.assert_json_error(result, "No such bot")
    def test_patch_bot_owner(self) -> None:
        """A bot's owner can be changed to another user in the realm."""
        self.login("hamlet")
        othello = self.example_user("othello")
        bot_info: Dict[str, object] = {
            "full_name": "The Bot of Hamlet",
            "short_name": "hambot",
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)
        bot_info = {
            "bot_owner_id": othello.id,
        }
        email = "hambot-bot@zulip.testserver"
        result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
        self.assert_json_success(result)
        # Test bot's owner has been changed successfully.
        self.assertEqual(result.json()["bot_owner"], othello.email)
        # The new owner now sees the bot in their own bot list.
        self.login("othello")
        bot = self.get_bot()
        self.assertEqual("The Bot of Hamlet", bot["full_name"])
def test_patch_bot_owner_bad_user_id(self) -> None:
    """PATCHing bot_owner_id to a nonexistent user id fails and leaves
    the owner unchanged.

    The original body fetched the bot profile before the PATCH and never
    used that value (it was immediately shadowed after the request), and
    repeated the bot email literal where ``email`` already held it; both
    redundancies are removed.
    """
    self.login("hamlet")
    self.create_bot()
    self.assert_num_bots_equal(1)
    email = "hambot-bot@zulip.testserver"
    # Assumed not to collide with any real user id in the test database.
    bad_bot_owner_id = 999999
    bot_info = {
        "bot_owner_id": bad_bot_owner_id,
    }
    result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
    self.assert_json_error(result, "Failed to change owner, no such user")
    # Re-fetch the bot and verify ownership was not changed.
    profile = get_user(email, get_realm("zulip"))
    self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_deactivated(self) -> None:
    """Transferring bot ownership to a deactivated user is rejected."""
    self.login("hamlet")
    self.create_bot()
    self.assert_num_bots_equal(1)
    target_user_profile = self.example_user("othello")
    do_deactivate_user(target_user_profile, acting_user=None)
    # Re-fetch so we observe the deactivated state from the database.
    target_user_profile = self.example_user("othello")
    self.assertFalse(target_user_profile.is_active)
    bot_info = {
        "bot_owner_id": self.example_user("othello").id,
    }
    email = "hambot-bot@zulip.testserver"
    result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
    self.assert_json_error(result, "Failed to change owner, user is deactivated")
    # Ownership must remain with the original owner.
    profile = self.get_bot_user(email)
    self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_must_be_in_same_realm(self) -> None:
    """A user from another realm is treated as nonexistent when offered
    as a bot owner (same error as an unknown user id)."""
    self.login("hamlet")
    self.create_bot()
    self.assert_num_bots_equal(1)
    bot_info = {
        # starnine lives in the MIT realm, not the bot's zulip realm.
        "bot_owner_id": self.mit_user("starnine").id,
    }
    email = "hambot-bot@zulip.testserver"
    result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
    self.assert_json_error(result, "Failed to change owner, no such user")
    # Ownership must remain with the original owner.
    profile = self.get_bot_user(email)
    self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_noop(self) -> None:
    """Setting bot_owner_id to the current owner succeeds as a no-op."""
    self.login("hamlet")
    self.create_bot()
    self.assert_num_bots_equal(1)
    bot_info = {
        "bot_owner_id": self.example_user("hamlet").id,
    }
    email = "hambot-bot@zulip.testserver"
    result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
    # Check that we're still the owner
    self.assert_json_success(result)
    profile = self.get_bot_user(email)
    self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_owner_a_bot(self) -> None:
self.login("hamlet")
self.create_bot()
self.assert_num_bots_equal(1)
bot_info: Dict[str, object] = {
"full_name": "Another Bot of Hamlet",
"short_name": "hamelbot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"bot_owner_id": self.get_bot_user("hamelbot-bot@zulip.testserver").id,
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Failed to change owner, bots can't own other bots")
profile = get_user(email, get_realm("zulip"))
self.assertEqual(profile.bot_owner, self.example_user("hamlet"))
def test_patch_bot_avatar(self) -> None:
    """Uploading a bot avatar: multiple files in one request are rejected;
    a single file switches the bot to a user-uploaded avatar and bumps
    avatar_version.

    The original body kept the same address in two variables (``bot_email``
    and ``email``) and rebuilt the PATCH URL per request; both are
    deduplicated here.
    """
    self.login("hamlet")
    bot_info = {
        "full_name": "The Bot of Hamlet",
        "short_name": "hambot",
    }
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_success(result)
    bot_email = "hambot-bot@zulip.testserver"
    bot_realm = get_realm("zulip")
    profile = get_user(bot_email, bot_realm)
    # New bots start with a gravatar-based avatar.
    self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)
    url = f"/json/bots/{self.get_bot_user(bot_email).id}"
    # Try error case first (too many files):
    with get_test_image_file("img.png") as fp1, get_test_image_file("img.gif") as fp2:
        result = self.client_patch_multipart(url, dict(file1=fp1, file2=fp2))
    self.assert_json_error(result, "You may only upload one file at a time")
    profile = get_user(bot_email, bot_realm)
    # The failed upload must not bump the avatar version.
    self.assertEqual(profile.avatar_version, 1)
    # HAPPY PATH
    with get_test_image_file("img.png") as fp:
        result = self.client_patch_multipart(url, dict(file=fp))
        profile = get_user(bot_email, bot_realm)
        self.assertEqual(profile.avatar_version, 2)
        # Make sure that avatar image that we've uploaded is same with avatar image in the server
        self.assertTrue(
            filecmp.cmp(fp.name, os.path.splitext(avatar_disk_path(profile))[0] + ".original")
        )
    self.assert_json_success(result)
    self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
    self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_patch_bot_to_stream(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_sending_stream"])
def test_patch_bot_to_stream_not_subscribed(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Rome",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Rome", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Rome", bot["default_sending_stream"])
def test_patch_bot_to_stream_none(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
default_sending_stream = get_user(bot_email, bot_realm).default_sending_stream
self.assertEqual(None, default_sending_stream)
bot = self.get_bot()
self.assertEqual(None, bot["default_sending_stream"])
def test_patch_bot_to_stream_private_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_sending_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_sending_stream"])
def test_patch_bot_to_stream_private_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "Denmark",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_patch_bot_to_stream_not_found(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_sending_stream": "missing",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'missing'")
def test_patch_bot_events_register_stream(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
email = "hambot-bot@zulip.testserver"
bot_user = self.get_bot_user(email)
url = f"/json/bots/{bot_user.id}"
# Successfully give the bot a default stream.
stream_name = "Denmark"
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
self.assertEqual(stream_name, result.json()["default_events_register_stream"])
bot = self.get_bot()
self.assertEqual(stream_name, bot["default_events_register_stream"])
# Make sure we are locked out of an unsubscribed private stream.
# We'll subscribe the bot but not the owner (since the check is
# on owner).
stream_name = "private_stream"
self.make_stream(stream_name, hamlet.realm, invite_only=True)
self.subscribe(bot_user, stream_name)
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_error_contains(result, "Invalid stream name")
# Subscribing the owner allows us to patch the stream.
self.subscribe(hamlet, stream_name)
bot_info = dict(default_events_register_stream=stream_name)
result = self.client_patch(url, bot_info)
self.assert_json_success(result)
# Make sure the bot cannot create their own default stream.
url = f"/api/v1/bots/{bot_user.id}"
result = self.api_patch(bot_user, url, bot_info)
self.assert_json_error_contains(result, "endpoint does not accept")
def test_patch_bot_events_register_stream_allowed(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "Denmark",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Denmark", result.json()["default_events_register_stream"])
bot = self.get_bot()
self.assertEqual("Denmark", bot["default_events_register_stream"])
def test_patch_bot_events_register_stream_denied(self) -> None:
self.login("hamlet")
realm = self.example_user("hamlet").realm
stream = get_stream("Denmark", realm)
self.unsubscribe(self.example_user("hamlet"), "Denmark")
do_change_stream_invite_only(stream, True)
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "Denmark",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'Denmark'")
def test_patch_bot_events_register_stream_none(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
bot_email = "hambot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
default_events_register_stream = get_user(
bot_email, bot_realm
).default_events_register_stream
self.assertEqual(None, default_events_register_stream)
bot = self.get_bot()
self.assertEqual(None, bot["default_events_register_stream"])
def test_patch_bot_events_register_stream_not_found(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_events_register_stream": "missing",
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_error(result, "Invalid stream name 'missing'")
def test_patch_bot_default_all_public_streams_true(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_all_public_streams": orjson.dumps(True).decode(),
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual(result.json()["default_all_public_streams"], True)
bot = self.get_bot()
self.assertEqual(bot["default_all_public_streams"], True)
def test_patch_bot_default_all_public_streams_false(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"default_all_public_streams": orjson.dumps(False).decode(),
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual(result.json()["default_all_public_streams"], False)
bot = self.get_bot()
self.assertEqual(bot["default_all_public_streams"], False)
def test_patch_bot_via_post(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"full_name": "Fred",
"method": "PATCH",
}
email = "hambot-bot@zulip.testserver"
# Important: We intentionally use the wrong method, post, here.
result = self.client_post(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
self.assertEqual("Fred", result.json()["full_name"])
bot = self.get_bot()
self.assertEqual("Fred", bot["full_name"])
def test_patch_bogus_bot(self) -> None:
    """Patching a nonexistent bot id fails with 'No such bot' and changes
    nothing.

    (The previous docstring incorrectly described this as a silent
    deletion test.)
    """
    self.login("hamlet")
    self.create_bot()
    bot_info = {
        "full_name": "Fred",
    }
    # An id that does not correspond to any bot owned by this user.
    invalid_user_id = 1000
    result = self.client_patch(f"/json/bots/{invalid_user_id}", bot_info)
    self.assert_json_error(result, "No such bot")
    self.assert_num_bots_equal(1)
def test_patch_outgoing_webhook_bot(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "The Bot of Hamlet",
"short_name": "hambot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://foo.bar.com").decode(),
"service_interface": Service.GENERIC,
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
"service_payload_url": orjson.dumps("http://foo.bar2.com").decode(),
"service_interface": Service.SLACK,
}
email = "hambot-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
service_interface = orjson.loads(result.content)["service_interface"]
self.assertEqual(service_interface, Service.SLACK)
service_payload_url = orjson.loads(result.content)["service_payload_url"]
self.assertEqual(service_payload_url, "http://foo.bar2.com")
@patch("zulip_bots.bots.giphy.giphy.GiphyHandler.validate_config")
def test_patch_bot_config_data(self, mock_validate_config: MagicMock) -> None:
self.create_test_bot(
"test",
self.example_user("hamlet"),
full_name="Bot with config data",
bot_type=UserProfile.EMBEDDED_BOT,
service_name="giphy",
config_data=orjson.dumps({"key": "12345678"}).decode(),
)
bot_info = {"config_data": orjson.dumps({"key": "87654321"}).decode()}
email = "test-bot@zulip.testserver"
result = self.client_patch(f"/json/bots/{self.get_bot_user(email).id}", bot_info)
self.assert_json_success(result)
config_data = orjson.loads(result.content)["config_data"]
self.assertEqual(config_data, orjson.loads(bot_info["config_data"]))
def test_outgoing_webhook_invalid_interface(self) -> None:
    """An out-of-range interface_type is rejected at creation; a valid
    one succeeds.

    NOTE(review): this test is byte-for-byte identical to
    test_outgoing_webhook_interface_type later in this file — one of the
    two is likely redundant; confirm and remove.
    """
    self.login("hamlet")
    bot_info = {
        "full_name": "Outgoing Webhook test bot",
        "short_name": "outgoingservicebot",
        "bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
        "payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
        # -1 is not a valid Service interface constant.
        "interface_type": -1,
    }
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_error(result, "Invalid interface type")
    bot_info["interface_type"] = Service.GENERIC
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_success(result)
def test_create_outgoing_webhook_bot(self, **extras: Any) -> None:
self.login("hamlet")
bot_info = {
"full_name": "Outgoing Webhook test bot",
"short_name": "outgoingservicebot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_email = "outgoingservicebot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
bot = get_user(bot_email, bot_realm)
[service] = get_bot_services(bot.id)
self.assertEqual(service.name, "outgoingservicebot")
self.assertEqual(service.base_url, "http://127.0.0.1:5002")
self.assertEqual(service.user_profile, bot)
# invalid URL test case.
bot_info["payload_url"] = orjson.dumps("http://127.0.0.:5002").decode()
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "payload_url is not a URL")
def test_get_bot_handler(self) -> None:
# Test for valid service.
test_service_name = "converter"
test_bot_handler = get_bot_handler(test_service_name)
self.assertEqual(
str(type(test_bot_handler)),
"<class 'zulip_bots.bots.converter.converter.ConverterHandler'>",
)
# Test for invalid service.
test_service_name = "incorrect_bot_service_foo"
test_bot_handler = get_bot_handler(test_service_name)
self.assertEqual(test_bot_handler, None)
def test_if_each_embedded_bot_service_exists(self) -> None:
    """Every registered embedded bot must resolve to a handler."""
    for embedded_bot in EMBEDDED_BOTS:
        self.assertIsNotNone(get_bot_handler(embedded_bot.name))
def test_outgoing_webhook_interface_type(self) -> None:
self.login("hamlet")
bot_info = {
"full_name": "Outgoing Webhook test bot",
"short_name": "outgoingservicebot",
"bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
"payload_url": orjson.dumps("http://127.0.0.1:5002").decode(),
"interface_type": -1,
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid interface type")
bot_info["interface_type"] = Service.GENERIC
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
def test_create_embedded_bot_with_disabled_embedded_bots(self, **extras: Any) -> None:
with self.settings(EMBEDDED_BOTS_ENABLED=False):
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="followup",
config_data=orjson.dumps({"key": "value"}).decode(),
assert_json_error_msg="Embedded bots are not enabled.",
**extras,
)
def test_create_embedded_bot(self, **extras: Any) -> None:
bot_config_info = {"key": "value"}
self.create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="followup",
config_data=orjson.dumps(bot_config_info).decode(),
**extras,
)
bot_email = "embeddedservicebot-bot@zulip.testserver"
bot_realm = get_realm("zulip")
bot = get_user(bot_email, bot_realm)
[service] = get_bot_services(bot.id)
bot_config = get_bot_config(bot)
self.assertEqual(bot_config, bot_config_info)
self.assertEqual(service.name, "followup")
self.assertEqual(service.user_profile, bot)
def test_create_embedded_bot_with_incorrect_service_name(self, **extras: Any) -> None:
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
bot_type=UserProfile.EMBEDDED_BOT,
service_name="not_existing_service",
assert_json_error_msg="Invalid embedded bot name.",
**extras,
)
def test_create_embedded_bot_with_invalid_config_value(self, **extras: Any) -> None:
self.fail_to_create_test_bot(
short_name="embeddedservicebot",
user_profile=self.example_user("hamlet"),
service_name="followup",
config_data=orjson.dumps({"invalid": ["config", "value"]}).decode(),
assert_json_error_msg="config_data contains a value that is not a string",
**extras,
)
# Test to create embedded bot with an incorrect config value
incorrect_bot_config_info = {"key": "incorrect key"}
bot_info = {
"full_name": "Embedded test bot",
"short_name": "embeddedservicebot3",
"bot_type": UserProfile.EMBEDDED_BOT,
"service_name": "giphy",
"config_data": orjson.dumps(incorrect_bot_config_info).decode(),
}
bot_info.update(extras)
with patch(
"zulip_bots.bots.giphy.giphy.GiphyHandler.validate_config",
side_effect=ConfigValidationError,
):
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, "Invalid configuration data!")
def test_is_cross_realm_bot_email(self) -> None:
    """Cross-realm bot detection is case-insensitive and controlled by
    the CROSS_REALM_BOT_EMAILS setting."""
    self.assertTrue(is_cross_realm_bot_email("notification-bot@zulip.com"))
    # Matching is case-insensitive.
    self.assertTrue(is_cross_realm_bot_email("notification-BOT@zulip.com"))
    self.assertFalse(is_cross_realm_bot_email("random-bot@zulip.com"))
    # Overriding the setting swaps which emails count as cross-realm.
    with self.settings(CROSS_REALM_BOT_EMAILS={"random-bot@zulip.com"}):
        self.assertTrue(is_cross_realm_bot_email("random-bot@zulip.com"))
        self.assertFalse(is_cross_realm_bot_email("notification-bot@zulip.com"))
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_and_with_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
"config_data": orjson.dumps({"stripe_api_key": "sample-api-key"}).decode(),
}
self.create_bot(**bot_metadata)
new_bot = UserProfile.objects.get(full_name="My Stripe Bot")
config_data = get_bot_config(new_bot)
self.assertEqual(
config_data, {"integration_id": "stripe", "stripe_api_key": "sample-api-key"}
)
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_incorrect_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
"config_data": orjson.dumps({"stripe_api_key": "_invalid_key"}).decode(),
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = 'Invalid stripe_api_key value _invalid_key (stripe_api_key starts with a "_" and is hence invalid.)'
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_service_name_without_keys(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripe",
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = "Missing configuration parameters: {'stripe_api_key'}"
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_without_service_name(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
}
self.create_bot(**bot_metadata)
new_bot = UserProfile.objects.get(full_name="My Stripe Bot")
with self.assertRaises(ConfigError):
get_bot_config(new_bot)
@patch("zerver.lib.integrations.WEBHOOK_INTEGRATIONS", stripe_sample_config_options)
def test_create_incoming_webhook_bot_with_incorrect_service_name(self) -> None:
self.login("hamlet")
bot_metadata = {
"full_name": "My Stripe Bot",
"short_name": "my-stripe",
"bot_type": UserProfile.INCOMING_WEBHOOK_BOT,
"service_name": "stripes",
}
response = self.client_post("/json/bots", bot_metadata)
self.assertEqual(response.status_code, 400)
expected_error_message = "Invalid integration 'stripes'."
self.assertEqual(orjson.loads(response.content)["msg"], expected_error_message)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(full_name="My Stripe Bot")
|
{
"content_hash": "cb0db5fa867258622b575169c530320d",
"timestamp": "",
"source": "github",
"line_count": 1668,
"max_line_length": 133,
"avg_line_length": 39.6378896882494,
"alnum_prop": 0.5931242059410733,
"repo_name": "eeshangarg/zulip",
"id": "837a33bb13ef8ce734300019eeb22ae548f83476",
"size": "66116",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/tests/test_bots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
}
|
import os
import sys
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5.QtWidgets import QApplication
from bssrdf_estimate.interface import MainWindow
if __name__ == '__main__':
    # Create directory for intermediate results.
    try:
        os.mkdir('result')
        print('[INFO] result directory is created!')
    except FileExistsError:
        # Only "already exists" is expected here; the original bare
        # `except:` also swallowed KeyboardInterrupt, permission errors,
        # etc., hiding real failures.
        print('[INFO] result directory already exists. Skip creating.')

    # Start application
    app = QApplication(sys.argv)
    win = MainWindow()
    win.showMaximized()
    sys.exit(app.exec_())
|
{
"content_hash": "4f0c3b891226580231f79c7de67ef4c6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.6772388059701493,
"repo_name": "tatsy/bssrdf-estimate",
"id": "7f02d55b598e76342b30d86705fe6a69bdb68d02",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6147"
},
{
"name": "CMake",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "66212"
}
],
"symlink_target": ""
}
|
import re
from nova import exception
from nova.i18n import _
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
# Human-readable microversion changelog; update this string and
# rest_api_version_history.rst whenever _MAX_API_VERSION is bumped
# (see the comment block above).
REST_API_VERSION_HISTORY = """REST API Version History:
* 2.1 - Initial version. Equivalent to v2.0 code
* 2.2 - Adds (keypair) type parameter for os-keypairs plugin
Fixes success status code for create/delete a keypair method
* 2.3 - Exposes additional os-extended-server-attributes
Exposes delete_on_termination for os-extended-volumes
* 2.4 - Exposes reserved field in os-fixed-ips.
* 2.5 - Allow server search option ip6 for non-admin
* 2.6 - Consolidate the APIs for getting remote consoles
* 2.7 - Check flavor type before add tenant access.
* 2.8 - Add new protocol for VM console (mks)
* 2.9 - Exposes lock information in server details.
* 2.10 - Allow admins to query, create and delete keypairs owned by any
user.
* 2.11 - Exposes forced_down attribute for os-services
"""
# The minimum and maximum versions of the API supported.
# The default API version request is defined to be the
# minimum version of the API supported.
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
_MAX_API_VERSION = "2.11"
DEFAULT_API_VERSION = _MIN_API_VERSION
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
    """Return the minimum supported API version as an APIVersionRequest."""
    return APIVersionRequest(_MIN_API_VERSION)
def max_api_version():
    """Return the maximum supported API version as an APIVersionRequest."""
    return APIVersionRequest(_MAX_API_VERSION)
class APIVersionRequest(object):
    """An API microversion request.

    Wraps an 'X.Y' version string and provides parsing, rich
    comparisons, and range matching for the microversion framework.
    A null request (major == minor == 0) represents "no version".
    """

    # Major must be >= 1; minor is 0 or a number without a leading zero.
    _VERSION_RE = re.compile(r"^([1-9]\d*)\.([1-9]\d*|0)$")

    def __init__(self, version_string=None):
        """Create an API version request object.

        :param version_string: String representation of APIVersionRequest.
            Correct format is 'X.Y', where 'X' and 'Y' are int values.
            None value should be used to create Null APIVersionRequest,
            which is equal to 0.0
        :raises exception.InvalidAPIVersionString: if the string does not
            match the 'X.Y' format.
        """
        if version_string is None:
            # Null request, equal to version 0.0.
            self.ver_major = 0
            self.ver_minor = 0
            return
        match = self._VERSION_RE.match(version_string)
        if match is None:
            raise exception.InvalidAPIVersionString(version=version_string)
        self.ver_major = int(match.group(1))
        self.ver_minor = int(match.group(2))

    def __str__(self):
        """Debug/Logging representation of object."""
        return ("API Version Request Major: %s, Minor: %s"
                % (self.ver_major, self.ver_minor))

    def is_null(self):
        """Return True for the null (0.0) request."""
        return (self.ver_major, self.ver_minor) == (0, 0)

    def _format_type_error(self, other):
        # Build (not raise) the TypeError used by the comparison methods.
        msg = _("'%(other)s' should be an instance of '%(cls)s'")
        return TypeError(msg % {"other": other, "cls": self.__class__})

    def _as_tuple(self):
        # Comparable (major, minor) pair for this request.
        return (self.ver_major, self.ver_minor)

    def _other_tuple(self, other):
        # Validate the comparison operand and return its version tuple.
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)
        return other._as_tuple()

    def __lt__(self, other):
        return self._as_tuple() < self._other_tuple(other)

    def __eq__(self, other):
        return self._as_tuple() == self._other_tuple(other)

    def __gt__(self, other):
        return self._as_tuple() > self._other_tuple(other)

    def __le__(self, other):
        return self < other or self == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __ge__(self, other):
        return self > other or self == other

    def matches(self, min_version, max_version):
        """Return whether this version lies in [min_version, max_version].

        @param min_version: Minimum acceptable version.
        @param max_version: Maximum acceptable version.
        @returns: boolean

        If min_version is null then there is no minimum limit.
        If max_version is null then there is no maximum limit.
        If self is null then raise ValueError.
        """
        if self.is_null():
            raise ValueError
        unbounded_below = min_version.is_null()
        unbounded_above = max_version.is_null()
        if unbounded_below and unbounded_above:
            return True
        if unbounded_above:
            return min_version <= self
        if unbounded_below:
            return self <= max_version
        return min_version <= self <= max_version

    def get_string(self):
        """Return the 'X.Y' string form of this (non-null) request.

        Creating an APIVersionRequest from the result yields an equal
        request. Raises ValueError on a null request.
        """
        if self.is_null():
            raise ValueError
        return "%s.%s" % (self.ver_major, self.ver_minor)
|
{
"content_hash": "c6741f826656a1b7b603f533885f60c8",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 37.331288343558285,
"alnum_prop": 0.6440427280197206,
"repo_name": "nikesh-mahalka/nova",
"id": "450b2717fd8659c9dc1de3175387655a0abfcb46",
"size": "6687",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/api_version_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16554867"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
}
|
# Module-level version metadata for the ironic_lib distribution, resolved
# by pbr from the installed package's packaging information.
import pbr.version
version_info = pbr.version.VersionInfo('ironic_lib')
|
{
"content_hash": "e6f8dcf9ef83107ccb36adfeda0a7b1d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.7808219178082192,
"repo_name": "NaohiroTamura/ironic-lib",
"id": "cd45253b1a29ea281267655b3688a3cf38af0f21",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic_lib/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "197128"
}
],
"symlink_target": ""
}
|
"""Packaging script for python-escpos, a library for ESC/POS printers."""
from distutils.core import setup

# Read the long description with a context manager so the file handle is
# closed promptly instead of being leaked by an inline open().read().
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='python-escpos',
    version='1.0.9',
    url='https://github.com/mosquito/python-escpos',
    download_url='https://github.com/mosquito/python-escpos/archive/master.zip',
    description='Python library to manipulate ESC/POS Printers',
    long_description=long_description,
    license='GNU GPL v3',
    # Bug fix: distutils metadata fields `author`/`author_email` expect
    # strings, not lists; multiple authors are conventionally given as one
    # comma-separated string.
    author='Manuel F Martinez, Dmitry Orlov',
    author_email='manpaz@bashlinux.com, me@mosquito.su',
    platforms=['linux'],
    packages=[
        'escpos',
    ],
    # NOTE(review): install_requires is a setuptools extension; plain
    # distutils.core.setup ignores it — confirm whether setuptools is the
    # intended installer here.
    install_requires=[
        'pyusb',
        'Pillow>=2.0',
        'qrcode>=4.0',
        'pyserial',
    ],
)
|
{
"content_hash": "fa1af07f96f8ea21b7e6ee9f3ccaf19d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 28.347826086956523,
"alnum_prop": 0.6119631901840491,
"repo_name": "1upon0/rfid-auth-system",
"id": "e5ee86010948c8f226bfcab89461782f296d20a7",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GUI/printer/python-escpos-1.0.9/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "110445"
},
{
"name": "C",
"bytes": "848319"
},
{
"name": "C++",
"bytes": "257556"
},
{
"name": "CSS",
"bytes": "142637"
},
{
"name": "Gnuplot",
"bytes": "1125"
},
{
"name": "HTML",
"bytes": "921766"
},
{
"name": "JavaScript",
"bytes": "400339"
},
{
"name": "Makefile",
"bytes": "119274"
},
{
"name": "Python",
"bytes": "2308656"
},
{
"name": "QMake",
"bytes": "987"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from talkstarter.models import *
class EventAdmin(ModelAdmin):
    # Columns shown on the Event changelist page. The last three names
    # presumably resolve to methods/properties on the Event model rather
    # than plain fields — TODO confirm against talkstarter.models.
    list_display = ['title', 'date', 'operator', 'minimum_backers', 'backing_percentage', 'is_backed',]
# Register the app's models with the default admin site. Event uses the
# customized EventAdmin changelist; the others use the stock ModelAdmin.
admin.site.register(Operator)
admin.site.register(Event, EventAdmin)
admin.site.register(Backer)
admin.site.register(Subscription)
|
{
"content_hash": "9faef5ff8bc05f9e90c28d339f3d0622",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 100,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.7882037533512064,
"repo_name": "appsburg/talkstarter",
"id": "2fe28136308046b804cf6b80845ea89e6c936c4c",
"size": "373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3014"
}
],
"symlink_target": ""
}
|
"""Python front-end supports for functions.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
class Defun(object):
  """Decorator used to define TensorFlow functions.
  Use this decorator to make a Python function usable directly as a TensorFlow
  function.
  The decorated function must add ops to the default graph and return zero or
  more `Tensor` objects. Call the decorator with named arguments, one for each
  argument of the function to decorate, with the expected type of the argument
  as value.
  For example if the function to decorate accepts two `tf.float32` arguments
  named `x` and `y`, call the decorator with:
  @Defun(tf.float32, tf.float32)
  def foo(x, y):
    ...
  When you call the decorated function it will add `call` ops to the
  default graph and adds the definition of the function into the
  default graph. Because the addition of the function into the graph
  is deferred, the decorator can be used anywhere in the program.
  Any variables created inside of the function are hoisted into the outer graph.
  Note that the variables are created in the variable scope that was active
  during the first call to the function. Subsequent function calls will refer to
  the same set of variables.
  Definitions of functions are frozen in a graph as soon as the graph is used to
  create a session. Therefore, nodes using the function must be created in the
  graph before the corresponding session is created.
  Example, but also see the [How To on functions](link_needed).
  ```python
  # Defining the function.
  @tf.Defun(tf.float32, tf.float32)
  def MyFunc(x, y):
    return x + y, x - y
  # Building the graph.
  a = tf.Constant([1.0])
  b = tf.Constant([2.0])
  c, d = MyFunc(a, b, name='mycall')
  ```
  """
  def __init__(self, *input_types, **kwargs):
    """Create a `Defun` decorator.
    Args:
      *input_types: A list of `tf.DType`
      **kwargs: Optional keyword arguments, including
         func_name - (optional).  A python string, the name to use to
           declare this `Function` in the graph.
         grad_func - (optional).  A function implementing the gradient
           of the function-to-register.  This is either a
           `_DefinedFunction` or a `Declare` object. The gradient
           function must satisify the criterion defined in
           function.proto:GradientDef.
         python_grad_func - (optional).  A function implementing the
           gradient of the function python-side. This function must
           take the current op and the gradients w.r.t. its outputs,
           and return the gradients w.r.t. the inputs. That is it must
           implement the interface expected by `tf.RegisterGradient`).
           This will be called by tf.gradients to add the gradient ops
           to the graph. At most one of grad_func and python_grad_func
           can be specified.
         out_names = (optional). A list of strings, one per output
           tensor.
         shape_func - (optional). A function taking the op and returning a list
           of static shapes to set for the function's outputs.
    """
    self._input_types = input_types
    self._func_name = kwargs.pop("func_name", None)
    self._grad_func = kwargs.pop("grad_func", None)
    self._python_grad_func = kwargs.pop("python_grad_func", None)
    self._out_names = kwargs.pop("out_names", None)
    # Remaining kwargs become attrs on the FunctionDef (see
    # _parse_kwargs_as_attrs) and are forwarded to every call site.
    self._extra_kwargs = kwargs
  def __call__(self, func):
    # Various sanity checks on the callable func.
    if not callable(func):
      raise ValueError("func %s must be callable" % func)
    # Func should not use kwargs and defaults.
    argspec = tf_inspect.getargspec(func)
    if argspec.keywords or argspec.defaults:
      raise ValueError("Functions with argument defaults or keyword "
                       "arguments are not supported.")
    # Computes how many arguments 'func' has.
    min_args = len(argspec.args)
    max_args = min_args
    if argspec.varargs:
      # *args makes the argument count effectively unbounded.
      max_args = 1000000
    argnames = argspec.args
    if tf_inspect.ismethod(func):
      # 1st argument is the "class" type.
      min_args -= 1
      argnames = argnames[1:]
    if self._input_types:
      # If Defun is given a list of types for the inputs, the number
      # of input types should be compatible with 'func'.
      num = len(self._input_types)
      if num < min_args or num > max_args:
        # Bug fix: the old message claimed "fewer arguments" even when the
        # problem was too MANY input types; report the actual mismatch.
        raise ValueError(
            "The number of specified input types (%d) is not compatible "
            "with the function's arguments (min: %d, max: %d)." %
            (num, min_args, max_args))
      return _DefinedFunction(
          func,
          argnames,
          self._input_types,
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
    # 'func' expects no arguments and input types is an empty list.
    if min_args == 0 and max_args == 0:
      return _DefinedFunction(
          func, [], [],
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
    # Input types are unknown. It's an overloaded function and hence
    # its definition needs to be deferred until it's called.
    return _OverloadedFunction(
        func,
        argnames,
        self._func_name,
        self._grad_func,
        self._python_grad_func,
        out_names=self._out_names,
        **self._extra_kwargs)
class Declare(object):
  """Forward-declares a TensorFlow function by name and signature.
  The object stands in for a TensorFlow function whose body will be
  defined later during graph construction.
  For example,
  # Declares a function Foo, which takes a tf.int32 named "n" and a
  # tf.float32 named "x" as inputs and returns a tf.float32 named "z"
  # as its output.
  foo = Declare("Foo", [("n", tf.int32), ("x", tf.float32)],
                [("z", tf.float32)])
  # Defines a function Bar calls Foo.
  @tf.Defun(tf.float32)
  def Bar(x):
    return foo(6, x)
  # Defines Foo, with output named "z".
  @tf.Defun(tf.int32, tf.float32, out_names=["z"])
  def Foo(n, x):
    ...  # Calculation.
    return result
  """
  def __init__(self, func_name, inputs, outputs):
    """Creates a `Declare` object.
    Args:
      func_name: The name of the function.
      inputs: A list of (name, data type) pairs of function arguments.
      outputs: A list of (name, data type) pairs of function return values.
    """
    self._sig = op_def_pb2.OpDef()
    self._sig.name = func_name
    def _make_argdefs(arg_pairs):
      # Reject duplicate argument names before building the protos.
      arg_names = [arg_name for arg_name, _ in arg_pairs]
      if len(arg_names) != len(set(arg_names)):
        raise ValueError("Expected names to all be unique: %s" % str(arg_names))
      return [
          op_def_pb2.OpDef.ArgDef(type=arg_dtype.as_datatype_enum,
                                  name=arg_name)
          for arg_name, arg_dtype in arg_pairs
      ]
    self._sig.input_arg.extend(_make_argdefs(inputs))
    self._sig.output_arg.extend(_make_argdefs(outputs))
  def __call__(self, *inputs, **kwargs):
    # Coerce every positional argument to a Tensor, then emit the call op;
    # _call returns (outputs, op) and callers want only the outputs.
    tensors = [ops.convert_to_tensor(t) for t in inputs]
    return _call(self._sig, *tensors, **kwargs)[0]
class _DefinedFunction(object):
  """_DefinedFunction encapsulates a function definition and its properties.
  Attributes:
    name: The function name.
    definition: The definition of this function. A FunctionDef proto.
    grad_func_name: If not None, the name of this function's gradient function.
    python_grad_func: A python callable implementing the gradient of
      the function python-side.
  """
  def __init__(self,
               func,
               argnames,
               input_types,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               shape_func=None,
               capture_by_value=False,
               **kwargs):
    """Creates _DefinedFunction.
    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple, list of
        tf data types.
      func_name: The function name. Defaults to None, in which derives from
        'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: An optional list of strings for the function return value
        names.
      shape_func: An optional function mapping an op to a list of static
        output shapes.
      capture_by_value: Boolean (defaults to False). If True, captured values
        will be copied into the function body.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.
    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._input_types = input_types
    self._func_name = func_name
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._shape_func = shape_func
    self._capture_by_value = capture_by_value
    self._extra_kwargs = kwargs
    self._definition = None  # Constructed lazily.
    self._c_func = None  # Constructed with definition.
    self._sub_functions = dict()  # Constructed with definition.
    self._args = []
    assert isinstance(input_types, (list, tuple))
    # Pair each declared input type with a name; synthesize "arg%d" names
    # when argnames is shorter than input_types.
    for i in range(len(input_types)):
      argname = argnames[i] if i < len(argnames) else ("arg%d" % i)
      argtype = input_types[i]
      self._args.append((argname, argtype))
  @property
  def name(self):
    """Function name."""
    self._create_definition_if_needed()
    return self._func_name
  @property
  def definition(self):
    """Function definition proto."""
    self._create_definition_if_needed()
    return self._definition
  def set_grad_func(self, grad_func):
    """Specifies the gradient function of this function."""
    assert not self._grad_func
    assert isinstance(grad_func, _DefinedFunction)
    self._grad_func = grad_func
  @property
  def grad_func_name(self):
    """Its gradient function's name."""
    return self._grad_func.name if self._grad_func else None
  @property
  def python_grad_func(self):
    """Python gradient function callable."""
    return self._python_grad_func
  @property
  def declared_input_types(self):
    """Returns the list of data types of explicit declared inputs."""
    return self._input_types
  @property
  def captured_inputs(self):
    """Returns the list of implicitly captured inputs."""
    self._create_definition_if_needed()
    return self._extra_inputs
  def _create_definition_if_needed(self):
    """Creates the function definition if it's not created yet."""
    # Definition tracing always happens in graph mode, even when the
    # surrounding code runs eagerly.
    with context.graph_mode():
      self._create_definition_if_needed_impl()
  def _create_definition_if_needed_impl(self):
    """This is not what you want, see _create_definition_if_needed."""
    if self._definition is not None:
      return
    # Create the func_def object.
    temp_graph = _FuncGraph(capture_by_value=self._capture_by_value)
    with temp_graph.as_default():
      # List of placeholders for the function_def.
      inputs = []
      for (argname, argtype) in self._args:
        argholder = array_ops.placeholder(argtype, name=argname)
        inputs.append(argholder)
      # Call func and gather the output tensors.
      with vs.variable_scope("", custom_getter=temp_graph.getvar):
        outputs = self._func(*inputs)
      # If func only returned one value, make it a tuple.
      if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
      if any([_ is None for _ in outputs]):
        raise ValueError("Function can not return None.")
      # Ensures each output is a Tensor.
      outputs = [ops.convert_to_tensor(_) for _ in outputs]
    # Tensors captured from outer graphs become extra function arguments;
    # _FuncGraph recorded them while tracing the body above.
    self._extra_inputs = temp_graph.extra_inputs
    inputs.extend(temp_graph.extra_args)
    # pylint: disable=protected-access
    self._sub_functions = temp_graph._functions
    # pylint: enable=protected-access
    # Build the FunctionDef
    self._definition = graph_to_function_def.graph_to_function_def(
        temp_graph,
        temp_graph.get_operations(),
        inputs,
        outputs,
        out_names=self._out_names)
    # Extra kwargs are treated as attrs on the function def.
    sig_pre_func_name = self._func_name or _get_func_name(self._func)
    kwargs_attr = _parse_kwargs_as_attrs(sig_pre_func_name,
                                         **self._extra_kwargs)
    for k in kwargs_attr:
      self._definition.attr[k].CopyFrom(kwargs_attr[k])
    # Hash the definition and its dependencies.
    self._hash_str = self._create_hash_str(
        self._definition.signature.input_arg,
        self._definition.signature.output_arg, self._definition.node_def)
    # Finally, we decide the function name to use. If not specified,
    # make up something which is almost certainly unique (but deterministic).
    if not self._func_name:
      self._func_name = "_".join([_get_func_name(self._func), self._hash_str])
    self._definition.signature.name = self._func_name
    if self._func.__doc__:
      self._definition.signature.description = self._func.__doc__
    # pylint: disable=protected-access
    if temp_graph._c_graph:
      # The C-API path mirrors the FunctionDef into a TF_Function.
      output_names = ([compat.as_bytes(x) for x in self._out_names]
                      if self._out_names else [])
      description = self._func.__doc__ or None
      with errors.raise_exception_on_not_ok_status() as status:
        self._c_func = c_api.TF_GraphToFunction_wrapper(
            temp_graph._c_graph,
            self._func_name,
            None,  # opers
            [t._as_tf_output() for t in inputs],
            [t._as_tf_output() for t in outputs],
            output_names,
            None,  # opts
            description,
            status)
      self._set_c_attrs(kwargs_attr)
    # pylint: enable=protected-access
  def _set_c_attrs(self, attrs):
    """Sets `attrs` as attributes of self._c_func.
    Requires that self._c_func is not None.
    Args:
      attrs: a dictionary from attribute name to attribute proto value
    """
    for name, attr_value in attrs.items():
      serialized = attr_value.SerializeToString()
      # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
      # It might be worth creating a convenient way to re-use the same status.
      with errors.raise_exception_on_not_ok_status() as status:
        c_api.TF_FunctionSetAttrValueProto(self._c_func, compat.as_str(name),
                                           serialized, status)
  def _create_hash_str(self, input_arg, output_arg, node_def):
    """Creates an 8-character string unique to this input.
    Args:
      input_arg: the input_arg field of an OpDef
                 (e.g. self._definition.signature.input_arg)
      output_arg: the output_arg field of an OpDef
                  (e.g. self._definition.signature.output_arg)
      node_def: the node_def field of a FunctionDef
                (e.g. self._definition.node_def)
    Returns:
      The unique string for this input
    """
    hasher = hashlib.sha1()
    def update_num(n):
      hasher.update(compat.as_bytes("%x" % n))
    def update_str(s):
      # Length-prefix each string so that concatenations can't collide.
      update_num(len(s))
      hasher.update(compat.as_bytes(s))
    def update_strs(slist):
      update_num(len(slist))
      for s in slist:
        update_str(s)
    for adef in input_arg:
      update_str(adef.SerializeToString())
    for adef in output_arg:
      update_str(adef.SerializeToString())
    for n in sorted(node_def, key=lambda n: n.name):
      update_str(n.name)
      update_str(n.op)
      update_strs(n.input)
      update_num(len(n.attr))
      # NOTE: protobuf map serialization does not guarantee ordering.
      for k in sorted(n.attr):
        update_str(k)
        update_str(n.attr[k].SerializeToString())
    return hasher.hexdigest()[:8]
  def add_to_graph(self, g):
    """Adds this function into the graph g."""
    self._create_definition_if_needed()
    # pylint: disable=protected-access
    # If 'g' has an identical function already, do nothing.
    prev = g._get_function(self.name)
    if prev and (prev._hash_str == self._hash_str):
      return
    # Adds this function into 'g'.
    if context.in_graph_mode():
      g._add_function(self)
    else:
      context.context().add_function_def(self.definition)
    # pylint: enable=protected-access
    # Ensures related sub-routines are defined in 'g', too.
    for f in self._sub_functions.values():
      f.add_to_graph(g)
    # Adds its gradient function, too.
    if self._grad_func:
      self._grad_func.add_to_graph(g)
  def __call__(self, *args, **kwargs):
    self.add_to_graph(ops.get_default_graph())
    # Captured tensors are passed as trailing arguments after the explicit
    # ones, matching the order _create_definition_if_needed_impl used.
    args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
    ret, op = _call(self._definition.signature, *args, **kwargs)
    if self._shape_func is not None:
      shapes = self._shape_func(op)
      if len(shapes) != len(op.outputs):
        raise ValueError("shape_func produced %d shapes for %d outputs" %
                         (len(shapes), len(op.outputs)))
      for (t, shape) in zip(op.outputs, shapes):
        t.set_shape(shape)
    return ret
class _OverloadedFunction(object):
  """_OverloadedFunction encapsulates an overloaded function.
  _OverloadedFunction maintains a mapping from input types to
  instantiated _DefinedFunction in self._overload.
  """
  def __init__(self,
               func,
               argnames,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               **kwargs):
    """Creates an _OverloadedFunction.
    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which derives from
        'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: A list of strings for the function return value names.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.
    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._argnames = argnames
    self._func_name = func_name
    assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._extra_kwargs = kwargs
    # Maps the dtype-key string (see _type_list_to_str) to the
    # _DefinedFunction instantiated for that input signature.
    self._overload = {}
  def instantiate(self, input_types):
    """Instantiate this function given input argument types.
    Args:
      input_types: A list of data types for the inputs.
    Returns:
      _DefinedFunction for the given input types.
    """
    # Stringify the type list.
    key = _type_list_to_str(input_types)
    defined = self._overload.get(key)
    if not defined:
      # If not defined yet, define the function given the input types.
      name = self._func_name
      if name is not None:
        name = "_".join([name, key])
      defined = _DefinedFunction(
          self._func,
          self._argnames,
          input_types,
          name,
          None,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
      _ = defined.name  # Fully instantiate the function definition.
      if self._grad_func:
        # If _grad_func is given, it is another
        # _OverloadedFunction. We need to instantiate it with the
        # right input types.
        output_types = [
            dtypes.DType(_.type)
            for _ in defined.definition.signature.output_arg
        ]
        # pylint: disable=protected-access
        defined._grad_func = self._grad_func.instantiate(
            input_types + output_types)
        # pylint: enable=protected-access
      self._overload[key] = defined
    return defined
  def __call__(self, *args, **kwargs):
    input_types = []
    args = list(args)
    for (i, x) in enumerate(args):
      x = ops.convert_to_tensor(x)
      if not isinstance(x, ops.Tensor):
        raise ValueError("Expect a Tensor but get ", x)
      input_types.append(x.dtype)
      args[i] = x
    # Dispatch to (or lazily build) the overload for this dtype signature.
    return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
  """A helper for constructing a function.
  _FuncGraph overrides ops.Graph's create_op() so that we can keep
  track of all inputs into every op created inside the function. If
  any input is from other graphs, we keep track of it in self.capture
  and substitute the input with a place holder.
  Each captured input's corresponding place holder is converted into a
  function argument and the caller passes in the captured tensor.
  """
  def __init__(self, capture_by_value, *args, **kwargs):
    super(_FuncGraph, self).__init__(*args, **kwargs)
    self._capture_by_value = capture_by_value
    self._building_function = True
    self._outer_graph = ops.get_default_graph()
    self._vscope = vs.get_variable_scope()
    self._old_custom_getter = self._vscope.custom_getter
    # Maps outer-graph tensor -> its stand-in (placeholder or copied op
    # output) inside this graph; see create_op below.
    self._captured = {}
    self.extra_inputs = []  # Outer-graph tensors captured by this function.
    self.extra_args = []  # Placeholders standing in for extra_inputs.
    self.extra_vars = []  # Variables created (hoisted) via getvar.
  def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var
  def create_op(self, op_type, inputs, data_types, **kwargs):
    # Intercept every op creation to rewrite inputs that come from other
    # graphs into local stand-ins before delegating to ops.Graph.
    for i, x in enumerate(inputs):
      if x.graph is not self:
        # Referring to a tensor from other graph.
        if x in self._captured:
          # Captured already.
          inputs[i] = self._captured[x]
        elif self._capture_by_value:
          inputs[i] = self._add_tensor_and_parents(x)
        else:
          # Substitute with a placeholder.
          self.extra_inputs.append(x)
          ph = array_ops.placeholder(x.dtype, shape=x.get_shape())
          # pylint: disable=protected-access
          ph._handle_data = x._handle_data
          # pylint: enable=protected-access
          inputs[i] = ph
          self._captured[x] = ph
          self.extra_args.append(ph)
    return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
                                             **kwargs)
  def _add_tensor_and_parents(self, tensor):
    # Recursively copies the producing op (and its ancestors) into this
    # graph, then returns the copied op's matching output tensor.
    op = self._add_op_and_parents(tensor.op)
    return op.outputs[tensor.value_index]
  def _add_op_and_parents(self, op):
    # pylint: disable=protected-access
    op_def = graph_to_function_def._get_op_def(op)
    # pylint: enable=protected-access
    if op_def.is_stateful:
      raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
                       "by value." % (op.name, op.type))
    elif op.type in ("Placeholder", "PlaceholderV2"):
      raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
                       "by value." % (op.name, op.type))
    captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
    captured_op = self.create_op(
        op.type,
        captured_inputs, [o.dtype for o in op.outputs],
        name=op.name,
        attrs=op.node_def.attr,
        op_def=op_def)
    for t, captured_t in zip(op.outputs, captured_op.outputs):
      self._captured[t] = captured_t
    return captured_op
def _call(sig, *inputs, **kwargs):
  """Adds a node calling a function.
  This adds a `call` op to the default graph that calls the function
  of signature `sig`, passing the tensors in `inputs` as arguments.
  It returns the outputs of the call, which are one or more tensors.
  `sig` is the `OpDef` signature proto of a `_DefinedFunction` object.
  You can pass an optional keyword parameter `name=string` to name the
  added operation.
  You can pass an optional keyword parameter `noinline=True|False` to
  instruct the runtime not to inline the function body into the call
  site.
  Args:
    sig: OpDefArg. The signature of the function.
    *inputs: arguments to the function.
    **kwargs: Optional keyword arguments. Can only contain 'name' or
      'noinline'.
  Returns:
    A 2-element tuple. First element: a Tensor if the function returns a single
    value; a list of Tensors if the function returns multiple value; the
    Operation if the function returns no values. Second element: the Operation.
  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" %
                     (len(sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  g = ops.get_default_graph()
  func_name = sig.name
  # Remaining kwargs (e.g. noinline) become attrs on the call op.
  attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  with ops.name_scope(name, func_name, inputs) as name:
    op = g.create_op(
        func_name,
        list(inputs),
        output_types,
        name=name,
        attrs=attrs,
        op_def=sig,
        compute_shapes=False)
  if op.outputs:
    if len(op.outputs) == 1:
      ret = op.outputs[0]
    else:
      ret = tuple(op.outputs)
  else:
    ret = op
  return ret, op
def _from_definition(fdef, grad_func=None):
  """Creates a _DefinedFunction initialized from a FunctionDef proto.
  Args:
    fdef: a FunctionDef
    grad_func: a _DefinedFunction or None
  Returns:
    A _DefinedFunction representing fdef
  """
  # The Python callable is only needed to create a FunctionDef. Since we have
  # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
  # have access to such a callable here).
  func = None
  argnames = [arg.name for arg in fdef.signature.input_arg]
  input_types = tuple(
      dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
  func_name = fdef.signature.name
  # Note: FunctionDefs do not include python gradient functions, so if the
  # original _DefinedFunction included one it will not be reflected here.
  python_grad_func = None
  out_names = [arg.name for arg in fdef.signature.output_arg]
  result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
                            python_grad_func, out_names)
  # pylint: disable=protected-access
  # Pre-populate the lazily-built fields so the definition is never
  # re-traced (there is no Python body to trace anyway).
  result._definition = fdef
  # Captured inputs are added as regular inputs to a function when it's
  # serialized, i.e. any extra inputs from the original function are now
  # included in `result`._args
  result._extra_inputs = []
  result._hash_str = result._create_hash_str(
      result._definition.signature.input_arg,
      result._definition.signature.output_arg, result._definition.node_def)
  # pylint: enable=protected-access
  return result
def _from_library(lib):
  """Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
  This method handles assigning the correct gradient functions to each
  function.
  Args:
    lib: a FunctionDefLibrary
  Returns:
    A list of _DefinedFunctions
  Raises:
    ValueError: `lib` is invalid
  """
  if not lib.function and not lib.gradient:
    return []
  # function name -> FunctionDef proto
  funcs = {fdef.signature.name: fdef for fdef in lib.function}
  # Validate that all references function names have function defs
  for g in lib.gradient:
    if g.function_name not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.function_name, str(lib)))
    if g.gradient_func not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.gradient_func, str(lib)))
  # function name -> gradient function name
  func_to_grad = collections.defaultdict(lambda: None)
  # gradient function name -> names of functions having that grad function
  grad_to_funcs = collections.defaultdict(list)
  for gdef in lib.gradient:
    func_to_grad[gdef.function_name] = gdef.gradient_func
    grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
  # Start with functions without gradients
  ready = [
      fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
  ]
  if not ready:
    raise ValueError("FunctionDefLibrary contains cyclic gradient functions!\n"
                     + str(lib))
  # function name -> _DefinedFunction
  initialized = {}
  # Process functions in dependency order: a function is initialized only
  # after its gradient function (if any) has been initialized.
  while ready:
    fdef = ready.pop()
    name = fdef.signature.name
    grad = initialized.get(func_to_grad[name])
    if func_to_grad[name]:
      assert grad
    defined_func = _from_definition(fdef, grad_func=grad)
    initialized[name] = defined_func
    ready.extend(funcs[f] for f in grad_to_funcs[name])
  # Bug fix: return an actual list as documented; on Python 3,
  # dict.values() is a view, not a list.
  return list(initialized.values())
def _parse_kwargs_as_attrs(func_name, **kwargs):
  """Parses **kwargs into a node's attributes.
  Recognized keys are 'noinline', 'compiled' and
  'separate_compiled_gradients'; anything else raises.
  Args:
    func_name: Name used to derive a fresh _XlaScope attr when none is
      inherited from the enclosing graph context.
    **kwargs: The keyword arguments to convert.
  Returns:
    A dict mapping attr name to AttrValue proto.
  Raises:
    ValueError: if an unrecognized keyword argument is present.
  """
  attrs = {}
  noinline = kwargs.pop("noinline", None)
  if noinline is not None:
    attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
  compiled = kwargs.pop("compiled", None)
  separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
  if compiled is not None:
    attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
    attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
        b=bool(separate_compiled_gradients))
    # Forward _XlaScope from enclosing context (if set), otherwise create new.
    # pylint: disable=protected-access
    if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
      attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
    else:
      attrs["_XlaScope"] = attr_value_pb2.AttrValue(
          s=("function_%s" % func_name).encode())
    # pylint: enable=protected-access
  if kwargs:
    raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
  return attrs
def _get_func_name(func):
  """Returns a name string for `func`, unwrapping TF decorators first.
  Args:
    func: A callable: a plain function, a method, or a callable instance.
  Returns:
    The function/method name string, or the type name for a callable
    instance implementing `__call__`.
  Raises:
    ValueError: If `func` is not callable.
  """
  _, func = tf_decorator.unwrap(func)
  if callable(func):
    if tf_inspect.isfunction(func):
      return func.__name__
    elif tf_inspect.ismethod(func):
      return "%s.%s" % (func.__self__.__name__, func.__name__)
    else:  # Probably a class instance with __call__
      # Bug fix: return the type's *name* (a string), not the type object;
      # callers join this result into function names with "_".join, which
      # requires a str.
      return type(func).__name__
  else:
    raise ValueError("Argument must be callable")
def get_extra_vars():
  """Returns the variables captured while defining a function.

  Returns:
    If the default graph is a `_FuncGraph` (i.e. a function body is being
    built), the variables created inside that body so far; otherwise an
    empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_vars if isinstance(graph, _FuncGraph) else []
def get_extra_inputs():
  """Returns the input tensors captured while defining a function.

  Returns:
    If the default graph is a `_FuncGraph` (i.e. a function body is being
    built), the tensors referenced inside the body but defined outside it
    so far; otherwise an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_inputs if isinstance(graph, _FuncGraph) else []
def get_extra_args():
  """Returns the function arguments matching the captured inputs.

  Returns:
    If the default graph is a `_FuncGraph` (i.e. a function body is being
    built), the placeholders corresponding one-to-one with the tensors
    returned by `get_extra_inputs()`; otherwise an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_args if isinstance(graph, _FuncGraph) else []
def _type_list_to_str(types):
  """Encodes a sequence of dtypes as a compact string of short codes.

  Args:
    types: Sequence of `dtypes.DType` values (must be re-iterable).

  Returns:
    Concatenation of each dtype's short code from `_DTYPE_TO_STR`.

  Raises:
    ValueError: If any dtype has no registered short code.
  """
  # Idiom fix: generator expressions avoid materializing throwaway lists
  # inside any()/join().
  if any(_ not in _DTYPE_TO_STR for _ in types):
    raise ValueError("Unsupported dtypes: %s" % types)
  return "".join(_DTYPE_TO_STR[_] for _ in types)
# NOTE: The list needs to be extended when more data types are added.
# Short codes used when mangling a function's name by its input signature.
_DTYPE_TO_STR = {
    dtypes.float16: "f16",
    dtypes.float32: "f32",
    dtypes.float64: "f64",
    dtypes.int32: "i32",
    # NOTE(review): "i8" here collides with int8's code below, making the
    # encoding ambiguous — looks like it should be "u8" (cf. uint16 -> "u16").
    # Confirm before changing, since these codes end up in function names.
    dtypes.uint8: "i8",
    dtypes.uint16: "u16",
    dtypes.int16: "i16",
    dtypes.int8: "i8",
    dtypes.string: "s",
    dtypes.complex64: "c64",
    dtypes.complex128: "c128",
    dtypes.int64: "i64",
    dtypes.bool: "b",
    dtypes.qint8: "qi8",
    dtypes.quint8: "qu8",
    dtypes.qint16: "qi16",
    dtypes.quint16: "qu16",
    dtypes.qint32: "qi32",
    dtypes.bfloat16: "b16"
}
|
{
"content_hash": "57e2cb6e85ec3c5c0c72791ecebbffcc",
"timestamp": "",
"source": "github",
"line_count": 1007,
"max_line_length": 80,
"avg_line_length": 34.25124131082423,
"alnum_prop": 0.6480821083760981,
"repo_name": "adamtiger/tensorflow",
"id": "727242383e33e3449b1bbc1cc9a84c48155ea1bd",
"size": "35179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "196671"
},
{
"name": "C++",
"bytes": "26224486"
},
{
"name": "CMake",
"bytes": "169710"
},
{
"name": "Go",
"bytes": "898393"
},
{
"name": "Java",
"bytes": "318633"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "246360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22308636"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336273"
}
],
"symlink_target": ""
}
|
from django.db import models
# (code, human-readable label) choices for a player's position field.
POSITIONS = (
    ('G', 'Goalkeeper'),
    ('D', 'Defender'),
    ('M', 'Midfielder'),
    ('F', 'Forward'),
    ('S', 'Sub'),
)
class BaseModel(models.Model):
    ''' Simple abstract base model adding audit timestamps to subclasses. '''
    # Set once, when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        # No table is created for this model; subclasses inherit the fields.
        abstract = True
|
{
"content_hash": "b658907525c052d71a762f11b42aa33d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 19.894736842105264,
"alnum_prop": 0.58994708994709,
"repo_name": "f4nt/mls-api",
"id": "be1ee535c658317d0f97dd6b955499b640499a91",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mls_api/models/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "236170"
}
],
"symlink_target": ""
}
|
import ConfigParser
import logging
import tornado.web
import httplib
import json
from tornado.escape import json_encode
from MongoManager import MongoManager
import FormatConverterUtil
# Module-level logger; handlers/level are presumably configured by the app
# entry point — confirm.
logger = logging.getLogger('log.application')
class SearchController(tornado.web.RequestHandler):
    """Tornado handler that aggregates lookups for a query term from
    BabelNet, DBpedia, and the local CAMEO/JRC Mongo collections.

    NOTE: all of the class-level statements below run once at import time:
    they read the config file and open the Mongo collections shared by
    every request.
    """
    config = ConfigParser.ConfigParser()
    # Path is relative to the process working directory — confirm deployment
    # always starts the server from the expected directory.
    configPath = '../config/config.cnf'
    config.read(configPath)
    db_config = {
        'host': config.get('MongoDBConnection', 'db.host'),
        'port': config.get('MongoDBConnection', 'db.port'),
        'username': config.get('MongoDBConnection', 'db.username'),
        'password': config.get('MongoDBConnection', 'db.password')
    }
    schema = config.get('MongoDBConnection', 'db.schema')
    batch_size = config.get('MongoDBConnection', 'db.batch_limit')
    cameo_table = config.get('Cameo', 'db.Cameo')
    jrc_table = config.get('JRCNames', 'db.JRCNames')
    cameo_jrc_table = config.get('CameoJRC', 'db.CameoJRCCountryActor')
    bablenet_cache = config.get('BableNet', 'db.BableNet.Cache')
    dbpedia_cache = config.get('DBPedia', 'db.DBPedia.Cache')
    # Shared collection handles; the two *_cache names are rebound from the
    # table-name strings above to the actual collections.
    cameo = MongoManager(schema, cameo_table, batch_size, db_config).get_collection()
    jrc = MongoManager(schema, jrc_table, batch_size, db_config).get_collection()
    cameo_jrc = MongoManager(schema, cameo_jrc_table, batch_size, db_config).get_collection()
    bablenet_cache = MongoManager(schema, bablenet_cache, batch_size, db_config).get_collection()
    dbpedia_cache = MongoManager(schema, dbpedia_cache, batch_size, db_config).get_collection()
    def get(self):
        """Handle GET: fan the query out to the three sources and return a
        single JSON object with their raw results."""
        logger.info(self.config.get('Logging', 'Logger.GetMessage1') + '' + self.request.remote_ip)
        query = self.get_argument(self.config.get('AccessParameters', 'Access.QueryString'))
        result = {}
        result['query'] = query
        normalized_query = FormatConverterUtil.convertToCompareFormat(query)
        # Get result from Bablenet and push to object
        conn = httplib.HTTPConnection('babelnet.io')
        headers = {"Accept": "application/json"}
        search_query = "/v4/getSenses?word=" + normalized_query + "&lang=EN&pos=NOUN&filterLangs=AR&filterLangs=ES&key=" + self.config.get('BableNet', 'access.key')
        conn.request("GET", search_query, None, headers)
        result['bablenet'] = json.loads(conn.getresponse().read())
        # Get result from DBpedia and push to object
        conn = httplib.HTTPConnection('lookup.dbpedia.org')
        headers = {"Accept": "application/json"}
        search_query = "/api/search/KeywordSearch?QueryString=" + normalized_query
        conn.request("GET", search_query, None, headers)
        result['dbpedia'] = json.loads(conn.getresponse().read())
        # get result from caameo and JRC and push to object
        result['cameojrc'] = self.get_cameo_jrc_result(normalized_query)
        result['status'] = self.config.get('GeneralMsg', 'Status.success')
        result['licence'] = self.config.get('GeneralMsg', 'Licence.Ack')
        self.write(json_encode(result))
        self.set_header("Content-Type", "application/json")
    def get_cameo_jrc_result(self, normalized_query):
        """Join CAMEO<->JRC cross-reference rows matching the query with the
        referenced CAMEO and JRC documents, stripping internal ids."""
        cameo_jrc_data = self.cameo_jrc.find(
            {"$or": [{"cameo_string": normalized_query}, {"jrc_string": normalized_query}]})
        cameo_jrc_result = list(cameo_jrc_data)
        for data in cameo_jrc_result:
            data['cameo_data'] = list(self.cameo.find({"_id": data['cameo_id']}, {"_id": 0, "compare_strings": 0}))
            data['jrc_data'] = list(self.jrc.find({"_id": data['jrc_id']}, {"_id": 0, "compare_strings": 0}))
            del data['cameo_id']
            del data['jrc_id']
            del data['_id']
        return cameo_jrc_result
|
{
"content_hash": "b5ea92caefc7bf1d5f8a5c37915c9b6a",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 164,
"avg_line_length": 45.01204819277108,
"alnum_prop": 0.6587259100642399,
"repo_name": "SubhasisDutta/CAMEO-JRC-Database",
"id": "72fac8f3a3de858896ee345b9d222dca90cd5cdc",
"size": "3795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rest-server/SearchController.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "2509"
},
{
"name": "Java",
"bytes": "18059"
},
{
"name": "Python",
"bytes": "49448"
},
{
"name": "R",
"bytes": "1788"
},
{
"name": "Scala",
"bytes": "5126"
},
{
"name": "TypeScript",
"bytes": "9141"
}
],
"symlink_target": ""
}
|
from . import api
class StudentEvents():
    """Thin client wrappers for the IVLE StudentEvents API endpoints.

    Each method packs its arguments into the endpoint's parameter dict and
    delegates the HTTP work to ``api.call``; ``auth`` controls whether the
    call is authenticated.
    """
    # StudentEvents.StudentEvents
    def student_events(self, titleOnly=False, auth=True):
        return api.call('StudentEvents', {'TitleOnly': titleOnly}, auth)
    # StudentEvents.StudentEvents_Categories
    def student_events_categories(self, includeEvents=True, titleOnly=False, auth=True):
        payload = {'IncludeEvents': includeEvents, 'TitleOnly': titleOnly}
        return api.call('StudentEvents_Categories', payload, auth)
    # StudentEvents.StudentEvents_Committees
    def student_events_committees(self, includeEvents=True, titleOnly=False, auth=True):
        payload = {'IncludeEvents': includeEvents, 'TitleOnly': titleOnly}
        return api.call('StudentEvents_Committees', payload, auth)
    # StudentEvents.StudentEvents_Category
    def student_events_category(self, categoryId, titleOnly=False, auth=True):
        return api.call('StudentEvents_Category',
                        {'CategoryID': categoryId, 'TitleOnly': titleOnly}, auth)
    # StudentEvents.StudentEvents_Committee
    def student_events_committee(self, committeeId, titleOnly=False, auth=True):
        return api.call('StudentEvents_Committee',
                        {'CommitteeID': committeeId, 'TitleOnly': titleOnly}, auth)
    # StudentEvents.StudentEvents_PostNewEvent_JSON
    def student_events_post_new_event(self, categoryId, committeeId, title, evtStartDate, evtEndDate, auth=True):
        payload = {'CategoryID': categoryId, 'CommitteeID': committeeId,
                   'Title': title, 'evtStartDate': evtStartDate,
                   'evtEndDate': evtEndDate}
        return api.call('StudentEvents_PostNewEvent_JSON', payload, auth, 'post')
|
{
"content_hash": "d4b255777a934248515686aed7a1b765",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 143,
"avg_line_length": 52.78125,
"alnum_prop": 0.7217288336293665,
"repo_name": "karen/ivle-bot",
"id": "a20bf48fcfbae6ba84cad96392d9a645aceb41aa",
"size": "1689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyivle/helpers/student_events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61407"
}
],
"symlink_target": ""
}
|
'''
Created on Jul 15, 2021
@author: 658723
'''
from dateutil import parser
from datetime import datetime
from datetime import timedelta
from ....core.BaseAgent3 import BaseAgent
import json
class SpinnakerAgent(BaseAgent):
    """Insights agent that pulls pipeline executions (and their stages) from
    a Spinnaker Gate API and publishes them downstream, tracking the last
    seen build time per application to fetch incrementally."""
    @BaseAgent.timed
    def process(self):
        baseUrl = self.config.get("baseUrl", '')
        applicationsUrl = baseUrl + 'applications'
        accessToken = self.getCredential("accessToken")
        headers = {"Authorization": "Bearer " + accessToken}
        startFrom = self.config.get("startFrom", '')
        spinnakerApplications = self.getResponse(applicationsUrl, 'GET', None, None, None, reqHeaders=headers)
        responseTemplate = self.getResponseTemplate()
        dynamicTemplate = self.config.get('dynamicTemplate', {})
        stagesTemplate = dynamicTemplate.get('stages', {})
        stageMetadata = dynamicTemplate.get('extensions', {}).get('relationMetadata', None)
        executionMetadata = dynamicTemplate.get('metadata', {}).get('executions', None)
        for application in spinnakerApplications:
            applicationName = application["name"]
            data = []
            stageData = []
            # Resume from the last tracked build time for this application,
            # falling back to the configured startFrom boundary.
            timestamp = self.tracking.get(applicationName, startFrom)
            lastUpdatedDate = None
            executionsUrl = applicationsUrl + '/' + applicationName + '/executions/search?triggerTimeStartBoundary=' + str(timestamp)
            executions = self.getResponse(executionsUrl, 'GET', None, None, None, reqHeaders=headers)
            pagenum = 0
            fetchNextPage = True
            # Page through results 10 at a time until an empty page comes back.
            while fetchNextPage:
                if len(executions) == 0:
                    fetchNextPage = False
                    break
                for execution in executions:
                    data += self.parseResponse(responseTemplate, execution)
                    stages = execution.get("stages", {})
                    stageData += self.getStageDetails(stages, stagesTemplate, execution["id"])
                    # Only the first execution's buildTime is recorded —
                    # presumably results are newest-first; confirm with the
                    # Gate API ordering.
                    if lastUpdatedDate is None:
                        lastUpdatedDate = execution.get("buildTime")
                # +1 so the next incremental fetch excludes this build.
                self.tracking[applicationName] =str(lastUpdatedDate + 1)
                # NOTE: data/stageData keep accumulating across pages, so each
                # publish re-sends earlier pages' rows as well.
                self.publishToolsData(data, executionMetadata, "buildTime", None, True)
                self.publishToolsData(stageData, stageMetadata, "stageStartTime", None, True)
                pagenum = pagenum + 10
                executionsPageUrl = executionsUrl + '&startIndex=' + str(pagenum)
                executions = self.getResponse(executionsPageUrl, 'GET', None, None, None, reqHeaders=headers)
        self.updateTrackingJson(self.tracking)
    def getStageDetails(self, stages, template, executionId):
        """Parse each stage with the stage template, tagging the parsed row
        with the owning pipeline execution's id."""
        data = []
        for stage in stages:
            stageData = self.parseResponse(template, stage)
            stageData[0]['pipelineExecutionId'] = executionId
            data += stageData
        return data
if __name__ == "__main__":
    # Instantiating the agent starts it — presumably BaseAgent.__init__
    # drives setup and the processing loop; confirm against BaseAgent3.
    SpinnakerAgent()
|
{
"content_hash": "619f95328df49d5c3863c2f069ec0b0b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 133,
"avg_line_length": 45.015151515151516,
"alnum_prop": 0.6199932682598451,
"repo_name": "CognizantOneDevOps/Insights",
"id": "755d33b0e526e6ec02325d4c386976b925a6813a",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PlatformAgents/com/cognizant/devops/platformagents/agents/ci/spinnaker/SpinnakerAgent3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "93761"
},
{
"name": "CSS",
"bytes": "362982"
},
{
"name": "Dockerfile",
"bytes": "30938"
},
{
"name": "HTML",
"bytes": "1118798"
},
{
"name": "Java",
"bytes": "4099059"
},
{
"name": "JavaScript",
"bytes": "39094"
},
{
"name": "Python",
"bytes": "1518111"
},
{
"name": "SCSS",
"bytes": "218059"
},
{
"name": "Shell",
"bytes": "541300"
},
{
"name": "TypeScript",
"bytes": "2097909"
}
],
"symlink_target": ""
}
|
import cairocffi
from . import base
from os import statvfs
import time
# Public widget classes exported by this module (the _Graph base is private).
__all__ = [
    'CPUGraph',
    'MemoryGraph',
    'SwapGraph',
    'NetGraph',
    'HDDGraph',
    'HDDBusyGraph',
]
class _Graph(base._Widget):
    """Base class for the scrolling graph widgets in this module.

    Keeps a fixed-length ring of samples (`self.values`, newest first),
    rescales them against `self.maxvalue`, and renders them with cairo in
    one of three styles ('box', 'line', 'linefill'). Subclasses implement
    `update_graph()` and feed new samples in via `push()`.
    """
    # When True, subclasses manage self.maxvalue themselves instead of it
    # tracking max(self.values).
    fixed_upper_bound = False
    defaults = [
        ("graph_color", "18BAEB", "Graph color"),
        ("fill_color", "1667EB.3", "Fill color for linefill graph"),
        ("border_color", "215578", "Widget border color"),
        ("border_width", 2, "Widget border width"),
        ("margin_x", 3, "Margin X"),
        ("margin_y", 3, "Margin Y"),
        ("samples", 100, "Count of graph samples."),
        ("frequency", 1, "Update frequency in seconds"),
        ("type", "linefill", "'box', 'line', 'linefill'"),
        ("line_width", 3, "Line width"),
        ("start_pos", "bottom", "Drawer starting position ('bottom'/'top')"),
    ]
    def __init__(self, width=100, **config):
        base._Widget.__init__(self, width, **config)
        self.add_defaults(_Graph.defaults)
        self.values = [0] * self.samples
        self.maxvalue = 0
        self.oldtime = time.time()
        self.lag_cycles = 0
    def timer_setup(self):
        # Kick off the periodic update loop.
        self.timeout_add(self.frequency, self.update)
    @property
    def graphwidth(self):
        # Drawable width inside borders and margins.
        return self.width - self.border_width * 2 - self.margin_x * 2
    @property
    def graphheight(self):
        # Drawable height inside borders and margins.
        return self.bar.height - self.margin_y * 2 - self.border_width * 2
    def draw_box(self, x, y, values):
        # One filled rectangle per sample.
        step = self.graphwidth / float(self.samples)
        self.drawer.set_source_rgb(self.graph_color)
        for val in values:
            val = self.val(val)
            self.drawer.fillrect(x, y - val, step, val)
            x += step
    def draw_line(self, x, y, values):
        # Single stroked polyline through all samples.
        step = self.graphwidth / float(self.samples - 1)
        self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
        self.drawer.set_source_rgb(self.graph_color)
        self.drawer.ctx.set_line_width(self.line_width)
        for val in values:
            self.drawer.ctx.line_to(x, y - self.val(val))
            x += step
        self.drawer.ctx.stroke()
    def draw_linefill(self, x, y, values):
        # Polyline stroked in graph_color, then closed along the baseline
        # and filled with fill_color.
        step = self.graphwidth / float(self.samples - 2)
        self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
        self.drawer.set_source_rgb(self.graph_color)
        self.drawer.ctx.set_line_width(self.line_width)
        for index, val in enumerate(values):
            self.drawer.ctx.line_to(x + index * step, y - self.val(val))
        self.drawer.ctx.stroke_preserve()
        self.drawer.ctx.line_to(
            x + (len(values) - 1) * step,
            y - 1 + self.line_width / 2.0
        )
        self.drawer.ctx.line_to(x, y - 1 + self.line_width / 2.0)
        self.drawer.set_source_rgb(self.fill_color)
        self.drawer.ctx.fill()
    def val(self, val):
        # Flip the sign when the graph grows downward from the top edge.
        if self.start_pos == 'bottom':
            return val
        elif self.start_pos == 'top':
            return -val
        else:
            raise ValueError("Unknown starting position: %s." % self.start_pos)
    def draw(self):
        self.drawer.clear(self.background or self.bar.background)
        if self.border_width:
            self.drawer.set_source_rgb(self.border_color)
            self.drawer.ctx.set_line_width(self.border_width)
            self.drawer.ctx.rectangle(
                self.margin_x + self.border_width / 2.0,
                self.margin_y + self.border_width / 2.0,
                self.graphwidth + self.border_width,
                self.bar.height - self.margin_y * 2 - self.border_width,
            )
            self.drawer.ctx.stroke()
        x = self.margin_x + self.border_width
        y = self.margin_y + self.border_width
        if self.start_pos == 'bottom':
            y += self.graphheight
        elif not self.start_pos == 'top':
            raise ValueError("Unknown starting position: %s." % self.start_pos)
        # Normalize samples to pixel heights; reversed so the newest sample
        # is drawn at the right edge.
        k = 1.0 / (self.maxvalue or 1)
        scaled = [self.graphheight * val * k for val in reversed(self.values)]
        if self.type == "box":
            self.draw_box(x, y, scaled)
        elif self.type == "line":
            self.draw_line(x, y, scaled)
        elif self.type == "linefill":
            self.draw_linefill(x, y, scaled)
        else:
            raise ValueError("Unknown graph type: %s." % self.type)
        self.drawer.draw(offsetx=self.offset, width=self.width)
    def push(self, value):
        """Prepend a new sample (duplicated lag_cycles times to cover missed
        update ticks), trim to `samples`, and redraw."""
        if self.lag_cycles > self.samples:
            # compensate lag by sending the same value up to
            # the graph samples limit
            self.lag_cycles = 1
        self.values = ([value] * min(self.samples, self.lag_cycles)) + self.values
        self.values = self.values[:self.samples]
        if not self.fixed_upper_bound:
            self.maxvalue = max(self.values)
        self.draw()
    def update(self):
        # lag detection
        newtime = time.time()
        self.lag_cycles = int((newtime - self.oldtime) / self.frequency)
        self.oldtime = newtime
        self.update_graph()
        self.timeout_add(self.frequency, self.update)
    def fullfill(self, value):
        # Seed the whole history with one value (used by subclass __init__s).
        self.values = [value] * len(self.values)
class CPUGraph(_Graph):
    """
    Display CPU usage graph.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("core", "all", "Which core to show (all/0/1/2/...)"),
    ]
    # Percentages: the scale is always 0..100.
    fixed_upper_bound = True
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        self.add_defaults(CPUGraph.defaults)
        self.maxvalue = 100
        self.oldvalues = self._getvalues()
    def _getvalues(self):
        """Read /proc/stat and return (user, nice, sys, idle) jiffy counters
        for the configured core (or the aggregate line)."""
        with open('/proc/stat') as file:
            lines = file.readlines()
        # default to all cores (first line)
        line = lines.pop(0)
        # core specified, grab the corresponding line
        if isinstance(self.core, int):
            # we already removed the first line from the list,
            # so it's 0 indexed now :D
            line = lines[self.core]
            if not line.startswith("cpu%s" % self.core):
                raise ValueError("No such core: %s" % self.core)
        name, user, nice, sys, idle, iowait, tail = line.split(None, 6)
        return (int(user), int(nice), int(sys), int(idle))
    def update_graph(self):
        # Busy percentage is the delta of (user+nice+sys) over the delta of
        # (busy+idle) since the previous sample.
        nval = self._getvalues()
        oval = self.oldvalues
        busy = nval[0] + nval[1] + nval[2] - oval[0] - oval[1] - oval[2]
        total = busy + nval[3] - oval[3]
        # sometimes this value is zero for unknown reason (time shift?)
        # we just sent the previous value, because it gives us no info about
        # cpu load, if it's zero.
        if total:
            push_value = busy * 100.0 / total
            self.push(push_value)
        else:
            self.push(self.values[0])
        self.oldvalues = nval
def get_meminfo():
    """Parse /proc/meminfo into a dict mapping field names to integer values
    (the kernel reports them in kB)."""
    info = {}
    with open('/proc/meminfo') as source:
        for entry in source:
            name, tail = entry.split(':')
            info[name] = int(tail.split()[0])
    return info
class MemoryGraph(_Graph):
    """
    Displays a memory usage graph.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    # Scale is fixed to the machine's total RAM.
    fixed_upper_bound = True
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        val = self._getvalues()
        self.maxvalue = val['MemTotal']
        # "Used" excludes buffers and page cache, matching free(1)'s view.
        mem = val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
        self.fullfill(mem)
    def _getvalues(self):
        return get_meminfo()
    def update_graph(self):
        val = self._getvalues()
        self.push(
            val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
        )
class SwapGraph(_Graph):
    """
    Display a swap info graph.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    # Scale is fixed to the total swap size (re-checked on each update).
    fixed_upper_bound = True
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        val = self._getvalues()
        self.maxvalue = val['SwapTotal']
        swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
        self.fullfill(swap)
    def _getvalues(self):
        return get_meminfo()
    def update_graph(self):
        val = self._getvalues()
        swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
        # can change, swapon/off
        if self.maxvalue != val['SwapTotal']:
            self.maxvalue = val['SwapTotal']
            self.fullfill(swap)
        self.push(swap)
class NetGraph(_Graph):
    """
    Display a network usage graph.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        (
            "interface",
            "auto",
            "Interface to display info for ('auto' for detection)"
        ),
        ("bandwidth_type", "down", "down(load)/up(load)"),
    ]
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        self.add_defaults(NetGraph.defaults)
        if self.interface == "auto":
            try:
                self.interface = self.get_main_iface()
            except RuntimeError:
                self.log.warning(
                    "NetGraph - Automatic interface detection failed, "
                    "falling back to 'eth0'"
                )
                self.interface = "eth0"
        # sysfs byte counter for the chosen direction on this interface.
        self.filename = '/sys/class/net/{interface}/statistics/{type}'.format(
            interface=self.interface,
            type=self.bandwidth_type == 'down' and 'rx_bytes' or 'tx_bytes'
        )
        # Prime the counter so the first sample is a delta, not the total.
        self.bytes = 0
        self.bytes = self._getValues()
    def _getValues(self):
        """Return bytes transferred since the previous call (0 on read error)."""
        try:
            with open(self.filename) as file:
                val = int(file.read())
                rval = val - self.bytes
                self.bytes = val
                return rval
        except IOError:
            return 0
    def update_graph(self):
        val = self._getValues()
        self.push(val)
    @staticmethod
    def get_main_iface():
        """Pick the interface owning the default route (destination 0) from
        /proc/net/route, falling back to the first route listed."""
        filename = "/proc/net/route"
        def make_route(line):
            return dict(zip(['iface', 'dest'], line.split()))
        routes = [make_route(line) for line in list(open(filename))[1:]]
        try:
            return next(
                (r for r in routes if not int(r['dest'], 16)),
                routes[0]
            )['iface']
        except (KeyError, IndexError, ValueError):
            raise RuntimeError('No valid interfaces available')
class HDDGraph(_Graph):
    """
    Display HDD free or used space graph.
    """
    # Scale is fixed to the partition's total size.
    fixed_upper_bound = True
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("path", "/", "Partition mount point."),
        ("space_type", "used", "free/used")
    ]
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        self.add_defaults(HDDGraph.defaults)
        stats = statvfs(self.path)
        self.maxvalue = stats.f_blocks * stats.f_frsize
        values = self._getValues()
        self.fullfill(values)
    def _getValues(self):
        """Return used or available bytes for the partition, per space_type."""
        stats = statvfs(self.path)
        if self.space_type == 'used':
            return (stats.f_blocks - stats.f_bfree) * stats.f_frsize
        else:
            # 'free' reports blocks available to unprivileged users.
            return stats.f_bavail * stats.f_frsize
    def update_graph(self):
        val = self._getValues()
        self.push(val)
class HDDBusyGraph(_Graph):
    """
    Parses /sys/block/<dev>/stat file and extracts overall device
    IO usage, based on ``io_ticks``'s value.
    See https://www.kernel.org/doc/Documentation/block/stat.txt
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("device", "sda", "Block device to display info for")
    ]
    def __init__(self, **config):
        _Graph.__init__(self, **config)
        self.add_defaults(HDDBusyGraph.defaults)
        self.path = '/sys/block/{dev}/stat'.format(
            dev=self.device
        )
        self._prev = 0
    def _getActivity(self):
        """Return io_ticks accumulated since the previous call (0 on error)."""
        try:
            # io_ticks is field number 9
            with open(self.path) as f:
                io_ticks = int(f.read().split()[9])
        except IOError:
            return 0
        activity = io_ticks - self._prev
        self._prev = io_ticks
        return activity
    def update_graph(self):
        self.push(self._getActivity())
|
{
"content_hash": "03455e1c9af803d0ff13b8f09d17dd38",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 82,
"avg_line_length": 31.439086294416242,
"alnum_prop": 0.5487204327117139,
"repo_name": "aniruddhkanojia/qtile",
"id": "5ab252c05dca6a5775c767645f4910c1e6016a07",
"size": "13941",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "libqtile/widget/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "862182"
},
{
"name": "Shell",
"bytes": "2765"
}
],
"symlink_target": ""
}
|
from flask import Flask
app = Flask(__name__)
# Load settings from the 'config' module found on the import path.
app.config.from_object('config')
|
{
"content_hash": "ad4ec99e58467e89e0c5c625d529032c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 20,
"alnum_prop": 0.7125,
"repo_name": "Akagi201/learning-python",
"id": "f3d0dca854e3089a28fc109ebbb3657711bc0c37",
"size": "80",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask/Flask-Script/test1/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod
class Algo(object):
    """Training algorithm interface: train() runs one epoch and returns
    the resulting error."""
    # NOTE(review): @abstractmethod is not enforced here because the class
    # does not use ABCMeta/abc.ABC — subclasses that forget train() will
    # only fail at call time, via NotImplementedError.
    @abstractmethod
    def train(self, dataset, LR, M):
        raise NotImplementedError
def supervised(algo, dataset,
               learningRate, momentum,
               epoches, E=0.001):
    """Supervised training on the given dataset (a sequence of
    2-element tuples).

    Runs up to `epoches` epochs of `algo.train`, stopping early once the
    reported error drops to the threshold `E`.

    Returns a tuple of (converged, error); error is float('inf') when
    epoches == 0 and no training took place.
    """
    # Bug fix: `e` was unbound (NameError) when epoches == 0.
    e = float('inf')
    for _ in range(epoches):
        e = algo.train(dataset, learningRate, momentum)
        if e <= E:
            return True, e
    return False, e
|
{
"content_hash": "b3a814994ac2fd031659c2ce82da196d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 28.4,
"alnum_prop": 0.625,
"repo_name": "khachik/ghugh",
"id": "27f3f13be921ef10d5f63acf142bc029674a54d4",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghugh/trainer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "44320"
}
],
"symlink_target": ""
}
|
class ModuleDocFragment(object):
    """Shared option documentation injected into dellos6 module docs."""

    # Standard files documentation fragment
    # (typos fixed: "priviledged" -> "privileged", "excecute" -> "execute")
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport. The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device.
    required: false
    default: 22
  username:
    description:
      - User to authenticate the SSH session to the remote device. If the
        value is not specified in the task, the value of environment variable
        ANSIBLE_NET_USERNAME will be used instead.
    required: false
  password:
    description:
      - Password to authenticate the SSH session to the remote device. If the
        value is not specified in the task, the value of environment variable
        ANSIBLE_NET_PASSWORD will be used instead.
    required: false
    default: null
  ssh_keyfile:
    description:
      - Path to an ssh key used to authenticate the SSH session to the remote
        device. If the value is not specified in the task, the value of
        environment variable ANSIBLE_NET_SSH_KEYFILE will be used instead.
    required: false
  authorize:
    description:
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        ANSIBLE_NET_AUTHORIZE will be used instead.
    required: false
    default: no
    choices: ['yes', 'no']
  auth_pass:
    description:
      - Specifies the password to use if required to enter privileged mode
        on the remote device. If I(authorize) is false, then this argument
        does nothing. If the value is not specified in the task, the value of
        environment variable ANSIBLE_NET_AUTH_PASS will be used instead.
    required: false
    default: none
  timeout:
    description:
      - Specifies idle timeout (in seconds) for the connection. Useful if the
        console freezes before continuing. For example when saving
        configurations.
    required: false
    default: 10
  provider:
    description:
      - Convenience method that allows all M(dellos6) arguments to be passed as
        a dict object. All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
    required: false
    default: null
"""
|
{
"content_hash": "36b4b7dad9daccfac695b7c3f363bb3c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 38.44927536231884,
"alnum_prop": 0.6950621937429325,
"repo_name": "abtreece/ansible",
"id": "dd9944b99595a2f7817a1fe632c2c42541571ffa",
"size": "3395",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable-2.2",
"path": "lib/ansible/utils/module_docs_fragments/dellos6.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import os
import sys
import time
import httplib
import hmac
import json
import hashlib
import urllib
import re
import socket
# Module-level defaults used by Pusher when constructor args are omitted;
# app_id/key/secret must be set by the application (or via pusher_from_url).
host = 'api.pusherapp.com'
port = 80
app_id = None
key = None
secret = None
# Valid channel names: alphanumerics plus -_=@,.;
channel_name_re = re.compile('^[-a-zA-Z0-9_=@,.;]+$')
# App ids are strictly numeric.
app_id_re = re.compile('^[0-9]+$')
def url2options(url):
    """Split a Pusher URL of the form http://key:secret@host/apps/app_id
    into its connection options.

    Returns:
        dict with 'key', 'secret', 'host' and 'app_id' entries.

    Raises:
        ValueError: if the URL does not start with 'http://' or a separator
            is missing.
    """
    # Bug fix: was an `assert`, which is silently stripped under `python -O`,
    # letting malformed URLs through to confusing split errors below.
    if not url.startswith('http://'):
        raise ValueError("invalid URL")
    url = url[7:]
    key, url = url.split(':', 1)
    secret, url = url.split('@', 1)
    host, url = url.split('/', 1)
    url, app_id = url.split('/', 1)
    return {'key': key, 'secret': secret, 'host': host, 'app_id': app_id}
def pusher_from_url(url=None):
    """Build a Pusher from a URL, defaulting to the PUSHER_URL environment
    variable when no (truthy) URL is supplied."""
    if not url:
        url = os.environ['PUSHER_URL']
    return Pusher(**url2options(url))
class Pusher(object):
    """Holds Pusher connection settings plus a cache of named channels.

    Any setting not supplied explicitly falls back to the module-level
    host/port/app_id/key/secret globals. Channels are created lazily via
    indexing: ``pusher['my-channel']``.
    """
    def __init__(self, app_id=None, key=None, secret=None, host=None, port=None, encoder=None):
        _globals = globals()
        self.app_id = str(app_id or _globals['app_id'])
        if not app_id_re.match(self.app_id):
            raise NameError("Invalid app id")
        self.key = key or _globals['key']
        self.secret = secret or _globals['secret']
        self.host = host or _globals['host']
        self.port = port or _globals['port']
        # Optional json.JSONEncoder subclass used when serializing payloads.
        self.encoder = encoder
        self._channels = {}
    def __getitem__(self, key):
        # Bug fix: dict.has_key() was removed in Python 3; the `in` operator
        # behaves identically on both Python 2 and 3.
        if key not in self._channels:
            return self._make_channel(key)
        return self._channels[key]
    def _make_channel(self, name):
        # `channel_type` is a module-level hook — presumably bound to Channel
        # (or a subclass) elsewhere in this module; confirm.
        self._channels[name] = channel_type(name, self)
        return self._channels[name]
class Channel(object):
    """A single Pusher channel: triggers events over the HTTP API with
    HMAC-SHA256 signed requests, and produces private-channel auth
    signatures for clients. (Python 2 code: httplib/unicode.)"""
    def __init__(self, name, pusher):
        self.pusher = pusher
        self.name = str(name)
        if not channel_name_re.match(self.name):
            raise NameError("Invalid channel id: %s" % self.name)
        self.path = '/apps/%s/channels/%s/events' % (self.pusher.app_id, urllib.quote(self.name))
    def trigger(self, event, data={}, socket_id=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """POST an event to this channel; True on 202, raises a specific
        error class for the known failure statuses."""
        json_data = json.dumps(data, cls=self.pusher.encoder)
        query_string = self.signed_query(event, json_data, socket_id)
        signed_path = "%s?%s" % (self.path, query_string)
        status, resp_content = self.send_request(signed_path, json_data, timeout=timeout)
        if status == 202:
            return True
        elif status == 401:
            raise AuthenticationError("Status: 401; Message: %s" % resp_content)
        elif status == 404:
            raise NotFoundError("Status: 404; Message: %s" % resp_content)
        elif status == 403:
            raise AppDisabledOrMessageQuotaError("Status: 403; Message: %s" % resp_content)
        else:
            raise UnexpectedReturnStatusError("Status: %s; Message: %s" % (status, resp_content))
    def signed_query(self, event, json_data, socket_id):
        # Sign "POST\n<path>\n<query>" with the app secret, per the Pusher
        # HTTP API auth scheme.
        query_string = self.compose_querystring(event, json_data, socket_id)
        string_to_sign = "POST\n%s\n%s" % (self.path, query_string)
        signature = hmac.new(self.pusher.secret, string_to_sign, hashlib.sha256).hexdigest()
        return "%s&auth_signature=%s" % (query_string, signature)
    def compose_querystring(self, event, json_data, socket_id):
        # body_md5 is the MD5 of the JSON payload; keys must stay in this
        # (alphabetical) order for the signature to verify server-side.
        hasher = hashlib.md5()
        hasher.update(json_data)
        hash_str = hasher.hexdigest()
        ret = "auth_key=%s&auth_timestamp=%s&auth_version=1.0&body_md5=%s&name=%s" % (self.pusher.key, int(time.time()), hash_str, event)
        if socket_id:
            ret += "&socket_id=" + unicode(socket_id)
        return ret
    def send_request(self, signed_path, data_string, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # Plain (non-TLS) HTTP; port comes from the Pusher settings.
        http = httplib.HTTPConnection(self.pusher.host, self.pusher.port, timeout=timeout)
        http.request('POST', signed_path, data_string, {'Content-Type': 'application/json'})
        resp = http.getresponse()
        return resp.status, resp.read()
    def authenticate(self, socket_id, custom_data=None):
        """Build the auth response dict for a private/presence channel
        subscription request."""
        if custom_data:
            custom_data = json.dumps(custom_data, cls=self.pusher.encoder)
        auth = self.authentication_string(socket_id, custom_data)
        r = {'auth': auth}
        if custom_data:
            r['channel_data'] = custom_data
        return r
    def authentication_string(self, socket_id, custom_string=None):
        # "key:HMAC(secret, socket_id:channel[:custom])" per the Pusher
        # client auth scheme.
        if not socket_id:
            raise Exception("Invalid socket_id")
        string_to_sign = "%s:%s" % (socket_id, self.name)
        if custom_string:
            string_to_sign += ":%s" % custom_string
        signature = hmac.new(self.pusher.secret, string_to_sign, hashlib.sha256).hexdigest()
        return "%s:%s" % (self.pusher.key,signature)
    def get_absolute_path(self, signed_path):
        return 'http://%s%s' % (self.pusher.host, signed_path)
class GoogleAppEngineChannel(Channel):
    """Channel that issues the REST call via App Engine's urlfetch service."""

    def send_request(self, signed_path, data_string, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """POST `data_string` to `signed_path`; return (status_code, body).

        Bug fix: `Channel.trigger` always invokes
        `self.send_request(..., timeout=timeout)`, so the previous
        two-argument override raised TypeError on every trigger.  The
        parameter is now accepted and, when a concrete value is supplied,
        forwarded as urlfetch's `deadline`.
        """
        from google.appengine.api import urlfetch
        kwargs = {}
        if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
            # urlfetch expresses timeouts as a `deadline` in seconds.
            kwargs['deadline'] = timeout
        response = urlfetch.fetch(
            url=self.get_absolute_path(signed_path),
            payload=data_string,
            method=urlfetch.POST,
            headers={'Content-Type': 'application/json'},
            **kwargs
        )
        return response.status_code, response.content
# App Engine NDB channel, outer try import/except as it uses decorator
# (the whole class definition must be skipped when ndb is unavailable,
# because @ndb.tasklet is evaluated at class-body time).
try:
    from google.appengine.ext import ndb
    class GaeNdbChannel(GoogleAppEngineChannel):
        @ndb.tasklet
        def trigger_async(self, event, data={}, socket_id=None):
            """Async trigger that in turn calls send_request_async"""
            # NOTE(review): unlike Channel.trigger, 403 is not mapped to
            # AppDisabledOrMessageQuotaError here -- it falls into the
            # generic Exception branch; confirm this is intentional.
            json_data = json.dumps(data, cls=self.pusher.encoder)
            status = yield self.send_request_async(self.signed_query(event, json_data, socket_id), json_data)
            if status == 202:
                raise ndb.Return(True)
            elif status == 401:
                raise AuthenticationError
            elif status == 404:
                raise NotFoundError
            else:
                raise Exception("Unexpected return status %s" % status)
        @ndb.tasklet
        def send_request_async(self, query_string, data_string):
            """Send request and yield while waiting for future result"""
            ctx = ndb.get_context()
            # Use https (and certificate validation) only on port 443.
            secure = 's' if self.pusher.port == 443 else ''
            absolute_url = 'http%s://%s%s?%s' % (secure, self.pusher.host, self.path, query_string)
            result = yield ctx.urlfetch(
                url=absolute_url,
                payload=data_string,
                method='POST',
                headers={'Content-Type': 'application/json'},
                validate_certificate=bool(secure),
            )
            raise ndb.Return(result.status_code)
except ImportError:
    pass
class TornadoChannel(Channel):
    # Non-blocking channel: the HTTP request is handed to Tornado's async
    # client and the real response is delivered to `callback`.
    def trigger(self, event, data={}, socket_id=None, callback=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # NOTE(review): storing the callback on self makes concurrent
        # triggers on one channel instance race -- confirm single-flight use.
        self.callback = callback
        return super(TornadoChannel, self).trigger(event, data, socket_id, timeout=timeout)
    def send_request(self, signed_path, data_string, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # Tornado uses None for "no timeout", so translate the stdlib
        # sentinel before building the request.
        timeout = None if timeout == socket._GLOBAL_DEFAULT_TIMEOUT else timeout
        import tornado.httpclient
        absolute_url = self.get_absolute_path(signed_path)
        request = tornado.httpclient.HTTPRequest(absolute_url, method='POST', body=data_string, request_timeout=timeout)
        client = tornado.httpclient.AsyncHTTPClient()
        client.fetch(request, callback=self.callback)
        # Returning 202 to avoid Channel errors. Actual error handling takes place in callback.
        return 202, ""
class AuthenticationError(Exception):
    """Pusher returned 401 -- the request signature or key was rejected."""
    pass
class NotFoundError(Exception):
    """Pusher returned 404 -- unknown app id or channel path."""
    pass
class AppDisabledOrMessageQuotaError(Exception):
    """Pusher returned 403 -- application disabled or message quota hit."""
    pass
class UnexpectedReturnStatusError(Exception):
    """Pusher returned a status code this client does not recognise."""
    pass
# Default Channel implementation used when constructing channels.
channel_type = Channel
|
{
"content_hash": "d9d1c6d430b39897f177d5f9789b910d",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 137,
"avg_line_length": 38.2807881773399,
"alnum_prop": 0.6233431990734783,
"repo_name": "cloud9209/chatting",
"id": "f43a4813abdaf80bd1e1a345756364d98d322064",
"size": "7771",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/pusher/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Python",
"bytes": "950147"
}
],
"symlink_target": ""
}
|
"""Extensions to Jax/Flax core functions for Mixture of Experts training."""
from typing import Sequence
import jax.numpy as jnp
from flaxformer.types import Array
def scatter_nd(indices: Array, updates: Array, shape: Sequence[int]) -> Array:
  """Pure-JAX equivalent of `tf.scatter_nd`.

  Creates a zero-initialized array of `shape` and accumulates `updates` into
  it at the positions given by `indices`.

  See https://www.tensorflow.org/api_docs/python/tf/scatter_nd, and
  https://github.com/google/jax/discussions/3658.

  Notes:
  - Duplicate indices accumulate: their update values are summed at that
    output position.
  - Indices that fall outside the output array are silently dropped.

  Args:
    indices: [num_items, n_dims] array of positions to update.
    updates: [num_items, ...] array of values to scatter.
    shape: Dimensions of the output array.

  Returns:
    An array of shape `shape` with the dtype of `updates`: zero everywhere
    except the scattered (accumulated) positions.
  """
  out = jnp.zeros(shape, updates.dtype)
  # The trailing dimension of `indices` holds one coordinate per output axis.
  # Split it into a tuple of per-axis coordinate arrays -- the form that
  # `.at[...]` indexing expects -- mirroring `tf.unstack(indices, axis=-1)`.
  coords = tuple(jnp.moveaxis(indices, -1, 0))
  return out.at[coords].add(updates)
|
{
"content_hash": "58417ad1299e27d423c0c13497c56709",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 35.25714285714286,
"alnum_prop": 0.7163695299837926,
"repo_name": "google/flaxformer",
"id": "6040b5ed51dc733a0174fcc282adf904a3b233ea",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "flaxformer/architectures/moe/scatter_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1504920"
}
],
"symlink_target": ""
}
|
import time, datetime, os
from collections import OrderedDict
import csv, json, re, sys
import requests
import random
import operator
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import HourLocator, MinuteLocator, SecondLocator, DateFormatter
from matplotlib.patches import Ellipse
# Global control/config dict, populated from stock.json at script startup
# (URL, UTC offset, polling interval, records to chart, ...).
ctrl = {}
class Quote(object):
    """One OHLCV bar: timestamp, open/high/low/close prices and volume."""
    def dump(self):
        # Debug print of all fields (Python 2 print statement).
        print self.dt, self.o, self.h, self.l, self.c, self.v
    def __init__(self, dt, o, h, l, c, v):
        assert(h >= l)
        self.dt = dt # time stamp
        self.o = o; # open price
        self.h = h; # high
        self.l = l; # low
        self.c = c; # close
        self.v = v; # volume
    def get_day(self):
        return self.dt.day
    def get_median(self):
        # Midpoint of open and close (not of high/low).
        return (self.o + self.c) / 2
    # Return the relative change of this quote's median price wrt ref price.
    def get_ratio(self,ref_score):
        return ((self.get_median() - ref_score) / ref_score)
    def get_normalized_dt(self):
        # Project the time-of-day onto a fixed dummy date (1971-01-01) so
        # quotes from different days can be overlaid on one intraday axis.
        return datetime.datetime(1971, 1, 1, \
            self.dt.hour, self.dt.minute, self.dt.second)
#
# Class Plot
#
class Plot(object):
    """Thin wrapper around one matplotlib figure for intraday score charts."""
    def __init__(self, chart_type):
        # Marker/linestyle pools; a random choice per series keeps multiple
        # day-series visually distinguishable.
        self.markers = ['o', 'v', '^', 's', 'p', '*', 'h', 'H', 'D', 'd']
        self.ls = ['dashed', 'dashdot', 'dotted']
        self.hours = HourLocator() # every hour
        self.minutes = MinuteLocator() # every minute
        self.seconds = SecondLocator() # every second
        self.hoursFmt = DateFormatter('%H')
        self.fig, self.ax = plt.subplots(figsize=(20, 10))
        self.chart_type = chart_type
        return
    def plot_scores(self, dates, scores, mfc, marker, quote):
        # One line per trading day, labelled "month/day"; annotate the
        # day's close price next to its first quote.
        self.ax.plot_date(dates, scores,
            ls=random.choice(self.ls), marker=marker,
            markersize=5.0, markerfacecolor=mfc,
            label=str(quote.dt.month)+'/'+str(quote.dt.day))
        self.ax.text(quote.dt, quote.c, str(quote.c), fontsize=12, color='g')
        return
    def plot_buys(self, buy_quotes, quotes_2_scores):
        # Overlay big blue dots at the (time, score) of each buy event.
        if not buy_quotes:
            return
        norm_dates = [q.get_normalized_dt() for q in buy_quotes]
        scores = [score for quote, score in quotes_2_scores.iteritems() \
            for buy_quote in buy_quotes if quote.dt == buy_quote.dt]
        size = [200.0 for _ in buy_quotes]
        self.ax.scatter(norm_dates, scores, s=size, color='b', alpha=0.8)
        return
    def plot_future(self, future_scores):
        # future_scores is a list of (datetime, score) pairs.
        (dates, scores) = zip(*future_scores)
        self.ax.plot_date(dates, scores,
            ls=random.choice(self.ls), marker='D',
            markersize=5.0, markerfacecolor='g',
            label='future')
        return
    def format_ticks(self):
        self.ax.xaxis.set_major_locator(self.hours)
        self.ax.xaxis.set_major_formatter(self.hoursFmt)
        self.ax.xaxis.set_minor_locator(self.minutes)
        self.ax.autoscale_view()
        # format the coords message box
        def price(x):
            return '$%.3f' % x
        self.ax.fmt_xdata = DateFormatter('%H-%M-%S')
        self.ax.fmt_ydata = price
        self.ax.grid(True)
        self.fig.autofmt_xdate()
        plt.legend(loc='best', shadow=True)
        plt.tick_params(axis='y', which='both', labelleft='on', labelright='on')
        return
    def format_labels(self, interval_seconds):
        plt.ylabel(self.chart_type)
        plt.xlabel('Interval ' + str(interval_seconds / 60.0) + ' min')
        return
    def format_title(self, title):
        plt.title(title)
        return
    def show(self):
        # Flush stdout first so console output appears before the blocking
        # plt.show() call.
        sys.stdout.flush()
        plt.show()
        return
    def annotate(self, x, y, xytext):
        # Callout bubble with an arrow pointing at (x, y); xytext is the
        # label offset in points.
        self.ax.annotate('%.3f' % y, xy=(x, y), xycoords='data',
            bbox=dict(boxstyle="round4", fc="w", alpha=0.75),
            xytext=xytext, textcoords='offset points', size=14,
            arrowprops=dict(arrowstyle="fancy",
                fc="0.3", ec="none",
                patchB=Ellipse((2, -1), 0.5, 0.5),
                connectionstyle="angle3,angleA=0,angleB=-90"),
            )
        return
#
#
#
class Stock(object):
    """In-memory book of intraday quotes for one symbol.

    `self.book` maps a "year_month_day" page key to that day's list of
    quotes in time order.  Also tracks buy/sell events and renders
    history + forecast charts via Plot.
    """
    # NOTE(review): __repr__ is defined twice in this class; this first
    # definition is silently overridden by the later one.
    def __repr__(self):
        return "Stock()"
    def __str__(self):
        return self.symbol
    def __init__(self, symbol, interval_seconds, buys, sells):
        self.symbol = symbol
        # The page_num, or key, of self.book is the yr_mo_day, and the
        # content of each page is a list of quotes on that day.
        self.book = OrderedDict()
        self.interval_seconds = interval_seconds
        self.knn_candidate_set = set()
        # Create buys and sells records, and convert them
        # from string to datetime type
        self.buys = []
        self.sells = []
        for buy in buys:
            self.buys.append(datetime.datetime.strptime(buy, "%Y-%m-%d %H:%M:%S"))
        for sell in sells:
            self.sells.append(datetime.datetime.strptime(sell, "%Y-%m-%d %H:%M:%S"))
        self.buys.sort()
        self.sells.sort()
        return
    # Given a list of quotes, return a sub-list of it that have buy events
    def get_buy_quotes(self, quotes):
        assert(quotes)
        results = []
        for buy in self.buys:
            if (buy > quotes[-1].dt) or (buy < quotes[0].dt):
                continue
            # The first quote at or after the buy timestamp represents it.
            for quote in quotes:
                if quote.dt >= buy:
                    results.append(quote)
                    break;
        return results;
    # Given a quote, return the page_num, or key, where this quote should
    # belong to.
    def get_page_num_str(self, quote):
        return str(quote.dt.year) + '_' + str(quote.dt.month) + '_' + str(quote.dt.day)
    # Append a new quote according to its day.
    def append(self, quote):
        # page_num is the key of self.book
        page_num = self.get_page_num_str(quote)
        if not self.book.has_key(page_num):
            self.book[page_num] = []
        self.book[page_num].append(quote)
        return
    def dump(self):
        # NOTE(review): self.quotes does not exist (data lives in
        # self.book); calling this would raise AttributeError.
        print self.symbol, len(self.quotes)
        for quote in self.quotes:
            quote.dump()
    def __repr__(self):
        # NOTE(review): to_csv() is not defined on this class -- repr()
        # would raise AttributeError; perhaps write2csv was intended.
        return self.to_csv()
    #
    # Compute KNN candidates that meet today's criteria.
    #
    def prepare_knn_candidate_set(self):
        if len(self.book) < 2:
            return 0
        # Today's gap ratio: open vs yesterday's close.
        open_price = self.get_today_open_price();
        prev_close_price = self.get_yesterday_close_price();
        ref_ratio = (open_price - prev_close_price) / prev_close_price
        print "KNN ratio: ", prev_close_price, open_price, ref_ratio
        prev_close_price = 1
        for page_num, quotes in self.book.iteritems():
            open_price = quotes[0].o
            ratio = (open_price - prev_close_price) / prev_close_price
            # NOTE(review): the final `else` also adds the day, so every
            # branch adds quotes[0].dt and the ratio filter is a no-op.
            # Also `ref_ratio < 0.04` below was presumably meant to be
            # `< -0.04` -- confirm intended bucketing.
            if (abs(ref_ratio) <= 0.04) and (abs(ratio) <= 0.04):
                # in [-0.04, 0.04] range
                self.knn_candidate_set.add(quotes[0].dt);
            elif (ref_ratio > 0.04) and (ratio > 0.04):
                # in [0.04, +] range
                self.knn_candidate_set.add(quotes[0].dt);
            elif (ref_ratio < 0.04) and (ratio < 0.04):
                # in [-, -0.04] range
                self.knn_candidate_set.add(quotes[0].dt);
            else:
                self.knn_candidate_set.add(quotes[0].dt);
            prev_close_price = quotes[-1].c
        return len(self.knn_candidate_set)
    #
    # Should we consider these quotes a valid candidate for KNN
    #
    def is_knn_candidate(self, quotes):
        return quotes[0].dt in self.knn_candidate_set;
    #
    # Given a sequence of quotes, return a list of
    # scores depending on ChartType.
    #
    def compute_display_scores(self, quotes, chart_type):
        # Use close price as score
        if chart_type == 'close':
            return [q.c for q in quotes]
        # Use median price
        if chart_type == 'median':
            return [q.get_median() for q in quotes]
        # K-nearest neighbors
        if chart_type == 'knn':
            if self.is_knn_candidate(quotes):
                # Ratio of each quote's median wrt the day's open.
                return [q.get_ratio(quotes[0].o) for q in quotes]
            else:
                return []
        # Default will use close price as score
        return [q.c for q in quotes]
    def get_first_page(self):
        # First day's quote list (Python 2 dict-iterator idiom).
        return self.book.itervalues().next()
    def get_latest_quote(self):
        page_num = self.book.keys()[-1];
        return self.book[page_num][-1]
    def get_today_open_price(self):
        assert(len(self.book) >= 1)
        page_num = self.book.keys()[-1];
        return self.book[page_num][0].o
    def get_yesterday_close_price(self):
        assert(len(self.book) >= 2)
        page_num = self.book.keys()[-2];
        return self.book[page_num][-1].c
    def is_last_page(self, page_num):
        return page_num == self.book.keys()[-1];
    def get_local_time(self):
        # Convert local machine time into the market's timezone using the
        # configured UTC offset.
        # NOTE(review): assumes ctrl['UTC'] shifts into the exchange's
        # timezone; day rollover only handles the negative direction.
        now = datetime.datetime.now()
        local_hr = now.hour + ctrl['UTC']
        local_day = now.day
        if local_hr < 0:
            local_hr += 24
            local_day -= 1
        return (local_day, local_hr, now.minute, now.second)
    def get_num_days_ago(self, quote):
        return (datetime.datetime.now() - quote.dt).days
    def is_market_closed(self):
        # Closed if the latest quote is from another day or we are outside
        # the 6:00-13:00 window (market hours in the adjusted timezone --
        # TODO confirm against the exchange actually used).
        (local_day, local_hr, local_min, local_sec) = self.get_local_time()
        last_quote_dt = self.get_latest_quote().dt
        if local_day != last_quote_dt.day:
            return True
        if local_hr < 6 or local_hr >= 13:
            return True
        return False
    def is_market_open(self):
        return not self.is_market_closed()
    def write2csv(self):
        # Dump every stored quote to "<symbol>.csv", one row per quote.
        if not self.book:
            return
        fname = self.symbol + ".csv"
        print "Create", fname
        with open(fname, 'wb') as f:
            first_page = self.get_first_page();
            keys = first_page[0].__dict__.keys()
            w = csv.DictWriter(f, fieldnames=keys)
            w.writeheader()
            last_quote_dt = datetime.datetime.fromtimestamp(0);
            for page_num, quotes in self.book.iteritems():
                # Make sure quotes are listed in order
                assert(last_quote_dt < quotes[0].dt)
                last_quote_dt = quotes[0].dt
                for quote in quotes:
                    w.writerow(quote.__dict__)
        return
    #
    # Return reference datetime which shows where the quote is at this moment
    #
    def get_ref_datetime(self) :
        (_, local_hr, local_min, local_sec) = self.get_local_time()
        if self.is_market_open():
            ref_datetime = datetime.datetime(1971, 1, 1, \
                local_hr, local_min, local_sec)
        else:
            # Market closed: pin the reference to a fixed early-session time.
            ref_datetime = datetime.datetime(1971, 1, 1, 6, 20)
        return ref_datetime
    #
    # Compute today's future scores based on historical scores, volume, and
    # today's opening score.
    #
    def compute_future_scores(self, hist, today_opening_score, rel):
        # hist maps a normalized time-of-day to [(score, volume), ...] pairs,
        # one per historical day; `rel` (operator.le / operator.ge) selects
        # which side of the opening score contributes.
        future = []
        hist_opening_scores = hist.itervalues().next()
        future_score = today_opening_score
        def geomean(nums):
            # NOTE(review): unused helper (relies on the Python 2 builtin
            # `reduce`).
            return reduce(lambda x, y: x*y, nums)**(1.0/len(nums))
        for dt, scores_and_volume in hist.iteritems():
            # An effective_record is a tuple of (hist_score, volume, hist_opening_score)
            effective_records = [(a[0], float(a[1]), b[0]) for (a, b) in \
                zip(scores_and_volume, hist_opening_scores) if rel(a[0], b[0])]
            # Volume-weighted aggregation over the selected records.
            total_volume = sum([rec[1] for rec in effective_records])
            if today_opening_score < 1.0:
                # No need to compute ratio again if chart type is knn
                delta_score = sum(([(rec[0] - rec[2]) * (rec[1] / total_volume) for rec in effective_records]))
                future_score = today_opening_score + delta_score
            else:
                # Use ratio wrt hist_opening
                ratio = sum(([(rec[0] / rec[2]) * (rec[1] / total_volume) for rec in effective_records]))
                future_score = today_opening_score * ratio
            future.append((dt, future_score))
        return future
    # Plot the history for current symbol
    def plot(self, chart_type='close'):
        if not self.book:
            print "Not enough data to plot"
            return
        # Prepare data for KNN
        if chart_type == 'knn':
            if self.prepare_knn_candidate_set() < 2:
                print "No candidate for KNN plot"
                return
        begin_time = time.time()
        plot = Plot(chart_type);
        num_days = len(self.book)
        # Gray-scale gradient: older days fade towards white.
        gradient = 1.0;
        ref_datetime = self.get_ref_datetime()
        print "Reference time:", ref_datetime
        last_quote_dt = datetime.datetime.fromtimestamp(0);
        opening_score = 0.0
        #
        # Walk thru each day
        #
        # historical[normalized_dt] is a list of scores happened at that time.
        # We use historical later to estimate future scores
        historical = OrderedDict()
        for page_num, quotes in self.book.iteritems():
            scores = self.compute_display_scores(quotes, chart_type)
            if not scores:
                continue;
            # Keep the mapping for later reference.
            quotes_2_scores = OrderedDict(zip(quotes, scores))
            opening_score = scores[0]
            # Update historical
            for quote, score in quotes_2_scores.iteritems():
                _norm_dt = quote.get_normalized_dt()
                if not historical.has_key(_norm_dt):
                    historical[_norm_dt] = []
                historical[_norm_dt].append((score, quote.v))
            # Make sure quotes are listed in ascending order
            assert(last_quote_dt < quotes[0].dt)
            last_quote_dt = quotes[0].dt
            # Normalize the year/month/day, since the chart only cares about hr/min/sec
            norm_dates = [q.get_normalized_dt() for q in quotes]
            # Quotes from last page worths more attention.
            if self.is_last_page(page_num):
                mfc = "red"
                marker = 'D'
            else:
                mfc = str(gradient)
                marker = random.choice(plot.markers)
            # Only show details for the past 7 days
            if self.get_num_days_ago(quotes[0]) > 7:
                continue
            plot.plot_scores(norm_dates, scores, mfc, marker, quotes[0])
            #
            # Highlight the buys using big green dots
            #
            plot.plot_buys(self.get_buy_quotes(quotes), quotes_2_scores)
            # Adjust gradient for next page's quotes
            gradient -= (1.0 / float(num_days - 1));
        # Compute future scores (upper bounds and lower bounds, based on historical data
        future_scores_lb = self.compute_future_scores(historical, opening_score, operator.le)
        future_scores_ub = self.compute_future_scores(historical, opening_score, operator.ge)
        # Plot future scores
        plot.plot_future(future_scores_ub)
        plot.plot_future(future_scores_lb)
        #
        # Print out prediction before showing the chart
        #
        latest_quote = self.get_latest_quote();
        print self.symbol, "now @", latest_quote.c
        end_time = time.time()
        print 'Process time: %0.3f ms' % ((end_time-begin_time)*1000.0)
        # format the ticks and labels
        plot.format_ticks()
        plot.format_labels(self.interval_seconds)
        title = self.symbol + " in last " + str(num_days) + " days" + " @" + str(latest_quote.c)
        plot.format_title(title)
        #
        # Annotate the min/max points
        #
        (x1, y1) = max(future_scores_ub, key=operator.itemgetter(1))
        (x2, y2) = min(future_scores_lb, key=operator.itemgetter(1))
        plot.annotate(x1, y1, (-80, 60))
        plot.annotate(x2, y2, (-80, -60))
        # Flush whatever we have, and draw it!
        plot.show()
        return
#
# Collect intraday quote for a symbol.
#
def CollectIntradayQuote(record, interval_seconds, num_days):
    # Fetch Google-Finance-style intraday CSV for record["Symbol"] and load
    # it into a new Stock.  Returns the populated Stock (also written to
    # "<symbol>.csv" as a side effect).
    symbol = record["Symbol"]
    stock = Stock(symbol, interval_seconds, buys=record.get("Buy", []),
        sells=record.get("Sell", []))
    url = ctrl['URL']
    url += "q={0}&i={1}&p={2}d&f=d,o,h,l,c,v".format(symbol,interval_seconds,num_days)
    print "Query", url
    # NOTE(review): local name `csv` shadows the imported csv module inside
    # this function.
    csv = requests.get(url).text.encode('utf-8').split('\n')
    # Line 7 of the feed is "TIMEZONE_OFFSET=<minutes>".
    _, timezone_offset = csv[6].split('=')
    # Adjust timezone wrt UTC
    # NOTE(review): `0 * float(timezone_offset)` discards the feed's own
    # offset and uses only ctrl["UTC"] -- confirm this is deliberate.
    timezone_offset = (0 * float(timezone_offset)) + (ctrl["UTC"] * 60 * 60)
    for row in csv[7:]:
        fields = row.split(',')
        if len(fields) != 6:
            continue;
        # COLUMNS=DATE,CLOSE,HIGH,LOW,OPEN,VOLUME
        # Rows starting with 'a' carry an absolute epoch timestamp; later
        # rows carry interval offsets relative to the last 'a' row.
        offset = fields[0]
        if offset.startswith('a'):
            day = int(offset[1:])
            offset = 0
        else:
            offset = int(offset)
        # NOTE(review): if the first data row is not 'a'-prefixed, `day` is
        # used before assignment (NameError) -- relies on feed format.
        dt = datetime.datetime.fromtimestamp(day+(interval_seconds*offset)+timezone_offset)
        # Create a new quote
        quote = Quote(dt, float(fields[4]), float(fields[2]), float(fields[3]), \
            float(fields[1]), int(fields[5]))
        # Append this new quote to current stock
        stock.append(quote)
    stock.write2csv()
    return stock
#
# main()
#
# Load configuration, then fetch and chart every configured symbol.
try:
    with open('stock.json') as f:
        ctrl = json.load(f);
except IOError as e:
    sys.exit( "I/O error({0}): {1}".format(e.errno, e.strerror) + ": stock.json")
for record in ctrl["Records"]:
    stock = CollectIntradayQuote(record, ctrl["Interval"], ctrl["Days"])
    stock.plot(record['ChartType'].lower())
|
{
"content_hash": "be9f413074fb1c23723743626b62ea39",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 111,
"avg_line_length": 33.5,
"alnum_prop": 0.5453544087679683,
"repo_name": "liu12295/stock",
"id": "6e6dfcc962f1a489b0168e34ecdc6599e004f8b8",
"size": "19297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19151"
}
],
"symlink_target": ""
}
|
__all__ = ['imread', 'imsave']

from ...util.dtype import convert

try:
    import imread as _imread
except ImportError:
    # Bug fix: the adjacent string literals previously concatenated into the
    # run-on message "...could not be foundPlease refer..."; add the missing
    # separator.
    raise ImportError("Imread could not be found.\n"
                      "Please refer to http://pypi.python.org/pypi/imread/ "
                      "for further instructions.")
def imread(fname, dtype=None):
    """Load an image from file.

    Parameters
    ----------
    fname : str
        Name of input file
    dtype : dtype, optional
        If given, convert the loaded image to this dtype (via
        ``skimage.util.dtype.convert``, which rescales values).

    Returns
    -------
    img : ndarray
        The image loaded from ``fname``.
    """
    im = _imread.imread(fname)
    if dtype is not None:
        im = convert(im, dtype)
    return im
def imsave(fname, arr, format_str=None):
    """Save an image to disk.

    Parameters
    ----------
    fname : str
        Name of destination file.
    arr : ndarray of uint8 or uint16
        Array (image) to save.
    format_str: str,optional
        Format to save as; inferred from the file extension when omitted.

    Notes
    -----
    Currently, only 8-bit precision is supported.
    """
    # Delegates directly to the `imread` package (note its keyword is
    # `formatstr`, without an underscore).
    return _imread.imsave(fname, arr, formatstr=format_str)
|
{
"content_hash": "56a06d39fbd7ee6701a83e1ab7d60304",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 22.46511627906977,
"alnum_prop": 0.5807453416149069,
"repo_name": "michaelpacer/scikit-image",
"id": "9311f18f03368eed3b0ca8b3a109741de8b833ca",
"size": "966",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "skimage/io/_plugins/imread_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "76670"
},
{
"name": "Makefile",
"bytes": "525"
},
{
"name": "Python",
"bytes": "2232486"
}
],
"symlink_target": ""
}
|
"""Tree-based minimization algorithms."""
import copy
import inspect
import numbers
import numpy as np
from collections import Iterable
from sklearn.base import clone
from sklearn.base import is_regressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.utils import check_random_state
from .acquisition import _gaussian_acquisition
from .callbacks import check_callback
from .callbacks import VerboseCallback
from .learning import ExtraTreesRegressor
from .learning import GradientBoostingQuantileRegressor
from .learning import RandomForestRegressor
from .space import Space
from .utils import create_result
def _tree_minimize(func, dimensions, base_estimator, n_calls,
                   n_points, n_random_starts, specs, x0=None, y0=None,
                   random_state=None, acq="EI", xi=0.01, kappa=1.96,
                   verbose=False, callback=None):
    """Shared driver for tree-based sequential minimization.

    Validates `x0`/`y0`/call budgets, evaluates initial and random points,
    then iteratively fits `base_estimator` and minimizes the acquisition
    function over random candidate points.  Returns an OptimizeResult built
    by `create_result`.  Used by `gbrt_minimize` and `forest_minimize`.
    """
    rng = check_random_state(random_state)
    space = Space(dimensions)
    # Initialize with provided points (x0 and y0) and/or random points
    if x0 is None:
        x0 = []
    elif not isinstance(x0[0], list):
        # A single point was passed; wrap it into a one-element list.
        x0 = [x0]
    if not isinstance(x0, list):
        raise ValueError("`x0` should be a list, but got %s" % type(x0))
    # x0 only costs function evaluations when y0 was not supplied.
    n_init_func_calls = len(x0) if y0 is None else 0
    n_total_init_calls = n_random_starts + n_init_func_calls
    if n_calls <= 0:
        raise ValueError("Expected `n_calls` > 0, got %d" % n_calls)
    if n_random_starts < 0:
        raise ValueError(
            "Expected `n_random_starts` >= 0, got %d" % n_random_starts)
    if n_random_starts == 0 and not x0:
        raise ValueError("Either set `n_random_starts` > 0, or provide `x0`")
    if n_calls < n_total_init_calls:
        raise ValueError(
            "Expected `n_calls` >= %d, got %d" % (n_total_init_calls, n_calls))
    callbacks = check_callback(callback)
    if verbose:
        callbacks.append(VerboseCallback(
            n_init=n_init_func_calls, n_random=n_random_starts,
            n_total=n_calls))
    if y0 is None and x0:
        # Evaluate the user-supplied initial points, firing callbacks after
        # each evaluation.
        y0 = []
        for i, x in enumerate(x0):
            y0.append(func(x))
            if callbacks:
                curr_res = create_result(x0[:i + 1], y0, space, rng, specs)
                for c in callbacks:
                    c(curr_res)
    elif x0:
        # User supplied both x0 and y0: normalize y0 and sanity-check it.
        if isinstance(y0, Iterable):
            y0 = list(y0)
        elif isinstance(y0, numbers.Number):
            y0 = [y0]
        else:
            raise ValueError(
                "`y0` should be an iterable or a scalar, got %s" % type(y0))
        if len(x0) != len(y0):
            raise ValueError("`x0` and `y0` should have the same length")
        if not all(map(np.isscalar, y0)):
            raise ValueError("`y0` elements should be scalars")
    else:
        y0 = []
    # Random function evaluations.
    X_rand = space.rvs(n_samples=n_random_starts, random_state=rng)
    Xi = x0 + X_rand
    yi = y0
    for i, x in enumerate(X_rand):
        yi.append(func(x))
        if callbacks:
            curr_res = create_result(
                x0 + X_rand[:i + 1], yi, space, rng, specs)
            for c in callbacks:
                c(curr_res)
    if np.ndim(yi) != 1:
        raise ValueError("`func` should return a scalar")
    # Tree-based optimization loop
    models = []
    n_model_iter = n_calls - n_total_init_calls
    for i in range(n_model_iter):
        rgr = clone(base_estimator)
        rgr.fit(space.transform(Xi), yi)
        models.append(rgr)
        # `rgr` predicts constants for each leaf which means that the EI
        # has zero gradient over large distances. As a result we can not
        # use gradient based optimizers like BFGS, so using random sampling
        # for the moment.
        X = space.transform(space.rvs(n_samples=n_points,
                                      random_state=rng))
        values = _gaussian_acquisition(
            X=X, model=rgr, y_opt=np.min(yi), method=acq,
            xi=xi, kappa=kappa)
        # Greedily pick the candidate minimizing the acquisition value.
        next_x = X[np.argmin(values)]
        next_x = space.inverse_transform(next_x.reshape((1, -1)))[0]
        yi.append(func(next_x))
        Xi.append(next_x)
        if callbacks:
            curr_res = create_result(Xi, yi, space, rng, specs)
            for c in callbacks:
                c(curr_res)
    return create_result(Xi, yi, space, rng, specs, models)
def gbrt_minimize(func, dimensions, base_estimator=None, n_calls=100,
                  n_points=1000, n_random_starts=10, x0=None, y0=None,
                  n_jobs=1, random_state=None, acq="EI", xi=0.01, kappa=1.96,
                  verbose=False, callback=None):
    """Sequential optimization using gradient boosted trees.

    Gradient boosted regression trees are used to model the (very)
    expensive to evaluate function `func`. The model is improved
    by sequentially evaluating the expensive function at the next
    best point. Thereby finding the minimum of `func` with as
    few evaluations as possible.

    The total number of evaluations, `n_calls`, are performed like the
    following. If `x0` is provided but not `y0`, then the elements of `x0`
    are first evaluated, followed by `n_random_starts` evaluations.
    Finally, `n_calls - len(x0) - n_random_starts` evaluations are
    made guided by the surrogate model. If `x0` and `y0` are both
    provided then `n_random_starts` evaluations are first made then
    `n_calls - n_random_starts` subsequent evaluations are made
    guided by the surrogate model.

    Parameters
    ----------
    * `func` [callable]:
        Function to minimize. Should take a array of parameters and
        return the function values.

    * `dimensions` [list, shape=(n_dims,)]:
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(upper_bound, lower_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(upper_bound, lower_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

    * `base_estimator` [`GradientBoostingQuantileRegressor`]:
        The regressor to use as surrogate model

    * `n_calls` [int, default=100]:
        Number of calls to `func`.

    * `n_random_starts` [int, default=10]:
        Number of evaluations of `func` with random initialization points
        before approximating the `func` with `base_estimator`.

    * `n_points` [int, default=1000]:
        Number of points to sample when minimizing the acquisition function.

    * `x0` [list, list of lists or `None`]:
        Initial input points.

        - If it is a list of lists, use it as a list of input points.
        - If it is a list, use it as a single initial input point.
        - If it is `None`, no initial input points are used.

    * `y0` [list, scalar or `None`]:
        Evaluation of initial input points.

        - If it is a lists, then it corresponds to evaluations of the function
          at each element of `x0` : the i-th element of `y0` corresponds
          to the function evaluated at the i-th element of `x0`.
        - If it is a scalar, then it corresponds to the evaluation of the
          function at `x0`.
        - If it is None and `x0` is provided, then the function is evaluated
          at each element of `x0`.

    * `n_jobs` [int, default=1]:
        The number of jobs to run in parallel for `fit`.
        If -1, then the number of jobs is set to the number of cores.

    * `random_state` [int, RandomState instance, or None (default)]:
        Set random state to something other than None for reproducible
        results.

    * `acq` [string, default=`"EI"`]:
        Function to minimize over the forest posterior. Can be either

        - `"LCB"` for lower confidence bound,
        - `"EI"` for expected improvement,
        - `"PI"` for probability of improvement.

    * `xi` [float, default=0.01]:
        Controls how much improvement one wants over the previous best
        values. Used when the acquisition is either `"EI"` or `"PI"`.

    * `kappa` [float, default=1.96]:
        Controls how much of the variance in the predicted values should be
        taken into account. If set to be very high, then we are favouring
        exploration over exploitation and vice versa.
        Used when the acquisition is `"LCB"`.

    * `verbose` [boolean, default=False]:
        Control the verbosity. It is advised to set the verbosity to True
        for long optimization runs.

    * `callback` [callable, list of callables, optional]
        If callable then `callback(res)` is called after each call to `func`.
        If list of callables, then each callable in the list is called.

    Returns
    -------
    * `res` [`OptimizeResult`, scipy object]:
        The optimization result returned as a OptimizeResult object.
        Important attributes are:

        - `x` [list]: location of the minimum.
        - `fun` [float]: function value at the minimum.
        - `models`: surrogate models used for each iteration.
        - `x_iters` [list of lists]: location of function evaluation for each
           iteration.
        - `func_vals` [array]: function value for each iteration.
        - `space` [Space]: the optimization space.
        - `specs` [dict]`: the call specifications.
        - `rng` [RandomState instance]: State of the random state
           at the end of minimization.

        For more details related to the OptimizeResult object, refer
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
    """
    # Save call args (captured before any locals are added to the frame).
    specs = {"args": copy.copy(inspect.currentframe().f_locals),
             "function": inspect.currentframe().f_code.co_name}

    # Check params
    rng = check_random_state(random_state)

    # Default estimator: quantile GBRT so the surrogate can report both a
    # mean and an uncertainty estimate for the acquisition function.
    if base_estimator is None:
        gbrt = GradientBoostingRegressor(n_estimators=30, loss='quantile')
        base_estimator = GradientBoostingQuantileRegressor(base_estimator=gbrt,
                                                           n_jobs=n_jobs,
                                                           random_state=rng)

    # Bug fixes: `verbose` was accepted but never forwarded (so verbose runs
    # printed nothing), and an unreachable `return res` referencing an
    # undefined name followed this return; both corrected here.
    return _tree_minimize(func, dimensions, base_estimator,
                          n_calls=n_calls,
                          n_points=n_points, n_random_starts=n_random_starts,
                          x0=x0, y0=y0, random_state=random_state, xi=xi,
                          kappa=kappa, acq=acq, specs=specs,
                          verbose=verbose, callback=callback)
def forest_minimize(func, dimensions, base_estimator='et', n_calls=100,
n_points=1000, n_random_starts=10, x0=None, y0=None,
n_jobs=1, random_state=None, acq="EI", xi=0.01, kappa=1.96,
verbose=False, callback=None):
"""Sequential optimisation using decision trees.
A tree based regression model is used to model the expensive to evaluate
function `func`. The model is improved by sequentially evaluating
the expensive function at the next best point. Thereby finding the
minimum of `func` with as few evaluations as possible.
The total number of evaluations, `n_calls`, are performed like the
following. If `x0` is provided but not `y0`, then the elements of `x0`
are first evaluated, followed by `n_random_starts` evaluations.
Finally, `n_calls - len(x0) - n_random_starts` evaluations are
made guided by the surrogate model. If `x0` and `y0` are both
provided then `n_random_starts` evaluations are first made then
`n_calls - n_random_starts` subsequent evaluations are made
guided by the surrogate model.
Parameters
----------
* `func` [callable]:
Function to minimize. Should take a array of parameters and
return the function values.
* `dimensions` [list, shape=(n_dims,)]:
List of search space dimensions.
Each search dimension can be defined either as
- a `(upper_bound, lower_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(upper_bound, lower_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
* `base_estimator` [string or `Regressor`, default=`"et"`]:
The regressor to use as surrogate model. Can be either
- `"rf"` for random forest regressor
- `"et"` for extra trees regressor
- instance of regressor with support for `return_std` in its predict
method
The predefined models are initilized with good defaults. If you
want to adjust the model parameters pass your own instance of
a regressor which returns the mean and standard deviation when
making predictions.
* `n_calls` [int, default=100]:
Number of calls to `func`.
* `n_random_starts` [int, default=10]:
Number of evaluations of `func` with random initialization points
before approximating the `func` with `base_estimator`.
* `n_points` [int, default=1000]:
Number of points to sample when minimizing the acquisition function.
* `x0` [list, list of lists or `None`]:
Initial input points.
- If it is a list of lists, use it as a list of input points.
- If it is a list, use it as a single initial input point.
- If it is `None`, no initial input points are used.
* `y0` [list, scalar or `None`]:
Evaluation of initial input points.
- If it is a list, then it corresponds to evaluations of the function
at each element of `x0` : the i-th element of `y0` corresponds
to the function evaluated at the i-th element of `x0`.
- If it is a scalar, then it corresponds to the evaluation of the
function at `x0`.
- If it is None and `x0` is provided, then the function is evaluated
at each element of `x0`.
* `n_jobs` [int, default=1]:
The number of jobs to run in parallel for `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
* `random_state` [int, RandomState instance, or None (default)]:
Set random state to something other than None for reproducible
results.
* `acq` [string, default=`"LCB"`]:
Function to minimize over the forest posterior. Can be either
- `"LCB"` for lower confidence bound,
- `"EI"` for expected improvement,
- `"PI"` for probability of improvement.
* `xi` [float, default=0.01]:
Controls how much improvement one wants over the previous best
values. Used when the acquisition is either `"EI"` or `"PI"`.
* `kappa` [float, default=1.96]:
Controls how much of the variance in the predicted values should be
taken into account. If set to be very high, then we are favouring
exploration over exploitation and vice versa.
Used when the acquisition is `"LCB"`.
* `verbose` [boolean, default=False]:
Control the verbosity. It is advised to set the verbosity to True
for long optimization runs.
* `callback` [callable, optional]
If provided, then `callback(res)` is called after call to func.
Returns
-------
* `res` [`OptimizeResult`, scipy object]:
The optimization result returned as a OptimizeResult object.
Important attributes are:
- `x` [list]: location of the minimum.
- `fun` [float]: function value at the minimum.
- `models`: surrogate models used for each iteration.
- `x_iters` [list of lists]: location of function evaluation for each
iteration.
- `func_vals` [array]: function value for each iteration.
- `space` [Space]: the optimization space.
- `specs` [dict]`: the call specifications.
For more details related to the OptimizeResult object, refer
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
"""
# Save call args + rng
specs = {"args": copy.copy(inspect.currentframe().f_locals),
"function": inspect.currentframe().f_code.co_name}
# Check params
rng = check_random_state(random_state)
# Default estimator
if isinstance(base_estimator, str):
if base_estimator not in ("rf", "et"):
raise ValueError(
"Valid values for the base_estimator parameter"
" are: 'rf' or 'et', not '%s'" % base_estimator)
if base_estimator == "rf":
base_estimator = RandomForestRegressor(n_estimators=100,
min_samples_leaf=3,
n_jobs=n_jobs,
random_state=rng)
elif base_estimator == "et":
base_estimator = ExtraTreesRegressor(n_estimators=100,
min_samples_leaf=3,
n_jobs=n_jobs,
random_state=rng)
else:
if not is_regressor(base_estimator):
raise ValueError("The base_estimator parameter has to either"
" be a string or a regressor instance."
" '%s' is neither." % base_estimator)
return _tree_minimize(func, dimensions, base_estimator,
n_calls=n_calls,
n_points=n_points, n_random_starts=n_random_starts,
specs=specs, x0=x0, y0=y0, random_state=random_state,
acq=acq, xi=xi, kappa=kappa, verbose=verbose,
callback=callback)
|
{
"content_hash": "e7457e7e6152172fbd3bd8276bb91edf",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 94,
"avg_line_length": 40.01559020044544,
"alnum_prop": 0.6081705348694829,
"repo_name": "glouppe/scikit-optimize",
"id": "a7f8963f435a984b51f7ddbbec075ebf436e1656",
"size": "17967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skopt/forest_opt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "33771"
},
{
"name": "Python",
"bytes": "129974"
},
{
"name": "Shell",
"bytes": "2469"
}
],
"symlink_target": ""
}
|
from suplemon.suplemon_module import Module
class LStrip(Module):
    """Suplemon command: strip leading whitespace from every cursor line."""

    def run(self, app, editor, args):
        """Left-strip, in place, each line that currently has a cursor on it.

        TODO: move cursors in sync with line contents
        """
        for line_number in editor.get_lines_with_cursors():
            target = editor.lines[line_number]
            target.data = target.data.lstrip()
# Registration hook read by Suplemon's module loader: maps the command
# name ("lstrip") to the Module subclass that implements it.
module = {
    "class": LStrip,
    "name": "lstrip",
}
|
{
"content_hash": "569ad981d0844b16d052e8142ff75943",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 25.133333333333333,
"alnum_prop": 0.6021220159151194,
"repo_name": "trylle/suplemon",
"id": "fa3dadaa913046dcbd5ef652c44af9d7af672674",
"size": "400",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suplemon/modules/lstrip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156035"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the MarketPostComment table."""

    def forwards(self, orm):
        """Apply the migration: create market_marketpostcomment."""
        # Adding model 'MarketPostComment'
        db.create_table('market_marketpostcomment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketBlogPost'])),
            ('comment', self.gf('django.db.models.fields.TextField')()),
            ('commented_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ))
        db.send_create_signal('market', ['MarketPostComment'])

    def backwards(self, orm):
        """Reverse the migration: drop market_marketpostcomment."""
        # Deleting model 'MarketPostComment'
        db.delete_table('market_marketpostcomment')

    # Frozen ORM snapshot used by South to resolve orm[...] lookups above.
    # Auto-generated by South -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'market.marketblogpost': {
            'Meta': {'object_name': 'MarketBlogPost'},
            'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'posted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'market.marketcategory': {
            'Meta': {'object_name': 'MarketCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
        },
        'market.marketmailinglistmember': {
            'Meta': {'object_name': 'MarketMailingListMember'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"})
        },
        'market.marketplace': {
            'Meta': {'object_name': 'MarketPlace'},
            'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
        },
        'market.marketpostcategory': {
            'Meta': {'object_name': 'MarketPostCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
            'tag': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        'market.marketpostcomment': {
            'Meta': {'object_name': 'MarketPostComment'},
            'comment': ('django.db.models.fields.TextField', [], {}),
            'commented_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketBlogPost']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'market.marketsubcategory': {
            'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
        }
    }
    complete_apps = ['market']
|
{
"content_hash": "38a469e7415d4d33194561bcc9913332",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 181,
"avg_line_length": 69.0725806451613,
"alnum_prop": 0.5522475189725627,
"repo_name": "codepython/CollectorCity-Market-Place",
"id": "7f910aefef63f1bc1b31b400ae5565338d0a661d",
"size": "8583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "marketplaces/apps/market/migrations/0005_auto__add_marketpostcomment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "863646"
},
{
"name": "HTML",
"bytes": "475154"
},
{
"name": "JavaScript",
"bytes": "693720"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the final downloadable media URL for a nowdownload page.

    Tries the direct "Download Now" anchor first; if it is missing, runs
    the token/countdown fallback flow. Returns a list containing the
    single resolved URL.
    """
    logger.info("[nowdownload.py] get_video_url (page_url='%s')" % page_url)
    # Example of the direct-download anchor this scraper matches:
    '''
    <a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
    '''
    data = scrapertools.cache_page( page_url )
    logger.debug("[nowdownload.py] data:" + data)
    try:
        # Fast path: the page already carries the direct download link.
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
    except:
        # NOTE(review): bare except is meant to catch get_match failing to
        # find the fast-path anchor, but it swallows everything -- consider
        # narrowing once scrapertools' exception type is confirmed.
        # Fallback: the page issues an anti-bot token request, e.g.
        #$.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
        token = scrapertools.get_match(data,'(/api/token.php\?token=[^"]*)')
        logger.debug("[nowdownload.py] token:" + token)
        d= scrapertools.cache_page( "http://www.nowdownload.co"+ token )
        # NOTE(review): the token response 'd' is unused; the countdown link
        # is re-extracted from the original page 'data' -- presumably the
        # token GET only primes a server-side session/cookie. Verify.
        url = scrapertools.get_match(data,'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
        logger.debug("[nowdownload.py] url_1:" + url)
        data = scrapertools.cache_page("http://www.nowdownload.co" + url )
        logger.debug("[nowdownload.py] data:" + data)
        # The second page carries the final success button:
        #<a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
        logger.debug("[nowdownload.py] url_final:" + url)
    video_urls = [url]
    return video_urls
# Find this server's video links inside the given text.
def find_videos(data):
    """Return [title, url, server] triples, de-duplicated, for every
    nowdownload link found in *data*."""
    seen = set()
    results = []
    # Example links this pattern matches:
    #http://www.nowdownload.co/dl/9gwahc3577hj9
    #http://www.nowdownload.eu/dl/srv4g94wk6j7b
    patronvideos = '(nowdownload.\w{2}/dl/[a-z0-9]+)'
    logger.info("[nowdownload.py] find_videos #"+patronvideos+"#")
    for match in re.compile(patronvideos,re.DOTALL).findall(data):
        url = "http://www."+match
        if url in seen:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        results.append( [ "[nowdownload]" , url , 'nowdownload' ] )
        seen.add(url)
    return results
|
{
"content_hash": "fe1d90761dc70b548296aa023d43c0c9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 234,
"avg_line_length": 44.758620689655174,
"alnum_prop": 0.6548536209553159,
"repo_name": "jose36/plugin.video.Jmdl2",
"id": "4e84c2a8ceca670d263a87fd513d827cf519c7e8",
"size": "2861",
"binary": false,
"copies": "44",
"ref": "refs/heads/master",
"path": "servers/nowdownload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "602821"
}
],
"symlink_target": ""
}
|
""" Module to test exercise3.py """
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
import mock
from exercise3 import diagnose_car
# test passed!!
def test_accepted_inputs(capsys):
    """Drive diagnose_car through every accepted answer sequence and check
    the advice printed for each (answers are fed via mocked raw_input)."""
    cases = [
        (["Y", "Y"], "Clean terminals and try starting again.\n"),
        (["Y", "N"], "Replace cables and try again.\n"),
        (["N", "Y"], "Replace the battery.\n"),
        (["N", "N", "Y"], "Check spark plug connections.\n"),
        (["N", "N", "N", "N"], "Engine is not getting enough fuel. Clean fuel pump.\n"),
        (["N", "N", "N", "Y", "N"], "Check to ensure the choke is opening and closing.\n"),
        (["N", "N", "N", "Y", "Y"], "Get it in for service.\n"),
    ]
    for answers, expected in cases:
        with mock.patch("__builtin__.raw_input", side_effect=answers):
            diagnose_car()
        out, err = capsys.readouterr()
        assert out == expected
|
{
"content_hash": "6716edec80b9ee10ec7513c79af005a7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 34.5625,
"alnum_prop": 0.5786618444846293,
"repo_name": "eden-r/inf1340_2015_asst1",
"id": "a9952a2f6ab30e9279ce9a032331ed37a7f2c9cc",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_exercise3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15685"
}
],
"symlink_target": ""
}
|
"""
Django settings for engine project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vm*)jij1kwx93!9@h@+(x2%$j-j35h7@#b-j=+934aqu&3lycf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps: the 'example' demo app and the 'devour' Kafka library.
    'example',
    'devour',
    'rest_framework',
    'shell_plus'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'engine.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'engine.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file beside the project root -- development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Kafka wiring consumed by the 'devour' library: broker/zookeeper hosts and
# topic -> consumer-class routes.
# NOTE(review): KAFKA / ZOOKEEPER are read via os.environ.get, so both are
# None when the env vars are unset -- presumably validated downstream.
KAFKA_CONFIG = {
    'client': {
        'hosts': os.environ.get('KAFKA'),
        'zookeeper_hosts': os.environ.get('ZOOKEEPER')
    },
    'consumer_routes': {
        'simple_message': 'example.microservices.simple_message.consumers.SimpleMessageConsumer',
        'problems': 'example.microservices.math.consumers.BalancedMathConsumer'
    }
}
|
{
"content_hash": "380992fc7efe3eeb7a148391d870679d",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 97,
"avg_line_length": 26.022222222222222,
"alnum_prop": 0.6783376031881583,
"repo_name": "brandoshmando/devour",
"id": "42675268ae10d1348f0f9fb2747eaa4688d61a6f",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/django-example/engine/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85240"
}
],
"symlink_target": ""
}
|
import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
PACKAGE = "gaeforms"
DESCRIPTION = "A automated form validation and transformation based on App Engine Models"
NAME = PACKAGE
AUTHOR = "Renzo Nuccitelli"
AUTHOR_EMAIL = "renzo.n@gmail.com"
URL = "https://github.com/renzon/gaeforms"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="BSD",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data=find_package_data(PACKAGE, only_in_packages=False),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Paste",
],
zip_safe=False,
install_requires=['pytz>=2014.4',
'Babel>=2.3.4']
)
|
{
"content_hash": "4764fe6bc7676ff15a38e28c6ff48c8f",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 89,
"avg_line_length": 35.67883211678832,
"alnum_prop": 0.5552373158756138,
"repo_name": "renzon/gaeforms",
"id": "ea215663022d44fae2b039ccb5a93c28dbf30f7f",
"size": "4888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90365"
}
],
"symlink_target": ""
}
|
from ztag.annotation import *
class VerisIndustriesAnnotation(Annotation):
    """Tag Modbus device-ID responses whose vendor string mentions Veris."""

    protocol = protocols.MODBUS
    subprotocol = protocols.MODBUS.DEVICE_ID
    port = None

    def process(self, obj, meta):
        """Set the manufacturer to VERIS when the reported vendor matches."""
        reported_vendor = obj["mei_response"]["objects"]["vendor"].lower()
        if "veris" not in reported_vendor:
            return None
        meta.global_metadata.manufacturer = Manufacturer.VERIS
        return meta
|
{
"content_hash": "21d947bab55af944fb9ffe6f384044fc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 26.266666666666666,
"alnum_prop": 0.6598984771573604,
"repo_name": "zmap/ztag",
"id": "fcacdbb6a3905a6302c2e60f4db8e80dfe292772",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ztag/annotations/veris_industries_mei.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "604209"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pytest
from .utils import decode_psd
@pytest.mark.parametrize('filename', ['masks.psd', 'masks2.psd'])
def test_file_with_masks_is_parsed(filename):
    """Each layer of a mask-bearing PSD decodes with at least 3 channels."""
    parsed = decode_psd(filename)
    channel_data = parsed.layer_and_mask_data.layers.channel_image_data
    for layer_channels in channel_data:
        assert len(layer_channels) >= 3
|
{
"content_hash": "4d113295e60482aeca32b04567a40214",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 70,
"avg_line_length": 33.3,
"alnum_prop": 0.7207207207207207,
"repo_name": "EvgenKo423/psd-tools",
"id": "11560a1254f332dbdd4d691558d98505cfba48e5",
"size": "357",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_layer_masks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201437"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
"""Support for command line covers."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverEntity
from homeassistant.const import (
CONF_COMMAND_CLOSE,
CONF_COMMAND_OPEN,
CONF_COMMAND_STATE,
CONF_COMMAND_STOP,
CONF_COVERS,
CONF_FRIENDLY_NAME,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.reload import setup_reload_service
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import call_shell_with_timeout, check_output_or_log
from .const import CONF_COMMAND_TIMEOUT, DEFAULT_TIMEOUT, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Per-cover configuration schema. The open/close/stop commands default to
# "true" -- presumably the shell no-op, so unset actions succeed silently;
# confirm against the command_line platform docs.
COVER_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_COMMAND_CLOSE, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_OPEN, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_STATE): cv.string,
        vol.Optional(CONF_COMMAND_STOP, default="true"): cv.string,
        vol.Optional(CONF_FRIENDLY_NAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)
# Platform schema: a mapping of slug -> per-cover config.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover controlled by shell commands."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)

    device_configs: dict[str, Any] = config.get(CONF_COVERS, {})
    entities: list[CommandCover] = []
    for object_id, cfg in device_configs.items():
        template: Template | None = cfg.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = hass
        entities.append(
            CommandCover(
                cfg.get(CONF_FRIENDLY_NAME, object_id),
                cfg[CONF_COMMAND_OPEN],
                cfg[CONF_COMMAND_CLOSE],
                cfg[CONF_COMMAND_STOP],
                cfg.get(CONF_COMMAND_STATE),
                template,
                cfg[CONF_COMMAND_TIMEOUT],
                cfg.get(CONF_UNIQUE_ID),
            )
        )

    if not entities:
        _LOGGER.error("No covers added")
        return

    add_entities(entities)
class CommandCover(CoverEntity):
    """A cover entity driven by user-configured shell commands."""

    def __init__(
        self,
        name: str,
        command_open: str,
        command_close: str,
        command_stop: str,
        command_state: str | None,
        value_template: Template | None,
        timeout: int,
        unique_id: str | None,
    ) -> None:
        """Initialize the cover."""
        self._attr_name = name
        self._attr_unique_id = unique_id
        # Poll only when a state command was configured.
        self._attr_should_poll = bool(command_state)
        self._command_open = command_open
        self._command_close = command_close
        self._command_stop = command_stop
        self._command_state = command_state
        self._value_template = value_template
        self._timeout = timeout
        # Cached position: None = unknown, 0 = closed, 100 = fully open.
        self._state: int | None = None

    def _move_cover(self, command: str) -> bool:
        """Run *command* in a shell; return True on exit status 0."""
        _LOGGER.info("Running command: %s", command)
        returncode = call_shell_with_timeout(command, self._timeout)
        if returncode == 0:
            return True
        _LOGGER.error(
            "Command failed (with return code %s): %s", returncode, command
        )
        return False

    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed (None when position is unknown)."""
        position = self.current_cover_position
        if position is None:
            return None
        return position == 0

    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        return self._state

    def _query_state(self) -> str | None:
        """Run the state command and return its raw output, if configured."""
        if not self._command_state:
            return None
        _LOGGER.info("Running state value command: %s", self._command_state)
        return check_output_or_log(self._command_state, self._timeout)

    def update(self) -> None:
        """Refresh the cached position from the state command."""
        if not self._command_state:
            return
        payload = str(self._query_state())
        if self._value_template:
            payload = self._value_template.render_with_possible_json_value(payload)
        self._state = int(payload)

    def open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        self._move_cover(self._command_open)

    def close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        self._move_cover(self._command_close)

    def stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        self._move_cover(self._command_stop)
|
{
"content_hash": "0934496f30d1f1b2e9d3b19c7dde9bf6",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 87,
"avg_line_length": 32.260355029585796,
"alnum_prop": 0.6254585473220836,
"repo_name": "nkgilley/home-assistant",
"id": "8298201228f45d3e5533361838ed5b44a43bd245",
"size": "5452",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/command_line/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import aiohttp.web
import aiohttp.web_request
class HealthController:
    """Controller exposing a liveness/health-check endpoint."""

    @staticmethod
    async def handle_health_check(request: aiohttp.web_request.Request) -> aiohttp.web.Response:
        """Answer with a fixed body so callers can verify the service is up."""
        reason = f'Host {request.host} asked'
        return aiohttp.web.Response(text='TriggearIsOk', reason=reason)
|
{
"content_hash": "e6bc89b3b4a6f73cb521710347d3ba23",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 96,
"avg_line_length": 35.125,
"alnum_prop": 0.7580071174377224,
"repo_name": "futuresimple/triggear",
"id": "e29fb7635e86d0fa03416c5505022f64c548580c",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/controllers/health_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "363"
},
{
"name": "Python",
"bytes": "261446"
}
],
"symlink_target": ""
}
|
"""virt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from panel import views as p_views
urlpatterns = [
    url(r'^logout/', auth_views.logout, name='logout'),
    url(r'^panel/create', p_views.vmcreate, name='create'),
    url(r'^panel/state', p_views.button_action, name='state'),
    # Fix: a regex passed to include() must not be anchored with '$'
    # (Django check urls.W001) -- the old r'^panel/$' and r'^$' prefixes
    # prevented any URL inside the included URLconfs from ever matching.
    # The include is placed after the explicit panel routes so those keep
    # precedence.
    url(r'^panel/', include('panel.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^', include('portal.urls')),
]
|
{
"content_hash": "b0de0836766a25ebc66747f144f2f25c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 40.035714285714285,
"alnum_prop": 0.687778768956289,
"repo_name": "erikkn/vmx-hosting",
"id": "e199f18e5372070e30974dc479fe97cb1ed17055",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virt/virt/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3861"
},
{
"name": "HTML",
"bytes": "21379"
},
{
"name": "JavaScript",
"bytes": "4815"
},
{
"name": "Python",
"bytes": "37088"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
}
|
def cargarListas(nombrearchivo, lista):
    """Load named article lists from *nombrearchivo* into *lista*.

    Each line has the form ``nombre=articulo1,articulo2,...``; an empty
    article part yields an empty article list. On a missing/unreadable file
    or a malformed line, whatever was loaded so far is kept (best effort,
    like the original). Returns *lista*.
    """
    try:
        # Fix: use a context manager so the file is closed even on error,
        # and strip only the newline so the last line of a file without a
        # trailing newline is not truncated (the old ``linea[:-1]`` was).
        with open(nombrearchivo, "rt") as archivo:
            for linea in archivo:
                linea = linea.rstrip("\n")
                if not linea:
                    continue
                listaNombre, Articulos = linea.split("=")
                if Articulos.strip() == "":
                    listaArticulos = []
                else:
                    listaArticulos = Articulos.split(",")
                lista.append([listaNombre, listaArticulos])
    except (OSError, ValueError):
        # Fix: was a bare ``except`` that printed an empty line; keep the
        # best-effort behaviour without swallowing unrelated errors.
        pass
    return lista
def salvarListas(nommbrearchivo, listas):
    """Write *listas* to *nommbrearchivo* as ``nombre=a,b,c`` lines.

    Each entry of *listas* is ``[nombre, articulos]``. On a write error the
    original behaviour is kept: print a message and wait for <enter>.
    """
    try:
        # Fix: context manager guarantees the file is closed/flushed even
        # when a write fails part-way through.
        with open(nommbrearchivo, "wt") as archivo:
            for lista in listas:
                # str.join replaces the old trailing-comma-then-slice dance.
                articulos = ",".join("{0}".format(a) for a in lista[1])
                archivo.write(lista[0] + "=" + articulos + "\n")
    except OSError:
        print("\nError al Guardar Archivo")
        input()
def getListaNombre(lista, lista_actual):
    """Return the name of the list at index *lista_actual*, or a
    placeholder string when the container is empty."""
    if not lista:
        return '** LISTA VACIA **'
    return lista[lista_actual][0]
def agregarLista(lista, listaNombre):
    """Append a new, empty named list to the container and return it."""
    nueva = [listaNombre, []]
    lista.append(nueva)
    return lista
def agregarArticulo(lista, listaNombre):
    """Append *listaNombre* (an article) to *lista* and return the list."""
    lista += [listaNombre]
    return lista
def borrarLista(lista, listaNumero):
    """Remove the entry at index *listaNumero* and return the container."""
    del lista[listaNumero]
    return lista
|
{
"content_hash": "3e54d467df7a393b4fc2b025b00e6754",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 68,
"avg_line_length": 26.923076923076923,
"alnum_prop": 0.5571428571428572,
"repo_name": "jlermauip/uip-prog3",
"id": "34a0a409ab4899b80abab1abb576c3080199a882",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tareas/Tarea5/listas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "685"
},
{
"name": "JavaScript",
"bytes": "10918"
},
{
"name": "Python",
"bytes": "47256"
},
{
"name": "Smarty",
"bytes": "3680"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
from sft.Actions import Actions
class ActionHistory(object):
    """Abstract interface for recording and retrieving an agent's actions.

    NOTE(review): ``__metaclass__`` is the Python 2 mechanism for attaching
    a metaclass; on Python 3 this attribute is ignored, so ABCMeta would
    not actually enforce the abstract methods there -- confirm the target
    Python version.
    """
    __metaclass__ = ABCMeta

    # Number of distinct actions defined in Actions.all.
    ACTION_WIDTH = len(Actions.all)

    @abstractmethod
    def get_size(self):
        """Return the size of the stored history (implementation-defined)."""
        pass

    @abstractmethod
    def get_history(self, all_actions):
        """Return the recorded history; the role of *all_actions* is
        implementation-defined (not visible here)."""
        pass

    @abstractmethod
    def new_action(self, action):
        """Record *action* as the most recent action."""
        pass

    @abstractmethod
    def new_episode(self):
        """Mark the start of a new episode (reset per-episode state)."""
        pass
|
{
"content_hash": "109b79d83d5096d54429b5e73ecbbf4b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 39,
"avg_line_length": 15.04,
"alnum_prop": 0.7367021276595744,
"repo_name": "kevinkepp/look-at-this",
"id": "c227163a11d67131842e95cc47e82de07b31e69e",
"size": "376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sft/agent/ah/ActionHistory.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def render(context, obj, args):
    """
    Try to render an attribute from an object. It will first look for a function called 'render_<attr_name>' on the
    view; if this doesn't exist it will look for an attribute with the attribute name, and if this doesn't exist it
    will fall back to the default value supplied, or '' if no default was supplied.
    """
    # args is "attribute" or "attribute,default"; a missing default becomes ''.
    splitargs = args.split(',')
    try:
        attribute, default = splitargs
    except ValueError:
        attribute, default = args, ''
    # First preference: a 'render_<attribute>' method on the view.
    # NOTE(review): __getattribute__ is called directly, so a __getattr__
    # fallback defined on the view would not be consulted here.
    try:
        attr = context['view'].__getattribute__('render_' + attribute)
    except AttributeError:
        # Falls back to the view's *instance* dict only; class-level
        # attributes are not found by __dict__.get.
        attr = context['view'].__dict__.get(attribute, None)
    if attr:
        # Truthy view attribute or render_<attribute> method: call it with
        # the object. A truthy non-callable here would raise TypeError.
        return attr(obj)
    # Second preference: the attribute on the object itself, else default.
    try:
        attr = obj.__getattribute__(attribute)
    except AttributeError:
        attr = obj.__dict__.get(attribute, default)
    if hasattr(attr, '__call__'):
        return attr.__call__()
    else:
        return attr
@register.filter
def get_selectable_pages(page, pages_before_after):
    """Build the page-number list for a pagination widget.

    Returns the full page range when it is short enough; otherwise a list
    of page numbers around the current page, with ``None`` marking an
    ellipsis/break before the first and/or after the last page.
    """
    current = page.number
    total = page.paginator.num_pages
    # if we have few enough pages that we wouldn't have any breaks when we
    # select the middle page give the full range; we also allow for the
    # break occurring on the 2nd or penultimate pages
    if total <= 5 + 2 * pages_before_after:
        return page.paginator.page_range
    # if the pages in the limit include the second page, start at 1;
    # otherwise have 1 and a break
    if current - pages_before_after <= 3:
        pages = list(range(1, current))
    else:
        pages = [1, None] + list(range(current - pages_before_after, current))
    # include the current page
    pages.append(current)
    # if the pages in the limit include the penultimate page, run straight
    # up to the final page; otherwise have a break and then the final page
    if current + pages_before_after >= total - 2:
        pages.extend(range(current + 1, total + 1))
    else:
        pages.extend(range(current + 1, current + pages_before_after + 1))
        pages.extend([None, total])
    return pages
|
{
"content_hash": "7a1eebb2a597f971ff3bc8dcdc335d90",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 125,
"avg_line_length": 36.80327868852459,
"alnum_prop": 0.6610244988864142,
"repo_name": "wildfish/django-directory",
"id": "f7a1544ffc4bd5f5b9090b6ee00028b29906b77e",
"size": "2245",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "directory/templatetags/directory_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2931"
},
{
"name": "Python",
"bytes": "36997"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class AuthentificationConfig(AppConfig):
    """Django application configuration for the 'authentification' app."""

    # App label Django uses to register this application.
    name = 'authentification'
|
{
"content_hash": "60f799f97945d37c11533d5541987f49",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.7837837837837838,
"repo_name": "rachidoulasri/django_projectwebpage",
"id": "b3a8f5069a03b8635ffb7f9cc3cdf597bd408405",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "authentification/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11267"
},
{
"name": "CSS",
"bytes": "50162"
},
{
"name": "HTML",
"bytes": "30467"
},
{
"name": "JavaScript",
"bytes": "1092528"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "52109"
}
],
"symlink_target": ""
}
|
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
    """Read DBpedia resource URLs from stdin (one per line) and print one
    JSON record per URL: 'coverage' plus 'lon'/'lat' when the resource has
    WGS84 coordinates. On any failure only the 'coverage' field is printed
    (best effort, matching the original behaviour).
    """
    # Fix: iterate stdin lazily instead of readlines() materializing it all.
    for line in sys.stdin:
        line = line.strip()
        norm = unquote(line)
        # DBpedia serves machine-readable JSON under /data/<name>.json.
        url = line.replace('/resource/', '/data/') + '.json'
        time.sleep(1)  # throttle: be polite to the public endpoint
        try:
            resp = urlopen(url)
            try:
                if resp.code == 200:
                    data = json.loads(resp.read())[norm]
                    print(json.dumps({'coverage': line,
                                      'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
                                      'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
                                     sort_keys=True))
            finally:
                resp.close()  # fix: the HTTP response was never closed
        except Exception:
            # Fix: was a bare ``except:`` followed by a dead ``False``
            # statement; narrow so KeyboardInterrupt still aborts, keep the
            # best-effort "coverage only" record on any error.
            print(json.dumps({'coverage': line}))


if __name__ == "__main__":
    main()
|
{
"content_hash": "73357ddf6dca68a1c6cf13abeeac8268",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 106,
"avg_line_length": 30.6,
"alnum_prop": 0.4891067538126362,
"repo_name": "ViralTexts/vt-passim",
"id": "8c69c145e96a1b753d54c424c6c2636b9d3204c9",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/dbdata.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "9433"
},
{
"name": "Makefile",
"bytes": "18200"
},
{
"name": "Python",
"bytes": "96293"
},
{
"name": "Scala",
"bytes": "94268"
},
{
"name": "Shell",
"bytes": "4216"
}
],
"symlink_target": ""
}
|
import numpy as np
#PARAMS DE TEST
def sommePonderee(utilite_marginale, poids):
    """Evaluation par somme ponderee.

    Return the weighted sum (dot product) of the marginal utilities and
    their weights.
    """
    u = np.array(utilite_marginale)
    p = np.array(poids)
    # Fix: removed a leftover debug print that polluted stdout on every call.
    return np.dot(u, p)
|
{
"content_hash": "4a9d8c2cb31406676a47f3e2780b6d05",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 25.3,
"alnum_prop": 0.5889328063241107,
"repo_name": "SimoRihani/ProjetLS",
"id": "342d52e04806ee7f6f8742657a5c1decec799744",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FlaskApp/Methode1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "24295"
},
{
"name": "Python",
"bytes": "18133"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
}
|
"""
We need three stack migrations for 0.8.0 - there is an issue with fixing data and migrating
schema at the same time
"""
from __future__ import unicode_literals
from django.db import migrations
def blank_to_null(model_name, field_name):
    """Return a RunPython callable that rewrites empty-string values of
    ``field_name`` on the given 'stacks' model to ``None``."""
    def _convert(apps, schema_editor):
        model_cls = apps.get_model('stacks', model_name)
        for instance in model_cls.objects.filter(**{field_name: ''}):
            setattr(instance, field_name, None)
            instance.save()
    return _convert
def null_to_blank(model_name, field_name):
    """Return a RunPython callable that rewrites ``None`` values of
    ``field_name`` on the given 'stacks' model back to empty strings."""
    def _convert(apps, schema_editor):
        model_cls = apps.get_model('stacks', model_name)
        for instance in model_cls.objects.filter(**{field_name: None}):
            setattr(instance, field_name, '')
            instance.save()
    return _convert
def fix_host_fields_forwards(apps, schema_editor):
    """Forward step paired with fix_host_fields_reverse; intentionally empty."""
    # Nothing to do yet
    pass
def fix_host_fields_reverse(apps, schema_editor):
    """Restore each Host's zone/image/size/subnet fields from its
    BlueprintHostDefinition so the migration can be reversed."""
    host_model = apps.get_model('stacks', 'Host')
    bhd_model = apps.get_model('blueprints', 'BlueprintHostDefinition')
    # Fix the fields on the host
    for host in host_model.objects.all():
        definition = bhd_model.objects.get(hosts=host)
        host.availability_zone = definition.zone
        host.cloud_image = definition.cloud_image
        host.instance_size = definition.size
        host.subnet_id = definition.subnet_id
        host.save()
class Migration(migrations.Migration):
    """0.8.0 data migration for stacks (part 3 of 3).

    Converts empty-string DNS fields to NULL, and registers a reverse step
    that restores host fields from the blueprint host definitions.
    """

    dependencies = [
        ('blueprints', '0003_0_8_0_migrations'),
        ('stacks', '0002_0_8_0_migrations'),
    ]
    operations = [
        # Convert the dns fields from empty -> null
        migrations.RunPython(blank_to_null('Host', 'provider_public_dns'), null_to_blank('Host', 'provider_public_dns')),
        migrations.RunPython(blank_to_null('Host', 'provider_private_dns'), null_to_blank('Host', 'provider_private_dns')),
        # Fix all the host fields and delete them (this is so we can go in reverse)
        migrations.RunPython(fix_host_fields_forwards, fix_host_fields_reverse),
    ]
|
{
"content_hash": "3796250516ff0fc195f9a375d814ded7",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 123,
"avg_line_length": 28.536231884057973,
"alnum_prop": 0.6373793803961402,
"repo_name": "clarkperkins/stackdio",
"id": "8d4a4284d76ed0ebb2d2817881ad854aa323dc15",
"size": "2041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/api/stacks/migrations/0003_0_8_0_migrations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
}
|
"""
This actually does a write in Wikidata
"""
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers
from scheduled_bots.geneprotein import HelperBot
from scheduled_bots.geneprotein.ProteinBot import main, Protein, PROPS
from pymongo import MongoClient
from scheduled_bots.local import WDUSER, WDPASS
def _test_write_one_protein(qid, entrezgene, taxid):
    """Run the protein bot for one gene and check the write log.

    Runs ``main`` against the local mygene mirror for *entrezgene*/*taxid*
    with writing enabled, then asserts that *qid* appears in the bot's log
    file and that the log contains no warnings or errors.
    """
    coll = MongoClient().wikidata_src.mygene
    metadata_coll = MongoClient().wikidata_src.mygene_sources
    metadata = metadata_coll.find_one()
    doc_filter = {'_id': entrezgene}
    print("total number of records: {}".format(coll.find(doc_filter).count()))
    main(coll, taxid=taxid, metadata=metadata, fast_run=False, write=True,
         doc_filter=doc_filter)
    fn = wdi_core.WDItemEngine.logger.handlers[0].baseFilename
    # Fix: close the log file handle instead of leaking it.
    with open(fn) as log_file:
        log = log_file.read()
    assert qid in log
    assert "WARNING" not in log and "ERROR" not in log
def test_write_one_human_protein():
    """Write test for a human protein item."""
    _test_write_one_protein("Q21109414", "1877", "9606")


def test_write_one_microbe_protein():
    """Write test for a microbial protein item."""
    _test_write_one_protein("Q23433065", "2614876", "243277")


def test_write_another_microbe_protein():
    """Write test for a second microbial protein item."""
    _test_write_one_protein("Q30106073", "1246473", "243161")


def test_write_one_yeast_protein():
    """Write test for a yeast protein item."""
    _test_write_one_protein("Q27547347", "856002", "559292")


def test_write_one_mouse_protein():
    """Write test for a mouse protein item."""
    _test_write_one_protein("Q21990557", "19744", "10090")
def validate_all_human_protein():
    """Run every human gene document through the validator so the results
    end up in the generated log file."""
    coll = MongoClient().wikidata_src.mygene
    metadata = MongoClient().wikidata_src.mygene_sources.find_one()
    query = {'taxid': 9606, 'entrezgene': {'$exists': True}}
    docs = coll.find(query)
    print("total number of records: {}".format(coll.find(query).count()))
    validated = HelperBot.validate_docs(docs, 'eukaryotic', 'P351')
    tagged = HelperBot.tag_mygene_docs(validated, metadata)
    # Force evaluation of the lazy generator chain.
    list(tagged)
|
{
"content_hash": "1881f0b8a6365fb2d48d554e25e1d2c2",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 30.38157894736842,
"alnum_prop": 0.6916414032048506,
"repo_name": "SuLab/scheduled-bots",
"id": "8d1d2680eff2e6cec5a6c7ae18b314653d923ad9",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scheduled_bots/geneprotein/test_ProteinBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1296"
},
{
"name": "Jupyter Notebook",
"bytes": "1049300"
},
{
"name": "Python",
"bytes": "709603"
},
{
"name": "Shell",
"bytes": "5313"
}
],
"symlink_target": ""
}
|
from os.path import realpath, normpath, exists
from multiprocessing import Process
import cv2
import imutils
import numpy as np
from detect_picture_utils import Broadcaster, Listener, log
class EmotionRecognizer(Process, Broadcaster, Listener):
    """Separate process that finds the largest face in incoming video
    frames and broadcasts a canvas of per-emotion probability bars on the
    'emotion' channel."""

    def __init__(self):
        Process.__init__(self)
        # Results are broadcast under the 'emotion' topic.
        Broadcaster.__init__(self, 'emotion')
        Listener.__init__(self)

    @staticmethod
    def draw(info):
        """Show the probability canvas in an OpenCV window (no-op for None)."""
        if info is None:
            return
        cv2.imshow("Emotion Probabilities", info)

    def run(self):
        """Process entry point: load the model and face detector, then loop
        over 'video_feed' messages, broadcasting the recognition canvas."""
        # Imported here so keras/TF are only loaded inside the child process.
        from keras.models import load_model
        self.model = load_model('epoch_75.hdf5')
        # Locate OpenCV's bundled Haar cascade relative to the cv2 module;
        # fall back to the common system-wide install path.
        path = normpath(realpath(cv2.__file__) + '../../../../../share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
        if not exists(path):
            path = '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'
        self.detector = cv2.CascadeClassifier(path)
        while True:
            data = self.recv()
            # Only react to video frames; ignore other broadcast topics.
            if data[0] != 'video_feed':
                continue
            frame = self.recognize(data[1])
            self.broadcast(frame)

    def recognize(self, frame):
        """Return a 220x300 canvas with per-emotion probability bars for the
        largest detected face, or None when no face is found."""
        from keras.preprocessing.image import img_to_array
        EMOTIONS = ["angry", "scared", "happy", "sad", "surprised", "neutral"]
        frame = imutils.resize(frame, width=300)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the input frame, then clone the frame so that
        # we can draw on it
        rects = self.detector.detectMultiScale(gray, scaleFactor=1.1,
                                               minNeighbors=5, minSize=(30, 30),
                                               flags=cv2.CASCADE_SCALE_IMAGE)
        canvas = None
        # ensure at least one face was found before continuing
        if len(rects) > 0:
            canvas = np.zeros((220, 300, 3), dtype="uint8")
            # determine the largest face area
            # NOTE(review): detectMultiScale rects are (x, y, w, h), so this
            # key mixes coordinates and sizes rather than computing w*h --
            # confirm the intended ordering.
            rect = sorted(rects, reverse=True,
                          key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
            (fX, fY, fW, fH) = rect
            # extract the face ROI from the image, then pre-process
            # it for the network
            roi = gray[fY:fY + fH, fX:fX + fW]
            roi = cv2.resize(roi, (48, 48))
            roi = roi.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            # make a prediction on the ROI, then lookup the class
            # label
            preds = self.model.predict(roi)[0]
            label = EMOTIONS[preds.argmax()]
            # loop over the labels + probabilities and draw them
            for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
                # construct the label text
                text = "{}: {:.2f}%".format(emotion, prob * 100)
                # draw the label + probability bar on the canvas
                w = int(prob * 300)
                cv2.rectangle(canvas, (5, (i * 35) + 5),
                              (w, (i * 35) + 35), (0, 0, 255), -1)
                cv2.putText(canvas, text, (10, (i * 35) + 23),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                            (255, 255, 255), 2)
        # show our classifications + probabilities
        return canvas
|
{
"content_hash": "5789416dd6d507a2b313ca1b1fe91b56",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 128,
"avg_line_length": 36.49438202247191,
"alnum_prop": 0.5504926108374384,
"repo_name": "CyberLabs-BR/face_detect",
"id": "607a18fac6138ee022dd343b4a8de133452c40c2",
"size": "3270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emotion_recognizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141928"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
}
|
""" Actors execute tasks from the database and upload the results. """
from __future__ import print_function
import simcity
from .util import Timer
from couchdb.http import ResourceConflict
try:
from Queue import Empty as QueueEmpty
except ImportError:
from queue import Empty as QueueEmpty
from multiprocessing import cpu_count, Process, Manager
class JobActor(object):
    """
    Executes tasks as a single job with multiple processes
    """
    def __init__(self, iterator, worker_cls, task_db=None, parallelism=None,
                 job_db=None, config=None):
        """
        @param iterator: the iterator to get the tasks from.
        @param worker_cls: worker process class; instantiated with
            (index, config, task queue, result queue, semaphore).
        @param task_db: task database (defaults to simcity's configured one).
        @param parallelism: worker count; None reads the 'Execution' config
            section, '*' means cpu_count(); always capped at cpu_count().
        @param job_db: job database (defaults to simcity's configured one).
        @param config: simcity configuration (defaults to the global one).
        """
        self.iterator = iterator
        self.worker_cls = worker_cls
        if task_db is None:
            task_db = simcity.get_task_database()
        self.task_db = task_db
        if job_db is None:
            job_db = simcity.get_job_database()
        self.job_db = job_db
        if config is None:
            config = simcity.get_config()
        self.config = config.section('Execution')
        if parallelism is None:
            parallelism = self.config.get('parallelism', 1)
        if parallelism == '*':
            self.parallelism = cpu_count()
        else:
            self.parallelism = min(cpu_count(), int(parallelism))
        # Manager-backed queues/semaphore so they can be shared with the
        # worker and collector processes.
        self.manager = Manager()
        self.task_q = self.manager.Queue()
        self.result_q = self.manager.Queue()
        # Bounds the number of in-flight task slots to `parallelism`.
        self.queued_semaphore = self.manager.Semaphore(self.parallelism)
        self.workers = [worker_cls(i, self.config, self.task_q, self.result_q,
                                   self.queued_semaphore)
                        for i in range(self.parallelism)]
        # Shared counter, incremented by the collector per stored result.
        self.tasks_processed = self.manager.Value('i', 0)
        self.job = None
        self.collector = CollectActor(
            self.task_db, self.parallelism, self.result_q,
            self.tasks_processed)

    def run(self, maxtime=None, avg_time_factor=0.0):
        """Run method of the actor, executes the application code by iterating
        over the available tasks in CouchDB.

        @param maxtime: optional wall-clock budget in seconds; iteration
            stops once the projected elapsed time would exceed it.
        @param avg_time_factor: padding added to the processed-task count
            when projecting how long the next task will take.
        """
        time = Timer()
        self.prepare_env()
        self.collector.start()
        for w in self.workers:
            w.start()
        try:
            for task in self.iterator:
                self.set_task_parallelism(task)
                # Acquire one slot per process the task will occupy; blocks
                # until enough previous work has finished.
                for _ in range(task['parallelism']):
                    self.queued_semaphore.acquire()
                processed = self.tasks_processed.value
                if maxtime is not None and processed > 0:
                    # Extrapolate total runtime from the average per task.
                    will_elapse = ((avg_time_factor + processed) *
                                   time.elapsed() / processed)
                    if will_elapse > maxtime:
                        break
                self.task_q.put(task)
            # Drain: wait until every slot is released (all work finished).
            for _ in range(self.parallelism):
                self.queued_semaphore.acquire()
        finally:
            self.cleanup_env()

    def set_task_parallelism(self, task):
        """ Determine the preferred parallelism of a task and set it
        in the parallelism property. """
        if 'parallelism' not in task:
            task['parallelism'] = 1
        elif task['parallelism'] == '*':
            task['parallelism'] = self.parallelism
        else:
            task['parallelism'] = min(int(task['parallelism']),
                                      self.parallelism)

    def prepare_env(self):
        """ Prepares the current job by registering it as started in the
        database. """
        self.job = simcity.start_job(
            database=self.job_db, properties={'parallelism': self.parallelism})

    def cleanup_env(self):
        """ Cleans up the current job by registering it as finished. """
        try:
            # One None sentinel per worker tells it to stop.
            for _ in self.workers:
                self.task_q.put(None)
        except IOError:
            # Manager connection may already be gone; workers will exit.
            pass
        for w in self.workers:
            w.join()
        self.collector.join()
        self.job['tasks_processed'] = self.tasks_processed.value
        simcity.finish_job(self.job, database=self.job_db)
class CollectActor(Process):
    """ Collects finished tasks from the JobActor """
    def __init__(self, task_db, parallelism, result_q, tasks_processed):
        super(CollectActor, self).__init__()
        self.result_q = result_q
        self.task_db = task_db
        self.is_done = False
        # Shared counter of stored results (written from this process).
        self.tasks_processed = tasks_processed
        # Number of workers; one None sentinel is expected from each.
        self.parallelism = parallelism
        self.workers_done = 0

    def run(self):
        """ In the new process, create a new database connection and put
        finished jobs there. """
        # Copy the DB handle so this process has its own connection.
        self.task_db = self.task_db.copy()
        while self.workers_done < self.parallelism:
            try:
                task = self.result_q.get()
                if task is None:
                    # Sentinel: one worker finished.
                    self.workers_done += 1
                    continue
                save_task(task, self.task_db)
                self.tasks_processed.value += 1
            except QueueEmpty:
                # get() blocks, but guard against a spurious empty queue.
                pass
            except EOFError:
                # Manager connection closed: treat as all workers done.
                self.workers_done = self.parallelism
def save_task(task, task_db):
    """Persist *task* in *task_db*, retrying until the save succeeds.

    On a revision conflict the task's revision is refreshed from the
    database and the save is retried, deliberately overwriting concurrent
    changes (model results take precedence).
    """
    while True:
        try:
            task_db.save(task)
            return
        except ResourceConflict:
            # Overwrite concurrent edits: refresh to the latest revision
            # and try again.
            task['_rev'] = task_db.get(task.id).rev
|
{
"content_hash": "84330d680bab04567f43ab5702d6ea57",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 32.89820359281437,
"alnum_prop": 0.5575172915908263,
"repo_name": "indodutch/sim-city-client",
"id": "88bc21d9db0ec9202e86316bdb865ac034041651",
"size": "6106",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "simcity/actors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3669"
},
{
"name": "Python",
"bytes": "195426"
},
{
"name": "Shell",
"bytes": "1553"
}
],
"symlink_target": ""
}
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters,
                             mean_change_tol, cal_sstats, random_state):
    """E-step: update document-topic distribution.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expection of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.

    doc_topic_prior : float
        Prior of document topic distribution `theta`.

    max_iters : int
        Max number of iterations for updating document topic distribution in
        the E-step.

    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-setp.

    cal_sstats : boolean
        Parameter that indicate to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run M-step.

    random_state : RandomState instance or None
        Parameter that indicate how to initialize document topic distribution.
        Set `random_state` to None will initialize document topic distribution
        to a constant number.

    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. we can calculate `E[log(theta)]`
        from it.
        `suff_stats` is expected sufficient statistics for the M-step.
        When `cal_sstats == False`, this will be None.
    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]
    # Random gamma init when a random state is given, constant otherwise.
    if random_state:
        doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
    else:
        doc_topic_distr = np.ones((n_samples, n_topics))
    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # diff on `component_` (only calculate it when `cal_diff` is True)
    suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr
    for idx_d in xrange(n_samples):
        # Word ids and counts of document idx_d (sparse CSR or dense row).
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]
        doc_topic_d = doc_topic_distr[idx_d, :]
        # The next one is a copy, since the inner loop overwrites it.
        exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
        exp_topic_word_d = exp_topic_word_distr[:, ids]
        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in xrange(0, max_iters):
            last_d = doc_topic_d
            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            doc_topic_d = (exp_doc_topic_d *
                           np.dot(cnts / norm_phi, exp_topic_word_d.T))
            # Note: adds doc_topic_prior to doc_topic_d, in-place.
            _dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
                                      exp_doc_topic_d)
            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d
        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
    return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
set it to 0 or negative number to not evalute perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
    def _e_step(self, X, cal_sstats, random_init, parallel=None):
        """E-step in EM update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        cal_sstats : boolean
            Parameter that indicate whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run M-step.
        random_init : boolean
            Parameter that indicate whether to initialize document topic
            distribution randomly in the E-step. Set it to True in training
            steps.
        parallel : joblib.Parallel (optional)
            Pre-initialized instance of joblib.Parallel.

        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, it will be None.
        """
        # Run e-step in parallel: X is split into n_jobs contiguous row
        # slices, each updated independently.
        random_state = self.random_state_ if random_init else None
        # TODO: make Parallel._effective_n_jobs public instead?
        n_jobs = _get_n_jobs(self.n_jobs)
        if parallel is None:
            parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
        results = parallel(
            delayed(_update_doc_distribution)(X[idx_slice, :],
                                              self.exp_dirichlet_component_,
                                              self.doc_topic_prior_,
                                              self.max_doc_update_iter,
                                              self.mean_change_tol, cal_sstats,
                                              random_state)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))
        # merge result: stack per-slice gammas back into document order
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)
        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None
        return (doc_topic_distr, suff_stats)
    def _em_step(self, X, total_samples, batch_update, parallel=None):
        """EM update for 1 iteration.

        update `_component` by batch VB or online VB.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        total_samples : integer
            Total number of documents. It is only used when
            batch_update is `False`.
        batch_update : boolean
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.
        parallel : joblib.Parallel
            Pre-initialized instance of joblib.Parallel

        Returns
        -------
        None
            ``self.components_`` and ``self.exp_dirichlet_component_`` are
            updated in place and ``self.n_batch_iter_`` is incremented.
        """
        # E-step
        _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
                                     parallel=parallel)
        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`
            weight = np.power(self.learning_offset + self.n_batch_iter_,
                              -self.learning_decay)
            # scale the mini-batch statistics up to the full corpus size
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= (1 - weight)
            self.components_ += (weight * (self.topic_word_prior_
                                           + doc_ratio * suff_stats))
        # update `component_` related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
        self.n_batch_iter_ += 1
        return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
    def partial_fit(self, X, y=None):
        """Online VB with Mini-Batch update.

        Latent variables are initialized lazily on the first call, so
        successive calls keep refining the same model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.partial_fit")
        n_samples, n_features = X.shape
        batch_size = self.batch_size
        # initialize parameters or check
        if not hasattr(self, 'components_'):
            self._init_latent_vars(n_features)
        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))
        n_jobs = _get_n_jobs(self.n_jobs)
        with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
            # each mini-batch is weighted against self.total_samples rather
            # than len(X), since X is only part of the corpus here
            for idx_slice in gen_batches(n_samples, batch_size):
                self._em_step(X[idx_slice, :],
                              total_samples=self.total_samples,
                              batch_update=False,
                              parallel=parallel)
        return self
    def fit(self, X, y=None):
        """Learn model for the data X with variational Bayes method.

        When `learning_method` is 'online', use mini-batch update.
        Otherwise, use batch update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
        n_samples, n_features = X.shape
        max_iter = self.max_iter
        evaluate_every = self.evaluate_every
        learning_method = self.learning_method
        batch_size = self.batch_size
        # initialize parameters (discards the result of any previous fit)
        self._init_latent_vars(n_features)
        # change to perplexity later
        last_bound = None
        n_jobs = _get_n_jobs(self.n_jobs)
        with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
            for i in xrange(max_iter):
                if learning_method == 'online':
                    # one stochastic pass over the data, mini-batch by
                    # mini-batch
                    for idx_slice in gen_batches(n_samples, batch_size):
                        self._em_step(X[idx_slice, :], total_samples=n_samples,
                                      batch_update=False, parallel=parallel)
                else:
                    # batch update
                    self._em_step(X, total_samples=n_samples,
                                  batch_update=True, parallel=parallel)
                # check perplexity every `evaluate_every` passes; stop early
                # once it changes by less than `perp_tol`
                if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
                    doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                                       random_init=False,
                                                       parallel=parallel)
                    bound = self.perplexity(X, doc_topics_distr,
                                            sub_sampling=False)
                    if self.verbose:
                        print('iteration: %d, perplexity: %.4f'
                              % (i + 1, bound))
                    if last_bound and abs(last_bound - bound) < self.perp_tol:
                        break
                    last_bound = bound
                self.n_iter_ += 1
        return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
# normalize doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
    def _approx_bound(self, X, doc_topic_distr, sub_sampling):
        """Estimate the variational bound.

        Estimate the variational bound over "all documents" using only the
        documents passed in as X. Since log-likelihood of each word cannot
        be computed directly, we use this bound to estimate it.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        doc_topic_distr : array, shape=(n_samples, n_topics)
            Document topic distribution. In the literature, this is called
            gamma.
        sub_sampling : boolean, optional, (default=False)
            Compensate for subsampling of documents.
            It is used in calculate bound in online learning.

        Returns
        -------
        score : float
        """
        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # calculate log-likelihood of one symmetric-prior Dirichlet
            # term: E[log p(distr | prior)] - E[log q(distr)]
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score
        is_sparse_x = sp.issparse(X)
        n_samples, n_topics = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0
        # E[log theta] and E[log beta] under the variational posteriors
        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_
        if is_sparse_x:
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr
        # E[log p(docs | theta, beta)]
        for idx_d in xrange(0, n_samples):
            # ids/cnts: column indices and counts of the nonzero words of
            # document idx_d
            if is_sparse_x:
                ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
                    + dirichlet_component_[:, ids])
            # NOTE(review): the per-word normalizer should reduce over the
            # topic axis (axis 0 of `temp`); confirm the `logsumexp` in
            # scope defaults to that axis rather than summing all elements.
            norm_phi = logsumexp(temp)
            score += np.dot(cnts, norm_phi)
        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(doc_topic_prior, doc_topic_distr,
                                dirichlet_doc_topic, self.n_topics)
        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio
        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(topic_word_prior, self.components_,
                                dirichlet_component_, n_features)
        return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
    def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
        """Calculate approximate perplexity for data X.

        Perplexity is defined as exp(-1. * log-likelihood per word)

        Parameters
        ----------
        X : array-like or sparse matrix, [n_samples, n_features]
            Document word matrix.
        doc_topic_distr : None or array, shape=(n_samples, n_topics)
            Document topic distribution.
            If it is None, it will be generated by applying transform on X.
        sub_sampling : boolean, optional (default=False)
            Whether X is a subsample of the corpus; if so, the bound and
            the word count are rescaled to ``total_samples`` documents.

        Returns
        -------
        score : float
            Perplexity score.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.perplexity")
        if doc_topic_distr is None:
            doc_topic_distr = self.transform(X)
        else:
            # a caller-supplied gamma must match X row-wise and the model
            # topic-wise
            n_samples, n_topics = doc_topic_distr.shape
            if n_samples != X.shape[0]:
                raise ValueError("Number of samples in X and doc_topic_distr"
                                 " do not match.")
            if n_topics != self.n_topics:
                raise ValueError("Number of topics does not match.")
        current_samples = X.shape[0]
        bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
        if sub_sampling:
            # scale word count up to the full corpus before normalizing
            word_cnt = X.sum() * (float(self.total_samples) / current_samples)
        else:
            word_cnt = X.sum()
        perword_bound = bound / word_cnt
        return np.exp(-1.0 * perword_bound)
|
{
"content_hash": "17e917621f3a97dbcefea42e47ad9a2b",
"timestamp": "",
"source": "github",
"line_count": 712,
"max_line_length": 83,
"avg_line_length": 37.58005617977528,
"alnum_prop": 0.5702433008184774,
"repo_name": "imaculate/scikit-learn",
"id": "d7adedbeedb13e3c8bcdbce822711ecc559cd27b",
"size": "26757",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/online_lda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "399679"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6591204"
},
{
"name": "Shell",
"bytes": "9216"
}
],
"symlink_target": ""
}
|
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
    """Raise unless this thread is in the middle of spawning a child.

    Objects that can only be inherited (not freely pickled) call this from
    their reduction hooks; `self` is that object and its type name is used
    in the error message.
    """
    if Popen.thread_is_spawning():
        return
    raise RuntimeError(
        '%s objects should only be shared between processes'
        ' through inheritance' % type(self).__name__
    )
#
# Try making some callable types picklable
#
from pickle import Pickler
class ForkingPickler(Pickler):
    """Pickler with a class-level copy of the dispatch table.

    Reduction functions registered here apply to every ForkingPickler
    instance without touching the global pickle dispatch table.
    """
    dispatch = Pickler.dispatch.copy()
    @classmethod
    def register(cls, type, reduce):
        # Wrap `reduce` in the (self, obj) signature the dispatch table
        # expects; save_reduce writes the reconstruction recipe returned
        # by `reduce`.
        def dispatcher(self, obj):
            rv = reduce(obj)
            self.save_reduce(obj=obj, *rv)
        cls.dispatch[type] = dispatcher
def _reduce_method(m):
    # Rebuild a Python 2 method by attribute lookup: unbound methods
    # (im_self is None) are fetched from their class, bound methods from
    # their instance.
    owner = m.im_class if m.im_self is None else m.im_self
    return getattr, (owner, m.im_func.func_name)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
def _reduce_method_descriptor(m):
    # A method descriptor (e.g. list.append) pickles as a getattr on the
    # class that defines it, looked up by name.
    owner, attr_name = m.__objclass__, m.__name__
    return getattr, (owner, attr_name)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
#def _reduce_builtin_function_or_method(m):
#    return getattr, (m.__self__, m.__name__)
#ForkingPickler.register(type(list().append), _reduce_builtin_function_or_method)
#ForkingPickler.register(type(int().__add__), _reduce_builtin_function_or_method)
try:
    from functools import partial
except ImportError:
    pass
else:
    # functools.partial objects pickle by re-creating them from their
    # (func, args, keywords) triple.
    def _reduce_partial(p):
        return _rebuild_partial, (p.func, p.args, p.keywords or {})
    def _rebuild_partial(func, args, keywords):
        return partial(func, *args, **keywords)
    ForkingPickler.register(partial, _reduce_partial)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
    class Popen(object):
        """fork()-based process starter.

        Unlike subprocess.Popen, the constructor takes a process object
        (anything with a ``_bootstrap`` method) rather than a command line.
        """
        def __init__(self, process_obj):
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None
            self.pid = os.fork()
            if self.pid == 0:
                # Child: re-seed the random module (if loaded) so the child
                # does not share the parent's PRNG state, run the process
                # object's bootstrap, then exit without unwinding the stack.
                if 'random' in sys.modules:
                    import random
                    random.seed()
                code = process_obj._bootstrap()
                sys.stdout.flush()
                sys.stderr.flush()
                os._exit(code)
        def poll(self, flag=os.WNOHANG):
            # Reap the child (non-blocking by default) and translate the
            # wait status into a subprocess-style returncode: negative
            # signal number if killed, exit status otherwise.
            if self.returncode is None:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except os.error:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                if pid == self.pid:
                    if os.WIFSIGNALED(sts):
                        self.returncode = -os.WTERMSIG(sts)
                    else:
                        assert os.WIFEXITED(sts)
                        self.returncode = os.WEXITSTATUS(sts)
            return self.returncode
        def wait(self, timeout=None):
            # Poll with exponential backoff (capped at 50 ms) until the
            # child exits or the timeout expires; returns None on timeout.
            if timeout is None:
                return self.poll(0)
            deadline = time.time() + timeout
            delay = 0.0005
            while 1:
                res = self.poll()
                if res is not None:
                    break
                remaining = deadline - time.time()
                if remaining <= 0:
                    break
                delay = min(delay * 2, remaining, 0.05)
                time.sleep(delay)
            return res
        def terminate(self):
            # Best-effort SIGTERM; a kill failure is ignored only if the
            # child turns out to have already exited.
            if self.returncode is None:
                try:
                    os.kill(self.pid, signal.SIGTERM)
                except OSError, e:
                    if self.wait(timeout=0.1) is None:
                        raise
        @staticmethod
        def thread_is_spawning():
            # fork() inherits everything, so on Unix there is never a
            # special "spawning" pickling mode.
            return False
#
# Windows
#
else:
import thread
import msvcrt
import _subprocess
import time
from _multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
    #try:
    #    from cPickle import dump, load, HIGHEST_PROTOCOL
    #except ImportError:
    from pickle import load, HIGHEST_PROTOCOL
    def dump(obj, file, protocol=None):
        # Always pickle through ForkingPickler so the reduction hooks
        # registered above (methods, connections, partials) take effect.
        ForkingPickler(file, protocol).dump(obj)
#
#
#
    # Exit code used by terminate(); mapped back to -SIGTERM in wait().
    TERMINATE = 0x10000
    # True when running from a frozen (e.g. py2exe-style) executable.
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
    exit = win32.ExitProcess
    close = win32.CloseHandle
    #
    # _python_exe is the assumed path to the python executable.
    # People embedding Python want to modify it.
    #
    if sys.executable.lower().endswith('pythonservice.exe'):
        _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
    else:
        _python_exe = sys.executable
    def set_executable(exe):
        # Override the interpreter used to spawn children.
        global _python_exe
        _python_exe = exe
#
#
#
    def duplicate(handle, target_process=None, inheritable=False):
        # Duplicate a Windows handle into target_process (default: this
        # process) and return the raw handle value, detached from its
        # wrapper so it is not closed when the wrapper is collected.
        if target_process is None:
            target_process = _subprocess.GetCurrentProcess()
        return _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle, target_process,
            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
            ).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
    class Popen(object):
        '''
        Start a subprocess to run the code of a process object
        '''
        # Thread-local storage: process_handle is set only while pickling
        # data for a child, which is what thread_is_spawning() reports.
        _tls = thread._local()
        def __init__(self, process_obj):
            # create pipe for communication with child
            rfd, wfd = os.pipe()
            # get handle for read end of the pipe and make it inheritable
            rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
            os.close(rfd)
            # start process
            cmd = get_command_line() + [rhandle]
            cmd = ' '.join('"%s"' % x for x in cmd)
            hp, ht, pid, tid = _subprocess.CreateProcess(
                _python_exe, cmd, None, None, 1, 0, None, None, None
                )
            # thread handle is not needed; only our duplicate of rhandle
            # must be closed once the child has inherited its own copy
            ht.Close()
            close(rhandle)
            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            # send information to child
            prep_data = get_preparation_data(process_obj._name)
            to_child = os.fdopen(wfd, 'wb')
            # While process_handle is set, thread_is_spawning() is True and
            # duplicate_for_child() can target the new process.
            Popen._tls.process_handle = int(hp)
            try:
                dump(prep_data, to_child, HIGHEST_PROTOCOL)
                dump(process_obj, to_child, HIGHEST_PROTOCOL)
            finally:
                del Popen._tls.process_handle
                to_child.close()
        @staticmethod
        def thread_is_spawning():
            return getattr(Popen._tls, 'process_handle', None) is not None
        @staticmethod
        def duplicate_for_child(handle):
            # Duplicate `handle` into the child currently being spawned.
            return duplicate(handle, Popen._tls.process_handle)
        def wait(self, timeout=None):
            # Block (up to timeout) on the process handle; map the special
            # TERMINATE exit code back to -SIGTERM, subprocess-style.
            if self.returncode is None:
                if timeout is None:
                    msecs = _subprocess.INFINITE
                else:
                    msecs = max(0, int(timeout * 1000 + 0.5))
                res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
                if res == _subprocess.WAIT_OBJECT_0:
                    code = _subprocess.GetExitCodeProcess(self._handle)
                    if code == TERMINATE:
                        code = -signal.SIGTERM
                    self.returncode = code
            return self.returncode
        def poll(self):
            return self.wait(timeout=0)
        def terminate(self):
            # Best-effort TerminateProcess; failure is ignored only if the
            # process turns out to have already exited.
            if self.returncode is None:
                try:
                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
                except WindowsError:
                    if self.wait(timeout=0.1) is None:
                        raise
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
    def freeze_support():
        '''
        Run code for process object if this is not the main process
        '''
        # In a frozen executable the child re-runs the same binary; detect
        # the fork marker, run the child bootstrap and never return.
        if is_forking(sys.argv):
            main()
            sys.exit()
    def get_command_line():
        '''
        Returns prefix of command line used for spawning a child process
        '''
        # Guard against bootstrap recursion: a child that tries to spawn
        # before finishing its own unpickling would fork-bomb on Windows.
        if process.current_process()._identity==() and is_forking(sys.argv):
            raise RuntimeError('''
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.
            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:
                if __name__ == '__main__':
                    freeze_support()
                    ...
            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable.''')
        if getattr(sys, 'frozen', False):
            # Frozen executables re-run themselves with the fork marker.
            return [sys.executable, '--multiprocessing-fork']
        else:
            prog = 'from multiprocessing.forking import main; main()'
            return [_python_exe, '-c', prog, '--multiprocessing-fork']
    def main():
        '''
        Run code specified by data received over pipe
        '''
        assert is_forking(sys.argv)
        # argv[-1] is the inheritable handle of the read end of the pipe
        # set up by Popen.__init__ in the parent.
        handle = int(sys.argv[-1])
        fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
        from_parent = os.fdopen(fd, 'rb')
        # _inheriting suppresses the "only through inheritance" pickling
        # guard while the process object itself is being unpickled.
        process.current_process()._inheriting = True
        preparation_data = load(from_parent)
        prepare(preparation_data)
        self = load(from_parent)
        process.current_process()._inheriting = False
        from_parent.close()
        exitcode = self._bootstrap()
        exit(exitcode)
    def get_preparation_data(name):
        '''
        Return info about parent needed by child to unpickle process object
        '''
        from .util import _logger, _log_to_stderr
        d = dict(
            name=name,
            sys_path=sys.path,
            sys_argv=sys.argv,
            log_to_stderr=_log_to_stderr,
            orig_dir=process.ORIGINAL_DIR,
            authkey=process.current_process().authkey,
            )
        if _logger is not None:
            d['log_level'] = _logger.getEffectiveLevel()
        if not WINEXE:
            # Tell the child which script is __main__ so prepare() can
            # re-import it (frozen executables have no main script file).
            main_path = getattr(sys.modules['__main__'], '__file__', None)
            if not main_path and sys.argv[0] not in ('', '-c'):
                main_path = sys.argv[0]
            if main_path is not None:
                if not os.path.isabs(main_path) and \
                        process.ORIGINAL_DIR is not None:
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d['main_path'] = os.path.normpath(main_path)
        return d
#
# Make (Pipe)Connection picklable
#
    def reduce_connection(conn):
        # Connections can only be pickled while spawning a child; they are
        # rebuilt in the child from a handle duplicated into that process.
        if not Popen.thread_is_spawning():
            raise RuntimeError(
                'By default %s objects can only be shared between processes\n'
                'using inheritance' % type(conn).__name__
                )
        return type(conn), (Popen.duplicate_for_child(conn.fileno()),
                            conn.readable, conn.writable)
    ForkingPickler.register(Connection, reduce_connection)
    ForkingPickler.register(PipeConnection, reduce_connection)
#
# Prepare current process
#
    old_main_modules = []
    def prepare(data):
        '''
        Try to get current process ready to unpickle process object
        '''
        # Keep the previous __main__ alive; it is replaced below.
        old_main_modules.append(sys.modules['__main__'])
        if 'name' in data:
            process.current_process().name = data['name']
        if 'authkey' in data:
            process.current_process()._authkey = data['authkey']
        if 'log_to_stderr' in data and data['log_to_stderr']:
            util.log_to_stderr()
        if 'log_level' in data:
            util.get_logger().setLevel(data['log_level'])
        if 'sys_path' in data:
            sys.path = data['sys_path']
        if 'sys_argv' in data:
            sys.argv = data['sys_argv']
        if 'dir' in data:
            os.chdir(data['dir'])
        if 'orig_dir' in data:
            process.ORIGINAL_DIR = data['orig_dir']
        if 'main_path' in data:
            # Re-import the parent's main script under the alias
            # '__parents_main__' so "if __name__ == '__main__'" blocks do
            # NOT run, then install it as __main__ for unpickling.
            main_path = data['main_path']
            main_name = os.path.splitext(os.path.basename(main_path))[0]
            if main_name == '__init__':
                main_name = os.path.basename(os.path.dirname(main_path))
            if main_name != 'ipython':
                import imp
                if main_path is None:
                    dirs = None
                elif os.path.basename(main_path).startswith('__init__.py'):
                    dirs = [os.path.dirname(os.path.dirname(main_path))]
                else:
                    dirs = [os.path.dirname(main_path)]
                assert main_name not in sys.modules, main_name
                file, path_name, etc = imp.find_module(main_name, dirs)
                try:
                    # We would like to do "imp.load_module('__main__', ...)"
                    # here. However, that would cause 'if __name__ ==
                    # "__main__"' clauses to be executed.
                    main_module = imp.load_module(
                        '__parents_main__', file, path_name, etc
                        )
                finally:
                    if file:
                        file.close()
                sys.modules['__main__'] = main_module
                main_module.__name__ = '__main__'
                # Try to make the potentially picklable objects in
                # sys.modules['__main__'] realize they are in the main
                # module -- somewhat ugly.
                for obj in main_module.__dict__.values():
                    try:
                        if obj.__module__ == '__parents_main__':
                            obj.__module__ = '__main__'
                    except Exception:
                        pass
|
{
"content_hash": "1fa1b89d2e95107ffdabc5c7810ea1ec",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 86,
"avg_line_length": 30.572340425531916,
"alnum_prop": 0.5408866309416104,
"repo_name": "MalloyPower/parsing-python",
"id": "5e04725a1e20c09b166b28c2ba17006e637b88ae",
"size": "14538",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.7/Lib/multiprocessing/forking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Read the long description from the README that sits next to this file.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Packaging metadata for the single-module `brokenaxes` distribution.
setup(
    name='brokenaxes',
    version='0.5.0',
    description='Create broken axes',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Ben Dichter',
    url='https://github.com/bendichter/brokenaxes',
    author_email='ben.dichter@gmail.com',
    classifiers=[
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Framework :: Matplotlib',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
    keywords='data visualization',
    #packages=find_packages(exclude=['docs']),
    # Distributed as a single module rather than a package.
    py_modules=["brokenaxes"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['matplotlib>3.4'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        # 'dev': ['check-manifest'],
        'test': ['pytest'],
    },
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    #entry_points={
    #    'console_scripts': [
    #        'sample=sample:main',
    #    ],
    #},
)
|
{
"content_hash": "3c00a2bddc5237a526c655089e7ceb2d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.901639344262296,
"alnum_prop": 0.6500513874614594,
"repo_name": "bendichter/brokenaxes",
"id": "a98b46a3270bb6755cc41ae3d9cb41190e88f3d6",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21131"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    # Creates the Province model (name + MultiPolygon geometry, linked to
    # Country) and rewrites Country's `id` and `name` field definitions.
    dependencies = [
        ('event_mapper', '0029_auto_20150618_1416'),
    ]
    operations = [
        migrations.CreateModel(
            name='Province',
            fields=[
                ('name', models.CharField(help_text=b'The name of the province or state.', max_length=50, verbose_name=b'')),
                ('polygon_geometry', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
                ('id', models.AutoField(serialize=False, primary_key=True)),
                ('country', models.ForeignKey(to='event_mapper.Country')),
            ],
            options={
                'verbose_name_plural': 'Provinces',
            },
            bases=(models.Model,),
        ),
        migrations.AlterField(
            model_name='country',
            name='id',
            field=models.AutoField(serialize=False, primary_key=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='country',
            name='name',
            field=models.CharField(help_text=b'The name of the country.', max_length=50, verbose_name=b''),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "4eb4fbb17d56aeb999dda5b5ce4b0aea",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 125,
"avg_line_length": 34.256410256410255,
"alnum_prop": 0.5643712574850299,
"repo_name": "timlinux/watchkeeper",
"id": "ba66f62830eb85407923b84d29c21bae6e787b3d",
"size": "1360",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "django_project/event_mapper/migrations/0030_auto_20150619_2116.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "10908"
},
{
"name": "HTML",
"bytes": "102053"
},
{
"name": "JavaScript",
"bytes": "240577"
},
{
"name": "Makefile",
"bytes": "12495"
},
{
"name": "Python",
"bytes": "254321"
},
{
"name": "Shell",
"bytes": "4602"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Generated by Django 1.9 on 2016-01-17 00:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the unused `slug` field from the Site model.
    dependencies = [
        ('sites', '0005_auto_20150823_2041'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='site',
            name='slug',
        ),
    ]
|
{
"content_hash": "71aaa19fe5d6e8557b30772cfedd0b7d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 45,
"avg_line_length": 21,
"alnum_prop": 0.6115288220551378,
"repo_name": "AlmostBetterNetwork/podmaster-host",
"id": "633690e89b168c1313f109c376fed034ea6aa4ee",
"size": "423",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sites/migrations/0006_remove_site_slug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59857"
},
{
"name": "HTML",
"bytes": "130751"
},
{
"name": "JavaScript",
"bytes": "46479"
},
{
"name": "Python",
"bytes": "200422"
}
],
"symlink_target": ""
}
|
import io
import util
import typedbytes2
|
{
"content_hash": "ca808a2cbab6883046d818c8cf9cbb19",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 18,
"avg_line_length": 10.5,
"alnum_prop": 0.8333333333333334,
"repo_name": "arbenson/mrtsqr",
"id": "2d42304d7e533a871ee2d5634a8697a360f2fe0a",
"size": "924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dumbo/hyy-python-hadoop/hadoop/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2012"
},
{
"name": "C++",
"bytes": "76142"
},
{
"name": "Java",
"bytes": "21697"
},
{
"name": "Makefile",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "292758"
},
{
"name": "Shell",
"bytes": "13614"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# Script bootstrap (Python 2): open the sensor serial link, configure
# logging, parse the --connect argument (falling back to a SITL
# simulator) and connect to the vehicle.
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
from pymavlink import mavutil
from Find_serial_ports import serial_ports
from Use_Sensors import engage_target
import logging
import time
import math
import serial
# NOTE(review): blindly uses the first detected serial port at 57600
# baud -- confirm the port ordering is stable on the target machine.
sensor_port = serial_ports()
ser = serial.Serial(sensor_port[0],57600)
logging.basicConfig(filename='Flight1.log', format = '%(levelname)s:%(asctime)s: %(message)s', level=logging.INFO)
import argparse
parser = argparse.ArgumentParser(description='Control Copter and send commands in GUIDED mode ')
parser.add_argument('--connect',
                    help="Vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()
connection_string = args.connect
sitl = None
#Start SITL if no connection string specified
if not connection_string:
    import dronekit_sitl
    sitl = dronekit_sitl.start_default()
    connection_string = sitl.connection_string()
# Connect to the Vehicle
print 'Connecting to vehicle on: %s' % connection_string
vehicle = connect(connection_string, wait_ready=True)
def arm_and_takeoff(aTargetAltitude):
    """
    Arm the (global) vehicle and climb to aTargetAltitude.

    Blocks until the autopilot reports armable, then armed, then until the
    relative altitude reaches 95% of the target. Polls once per second.
    """
    print "Basic pre-arm checks"
    # Don't let the user try to arm until autopilot is ready
    while not vehicle.is_armable:
        print " Waiting for vehicle to initialise..."
        time.sleep(1)
    print "Arming motors"
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    # Block until the flight controller confirms the arm request.
    while not vehicle.armed:
        print " Waiting for arming..."
        time.sleep(1)
    print "Taking off!"
    vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
    # after Vehicle.simple_takeoff will execute immediately).
    while True:
        print " Altitude: ", vehicle.location.global_relative_frame.alt
        if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
            print "Reached target altitude"
            break
        time.sleep(1)
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
    """
    Move vehicle in direction based on specified velocity vectors and
    for the specified duration.
    This uses the SET_POSITION_TARGET_LOCAL_NED command with a type mask enabling only
    velocity components
    (http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_local_ned).
    Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
    with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
    velocity persists until it is canceled. The code below should work on either version
    (sending the message multiple times does not cause problems).
    See the above link for information on the type_mask (0=enable, 1=ignore).
    At time of writing, acceleration and yaw bits are ignored.
    """
    # NOTE(review): `duration` is currently unused -- the 1 Hz resend loop at
    # the bottom is commented out, so each call sends exactly one message.
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,       # time_boot_ms (not used)
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
        0b0000111111000111, # type_mask (only speeds enabled)
        0, 0, 0, # x, y, z positions (not used)
        velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
        0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    vehicle.send_mavlink(msg)
    # send command to vehicle on 1 Hz cycle
    # for x in range(0,duration):
    # vehicle.send_mavlink(msg)
    # time.sleep(1)
def update_pos():
    """Print and log the vehicle's local-frame position and current heading."""
    position_log = vehicle.location.local_frame
    heading_log = "vehicle heading: %s" % vehicle.heading
    print(vehicle.location.local_frame)
    print (heading_log)
    logging.info(position_log)
    logging.info(heading_log)
def log_Sensors():
    """Log the latest sensor reading plus the vehicle position.

    NOTE(review): reads the module-level global `just_log_data`, which the
    main script refreshes via engage_target() before each call -- confirm
    that ordering holds at every call site.
    """
    log_msg_sensors = str(just_log_data.target_info)
    # CSV header describing the fields packed into target_info.
    log_header = "x,y,w,h,Lidar,pan_angle,tilt_angle,obs_avoid,dist_target"
    logging.info(log_header)
    logging.info(log_msg_sensors)
    update_pos()
#Arm and take off to altitude of 3 meters
arm_and_takeoff(3)
# Prime one sensor reading before starting the manoeuvre.
xyz_coord = str.split(ser.readline(),',')
xyz_coord = xyz_coord[0:9]
just_log_data = engage_target(xyz_coord)
log_Sensors()
time.sleep(1)
# Command 1 m/s along +y (east, in the MAV_FRAME_LOCAL_NED frame), 5 times.
for x in range(0,5):
    send_ned_velocity(0, 1, 0, 1)
# Log sensor data + position at ~10 Hz while moving.
for x in range(0,10):
    xyz_coord = str.split(ser.readline(),',')
    xyz_coord = xyz_coord[0:9]
    update_pos()
    #Need to verify port
    just_log_data = engage_target(xyz_coord)
    log_Sensors()
    time.sleep(.1)
# Command zero velocity to stop, then keep logging at ~10 Hz.
for x in range(0,10):
    send_ned_velocity(0, 0, 0, 1)
for x in range(0,10):
    xyz_coord = str.split(ser.readline(),',')
    xyz_coord = xyz_coord[0:9]
    update_pos()
    #Need to verify port
    just_log_data = engage_target(xyz_coord)
    log_Sensors()
    time.sleep(.1)
print("Setting LAND mode...")
vehicle.mode = VehicleMode("LAND")
#Close vehicle object before exiting script
print "Close vehicle object"
vehicle.close()
# Shut down simulator if it was started.
if sitl is not None:
    sitl.stop()
print("Completed")
|
{
"content_hash": "561b798dce4a1c6e856598b109fadc17",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 115,
"avg_line_length": 32.05780346820809,
"alnum_prop": 0.6601153984853949,
"repo_name": "sassystacks/DASS",
"id": "1bacb09d3a9ce3fe4a0f517867638dc1d2476db1",
"size": "5546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Simulation/Flight1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "46963"
},
{
"name": "Python",
"bytes": "165470"
}
],
"symlink_target": ""
}
|
''' Parameter settings for SFselect training.
Models of the scaled SFS for classifying neutral evolution from selective sweeps.

All values are module-level constants read by the training pipeline; the
commented sections at the bottom are alternative configurations (soft sweep,
demographic scenario, plotting) meant to be toggled by hand.
'''
####################################################################################
#################################### General #######################################
####################################################################################
# prefix of path to simulation data dirs
sim_dir_pref = "/home/rronen/Documents/selection/data/sim_500_2.4e-07"
# first & last simulation, used by reader to construct input file names
first_sim, last_sim = 0, 500
# case & control SFS vector types
case_type = "case_xpSFS" # can be: case, case_xpSFS
cont_type = "cont_xpSFS" # can be: cont1, cont_xpSFS
# switch for learning from demographic simulations
demographic = False
# default name for data (SFS vectors) file
data_file = "sfs_vectors.pck"
# treatment of fixed mutations
ignore_xp_fixed = True # ignore fixed SNPs if fixed in both populations, otherwise keep
ignore_all_fixed = False # ignore fixed SNPs, used for strict theta purposes
# maximal number of frequency bins for binned SFS. This is exact number of bins
# unless there are fewer haplotypes in the input sample, which shouldn't happen
max_bins = 10 # default: 10 (also tried 8,16,20)
max_bins_case = 7 # default: 7 (also tried 6, 9,11)
max_bins_cont = 7 # default: 7 (also tried 6, 9,11)
# time points post selection (generations)
times = [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
times += [i for i in range(600,4001,100)]
# selection coefficients
selection = [0.005, 0.01, 0.02, 0.04, 0.08]
# starting frequency of beneficial allele
# ignore, here for compliance with soft sweep file names
start_f = [0.0]
# cross-validation fold
K = 20
# classification error term
c_grid = [0.1] # [0.01, 0.1, 1.0]
# effective pop size N_e (haplotypes)
N = 2000
# site frequency folding, as in take min(f, 1-f)
# WARNING: not properly tested, use with caution
fold_freq = False
####################################################################################
################################## for soft sweep ##################################
####################################################################################
# sim_dir_pref = "../data/sim_soft_500_2.4e-07" # soft sweep
# selection = [0.05]
# start_f = [0.5] # 0.0, 0.1, 0.2, 0.3, 0.4, 0.5
# c_grid = [1.0]
####################################################################################
############################ for demographic scenario ##############################
####################################################################################
#times = [60,80,100,150,200]
#selection = [0.20]
#sim_dir_pref = "sim_500_demographic"
#demographic = True
####################################################################################
############################ for mean scaled SFS plots #############################
####################################################################################
#selection = [0.08]
#times = [150, 250, 1000,2000]
#max_bins = 130
|
{
"content_hash": "62e87aca0421e1f1f60490237ad68862",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 89,
"avg_line_length": 37.28915662650602,
"alnum_prop": 0.4849757673667205,
"repo_name": "rronen/SFselect",
"id": "d2d2e1436a62df73438e070dfbb94c9887396306",
"size": "3096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70434"
}
],
"symlink_target": ""
}
|
"""Tests for nn module."""
import collections
from functools import partial
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import scipy.stats
from jax import core
from jax._src import test_util as jtu
from jax.test_util import check_grads
from jax import nn
from jax import random
import jax
import jax.numpy as jnp
from jax.config import config
config.parse_flags_with_absl()
class NNFunctionsTest(jtu.JaxTestCase):
  """Unit tests for jax.nn functions: activations (softplus, relu, elu,
  gelu, glu, sigmoid), masked softmax/standardize, and one_hot."""
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testSoftplusGrad(self):
    check_grads(nn.softplus, (1e-8,), order=4,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  def testSoftplusGradZero(self):
    check_grads(nn.softplus, (0.,), order=1,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  def testSoftplusGradInf(self):
    self.assertAllClose(
        1., jax.grad(nn.softplus)(float('inf')))
  def testSoftplusGradNegInf(self):
    check_grads(nn.softplus, (-float('inf'),), order=1,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  def testSoftplusGradNan(self):
    check_grads(nn.softplus, (float('nan'),), order=1,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  @parameterized.parameters([int, float] + jtu.dtypes.floating + jtu.dtypes.integer)
  def testSoftplusZero(self, dtype):
    # softplus(0) == log(2) for every supported input dtype.
    self.assertEqual(jnp.log(dtype(2)), nn.softplus(dtype(0)))
  def testReluGrad(self):
    rtol = 1e-2 if jtu.device_under_test() == "tpu" else None
    check_grads(nn.relu, (1.,), order=3, rtol=rtol)
    check_grads(nn.relu, (-1.,), order=3, rtol=rtol)
    jaxpr = jax.make_jaxpr(jax.grad(nn.relu))(0.)
    self.assertGreaterEqual(len(jaxpr.jaxpr.eqns), 2)
  def testSoftplusValue(self):
    val = nn.softplus(89.)
    self.assertAllClose(val, 89., check_dtypes=False)
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testEluGrad(self):
    check_grads(nn.elu, (1e4,), order=4, eps=1.)
  def testEluValue(self):
    val = nn.elu(1e4)
    self.assertAllClose(val, 1e4, check_dtypes=False)
  def testGluValue(self):
    val = nn.glu(jnp.array([1.0, 0.0]), axis=0)
    self.assertAllClose(val, jnp.array([0.5]))
  @parameterized.parameters(False, True)
  def testGeluIntType(self, approximate):
    val_float = nn.gelu(jnp.array(-1.0), approximate=approximate)
    val_int = nn.gelu(jnp.array(-1), approximate=approximate)
    self.assertAllClose(val_float, val_int)
  @parameterized.parameters(False, True)
  def testGelu(self, approximate):
    # Reference: exact GELU, x * Phi(x) with the normal CDF from scipy.
    def gelu_reference(x):
      return x * scipy.stats.norm.cdf(x)
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng((4, 5, 6), jnp.float32)]
    self._CheckAgainstNumpy(
      gelu_reference, partial(nn.gelu, approximate=approximate), args_maker,
      check_dtypes=False, tol=1e-3 if approximate else None)
  @parameterized.parameters(*itertools.product(
      (jnp.float32, jnp.bfloat16, jnp.float16),
      (partial(nn.gelu, approximate=False),
       partial(nn.gelu, approximate=True),
       nn.relu, nn.softplus, nn.sigmoid)))
  def testDtypeMatchesInput(self, dtype, fn):
    x = jnp.zeros((), dtype=dtype)
    out = fn(x)
    self.assertEqual(out.dtype, dtype)
  def testEluMemory(self):
    # see https://github.com/google/jax/pull/1640
    with jax.enable_checks(False):  # With checks we materialize the array
      jax.make_jaxpr(lambda: nn.elu(jnp.ones((10 ** 12,))))  # don't oom
  def testHardTanhMemory(self):
    # see https://github.com/google/jax/pull/1640
    with jax.enable_checks(False):  # With checks we materialize the array
      jax.make_jaxpr(lambda: nn.hard_tanh(jnp.ones((10 ** 12,))))  # don't oom
  @parameterized.parameters([nn.softmax, nn.log_softmax])
  def testSoftmaxWhereMask(self, fn):
    # Masked softmax over x must equal plain softmax over the kept entries.
    x = jnp.array([5.5, 1.3, -4.2, 0.9])
    m = jnp.array([True, False, True, True])
    x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
    out_masked = jnp.take(
        fn(x, where=m, initial=-jnp.inf), jnp.array([0, 2, 3]))
    out_filtered = fn(x_filtered)
    self.assertAllClose(out_masked, out_filtered)
  def testStandardizeWhereMask(self):
    x = jnp.array([5.5, 1.3, -4.2, 0.9])
    m = jnp.array([True, False, True, True])
    x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
    out_masked = jnp.take(nn.standardize(x, where=m), jnp.array([0, 2, 3]))
    out_filtered = nn.standardize(x_filtered)
    self.assertAllClose(out_masked, out_filtered)
  def testOneHot(self):
    actual = nn.one_hot(jnp.array([0, 1, 2]), 3)
    expected = jnp.array([[1., 0., 0.],
                          [0., 1., 0.],
                          [0., 0., 1.]])
    self.assertAllClose(actual, expected)
    actual = nn.one_hot(jnp.array([1, 2, 0]), 3)
    expected = jnp.array([[0., 1., 0.],
                          [0., 0., 1.],
                          [1., 0., 0.]])
    self.assertAllClose(actual, expected)
  def testOneHotOutOfBound(self):
    # Indices outside [0, num_classes) produce all-zero rows.
    actual = nn.one_hot(jnp.array([-1, 3]), 3)
    expected = jnp.array([[0., 0., 0.],
                          [0., 0., 0.]])
    self.assertAllClose(actual, expected)
  def testOneHotNonArrayInput(self):
    actual = nn.one_hot([0, 1, 2], 3)
    expected = jnp.array([[1., 0., 0.],
                          [0., 1., 0.],
                          [0., 0., 1.]])
    self.assertAllClose(actual, expected)
  def testOneHotCustomDtype(self):
    actual = nn.one_hot(jnp.array([0, 1, 2]), 3, dtype=jnp.bool_)
    expected = jnp.array([[True, False, False],
                          [False, True, False],
                          [False, False, True]])
    self.assertAllClose(actual, expected)
  def testOneHotConcretizationError(self):
    # https://github.com/google/jax/issues/3654
    msg = r"in jax.nn.one_hot argument `num_classes`"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      jax.jit(nn.one_hot)(3, 5)
  def testOneHotAxis(self):
    expected = jnp.array([[0., 1., 0.],
                          [0., 0., 1.],
                          [1., 0., 0.]]).T
    actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=0)
    self.assertAllClose(actual, expected)
    actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=-2)
    self.assertAllClose(actual, expected)
  def testTanhExists(self):
    nn.tanh  # doesn't crash
  def testCustomJVPLeak(self):
    # https://github.com/google/jax/issues/8171
    @jax.jit
    def fwd():
      a = jnp.array(1.)
      def f(hx, _):
        hx = jax.nn.sigmoid(hx + a)
        return hx, None
      hx = jnp.array(0.)
      jax.lax.scan(f, hx, None, length=2)
    with jax.checking_leaks():
      fwd()  # doesn't crash
  def testCustomJVPLeak2(self):
    # https://github.com/google/jax/issues/8171
    # The above test uses jax.nn.sigmoid, as in the original #8171, but that
    # function no longer actually has a custom_jvp! So we inline the old def.
    @jax.custom_jvp
    def sigmoid(x):
      one = jnp.float32(1)
      return jax.lax.div(one, jax.lax.add(one, jax.lax.exp(jax.lax.neg(x))))
    sigmoid.defjvps(lambda g, ans, x: g * ans * (jnp.float32(1) - ans))
    @jax.jit
    def fwd():
      a = jnp.array(1., 'float32')
      def f(hx, _):
        hx = sigmoid(hx + a)
        return hx, None
      hx = jnp.array(0., 'float32')
      jax.lax.scan(f, hx, None, length=2)
    with jax.checking_leaks():
      fwd()  # doesn't crash
# One test configuration for an initializer: its display name, the factory,
# the parameter shapes to exercise, and the dtypes to try.
InitializerRecord = collections.namedtuple(
    "InitializerRecord", ["name", "initializer", "shapes", "dtypes"])
# Candidate parameter shapes, 1-D through 4-D.
ALL_SHAPES = [(2,), (2, 2), (2, 3), (3, 2), (2, 3, 4), (4, 3, 2), (2, 3, 4, 5)]
def initializer_record(name, initializer, dtypes, min_dims=2, max_dims=4):
  """Build an InitializerRecord keeping only shapes whose rank lies in
  [min_dims, max_dims]."""
  selected = []
  for candidate in ALL_SHAPES:
    if min_dims <= len(candidate) <= max_dims:
      selected.append(candidate)
  return InitializerRecord(name, initializer, selected, dtypes)
# All initializer configurations exercised by NNInitializersTest. The
# trailing positional args are min_dims/max_dims passed to initializer_record
# (e.g. orthogonal only supports 2-D, delta_orthogonal only 4-D shapes).
INITIALIZER_RECS = [
    initializer_record("uniform", nn.initializers.uniform, jtu.dtypes.floating, 1),
    initializer_record("normal", nn.initializers.normal, jtu.dtypes.inexact, 1),
    initializer_record("he_normal", nn.initializers.he_normal, jtu.dtypes.inexact),
    initializer_record("he_uniform", nn.initializers.he_uniform, jtu.dtypes.inexact),
    initializer_record("glorot_normal", nn.initializers.glorot_normal, jtu.dtypes.inexact),
    initializer_record("glorot_uniform", nn.initializers.glorot_uniform, jtu.dtypes.inexact),
    initializer_record("lecun_normal", nn.initializers.lecun_normal, jtu.dtypes.inexact),
    initializer_record("lecun_uniform", nn.initializers.lecun_uniform, jtu.dtypes.inexact),
    initializer_record("orthogonal", nn.initializers.orthogonal, jtu.dtypes.floating, 2, 2),
    initializer_record("delta_orthogonal", nn.initializers.delta_orthogonal, jtu.dtypes.floating, 4, 4)
]
class NNInitializersTest(jtu.JaxTestCase):
  """Unit tests for jax.nn.initializers: every record in INITIALIZER_RECS
  must produce arrays of the requested shape and dtype."""
  @parameterized.parameters(itertools.chain.from_iterable(
    jtu.sample_product_testcases(
      [dict(initializer=rec.initializer())],
      shape=rec.shapes,
      dtype=rec.dtypes
    )
    for rec in INITIALIZER_RECS
  ))
  def testInitializer(self, initializer, shape, dtype):
    rng = random.PRNGKey(0)
    val = initializer(rng, shape, dtype)
    # Output must match the requested shape and the canonicalized dtype.
    self.assertEqual(shape, jnp.shape(val))
    self.assertEqual(jax.dtypes.canonicalize_dtype(dtype), jnp.dtype(val))
  @parameterized.parameters(itertools.chain.from_iterable(
    jtu.sample_product_testcases(
      [dict(initializer_provider=rec.initializer)],
      shape=rec.shapes,
      dtype=rec.dtypes
    )
    for rec in INITIALIZER_RECS
  ))
  def testInitializerProvider(self, initializer_provider, shape, dtype):
    rng = random.PRNGKey(0)
    # Bind the dtype at construction time instead of call time.
    initializer = initializer_provider(dtype=dtype)
    val = initializer(rng, shape)
    self.assertEqual(shape, jnp.shape(val))
    self.assertEqual(jax.dtypes.canonicalize_dtype(dtype), jnp.dtype(val))
  def testVarianceScalingMultiAxis(self):
    rng = random.PRNGKey(0)
    shape = (2, 3, 4, 5)
    initializer = nn.initializers.variance_scaling(
        scale=1.0, mode='fan_avg', distribution='truncated_normal',
        in_axis=(0, 1), out_axis=(-2, -1))
    val = initializer(rng, shape)
    self.assertEqual(shape, jnp.shape(val))
  def testVarianceScalingBatchAxis(self):
    rng = random.PRNGKey(0)
    shape = (2, 3, 4, 5)
    initializer = nn.initializers.variance_scaling(
        scale=1.0, mode='fan_avg', distribution='truncated_normal',
        in_axis=0, out_axis=(2, 3), batch_axis=1)
    val = initializer(rng, shape)
    self.assertEqual(shape, jnp.shape(val))
# Entry point: run the suite under absltest with JAX's test loader.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
|
{
"content_hash": "599236cfcd23ed75d5f21319d3e757ba",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 103,
"avg_line_length": 34.48196721311476,
"alnum_prop": 0.640106494247409,
"repo_name": "google/jax",
"id": "1d65eaa39e2c9f724f242d14d218dd704f1f64f7",
"size": "11099",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/nn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "25710"
},
{
"name": "C++",
"bytes": "233622"
},
{
"name": "Dockerfile",
"bytes": "1514"
},
{
"name": "Jupyter Notebook",
"bytes": "98807"
},
{
"name": "Python",
"bytes": "7395044"
},
{
"name": "Shell",
"bytes": "17273"
},
{
"name": "Starlark",
"bytes": "88279"
}
],
"symlink_target": ""
}
|
import unittest
from urllib import unquote
import cStringIO as StringIO
from logging.handlers import SysLogHandler
import mock
from test.unit import FakeLogger
from swift.common.utils import get_logger
from swift.common.middleware import proxy_logging
from swift.common.swob import Request
class FakeApp(object):
    """Minimal WSGI app double: drains the request body, then returns a
    canned response.

    :param body: iterable of response body chunks (default: one chunk,
                 'FAKE APP')
    :param response_str: status line passed to start_response
    """

    def __init__(self, body=None, response_str='200 OK'):
        # None sentinel instead of a mutable default argument; the old
        # default body=['FAKE APP'] was one list shared across instances.
        self.body = ['FAKE APP'] if body is None else body
        self.response_str = response_str

    def __call__(self, env, start_response):
        start_response(self.response_str,
                       [('Content-Type', 'text/plain'),
                        ('Content-Length', str(sum(map(len, self.body))))])
        # Drain the request body in small reads, as the real proxy would.
        while env['wsgi.input'].read(5):
            pass
        return self.body
class FakeAppThatExcepts(object):
    """WSGI app double whose every invocation blows up, for exercising the
    middleware's error path."""

    MESSAGE = "We take exception to that!"

    def __call__(self, env, start_response):
        raise Exception(self.MESSAGE)
class FakeAppNoContentLengthNoTransferEncoding(object):
    """WSGI app double that sends neither Content-Length nor
    Transfer-Encoding headers, for exercising the middleware's handling of
    responses with unknown length.

    :param body: iterable of response body chunks (default: one chunk,
                 'FAKE APP')
    """

    def __init__(self, body=None):
        # None sentinel instead of a mutable default argument; the old
        # default body=['FAKE APP'] was one list shared across instances.
        self.body = ['FAKE APP'] if body is None else body

    def __call__(self, env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        # Drain the request body in small reads, as the real proxy would.
        while env['wsgi.input'].read(5):
            pass
        return self.body
class FileLikeExceptor(object):
    """File-like double whose reads always raise IOError, simulating a
    client that disconnects mid-request.

    NOTE(review): the parameter name `len` shadows the builtin; left as-is
    because renaming it would change the (keyword) call interface.
    """
    def __init__(self):
        pass
    def read(self, len):
        raise IOError('of some sort')
    def readline(self, len=1024):
        raise IOError('of some sort')
class FakeAppReadline(object):
    """WSGI app double that consumes exactly one line of the request body
    via readline(), then returns a canned 8-byte response."""

    def __call__(self, env, start_response):
        headers = [('Content-Type', 'text/plain'),
                   ('Content-Length', '8')]
        start_response('200 OK', headers)
        # Read a single request line; the rest of the body is left unread.
        env['wsgi.input'].readline()
        return ["FAKE APP"]
def start_response(*args):
    """No-op start_response stand-in for tests that ignore the response
    status and headers."""
    return None
class TestProxyLogging(unittest.TestCase):
def _log_parts(self, app, should_be_empty=False):
info_calls = app.access_logger.log_dict['info']
if should_be_empty:
self.assertEquals([], info_calls)
else:
self.assertEquals(1, len(info_calls))
return info_calls[0][0][0].split(' ')
def assertTiming(self, exp_metric, app, exp_timing=None):
timing_calls = app.access_logger.log_dict['timing']
found = False
for timing_call in timing_calls:
self.assertEquals({}, timing_call[1])
self.assertEquals(2, len(timing_call[0]))
if timing_call[0][0] == exp_metric:
found = True
if exp_timing is not None:
self.assertAlmostEqual(exp_timing, timing_call[0][1],
places=4)
if not found:
self.assertTrue(False, 'assertTiming: %s not found in %r' % (
exp_metric, timing_calls))
def assertTimingSince(self, exp_metric, app, exp_start=None):
timing_calls = app.access_logger.log_dict['timing_since']
found = False
for timing_call in timing_calls:
self.assertEquals({}, timing_call[1])
self.assertEquals(2, len(timing_call[0]))
if timing_call[0][0] == exp_metric:
found = True
if exp_start is not None:
self.assertAlmostEqual(exp_start, timing_call[0][1],
places=4)
if not found:
self.assertTrue(False, 'assertTimingSince: %s not found in %r' % (
exp_metric, timing_calls))
def assertNotTiming(self, not_exp_metric, app):
timing_calls = app.access_logger.log_dict['timing']
for timing_call in timing_calls:
self.assertNotEqual(not_exp_metric, timing_call[0][0])
def assertUpdateStats(self, exp_metric, exp_bytes, app):
update_stats_calls = app.access_logger.log_dict['update_stats']
self.assertEquals(1, len(update_stats_calls))
self.assertEquals({}, update_stats_calls[0][1])
self.assertEquals((exp_metric, exp_bytes), update_stats_calls[0][0])
def test_log_request_statsd_invalid_stats_types(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
for url in ['/', '/foo', '/foo/bar', '/v1']:
req = Request.blank(url, environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# get body
''.join(resp)
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_bad(self):
for bad_path in ['', '/', '/bad', '/baddy/mc_badderson', '/v1',
'/v1/']:
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank(bad_path, environ={'REQUEST_METHOD': 'GET'})
now = 10000.0
app.log_request(req, 123, 7, 13, now, now + 2.71828182846)
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_good(self):
"""
log_request() should send timing and byte-count counters for GET
requests. Also, __call__()'s iter_response() function should
statsd-log time to first byte (calling the passed-in start_response
function), but only for GET requests.
"""
stub_times = []
def stub_time():
return stub_times.pop(0)
path_types = {
'/v1/a': 'account',
'/v1/a/': 'account',
'/v1/a/c': 'container',
'/v1/a/c/': 'container',
'/v1/a/c/o': 'object',
'/v1/a/c/o/': 'object',
'/v1/a/c/o/p': 'object',
'/v1/a/c/o/p/': 'object',
'/v1/a/c/o/p/p2': 'object',
}
with mock.patch("time.time", stub_time):
for path, exp_type in path_types.iteritems():
# GET
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='7654321', response_str='321 Fubar'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'GET',
'wsgi.input': StringIO.StringIO('4321')})
stub_times = [18.0, 20.71828182846]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('7654321', ''.join(iter_response))
self.assertTiming('%s.GET.321.timing' % exp_type, app,
exp_timing=2.71828182846 * 1000)
self.assertTimingSince(
'%s.GET.321.first-byte.timing' % exp_type, app,
exp_start=18.0)
self.assertUpdateStats('%s.GET.321.xfer' % exp_type,
4 + 7, app)
# GET with swift.proxy_access_log_made already set
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='7654321', response_str='321 Fubar'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'GET',
'swift.proxy_access_log_made': True,
'wsgi.input': StringIO.StringIO('4321')})
stub_times = [18.0, 20.71828182846]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('7654321', ''.join(iter_response))
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([],
app.access_logger.log_dict['timing_since'])
self.assertEqual([],
app.access_logger.log_dict['update_stats'])
# PUT (no first-byte timing!)
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='87654321', response_str='314 PiTown'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'PUT',
'wsgi.input': StringIO.StringIO('654321')})
# (it's not a GET, so time() doesn't have a 2nd call)
stub_times = [58.2, 58.2 + 7.3321]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('87654321', ''.join(iter_response))
self.assertTiming('%s.PUT.314.timing' % exp_type, app,
exp_timing=7.3321 * 1000)
self.assertNotTiming(
'%s.GET.314.first-byte.timing' % exp_type, app)
self.assertNotTiming(
'%s.PUT.314.first-byte.timing' % exp_type, app)
self.assertUpdateStats(
'%s.PUT.314.xfer' % exp_type, 6 + 8, app)
def test_log_request_stat_method_filtering_default(self):
method_map = {
'foo': 'BAD_METHOD',
'': 'BAD_METHOD',
'PUTT': 'BAD_METHOD',
'SPECIAL': 'BAD_METHOD',
'GET': 'GET',
'PUT': 'PUT',
'COPY': 'COPY',
'HEAD': 'HEAD',
'POST': 'POST',
'DELETE': 'DELETE',
'OPTIONS': 'OPTIONS',
}
for method, exp_method in method_map.iteritems():
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method})
now = 10000.0
app.log_request(req, 299, 11, 3, now, now + 1.17)
self.assertTiming('account.%s.299.timing' % exp_method, app,
exp_timing=1.17 * 1000)
self.assertUpdateStats('account.%s.299.xfer' % exp_method,
11 + 3, app)
def test_log_request_stat_method_filtering_custom(self):
method_map = {
'foo': 'BAD_METHOD',
'': 'BAD_METHOD',
'PUTT': 'BAD_METHOD',
'SPECIAL': 'SPECIAL', # will be configured
'GET': 'GET',
'PUT': 'PUT',
'COPY': 'BAD_METHOD', # prove no one's special
}
# this conf var supports optional leading access_
for conf_key in ['access_log_statsd_valid_http_methods',
'log_statsd_valid_http_methods']:
for method, exp_method in method_map.iteritems():
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
conf_key: 'SPECIAL, GET,PUT ', # crazy spaces ok
})
app.access_logger = FakeLogger()
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method})
now = 10000.0
app.log_request(req, 911, 4, 43, now, now + 1.01)
self.assertTiming('container.%s.911.timing' % exp_method, app,
exp_timing=1.01 * 1000)
self.assertUpdateStats('container.%s.911.xfer' % exp_method,
4 + 43, app)
def test_basic_req(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_basic_req_second_time(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'swift.proxy_access_log_made': True,
'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
self._log_parts(app, should_be_empty=True)
self.assertEquals(resp_body, 'FAKE APP')
def test_multi_segment_resp(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(
['some', 'chunks', 'of data']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.source': 'SOS'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'somechunksof data')
self.assertEquals(log_parts[11], str(len(resp_body)))
self.assertUpdateStats('SOS.GET.200.xfer', len(resp_body), app)
def test_log_headers(self):
for conf_key in ['access_log_headers', 'log_headers']:
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
{conf_key: 'yes'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
headers = unquote(log_parts[14]).split('\n')
self.assert_('Host: localhost:80' in headers)
def test_access_log_headers_only(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(), {'log_headers': 'yes',
'access_log_headers_only': 'FIRST, seCond'})
app.access_logger = FakeLogger()
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET'},
headers={'First': '1',
'Second': '2',
'Third': '3'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
headers = unquote(log_parts[14]).split('\n')
self.assert_('First: 1' in headers)
self.assert_('Second: 2' in headers)
self.assert_('Third: 3' not in headers)
self.assert_('Host: localhost:80' not in headers)
def test_upload_size(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
{'log_headers': 'yes'})
app.access_logger = FakeLogger()
req = Request.blank(
'/v1/a/c/o/foo',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': StringIO.StringIO('some stuff')})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[11], str(len('FAKE APP')))
self.assertEquals(log_parts[10], str(len('some stuff')))
self.assertUpdateStats('object.PUT.200.xfer',
len('some stuff') + len('FAKE APP'),
app)
def test_upload_line(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(),
{'log_headers': 'yes'})
app.access_logger = FakeLogger()
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO.StringIO(
'some stuff\nsome other stuff\n')})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[11], str(len('FAKE APP')))
self.assertEquals(log_parts[10], str(len('some stuff\n')))
self.assertUpdateStats('container.POST.200.xfer',
len('some stuff\n') + len('FAKE APP'),
app)
def test_log_query_string(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'x=3'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(unquote(log_parts[4]), '/?x=3')
def test_client_logging(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '1.2.3.4') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
def test_proxy_client_logging(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': '4.5.6.7,8.9.10.11'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '4.5.6.7') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_CLUSTER_CLIENT_IP': '4.5.6.7'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '4.5.6.7') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
def test_facility(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(),
{'log_headers': 'yes',
'access_log_facility': 'LOG_LOCAL7'})
handler = get_logger.handler4logger[app.access_logger.logger]
self.assertEquals(SysLogHandler.LOG_LOCAL7, handler.facility)
def test_filter(self):
factory = proxy_logging.filter_factory({})
self.assert_(callable(factory))
self.assert_(callable(factory(FakeApp())))
def test_unread_body(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(['some', 'stuff']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# read first chunk
next(resp)
resp.close() # raise a GeneratorExit in middleware app_iter loop
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[11], '4') # write length
def test_disconnect_on_readline(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'wsgi.input': FileLikeExceptor()})
try:
resp = app(req.environ, start_response)
# read body
''.join(resp)
except IOError:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[10], '-') # read length
def test_disconnect_on_read(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(['some', 'stuff']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'wsgi.input': FileLikeExceptor()})
try:
resp = app(req.environ, start_response)
# read body
''.join(resp)
except IOError:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[10], '-') # read length
def test_app_exception(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppThatExcepts(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
try:
app(req.environ, start_response)
except Exception:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '500')
self.assertEquals(log_parts[10], '-') # read length
def test_no_content_length_no_transfer_encoding_with_list_body(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
# test the "while not chunk: chunk = iterator.next()"
body=['', '', 'line1\n', 'line2\n'],
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'line1\nline2\n')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_no_content_length_no_transfer_encoding_with_empty_strings(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
# test the "while not chunk: chunk = iterator.next()"
body=['', '', ''],
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, '')
self.assertEquals(log_parts[11], '-')
def test_no_content_length_no_transfer_encoding_with_generator(self):
class BodyGen(object):
def __init__(self, data):
self.data = data
def __iter__(self):
yield self.data
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
body=BodyGen('abc'),
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'abc')
self.assertEquals(log_parts[11], '3')
def test_req_path_info_popping(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/v1/something', environ={'REQUEST_METHOD': 'GET'})
req.path_info_pop()
self.assertEquals(req.environ['PATH_INFO'], '/something')
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/v1/something')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_ipv6(self):
ipv6addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348'
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.remote_addr = ipv6addr
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], ipv6addr)
self.assertEquals(log_parts[1], ipv6addr)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_log_info_none(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = []
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
def test_log_info_single(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one')
def test_log_info_multiple(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one', 'and two']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one%2Cand%20two')
def test_log_auth_token(self):
auth_token = 'b05bf940-0464-4c0e-8c70-87717d2d73e8'
# Default - no reveal_sensitive_prefix in config
# No x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
# Has x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# Truncate to first 8 characters
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], 'b05bf940...')
# Token length and reveal_sensitive_prefix are same (no truncate)
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': str(len(auth_token))})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# Don't log x-auth-token
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '...')
# Avoids pyflakes error, "local variable 'resp_body' is assigned to
# but never used
self.assertTrue(resp_body is not None)
def test_ensure_fields(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
with mock.patch('time.time',
mock.MagicMock(
side_effect=[10000000.0, 10000001.0])):
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(len(log_parts), 20)
self.assertEquals(log_parts[0], '-')
self.assertEquals(log_parts[1], '-')
self.assertEquals(log_parts[2], '26/Apr/1970/17/46/41')
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(log_parts[7], '-')
self.assertEquals(log_parts[8], '-')
self.assertEquals(log_parts[9], '-')
self.assertEquals(log_parts[10], '-')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
self.assertEquals(log_parts[12], '-')
self.assertEquals(log_parts[13], '-')
self.assertEquals(log_parts[14], '-')
self.assertEquals(log_parts[15], '1.0000')
self.assertEquals(log_parts[16], '-')
self.assertEquals(log_parts[17], '-')
self.assertEquals(log_parts[18], '10000000.000000000')
self.assertEquals(log_parts[19], '10000001.000000000')
# Allow running this test module directly, e.g. "python test_proxy_logging.py".
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "e143ca7f2aa976735ffb0ab2631c704d",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 79,
"avg_line_length": 43.103078982597054,
"alnum_prop": 0.5485744456177403,
"repo_name": "xiaoguoai/ec-dev-swift",
"id": "7e0744a4be779dde73b9a20c691019e19f3daf6b",
"size": "32793",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "test/unit/common/middleware/test_proxy_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3816353"
},
{
"name": "Shell",
"bytes": "2933"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.

    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.

    Parameters
    ----------
    steps: list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ... # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y) # doctest: +ELLIPSIS
    0.77...
    """

    # BaseEstimator interface

    def __init__(self, steps):
        self.named_steps = dict(steps)
        names, estimators = zip(*steps)
        if len(self.named_steps) != len(steps):
            raise ValueError("Names provided are not unique: %s" % (names,))
        # shallow copy of steps
        self.steps = tosequence(zip(names, estimators))
        transforms = estimators[:-1]
        estimator = estimators[-1]
        # every intermediate step must be a transform ...
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform"
                                " '%s' (type %s) doesn't)" % (t, type(t)))
        # ... and the final step only needs fit
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "'%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))

    def get_params(self, deep=True):
        """Return the pipeline's parameters; with ``deep=True`` include each
        step's own parameters under '<step_name>__<param_name>' keys."""
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            out = self.named_steps.copy()
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        """Fit-transform X through all but the last step.

        Returns the transformed X and the fit params destined for the final
        estimator (those whose name is prefixed with its step name).
        """
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]

    def _apply_transforms(self, X):
        """Run X through the already-fitted transforms (all steps but last)."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return Xt

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator."""
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)

    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict."""
        return self.steps[-1][-1].predict(self._apply_transforms(X))

    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba."""
        return self.steps[-1][-1].predict_proba(self._apply_transforms(X))

    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function."""
        return self.steps[-1][-1].decision_function(self._apply_transforms(X))

    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator."""
        return self.steps[-1][-1].predict_log_proba(self._apply_transforms(X))

    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform."""
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt

    def inverse_transform(self, X):
        """Apply each step's inverse_transform in reverse order.

        A 1-D input is promoted to a single-sample 2-D array first.
        """
        if X.ndim == 1:
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt

    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score."""
        return self.steps[-1][-1].score(self._apply_transforms(X), y)

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Construct a Pipeline from the given estimators.

    This is a shorthand for the Pipeline constructor; it does not require, and
    does not permit, naming the estimators. Instead, they will be given names
    automatically based on their types.

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
    Pipeline(steps=[('standardscaler',
    StandardScaler(copy=True, with_mean=True, with_std=True)),
    ('gaussiannb', GaussianNB())])

    Returns
    -------
    p : Pipeline
    """
    # Auto-generate unique step names from the estimator class names.
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.

    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.

    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.

    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).

    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights

    def get_feature_names(self):
        """Get feature names from all transformers.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            # prefix each feature with its transformer's name
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        return self._hstack(Xs)

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        return self._hstack(Xs)

    @staticmethod
    def _hstack(Xs):
        """Horizontally stack the per-transformer outputs.

        If any block is sparse the whole result must be sparse (CSR);
        otherwise a dense hstack is used. Previously this logic was
        duplicated in fit_transform and transform.
        """
        if any(sparse.issparse(f) for f in Xs):
            return sparse.hstack(Xs).tocsr()
        return np.hstack(Xs)

    def get_params(self, deep=True):
        """Return parameters; with deep=True include each transformer's own
        parameters under '<name>__<param>' keys."""
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                # six.iteritems for consistency with Pipeline.get_params
                for key, value in six.iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    def _update_transformer_list(self, transformers):
        # Replace each (name, old) pair with the newly fitted transformer,
        # mutating the list in place so external references stay valid.
        self.transformer_list[:] = [
            (name, new)
            for ((name, old), new) in zip(self.transformer_list, transformers)
        ]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not require,
    and does not permit, naming the transformers. Instead, they will be given
    names automatically based on their types. It also does not allow weighting.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
    transformer_list=[('pca', PCA(copy=True, n_components=None,
    whiten=False)),
    ('truncatedsvd',
    TruncatedSVD(algorithm='randomized',
    n_components=2, n_iter=5,
    random_state=None, tol=0.0))],
    transformer_weights=None)

    Returns
    -------
    f : FeatureUnion
    """
    # Auto-generate unique transformer names from the class names.
    named_transformers = _name_estimators(transformers)
    return FeatureUnion(named_transformers)
|
{
"content_hash": "e7cc476ed5ad472ff2ef38c6bd477038",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 89,
"avg_line_length": 38.05324074074074,
"alnum_prop": 0.6005231461767747,
"repo_name": "treycausey/scikit-learn",
"id": "8c6483c8ac14d59a58f207b2d4469902c1e0b973",
"size": "16439",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sklearn/pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
}
|
from functools import total_ordering
import itertools
import re
all_modules = []


@total_ordering
class Module(object):
    """
    A module is the basic abstraction in our test runner script. Each module consists of a set
    of source files, a set of test commands, and a set of dependencies on other modules. We use
    modules to define a dependency graph that let us determine which tests to run based on which
    files have changed.
    """

    def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(),
                 environ=None, sbt_test_goals=(), python_test_goals=(),
                 blacklisted_python_implementations=(), test_tags=(),
                 should_run_r_tests=False, should_run_build_tests=False):
        """
        Define a new module.

        :param name: A short module name, for display in logging and error messages.
        :param dependencies: A set of dependencies for this module. This should only include direct
            dependencies; transitive dependencies are resolved automatically.
        :param source_file_regexes: a set of regexes that match source files belonging to this
            module. These regexes are applied by attempting to match at the beginning of the
            filename strings.
        :param build_profile_flags: A set of profile flags that should be passed to Maven or SBT in
            order to build and test this module (e.g. '-PprofileName').
        :param environ: A dict of environment variables that should be set when files in this
            module are changed. Defaults to an empty dict; each instance gets its own dict so
            modules can never accidentally share (or mutate) one another's environment mapping.
        :param sbt_test_goals: A set of SBT test goals for testing this module.
        :param python_test_goals: A set of Python test goals for testing this module.
        :param blacklisted_python_implementations: A set of Python implementations that are not
            supported by this module's Python components. The values in this set should match
            strings returned by Python's `platform.python_implementation()`.
        :param test_tags: A set of tags that will be excluded when running unit tests if the module
            is not explicitly changed.
        :param should_run_r_tests: If true, changes in this module will trigger all R tests.
        :param should_run_build_tests: If true, changes in this module will trigger build tests.
        """
        self.name = name
        self.dependencies = dependencies
        self.source_file_prefixes = source_file_regexes
        self.sbt_test_goals = sbt_test_goals
        self.build_profile_flags = build_profile_flags
        # Fix: previous signature used a mutable default (environ={}), so all
        # modules created without an explicit environ shared one dict object.
        self.environ = {} if environ is None else environ
        self.python_test_goals = python_test_goals
        self.blacklisted_python_implementations = blacklisted_python_implementations
        self.test_tags = test_tags
        self.should_run_r_tests = should_run_r_tests
        self.should_run_build_tests = should_run_build_tests

        # Register this module with its dependencies (reverse edges) and with
        # the global registry used by the test runner.
        self.dependent_modules = set()
        for dep in dependencies:
            dep.dependent_modules.add(self)
        all_modules.append(self)

    def contains_file(self, filename):
        """Return True if filename matches any source regex (prefix match)."""
        return any(re.match(p, filename) for p in self.source_file_prefixes)

    def __repr__(self):
        return "Module<%s>" % self.name

    # Modules are identified and ordered by name; total_ordering derives the
    # remaining rich comparisons from __lt__ and __eq__.
    def __lt__(self, other):
        return self.name < other.name

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not (self.name == other.name)

    def __hash__(self):
        return hash(self.name)
# --- Module declarations ------------------------------------------------
# Each assignment below registers one build module: its direct dependencies,
# the source-path prefixes that map changed files to it, and its test goals.
tags = Module(
    name="tags",
    dependencies=[],
    source_file_regexes=[
        "common/tags/",
    ]
)
catalyst = Module(
    name="catalyst",
    dependencies=[tags],
    source_file_regexes=[
        "sql/catalyst/",
    ],
    sbt_test_goals=[
        "catalyst/test",
    ],
)
sql = Module(
    name="sql",
    dependencies=[catalyst],
    source_file_regexes=[
        "sql/core/",
    ],
    sbt_test_goals=[
        "sql/test",
    ],
)
hive = Module(
    name="hive",
    dependencies=[sql],
    source_file_regexes=[
        "sql/hive/",
        "bin/spark-sql",
    ],
    build_profile_flags=[
        "-Phive",
    ],
    sbt_test_goals=[
        "hive/test",
    ],
    test_tags=[
        "org.apache.spark.tags.ExtendedHiveTest"
    ]
)
repl = Module(
    name="repl",
    dependencies=[hive],
    source_file_regexes=[
        "repl/",
    ],
    sbt_test_goals=[
        "repl/test",
    ],
)
hive_thriftserver = Module(
    name="hive-thriftserver",
    dependencies=[hive],
    source_file_regexes=[
        "sql/hive-thriftserver",
        "sbin/start-thriftserver.sh",
    ],
    build_profile_flags=[
        "-Phive-thriftserver",
    ],
    sbt_test_goals=[
        "hive-thriftserver/test",
    ]
)
avro = Module(
    name="avro",
    dependencies=[sql],
    source_file_regexes=[
        "external/avro",
    ],
    sbt_test_goals=[
        "avro/test",
    ]
)
sql_kafka = Module(
    name="sql-kafka-0-10",
    dependencies=[sql],
    source_file_regexes=[
        "external/kafka-0-10-sql",
    ],
    sbt_test_goals=[
        "sql-kafka-0-10/test",
    ]
)
sketch = Module(
    name="sketch",
    dependencies=[tags],
    source_file_regexes=[
        "common/sketch/",
    ],
    sbt_test_goals=[
        "sketch/test"
    ]
)
graphx = Module(
    name="graphx",
    dependencies=[tags],
    source_file_regexes=[
        "graphx/",
    ],
    sbt_test_goals=[
        "graphx/test"
    ]
)
streaming = Module(
    name="streaming",
    dependencies=[tags],
    source_file_regexes=[
        "streaming",
    ],
    sbt_test_goals=[
        "streaming/test",
    ]
)
# --- Streaming connector modules ---------------------------------------
# Don't set the dependencies because changes in other modules should not trigger Kinesis tests.
# Kinesis tests depends on external Amazon kinesis service. We should run these tests only when
# files in streaming_kinesis_asl are changed, so that if Kinesis experiences an outage, we don't
# fail other PRs.
streaming_kinesis_asl = Module(
    name="streaming-kinesis-asl",
    dependencies=[tags],
    source_file_regexes=[
        "external/kinesis-asl/",
        "external/kinesis-asl-assembly/",
    ],
    build_profile_flags=[
        "-Pkinesis-asl",
    ],
    environ={
        "ENABLE_KINESIS_TESTS": "1"
    },
    sbt_test_goals=[
        "streaming-kinesis-asl/test",
    ]
)
streaming_kafka = Module(
    name="streaming-kafka-0-8",
    dependencies=[streaming],
    source_file_regexes=[
        "external/kafka-0-8",
        "external/kafka-0-8-assembly",
    ],
    build_profile_flags=[
        "-Pkafka-0-8",
    ],
    environ={
        "ENABLE_KAFKA_0_8_TESTS": "1"
    },
    sbt_test_goals=[
        "streaming-kafka-0-8/test",
    ]
)
streaming_kafka_0_10 = Module(
    name="streaming-kafka-0-10",
    dependencies=[streaming],
    source_file_regexes=[
        # The ending "/" is necessary otherwise it will include "sql-kafka" codes
        "external/kafka-0-10/",
        "external/kafka-0-10-assembly",
    ],
    sbt_test_goals=[
        "streaming-kafka-0-10/test",
    ]
)
streaming_flume_sink = Module(
    name="streaming-flume-sink",
    dependencies=[streaming],
    source_file_regexes=[
        "external/flume-sink",
    ],
    build_profile_flags=[
        "-Pflume",
    ],
    environ={
        "ENABLE_FLUME_TESTS": "1"
    },
    sbt_test_goals=[
        "streaming-flume-sink/test",
    ]
)
streaming_flume = Module(
    name="streaming-flume",
    dependencies=[streaming],
    source_file_regexes=[
        "external/flume",
    ],
    build_profile_flags=[
        "-Pflume",
    ],
    environ={
        "ENABLE_FLUME_TESTS": "1"
    },
    sbt_test_goals=[
        "streaming-flume/test",
    ]
)
streaming_flume_assembly = Module(
    name="streaming-flume-assembly",
    dependencies=[streaming_flume, streaming_flume_sink],
    source_file_regexes=[
        "external/flume-assembly",
    ],
    build_profile_flags=[
        "-Pflume",
    ],
    environ={
        "ENABLE_FLUME_TESTS": "1"
    }
)
# Local linear algebra package (no Spark runtime dependency beyond tags).
mllib_local = Module(
    name="mllib-local",
    dependencies=[tags],
    source_file_regexes=[
        "mllib-local",
    ],
    sbt_test_goals=[
        "mllib-local/test",
    ]
)

mllib = Module(
    name="mllib",
    dependencies=[mllib_local, streaming, sql],
    source_file_regexes=[
        "data/mllib/",
        "mllib/",
    ],
    sbt_test_goals=[
        "mllib/test",
    ]
)

# Example programs exercise most of the stack, hence the wide dependency list.
examples = Module(
    name="examples",
    dependencies=[graphx, mllib, streaming, hive],
    source_file_regexes=[
        "examples/",
    ],
    sbt_test_goals=[
        "examples/test",
    ]
)

# Core PySpark: matches everything under python/ EXCEPT the ml/mllib/sql/
# streaming sub-packages, which have their own modules below.
pyspark_core = Module(
    name="pyspark-core",
    dependencies=[],
    source_file_regexes=[
        "python/(?!pyspark/(ml|mllib|sql|streaming))"
    ],
    python_test_goals=[
        "pyspark.rdd",
        "pyspark.context",
        "pyspark.conf",
        "pyspark.broadcast",
        "pyspark.accumulators",
        "pyspark.serializers",
        "pyspark.profiler",
        "pyspark.shuffle",
        "pyspark.tests",
        "pyspark.util",
    ]
)
pyspark_sql = Module(
    name="pyspark-sql",
    dependencies=[pyspark_core, hive],
    source_file_regexes=[
        "python/pyspark/sql"
    ],
    python_test_goals=[
        "pyspark.sql.types",
        "pyspark.sql.context",
        "pyspark.sql.session",
        "pyspark.sql.conf",
        "pyspark.sql.catalog",
        "pyspark.sql.column",
        "pyspark.sql.dataframe",
        "pyspark.sql.group",
        "pyspark.sql.functions",
        "pyspark.sql.readwriter",
        "pyspark.sql.streaming",
        "pyspark.sql.udf",
        "pyspark.sql.window",
        "pyspark.sql.tests",
    ]
)

# PySpark streaming pulls in the JVM-side connectors so their integration
# tests can run; the env flags enable the Flume and Kafka 0.8 test paths.
pyspark_streaming = Module(
    name="pyspark-streaming",
    dependencies=[
        pyspark_core,
        streaming,
        streaming_kafka,
        streaming_flume_assembly,
        streaming_kinesis_asl
    ],
    source_file_regexes=[
        "python/pyspark/streaming"
    ],
    environ={
        "ENABLE_FLUME_TESTS": "1",
        "ENABLE_KAFKA_0_8_TESTS": "1"
    },
    python_test_goals=[
        "pyspark.streaming.util",
        "pyspark.streaming.tests",
    ]
)

pyspark_mllib = Module(
    name="pyspark-mllib",
    dependencies=[pyspark_core, pyspark_streaming, pyspark_sql, mllib],
    source_file_regexes=[
        "python/pyspark/mllib"
    ],
    python_test_goals=[
        "pyspark.mllib.classification",
        "pyspark.mllib.clustering",
        "pyspark.mllib.evaluation",
        "pyspark.mllib.feature",
        "pyspark.mllib.fpm",
        "pyspark.mllib.linalg.__init__",
        "pyspark.mllib.linalg.distributed",
        "pyspark.mllib.random",
        "pyspark.mllib.recommendation",
        "pyspark.mllib.regression",
        "pyspark.mllib.stat._statistics",
        "pyspark.mllib.stat.KernelDensity",
        "pyspark.mllib.tree",
        "pyspark.mllib.util",
        "pyspark.mllib.tests",
    ],
    blacklisted_python_implementations=[
        "PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
    ]
)

pyspark_ml = Module(
    name="pyspark-ml",
    dependencies=[pyspark_core, pyspark_mllib],
    source_file_regexes=[
        "python/pyspark/ml/"
    ],
    python_test_goals=[
        "pyspark.ml.classification",
        "pyspark.ml.clustering",
        "pyspark.ml.evaluation",
        "pyspark.ml.feature",
        "pyspark.ml.fpm",
        "pyspark.ml.image",
        "pyspark.ml.linalg.__init__",
        "pyspark.ml.recommendation",
        "pyspark.ml.regression",
        "pyspark.ml.stat",
        "pyspark.ml.tuning",
        "pyspark.ml.tests",
    ],
    blacklisted_python_implementations=[
        "PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
    ]
)
# SparkR has no sbt/python goals; changes trigger the R test suite instead.
sparkr = Module(
    name="sparkr",
    dependencies=[hive, mllib],
    source_file_regexes=[
        "R/",
    ],
    should_run_r_tests=True
)

# Documentation-only changes run no tests at all.
docs = Module(
    name="docs",
    dependencies=[],
    source_file_regexes=[
        "docs/",
    ]
)

# Build-system changes (any pom.xml, or the dependency-check script) run the
# dedicated build tests; `root` below also depends on this module.
build = Module(
    name="build",
    dependencies=[],
    source_file_regexes=[
        ".*pom.xml",
        "dev/test-dependencies.sh",
    ],
    should_run_build_tests=True
)

# Resource-manager backends: each enables its own build profile.
yarn = Module(
    name="yarn",
    dependencies=[],
    source_file_regexes=[
        "resource-managers/yarn/",
        "common/network-yarn/",
    ],
    build_profile_flags=["-Pyarn"],
    sbt_test_goals=[
        "yarn/test",
        "network-yarn/test",
    ],
    test_tags=[
        "org.apache.spark.tags.ExtendedYarnTest"
    ]
)

mesos = Module(
    name="mesos",
    dependencies=[],
    source_file_regexes=["resource-managers/mesos/"],
    build_profile_flags=["-Pmesos"],
    sbt_test_goals=["mesos/test"]
)

kubernetes = Module(
    name="kubernetes",
    dependencies=[],
    source_file_regexes=["resource-managers/kubernetes"],
    build_profile_flags=["-Pkubernetes"],
    sbt_test_goals=["kubernetes/test"]
)

# The root module is a dummy module which is used to run all of the tests.
# No other modules should directly depend on this module.
root = Module(
    name="root",
    dependencies=[build], # Changes to build should trigger all tests.
    source_file_regexes=[],
    # In order to run all of the tests, enable every test profile:
    build_profile_flags=list(set(
        itertools.chain.from_iterable(m.build_profile_flags for m in all_modules))),
    sbt_test_goals=[
        "test",
    ],
    python_test_goals=list(itertools.chain.from_iterable(m.python_test_goals for m in all_modules)),
    should_run_r_tests=True,
    should_run_build_tests=True
)
|
{
"content_hash": "84c7f819785cab38abec0b86c86347ce",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 100,
"avg_line_length": 24.368705035971225,
"alnum_prop": 0.5964277806480183,
"repo_name": "eyalfa/spark",
"id": "2aa355504bf290f847ebdab6f255e7c2840e5f9a",
"size": "14334",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "dev/sparktestsupport/modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "35042"
},
{
"name": "Batchfile",
"bytes": "30285"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23956"
},
{
"name": "Dockerfile",
"bytes": "7157"
},
{
"name": "HTML",
"bytes": "65141"
},
{
"name": "HiveQL",
"bytes": "1823425"
},
{
"name": "Java",
"bytes": "3372693"
},
{
"name": "JavaScript",
"bytes": "144886"
},
{
"name": "Makefile",
"bytes": "9395"
},
{
"name": "PLpgSQL",
"bytes": "163419"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2715827"
},
{
"name": "R",
"bytes": "1131137"
},
{
"name": "Roff",
"bytes": "20789"
},
{
"name": "SQLPL",
"bytes": "30039"
},
{
"name": "Scala",
"bytes": "26996418"
},
{
"name": "Shell",
"bytes": "189256"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
}
|
'''
Created on Jul 23, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
import scipy.io as spio
# F = spio.loadmat('incCondGo.mat')
# numRuns = F['goTo'].shape[0]
# Run indices that should actually execute.  The commented-out loadmat code
# above originally read this from incCondGo.mat; here all 1800 runs are on.
F = {'goTo': np.arange(1800)}

# Base experiment description handed to each run.
D = {'solverType': 'splitField', 'flavor': 'TE', 'numRuns': 1800,
     'expt': 'incConds', 'numProcs': 16}


def getMyVars(parseNumber, D):
    '''routine to return the parameters to test at the current iteration.'''
    # Sweep grid: 6 frequency counts x 6 incidence-angle counts x 50
    # backgrounds = 1800 combinations, matching D['numRuns'].
    noFreqs, noPhis, bkg = np.mgrid[1:7, 1:7, 0:50]
    noFreqs = noFreqs.flatten()
    noPhis = noPhis.flatten()
    bkg = bkg.flatten()

    # Log-spaced frequencies between 1 kHz and 50 kHz, rounded to whole Hz.
    D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), noFreqs[parseNumber]))
    # Incidence angles between -75 and +75 degrees, converted to radians.
    D['inc'] = (np.linspace(-75, 75, noPhis[parseNumber]) * np.pi / 180.0)
    # Background numbering starts at 100 — TODO confirm against the solver.
    D['bkgNo'] = bkg[parseNumber] + 100
    # One worker per (frequency, angle) pair.
    D['numProcs'] = len(D['freqs']) * len(D['inc'])

    if parseNumber in F['goTo']:
        # Fixed: the original used the Python 2 print *statement*, a syntax
        # error under Python 3.  print('...') behaves identically on both.
        print('here we go')
    else:
        # Disabled run: signal the caller with zero processes.
        D['numProcs'] = 0

    return D
|
{
"content_hash": "0615aca11760831ae0d059f0587ef664",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 96,
"avg_line_length": 29.89090909090909,
"alnum_prop": 0.6721411192214112,
"repo_name": "daStrauss/subsurface",
"id": "c34a366505f1443126b84278dc875ab515ba858f",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/expts/numberFrequencies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "295580"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the intangible draft-schematic template for the berserker rifle."""
    schematic = Intangible()
    schematic.template = "object/draft_schematic/weapon/shared_rifle_berserker.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return schematic
|
{
"content_hash": "d489f3d79d76711a94ef210ed8b6f301",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 23.53846153846154,
"alnum_prop": 0.696078431372549,
"repo_name": "anhstudios/swganh",
"id": "5a707da674bd8b9985630804690b81668695b794",
"size": "451",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/weapon/shared_rifle_berserker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""This hybrid library/listener can be used to verify messages that
have been logged and keywords have been called.
This works by listening for log messages and keywords via the
listener interface, and saving them in a cache. Keywords are
provided for doing assertions on called keywords and for resetting
the cache.
The keyword cache is reset for each test case to help keep it
from growing too large.
"""
import re
class TestListener(object):
    """Robot Framework library + listener that records log messages and
    executed keywords so tests can assert on them afterwards.

    The keyword cache is cleared at the start of every test case to keep
    it from growing without bound.
    """

    __test__ = False  # this class should be ignored by pytest
    ROBOT_LIBRARY_SCOPE = "TEST SUITE"
    ROBOT_LISTENER_API_VERSION = 2

    def __init__(self):
        self.ROBOT_LIBRARY_LISTENER = self
        self.message_log = []
        self.keyword_log = []
        self.message_logging_enabled = True

    def _log_message(self, message):
        """Called whenever a message is added to the log"""
        if not self.message_logging_enabled:
            return
        self.message_log.append(message)

    def _start_test(self, name, attrs):
        # fresh keyword cache for every test case
        self.reset_test_listener_keyword_log()

    def _end_keyword(self, name, attrs):
        # keep only the attributes the assertion keywords care about
        recorded = {key: attrs[key] for key in ("status", "args")}
        self.keyword_log.append((name, recorded))

    def reset_test_listener_keyword_log(self):
        """Reset the keyword cache

        This can be used to reset the cache in the middle of a
        testcase so that the 'Assert keyword Status' keyword will only
        apply to keywords called from this point onwards.
        """
        self.keyword_log.clear()

    def reset_test_listener_message_log(self):
        """Reset the message cache."""
        self.message_log = []

    def assert_keyword_status(self, expected_status, keyword_name, *args):
        """Assert that all keyword with the given name and args have the given status

        Keyword names need to be passed in as fully qualified names
        exactly as they appear in the logs.
        expected_status should be either PASS or FAIL

        Example
            Log  Hello, world
            Assert keyword status  PASS  BuiltIn.log  Hello, world
        """
        matches = [
            attrs
            for name, attrs in self.keyword_log
            if name == keyword_name and tuple(attrs["args"]) == args
        ]
        if not matches:
            raise AssertionError(
                f"No keyword with name '{keyword_name}' with args '{args}' was found"
            )
        for attrs in matches:
            if attrs["status"] != expected_status:
                message = (
                    f"Status of keyword {keyword_name} with args {args} "
                    f"expected to be {expected_status} but was {attrs['status']}"
                )
                raise AssertionError(message)

    def assert_robot_log(self, message_pattern, log_level=None):
        """Assert that a message matching the regex pattern was emitted"""
        # note: each entry is a dictionary with the following keys:
        # 'timestamp', 'message', 'level', 'html'
        for entry in self.message_log:
            if not re.search(message_pattern, entry["message"], re.MULTILINE):
                continue
            if log_level is None or entry["level"] == log_level:
                return True
        raise AssertionError(
            "Could not find a robot log message matching the pattern '{}'".format(
                message_pattern
            )
        )
|
{
"content_hash": "117cf8d228eafe41ea8fab1c9c4e71a1",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 85,
"avg_line_length": 36.795698924731184,
"alnum_prop": 0.6075394506136762,
"repo_name": "SalesforceFoundation/CumulusCI",
"id": "0894da4ceb3e8392506632d6a400342f57252d0a",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/robotframework/tests/salesforce/TestListener.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "754354"
},
{
"name": "RobotFramework",
"bytes": "9330"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
}
|
from statsd.connection import Connection
from statsd.client import Client
from statsd.timer import Timer
from statsd.gauge import Gauge
from statsd.average import Average
from statsd.raw import Raw
from statsd.counter import Counter, increment, decrement
# Public API of the statsd package.
__all__ = [
    'Client',
    'Connection',
    'Timer',
    'Counter',
    'Gauge',
    'Average',
    'Raw',
    'increment',
    'decrement',
]

# Distribution metadata, presumably read by the packaging scripts.
# NOTE(review): assigning __name__ at module level overrides the module's own
# name attribute (normally 'statsd'); kept as-is for compatibility, but worth
# confirming it is intentional.
__name__ = 'python-statsd'
__version__ = '1.5.7'
__author__ = 'Rick van Hattem'
__author_email__ = 'Rick.van.Hattem@Fawo.nl'
__description__ = ('''statsd is a client for Etsy's node-js statsd server. '''
                   '''A proxy for the Graphite stats collection and graphing server.''')
# Fixed: missing whitespace around '=' (PEP 8).
__url__ = 'https://github.com/WoLpH/python-statsd'
|
{
"content_hash": "17152e4e127a246c3da83b6a977b117e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 26.178571428571427,
"alnum_prop": 0.6725784447476125,
"repo_name": "LiquidGalaxy/lg-root-fs",
"id": "1f31a92e199a8d8767592cfd8070c0e4f63048fb",
"size": "733",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "home/lg/bin/statsd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32143"
},
{
"name": "Lua",
"bytes": "27848"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Perl",
"bytes": "24428"
},
{
"name": "Python",
"bytes": "23672"
},
{
"name": "Shell",
"bytes": "147850"
}
],
"symlink_target": ""
}
|
import sys, re, operator, string
#
# The event management substrate
#
class EventManager:
    """Minimal publish/subscribe dispatcher keyed by event type.

    Events are tuples whose first element is the event type; handlers
    registered for that type receive the whole event tuple.
    """

    def __init__(self):
        self._subscriptions = {}

    def subscribe(self, event_type, handler):
        """Register *handler* to be called for every event of *event_type*."""
        self._subscriptions.setdefault(event_type, []).append(handler)

    def publish(self, event):
        """Deliver *event* to every handler subscribed to its type (if any)."""
        for handler in self._subscriptions.get(event[0], []):
            handler(event)
#
# The application entities
#
class DataStorage:
    """ Models the contents of the file """
    def __init__(self, event_manager):
        self._event_manager = event_manager
        self._event_manager.subscribe('load', self.load)
        self._event_manager.subscribe('start', self.produce_words)

    def load(self, event):
        """Read the file named in event[1] and normalize it to lowercase words."""
        path_to_file = event[1]
        with open(path_to_file) as f:
            self._data = f.read()
        # Fixed: raw string for the regex — '[\W_]+' in a plain string is an
        # invalid escape sequence (DeprecationWarning on modern Python).
        pattern = re.compile(r'[\W_]+')
        self._data = pattern.sub(' ', self._data).lower()

    def produce_words(self, event):
        """Publish a ('word', w) event for every word, then a final ('eof', None)."""
        # Fixed: dropped the pointless ''.join(self._data), which merely
        # copied the string character by character.
        for w in self._data.split():
            self._event_manager.publish(('word', w))
        self._event_manager.publish(('eof', None))
class StopWordFilter:
    """ Models the stop word filter """
    def __init__(self, event_manager):
        # Perf: a set gives O(1) membership tests; the original list was
        # scanned linearly for every single word.
        self._stop_words = set()
        self._event_manager = event_manager
        self._event_manager.subscribe('load', self.load)
        self._event_manager.subscribe('word', self.is_stop_word)

    def load(self, event):
        """Load the comma-separated stop-word file and add all single letters."""
        with open('../stop_words.txt') as f:
            self._stop_words = set(f.read().split(','))
        self._stop_words.update(string.ascii_lowercase)

    def is_stop_word(self, event):
        """Re-publish event[1] as a ('valid_word', w) event unless it is a stop word."""
        word = event[1]
        if word not in self._stop_words:
            self._event_manager.publish(('valid_word', word))
class WordFrequencyCounter:
    """ Keeps the word frequency data """
    def __init__(self, event_manager):
        self._word_freqs = {}
        self._event_manager = event_manager
        self._event_manager.subscribe('valid_word', self.increment_count)
        self._event_manager.subscribe('print', self.print_freqs)

    def increment_count(self, event):
        """Bump the count for the word carried in event[1]."""
        word = event[1]
        self._word_freqs[word] = self._word_freqs.get(word, 0) + 1

    def print_freqs(self, event):
        """Print the 25 most frequent words, highest count first."""
        ranked = sorted(self._word_freqs.items(),
                        key=operator.itemgetter(1), reverse=True)
        for word, count in ranked[:25]:
            print(word, '-', count)
class WordFrequencyApplication:
    """Drives the pipeline: 'run' loads and streams the file, 'eof' prints."""

    def __init__(self, event_manager):
        self._event_manager = event_manager
        self._event_manager.subscribe('run', self.run)
        self._event_manager.subscribe('eof', self.stop)

    def run(self, event):
        """Kick off loading of the file named in event[1], then word production."""
        self._event_manager.publish(('load', event[1]))
        self._event_manager.publish(('start', None))

    def stop(self, event):
        """End of input: ask the counter to print its frequencies."""
        self._event_manager.publish(('print', None))
#
# The main function
#
# Wire every component to one shared EventManager.  Constructing each class
# registers its handlers with the manager, so the instances themselves can be
# discarded (the manager keeps references to their bound methods).
em = EventManager()
DataStorage(em), StopWordFilter(em), WordFrequencyCounter(em)
WordFrequencyApplication(em)
# Kick everything off with the input file given on the command line.
em.publish(('run', sys.argv[1]))
|
{
"content_hash": "995d76e0b14fb19dfbee2fc2c34ca199",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 95,
"avg_line_length": 32.18446601941748,
"alnum_prop": 0.6024132730015083,
"repo_name": "kranthikumar/exercises-in-programming-style",
"id": "4ada5470d04ee320e3c88fcfa4e0f3b46d38a48d",
"size": "3337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "16-bulletin-board/tf-16.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3561"
},
{
"name": "Clojure",
"bytes": "1635"
},
{
"name": "Java",
"bytes": "4859"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "86840"
},
{
"name": "Ruby",
"bytes": "249"
},
{
"name": "Scala",
"bytes": "2958"
},
{
"name": "Shell",
"bytes": "1994"
}
],
"symlink_target": ""
}
|
print("Running animation_simulation.py")

from setup import *
from Person import *

import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')

# plot configuration
# Single full-figure axes with fixed [-1, 1] bounds; tick labels are hidden
# because the simulation coordinates are abstract positions.
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on = False, xlim = (-1,1), ylim = (-1,1))
ax.set_xticklabels([])
ax.set_yticklabels([])
box = ax.get_position()

# One empty line artist per population group; coordinates are filled in by
# init()/animate() each frame.
susceptible_coords = ax.plot([], [], label='Susceptible')[0]
infective_coords= ax.plot([], [], label='Infective')[0]
remove_coords = ax.plot([], [], label='Remove')[0]
dead_coords = ax.plot([], [], label='Dead')[0]
def init():
    """Create the starting population and blank out all four plot lines.

    Used as FuncAnimation's init_func; returns the artists for blitting.
    NOTE(review): the group sizes (num_susceptibles, ...) and status
    constants come from `setup`/`Person` via star imports — confirm there.
    """
    # track the total population in one list (pointers to Person objects)
    global people
    people = (
        [Person(SUSCEPTIBLE) for _ in range(num_susceptibles)]
        + [Person(INFECTIVE) for _ in range(num_infectives)]
        + [Person(REMOVE) for _ in range(num_removes)]
        + [Person(DEAD) for _ in range(num_dead)]
    )

    # start with blank data
    for line in (susceptible_coords, infective_coords, remove_coords, dead_coords):
        line.set_data([], [])

    return susceptible_coords, infective_coords, remove_coords, dead_coords
def update_coordinates(coordinate_obj, data, color, legend):
    """Load one group's coordinates into a plot line and restyle it.

    A populated group arrives as an (n, 2) array of positions; anything
    else (e.g. an empty array) means the group has no members and the
    line is cleared instead.
    """
    if len(np.shape(data)) != 2:
        coordinate_obj.set_data([], [])
    else:
        coordinate_obj.set_data(data[:, 0], data[:, 1])

    # restyle the line as a semi-transparent scatter of colored circles
    coordinate_obj.set_linestyle('')
    coordinate_obj.set_marker('o')
    coordinate_obj.set_markersize(10)
    coordinate_obj.set_markerfacecolor(color)
    coordinate_obj.set_alpha(0.5)
    coordinate_obj.set_label(legend)
def animate(iteration):
    """FuncAnimation step: redraw the four population groups, then advance
    every person's state and position by one tick.

    Returns the four line artists so blitting works.
    """
    global people, box

    def locations_of(status):
        # coordinates of everyone currently in the given state
        return np.array([person.loc for person in people if person.status is status])

    # update the figure with new coordinates and statuses
    update_coordinates(susceptible_coords, locations_of(SUSCEPTIBLE), 'green', 'Susceptible')
    update_coordinates(infective_coords, locations_of(INFECTIVE), 'red', 'Infective')
    update_coordinates(remove_coords, locations_of(REMOVE), 'blue', 'Remove')
    update_coordinates(dead_coords, locations_of(DEAD), 'black', 'Dead')

    # adjust the plot so that the legend sits below the axes
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
              fancybox=True, shadow=True, ncol=5)

    # actually update the simulation
    for person in people:
        person.update_status(people, radius)
        person.update_location(movement_speed)

    return susceptible_coords, infective_coords, remove_coords, dead_coords
# make an animation!
# `iterations` comes from setup via the star import; interval=0 runs frames
# back-to-back, and blitting redraws only the returned artists.
ani = animation.FuncAnimation(fig, animate, frames=iterations, interval=0,
                              blit=True, init_func=init)
plt.show()
|
{
"content_hash": "b9d3ab2b83b3a1d97916df3decbeff26",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 35.956989247311824,
"alnum_prop": 0.6644736842105263,
"repo_name": "kraemerd17/flu-propagation-models",
"id": "d162588fe580859c162166c783c5e4307fb70b78",
"size": "3344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation/animation_simulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "111020"
},
{
"name": "Matlab",
"bytes": "248859"
},
{
"name": "Python",
"bytes": "47814"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # NOTE(review): unused below

# Open practiceselenium.com, follow the "Check Out" link, and verify the
# target page's title.  (The original comment said "Welcome link", which
# did not match the code.)
driver = webdriver.Firefox()
try:
    driver.get("http://www.practiceselenium.com/")
    driver.find_element_by_link_text("Check Out").click()
    assert "Check Out" in driver.title
finally:
    # quit() ends the whole WebDriver session; close() only closes the window
    # and would leak the browser/driver process if the assertion failed.
    driver.quit()
|
{
"content_hash": "0df335e72859588f4197cff6ed286ca4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 53,
"avg_line_length": 35.75,
"alnum_prop": 0.7832167832167832,
"repo_name": "bishnucit/Python-Preludes",
"id": "bf730adb1852f12cf3a15058247ad75810b90121",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/Seleniu",
"path": "6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7266"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from haystack import indexes
from .models import Event
from talks.events.models import EVENT_IN_PREPARATION, EVENT_PUBLISHED
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Event objects."""

    # text: multiple fields use to do full-text search
    text = indexes.MultiValueField(document=True, stored=False)
    title = indexes.CharField(model_attr='title')
    description = indexes.CharField(model_attr='description', null=True)
    slug = indexes.CharField(model_attr='slug', null=False)
    start = indexes.DateTimeField(model_attr='start', faceted=True)
    speakers = indexes.MultiValueField(faceted=True, null=True)
    department = indexes.CharField(faceted=True, null=True)
    location = indexes.CharField(faceted=True, null=True)
    topics = indexes.MultiValueField(faceted=True, null=True)
    is_published = indexes.BooleanField(null=False)
    is_cancelled = indexes.BooleanField(null=False)
    group = indexes.CharField(faceted=True, null=True)
    group_slug = indexes.CharField(null=True)
    lists = indexes.MultiValueField(faceted=True, null=True)

    def get_model(self):
        return Event

    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        return self.get_model().objects.all()

    def prepare(self, obj):
        """Overriding the prepare() method of SearchIndex in order to add our most complicated fields

        It is done in prepare() rather than the individual prepare_FIELD() to avoid having to do
        multiple calls to the APIs...
        """
        self.prepared_data = super(EventIndex, self).prepare(obj)

        # Collect topic labels once; both facets and full-text use them.
        topics = obj.api_topics
        topics_pref_labels = []
        topic_alt_labels = []
        if topics:
            for topic in topics:
                topics_pref_labels.append(topic.get('prefLabel', ''))
                topic_alt_labels.extend(topic.get('altLabels', []))

        # Hit each external API at most once.
        api_dept = obj.api_organisation if obj.department_organiser else None
        api_loc = obj.api_location if obj.location else None

        speakers_names = [speaker.name for speaker in obj.speakers.all()]

        # Speakers
        self.prepared_data[self.speakers.index_fieldname] = speakers_names

        # Department organiser
        if api_dept:
            self.prepared_data[self.department.index_fieldname] = api_dept.get('name', '')

        # Location
        if api_loc:
            self.prepared_data[self.location.index_fieldname] = api_loc.get('name', '')

        # Topics
        if topics_pref_labels:
            self.prepared_data[self.topics.index_fieldname] = topics_pref_labels

        # Published status
        self.prepared_data[self.is_published.index_fieldname] = obj.is_published
        self.prepared_data[self.is_cancelled.index_fieldname] = obj.is_cancelled

        # Series name
        if obj.group:
            self.prepared_data[self.group.index_fieldname] = obj.group.title
            self.prepared_data[self.group_slug.index_fieldname] = obj.group.slug

        # Public lists containing this event.  (Fixed: the loop variable was
        # previously named `list`, shadowing the builtin.)
        lists_names = [collection.title for collection in
                       obj.public_collections_containing_this_event.all()]
        self.prepared_data[self.lists.index_fieldname] = lists_names

        # Everything searchable via the free-text query.
        full_text_content = []
        if obj.title:
            full_text_content.append(obj.title)
        if obj.description:
            full_text_content.append(obj.description)
        if topics_pref_labels:
            full_text_content.extend(topics_pref_labels)
        if topic_alt_labels:
            full_text_content.extend(topic_alt_labels)
        if obj.group:
            full_text_content.append(obj.group.title)
        full_text_content.extend(speakers_names)
        full_text_content.extend(lists_names)

        self.prepared_data[self.text.index_fieldname] = full_text_content

        return self.prepared_data
|
{
"content_hash": "ac6b3324fba42290d9796a4496941da8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 101,
"avg_line_length": 38.443396226415096,
"alnum_prop": 0.6522699386503068,
"repo_name": "ox-it/talks.ox",
"id": "c227bb20033d2d96883088454a827f4e53a6df87",
"size": "4075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talks/events/search_indexes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23891"
},
{
"name": "Dockerfile",
"bytes": "750"
},
{
"name": "HTML",
"bytes": "117234"
},
{
"name": "JavaScript",
"bytes": "98316"
},
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Python",
"bytes": "312877"
},
{
"name": "RobotFramework",
"bytes": "18436"
}
],
"symlink_target": ""
}
|
import urllib
from oslo.config import cfg
from designate import exceptions
from designate.openstack.common import log as logging
# Module-level handles: package logger and the global oslo.config registry.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseView(object):
    """
    The Views are responsible for converting to/from the "internal" and
    "external" representations of collections and resources. This includes
    adding "links" and adding/removing any other wrappers returned/received
    as part of the API call.

    For example, in the V2 API, we did s/domain/zone/. Adapting a record
    resources "domain_id" <-> "zone_id" is the responsibility of a View.
    """
    # Subclasses override these, e.g. 'zone' / 'zones'.
    _resource_name = None
    _collection_name = None

    def __init__(self):
        super(BaseView, self).__init__()

        self.base_uri = CONF['service:api']['api_base_uri'].rstrip('/')

    def list(self, context, request, items, parents=None):
        """View of a list of items"""
        result = {
            "links": self._get_collection_links(request, items, parents)
        }

        # '?detail=yes' switches between the basic and detailed item views.
        if 'detail' in request.GET and request.GET['detail'] == 'yes':
            result[self._collection_name] = self.list_detail(context, request,
                                                             items)
        else:
            result[self._collection_name] = self.list_basic(context, request,
                                                            items)

        return result

    def list_basic(self, context, request, items):
        """Non-detailed list of items"""
        return [self.show_basic(context, request, i) for i in items]

    def list_detail(self, context, request, items):
        """Detailed list of items"""
        return [self.show_detail(context, request, i) for i in items]

    def show(self, context, request, item):
        """Show a single item"""
        result = {}

        if 'detail' in request.GET and request.GET['detail'] == 'yes':
            result[self._resource_name] = self.show_detail(context, request,
                                                           item)
        else:
            result[self._resource_name] = self.show_basic(context, request,
                                                          item)

        return result

    def show_basic(self, context, request, item):
        """Non-detailed view of an item"""
        raise NotImplementedError()

    def show_detail(self, context, request, item):
        """Detailed view of an item"""
        return self.show_basic(context, request, item)

    def _load(self, context, request, body, valid_keys):
        """Extract a "central" compatible dict from an API call.

        Keys not listed in valid_keys cause an InvalidObject error that
        names every offending key at once.
        """
        result = {}
        item = body[self._resource_name]

        error_keys = []

        # Copy keys which need no alterations
        for k in item:
            if k in valid_keys:
                result[k] = item[k]
            else:
                error_keys.append(k)

        if error_keys:
            # Fixed: the original called the unbound str.format() with the
            # template as its first argument; call .format() on the template
            # string directly (same resulting message).
            error_message = (
                'Provided object does not match schema. Keys {0} are not '
                'valid in the request body'.format(error_keys))

            raise exceptions.InvalidObject(error_message)

        return result

    def _get_resource_links(self, request, item, parents=None):
        return {
            "self": self._get_resource_href(request, item, parents),
        }

    def _get_collection_links(self, request, items, parents=None):
        # TODO(kiall): Next and previous links should only be included
        #              when there are more/previous items.. This is what nova
        #              does.. But I think we can do better.
        params = request.GET

        result = {
            "self": self._get_collection_href(request, parents),
        }

        # See above
        # if 'marker' in params:
        #     result['previous'] = self._get_previous_href(request, items,
        #                                                  parents)

        # A full page implies there may be more results after it.
        if 'limit' in params and int(params['limit']) == len(items):
            result['next'] = self._get_next_href(request, items, parents)

        return result

    def _get_base_href(self, parents=None):
        href = "%s/v2/%s" % (self.base_uri, self._collection_name)

        return href.rstrip('?')

    def _get_resource_href(self, request, item, parents=None):
        base_href = self._get_base_href(parents)
        href = "%s/%s" % (base_href, item['id'])

        return href.rstrip('?')

    def _get_collection_href(self, request, parents=None, extra_params=None):
        params = request.GET

        # NOTE(review): this updates request.GET in place, so repeated calls
        # on the same request accumulate extra_params — confirm whether the
        # framework hands back a mutable copy here.
        if extra_params is not None:
            params.update(extra_params)

        base_href = self._get_base_href(parents)

        href = "%s?%s" % (base_href, urllib.urlencode(params))

        return href.rstrip('?')

    def _get_next_href(self, request, items, parents=None):
        # Prepare the extra params: page starts after the last item returned.
        extra_params = {
            'marker': items[-1]['id']
        }

        return self._get_collection_href(request, parents, extra_params)

    def _get_previous_href(self, request, items, parents=None):
        # Prepare the extra params: page starts before the first item returned.
        extra_params = {
            'marker': items[0]['id']
        }

        return self._get_collection_href(request, parents, extra_params)
|
{
"content_hash": "e97e5de2d042ebee45e13a6734651939",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 78,
"avg_line_length": 32.838509316770185,
"alnum_prop": 0.559674673728012,
"repo_name": "melodous/designate",
"id": "653645263986a47efb777924db708e6cea8c832b",
"size": "5936",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/juno",
"path": "designate/api/v2/views/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1335614"
},
{
"name": "Ruby",
"bytes": "2332"
},
{
"name": "Shell",
"bytes": "7341"
}
],
"symlink_target": ""
}
|
import boto3
import json
import logging
import os
import sys
import urllib.request
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
from urllib.error import HTTPError, URLError
from botocore.exceptions import ClientError
from datetime import datetime
class KubecostReportsExporter:
    def __init__(self) -> None:
        """Wire up the exporter: S3 client, logger, env-var config, Sentry."""
        self.s3 = boto3.client('s3')
        self.log_level = os.environ.get('LOG_LEVEL', 'debug')
        self.logger = self.set_log_level(self.log_level)
        self.parse_env_vars()
        self.enable_sentry_logging()
    def enable_sentry_logging(self):
        """Initialise the Sentry SDK when SENTRY_DSN is set (no-op otherwise)."""
        sentry_logging = LoggingIntegration(
            level=logging.INFO,        # Capture info and above as breadcrumbs
            event_level=logging.ERROR  # Send errors as events
        )
        if self.sentry_dsn:
            sentry_sdk.init(dsn=self.sentry_dsn, integrations=[sentry_logging])
def set_log_level(self, log_level):
levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
logger = logging.getLogger("kubecost-reports-exporter")
logger.setLevel(levels[log_level])
return logger
def parse_env_vars(self):
self.bucket_acl = os.environ.get('BUCKET_ACL', 'private')
self.region = os.environ.get('AWS_REGION', 'eu-west-1')
self.create_bucket = eval(os.environ.get("CREATE_BUCKET", 'False'))
self.account_id = os.environ.get('ACCOUNT_CANONICAL_ID')
self.tags = os.environ.get('TAGS', '')
self.sentry_dsn = os.environ.get('SENTRY_DSN')
self.request_timeout = int(os.environ.get('REQUEST_TIMEOUT', '3000'))
try:
self.cluster_name = os.environ['CLUSTER_NAME']
self.report_type = os.environ['REPORT_TYPE']
self.kubecost_endpoint = os.environ['KUBECOST_ENDPOINT']
self.kubecost_url = os.environ['KUBECOST_URL']
self.bucket_name = os.environ['BUCKET_NAME']
except KeyError as ke:
self.logger.error(
f"KeyError: environment variable {str(ke)} is not set ")
sys.exit(1)
def create_s3_bucket(self):
self.logger.info(f'Creating bucket {self.bucket_name}')
try:
self.s3.create_bucket(
ACL=self.bucket_acl, Bucket=self.bucket_name,
CreateBucketConfiguration={'LocationConstraint': self.region}
)
except ClientError as e:
code = e.response['Error']['Code']
if code == 'BucketAlreadyExists' or code == 'BucketAlreadyOwnedByYou':
self.logger.warning(
f"Bucket {self.bucket_name} already exists"
)
else:
self.logger.error(str(e))
sys.exit(1)
def build_filename(self):
day_prefix = datetime.utcnow().strftime("%Y_%m_%d")
time_prefix = datetime.utcnow().strftime("%Y%m%d_%H_%M_%S")
return f"{self.cluster_name}/{self.report_type}/{day_prefix}/aws_kubecost_{self.report_type}_{time_prefix}.json"
# function calling the kubecost svc to get JSON response with the cost data
def get_cost_report(self, url):
try:
with urllib.request.urlopen(url, timeout=self.request_timeout) as f:
if f.getcode() != 200:
self.logger.error(
f"Error: Failed to reports from {self.cluster_name}")
sys.exit(1)
return f.read().decode('utf-8')
except HTTPError as error:
self.logger.error(
'Data not retrieved because %s\nURL: %s', error, url)
sys.exit(1)
except URLError as error:
self.logger.error(
'Failed to retrive cost reports %s\nURL: %s', error, url)
sys.exit(1)
# Uploading the JSON string to S3 bucket
def upload_report_to_aws(self, bucket, s3_file_name, content, account_id):
self.logger.info(f'Uploading reports for cluster {self.cluster_name}')
try:
# if account id is not provided upload without access grant
if not account_id:
self.s3.put_object(
Bucket=bucket,
Body=content,
Key=s3_file_name,
ServerSideEncryption='AES256'
)
else:
# provide object access to bucket owner
self.s3.put_object(
Bucket=bucket,
Body=content,
Key=s3_file_name,
ServerSideEncryption='AES256',
GrantFullControl=f'id="{account_id}"'
)
self.logger.info("Reports uploaded Successful")
except ClientError as e:
self.logger.error(str(e))
sys.exit(1)
def parse_tags_from_env(self, tags):
parsed_tags = {}
if not tags:
return parsed_tags
tags_array = tags.split(",")
for tag in tags_array:
tag = tag.split("=")
parsed_tags[tag[0]] = tag[1]
return parsed_tags
def start(self):
if self.create_bucket:
self.create_s3_bucket()
# check that the json has data
report = self.get_cost_report(
self.kubecost_endpoint + self.kubecost_url)
if not bool(json.loads(report)["data"]):
self.logger.info("Error: Skipping upload, report has no data")
return
self.upload_report_to_aws(
self.bucket_name, self.build_filename(),
report,
self.account_id
)
# Entry point: run a single fetch-and-upload cycle when executed as a script.
if __name__ == "__main__":
    KubecostReportsExporter().start()
|
{
"content_hash": "8cba82037091c4887386777778b9182d",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 120,
"avg_line_length": 35.64848484848485,
"alnum_prop": 0.5637538252295138,
"repo_name": "deliveryhero/helm-charts",
"id": "b90929f4ea11273620012b6a19ef79c8b58686ae",
"size": "5906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable/kubecost-reports-exporter/docker/cost-exporter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "233"
},
{
"name": "Mustache",
"bytes": "59474"
},
{
"name": "Open Policy Agent",
"bytes": "6322"
},
{
"name": "Python",
"bytes": "12690"
},
{
"name": "Shell",
"bytes": "10489"
},
{
"name": "Smarty",
"bytes": "41581"
}
],
"symlink_target": ""
}
|
""" Outline nodes.
We use them for re-formulations and for inlining of code. They are expressions
that get their value from return statements in their code body. They do not
own anything by themselves. It's just a way of having try/finally for the
expressions.
"""
from .NodeBases import ChildrenHavingMixin, ExpressionChildrenHavingBase
class ExpressionOutlineBody(ExpressionChildrenHavingBase):
    """ Outlined code.

        This is for a call to a piece of code to be executed in a specific
        context. It contains an exclusively owned function body, that has
        no other references, and can be considered part of the calling
        context.

        It must return a value, to use as expression value.
    """

    kind = "EXPRESSION_OUTLINE_BODY"

    named_children = (
        "body",
    )

    def __init__(self, provider, name, body, source_ref):
        # An outline must be named; the name also seeds temp scope names
        # (see getOutlineTempScope / allocateTempScope below).
        assert name != ""

        ExpressionChildrenHavingBase.__init__(
            self,
            values = {
                "body" : body
            },
            source_ref = source_ref
        )

        # The enclosing scope this outline logically belongs to.
        self.provider = provider
        self.name = name

        # Lazily allocated on first use by getOutlineTempScope().
        self.temp_scope = None

        # Hack: This allows some APIs to work although this is not yet
        # officially a child yet. Important during building.
        self.parent = provider

    def getDetails(self):
        # Identifying details used e.g. for XML dumps / comparisons.
        return {
            "provider" : self.provider,
            "name" : self.name
        }

    # Generated accessors for the single "body" child.
    getBody = ChildrenHavingMixin.childGetter("body")
    setBody = ChildrenHavingMixin.childSetter("body")

    def getOutlineTempScope(self):
        # We use our own name as a temp_scope, cached from the parent, if the
        # scope is None.
        if self.temp_scope is None:
            self.temp_scope = self.provider.allocateTempScope(self.name)

        return self.temp_scope

    def allocateTempVariable(self, temp_scope, name):
        # Delegate to the provider, defaulting to our own temp scope.
        if temp_scope is None:
            temp_scope = self.getOutlineTempScope()

        return self.provider.allocateTempVariable(
            temp_scope = temp_scope,
            name = name
        )

    def allocateTempScope(self, name):
        # Let's scope the temporary scopes by the outline they come from.
        return self.provider.allocateTempScope(
            name = self.name + '$' + name
        )

    def computeExpressionRaw(self, constraint_collection):
        owning_module = self.getParentModule()

        # Make sure the owning module is added to the used set. This is most
        # important for helper functions, or modules, which otherwise have
        # become unused.
        from nuitka.ModuleRegistry import addUsedModule
        addUsedModule(owning_module)

        # Return statements inside the outline body must be caught here,
        # not propagate to the enclosing function; hence catch_returns.
        abort_context = constraint_collection.makeAbortStackContext(
            catch_breaks = False,
            catch_continues = False,
            catch_returns = True,
            catch_exceptions = False
        )

        with abort_context:
            body = self.getBody()

            result = body.computeStatementsSequence(
                constraint_collection = constraint_collection
            )

            # Re-attach the (possibly) optimized statement sequence.
            if result is not body:
                self.setBody(result)
                body = result

            # Merge the collections of every return path back together.
            return_collections = constraint_collection.getFunctionReturnCollections()

        constraint_collection.mergeMultipleBranches(return_collections)

        # If the first statement is already a return, everything after it
        # is unreachable, so the outline collapses to that expression.
        if body.getStatements()[0].isStatementReturn():
            return (
                body.getStatements()[0].getExpression(),
                "new_expression",
                "Outline is now simple expression, use directly."
            )

        # TODO: Function outline may become too trivial to outline and return
        # collections may tell us something.
        return self, None, None
|
{
"content_hash": "db5a006d96721192770bfa0a2c757a76",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 85,
"avg_line_length": 30.725806451612904,
"alnum_prop": 0.6146981627296588,
"repo_name": "wfxiang08/Nuitka",
"id": "c15e6d36e3a15f62cd7f53c08d110fd3dbd05b1e",
"size": "4590",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nuitka/nodes/OutlineNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "441058"
},
{
"name": "Python",
"bytes": "4431574"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
import os
import re
import os.path
import argparse
import paths
import numpy as np
from PIL import Image, ImageFont, ImageOps, ImageEnhance
from PIL.ImageDraw import Draw
# Contrast boost applied to every extracted line image (empirical value;
# used via ImageEnhance.Contrast(...).enhance(contrast_ratio) below).
contrast_ratio = 3
def resize(img, hMax):
    """Scale *img* to height hMax pixels, preserving aspect ratio.

    The new width is the old width scaled by hMax/height, truncated to
    an int.
    """
    (wIm, hIm) = img.size
    ratio = hMax / float(hIm)
    (wNewIm, hNewIm) = (int(wIm * ratio), hMax)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same filter under its current name (an alias since Pillow 2.7),
    # so this is behavior-identical but works on modern Pillow too.
    newIm = img.resize((wNewIm, hNewIm), Image.LANCZOS)
    return newIm
def fill_border(img, left, right, hMax):
    """Pad a grayscale line image with white columns on both sides.

    *left*/*right* give the number of padding columns; *hMax* must be
    the image height.  Returns a new PIL image.
    """
    white = 255
    pixels = np.asarray(img)
    left_pad = np.full((hMax, left), white, dtype=np.uint8)
    right_pad = np.full((hMax, right), white, dtype=np.uint8)
    padded = np.concatenate((left_pad, pixels, right_pad), axis=1)
    return Image.fromarray(padded)
def crop_and_save_line(image, ymin, ymax, name):
    """Cut the horizontal band [ymin, ymax) out of *image* and
    normalise it: 32 px high, 16 px white margins left and right,
    contrast boosted by the global contrast_ratio.

    When a preview directory is configured, the result is also saved
    there under *name*.  Returns the processed PIL image.
    """
    left, right = 0, image.size[0]
    line = image.crop((left, ymin, right, ymax))
    line = resize(line, 32)
    line = fill_border(line, 16, 16, 32)
    line = ImageEnhance.Contrast(line).enhance(contrast_ratio)
    if paths.previewPath():
        line.save(os.path.join(paths.previewPath(), name))
    return line
def refine_line_bounds(image):
    """Estimate tighter vertical bounds for the text line in *image*.

    The squared mean row darkness is treated as a probability
    distribution over y; the bounds come from its mean and standard
    deviation, asymmetrically (+2*sigma downward, -1.2*sigma upward).
    Returns (ymin, ymax) clamped to the image height.
    """
    top, bottom = 0, image.size[1]
    boosted = ImageEnhance.Contrast(image).enhance(2)
    darkness = 255 - np.array(boosted)
    weights = np.square(np.mean(darkness, axis=1))
    prob = weights / np.sum(weights)
    ys = np.asarray(range(top, bottom))
    mean_y = np.dot(prob, ys)
    sigma = np.sqrt(np.dot(prob, (ys - mean_y) ** 2))
    bottom = min(bottom, int(round(mean_y + 2 * sigma)))
    top = max(top, int(round(mean_y - 1.2 * sigma)))
    return top, bottom
def preprocessOne(f):
    """Load the image file *f*, convert it to grayscale, and normalise
    it to a single preprocessed text line.

    Returns (basename_of_f, processed PIL image).
    """
    img = ImageOps.grayscale(Image.open(f))
    top, bottom = 0, img.size[1]
    baseName = os.path.basename(f)
    img = crop_and_save_line(img, top, bottom, baseName)
    return baseName, img
|
{
"content_hash": "3f4f8bb1dc1223079b19359363bf2f5b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 65,
"avg_line_length": 28.43548387096774,
"alnum_prop": 0.659103800340329,
"repo_name": "mvpossum/deep-learning",
"id": "3c5345dbdb2629b8c625fe49bf3a7e2c0f733128",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tp4/entrega/preproceso.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "230911"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
}
|
import unittest as ut
import pypsignifit.psignidata as pd
import swignifit.swignifit_raw as sft
import numpy as np
import sys
def approximatedly_equal ( x, y, eps=1e-4 ):
    """Return True when x and y differ by more than *eps* (logging the
    mismatch to stderr), False when they agree within tolerance.

    Note the inverted sense of the name: callers assert the result
    equals 0 to check approximate equality.
    """
    differs = abs(x - y) > eps
    if differs:
        sys.stderr.write ( "%g != %g\n" % (x, y) )
    return differs
class TestPsiInference ( ut.TestCase ):
    """Tests for the pd.PsiInference base class.

    A hand-set parameter estimate is installed in setUp, so all the
    numeric expectations below are fixed regression values derived
    from it.
    """

    def setUp ( self ):
        self.pinf = pd.PsiInference ()
        # Fixed parameters; every expected number below depends on them.
        self.pinf.estimate = [2.,1.,.02]

    def test_evaluate ( self ):
        # Evaluating with explicit parameters and with the stored
        # estimate must give the same values.
        evaluated = self.pinf.evaluate ( [1,2],[2.,1.,.02] )
        self.assertEqual ( approximatedly_equal ( evaluated[0], 0.62909188225759771), 0 )
        self.assertEqual ( approximatedly_equal ( evaluated[1], 0.74), 0 )
        evaluated = self.pinf.evaluate ( [1,2] )
        self.assertEqual ( approximatedly_equal ( evaluated[0], 0.62909188225759771), 0 )
        self.assertEqual ( approximatedly_equal ( evaluated[1], 0.74), 0 )

    def test_getThres ( self ):
        # Without data attached, thresholds are not defined.
        self.assertRaises ( NotImplementedError, self.pinf.getThres, .5 )
        self.pinf.data = [[1,2,3]]
        evaluated = self.pinf.getThres ( .5 )
        self.assertEqual ( approximatedly_equal ( evaluated, 2.), 0 )
        evaluated = self.pinf.getThres ( .3 )
        self.assertEqual ( approximatedly_equal ( evaluated, 1.1527021396127963), 0 )

    def test_repr ( self ):
        self.assertEqual ( self.pinf.__repr__(), "< PsiInference object >" )

    def test_properties ( self ):
        # Default description and plotting attributes.
        self.assertEqual ( self.pinf.desc, 'sigmoid: logistic\ncore: ab\nnAFC: 2' )
        self.assertEqual ( self.pinf.label, "Psychometric function fit" )
        self.assertEqual ( self.pinf.color, "b" )
        self.assertEqual ( self.pinf.linestyle, "-" )
        self.assertEqual ( self.pinf.marker, "o" )
        self.assertEqual ( self.pinf.linewidth, 1 )
class TestBootstrapInference ( ut.TestCase ):
    """Regression tests for pd.BootstrapInference.

    The RNG is seeded in setUp, so the expected numbers below depend on
    the exact order of construction and sampling calls -- do not
    reorder statements.  (Python 2 era code: zip() returns a list here;
    confirm before running under Python 3.)
    """

    def setUp ( self ):
        sft.setSeed(0)

        nafc = 2
        stimulus_intensities = [0.0,2.0,4.0,6.0,8.0,10.0]
        number_of_correct = [34,32,40,48,50,48]
        number_of_trials = [50]*len(stimulus_intensities)
        data = zip(stimulus_intensities,number_of_correct,number_of_trials)

        self.parametric = pd.BootstrapInference ( data, priors=("","","Beta(2,30)"), parametric=True )
        self.nonparametric = pd.BootstrapInference ( data, priors=("","","Beta(2,30)"), parametric=False )

    def test_map ( self ):
        # Parametric and nonparametric MAP estimates must agree with
        # each other and with the stored regression values.
        map1 = self.parametric.estimate
        map2 = self.nonparametric.estimate
        should_be = [ 2.7373, 1.40406, 0.020320093764199146 ]

        for val1,val2,val3 in zip(map1,map2,should_be):
            self.assertEqual ( approximatedly_equal ( val1, val2 ), 0 )
            self.assertEqual ( approximatedly_equal ( val1, val3 ), 0 )

    def test_boots ( self ):
        # Confidence intervals and deviances after bootstrap sampling.
        self.parametric.sample ()
        self.nonparametric.sample ()
        parci = self.parametric.getCI(1)
        nprci = self.nonparametric.getCI(1)
        self.assertEqual ( approximatedly_equal ( parci[0], 1.69 ), 0 )
        self.assertEqual ( approximatedly_equal ( parci[1], 3.8539 ), 0 )
        self.assertEqual ( approximatedly_equal ( nprci[0], 1.11463 ), 0 )
        self.assertEqual ( approximatedly_equal ( nprci[1], 4.05597 ), 0 )
        self.assertEqual ( self.parametric.nsamples, 2000 )
        self.assertEqual ( self.nonparametric.nsamples, 2000 )
        self.assertEqual ( approximatedly_equal ( self.parametric.deviance, 8.1689126711025022 ), 0 )
        self.assertEqual ( approximatedly_equal ( self.nonparametric.deviance, 8.1689126711025022 ), 0 )

    def test_sensitivity ( self ):
        # Sensitivity analysis should widen/adjust the CI to these values.
        self.parametric.sensitivity_analysis (verbose=False)
        parci = [ 1.5905, 3.87779 ]
        extci = self.parametric.getCI(1)
        for par,ext in zip(parci,extci):
            self.assertEqual ( approximatedly_equal ( par, ext ), 0 )

    def test_keywordhandling ( self ):
        # Unknown keyword arguments must be rejected.
        self.assertRaises ( ValueError, pd.BootstrapInference, self.parametric.data, shape="logistic" )

    def test_pickling ( self ):
        # Both inference objects must round-trip through pickle.
        import pickle
        string = pickle.dumps ( self.parametric )
        unpickled_parametric = pickle.loads ( string )
        string = pickle.dumps ( self.nonparametric )
        unpickled_nonparametric = pickle.loads ( string )
class TestBayesInference ( ut.TestCase ):
    """Regression tests for pd.BayesInference (MCMC).

    All expected numbers are tied to the seed set in setUp and the
    exact sequence of sampling operations; do not reorder statements.
    (Python 2 era code: note xrange and list-returning zip.)
    """

    def setUp ( self ):
        sft.setSeed(0)

        nafc = 2
        stimulus_intensities = [0.0,2.0,4.0,6.0,8.0,10.0]
        number_of_correct = [34,32,40,48,50,48]
        number_of_trials = [50]*len(stimulus_intensities)
        data = zip(stimulus_intensities,number_of_correct,number_of_trials)

        self.mcmc = pd.BayesInference ( data, priors=("Gauss(0,100)","Gamma(1.01,200)","Beta(2,30)") )

    def test_all ( self ):
        # MAP estimate, posterior mean, and adapted step sizes.
        mapest = self.mcmc.mapestimate
        meanest = self.mcmc.estimate
        map_target = [ 2.73973931, 6.15554732, 0.02034599]
        mean_target =[ 2.64938, 6.44707, 0.027297]
        steps_made = self.mcmc._steps
        steps = [ 0.726551, 2.45564, 0.013264]
        burnin = 0
        thinning = 1
        nsamples = 600
        for k in xrange ( 3 ):
            self.assertEqual ( approximatedly_equal ( mapest[k], map_target[k] ), 0 )
            self.assertEqual ( approximatedly_equal ( meanest[k], mean_target[k] ), 0 )
            self.assertEqual ( approximatedly_equal ( steps_made[k], steps[k] ), 0 )
        self.assertEqual ( approximatedly_equal ( self.mcmc.bayesian_p(),0.126667 ), 0 )
        self.assertEqual ( burnin, self.mcmc.burnin )
        self.assertEqual ( thinning, self.mcmc.thin )
        self.assertEqual ( nsamples, self.mcmc.nsamples )

        # Posterior correlation diagnostics.
        target_rpd = -0.0244598
        target_rkd = -0.362064
        self.assertEqual ( approximatedly_equal ( self.mcmc.Rpd, target_rpd ), 0 )
        self.assertEqual ( approximatedly_equal ( self.mcmc.Rkd, target_rkd ), 0 )

        # Deviance residuals, thresholds, and total deviance.
        target_dr = (1.64122, -0.675137, -0.709666, 0.925372, 2.00248, -0.376286)
        target_thres = [ 1.03761, 2.64938, 4.26115]
        target_deviance = 8.66087
        for dr,tdr in zip ( self.mcmc.devianceresiduals, target_dr ):
            self.assertEqual ( approximatedly_equal ( dr, tdr ), 0 )
        for th,tth in zip ( self.mcmc.thres, target_thres ):
            self.assertEqual ( approximatedly_equal ( th, tth ), 0 )
        self.assertEqual ( approximatedly_equal ( self.mcmc.deviance, target_deviance ), 0 )

        # Randomly check single samples
        target_mcRpd = [ -0.162011225773 , -0.658780099748 , 0.142264200236 ]
        target_mcRkd = [ -0.519220509587 , -0.969883465483 , -0.199933951214 ]
        target_mcthres = [ [ 1.20085248224 , 2.73973931207 , 4.27862614189 ], [ 2.87682033438 , 3.73674348349 , 4.5966666326 ], [ 0.70405560915 , 2.68052230561 , 4.65698900207 ] ]
        indices = [10,50,100]
        for k in xrange ( 3 ):
            self.assertEqual ( approximatedly_equal ( self.mcmc.mcRpd[indices[k]], target_mcRpd[k] ), 0 )
            self.assertEqual ( approximatedly_equal ( self.mcmc.mcRkd[indices[k]], target_mcRkd[k] ), 0 )
            for l in xrange ( 3 ):
                self.assertEqual ( approximatedly_equal ( self.mcmc.mcthres[indices[k]][l], target_mcthres[k][l] ), 0 )

    def test_keywordhandling ( self ):
        # Unknown keyword arguments must be rejected.
        self.assertRaises ( ValueError, pd.BayesInference, self.mcmc.data, shape="logistic" )

    def test_pickling ( self ):
        # The MCMC object must round-trip through pickle.
        import pickle
        string = pickle.dumps ( self.mcmc )
        unpickled_mcmc = pickle.loads ( string )
        self.mcmc = unpickled_mcmc
class Testcheck_kwargs ( ut.TestCase ):
    """Tests for pd.check_kwargs, which validates keyword argument
    names against the parameters declared in a reST-style docstring.

    check_kwargs returns 0 when all keys are documented, or the first
    undocumented key otherwise (per the assertions below).
    """

    def test_checking ( self ):
        # A docstring without a :Parameters: section accepts nothing.
        self.assertRaises ( ValueError, pd.check_kwargs, {"test": 1}, "Some text" )
        # NOTE: docstr is *data* parsed by check_kwargs, not a
        # docstring of this test method.
        docstr = """:Parameters:
        *test* :
            useless documentation
        *anotherprm* :
            dummy parameter
        *prm0* :
            should work with numbers, too
        *prmCamelCase* :
            should work with capitals, too
        *prm_with_underscores* :
            and should work with underscores
        *prmtype* : float
            and should work with type specifications
    """
        self.assertEqual ( 0, pd.check_kwargs ( {"test": 1}, docstr ) )
        self.assertEqual ( "notavailable", pd.check_kwargs ( {"notavailable": 1}, docstr ) )
        self.assertEqual ( 0, pd.check_kwargs ( {"prm0": 1}, docstr ) )
        self.assertEqual ( 0, pd.check_kwargs ( {"prmCamelCase": 1}, docstr ) )
        self.assertEqual ( 0, pd.check_kwargs ( {"prm_with_underscores": 1}, docstr ) )
        self.assertEqual ( 0, pd.check_kwargs ( {"prmtype": 1}, docstr ) )
        self.assertEqual ( "notin1", pd.check_kwargs ( {"notin1": 1, "test": 1}, docstr ) )
        self.assertEqual ( "notin1", pd.check_kwargs ( {"test": 1, "notin1": 1}, docstr ) )
        self.assertEqual ( "notin1", pd.check_kwargs ( {"test": 1, "notin1": 1, "notin2": 1}, docstr ) )
# Run the full suite by default; the commented lines show how to run a
# single TestCase instead.
if __name__ == "__main__":
    ut.main()
#    suite = ut.TestLoader().loadTestsFromTestCase(TestBayesInference)
#    ut.TextTestRunner ().run(suite)
|
{
"content_hash": "3c43f11812ca06416f603250b66a287c",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 179,
"avg_line_length": 49.1968085106383,
"alnum_prop": 0.5966050383825279,
"repo_name": "esc/Psignifit-3.x",
"id": "6e2893a951a885b66365248411ffa987dbc2803b",
"size": "9272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/psignidatatest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1835"
},
{
"name": "C++",
"bytes": "385427"
},
{
"name": "Inno Setup",
"bytes": "7857"
},
{
"name": "Makefile",
"bytes": "14574"
},
{
"name": "Matlab",
"bytes": "46120"
},
{
"name": "Python",
"bytes": "408054"
},
{
"name": "R",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "787"
},
{
"name": "TeX",
"bytes": "24029"
}
],
"symlink_target": ""
}
|
"""Auto-generated file, do not edit by hand. CI metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

# Short-number metadata for region code "CI" (presumably Côte d'Ivoire;
# generated from upstream libphonenumber data -- do not hand-edit the
# patterns below).
PHONE_METADATA_CI = PhoneMetadata(id='CI', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_length=(3, 4)),
    toll_free=PhoneNumberDesc(national_number_pattern='1(?:1[01]|[78]0)', example_number='110', possible_length=(3,)),
    emergency=PhoneNumberDesc(national_number_pattern='1(?:1[01]|[78]0)', example_number='110', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='1(?:1[01]|[78]0)|4443', example_number='110', possible_length=(3, 4)),
    carrier_specific=PhoneNumberDesc(national_number_pattern='444\\d', example_number='4440', possible_length=(4,)),
    sms_services=PhoneNumberDesc(national_number_pattern='444\\d', example_number='4440', possible_length=(4,)),
    short_data=True)
|
{
"content_hash": "0c32e6881ee2b640ef66e1e8586f2b98",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 126,
"avg_line_length": 85.18181818181819,
"alnum_prop": 0.7246531483457844,
"repo_name": "daviddrysdale/python-phonenumbers",
"id": "996c27b2ad384d7158e895b32964c8f90f833aad",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/shortdata/region_CI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3898"
},
{
"name": "Makefile",
"bytes": "9034"
},
{
"name": "Python",
"bytes": "22052087"
},
{
"name": "Ruby",
"bytes": "237"
}
],
"symlink_target": ""
}
|
import os
import shutil
from plenum.cli.constants import NO_ENV
from plenum.common.util import createDirIfNotExists
from sovrin_client.client.wallet.wallet import Wallet
def testRestoreWalletFromMinimalGoLive(aliceCLI):
    """Copy a pre-baked 'minimal go-live' wallet file into the CLI's
    no-environment wallets directory and verify it can be restored
    into an active Wallet instance."""
    sample_name = "wallet_from_minimal_go_live"
    here = os.path.dirname(os.path.realpath(__file__))
    source_wallet = os.path.join(here, sample_name)
    dest_dir = os.path.join(aliceCLI.getWalletsBaseDir(), NO_ENV)
    createDirIfNotExists(dest_dir)
    shutil.copy2(source_wallet, dest_dir)
    target_path = os.path.join(dest_dir, sample_name)
    restored = aliceCLI.restoreWalletByPath(target_path)
    assert restored and isinstance(aliceCLI.activeWallet, Wallet)
|
{
"content_hash": "8239d5cb9efe04aeb720779a61de0802",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 41.5,
"alnum_prop": 0.7938420348058902,
"repo_name": "keenondrums/sovrin-node",
"id": "203c3486f2f2cd5069aa1a8dbf5221b6c96de440",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sovrin_client/test/cli/test_restore_wallet_from_mgl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1088655"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "15720"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from mparts.manager import Task
from mparts.host import HostInfo, SourceFileProvider, STDERR
from mparts.util import Progress
from support import ResultsProvider, SetCPUs, IXGBE, FileSystem, \
ExplicitSystemMonitor
import postgres
import os, time, signal, re, math
__all__ = []

# XXX This warmup sometimes isn't enough.  Watch the log for when it's
# ready, then warmup.
WARMUP = 5      # seconds slept before monitoring starts (see wait())
DURATION = 15   # seconds of measured benchmark run

__all__.append("PostgresLoad")
class PostgresLoad(Task, ResultsProvider, SourceFileProvider,
                   postgres.PGOptsProvider):
    """Benchmark task driving the pgload client against Postgres.

    start() builds pgload on the load-generator host and creates and
    populates the benchmark database if missing; wait() runs the timed
    benchmark (WARMUP seconds of warmup, then DURATION measured seconds
    delimited by SIGUSR1/SIGUSR2 sent to pgload) and records the total
    query count as the result.
    """

    __info__ = ["host", "trial", "clients", "rows", "partitions",
                "batchSize", "randomWritePct", "*sysmonOut"]

    def __init__(self, host, trial, pg, cores, clients, rows, partitions,
                 batchSize, randomWritePct, sysmon):
        Task.__init__(self, host = host, trial = trial)
        ResultsProvider.__init__(self, cores)
        # XXX Use this elsewhere
        self.setInfoAttrs(PostgresLoad, locals())
        self.pg = pg
        self.sysmon = sysmon
        # The database name encodes rows/partitions so differently
        # sized datasets never collide.
        self.__dbname = "pg%d-%d" % (self.rows, self.partitions)
        self.__pgload = self.queueSrcFile(host, "pgload")
        self.queueSrcFile(host, "libload")
        if trial != 0:
            # Only start one PostgresLoad object per data point
            self.start = None

    def getPGOpts(self, pg):
        # The load generator is a separate host, so the server must
        # listen on all interfaces.
        return {"listen_addresses": "*"}

    def __cmd(self, *args):
        """Build a pgload command line with the connection info."""
        return [os.path.join(self.__pgload, "pgload"),
                "-p", self.pg.conninfo(self.host,
                                       dbname = self.__dbname)] + list(args)

    def start(self):
        # Build (We rely on the host's libpq here!  Without copying
        # all of Postgres over and building it, there's no way to
        # reliably get a proper libpq.  We can't even statically link
        # pgload locally and send that over because the glibc name
        # system broke static linking.  Besides, we're not picky about
        # our libpq.)
        self.host.r.run(["make", "-C", self.__pgload],
                        stdout = STDERR)

        # Construct table names
        if self.partitions == 0:
            tables = ["simplebench"]
        else:
            tables = ["simplebench%d" % n for n in range(self.partitions)]

        # Create database
        if not self.pg.dbExists(self.__dbname):
            self.pg.createDB(self.__dbname)

        # Populate database
        if not self.pg.tableExists(tables[0], self.__dbname):
            self.host.r.run(
                self.__cmd("create", "--rows=%d" % self.rows,
                           "--partitions=%d" % self.partitions),
                stdout = STDERR)

        # Prefetch
        with Progress("Prefetching tables"):
            for t in tables:
                self.pg.psql("select * from " + t, dbname = self.__dbname,
                             discard = True)
                self.pg.psql("vacuum " + t, dbname = self.__dbname,
                             discard = True)

    def wait(self):
        cmd = self.__cmd("bench")
        for arg in ["rows", "partitions", "clients", "batchSize",
                    "randomWritePct"]:
            cmd.extend(["--" + arg.lower(), str(getattr(self, arg))])

        # Run
        logPath = self.host.getLogPath(self)
        l = self.host.r.run(cmd, stdout = logPath, wait = False)

        # Wait for warmup duration
        time.sleep(WARMUP)

        # Start monitoring: SIGUSR1 tells pgload to reset its counters.
        l.kill(signal.SIGUSR1)
        self.sysmon.startMonitor()

        # Check that pgload hasn't died on us, rather than find out at
        # the end of the run
        l.wait(poll = True)

        # Wait for run duration
        time.sleep(DURATION)

        # Stop monitoring
        l.kill(signal.SIGUSR2)
        self.sysmonOut = self.sysmon.stopMonitor()

        # Cleanup pgload
        time.sleep(1)
        l.kill(signal.SIGINT)
        l.wait()

        # Get result
        log = self.host.r.readFile(logPath)
        ms = re.findall(r"(?m)^\[SIG\] ([0-9]+) total queries", log)
        if len(ms) != 1:
            # Bug fix: the count was previously passed as a second
            # RuntimeError argument instead of being %-formatted into
            # the message, so the error text never showed the number.
            raise RuntimeError("Expected 1 query count in log, got %d"
                               % len(ms))
        self.setResults(int(ms[-1]), "query", "queries",
                        self.sysmonOut["time.real"])
def getBuild(cfg):
    """Map a benchmark configuration to a Postgres build name.

    The name encodes the sleep mode ("pg-sysv" / "pg-posix") plus the
    optional lightweight-lock and lock-manager scalability patches.
    cfg.lockScale is only valid together with cfg.lwScale.
    """
    if cfg.sleep not in ("sysv", "posix"):
        raise ValueError(
            "Postgres sleep mode must be sysv or posix, got %r" % cfg.sleep)
    parts = ["pg-%s" % cfg.sleep]
    if cfg.lwScale:
        parts.append("lwscale")
    if cfg.lockScale:
        if not cfg.lwScale:
            raise ValueError("lockscale requires lwscale")
        parts.append("lockscale")
    return "-".join(parts)
class PostgresRunner(object):
    """Top-level driver: wires up the server host, load generator,
    filesystem, NIC queues, and system monitor, then schedules one
    PostgresLoad task per trial and runs the manager."""

    def __str__(self):
        return "postgres"

    @staticmethod
    def run(m, cfg):
        host = cfg.primaryHost
        loadgen = cfg.postgresClient
        m += host
        m += loadgen
        m += HostInfo(host)
        # Creating the db takes time, so we don't clean the file
        # system.  We avoid any cruft that may be there already by
        # putting the DB in a subdirectory.
        fs = FileSystem(host, cfg.fs, clean = False)
        m += fs
        dbdir = fs.path + "0/postgres"
        pgPath = os.path.join(cfg.benchRoot, "postgres")
        pgBuild = getBuild(cfg)
        pgOpts = {"shared_buffers": postgres.PGVal(cfg.bufferCache, "MB")}
        log2NumLockPartitions = int(math.log(cfg.lockPartitions, 2))
        if cfg.lockPartitions != 1 << log2NumLockPartitions:
            # Bug fix: this previously read cfg.numLockPartitions, an
            # attribute that does not exist, turning the intended
            # ValueError into an AttributeError.
            raise ValueError("numLockPartitions must be a power of 2, got %r" %
                             cfg.lockPartitions)
        pgOpts["log2_num_lock_partitions"] = log2NumLockPartitions
        if cfg.sleep == "sysv":
            pgOpts["semas_per_set"] = cfg.semasPerSet
        pg = postgres.Postgres(host, pgPath, pgBuild, dbdir,
                               malloc = cfg.malloc, **pgOpts)
        m += postgres.InitDB(host, pg).addTrust(loadgen)
        m += pg
        if cfg.hotplug:
            # Because the number of cores and the number of clients is
            # the same, we don't strictly need hotplug
            m += SetCPUs(host = host, num = cfg.cores)
        # XXX Make configurable (at least iface name)
        # m += IXGBE(host, "eth0", queues = "n*NCPU/(NRX if rx else NTX)")
        # The ixgbe driver assigns flows to queues sequentially.
        # Since we only have cfg.cores flows, make sure a sequential
        # assignment spans all the online cores.  However, this does
        # not spread things out if we have more queues than cores.
        m += IXGBE(host, "eth0", queues = "n%min(NCPU, NRX if rx else NTX)")
        sysmon = ExplicitSystemMonitor(host)
        m += sysmon
        for trial in range(cfg.trials):
            m += PostgresLoad(loadgen, trial, pg, cfg.cores, cfg.cores,
                              cfg.rows, cfg.partitions, cfg.batchSize,
                              cfg.randomWritePct, sysmon)
        m.run()
# Module-level singleton picked up by the benchmark driver.
__all__.append("runner")
runner = PostgresRunner()
|
{
"content_hash": "26ebdea17be81f337fc7bfc97f46dabc",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 79,
"avg_line_length": 36.37948717948718,
"alnum_prop": 0.5661122074992951,
"repo_name": "KMU-embedded/mosbench-ext",
"id": "90e7a6b78d95c211df1e59528cc7d614baacc90c",
"size": "7094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postgres/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "8491"
},
{
"name": "Awk",
"bytes": "45243"
},
{
"name": "Batchfile",
"bytes": "15130"
},
{
"name": "C",
"bytes": "38923116"
},
{
"name": "C++",
"bytes": "644544"
},
{
"name": "CSS",
"bytes": "38896"
},
{
"name": "DTrace",
"bytes": "12271"
},
{
"name": "Erlang",
"bytes": "312670"
},
{
"name": "Frege",
"bytes": "146785"
},
{
"name": "Groff",
"bytes": "255736"
},
{
"name": "HTML",
"bytes": "1026176"
},
{
"name": "Lex",
"bytes": "149807"
},
{
"name": "Makefile",
"bytes": "368369"
},
{
"name": "Objective-C",
"bytes": "20461"
},
{
"name": "PLpgSQL",
"bytes": "808278"
},
{
"name": "Perl",
"bytes": "336526"
},
{
"name": "Perl6",
"bytes": "11115"
},
{
"name": "Prolog",
"bytes": "11284"
},
{
"name": "Python",
"bytes": "198848"
},
{
"name": "SQLPL",
"bytes": "105796"
},
{
"name": "Shell",
"bytes": "982753"
},
{
"name": "SourcePawn",
"bytes": "6894"
},
{
"name": "TeX",
"bytes": "2582"
},
{
"name": "XS",
"bytes": "4040"
},
{
"name": "XSLT",
"bytes": "10992"
},
{
"name": "Yacc",
"bytes": "569728"
}
],
"symlink_target": ""
}
|
from zope.interface import Interface, Attribute
class IPlugin(Interface):
    """Base interface for all plugins and all plugin interfaces. Classes that
    do not implement ``IPlugin`` cannot be used as plugin objects.
    """
    name = Attribute("Unique plugin object name")
class IStorable(IPlugin):
    """Interface for plugins with state that must be persisted: dumped on
    unload and fed back via ``load`` when the plugin is loaded again.
    """
    def dump():
        """Returns the string that will be stored"""

    def load(data):
        """Called with data - the string returned by the last `dump` call"""
class IInitialize(IPlugin):
    """Initialize the plugin when it is loaded"""
    def initialize():
        """Method called after loading the plugin"""
class IFinalize(IPlugin):
    """Finalize the plugin when it is unloaded"""
    def finalize():
        """Method called just before the plugin is unloaded"""
class ICustomChannelsHandler(IPlugin):
    """Handle signals for chosen channels"""
    def accepts_channel(channel):
        """Returns ``True`` if the plugin is allowed to handle events for the
        given channel, ``False`` otherwise.
        """
class IPeriodic(IPlugin):
    """Call a periodic action handler"""
    # NOTE(review): presumably seconds between calls -- confirm against
    # the scheduler that consumes this attribute.
    sleep_time = Attribute("Time between calls to `periodic_handler`")
    def periodic_handler(protocols):
        """Callback handler for periodic actions"""
class IActionHandler(IPlugin):
    """Handle a chosen IRC message"""
    def accepts_action(action):
        """Returns ``True`` if the plugin is allowed to handle the given
        action, ``False`` otherwise.
        """

    def handle_action(protocol, action, user, message):
        """Handle the action"""
class ILineReceiver(IPlugin):
    """Plugin that handles all messages sent by the server"""
    def handle_line(protocol, line):
        """Handle a line received by the given protocol instance"""
|
{
"content_hash": "6041c11827e42dd4eacf10ee0c12406e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 25.2972972972973,
"alnum_prop": 0.6629273504273504,
"repo_name": "gflerm/python-irc-bot",
"id": "bd3080c80ce91bf87636ebedccddb450bce9ad68",
"size": "1916",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/core/plugins/interface.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
<Program Name>
test_secondary.py
<Purpose>
Unit testing for uptane/clients/secondary.py
Much of this is modified from test_primary.py.
<Copyright>
See LICENSE for licensing information.
"""
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import unittest
import os.path
import time
import shutil
import hashlib
import iso8601
from six.moves.urllib.error import URLError
import tuf
import tuf.formats
import tuf.conf
import tuf.client.updater
import uptane.formats
import uptane.clients.secondary as secondary
import uptane.common # verify sigs, create client dir structure, convert key
import uptane.encoding.asn1_codec as asn1_codec
from uptane.encoding.asn1_codec import DATATYPE_TIME_ATTESTATION
from uptane.encoding.asn1_codec import DATATYPE_ECU_MANIFEST
# For temporary convenience:
import demo # for generate_key, import_public_key, import_private_key
# Locations of sample metadata shipped with the repository, used as the
# trusted starting point (initial root metadata) for the test clients.
SAMPLES_DIR = os.path.join(uptane.WORKING_DIR, 'samples')
TEST_DIRECTOR_ROOT_FNAME = os.path.join(
    SAMPLES_DIR, 'metadata_samples_long_expiry', 'initial_w_no_update',
    'full_metadata_archive', 'director', 'metadata',
    'root.' + tuf.conf.METADATA_FORMAT)
TEST_IMAGE_REPO_ROOT_FNAME = os.path.join(
    SAMPLES_DIR, 'metadata_samples_long_expiry', 'initial_w_no_update',
    'full_metadata_archive', 'imagerepo', 'metadata',
    'root.' + tuf.conf.METADATA_FORMAT)
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEST_PINNING_FNAME = os.path.join(TEST_DATA_DIR, 'pinned.json')
# One scratch client directory per Secondary instance under test.
TEMP_CLIENT_DIRS = [
    os.path.join(TEST_DATA_DIR, 'temp_test_secondary0'),
    os.path.join(TEST_DATA_DIR, 'temp_test_secondary1'),
    os.path.join(TEST_DATA_DIR, 'temp_test_secondary2')]
# I'll initialize these in the __init__ test, and use this for the simple
# non-damaging tests so as to avoid creating objects all over again.
secondary_instances = [None, None, None]
# Changing these values would require producing new signed test data from the
# Timeserver (in the case of nonce) or a Secondary (in the case of the others).
nonce = 5
vins = ['democar', 'democar', '000']
ecu_serials = ['TCUdemocar', '00000', '00000']
# Set starting firmware fileinfo (that this ECU had coming from the factory)
# It will serve as the initial firmware state for the Secondary clients.
factory_firmware_fileinfo = {
    'filepath': '/secondary_firmware.txt',
    'fileinfo': {
        'hashes': {
            'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce',
            'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44'},
        'length': 37}}
# Fileinfo that client 0 is expected to have validated after the sample
# metadata archive is processed in test_40_process_metadata below.
expected_updated_fileinfo = {
    'filepath': '/TCU1.1.txt',
    'fileinfo': {
        'custom': {'ecu_serial': 'TCUdemocar'},
        'hashes': {
            'sha512': '94d7419b8606103f363aa17feb875575a978df8e88038ea284ff88d90e534eaa7218040384b19992cc7866f5eca803e1654c9ccdf3b250d6198b3c4731216db4',
            'sha256': '56d7cd56a85e34e40d005e1f79c0e95d6937d5528ac0b301dbe68d57e03a5c21'},
        'length': 17}}
def destroy_temp_dir():
  """Delete the temporary test client directories left over from any run."""
  for path in TEMP_CLIENT_DIRS:
    if os.path.exists(path):
      shutil.rmtree(path)
class TestSecondary(unittest.TestCase):
  """
  "unittest"-style test class for the Secondary module in the reference
  implementation
  Note that these tests are NOT entirely independent of each other.
  Several of them build on the results of previous tests. This is an unusual
  pattern but saves code and works at least for now.
  """
  # Class variables, populated once in setUpClass and read by every test.
  secondary_ecu_key = None
  key_timeserver_pub = None
  key_timeserver_pri = None
  key_directortargets_pub = None
  initial_time = None
  @classmethod
  def setUpClass(cls):
    """
    This is run once for the full class (and so the full module, which contains
    only one class), before all tests. It prepares some variables and stores
    them in the class.
    """
    destroy_temp_dir()
    # Load the private key for this Secondary ECU.
    cls.secondary_ecu_key = uptane.common.canonical_key_from_pub_and_pri(
        demo.import_public_key('secondary'),
        demo.import_private_key('secondary'))
    # Load the public timeserver key.
    cls.key_timeserver_pub = demo.import_public_key('timeserver')
    cls.key_timeserver_pri = demo.import_private_key('timeserver')
    # Load the public director key.
    cls.key_directortargets_pub = demo.import_public_key('director')
    # Generate a trusted initial time for the Secondaries.
    cls.initial_time = tuf.formats.unix_timestamp_to_datetime(
        int(time.time())).isoformat() + 'Z'
    tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(cls.initial_time)
    # Set up client directories for the two Secondaries, containing the
    # initial root.json and root.der (both, for good measure) metadata files
    # so that the clients can validate further metadata they obtain.
    # NOTE that running multiple clients in the same Python process does not
    # work normally in the reference implementation, as the value of
    # tuf.conf.repository_directories is client-specific, and it is set during
    # uptane.common.create_directory_structure_for_client, and used when a
    # client is created (initialization of a Secondary in our case)
    # We're going to cheat in this test module for the purpose of testing
    # and update tuf.conf.repository_directories before each Secondary is
    # created, to refer to the client we're creating.
    for client_dir in TEMP_CLIENT_DIRS:
      uptane.common.create_directory_structure_for_client(
          client_dir,
          TEST_PINNING_FNAME,
          {'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,
          'director': TEST_DIRECTOR_ROOT_FNAME})
  @classmethod
  def tearDownClass(cls):
    """This is run once for the full class (and so the full module, which
    contains only one class), after all tests."""
    destroy_temp_dir()
  def test_01_init(self):
    """
    Tests uptane.clients.secondary.Secondary::__init__()
    Note that this doesn't test the root files provided to the constructor, as
    those aren't used at all in the initialization; those will be tested by
    attempting an update in the test for process_metadata below.
    """
    # TODO: Test with invalid pinning file
    # TODO: Test with pinning file lacking a Director repo.
    # Now try creating a Secondary with a series of bad arguments, expecting
    # errors.
    # Invalid full_client_dir
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=42,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # TODO: Test providing a nonexistent directory for full_client_dir
    # TODO: Test providing the wrong directory for full_client_dir.
    #       Both of these tests may require saving additional clients and
    #       running the later tests with them.
    # Invalid director_repo_name
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=42,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Unknown director_repo_name
    with self.assertRaises(uptane.Error):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name='string_that_is_not_a_known_repo_name',
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Invalid VIN:
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=5,
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Invalid ECU Serial
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=500,
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Invalid ECU Key
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key={''},
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Invalid initial time:
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time='potato',
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=TestSecondary.key_directortargets_pub,
          partial_verifying=False)
    # Invalid director_public_key:
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key={''},
          partial_verifying=False)
    # Inconsistent arguments, partial_verifying and director_public_key.
    # partial verification requires a director_public_key argument, as it does
    # not use the normal trust chain. Providing a director_public_key when not
    # performing partial verification makes no sense, as the keys to be used
    # for full verification are determined based on the root metadata file.
    with self.assertRaises(uptane.Error):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=TestSecondary.key_directortargets_pub,
          partial_verifying=False)
    with self.assertRaises(uptane.Error):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=True)
    # Invalid timeserver key
    with self.assertRaises(tuf.FormatError):
      secondary.Secondary(
          full_client_dir=TEMP_CLIENT_DIRS[0],
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vins[0],
          ecu_serial=ecu_serials[0],
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.initial_time, # INVALID
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
    # Try initializing three Secondaries, expecting the three calls to work.
    # Save the instances for future tests as class variables to save time and
    # code.
    # Recall that, as mentioned in a comment in the SetUpClass method, running
    # multiple reference implementation updater clients simultaneously in the
    # same Python process is not supported, and we're going to engage in the
    # hack of swapping tuf.conf.repository_directories back and forth to make
    # it work for these tests.
    # Initialize three clients and perform checks on each of them.
    for i in range(0, len(TEMP_CLIENT_DIRS)):
      client_dir = TEMP_CLIENT_DIRS[i]
      ecu_serial = ecu_serials[i]
      vin = vins[i]
      # Try initializing each of three secondaries, expecting these calls to
      # work. Save the instances for future tests as elements in a module list
      # variable(secondary_instances) to save time and code.
      tuf.conf.repository_directory = client_dir
      secondary_instances[i] = secondary.Secondary(
          full_client_dir=client_dir,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=vin,
          ecu_serial=ecu_serial,
          ecu_key=TestSecondary.secondary_ecu_key,
          time=TestSecondary.initial_time,
          timeserver_public_key=TestSecondary.key_timeserver_pub,
          firmware_fileinfo=factory_firmware_fileinfo,
          director_public_key=None,
          partial_verifying=False)
      instance = secondary_instances[i]
      # Check the fields initialized in the instance to make sure they're correct.
      # Fields initialized from parameters
      self.assertEqual(client_dir, instance.full_client_dir)
      self.assertEqual(demo.DIRECTOR_REPO_NAME, instance.director_repo_name)
      self.assertEqual(vin, instance.vin)
      self.assertEqual(ecu_serial, instance.ecu_serial)
      self.assertEqual(TestSecondary.secondary_ecu_key, instance.ecu_key)
      self.assertEqual(
          TestSecondary.initial_time, instance.all_valid_timeserver_times[0])
      self.assertEqual(
          TestSecondary.initial_time, instance.all_valid_timeserver_times[1])
      self.assertEqual(
          TestSecondary.key_timeserver_pub, instance.timeserver_public_key)
      self.assertTrue(None is instance.director_public_key)
      self.assertFalse(instance.partial_verifying)
      # Fields initialized, but not directly with parameters
      self.assertTrue(None is instance.last_nonce_sent)
      self.assertTrue(instance.nonce_next) # Random value
      self.assertIsInstance(
          instance.updater, tuf.client.updater.Updater)
      # Now, fix the updater's pinned metadata, since the pinned metadata we
      # fed in was actually for the Primary (which connects to central
      # services) instead of for the Secondary (which obtains metadata and
      # images via TUF from an unverified local directory, then validates
      # them). Do this for both clients.
      # The location of the files will be as follows, after the sample
      # metadata archive is expanded (in test 40 below):
      # TODO: Determine if this code should be adjusted to use os.path.join(),
      # or if that's not appropriate for file:// links.
      image_repo_mirror = ['file://' + client_dir + '/unverified/imagerepo']
      director_mirror = ['file://' + client_dir + '/unverified/director']
      if vin == '000':
        # Simulate unavailable Director repo for the third Secondary
        director_mirror[0] += '/nonexistent_directory'
      repository_urls = instance.updater.pinned_metadata['repositories']
      repository_urls['imagerepo']['mirrors'] = image_repo_mirror
      repository_urls['director']['mirrors'] = director_mirror
      # Also fix the copied pinned metadata in the individual repo updaters
      # in the updater.
      instance.updater.repositories['imagerepo'].mirrors = image_repo_mirror
      instance.updater.repositories['director'].mirrors = director_mirror
  def test_10_nonce_rotation(self):
    """
    Tests two uptane.clients.secondary.Secondary methods:
      - change_nonce()
      - set_nonce_as_sent()
    """
    # We'll just test one of the three client instances, since it shouldn't
    # make a difference.
    instance = secondary_instances[0]
    old_nonce = instance.nonce_next
    instance.change_nonce()
    # Collision is unlikely in the next line (new random nonce equal to
    # previous).
    self.assertNotEqual(old_nonce, instance.nonce_next)
    instance.set_nonce_as_sent()
    self.assertEqual(instance.last_nonce_sent, instance.nonce_next)
  def test_20_update_time(self):
    """
    Tests uptane.clients.secondary.Secondary::update_time()
    """
    # We'll just test one of the three client instances, since it shouldn't
    # make a difference.
    instance = secondary_instances[0]
    # Try a good time attestation first, signed by an expected timeserver key,
    # with an expected nonce (previously "received" from a Secondary)
    original_time_attestation = time_attestation = {
        'signed': {'nonces': [nonce], 'time': '2016-11-02T21:06:05Z'},
        'signatures': [{
          'method': 'ed25519',
          'sig': 'aabffcebaa57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
          'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    # Make sure that the Secondary thinks that it sent the nonce listed in the
    # sample data above.
    instance.last_nonce_sent = nonce
    if tuf.conf.METADATA_FORMAT == 'der':
      # Convert this time attestation to the expected ASN.1/DER format.
      time_attestation = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION,
          private_key=TestSecondary.key_timeserver_pri, resign=True)
    # Check expected base conditions before updating time:
    # The only timeserver times registered should be two "now"s added during
    # initialization. Because the clock override is a module variable in TUF,
    # its value (whether None or already set) depends on whether or not other
    # tests resulting in time attestation verification have occurred (e.g.
    # those for the Primary).
    self.assertEqual(2, len(instance.all_valid_timeserver_times))
    # If the time_attestation is not deemed valid, an exception will be raised.
    instance.update_time(time_attestation)
    # Check results.
    self.assertEqual(3, len(instance.all_valid_timeserver_times))
    # self.assertIsNotNone(tuf.conf.CLOCK_OVERRIDE)
    self.assertEqual(
        int(tuf.formats.datetime_to_unix_timestamp(iso8601.parse_date(
        '2016-11-02T21:06:05Z'))), tuf.conf.CLOCK_OVERRIDE)
    # Prepare to try again with a bad signature.
    # This test we will conduct differently depending on TUF's current format:
    if tuf.conf.METADATA_FORMAT == 'der':
      # Fail to re-sign the DER, so that the signature is over JSON instead,
      # which results in a bad signature.
      time_attestation__badsig = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION, resign=False)
    else: # 'json' format
      # Rewrite the first 9 digits of the signature ('sig') to something
      # invalid.
      time_attestation__badsig = {
          'signed': {'nonces': [nonce], 'time': '2016-11-02T21:06:05Z'},
          'signatures': [{
            'method': 'ed25519',
            'sig': '987654321a57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
            'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    # Now actually perform the bad signature test.
    with self.assertRaises(tuf.BadSignatureError):
      instance.update_time(time_attestation__badsig)
    # Check results. The bad attestation should change none of these.
    self.assertEqual(3, len(instance.all_valid_timeserver_times))
    # self.assertIsNotNone(tuf.conf.CLOCK_OVERRIDE)
    self.assertEqual(
        int(tuf.formats.datetime_to_unix_timestamp(iso8601.parse_date(
        '2016-11-02T21:06:05Z'))), tuf.conf.CLOCK_OVERRIDE)
    self.assertNotEqual(500, nonce, msg='Programming error: bad and good '
        'test nonces are equal.')
    time_attestation__wrongnonce = {
        'signed': {'nonces': [500], 'time': '2016-11-02T21:15:00Z'},
        'signatures': [{
          'method': 'ed25519',
          'sig': '4d01df35ca829fd7ead1408c250950c444db8ac51fa929a7f0288578fbf81016f0e81ed35789689481aee6b7af28ab311306397ef38572732854fb6cf2072604',
          'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    if tuf.conf.METADATA_FORMAT == 'der':
      # Convert this time attestation to the expected ASN.1/DER format.
      time_attestation__wrongnonce = asn1_codec.convert_signed_metadata_to_der(
          time_attestation__wrongnonce, DATATYPE_TIME_ATTESTATION,
          private_key=TestSecondary.key_timeserver_pri, resign=True)
    with self.assertRaises(uptane.BadTimeAttestation):
      instance.update_time(time_attestation__wrongnonce)
    # TODO: Consider other tests here.
  def test_25_generate_signed_ecu_manifest(self):
    """
    Tests uptane.clients.secondary.Secondary::generate_signed_ecu_manifest()
    """
    # We'll just test one of the three client instances, since it shouldn't
    # make a difference.
    ecu_manifest = secondary_instances[0].generate_signed_ecu_manifest()
    # If the ECU Manifest is in DER format, check its format and then
    # convert back to JSON so that we can inspect it further.
    if tuf.conf.METADATA_FORMAT == 'der':
      uptane.formats.DER_DATA_SCHEMA.check_match(ecu_manifest)
      ecu_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
          ecu_manifest, DATATYPE_ECU_MANIFEST)
    # Now it's not in DER format, whether or not it started that way.
    # Check its format and inspect it.
    uptane.formats.SIGNABLE_ECU_VERSION_MANIFEST_SCHEMA.check_match(
        ecu_manifest)
    # Test contents of the ECU Manifest.
    # Make sure there is exactly one signature. (Not specified by the
    # Implementation Specification, but the way we do it. Using more is
    # unlikely to be particularly useful).
    self.assertEqual(1, len(ecu_manifest['signatures']))
    # TODO: Check some values from the ECU Manifest
    # Check the signature on the ECU Manifest.
    self.assertTrue(uptane.common.verify_signature_over_metadata(
        TestSecondary.secondary_ecu_key,
        ecu_manifest['signatures'][0], # TODO: Deal with 1-sig assumption?
        ecu_manifest['signed'],
        DATATYPE_ECU_MANIFEST))
  def test_40_process_metadata(self):
    """
    Tests uptane.clients.secondary.Secondary::process_metadata()
    Tests three clients:
      - secondary_instances[0]: an update is provided in Director metadata
      - secondary_instances[1]: no update is provided in Director metadata
      - secondary_instances[2]: no Director metadata can be retrieved
    """
    # --- Test this test module's setup (defensive)
    # Check that the clients' metadata directories have the right
    # properties -- that the correct root metadata file was transferred to the
    # client directories when the directories were created by the
    # create_directory_structure_for_client() calls in setUpClass above, and
    # only the root metadata file.
    for client_dir in TEMP_CLIENT_DIRS:
      for repo in ['director', 'imagerepo']:
        self.assertEqual(
            ['root.' + tuf.conf.METADATA_FORMAT],
            sorted(os.listdir(os.path.join(
            client_dir, 'metadata', repo, 'current'))))
    # --- Set up this test
    # Location of the sample Primary-produced metadata archive
    sample_archive_fname = os.path.join(
        uptane.WORKING_DIR, 'samples', 'metadata_samples_long_expiry',
        'update_to_one_ecu', 'full_metadata_archive.zip')
    assert os.path.exists(sample_archive_fname), 'Cannot test ' \
        'process_metadata; unable to find expected sample metadata archive' + \
        ' at ' + repr(sample_archive_fname)
    # Continue set-up followed by the test, per client.
    for i in range(0, len(TEMP_CLIENT_DIRS)):
      client_dir = TEMP_CLIENT_DIRS[i]
      instance = secondary_instances[i]
      # Make sure TUF uses the right client directory.
      # Hack to allow multiple clients to run in the same Python process.
      # See comments in SetUpClass() method.
      tuf.conf.repository_directory = client_dir
      # Location in the client directory to which we'll copy the archive.
      archive_fname = os.path.join(client_dir, 'full_metadata_archive.zip')
      # Copy the sample archive into place in the client directory.
      shutil.copy(sample_archive_fname, archive_fname)
      # --- Perform the test
      # Process this sample metadata.
      if instance is secondary_instances[2]:
        # Expect the update to fail for the third Secondary client.
        with self.assertRaises(tuf.NoWorkingMirrorError):
          instance.process_metadata(archive_fname)
        continue
      else:
        instance.process_metadata(archive_fname)
      # Make sure the archive of unverified metadata was expanded
      for repo in ['director', 'imagerepo']:
        for role in ['root', 'snapshot', 'targets', 'timestamp']:
          self.assertTrue(os.path.exists(client_dir + '/unverified/' + repo +
              '/metadata/' + role + '.' + tuf.conf.METADATA_FORMAT))
    # Verify the results of the test, which are different for the three clients.
    # First: Check the top-level metadata files in the client directories.
    # For clients 0 and 1, we expect root, snapshot, targets, and timestamp for
    # both director and image repo.
    for client_dir in [TEMP_CLIENT_DIRS[0], TEMP_CLIENT_DIRS[1]]:
      for repo in ['director', 'imagerepo']:
        self.assertEqual([
            'root.' + tuf.conf.METADATA_FORMAT,
            'snapshot.' + tuf.conf.METADATA_FORMAT,
            'targets.' + tuf.conf.METADATA_FORMAT,
            'timestamp.' + tuf.conf.METADATA_FORMAT],
            sorted(os.listdir(os.path.join(client_dir, 'metadata', repo,
            'current'))))
    # For client 2, we are certain that Director metadata will have failed to
    # update. Image Repository metadata may or may not have updated before the
    # Director repository update failure, so we don't check that. Client 2
    # started with root metadata for the Director repository, so that is all
    # we expect to find.
    self.assertEqual(
        ['root.' + tuf.conf.METADATA_FORMAT],
        sorted(os.listdir(os.path.join(TEMP_CLIENT_DIRS[2], 'metadata',
        'director', 'current'))))
    # Second: Check targets each Secondary client has been instructed to
    # install (and has in turn validated).
    # Client 0 should have validated expected_updated_fileinfo.
    self.assertEqual(
        expected_updated_fileinfo,
        secondary_instances[0].validated_targets_for_this_ecu[0])
    # Clients 1 and 2 should have no validated targets.
    self.assertFalse(secondary_instances[1].validated_targets_for_this_ecu)
    self.assertFalse(secondary_instances[2].validated_targets_for_this_ecu)
    # Finally, test behavior if the file we indicate does not exist.
    instance = secondary_instances[0]
    with self.assertRaises(uptane.Error):
      instance.process_metadata('some_file_that_does_not_actually_exist.xyz')
  def test_50_validate_image(self):
    """
    Tests uptane.clients.secondary.Secondary::validate_image()
    Client 0 (which validated an update for this image in test_40 above)
    should accept the sample image; clients 1 and 2 (with no validated
    targets) should reject it.
    """
    image_fname = 'TCU1.1.txt'
    sample_image_location = os.path.join(demo.DEMO_DIR, 'images')
    client_unverified_targets_dir = TEMP_CLIENT_DIRS[0] + '/unverified_targets'
    # Start from a clean unverified-targets directory and copy the sample
    # image into it, as a Primary would have distributed it.
    if os.path.exists(client_unverified_targets_dir):
      shutil.rmtree(client_unverified_targets_dir)
    os.mkdir(client_unverified_targets_dir)
    shutil.copy(
        os.path.join(sample_image_location, image_fname),
        client_unverified_targets_dir)
    secondary_instances[0].validate_image(image_fname)
    with self.assertRaises(uptane.Error):
      secondary_instances[1].validate_image(image_fname)
    with self.assertRaises(uptane.Error):
      secondary_instances[2].validate_image(image_fname)
# Run unit tests.
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e5aa3016399f500692c5412a39a98bbc",
"timestamp": "",
"source": "github",
"line_count": 757,
"max_line_length": 153,
"avg_line_length": 39.00792602377807,
"alnum_prop": 0.6887466558298622,
"repo_name": "uptane/uptane",
"id": "cdd92bde5686b5921426505304de2df750a74c14",
"size": "29529",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_secondary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1398"
},
{
"name": "Python",
"bytes": "335435"
}
],
"symlink_target": ""
}
|
from waterbutler import settings

config = settings.child('GITHUB_PROVIDER_CONFIG')

# API and web-view endpoints for GitHub.
BASE_URL = config.get('BASE_URL', 'https://api.github.com/')
VIEW_URL = config.get('VIEW_URL', 'https://github.com/')

# Commit messages recorded for operations WaterButler performs on the repo.
MOVE_MESSAGE = config.get('MOVE_MESSAGE', 'Moved on behalf of WaterButler')
COPY_MESSAGE = config.get('COPY_MESSAGE', 'Copied on behalf of WaterButler')
DELETE_FILE_MESSAGE = config.get('DELETE_FILE_MESSAGE', 'File deleted on behalf of WaterButler')
UPDATE_FILE_MESSAGE = config.get('UPDATE_FILE_MESSAGE', 'File updated on behalf of WaterButler')
UPLOAD_FILE_MESSAGE = config.get('UPLOAD_FILE_MESSAGE', 'File uploaded on behalf of WaterButler')
DELETE_FOLDER_MESSAGE = config.get('DELETE_FOLDER_MESSAGE', 'Folder deleted on behalf of WaterButler')

# At some point in the near(?) future git will be changing its internal hash
# function from SHA-1 to SHA-256. sha1-names are 40 hexdigits long and
# sha256-names are 64 hexdigits long. At that point, it seems probable that
# GitHub will update its API to accept both sha types. When that happens, the
# following config var will need to be updated to include both sizes.
#
# Example for passing multiple length values via an envvar on the command line:
#   $ GITHUB_PROVIDER_GITHUB_SHA_LENGTHS="40 64" invoke server
#
# Example setting in a .docker-compose.env (no quotes):
#   GITHUB_PROVIDER_GITHUB_SHA_LENGTHS=40 64
#
GITHUB_SHA_LENGTHS = list(map(int, config.get('GITHUB_SHA_LENGTHS', '40').split(' ')))

# Config For GitHub Rate Limiting
#
# Seconds to wait before making another attempt to add more tokens.
RL_TOKEN_ADD_DELAY = int(config.get('RL_TOKEN_ADD_DELAY', 1))
# Maximum number of available tokens (requests) allowed.
RL_MAX_AVAILABLE_TOKENS = float(config.get('RL_MAX_AVAILABLE_TOKENS', 10.0))
# Fraction of remaining requests to hold in reserve.
RL_RESERVE_RATIO = float(config.get('RL_RESERVE_RATIO', 0.2))
# Base number of requests to hold in reserve.
RL_RESERVE_BASE = int(config.get('RL_RESERVE_BASE', 100))
# Floor on the request rate, applied when near the reserve base.
RL_MIN_REQ_RATE = float(config.get('RL_MIN_REQ_RATE', 0.01))
|
{
"content_hash": "3d41a2ace6dd0fecddc362285330862f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 102,
"avg_line_length": 51.26190476190476,
"alnum_prop": 0.7473293079424059,
"repo_name": "felliott/waterbutler",
"id": "54e088c2102254de3f2cd5ec4f8c3f0561ab885e",
"size": "2153",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "waterbutler/providers/github/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "922"
},
{
"name": "Python",
"bytes": "1673806"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from .models import Unidade
from django.urls import reverse
# Create your views here.
def index(request):
    """Render the listing page with every Unidade."""
    context = {"unidades": Unidade.objects.all()}
    return render(request, "list_unidade.html", context)
def add(request):
    """Show the creation form on GET; otherwise create a Unidade from the
    POSTed fields and redirect back to the listing view."""
    if request.method != 'GET':
        nova = Unidade(unidade=request.POST['unidade'],
                       endereco=request.POST['endereco'])
        nova.save()
        return HttpResponseRedirect(reverse('unidade'))
    return render(request, "add_unidade.html")
def detail(request, unidade_id):
    """Render the detail page for one Unidade, returning 404 if it is absent."""
    obj = get_object_or_404(Unidade, pk=unidade_id)
    return render(request, "detail_unidade.html", {'unidade': obj})
|
{
"content_hash": "5a39d8714fcfa9819df7c7c31e3c3b90",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 89,
"avg_line_length": 35.31818181818182,
"alnum_prop": 0.7271557271557272,
"repo_name": "Bleno/sisgestor-django",
"id": "7646087e03a010025d2a8ea07559d09d4461d080",
"size": "777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unidade/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5967"
},
{
"name": "Python",
"bytes": "22758"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from EncryptDecrypt import views
# URL routes for the EncryptDecrypt app.
# NOTE(review): most patterns lack a trailing '$' anchor, so e.g. r'^Register/'
# also matches any longer path beginning with 'Register/'. Confirm this is
# intentional before tightening the regexes.
# NOTE(review): 'logut' looks like a typo for 'logout', but templates may
# reverse() it by this exact name, so renaming would be a breaking change.
urlpatterns = [
    url(r'^$', views.viewlogin, name='index'),
    url(r'^Register/', views.viewRegister, name='register'),
    url(r'^RegisterAct/', views.register, name='registeract'),
    url(r'^LoginAct/', views.login, name='loginact'),
    url(r'^inbox/', views.inbox, name='inbox'),
    url(r'^Sentmail/', views.viewSentMail, name='sent'),
    url(r'^trash/', views.viewTrash, name='trash'),
    url(r'^compose/', views.viewCompose, name='compose'),
    url(r'^download/([^/]+)/$', views.downloader,name="downs"),
    url(r'^sendmail/', views.sendmail,name="composed"),
    url(r'^logut/', views.logout, name='logut'),
]
|
{
"content_hash": "99c90cf950f861b3d87d18fa5322d337",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 33.52173913043478,
"alnum_prop": 0.582360570687419,
"repo_name": "Midhus/musicalcrypto",
"id": "081ce32bbae439d13a614b3dfc6a0a71677462f5",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EncryptDecrypt/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10035"
},
{
"name": "HTML",
"bytes": "44422"
},
{
"name": "JavaScript",
"bytes": "14889"
},
{
"name": "Python",
"bytes": "21548"
}
],
"symlink_target": ""
}
|
class Throwing:
    """DSL marker that records a factory used to construct an exception."""

    def __init__(self, exceptionFactory):
        """Remember *exceptionFactory*, a callable that builds the exception."""
        self.exceptionFactory = exceptionFactory
|
{
"content_hash": "0853b60d3a282b903c6d38d7d208ded8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 48,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.7102803738317757,
"repo_name": "haarcuba/testix",
"id": "4f48dfd65ba9194e391ebdaab0ce93a633e80994",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testix/DSL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46054"
},
{
"name": "Ruby",
"bytes": "2247"
},
{
"name": "Shell",
"bytes": "450"
},
{
"name": "Vim Script",
"bytes": "76189"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
try:
# Python 3
import tkinter as tk
except ImportError:
# Python 2
import Tkinter as tk
from PLOD import plotHandler as ph
from matplotlib import pyplot as plt
import subprocess
import numpy as np
class Control:
    """Tk control panel for matplotlib figures managed by a PLOD ``PlotHandler``.

    The window shows a dropdown of attached plots, entry boxes for the x/y
    axis limits, "Log" checkbuttons for the axis scales, and buttons to
    replot, save, save-all and close-all.
    """

    def __init__(self, master):
        """Create all widgets inside *master* (a Tk root or Toplevel)."""
        self.master = master
        self.plots = ph.PlotHandler()

        # GUI entries
        self.master.wm_title("Plot Designer")
        # Dropdown of attached plots; "None" is the empty sentinel entry.
        self.activePlot = tk.StringVar(self.master)
        self.activePlot.set("None")
        self.availablePlots = ["None"]
        self.availablePlotsMenu = tk.OptionMenu(
            self.master, self.activePlot, *self.availablePlots,
            command=self.changePlot)

        # Axis-limit entry boxes with their labels.
        self.xminLab = tk.Label(self.master, text="xmin:")
        self.xmin = tk.Entry(self.master, width=10)
        self.xmaxLab = tk.Label(self.master, text="xmax:")
        self.xmax = tk.Entry(self.master, width=10)
        self.yminLab = tk.Label(self.master, text="ymin:")
        self.ymin = tk.Entry(self.master, width=10)
        self.ymaxLab = tk.Label(self.master, text="ymax:")
        self.ymax = tk.Entry(self.master, width=10)

        # Check button for x-logscale
        self.xlogVar = tk.IntVar()
        self.xlog = tk.Checkbutton(self.master, text="Log",
                                   variable=self.xlogVar, onvalue=1, offvalue=0)
        # Check button for y-logscale
        self.ylogVar = tk.IntVar()
        self.ylog = tk.Checkbutton(self.master, text="Log",
                                   variable=self.ylogVar, onvalue=1, offvalue=0)

        # Action buttons.
        self.replotButton = tk.Button(self.master, text="Replot", command=self.replot)
        self.saveButton = tk.Button(self.master, text="Save", command=self.save)
        self.saveAllButton = tk.Button(self.master, text="Save all", command=self.saveall)
        self.closeAllButton = tk.Button(self.master, text="Close all", command=self.closeall)

        # Lay everything out and start with controls enabled.
        self.pack()
        self.isDisabled = False

    def attach(self, fig, ax, name):
        """Register figure *fig* / axes *ax* under *name* and make it active."""
        plt.show(block=False)
        self.plots.attach(fig, ax, name)
        self.availablePlots.append(name)
        self.activePlot.set(name)
        self.updateEntries(ax)
        # A tk.OptionMenu cannot grow in place, so rebuild it with the new list.
        self.availablePlotsMenu.destroy()
        self.availablePlotsMenu = tk.OptionMenu(
            self.master, self.activePlot, *self.availablePlots,
            command=self.changePlot)
        self.pack()

    def updateEntries(self, ax):
        """Refresh the limit entries and log checkbuttons from *ax*."""
        active = self.plots.getActive()
        if active is None:
            return
        xmin, xmax = ax.get_xlim()
        self.xmin.delete(0, tk.END)
        self.xmax.delete(0, tk.END)
        self.xmin.insert(0, xmin)
        self.xmax.insert(0, xmax)
        ymin, ymax = ax.get_ylim()
        self.ymin.delete(0, tk.END)
        self.ymax.delete(0, tk.END)
        self.ymin.insert(0, ymin)
        self.ymax.insert(0, ymax)
        # Update checkbuttons.
        # BUG FIX: the "linear" branch below previously tested get_yscale(),
        # so a linear x-axis paired with a log y-axis never cleared the x
        # checkbox. Both branches must look at the x scale.
        if active.ax.get_xscale() == "log":
            self.xlogVar.set(1)
        elif active.ax.get_xscale() == "linear":
            self.xlogVar.set(0)
        else:
            print("Warning! Did not recognize scale: %s" % (active.ax.get_xscale()))
        if active.ax.get_yscale() == "log":
            self.ylogVar.set(1)
        elif active.ax.get_yscale() == "linear":
            self.ylogVar.set(0)
        else:
            print("Warning! Did not recognize scale: %s" % (active.ax.get_yscale()))

    def updateXmin(self, value):
        """Push a new lower x limit to all plots in the handler."""
        self.plots.set_xlim(left=value)

    def updateXmax(self, value):
        """Push a new upper x limit to all plots in the handler."""
        self.plots.set_xlim(right=value)

    def updateYmin(self, value):
        """Push a new lower y limit to all plots in the handler."""
        self.plots.set_ylim(bottom=value)

    def updateYmax(self, value):
        """Push a new upper y limit to all plots in the handler."""
        self.plots.set_ylim(top=value)

    def _sync_limit(self, entry, setter, fallback):
        """Parse *entry* as a float and apply it via *setter*.

        On any failure (e.g. non-numeric text) print the error and reset the
        entry to the current limit returned by *fallback*().
        """
        try:
            setter(float(entry.get()))
        except Exception as exc:
            print(str(exc))
            entry.delete(0, tk.END)
            entry.insert(0, str(fallback()))

    def replot(self):
        """Apply entry values and scale toggles to the active plot, redraw."""
        active = self.plots.getActive()
        if active is None:
            return
        ax = active.ax
        # Apply each limit independently; a bad value in one box must not
        # prevent the others from taking effect.
        self._sync_limit(self.xmin, self.updateXmin, lambda: ax.get_xlim()[0])
        self._sync_limit(self.xmax, self.updateXmax, lambda: ax.get_xlim()[1])
        self._sync_limit(self.ymin, self.updateYmin, lambda: ax.get_ylim()[0])
        self._sync_limit(self.ymax, self.updateYmax, lambda: ax.get_ylim()[1])
        # Set log scales from the checkbuttons.
        ax.set_xscale("log" if self.xlogVar.get() == 1 else "linear")
        ax.set_yscale("log" if self.ylogVar.get() == 1 else "linear")
        plt.show(block=False)

    def pack(self):
        """Grid all widgets (row 0: menu, 1: x controls, 2: y, 3: buttons)."""
        self.availablePlotsMenu.grid(row=0)
        self.xminLab.grid(row=1, column=0)
        self.xmin.grid(row=1, column=1)
        self.xmaxLab.grid(row=1, column=2)
        self.xmax.grid(row=1, column=3)
        self.xlog.grid(row=1, column=4)
        self.yminLab.grid(row=2, column=0)
        self.ymin.grid(row=2, column=1)
        self.ymaxLab.grid(row=2, column=2)
        self.ymax.grid(row=2, column=3)
        self.ylog.grid(row=2, column=4)
        self.replotButton.grid(row=3, column=0)
        self.saveButton.grid(row=3, column=1)
        self.saveAllButton.grid(row=3, column=2)
        self.closeAllButton.grid(row=3, column=3)

    def disableControls(self):
        """Grey out the per-plot buttons (used when no plot is active)."""
        self.replotButton.config(state="disabled")
        self.saveButton.config(state="disabled")
        self.isDisabled = True

    def enableControls(self):
        """Re-enable the per-plot buttons."""
        self.replotButton.config(state="normal")
        self.saveButton.config(state="normal")
        # Keep the flag consistent with the widget state (previously only
        # changePlot cleared it).
        self.isDisabled = False

    def changePlot(self, newentry):
        """OptionMenu callback: switch the active plot to *newentry*."""
        if newentry == "None":
            self.disableControls()
            return
        if self.isDisabled:
            self.enableControls()
            self.isDisabled = False
        activeAx = self.plots.updateActive(newentry)
        if activeAx is not None:
            self.updateEntries(activeAx)

    def save(self):
        """Save the active figure under its registered name.

        If the name ends in "svg", additionally export ps + ps_tex via
        inkscape for LaTeX inclusion.
        """
        obj = self.plots.getActive()
        if obj is None:
            return
        obj.fig.savefig(obj.name)
        print("Figure written to %s" % (obj.name), end="")
        # Export to ps_tex if filename is an svg
        if obj.name[-3:] == "svg":
            psname = obj.name[:-3] + "ps"
            subprocess.call(["inkscape", "--export-ps=%s" % (psname),
                             "--export-latex", obj.name])
            print(" ...and ps+ps_tex written to %s" % (psname), end="")
        print("")

    def saveall(self):
        """Save every attached plot, then restore the previously active one."""
        for i in range(0, len(self.plots.axes)):
            self.plots.active = i
            self.save()
        self.changePlot(self.activePlot.get())

    def closeall(self):
        """Close all figures and disable everything that needs a live plot."""
        plt.close("all")
        self.disableControls()
        self.saveAllButton.configure(state="disabled")
        self.closeAllButton.configure(state="disabled")
|
{
"content_hash": "40bbc48b69812e6062cc5d9f772b950b",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 126,
"avg_line_length": 33.075949367088604,
"alnum_prop": 0.5718841688990943,
"repo_name": "davidkleiven/PLOD",
"id": "43f400be619d542bce108f831ede7fe2d1af8cb5",
"size": "7839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLOD/controlGUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11996"
}
],
"symlink_target": ""
}
|
"""
categories: Modules,random
description: ``getrandbits`` method can only return a maximum of 32 bits at a time.
cause: PRNG's internal state is only 32bits so it can only return a maximum of 32 bits of data at a time.
workaround: If you need a number that has more than 32 bits then utilize the random module from micropython-lib.
"""
import random
x = random.getrandbits(64)
print("{}".format(x))
|
{
"content_hash": "2d4af049b3dacd72a913ed30040298c1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 112,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.75,
"repo_name": "bvernoux/micropython",
"id": "523e3a329d4e4255f5fff188ab545525a6ba92ae",
"size": "404",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tests/cpydiff/modules_random_getrandbits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
}
|
'''
Autest for the cookie_remap plugin: verifies that variables such as $path,
$unmatched_path and $cr_req_url (with $cr_urlencode) are substituted into the
remapped destination URL, comparing origin-server traffic against a gold file.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
'''
# The whole test is meaningless without the plugin under test.
Test.SkipUnless(Condition.PluginExists('cookie_remap.so'))
Test.ContinueOnFail = True
Test.testName = "cookie_remap: Substitute variables"
# Define default ATS
ts = Test.MakeATSProcess("ts")
server = Test.MakeOriginServer("server", ip='127.0.0.10')

# Expected origin-side request after $path is substituted into the dest query.
request_header = {"headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
                  "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionfile.log", request_header, response_header)

# Expected origin-side request for the $unmatched_path substitution.
request_header_2 = {"headers": "GET /photos/search?query=/theunmatchedpath HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
                    "timestamp": "1469733493.993", "body": ""}
response_header_2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionfile.log", request_header_2, response_header_2)

# Expected origin-side request when $path is substituted as-is into the path.
request_header_3 = {"headers": "GET /photos/search/magic/foobar HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
                    "timestamp": "1469733493.993", "body": ""}
response_header_3 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionfile.log", request_header_3, response_header_3)

# Setup the remap configuration
# Read the template config and patch the origin server's real port into it.
config_path = os.path.join(Test.TestDirectory, "configs/substituteconfig.txt")
with open(config_path, 'r') as config_file:
    config1 = config_file.read()
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*',
})
config1 = config1.replace("$PORT", str(server.Variables.Port))
ts.Disk.File(ts.Variables.CONFIGDIR + "/substituteconfig.txt", exists=False, id="config1")
ts.Disk.config1.WriteOn(config1)
# Remap rule routing through cookie_remap with the patched config above.
ts.Disk.remap_config.AddLine(
    'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/substituteconfig.txt'
)

# Each test run below sends one curl request through ATS; the cookie selects
# which rule of substituteconfig.txt fires (per the run's description).
tr = Test.AddTestRun("Substitute $path in the dest query")
tr.Processes.Default.Command = '''
curl \
--proxy 127.0.0.1:{0} \
"http://www.example.com/magic" \
-H"Cookie: fpbeta=abcd" \
-H "Proxy-Connection: keep-alive" \
--verbose \
'''.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
# First run starts both processes; wait for the origin port before proceeding.
tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server

tr = Test.AddTestRun("Substitute $unmatched_path in the dest query")
tr.Processes.Default.Command = '''
curl \
--proxy 127.0.0.1:{0} \
"http://www.example.com/magic/theunmatchedpath" \
-H"Cookie: oxalpha=3333" \
-H "Proxy-Connection: keep-alive" \
--verbose \
'''.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server

tr = Test.AddTestRun("Substitute $cr_req_url using $cr_urlencode")
tr.Processes.Default.Command = '''
curl \
--proxy 127.0.0.1:{0} \
"http://www.example.com/magic" \
-H"Cookie: acgamma=dfndfdfd" \
-H "Proxy-Connection: keep-alive" \
--verbose \
'''.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server

tr = Test.AddTestRun("Substitute $path as is in outgoing path")
tr.Processes.Default.Command = '''
curl \
--proxy 127.0.0.1:{0} \
"http://www.example.com/magic/foobar" \
-H "Proxy-Connection: keep-alive" \
--verbose \
'''.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server

# Compare everything the origin server saw against the gold file.
server.Streams.All = "gold/substitute.gold"
|
{
"content_hash": "55987b93a561b2a8a2deeb9216b60206",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 128,
"avg_line_length": 37.049180327868854,
"alnum_prop": 0.7165929203539823,
"repo_name": "pbchou/trafficserver",
"id": "13b41028049e70f423630785d35ba36b777bea08",
"size": "4520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/gold_tests/pluginTest/cookie_remap/substitute.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1478100"
},
{
"name": "C++",
"bytes": "16547456"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "216500"
},
{
"name": "Makefile",
"bytes": "250518"
},
{
"name": "Objective-C",
"bytes": "12972"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1509938"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "175893"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim script",
"bytes": "192"
}
],
"symlink_target": ""
}
|
from searchlight.elasticsearch.plugins import base
from searchlight.elasticsearch.plugins.glance \
import metadefs_notification_handler
from searchlight.elasticsearch.plugins.glance \
import serialize_glance_metadef_ns
class MetadefIndex(base.IndexBase):
    """Searchlight plugin indexing Glance metadata-definition namespaces."""

    def __init__(self):
        super(MetadefIndex, self).__init__()

    def get_index_name(self):
        """Namespace documents are stored in the shared 'glance' index."""
        return 'glance'

    def get_document_type(self):
        """Elasticsearch document type for this plugin."""
        return 'metadef'

    def get_document_id_field(self):
        """The namespace name uniquely identifies a document."""
        return 'namespace'

    def get_mapping(self):
        """Return the Elasticsearch mapping for metadef namespace documents.

        Properties, objects, resource types and tags are indexed as nested
        documents; the same property schema is reused at the namespace level
        and inside each object.
        """
        # Shared schema for namespace-level and per-object properties.
        prop_schema = {
            'dynamic': True,
            'type': 'nested',
            'properties': {
                'name': {'type': 'string', 'index': 'not_analyzed'},
                'type': {'type': 'string'},
                'title': {'type': 'string'},
                'description': {'type': 'string'},
            }
        }
        resource_types_schema = {
            'type': 'nested',
            'properties': {
                'name': {'type': 'string'},
                # TODO(sjmc7): add these back in? They don't seem
                # to be accessible via the API
                # 'prefix': {'type': 'string'},
                # 'properties_target': {'type': 'string'},
            },
        }
        objects_schema = {
            'type': 'nested',
            'properties': {
                'id': {'type': 'string', 'index': 'not_analyzed'},
                'name': {'type': 'string'},
                'description': {'type': 'string'},
                'properties': prop_schema,
            }
        }
        tags_schema = {
            'type': 'nested',
            'properties': {
                'name': {'type': 'string'},
            }
        }
        return {
            '_id': {
                'path': 'namespace',
            },
            'properties': {
                'display_name': {'type': 'string'},
                'description': {'type': 'string'},
                'namespace': {'type': 'string', 'index': 'not_analyzed'},
                'owner': {'type': 'string', 'index': 'not_analyzed'},
                'visibility': {'type': 'string', 'index': 'not_analyzed'},
                'resource_types': resource_types_schema,
                'objects': objects_schema,
                'properties': prop_schema,
                'tags': tags_schema,
            },
        }

    def get_rbac_filter(self, request_context):
        """Return the query pieces restricting results to visible documents.

        A document matches when it belongs to the requesting owner or is
        public, and it has this plugin's document type and index.
        """
        # TODO(krykowski): Define base get_rbac_filter in IndexBase class
        # which will provide some common subset of query pieces.
        # Something like:
        # def get_common_context_pieces(self, request_context):
        #   return [{'term': {'owner': request_context.owner,
        #                     'type': {'value': self.get_document_type()}}]
        owner_or_public = {
            'or': [
                {'term': {'owner': request_context.owner}},
                {'term': {'visibility': 'public'}},
            ]
        }
        return [
            {
                "and": [
                    owner_or_public,
                    {'type': {'value': self.get_document_type()}},
                    {'index': {'value': self.get_index_name()}},
                ]
            }
        ]

    def get_objects(self):
        """Fetch all metadef namespaces from Glance for initial indexing."""
        from searchlight.elasticsearch.plugins import openstack_clients
        glance = openstack_clients.get_glanceclient()
        return list(glance.metadefs_namespace.list())

    def serialize(self, metadef_obj):
        """Convert a Glance metadef namespace into an indexable document."""
        return serialize_glance_metadef_ns(metadef_obj)

    def get_notification_handler(self):
        """Build the handler that keeps the index in sync via notifications."""
        return metadefs_notification_handler.MetadefHandler(
            self.engine,
            self.get_index_name(),
            self.get_document_type()
        )

    def get_notification_supported_events(self):
        """List every Glance notification event this plugin reacts to."""
        return [
            "metadef_namespace.create",
            "metadef_namespace.update",
            "metadef_namespace.delete",
            "metadef_object.create",
            "metadef_object.update",
            "metadef_object.delete",
            "metadef_property.create",
            "metadef_property.update",
            "metadef_property.delete",
            "metadef_tag.create",
            "metadef_tag.update",
            "metadef_tag.delete",
            "metadef_resource_type.create",
            "metadef_resource_type.delete",
            "metadef_namespace.delete_properties",
            "metadef_namespace.delete_objects",
            "metadef_namespace.delete_tags"
        ]
|
{
"content_hash": "9f4e1bf21d8260d2cc5a21ebe2c324b9",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 74,
"avg_line_length": 35.1875,
"alnum_prop": 0.4187882376159463,
"repo_name": "lakshmisampath/searchlight",
"id": "054da54b914b493a53e4bb2d37ddd7f154fab6e8",
"size": "5700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchlight/elasticsearch/plugins/glance/metadefs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "366198"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.