text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from datetime import datetime
from django.conf import settings
from django.test import TestCase
from django.http import HttpResponse, HttpRequest
from mock import patch, Mock
from django_geoip.storage import LocationCookieStorage, LocationDummyStorage, BaseLocationStorage
from test_app.models import MyCustomLocation
from tests.factory import create_custom_location
class BaseLocationStorageTest(TestCase):
    """Exercises ``BaseLocationStorage._validate_location``."""

    def setUp(self):
        # Force the custom location model for the duration of each test.
        self.settings_patcher = patch.object(
            settings, 'GEOIP_LOCATION_MODEL', 'test_app.models.MyCustomLocation')
        self.settings_patcher.start()
        self.storage = BaseLocationStorage(request=HttpRequest(), response=HttpResponse())

    def tearDown(self):
        self.settings_patcher.stop()

    def test_validate_location(self):
        # Neither None nor an arbitrary object counts as a valid location.
        self.assertFalse(self.storage._validate_location(None))
        self.assertFalse(self.storage._validate_location(Mock()))
        # A real instance of the configured model does.
        valid_location = create_custom_location(MyCustomLocation)
        self.assertTrue(self.storage._validate_location(valid_location))
class LocationCookieStorageTest(TestCase):
    """Tests for ``LocationCookieStorage`` cookie update/validation logic."""

    def setUp(self):
        self.request = HttpRequest()
        self.request.location = Mock()

    def test_should_not_update_cookie_if_no_location_in_request(self):
        # A bare request with no ``location`` attribute: nothing to persist.
        storage = LocationCookieStorage(request=HttpRequest(), response=HttpResponse())
        self.assertFalse(storage._should_update_cookie(new_value=10))

    def test_should_update_cookie_if_cookie_doesnt_exist(self):
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertTrue(storage._should_update_cookie(new_value=10))

    def test_should_not_update_cookie_if_cookie_is_empty_value(self):
        # Bug fix: this method previously had the same name as the test below
        # (``test_should_not_update_cookie_if_cookie_is_none``), so this
        # definition was shadowed and never executed. Renamed to match what it
        # actually checks: cookie already holds the "empty" sentinel value.
        self.request.COOKIES[settings.GEOIP_COOKIE_NAME] = settings.GEOIP_LOCATION_EMPTY_VALUE
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertFalse(storage._should_update_cookie(new_value=settings.GEOIP_LOCATION_EMPTY_VALUE))

    def test_should_not_update_cookie_if_cookie_is_none(self):
        self.request.COOKIES[settings.GEOIP_COOKIE_NAME] = None
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertFalse(storage._should_update_cookie(new_value=None))

    def test_should_not_update_cookie_if_cookie_is_fresh(self):
        self.request.COOKIES[settings.GEOIP_COOKIE_NAME] = 10
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertFalse(storage._should_update_cookie(new_value=10))

    def test_should_update_cookie_if_cookie_is_obsolete(self):
        self.request.COOKIES[settings.GEOIP_COOKIE_NAME] = 42
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertTrue(storage._should_update_cookie(new_value=10))

    def test_should_update_cookie_if_cookie_is_empty_value(self):
        # No cookie yet, but the request has a location: update even to empty.
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertTrue(storage._should_update_cookie(new_value=settings.GEOIP_LOCATION_EMPTY_VALUE))

    def test_validate_location_if_cookies_is_empty_value(self):
        value = settings.GEOIP_LOCATION_EMPTY_VALUE
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertTrue(storage._validate_location(location=value))

    @patch.object(settings, 'GEOIP_LOCATION_MODEL', 'test_app.models.MyCustomLocation')
    def test_malicious_cookie_is_no_problem(self):
        # A non-numeric cookie value must not raise; get() returns None.
        self.request.COOKIES[settings.GEOIP_COOKIE_NAME] = "wtf"
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertEqual(storage.get(), None)

    @patch('django_geoip.storage.datetime')
    def test_do_set(self, mock):
        # Freeze "now" so the cookie expiry (one year later) is deterministic.
        mock.utcnow.return_value = datetime(2030, 1, 1, 0, 0, 0)
        base_response = HttpResponse()
        storage = LocationCookieStorage(request=self.request, response=base_response)
        storage._do_set(10)
        expected = ['Set-Cookie: geoip_location_id=10', 'expires=Thu, 02-Jan-2031 00:00:00 GMT']
        self.assertEqual(base_response.cookies[settings.GEOIP_COOKIE_NAME].output().split('; ')[:2], expected)

    @patch.object(settings, 'GEOIP_COOKIE_DOMAIN', '.testserver.local')
    def test_get_cookie_domain_from_settings(self):
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertEqual(storage.get_cookie_domain(), '.testserver.local')

    def test_get_cookie_domain_no_settings(self):
        self.request.get_host = Mock(return_value='my.localserver.tld')
        storage = LocationCookieStorage(request=self.request, response=HttpResponse())
        self.assertEqual(storage.get_cookie_domain(), None)
class LocationDummyStorageTest(TestCase):
    """Smoke tests for the no-op ``LocationDummyStorage``."""

    def setUp(self):
        self.request = HttpRequest()
        self.request.location = Mock()

    def test_get(self):
        # get() simply echoes whatever location is attached to the request.
        storage = LocationDummyStorage(request=self.request, response=HttpResponse())
        self.assertEqual(storage.get(), self.request.location)

    def test_set(self):
        # set() must accept any value without raising.
        storage = LocationDummyStorage(request=self.request, response=HttpResponse())
        dummy_location = Mock()
        storage.set(dummy_location)
|
{
"content_hash": "c84c33c3f9cafc16a8dfd6abacb1ccff",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 114,
"avg_line_length": 46.88392857142857,
"alnum_prop": 0.724052561416873,
"repo_name": "futurecolors/django-geoip",
"id": "3ef4d66a28e6e71b53af3bdd69cb3793d82d9586",
"size": "5275",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/test_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "302"
},
{
"name": "Python",
"bytes": "80674"
}
],
"symlink_target": ""
}
|
from ..Qt import QtGui, QtCore
from ..python2_3 import asUnicode
from ..SignalProxy import SignalProxy
from .. import functions as fn
from math import log
from decimal import Decimal as D ## Use decimal to avoid accumulating floating-point errors
from decimal import *
import weakref
__all__ = ['SpinBox']
class SpinBox(QtGui.QAbstractSpinBox):
    """
    **Bases:** QtGui.QAbstractSpinBox

    QSpinBox widget on steroids. Allows selection of numerical value, with extra features:

    - SI prefix notation (eg, automatically display "300 mV" instead of "0.003 V")
    - Float values with linear and decimal stepping (1-9, 10-90, 100-900, etc.)
    - Option for unbounded values
    - Delayed signals (allows multiple rapid changes with only one change signal)

    ============================= ==============================================
    **Signals:**
    valueChanged(value)           Same as QSpinBox; emitted every time the value
                                  has changed.
    sigValueChanged(self)         Emitted when value has changed, but also combines
                                  multiple rapid changes into one signal (eg,
                                  when rolling the mouse wheel).
    sigValueChanging(self, value) Emitted immediately for all value changes.
    ============================= ==============================================
    """

    ## There's a PyQt bug that leaks a reference to the
    ## QLineEdit returned from QAbstractSpinBox.lineEdit()
    ## This makes it possible to crash the entire program
    ## by making accesses to the LineEdit after the spinBox has been deleted.
    ## I have no idea how to get around this..

    valueChanged = QtCore.Signal(object)     # (value)  for compatibility with QSpinBox
    sigValueChanged = QtCore.Signal(object)  # (self)
    sigValueChanging = QtCore.Signal(object, object)  # (self, value)  sent immediately; no delay.
def __init__(self, parent=None, value=0.0, **kwargs):
    """
    ============== ========================================================================
    **Arguments:**
    parent         Sets the parent widget for this SpinBox (optional)
    value          (float/int) initial value
    bounds         (min,max) Minimum and maximum values allowed in the SpinBox.
                   Either may be None to leave the value unbounded.
    suffix         (str) suffix (units) to display after the numerical value
    siPrefix       (bool) If True, then an SI prefix is automatically prepended
                   to the units and the value is scaled accordingly. For example,
                   if value=0.003 and suffix='V', then the SpinBox will display
                   "300 mV" (but a call to SpinBox.value will still return 0.003).
    step           (float) The size of a single step. This is used when clicking the up/
                   down arrows, when rolling the mouse wheel, or when pressing
                   keyboard arrows while the widget has keyboard focus. Note that
                   the interpretation of this value is different when specifying
                   the 'dec' argument.
    dec            (bool) If True, then the step value will be adjusted to match
                   the current size of the variable (for example, a value of 15
                   might step in increments of 1 whereas a value of 1500 would
                   step in increments of 100). In this case, the 'step' argument
                   is interpreted *relative* to the current value. The most common
                   'step' values when dec=True are 0.1, 0.2, 0.5, and 1.0.
    minStep        (float) When dec=True, this specifies the minimum allowable step size.
    int            (bool) if True, the value is forced to integer type
    decimals       (int) Number of decimal values to display
    ============== ========================================================================
    """
    QtGui.QAbstractSpinBox.__init__(self, parent)
    self.lastValEmitted = None  # last value sent through emitChanged()
    self.lastText = ''          # last text written to the line edit
    self.textValid = True  ## If false, we draw a red border
    self.setMinimumWidth(0)
    self.setMaximumHeight(20)
    self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
    self.opts = {
        'bounds': [None, None],

        ## Log scaling options #### Log mode is no longer supported.
        #'step': 0.1,
        #'minStep': 0.001,
        #'log': True,
        #'dec': False,

        ## decimal scaling option - example
        #'step': 0.1,
        #'minStep': .001,
        #'log': False,
        #'dec': True,

        ## normal arithmetic step
        'step': D('0.01'),  ## if 'dec' is false, the spinBox steps by 'step' every time
                            ## if 'dec' is True, the step size is relative to the value
                            ## 'step' needs to be an integral divisor of ten, ie 'step'*n=10 for some integer value of n (but only if dec is True)
        'log': False,
        'dec': False,  ## if true, does decimal stepping. ie from 1-10 it steps by 'step', from 10 to 100 it steps by 10*'step', etc.
                       ## if true, minStep must be set in order to cross zero.
        'int': False,  ## Set True to force value to be integer
        'suffix': '',
        'siPrefix': False,  ## Set to True to display numbers with SI prefix (ie, 100pA instead of 1e-10A)
        'delay': 0.3,  ## delay sending wheel update signals for 300ms
        'delayUntilEditFinished': True,  ## do not send signals until text editing has finished
        ## for compatibility with QDoubleSpinBox and QSpinBox
        'decimals': 2,
    }
    self.decOpts = ['step', 'minStep']
    self.val = D(asUnicode(value))  ## Value is precise decimal. Ordinary math not allowed.
    self.updateText()
    self.skipValidate = False  # guards validate() while updateText() writes programmatically
    self.setCorrectionMode(self.CorrectToPreviousValue)
    self.setKeyboardTracking(False)
    self.setOpts(**kwargs)
    self.editingFinished.connect(self.editingFinishedEvent)
    self.proxy = SignalProxy(self.sigValueChanging, slot=self.delayedChange, delay=self.opts['delay'])
def event(self, ev):
    """Forward events to the base class, but claim Return key presses as handled."""
    handled = QtGui.QAbstractSpinBox.event(self, ev)
    if ev.type() == QtCore.QEvent.KeyPress and ev.key() == QtCore.Qt.Key_Return:
        handled = True  ## For some reason, spinbox pretends to ignore return key press
    return handled
##lots of config options, just gonna stuff 'em all in here rather than do the get/set crap.
def setOpts(self, **opts):
    """
    Changes the behavior of the SpinBox. Accepts most of the arguments
    allowed in :func:`__init__ <pyqtgraph.SpinBox.__init__>`.
    """
    for k in opts:
        if k == 'bounds':
            # Apply both bounds without re-clipping yet; the value is
            # refreshed once below after all options are processed.
            self.setMinimum(opts[k][0], update=False)
            self.setMaximum(opts[k][1], update=False)
        elif k in ['step', 'minStep']:
            # Step sizes are stored as Decimal to avoid float drift.
            self.opts[k] = D(asUnicode(opts[k]))
        elif k == 'value':
            pass  ## don't set value until bounds have been set
        else:
            self.opts[k] = opts[k]
    if 'value' in opts:
        self.setValue(opts['value'])
    ## If bounds have changed, update value to match
    if 'bounds' in opts and 'value' not in opts:
        self.setValue()
    ## sanity checks:
    if self.opts['int']:
        if 'step' in opts:
            step = opts['step']
            ## not necessary..
            #if int(step) != step:
                #raise Exception('Integer SpinBox must have integer step size.')
        else:
            # No explicit step given: coerce the stored step to int.
            self.opts['step'] = int(self.opts['step'])
        if 'minStep' in opts:
            step = opts['minStep']
            if int(step) != step:
                raise Exception('Integer SpinBox must have integer minStep size.')
        else:
            # Default integer minStep is at least 1.
            ms = int(self.opts.get('minStep', 1))
            if ms < 1:
                ms = 1
            self.opts['minStep'] = ms
    if 'delay' in opts:
        self.proxy.setDelay(opts['delay'])
    self.updateText()
def setMaximum(self, m, update=True):
    """Set the maximum allowed value (or None for no limit)"""
    self.opts['bounds'][1] = None if m is None else D(asUnicode(m))
    if update:
        self.setValue()  # re-clip the current value against the new bound
def setMinimum(self, m, update=True):
    """Set the minimum allowed value (or None for no limit)"""
    self.opts['bounds'][0] = None if m is None else D(asUnicode(m))
    if update:
        self.setValue()  # re-clip the current value against the new bound
def setPrefix(self, p):
    ## QSpinBox compatibility: stored via setOpts as 'prefix'.
    self.setOpts(prefix=p)

def setRange(self, r0, r1):
    ## QSpinBox compatibility: maps to the 'bounds' option.
    self.setOpts(bounds = [r0,r1])

def setProperty(self, prop, val):
    ## for QSpinBox compatibility; only the 'value' property is supported.
    if prop == 'value':
        #if type(val) is QtCore.QVariant:
            #val = val.toDouble()[0]
        self.setValue(val)
    else:
        print("Warning: SpinBox.setProperty('%s', ..) not supported." % prop)

def setSuffix(self, suf):
    ## QSpinBox compatibility: maps to the 'suffix' option.
    self.setOpts(suffix=suf)

def setSingleStep(self, step):
    ## QSpinBox compatibility: maps to the 'step' option.
    self.setOpts(step=step)

def setDecimals(self, decimals):
    ## QDoubleSpinBox compatibility: maps to the 'decimals' option.
    self.setOpts(decimals=decimals)
def value(self):
    """Return the value of this SpinBox as an int (when the 'int' option is
    set) or a float."""
    return int(self.val) if self.opts['int'] else float(self.val)
def setValue(self, value=None, update=True, delaySignal=False):
    """
    Set the value of this spin.
    If the value is out of bounds, it will be clipped to the nearest boundary.
    If the spin is integer type, the value will be coerced to int.
    Returns the actual value set.
    If value is None, then the current value is used (this is for resetting
    the value after bounds, etc. have changed)
    """
    if value is None:
        value = self.value()
    # Clip to bounds; None means unbounded on that side.
    bounds = self.opts['bounds']
    if bounds[0] is not None and value < bounds[0]:
        value = bounds[0]
    if bounds[1] is not None and value > bounds[1]:
        value = bounds[1]
    if self.opts['int']:
        value = int(value)
    # Store as Decimal to avoid accumulating floating-point error.
    value = D(asUnicode(value))
    if value == self.val:
        # Bug fix: previously returned None here, contradicting the
        # documented "Returns the actual value set" contract; now returns
        # the (unchanged) value so callers can rely on the return.
        return value
    prev = self.val
    self.val = value
    if update:
        self.updateText(prev=prev)
    self.sigValueChanging.emit(self, float(self.val))  ## change will be emitted in 300ms if there are no subsequent changes.
    if not delaySignal:
        self.emitChanged()
    return value
def emitChanged(self):
    # Record what we emitted so delayedChange() can detect later changes.
    self.lastValEmitted = self.val
    self.valueChanged.emit(float(self.val))
    self.sigValueChanged.emit(self)

def delayedChange(self):
    # Invoked by the SignalProxy after rapid sigValueChanging bursts settle;
    # emits the consolidated change signal only if the value actually moved.
    try:
        if self.val != self.lastValEmitted:
            self.emitChanged()
    except RuntimeError:
        pass  ## This can happen if we try to handle a delayed signal after someone else has already deleted the underlying C++ object.

def widgetGroupInterface(self):
    # Exposes (change-signal, getter, setter) — presumably consumed by
    # pyqtgraph's WidgetGroup save/restore machinery; verify against caller.
    return (self.valueChanged, SpinBox.value, SpinBox.setValue)

def sizeHint(self):
    return QtCore.QSize(120, 0)

def stepEnabled(self):
    # Both arrows always enabled; bounds are enforced by setValue() clipping.
    return self.StepUpEnabled | self.StepDownEnabled

#def fixup(self, *args):
    #print "fixup:", args
def stepBy(self, n):
    """Step the value by *n* increments (arrow buttons, wheel, arrow keys).

    Steps one increment at a time so that in 'dec' mode the step size can be
    recomputed from the value's magnitude after each increment.
    """
    n = D(int(n))  ## n must be integral number of steps.
    s = [D(-1), D(1)][n >= 0]  ## determine sign of step
    val = self.val
    for i in range(int(abs(n))):
        if self.opts['log']:
            raise Exception("Log mode no longer supported.")
        #            step = abs(val) * self.opts['step']
        #            if 'minStep' in self.opts:
        #                step = max(step, self.opts['minStep'])
        #            val += step * s
        if self.opts['dec']:
            if val == 0:
                # Zero carries no magnitude information; use minStep directly.
                step = self.opts['minStep']
                exp = None
            else:
                vs = [D(-1), D(1)][val >= 0]
                #exp = D(int(abs(val*(D('1.01')**(s*vs))).log10()))
                fudge = D('1.01')**(s*vs)  ## fudge factor. at some places, the step size depends on the step sign.
                exp = abs(val * fudge).log10().quantize(1, ROUND_FLOOR)
                step = self.opts['step'] * D(10)**exp
            if 'minStep' in self.opts:
                step = max(step, self.opts['minStep'])
            val += s * step
            #print "Exp:", exp, "step", step, "val", val
        else:
            val += s*self.opts['step']
        # Values smaller than minStep snap to zero (allows crossing zero).
        if 'minStep' in self.opts and abs(val) < self.opts['minStep']:
            val = D(0)
    self.setValue(val, delaySignal=True)  ## note all steps (arrow buttons, wheel, up/down keys..) emit delayed signals only.
def valueInRange(self, value):
    """Return True when *value* satisfies the configured bounds and, in
    integer mode, is a whole number."""
    lower, upper = self.opts['bounds']
    if lower is not None and value < lower:
        return False
    if upper is not None and value > upper:
        return False
    if self.opts.get('int', False) and int(value) != value:
        return False
    return True
def updateText(self, prev=None):
    """Regenerate the displayed text from self.val.

    *prev* is the previous value; it is used to pick an SI prefix when the
    new value is exactly zero (zero alone carries no scale information).
    """
    # Suppress validate() while we set the text programmatically.
    self.skipValidate = True
    if self.opts['siPrefix']:
        if self.val == 0 and prev is not None:
            (s, p) = fn.siScale(prev)
            txt = "0.0 %s%s" % (p, self.opts['suffix'])
        else:
            txt = fn.siFormat(float(self.val), suffix=self.opts['suffix'])
    else:
        txt = '%g%s' % (self.val , self.opts['suffix'])
    self.lineEdit().setText(txt)
    self.lastText = txt
    self.skipValidate = False
def validate(self, strn, pos):
    """QValidator hook: classify the current text as Acceptable,
    Intermediate, or Invalid, and update the red-border flag.

    Returns (state, pos) or (state, strn, pos) depending on the PyQt API.
    """
    if self.skipValidate:
        # updateText() is writing programmatically; accept without checks.
        ret = QtGui.QValidator.Acceptable
    else:
        try:
            ## first make sure we didn't mess with the suffix
            suff = self.opts.get('suffix', '')
            if len(suff) > 0 and asUnicode(strn)[-len(suff):] != suff:
                ret = QtGui.QValidator.Invalid
            ## next see if we actually have an interpretable value
            else:
                val = self.interpret()
                if val is False:
                    # Text could not be parsed yet; allow continued editing.
                    ret = QtGui.QValidator.Intermediate
                else:
                    if self.valueInRange(val):
                        if not self.opts['delayUntilEditFinished']:
                            # Live update: apply the value but leave the
                            # text alone (update=False).
                            self.setValue(val, update=False)
                        ret = QtGui.QValidator.Acceptable
                    else:
                        ret = QtGui.QValidator.Intermediate
        except:
            # Any parsing error leaves the text editable but invalid.
            ret = QtGui.QValidator.Intermediate
    ## draw / clear border
    if ret == QtGui.QValidator.Intermediate:
        self.textValid = False
    elif ret == QtGui.QValidator.Acceptable:
        self.textValid = True
    ## note: if text is invalid, we don't change the textValid flag
    ## since the text will be forced to its previous state anyway
    self.update()
    ## support 2 different pyqt APIs. Bleh.
    if hasattr(QtCore, 'QString'):
        return (ret, pos)
    else:
        return (ret, strn, pos)
def paintEvent(self, ev):
    """Paint the spinbox normally, then overlay a red rounded border when
    the current text failed validation (see validate())."""
    QtGui.QAbstractSpinBox.paintEvent(self, ev)
    ## draw red border if text is invalid
    if not self.textValid:
        p = QtGui.QPainter(self)
        p.setRenderHint(p.Antialiasing)
        p.setPen(fn.mkPen((200,50,50), width=2))
        p.drawRoundedRect(self.rect().adjusted(2, 2, -2, -2), 4, 4)
        p.end()
def interpret(self):
    """Return value of text. Return False if text is invalid, raise exception if text is intermediate"""
    strn = self.lineEdit().text()
    suf = self.opts['suffix']
    if len(suf) > 0:
        # The suffix must be present; strip it before numeric parsing.
        if strn[-len(suf):] != suf:
            return False
            #raise Exception("Units are invalid.")
        strn = strn[:-len(suf)]
    try:
        # fn.siEval handles SI-prefixed numbers (eg "300 m" -> 0.3).
        val = fn.siEval(strn)
    except:
        return False
    return val
#def interpretText(self, strn=None):
#print "Interpret:", strn
#if strn is None:
#strn = self.lineEdit().text()
#self.setValue(siEval(strn), update=False)
##QtGui.QAbstractSpinBox.interpretText(self)
def editingFinishedEvent(self):
    """Edit has finished; set value."""
    if asUnicode(self.lineEdit().text()) == self.lastText:
        # Text unchanged since we last wrote it; nothing to do.
        return
    try:
        val = self.interpret()
    except:
        # Unparseable/intermediate text: keep the previous value.
        return
    if val is False:
        # interpret() flagged the text as invalid.
        return
    if val == self.val:
        # Parsed value equals current value; no change to apply.
        return
    self.setValue(val, delaySignal=False)  ## allow text update so that values are reformatted pretty-like
#def textChanged(self):
#print "Text changed."
### Drop-in replacement for SpinBox; just for crash-testing
#class SpinBox(QtGui.QDoubleSpinBox):
#valueChanged = QtCore.Signal(object) # (value) for compatibility with QSpinBox
#sigValueChanged = QtCore.Signal(object) # (self)
#sigValueChanging = QtCore.Signal(object) # (value)
#def __init__(self, parent=None, *args, **kargs):
#QtGui.QSpinBox.__init__(self, parent)
#def __getattr__(self, attr):
#return lambda *args, **kargs: None
#def widgetGroupInterface(self):
#return (self.valueChanged, SpinBox.value, SpinBox.setValue)
|
{
"content_hash": "7d0ae1991800719521735d6c89ef966b",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 150,
"avg_line_length": 39.70318725099602,
"alnum_prop": 0.5147759771210677,
"repo_name": "isaacyeaton/pyadisi",
"id": "422522de7142b7e2338bfc6c0d7d4b7418f72c71",
"size": "19955",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "pyadisi/pyqtgraph/widgets/SpinBox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "1965802"
}
],
"symlink_target": ""
}
|
from nose.tools import eq_
import bot_mock
from pyfibot.modules import module_urltitle
bot = bot_mock.BotMock()
def test_one():
    """English Wikipedia article with non-ASCII dashes in the URL."""
    module_urltitle.init(bot)
    url = "https://en.wikipedia.org/wiki/Hatfield–McCoy_feud"
    eq_(("#channel", u"Title: The Hatfield–McCoy feud involved two families of the West Virginia–Kentucky area along the Tug Fork of the Big Sandy River."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_two():
    """Finnish Wikipedia article."""
    module_urltitle.init(bot)
    url = "http://fi.wikipedia.org/wiki/DTMF"
    eq_(("#channel", u"Title: DTMF on puhelinlaitteissa käytetty numeroiden äänitaajuusvalintatapa."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_three():
    """English Wikipedia article with a long first sentence."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/Gender_performativity"
    eq_(("#channel", u"Title: Gender performativity is a term created by post-structuralist feminist philosopher Judith Butler in her 1990 book Gender Trouble, which has subsequently been used in a variety of academic fields."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_four():
    """Article title containing a parenthesized disambiguation."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/Dynamo_(magician)"
    eq_(("#channel", u"Title: Steven Frayne, commonly known by his stage name \"Dynamo\", is an English magician, best known for his show Dynamo: Magician Impossible."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_five():
    """Finnish article about a person."""
    module_urltitle.init(bot)
    url = "http://fi.wikipedia.org/wiki/David_Eddings"
    eq_(("#channel", u"Title: David Carroll Eddings oli yhdysvaltalainen kirjailija, joka kirjoitti useita suosittuja fantasiakirjoja."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_six():
    """Short Finnish article."""
    module_urltitle.init(bot)
    url = "http://fi.wikipedia.org/wiki/Birger_Ek"
    eq_(("#channel", u"Title: Rolf Birger Ek oli suomalainen lentäjä ja Mannerheim-ristin ritari."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_seven():
    """Article whose summary contains abbreviations with periods."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/Ramon_Llull"
    eq_(("#channel", u"Title: Ramon Llull, T.O.S.F. was a Majorcan writer and philosopher, logician and a Franciscan tertiary."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_eight():
    """URL with a fragment; long summaries are truncated with an ellipsis."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/Lazarus_of_Bethany#In_culture"
    eq_(("#channel", u"Title: Lazarus of Bethany, also known as Saint Lazarus or Lazarus of the Four Days, is the subject of a prominent miracle attributed to Jesus in the Gospel of John, in which Jesus restores him to life four d..."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_nine():
    """Finnish article with non-ASCII characters in the URL itself."""
    module_urltitle.init(bot)
    url = "http://fi.wikipedia.org/wiki/Kimi_Räikkönen"
    eq_(("#channel", u"Title: Kimi-Matias Räikkönen on suomalainen autourheilija ja Formula 1:n maailmanmestari."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_ten():
    """Article whose summary contains a non-breaking space (\\xa0)."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/802.11ac"
    eq_(("#channel", u"Title: IEEE 802.11ac is a wireless networking standard in the 802.11 family, developed in the IEEE Standards Association process, providing high-throughput wireless local area networks on the 5\xa0GHz band."), module_urltitle.handle_url(bot, None, "#channel", url, url))

def test_eleven():
    """Article reached via a redirect-style canonical name."""
    module_urltitle.init(bot)
    url = "http://en.wikipedia.org/wiki/Edison_Arantes_do_Nascimento"
    eq_(("#channel", u"Title: Edson Arantes do Nascimento, who is better known as Pelé, is a retired Brazilian footballer who is widely regarded to be the best football player of all time."), module_urltitle.handle_url(bot, None, "#channel", url, url))
|
{
"content_hash": "3024659f683927d400a29803be0623bc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 297,
"avg_line_length": 52.31944444444444,
"alnum_prop": 0.7154234138571808,
"repo_name": "huqa/pyfibot",
"id": "59056766e39d70b68d314be217ed23af78e6745f",
"size": "3807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_wikipedia.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "319679"
},
{
"name": "Shell",
"bytes": "6833"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter ``Check.status`` to a choice-limited CharField defaulting to "new"."""

    dependencies = [("api", "0015_auto_20151022_1008")]

    operations = [
        migrations.AlterField(
            model_name="check",
            name="status",
            field=models.CharField(
                default="new",
                max_length=6,  # long enough for the longest choice, "paused"
                choices=[
                    ("up", "Up"),
                    ("down", "Down"),
                    ("new", "New"),
                    ("paused", "Paused"),
                ],
            ),
        )
    ]
|
{
"content_hash": "e06a211eda98f354edee5aaeb5ca9e3c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 55,
"avg_line_length": 24.52,
"alnum_prop": 0.43066884176182707,
"repo_name": "healthchecks/healthchecks",
"id": "d0fb4fb80884edaeff09e0b13db4339a3ee1102e",
"size": "637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hc/api/migrations/0016_auto_20151030_1107.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65959"
},
{
"name": "Dockerfile",
"bytes": "1088"
},
{
"name": "HTML",
"bytes": "716643"
},
{
"name": "JavaScript",
"bytes": "50869"
},
{
"name": "Less",
"bytes": "211300"
},
{
"name": "Python",
"bytes": "1043149"
},
{
"name": "Shell",
"bytes": "1655"
}
],
"symlink_target": ""
}
|
from django.contrib.comments.managers import CommentManager as DjangoCM
from mezzanine.conf import settings
from mezzanine.core.managers import CurrentSiteManager
class CommentManager(CurrentSiteManager, DjangoCM):
    """
    Provides filter for restricting comments that are not approved
    if ``COMMENTS_UNAPPROVED_VISIBLE`` is set to ``False``.
    """

    def visible(self):
        """
        Return the comments that are visible based on the
        ``COMMENTS_XXX_VISIBLE`` settings. When these settings
        are set to ``True``, the relevant comments are returned
        that shouldn't be shown, and are given placeholders in
        the template ``generic/includes/comment.html``.
        """
        settings.use_editable()
        queryset = self.all()
        if not settings.COMMENTS_UNAPPROVED_VISIBLE:
            queryset = queryset.filter(is_public=True)
        if not settings.COMMENTS_REMOVED_VISIBLE:
            queryset = queryset.filter(is_removed=False)
        return queryset

    def count_queryset(self):
        """
        Called from ``CommentsField.related_items_changed`` to store
        the comment count against an item each time a comment is saved.
        """
        return self.visible().count()
class KeywordManager(CurrentSiteManager):
    """Manager for keyword lookups, including case-insensitive get-or-create."""

    def get_by_natural_key(self, value):
        """
        Provides natural key method.
        """
        return self.get(value=value)

    def get_or_create_iexact(self, **kwargs):
        """
        Case insensitive title version of ``get_or_create``. Also
        allows for multiple existing results.
        """
        lookup = dict(**kwargs)
        # Rewrite an exact title match into a case-insensitive one.
        if "title" in lookup:
            lookup["title__iexact"] = lookup.pop("title")
        existing = self.filter(**lookup)
        try:
            # Multiple matches are fine; return the first one.
            return existing[0], False
        except IndexError:
            return self.create(**kwargs), True
|
{
"content_hash": "1b9696e285dbd7bb939eaf4fb2acdb6c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 71,
"avg_line_length": 32.62068965517241,
"alnum_prop": 0.6273784355179705,
"repo_name": "stbarnabas/mezzanine",
"id": "f6a8233bb002437c248ecf7a239e184f99e30d28",
"size": "1893",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/generic/managers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "224938"
},
{
"name": "JavaScript",
"bytes": "277433"
},
{
"name": "Python",
"bytes": "1012084"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.jvm_dependency_usage import DependencyUsageGraph, JvmDependencyUsage
from pants.util.dirutil import safe_mkdir, touch
from pants_test.tasks.task_test_base import TaskTestBase, ensure_cached
class TestJvmDependencyUsage(TaskTestBase):
    """Tests for JvmDependencyUsage graph construction and result caching."""

    @classmethod
    def task_type(cls):
        return JvmDependencyUsage

    def _setup(self, target_classfiles):
        """Takes a dict mapping targets to lists of classfiles.

        Returns (task, product_deps_by_src); the latter is the mutable
        product mapping that individual tests fill in per target.
        """
        context = self.context(target_roots=target_classfiles.keys())
        # Create classfiles in a target-specific directory, and add it to the classpath for the target.
        classpath_products = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))
        for target, classfiles in target_classfiles.items():
            target_dir = os.path.join(self.test_workdir, target.id)
            safe_mkdir(target_dir)
            for classfile in classfiles:
                touch(os.path.join(target_dir, classfile))
            classpath_products.add_for_target(target, [('default', target_dir)])
        product_deps_by_src = context.products.get_data('product_deps_by_src', dict)
        return self.create_task(context), product_deps_by_src

    def make_java_target(self, *args, **kwargs):
        # Convenience wrapper: every target in these tests is a JavaLibrary.
        assert 'target_type' not in kwargs
        return self.make_target(target_type=JavaLibrary, *args, **kwargs)

    def _cover_output(self, graph):
        # coverage of the output code
        self.assertNotEqual(graph.to_json(), "")
        self.assertNotEqual(graph.to_summary(), "")

    def test_simple_dep_usage_graph(self):
        # t2 and t3 both depend on t1; t2 uses one of t1's two products,
        # t3 uses both.
        t1 = self.make_java_target(spec=':t1', sources=['a.java', 'b.java'])
        t2 = self.make_java_target(spec=':t2', sources=['c.java'], dependencies=[t1])
        t3 = self.make_java_target(spec=':t3', sources=['d.java', 'e.java'], dependencies=[t1])
        self.set_options(size_estimator='filecount')
        dep_usage, product_deps_by_src = self._setup({
            t1: ['a.class', 'b.class'],
            t2: ['c.class'],
            t3: ['d.class', 'e.class'],
        })
        product_deps_by_src[t1] = {}
        product_deps_by_src[t2] = {'c.java': ['a.class']}
        product_deps_by_src[t3] = {'d.java': ['a.class', 'b.class'],
                                   'e.java': ['a.class', 'b.class']}
        graph = self.create_graph(dep_usage, [t1, t2, t3])
        self.assertEqual(graph._nodes[t1].products_total, 2)
        self.assertEqual(graph._nodes[t2].products_total, 1)
        self.assertEqual(graph._nodes[t3].products_total, 2)
        self.assertEqual(graph._nodes[t1].dep_edges, {})
        self.assertEqual(len(graph._nodes[t2].dep_edges[t1].products_used), 1)
        self.assertEqual(len(graph._nodes[t3].dep_edges[t1].products_used), 2)
        # Transitive cost = own products plus dependencies' costs
        # (with the 'filecount' size estimator).
        self.assertEqual(graph._trans_cost(t1), 2)
        self.assertEqual(graph._trans_cost(t2), 3)
        self.assertEqual(graph._trans_cost(t3), 4)
        self._cover_output(graph)

    def test_dep_usage_graph_with_synthetic_targets(self):
        # t1_x/y/z are synthetic targets derived from t1 (eg thrift codegen).
        t1 = self.make_java_target(spec=':t1', sources=['t1.thrift'])
        t1_x = self.make_java_target(spec=':t1.x', derived_from=t1)
        t1_y = self.make_java_target(spec=':t1.y', derived_from=t1)
        t1_z = self.make_java_target(spec=':t1.z', derived_from=t1)
        t2 = self.make_java_target(spec=':t2',
                                   sources=['a.java', 'b.java'],
                                   dependencies=[t1, t1_x, t1_y, t1_z])
        self.set_options(size_estimator='nosize')
        dep_usage, product_deps_by_src = self._setup({
            t1_x: ['x1.class'],
            t1_y: ['y1.class'],
            t1_z: ['z1.class', 'z2.class', 'z3.class'],
            t2: ['a.class', 'b.class'],
        })
        product_deps_by_src[t1] = {}
        product_deps_by_src[t1_x] = {}
        product_deps_by_src[t1_y] = {}
        product_deps_by_src[t1_z] = {}
        product_deps_by_src[t2] = {'a.java': ['x1.class'],
                                   'b.java': ['z1.class', 'z2.class']}
        graph = self.create_graph(dep_usage, [t1, t1_x, t1_y, t1_z, t2])
        # t1's total (5) is the sum of its derived targets' products (1+1+3),
        # and t2's 3 used products (x1, z1, z2) are attributed to t1's edge.
        self.assertEqual(graph._nodes[t1].products_total, 5)
        self.assertEqual(len(graph._nodes[t2].dep_edges[t1].products_used), 3)
        self._cover_output(graph)

    def create_graph(self, task, targets):
        classes_by_source = task.context.products.get_data('classes_by_source')
        runtime_classpath = task.context.products.get_data('runtime_classpath')
        product_deps_by_src = task.context.products.get_data('product_deps_by_src')
        def node_creator(target):
            return task.create_dep_usage_node(target, '',
                                              classes_by_source, runtime_classpath, product_deps_by_src)
        return DependencyUsageGraph(task.create_dep_usage_nodes(targets, node_creator),
                                    task.size_estimators[task.get_options().size_estimator])

    @ensure_cached(JvmDependencyUsage, expected_num_artifacts=2)
    def test_cache_write(self):
        # One artifact per target is expected in the cache (see decorator).
        t1 = self.make_java_target(spec=':t1', sources=['a.java'])
        self.create_file('a.java')
        t2 = self.make_java_target(spec=':t2', sources=['b.java'], dependencies=[t1])
        self.create_file('b.java')
        self.set_options(size_estimator='filecount')
        dep_usage, product_deps_by_src = self._setup({
            t1: ['a.class'],
            t2: ['b.class'],
        })
        product_deps_by_src[t1] = {}
        product_deps_by_src[t2] = {'b.java': ['a.class']}
        dep_usage.create_dep_usage_graph([t1, t2])
|
{
"content_hash": "0d961ea9a25d5caedfbca05af11de23b",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 120,
"avg_line_length": 43.13953488372093,
"alnum_prop": 0.6422282120395328,
"repo_name": "dbentley/pants",
"id": "e42b060bda7b5da0bd23892102931d2c9a888d39",
"size": "5712",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/test_jvm_dependency_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1569"
},
{
"name": "HTML",
"bytes": "64699"
},
{
"name": "Java",
"bytes": "290988"
},
{
"name": "JavaScript",
"bytes": "31040"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4277407"
},
{
"name": "Scala",
"bytes": "84066"
},
{
"name": "Shell",
"bytes": "50882"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
from typing import TYPE_CHECKING, Optional
from pluggy import HookspecMarker
if TYPE_CHECKING:
from sqlalchemy.orm.session import Session
from airflow.models.taskinstance import TaskInstance
from airflow.utils.state import TaskInstanceState
# Marker that declares the functions below as hook specifications of the
# "airflow" pluggy plugin system; listeners implement matching hookimpls.
hookspec = HookspecMarker("airflow")
@hookspec
def on_task_instance_running(
    previous_state: "TaskInstanceState", task_instance: "TaskInstance", session: Optional["Session"]
):
    """
    Called when task state changes to RUNNING. Previous_state can be State.NONE.

    :param previous_state: state before the transition; may be ``State.NONE``
    :param task_instance: the task instance entering RUNNING
    :param session: optional SQLAlchemy session available to the listener
    """
@hookspec
def on_task_instance_success(
    previous_state: "TaskInstanceState", task_instance: "TaskInstance", session: Optional["Session"]
):
    """
    Called when task state changes to SUCCESS. Previous_state can be State.NONE.

    :param previous_state: state before the transition; may be ``State.NONE``
    :param task_instance: the task instance entering SUCCESS
    :param session: optional SQLAlchemy session available to the listener
    """
@hookspec
def on_task_instance_failed(
    previous_state: "TaskInstanceState", task_instance: "TaskInstance", session: Optional["Session"]
):
    """
    Called when task state changes to FAIL. Previous_state can be State.NONE.

    :param previous_state: state before the transition; may be ``State.NONE``
    :param task_instance: the task instance entering the failed state
    :param session: optional SQLAlchemy session available to the listener
    """
|
{
"content_hash": "843726342a8e348ebefd55a039435e10",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 100,
"avg_line_length": 30.90625,
"alnum_prop": 0.7512639029322548,
"repo_name": "danielvdende/incubator-airflow",
"id": "fbaf63e89ac6e8cc96353475012441a75e63378e",
"size": "1776",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/listeners/spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from PLC.Nodes import Node
from PLC.Interfaces import Interface
from PLC.Slices import Slice
from PLC.Accessors.Factory import define_accessors, all_roles, tech_roles
import sys
# define_accessors attaches generated Get/Set accessor methods to this module,
# so it needs a reference to the module object itself.
current_module = sys.modules[__name__]
#### IPv6 addr/prefix to distribute to slivers on the node!
define_accessors(current_module, Interface, "SliversIPv6Prefix", "sliversipv6prefix",
                 "interface/ipv6", "The IPv6 Range/Prefix for the Slivers",
                 set_roles=tech_roles)
#### IPv6 address assigned to the sliver of a particular node!
# expose_in_api makes this tag readable through the public PLCAPI.
define_accessors(current_module, Slice, "IPv6Address", "ipv6_address",
                 "slice/usertools","IPv6 address assigned to the sliver in a particular node",
                 set_roles=all_roles, expose_in_api=True)
|
{
"content_hash": "b9a2b47083ed4663ec2dee3b8ef03432",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 94,
"avg_line_length": 40.526315789473685,
"alnum_prop": 0.7077922077922078,
"repo_name": "dreibh/planetlab-lxc-plcapi",
"id": "fb736db33d115fd5c930c60bfecd7df52045d08c",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLC/Accessors/Accessors_ipv6.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "2995"
},
{
"name": "PHP",
"bytes": "574445"
},
{
"name": "PLpgSQL",
"bytes": "2764"
},
{
"name": "Perl",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "871238"
},
{
"name": "Shell",
"bytes": "31392"
}
],
"symlink_target": ""
}
|
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from functools import partial
from scipy import stats
from shot_detector.filters.common import MathFilter
from .base_swfilter import BaseSWFilter
class BaseStatSWFilter(BaseSWFilter, MathFilter):
    """
    Sliding-window filter mixin providing descriptive statistics over a
    window of features: extrema, several kinds of means (arithmetic,
    weighted, exponential, gaussian), medians, variances and
    deviation-based thresholds.
    """
    __logger = logging.getLogger(__name__)
    # noinspection PyUnusedLocal
    @staticmethod
    def get_max(features, max_key=lambda x: x, **_kwargs):
        """
        Returns native max of given sequence of features.
        :param collections.Iterable[Feature] features:
            sequence of features
        :param lambda max_key: key function used for the comparison
        :param _kwargs: ignored; accepted for a uniform filter interface
        :return: the maximal feature according to ``max_key``
        """
        m = max(features, key=max_key)
        return m
    # noinspection PyUnusedLocal
    @staticmethod
    def get_min(features, min_key=lambda x: x, **_kwargs):
        """
        Returns native min of given sequence of features.
        :param features: sequence of features
        :param min_key: key function used for the comparison
        :param _kwargs: ignored; accepted for a uniform filter interface
        :return: the minimal feature according to ``min_key``
        """
        m = min(features, key=min_key)
        return m
    def get_mean(self, features, **kwargs):
        """
        Computes the mean selected by ``kwargs['mean_name']``
        (see `choose_mean`); defaults to the arithmetic mean.
        :param features: sequence of features
        :param kwargs: forwarded to `choose_mean` and the chosen mean
        :return: the mean value of ``features``
        """
        mean_function = self.choose_mean(**kwargs)
        mean_value = mean_function(features, **kwargs)
        return mean_value
    # noinspection PyUnusedLocal
    def choose_mean(self, mean_name=None, **_kwargs):
        """
        Maps a mean name onto its bound implementation; unknown or
        missing names fall back to the plain arithmetic mean.
        :param mean_name: one of 'WMA', 'EWMA', 'GWMA', 'median' or None
        :param _kwargs: ignored; accepted for a uniform filter interface
        :return: bound method implementing the requested mean
        """
        mean_function = self.get_average
        # weighted moving average
        if 'WMA' == mean_name:
            mean_function = self.get_wma
        # exponentially weighted moving average
        elif 'EWMA' == mean_name:
            mean_function = self.get_ewma
        # gaussian weighted moving average
        elif 'GWMA' == mean_name:
            mean_function = self.get_gwma
        elif 'median' == mean_name:
            mean_function = self.get_median
        return mean_function
    @staticmethod
    def get_sorted(features, sort_key=None,
                   norm_function=None, **kwargs):
        """
        Returns ``features`` sorted either by ``sort_key`` or, when
        ``norm_function`` is given, by the first component of each
        feature's norm.
        :param features: sequence of features
        :param sort_key: key function for plain sorting
        :param norm_function: optional norm; sorts by ``norm(x)[0]``
        :param kwargs: forwarded to ``norm_function``
        :return: sorted list of features
        """
        if norm_function:
            sort_fun = partial(norm_function, **kwargs)
            sorts = sorted(features, key=lambda x: sort_fun(x)[0])
        else:
            sorts = sorted(features, key=sort_key)
        return sorts
    def get_median(self, features, **kwargs):
        """
        Returns the median of ``features``; for an even-sized sequence,
        the average of the two middle elements.
        :param features: sequence of features
        :param kwargs: forwarded to `get_sorted`
        :return: the median value
        """
        sorts = self.get_sorted(features, **kwargs)
        length = int(len(sorts))
        if not length % 2:
            return (sorts[length // 2] + sorts[length // 2 - 1]) / 2.0
        return sorts[length // 2]
    # noinspection PyUnusedLocal
    @staticmethod
    def get_average(features, **_kwargs):
        """
        Returns the arithmetic mean of ``features``.
        Raises ZeroDivisionError for an empty sequence.
        :param features: non-empty sized sequence of features
        :param _kwargs: ignored; accepted for a uniform filter interface
        :return: arithmetic mean
        """
        features_len = len(features)
        average = 1.0 * sum(features) / features_len
        return average
    # noinspection PyUnusedLocal
    def get_wma(self, features, **_kwargs):
        """
        weighted moving average: linearly decreasing weights with the
        largest weight on the earliest feature; sequences shorter than
        two elements fall back to the plain mean.
        :param features: sized sequence of features
        """
        n = len(features)
        if n > 1:
            weighted_sum = 0
            for i, feature in enumerate(features):
                weighted_sum += feature * (n - i - 1)
            weighted_average = 2 * weighted_sum / (n * (n - 1))
            return weighted_average
        return self.get_mean(features)
    def get_ewma(self, features, alpha=None, **kwargs):
        """
        exponentially weighted moving average; ``alpha`` defaults to
        ``2 / (len(features) + 1)``. Returns 0 for an empty sequence.
        NOTE(review): delegates to the recursive `get_ewma_rest`, so
        very large windows may hit the interpreter recursion limit.
        :param features: sequence of features
        :param alpha: smoothing factor in (0, 1]
        """
        if not features:
            return 0
        features = list(features)
        size = len(features)
        if alpha is None:
            alpha = 2 / (size + 1)
        rest = self.get_ewma_rest(features, alpha, size, **kwargs)
        return rest
    def get_ewma_rest(self, features, alpha=None, size=0, **kwargs):
        """
        Recursive helper for `get_ewma`:
        ewma(x[:n]) = alpha * x[n-1] + (1 - alpha) * ewma(x[:n-1]).
        :param features: list of features (recursed on its prefix)
        :param alpha: smoothing factor in (0, 1]
        :param int size: number of elements still to fold
        :param kwargs: forwarded through the recursion
        """
        head = features[-1]
        rest = features[:-1]
        if size <= 1:
            return head
        rest = self.get_ewma_rest(rest, alpha, size - 1, **kwargs)
        ewa = alpha * head + (1 - alpha) * rest
        return ewa
    def get_gwma(self, features, **kwargs):
        """
        gaussian weighted moving average; delegates to
        ``gaussian_convolve`` — presumably provided by MathFilter
        (not visible here).
        :param features: sequence of features
        """
        gaussian_convolution = self.gaussian_convolve(features,
                                                      **kwargs)
        return gaussian_convolution
    def get_deviation(self, features, std_coef=3, **kwargs):
        """
        Returns the threshold ``mean + std_coef * std`` over the window.
        :param features: sequence of features
        :param std_coef: number of standard deviations above the mean
        :param kwargs: forwarded to `get_mean` and `get_std`
        :return: mean + std_coef * std
        """
        mean_value = self.get_mean(
            features=features,
            **kwargs
        )
        std_value = self.get_std(
            features=features,
            mean_value=mean_value,
            **kwargs
        )
        deviation = mean_value + std_value * std_coef
        return deviation
    def get_mad(self, features, **kwargs):
        """
        Mean absolute deviation
        :param features: non-empty sized sequence of features
        :param kwargs: forwarded to `get_mean`
        :return: mean of ``|x - mean(x)|``
        """
        features_len = 1.0 * len(features)
        mean_value = self.get_mean(
            features=features,
            **kwargs
        )
        mad_sum = 0.0
        for feature in features:
            mad_sum += abs(feature - mean_value)
        deviation = 1.0 * (mad_sum / features_len)
        return deviation
    def get_std_error(self, features, mean_value=None, **kwargs):
        """
        Computes SE_x = std / sqrt(n)
        See https://en.wikipedia.org/wiki/Standard_error
        :param features: non-empty sized sequence of features
        :param mean_value: precomputed mean, or None to compute it
        :param kwargs: forwarded to `get_std`
        :return: standard error of the mean
        """
        features_len = 1.0 * len(features)
        standard_deviation = self.get_std(
            features=features,
            mean_value=mean_value,
            **kwargs
        )
        standard_error = standard_deviation / self.sqrt(features_len)
        return standard_error
    def get_std(self, features, mean_value=None, **kwargs):
        """
        Computes corrected sample standard deviation
        (square root of the corrected sample variance).
        :param features: sequence of features
        :param mean_value: precomputed mean, or None to compute it
        :param kwargs: forwarded to `get_corrected_variance`
        :return: corrected sample standard deviation
        """
        corrected_variance = self.get_corrected_variance(
            features=features,
            mean_value=mean_value,
            **kwargs
        )
        standard_deviation_value = self.sqrt(corrected_variance)
        return standard_deviation_value
    def get_corrected_variance(self, features, mean_value=None,
                               **kwargs):
        """
        Compute corrected sample variance.
        :param features: list_like
            list of samples.
        :param mean_value:
            precomputed mean value, if None mean will be computed.
        :param kwargs:
            options for function `get_mean`
        :return: s² = (n /(n - 1)) * sₙ² = (n /(n - 1)) E[(x - E[x])²]
            corrected sample variance; for n == 1 the denominator is
            clamped (n set to 2) to avoid division by zero, yielding 0.
        """
        features_len = 1.0 * len(features)
        uncorrected_variance = self.get_uncorrected_variance(
            features=features,
            mean_value=mean_value,
            **kwargs
        )
        if 1 == features_len:
            features_len = 2
        corrected_variance = \
            features_len * uncorrected_variance / (features_len - 1)
        return corrected_variance
    def get_uncorrected_variance(self, features, mean_value=None,
                                 **kwargs):
        """
        Compute uncorrected sample variance.
        :param features: list_like
            list of samples.
        :param mean_value:
            precomputed mean value, if None mean will be computed.
        :param kwargs:
            options for function `get_mean`
        :return: sₙ² = E[(x - E[x])²]
            uncorrected sample variance
        """
        if mean_value is None:
            mean_value = self.get_mean(features, **kwargs)
        sum_list = []
        for feature in features:
            diff = feature - mean_value
            sum_list += [diff * diff]
        uncorrected_variance = self.get_mean(sum_list, **kwargs)
        return uncorrected_variance
    @staticmethod
    def describe(features, **_):
        """
        Full scipy descriptive statistics for the window.
        :param features: sequence of features
        :param _: ignored; accepted for a uniform filter interface
        :return: result of ``scipy.stats.describe``
        """
        return stats.describe(features)
|
{
"content_hash": "212b70525500f7c1161556ff5ab14f40",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 72,
"avg_line_length": 28.321766561514195,
"alnum_prop": 0.5228335932278904,
"repo_name": "w495/python-video-shot-detector",
"id": "a4458c8fc956f598a5faad7011199d01b8cff17a",
"size": "9010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shot_detector/filters/sliding_window/base_stat_swfilter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Makefile",
"bytes": "1751"
},
{
"name": "Python",
"bytes": "599048"
},
{
"name": "Shell",
"bytes": "89"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import pytest
import pytz
from marshmallow import ValidationError
from indico.util.marshmallow import NaiveDateTime
def test_NaiveDateTime_serialize():
    """Serialization: naive datetimes pass through; aware ones trip the assert."""
    naive_dt = datetime.now()
    aware_dt = pytz.utc.localize(datetime.utcnow())
    holder = type('Test', (), {
        'naive': naive_dt,
        'aware': aware_dt,
    })
    field = NaiveDateTime()
    assert field.serialize('naive', holder) == naive_dt.isoformat()
    with pytest.raises(AssertionError):
        field.serialize('aware', holder)
def test_NaiveDateTime_deserialize():
    """Deserialization: naive ISO strings parse; aware ones are rejected."""
    naive_dt = datetime.now()
    aware_dt = pytz.utc.localize(datetime.utcnow())
    field = NaiveDateTime()
    assert field.deserialize(naive_dt.isoformat()) == naive_dt
    with pytest.raises(ValidationError):
        field.deserialize(aware_dt.isoformat())
|
{
"content_hash": "897414e74c35d2385102aa326693877b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 59,
"avg_line_length": 27.482758620689655,
"alnum_prop": 0.6813048933500627,
"repo_name": "ThiefMaster/indico",
"id": "4598b58007c46ebe081b0b88c7fe3e9f6c8e8ca7",
"size": "1011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/util/marshmallow_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
# noinspection PyPackageRequirements
from django.views.decorators.csrf import csrf_exempt
from federation.entities.matrix.django.views import MatrixASTransactionsView
urlpatterns = [
    # Matrix application-service transactions endpoint. CSRF is exempted
    # because the homeserver POSTs here without a Django CSRF token.
    url(
        regex=r"transactions/(?P<txn_id>[\w-]+)$",
        view=csrf_exempt(MatrixASTransactionsView.as_view()),
        name="matrix-as-transactions",
    ),
]
|
{
"content_hash": "1d6812fb7a0fe78db91cfc3d3d9aa0c2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.7242268041237113,
"repo_name": "jaywink/social-federation",
"id": "15a6e7448a86d76f7a0e42a0010d621d355017fb",
"size": "425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "federation/entities/matrix/django/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2934"
},
{
"name": "Python",
"bytes": "124166"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the collectd RiakCS plugin.
setup(
    name='collectd-riakcs',
    version='0.2.0',
    description='A plugin for collectd to gather metrics for a RiakCS instance.',
    long_description=long_description,
    url='https://github.com/abulimov/collectd-riakcs',
    author='Alexander Bulimov',
    author_email='lazywolf0@gmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        'Environment :: No Input/Output (Daemon)',
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking',
        'Topic :: System :: Networking :: Monitoring',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    # What does your project relate to?
    keywords='collectd riakcs',
    py_modules=['collectd_riakcs'],
    install_requires=['requests', 'requests-aws'],
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['pylint'],
    },
)
|
{
"content_hash": "d894d4218b59d08a834195c246483625",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 81,
"avg_line_length": 31.338461538461537,
"alnum_prop": 0.6161021109474718,
"repo_name": "abulimov/collectd-riakcs",
"id": "0fd25d23e7b7b2d55b0fac39817b8e72b66fe098",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6678"
}
],
"symlink_target": ""
}
|
'''
New Integration Test for Request access multiple KVM VM console.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstacklib.utils.shell as shell
import zstackwoodpecker.operations.account_operations as acc_ops
# Suite-specific stub helpers, plus the list of VMs created by test();
# kept global so error_cleanup() can destroy them if the test fails.
test_stub = test_lib.lib_get_specific_stub()
vms = []
def test():
    """Create six VMs and verify every VM console address is reachable."""
    global vms
    vms.extend(test_stub.create_vm() for _ in range(6))
    session_uuid = acc_ops.login_as_admin()
    for vm in vms:
        if not vm:
            continue
        vm.check()
        console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid, session_uuid)
        if test_lib.lib_network_check(console.hostIp, console.port):
            test_util.test_logger('[vm:] %s console on %s:%s is connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
        else:
            test_util.test_fail('[vm:] %s console on %s:%s is not connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
    acc_ops.logout(session_uuid)
    for vm in vms:
        if not vm:
            continue
        vm.destroy()
        vm.check()
    test_util.test_pass('Request Access Multiple VM Console Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Destroy any VMs left behind when test() raises."""
    global vms
    for leftover in vms:
        if leftover:
            leftover.destroy()
|
{
"content_hash": "4fced8daead5e8178b908249c7c0701f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 134,
"avg_line_length": 29.97826086956522,
"alnum_prop": 0.612037708484409,
"repo_name": "zstackio/zstack-woodpecker",
"id": "49ea2dcac456a41ad02f558b5a196b8cc078ffda",
"size": "1379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/basic/test_get_multi_vm_console_address.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
import unittest
try:
import bezier
except ImportError: # pragma: NO COVER
bezier = None
# Failure-message templates used by NumPyTestCase.assertArrayEqual below.
WRONG_FLAGS_TEMPLATE = """\
Arrays are not Fortran contiguous
array1 flags =
{}
array2 flags =
{}
"""
WRONG_TYPE_TEMPLATE = """\
Arrays have different types
array1({}) =
{!r}
array2({}) =
{!r}
"""
WRONG_SHAPE_TEMPLATE = """\
Arrays have different shapes
array1{} =
{!r}
array2{} =
{!r}
"""
NOT_EQ_TEMPLATE = """\
Arrays not equal
array1 =
{!r}
array2 =
{!r}
"""
def get_random(seed):
    """Return a seeded NumPy random state for reproducible test fixtures."""
    import numpy as np

    random_state = np.random.RandomState(seed=seed)  # pylint: disable=no-member
    return random_state
def binary_round(value, num_bits):
    """Truncate ``value``'s mantissa to ``num_bits`` bits.

    NOTE: assumes ``value`` is not Inf/-Inf/NaN or a subnormal number,
    so its hex form is always ``[-]0x1.<13 hex digits>p<exp>``.
    """
    sign, mantissa_and_exp = value.hex().split("0x1.")
    mantissa_hex, exponent = mantissa_and_exp.split("p")
    assert len(mantissa_hex) == 13
    mantissa_bits = f"{int(mantissa_hex, 16):052b}"
    assert len(mantissa_bits) == 52
    # Keep the leading bits, zero-fill the rest back to 52 bits.
    kept_bits = mantissa_bits[:num_bits].ljust(52, "0")
    truncated_hex = f"{int(kept_bits, 2):013x}"
    return float.fromhex(f"{sign}0x1.{truncated_hex}p{exponent}")
def get_random_nodes(shape, seed, num_bits):
    """Return reproducible Fortran-ordered random nodes, rounded to ``num_bits`` bits."""
    import functools
    import numpy as np

    raw_nodes = np.asfortranarray(get_random(seed).random_sample(shape))
    # Round every node to ``num_bits`` bits to avoid round-off in tests.
    rounder = functools.partial(binary_round, num_bits=num_bits)
    return np.vectorize(rounder)(raw_nodes)
def ref_triangle_uniform_nodes(pts_exponent):
    """Return uniform nodes on the reference triangle, ``2**exp + 1`` per side.

    Using a power-of-two divisor keeps the division exact (no roundoff)
    for small exponents.
    """
    import numpy as np

    per_side = 2**pts_exponent + 1
    num_nodes = ((per_side + 1) * per_side) // 2
    nodes = np.zeros((num_nodes, 2), order="F")
    row = 0
    for y_val in range(per_side):
        for x_val in range(per_side - y_val):
            nodes[row, 0] = x_val
            nodes[row, 1] = y_val
            row += 1
    nodes /= per_side - 1.0
    return nodes
def check_plot_call(test_case, call, expected, **kwargs):
    """Verify a recorded plot call passed ``expected``'s two rows as x/y plus ``kwargs``."""
    import numpy as np

    # Unpack the call as name, positional args, keyword args
    _, args, keywords = call
    test_case.assertEqual(keywords, kwargs)
    test_case.assertEqual(len(args), 2)
    for position in (0, 1):
        test_case.assertEqual(
            np.asfortranarray(args[position]),
            np.asfortranarray(expected[position, :]),
        )
def needs_speedup(test_class):
    """Class decorator: skip the tests unless compiled ``bezier`` speedups exist."""
    has_speedup = bezier is not None and bezier._HAS_SPEEDUP  # pragma: NO COVER
    return unittest.skipUnless(has_speedup, "No speedup available")(test_class)
def almost(test_case, expected, actual, num_ulps):
    """Assert ``actual`` is within ``num_ulps`` ULPs of the nonzero ``expected``."""
    import numpy as np

    test_case.assertNotEqual(expected, 0.0)
    tolerance = num_ulps * np.spacing(expected)
    test_case.assertAlmostEqual(actual, expected, delta=tolerance)
class NumPyTestCase(unittest.TestCase):
    """``TestCase`` whose ``assertEqual`` compares ``np.ndarray`` structurally."""

    def __init__(self, *args, **kwargs):
        import numpy as np

        super().__init__(*args, **kwargs)
        self.addTypeEqualityFunc(np.ndarray, self.assertArrayEqual)

    def assertArrayEqual(self, arr1, arr2, msg=None):
        """Fail unless both arrays are Fortran-ordered with equal dtype, shape, values."""
        import numpy as np

        both_fortran = arr1.flags.f_contiguous and arr2.flags.f_contiguous
        if not both_fortran:  # pragma: NO COVER
            standard_msg = WRONG_FLAGS_TEMPLATE.format(arr1.flags, arr2.flags)
            self.fail(self._formatMessage(msg, standard_msg))
        if arr1.dtype is not arr2.dtype:  # pragma: NO COVER
            standard_msg = WRONG_TYPE_TEMPLATE.format(
                arr1.dtype, arr1, arr2.dtype, arr2
            )
            self.fail(self._formatMessage(msg, standard_msg))
        if arr1.shape != arr2.shape:  # pragma: NO COVER
            standard_msg = WRONG_SHAPE_TEMPLATE.format(
                arr1.shape, arr1, arr2.shape, arr2
            )
            self.fail(self._formatMessage(msg, standard_msg))
        if not np.all(arr1 == arr2):  # pragma: NO COVER
            standard_msg = NOT_EQ_TEMPLATE.format(arr1, arr2)
            self.fail(self._formatMessage(msg, standard_msg))
|
{
"content_hash": "c0112e1d1296a5deb27cd9076cf0de2a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 78,
"avg_line_length": 29.04,
"alnum_prop": 0.6294765840220385,
"repo_name": "dhermes/bezier",
"id": "3a1be52f6bb656721ccaddd0a5f5e9c9f7686f7b",
"size": "4902",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11812"
},
{
"name": "CMake",
"bytes": "4735"
},
{
"name": "Cython",
"bytes": "11990"
},
{
"name": "Dockerfile",
"bytes": "2291"
},
{
"name": "Fortran",
"bytes": "660533"
},
{
"name": "Makefile",
"bytes": "3873"
},
{
"name": "Python",
"bytes": "1125695"
},
{
"name": "Shell",
"bytes": "25364"
},
{
"name": "TeX",
"bytes": "5153"
}
],
"symlink_target": ""
}
|
import os
def acc():
    """Write package.json in the current working directory.

    Reads the module-level globals ``project``, ``description`` and
    ``author`` (collected from the prompts below) and writes the 7-line
    manifest. Fixes the original's leaked file handle by using a context
    manager, and uses ``json.dump`` so quotes in user input are escaped
    instead of producing broken JSON.
    """
    import json

    manifest = {
        "name": project,
        "description": description,
        "author": author,
        "width": 800,
        "height": 600,
    }
    # indent=1 reproduces the original one-space-indented, 7-line layout.
    with open(os.getcwd() + "/package.json", "w") as package:
        json.dump(manifest, package, indent=1)
    print("Wrote 7 lines.")
# Interactively collect project metadata, preview the resulting
# package.json, and write it only on explicit "Y"/"y" confirmation.
author = input("Author: ")
project = input("Project Name: ")
description = input("Description: ")
print("package.json: ")
print("{")
print(" \"name\": \""+project+"\",")
print(" \"description\": \""+description+"\",")
print(" \"author\": \""+author+"\",")
print(" \"width\": 800,")
print(" \"height\": 600")
print("}")
accept = input("Are you Happy with this? (Y/N) ")
# Any answer other than "Y"/"y" silently skips writing the file.
if accept == "Y":
    acc()
    pass
elif accept == "y":
    acc()
    pass
|
{
"content_hash": "94228d43052b66f4ea963ef838fbc04f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 59,
"avg_line_length": 23.170731707317074,
"alnum_prop": 0.5589473684210526,
"repo_name": "iplo/Specter",
"id": "66aa45ef811900bbb1c354329529bff9ef296808",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specter/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155"
},
{
"name": "HTML",
"bytes": "750"
},
{
"name": "JavaScript",
"bytes": "334"
},
{
"name": "Python",
"bytes": "3783"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class ChangesResponse:
    """Response wrapper for a GroupDocs comparison "changes" call.

    NOTE: This class is auto generated by the swagger code generator
    program. Do not edit the class manually.
    """
    def __init__(self):
        # Maps each attribute name to its swagger type name.
        self.swaggerTypes = dict(
            result='ChangesResult',
            status='str',
            error_message='str',
            composedOn='long',
        )
        self.result = None         # ChangesResult payload, filled by the client
        self.status = None         # str request status
        self.error_message = None  # str error details, if any
        self.composedOn = None     # long composition timestamp
|
{
"content_hash": "c4f15a3c78ef7e86b6231142b6e69d8c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 30.18918918918919,
"alnum_prop": 0.6410026857654432,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "dad9718d9c34394978eb02b70aa41a39ce666632",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/ChangesResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
}
|
def greeting():
    """Return the twiOpinion welcome banner."""
    return "Welcome to twiOpinion 1.1.3!"
def startingInfo():
    """Return the contents of the bundled ../startingInfo.txt file."""
    with open("../startingInfo.txt", "r") as info_file:
        contents = info_file.read()
    return contents
|
{
"content_hash": "3058f42c8fe1ed48c695e5305d623f01",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.625,
"repo_name": "orangeYao/twiOpinion",
"id": "7ddb43f1d8b8002061aa64435c6fabc324dc6225",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testGui/versionControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "41503"
},
{
"name": "Objective-C",
"bytes": "25470"
},
{
"name": "Python",
"bytes": "348940"
},
{
"name": "Shell",
"bytes": "919"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and exercise one artificial series: 1024 daily points, linear
# trend, weekly cycle, Difference transform, AR(12), 20 exogenous
# variables, zero noise (sigma=0.0).
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
{
"content_hash": "ec55812d81bee770b0ae8c154e879e3c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 169,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7100371747211895,
"repo_name": "antoinecarme/pyaf",
"id": "a61ecfa3201f7963eb0e8d78992c4a4fd85e704d",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_LinearTrend/cycle_7/ar_12/test_artificial_1024_Difference_LinearTrend_7_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
""" configuration management """
import json
class ConfVar(object):
    """
    The atomic unit of a Conf object: an immutable name and description
    plus a mutable value.
    """
    def __init__(self, name, description, value):
        self._name, self._description, self.value = name, description, value
    @property
    def name(self):
        """The (read-only) variable name."""
        return self._name
    @property
    def description(self):
        """The (read-only) human-readable description."""
        return self._description
    def to_dict(self):
        """Return a plain-dict form, used by Conf.to_json()."""
        return {
            'name': self._name,
            'description': self._description,
            'value': self.value
        }
    def __eq__(self, other):
        # Comparing against a non-ConfVar used to raise AttributeError;
        # returning NotImplemented lets Python fall back (== -> False).
        if not isinstance(other, ConfVar):
            return NotImplemented
        return (
            self.name == other.name and
            self.description == other.description and
            self.value == other.value)
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class Conf(dict):
    """Dictionary of ConfVar entries, also addressable as attributes."""
    def __init__(self, *args, **kwargs):
        for arg in args:
            # isinstance (not ``type(arg) == ConfVar``) so ConfVar
            # subclasses are accepted; same AssertionError on bad input.
            assert isinstance(arg, ConfVar)
            kwargs[arg.name] = arg
        super(Conf, self).__init__([], **kwargs)
        # Alias the instance dict to itself so conf.key works like conf['key'].
        self.__dict__ = self
    def __str__(self):
        items = []
        for key, value in self.get_all():
            items.append('%s: %s' % (key, value))
        return '\n'.join(items)
    @classmethod
    def from_json(cls, content):
        """Build a Conf from a JSON string produced by to_json()."""
        dic = json.loads(content)
        confvars = []
        for key, vard in dic.items():
            cvar = ConfVar(vard['name'], vard['description'], vard['value'])
            confvars.append(cvar)
        return cls(*confvars)
    def to_json(self):
        """Serialize every ConfVar to an indented JSON string."""
        out = {}
        for key, var in self.items():
            out[key] = var.to_dict()
        return json.dumps(out, indent=4)
    def get_all(self):
        """Yield (key, value) pairs for every stored variable."""
        for key, cvar in self.items():
            yield key, cvar.value
    def get_str(self, key, default=None):
        """Return the value for ``key`` as str, or ``default`` if absent."""
        val = self.get(key)
        return str(val.value) if val else default
    def get_int(self, key, default=None):
        """Return the value for ``key`` as int, or ``default`` if absent or unparsable."""
        val = self.get(key)
        if val:
            try:
                return int(val.value)
            except ValueError:
                pass
        return default
    def describe(self, key, default=''):
        """Return the description for ``key``, or ``default`` if absent."""
        cvar = self.get(key)
        return cvar.description if cvar else default
    def describe_all(self):
        """Return one 'key: description' line per stored variable."""
        items = []
        for key, cvar in self.items():
            items.append('%s: %s' % (key, cvar.description))
        return '\n'.join(items)
|
{
"content_hash": "530d5288a9ecf14dbad74ee29d617d47",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 76,
"avg_line_length": 26.25263157894737,
"alnum_prop": 0.5252606255012029,
"repo_name": "rgs1/xcmd",
"id": "12fe06dea80aeeb1ec87e94283cb1656913df01c",
"size": "2494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xcmd/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37892"
}
],
"symlink_target": ""
}
|
import os
import json
from subprocess import check_output, DEVNULL, CalledProcessError
from .constants import ffmpeg
# Derive the ffprobe path from the configured ffmpeg path: same
# directory, mirroring ffmpeg's ".exe" suffix on Windows.
ffprobe = os.path.join(os.path.split(ffmpeg)[0], 'ffprobe')
if ffmpeg.endswith('.exe'):
    ffprobe += '.exe'
def get_iframes(filename, read_interval=None):
    """
    List I-frame timestamps of a video by inspecting decoded frames.

    Args:
        filename: path to the video
        read_interval: string specifying a read_interval, see
            https://ffmpeg.org/ffprobe.html

    Returns:
        list of ``pkt_pts_time`` strings for I-frames, or None if
        ffprobe exits with an error.
    """
    args = [
        "-show_frames",
        "-select_streams", "v"
    ]
    if read_interval:
        args.extend(["-read_intervals", read_interval])
    try:
        results = check_output([
            # Use the resolved ffprobe path (handles the Windows ".exe"
            # case) like get_iframes2/probe_video do; the previous bare
            # "ffprobe" string relied on PATH lookup.
            ffprobe,
            '-loglevel', '16',
            *args,
            '-i', filename,
            '-of', 'json'
        ],
            universal_newlines=True,
            stderr=DEVNULL,
            stdin=DEVNULL
        )
    except CalledProcessError:
        return None
    else:
        r = json.loads(results)
        return [frame['pkt_pts_time'] for frame in r['frames'] if frame['pict_type'].upper() == 'I']
def get_iframes2(filename, read_interval=None):
    """Get all iframes in a video via packet flags.

    :param filename: path to video
    :param read_interval: A sequence of ffmpeg intervals separated by ","
        see read_intervals here: https://www.ffmpeg.org/ffprobe.html
    :return: a list of iframe timestamps as floats, or None on ffprobe error
        e.g. [0.033333, 1.133333, 2.233333, ...]
    """
    cmd = [
        ffprobe,
        "-loglevel", "16",
        "-show_packets",
        "-select_streams", "v",
        "-show_entries", "packet=pts_time,flags"
    ]
    if read_interval:
        cmd.extend(["-read_intervals", read_interval])
    cmd.extend(["-i", filename, "-of", "json"])
    try:
        output = check_output(cmd, universal_newlines=True)
    except CalledProcessError:
        return None
    # Keyframe packets carry a 'K' in their flags field.
    parsed = json.loads(output)
    return [float(packet['pts_time'])
            for packet in parsed['packets'] if 'K' in packet['flags']]
def probe_video(filename, stream='v', entries=()):
    """Return ffprobe stream metadata for *filename*.

    Args:
        filename: path to the video.
        stream: ffprobe stream specifier (default 'v', the video streams).
        entries: iterable of stream entry names to request, e.g. ('width',).
    Returns:
        the decoded 'streams' list, or None when ffprobe fails or returns
        no stream data.
    """
    try:
        results = check_output([
            ffprobe,
            '-loglevel', '16',
            '-show_entries', 'stream={}'.format(','.join(entries)),
            '-select_streams', stream,
            '-i', filename,
            '-of', 'json'
        ],
            universal_newlines=True,
            stderr=DEVNULL,
            stdin=DEVNULL
        )
    except CalledProcessError:
        return None
    else:
        try:
            # A missing key on a dict raises KeyError, not IndexError; the
            # old handler could never fire, so missing 'streams' crashed.
            return json.loads(results)['streams']
        except (KeyError, IndexError):
            return None
|
{
"content_hash": "c07553ba470e24b43f6f4a920902d9e2",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 100,
"avg_line_length": 26.93069306930693,
"alnum_prop": 0.5426470588235294,
"repo_name": "wlerin/showroom",
"id": "e2cdb9a1b25556f1bfb7a50cf35bb0f7ec5907d6",
"size": "2720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "showroom/archive/probe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "294752"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class StunTimeout(A10BaseClass):
    """Set STUN timeout for endpoint-independent filtering.

    CRUD-capable wrapper (inherits `common/A10BaseClass`); this is the
    `"PARENT"` class for this module. Parameters mirror the AXAPI schema:

    :param port: Single Destination Port or Port Range Start (1-65535, required)
    :param port_end: Port Range End (1-65535, required)
    :param stun_timeout_val_port_range: STUN timeout in minutes
        (0-60, default 2, optional)
    :param uuid: uuid of the object (optional, modify-not-allowed)
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/cgnv6/stateful-firewall/udp/stun-timeout/{port}+{port_end}`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["port", "port_end"]
        self.b_key = "stun-timeout"
        self.a10_url = "/axapi/v3/cgnv6/stateful-firewall/udp/stun-timeout/{port}+{port_end}"
        self.DeviceProxy = ""
        # Empty-string placeholders; real values arrive via kwargs below.
        self.uuid = ""
        self.stun_timeout_val_port_range = ""
        self.port = ""
        self.port_end = ""
        for name, value in kwargs.items():
            setattr(self, name, value)
|
{
"content_hash": "81f919645f6f083ad515f353b83c8e3d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 191,
"avg_line_length": 41.5,
"alnum_prop": 0.6351118760757315,
"repo_name": "a10networks/a10sdk-python",
"id": "847e85ede66fdaac0074b72d4c83bb1a1d88d132",
"size": "1743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/cgnv6/cgnv6_stateful_firewall_udp_stun_timeout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
import autograd.numpy as np
"""
References:
https://en.wikipedia.org/wiki/Activation_function
"""
def sigmoid(z):
    """Logistic sigmoid: maps any real input into (0, 1)."""
    return 1.0 / (np.exp(-z) + 1.0)
def softmax(z):
    """Row-wise softmax; shifts by the row max for numerical stability."""
    shifted = z - np.amax(z, axis=1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)
def linear(z):
    """Identity activation: returns the input unchanged."""
    return z
def softplus(z):
    """Smooth relu, log(1 + e^z).

    Computed as logaddexp(0, z) to avoid overflow for large z; see
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.logaddexp.html
    """
    return np.logaddexp(0.0, z)
def softsign(z):
    """Softsign activation: z / (1 + |z|), bounded in (-1, 1)."""
    return z / (np.abs(z) + 1)
def tanh(z):
    """Hyperbolic tangent activation, bounded in (-1, 1)."""
    return np.tanh(z)
def relu(z):
    """Rectified linear unit: max(0, z) elementwise."""
    return np.maximum(z, 0)
def leakyrelu(z, a=0.01):
    """Leaky relu: identity for positive z, slope *a* for negative z."""
    return np.maximum(a * z, z)
def get_activation(name):
    """Return activation function by name.

    Raises:
        ValueError: if *name* does not match a module-level function.
    """
    try:
        return globals()[name]
    except KeyError:
        # Only a missing name can fail here; the old broad Exception handler
        # hid nothing else.
        raise ValueError("Invalid activation function.")
|
{
"content_hash": "610f1a4b95821380dd960b787f620987",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 18.098039215686274,
"alnum_prop": 0.6338028169014085,
"repo_name": "rushter/MLAlgorithms",
"id": "949cdf75b7465714e30198599f5830e2587cbf6b",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mla/neuralnet/activations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "174"
},
{
"name": "Python",
"bytes": "124757"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from dv_apps.datafiles.models import Datafile, FileMetadata, DatafileTag,\
DatafileCategory, FilemetadataDatafileCategory
class DatafileAdmin(admin.ModelAdmin):
    """Admin for Datafile: search by label, filter by status/type."""
    save_on_top = True
    search_fields = ('label',)
    list_filter = ('ingeststatus', 'restricted', 'contenttype')
    list_display = ('dvobject', 'filesize', 'contenttype', 'ingeststatus', 'restricted')
admin.site.register(Datafile, DatafileAdmin)
class FileMetadataAdmin(admin.ModelAdmin):
    """Admin for FileMetadata rows."""
    save_on_top = True
    search_fields = ('label', 'description')
    list_filter = ('restricted',)
    list_display = ('label', 'version', 'restricted', 'description')
    # 'datasetversion' is intentionally left out of the edit form.
    fields = ('label', 'description', 'version', 'datafile')
admin.site.register(FileMetadata, FileMetadataAdmin)
class DatafileCategoryAdmin(admin.ModelAdmin):
    """Admin for DatafileCategory rows."""
    save_on_top = True
    search_fields = ('name',)
    list_display = ('name', 'dataset')
admin.site.register(DatafileCategory, DatafileCategoryAdmin)
class DatafileTagAdmin(admin.ModelAdmin):
    """Admin for DatafileTag rows."""
    save_on_top = True
    search_fields = ('name',)
    list_display = ('dtype', 'datafile')
admin.site.register(DatafileTag, DatafileTagAdmin)
class FilemetadataDatafileCategoryAdmin(admin.ModelAdmin):
    """Admin for the FileMetadata <-> DatafileCategory join table."""
    save_on_top = True
    search_fields = ('filemetadatas__label',)
    list_display = ('filecategories', 'filemetadatas')
admin.site.register(FilemetadataDatafileCategory, FilemetadataDatafileCategoryAdmin)
|
{
"content_hash": "c099dc23dcae086da04322d9a789ec39",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 88,
"avg_line_length": 35.78048780487805,
"alnum_prop": 0.7280163599182005,
"repo_name": "IQSS/miniverse",
"id": "0bfc04c0762507d360e5da9829e9eba56f055a8d",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dv_apps/datafiles/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "171690"
},
{
"name": "HTML",
"bytes": "218468"
},
{
"name": "JavaScript",
"bytes": "3789254"
},
{
"name": "Python",
"bytes": "710342"
}
],
"symlink_target": ""
}
|
import logging
import os
import pipes
import subprocess
from shortcut.channel import const as C
# Channel metadata: this dictionary source only works on OSX.
platform_whitelist = ['darwin']
TYPE = 'osx'
TITLE = 'OSX Dictionary'
DESCRIPTION = """You can lookup word definition through OSX's builtin dictionary."""
# Module-level lock flag: True while a lookup subprocess is running.
busy = False
def lookup(word, channel=None, msg=None):
    """Look up *word* via the bundled ./lookup helper (OSX dictionary).

    Returns a dict tagged with 'source': TYPE, carrying either the text
    'definition' or an error entry under C.ERROR. Only one lookup runs at a
    time, guarded by the module-level `busy` flag. `channel` and `msg` are
    accepted for the channel API but unused here.
    """
    global busy
    if not word:
        return {
            'source': TYPE,
            C.ERROR: 'Bad Parameter',
        }
    if busy:
        return {
            'source': TYPE,
            C.ERROR: 'Busy',
        }
    busy = True
    path = os.path.dirname(__file__)
    args = './lookup %s' % pipes.quote(word)
    # put them in list not working; the quote here is very important,
    # otherwise it would be very insecure to run arbitrary cmds here
    result = None
    try:
        f = subprocess.Popen(args=args, shell=True, cwd=path,
                             stdout=subprocess.PIPE)
        result = f.communicate()[0]
        result = {
            'source': TYPE,
            'word': word,
            'format': 'text',
            'definition': result,
        }
    except Exception, e:  # Python 2 except syntax; file predates py3.
        logging.error('Dictionary on_lookup error: %s' % e, exc_info=True)
        result = {
            'source': TYPE,
            C.ERROR: '%s' % e,
        }
    busy = False  # always release the lock before returning
    return result
|
{
"content_hash": "cff249d5b4499cd790f301f7fd49c114",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 85,
"avg_line_length": 25.823529411764707,
"alnum_prop": 0.5504935459377372,
"repo_name": "yjpark/shortcut",
"id": "ef293571a24ce989107ae01eb95b322752dd2d68",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channels/ext/dictionary/osx/osx_dictionary_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "20944"
},
{
"name": "CSS",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "35771"
},
{
"name": "Objective-C",
"bytes": "101821"
},
{
"name": "Perl",
"bytes": "43"
},
{
"name": "Python",
"bytes": "55595"
},
{
"name": "Shell",
"bytes": "468"
}
],
"symlink_target": ""
}
|
from context import *
from settings.filemgmt import fileManager
from settings.paths import MSD_MXM, MSD_TID_YEAR, CHARTED_TIDS, \
MXM, UNCHARTED, sep
def loadSet(fileName):
    """Return the full contents of *fileName* as a single string.

    The previous implementation concatenated line by line — which still read
    the whole file into memory (despite its comment) and was quadratic in the
    number of lines. A single read() yields the identical string.
    """
    with open(fileName) as lyricsFile:
        return lyricsFile.read()
if __name__ == '__main__':
    # Load id lists / feature tables produced by earlier pipeline steps.
    charted_tid = loadSet(CHARTED_TIDS).split('\n')
    msd_mxm = loadSet(MSD_MXM).split('\n')
    msd_tid_year = loadSet(MSD_TID_YEAR).split('\n')
    mxm = loadSet(MXM).split('\n')
    # First field of each row is the track id.
    filtered = [i.split(sep)[0] for i in msd_tid_year]
    filtered1 = [i.split(',')[0] for i in mxm]
    yo = []
    for i in msd_mxm:
        # Keep tracks present in both tables but not in the charted set.
        if i in filtered and i in filtered1 and i not in charted_tid:
            tid_year = msd_tid_year[filtered.index(i)]
            bow = mxm[filtered1.index(i)]
            # Join the year row with the bag-of-words counts (drop the
            # first two comma fields of the mxm row).
            newRow = ','.join(tid_year.split(sep)) + ',' + \
                ','.join(bow.split(',')[2:])
            yo.append(newRow)
    fileManager(UNCHARTED, 'w', '\n'.join(sorted(yo)))
|
{
"content_hash": "6039c254395355a360be94391819cb17",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 27.3,
"alnum_prop": 0.5833333333333334,
"repo_name": "kug3lblitz/Heat-Replay",
"id": "5172cd50826b29c4cb335bb8acd5a47c83ad870f",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/code/db/mgmt/final/uncharted_final.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "890554"
},
{
"name": "Python",
"bytes": "52104"
}
],
"symlink_target": ""
}
|
"""The islamic_prayer_times component."""
from datetime import timedelta
import logging
from prayer_times_calculator import PrayerTimesCalculator, exceptions
from requests.exceptions import ConnectionError as ConnError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later, async_track_point_in_time
import homeassistant.util.dt as dt_util
from .const import (
CALC_METHODS,
CONF_CALC_METHOD,
DATA_UPDATED,
DEFAULT_CALC_METHOD,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# YAML schema: optional top-level domain block with an optional calculation
# method restricted to the known CALC_METHODS; unrelated keys pass through.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: {
            vol.Optional(CONF_CALC_METHOD, default=DEFAULT_CALC_METHOD): vol.In(
                CALC_METHODS
            ),
        }
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Import the Islamic Prayer component from config."""
    if DOMAIN not in config:
        return True
    # Forward the YAML config into a config-entry import flow.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN]
        )
    )
    return True
async def async_setup_entry(hass, config_entry):
    """Set up the Islamic Prayer Component."""
    client = IslamicPrayerClient(hass, config_entry)
    ok = await client.async_setup()
    if not ok:
        return False
    hass.data.setdefault(DOMAIN, client)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload Islamic Prayer entry from config_entry."""
    # Cancel any scheduled update before dropping the client.
    if hass.data[DOMAIN].event_unsub:
        hass.data[DOMAIN].event_unsub()
    hass.data.pop(DOMAIN)
    await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
    return True
class IslamicPrayerClient:
    """Islamic Prayer Client Object.

    Fetches today's prayer times and schedules the next refresh around the
    calculated (Islamic) midnight.
    """
    def __init__(self, hass, config_entry):
        """Initialize the Islamic Prayer client."""
        self.hass = hass
        self.config_entry = config_entry
        self.prayer_times_info = {}  # prayer name -> parsed datetime for today
        self.available = True  # False while the upstream API is unreachable
        self.event_unsub = None  # cancel handle for the scheduled update
    @property
    def calc_method(self):
        """Return the calculation method."""
        return self.config_entry.options[CONF_CALC_METHOD]
    def get_new_prayer_times(self):
        """Fetch prayer times for today (blocking; run in an executor)."""
        calc = PrayerTimesCalculator(
            latitude=self.hass.config.latitude,
            longitude=self.hass.config.longitude,
            calculation_method=self.calc_method,
            date=str(dt_util.now().date()),
        )
        return calc.fetch_prayer_times()
    async def async_schedule_future_update(self):
        """Schedule future update for sensors.
        Midnight is a calculated time. The specifics of the calculation
        depends on the method of the prayer time calculation. This calculated
        midnight is the time at which the time to pray the Isha prayers have
        expired.
        Calculated Midnight: The Islamic midnight.
        Traditional Midnight: 12:00AM
        Update logic for prayer times:
        If the Calculated Midnight is before the traditional midnight then wait
        until the traditional midnight to run the update. This way the day
        will have changed over and we don't need to do any fancy calculations.
        If the Calculated Midnight is after the traditional midnight, then wait
        until after the calculated Midnight. We don't want to update the prayer
        times too early or else the timings might be incorrect.
        Example:
        calculated midnight = 11:23PM (before traditional midnight)
        Update time: 12:00AM
        calculated midnight = 1:35AM (after traditional midnight)
        update time: 1:36AM.
        """
        _LOGGER.debug("Scheduling next update for Islamic prayer times")
        now = dt_util.utcnow()
        midnight_dt = self.prayer_times_info["Midnight"]
        if now > dt_util.as_utc(midnight_dt):
            # Already past the calculated midnight: update a minute after the
            # same moment tomorrow.
            next_update_at = midnight_dt + timedelta(days=1, minutes=1)
            _LOGGER.debug(
                "Midnight is after day the changes so schedule update for after Midnight the next day"
            )
        else:
            _LOGGER.debug(
                "Midnight is before the day changes so schedule update for the next start of day"
            )
            next_update_at = dt_util.start_of_local_day(now + timedelta(days=1))
        _LOGGER.info("Next update scheduled for: %s", next_update_at)
        self.event_unsub = async_track_point_in_time(
            self.hass, self.async_update, next_update_at
        )
    async def async_update(self, *_):
        """Update sensors with new prayer times."""
        try:
            prayer_times = await self.hass.async_add_executor_job(
                self.get_new_prayer_times
            )
            self.available = True
        except (exceptions.InvalidResponseError, ConnError):
            self.available = False
            _LOGGER.debug("Error retrieving prayer times.")
            # Retry in a minute instead of waiting for the next scheduled run.
            async_call_later(self.hass, 60, self.async_update)
            return
        for prayer, time in prayer_times.items():
            self.prayer_times_info[prayer] = dt_util.parse_datetime(
                f"{dt_util.now().date()} {time}"
            )
        await self.async_schedule_future_update()
        _LOGGER.debug("New prayer times retrieved. Updating sensors.")
        async_dispatcher_send(self.hass, DATA_UPDATED)
    async def async_setup(self):
        """Set up the Islamic prayer client."""
        await self.async_add_options()
        try:
            # First fetch doubles as a connectivity check.
            await self.hass.async_add_executor_job(self.get_new_prayer_times)
        except (exceptions.InvalidResponseError, ConnError):
            raise ConfigEntryNotReady
        await self.async_update()
        self.config_entry.add_update_listener(self.async_options_updated)
        self.hass.async_create_task(
            self.hass.config_entries.async_forward_entry_setup(
                self.config_entry, "sensor"
            )
        )
        return True
    async def async_add_options(self):
        """Add options for entry."""
        if not self.config_entry.options:
            # Migrate the calc method from entry data into entry options.
            data = dict(self.config_entry.data)
            calc_method = data.pop(CONF_CALC_METHOD, DEFAULT_CALC_METHOD)
            self.hass.config_entries.async_update_entry(
                self.config_entry, data=data, options={CONF_CALC_METHOD: calc_method}
            )
    @staticmethod
    async def async_options_updated(hass, entry):
        """Triggered by config entry options updates."""
        # Cancel the pending refresh and recompute with the new options.
        if hass.data[DOMAIN].event_unsub:
            hass.data[DOMAIN].event_unsub()
        await hass.data[DOMAIN].async_update()
|
{
"content_hash": "7bfc61564518026f011aa736ca3f1949",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 102,
"avg_line_length": 33.092233009708735,
"alnum_prop": 0.6364969928120874,
"repo_name": "robbiet480/home-assistant",
"id": "90a31890d162d043ec0c6138a9f4f01637edabec",
"size": "6817",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/islamic_prayer_times/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import logging
from quantum.db import db_base_plugin_v2
from quantum.db import models_v2
from quantum.plugins.linuxbridge.db import l2network_db as cdb
LOG = logging.getLogger(__name__)
class LinuxBridgePluginV2(db_base_plugin_v2.QuantumDbPluginV2):
    """
    LinuxBridgePlugin provides support for Quantum abstractions
    using LinuxBridge. A new VLAN is created for each network.
    It relies on an agent to perform the actual bridge configuration
    on each host.
    """
    def __init__(self):
        # Bind the VLAN-binding tables onto the shared v2 model base.
        cdb.initialize(base=models_v2.model_base.BASEV2)
        LOG.debug("Linux Bridge Plugin initialization complete")
    def create_network(self, context, network):
        """Create the network row, then reserve and bind a VLAN id to it."""
        new_network = super(LinuxBridgePluginV2, self).create_network(context,
                                                                     network)
        try:
            vlan_id = cdb.reserve_vlanid()
            cdb.add_vlan_binding(vlan_id, new_network['id'])
        except:
            # NOTE(review): bare except is deliberate cleanup-then-reraise --
            # roll the network row back on *any* failure, then re-raise.
            super(LinuxBridgePluginV2, self).delete_network(context,
                                                            new_network['id'])
            raise
        return new_network
    def delete_network(self, context, id):
        """Release the network's VLAN binding, then delete the network row."""
        vlan_binding = cdb.get_vlan_binding(id)
        cdb.release_vlanid(vlan_binding['vlan_id'])
        cdb.remove_vlan_binding(id)
        return super(LinuxBridgePluginV2, self).delete_network(context, id)
|
{
"content_hash": "528e907760856bfd3741c3ab1b5d428a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 36.43589743589744,
"alnum_prop": 0.6213933849401829,
"repo_name": "savi-dev/quantum",
"id": "ffb52ba7173ccb986a3bd96ac807a18b7c929fd6",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/plugins/linuxbridge/lb_quantum_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "18263"
},
{
"name": "Python",
"bytes": "1519204"
},
{
"name": "Shell",
"bytes": "7766"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import concurrent.futures._base as futbase
import execnet
import inspect
import textwrap
def _worker(channel):
    """Pure function for running tasks on a host.

    Runs remotely via execnet: receives (ident, source, call_name, args,
    kwargs) tuples, compiles and executes *source*, calls the named function,
    and replies with (failed, ident, result) until the channel closes.
    """
    import traceback
    while not channel.isclosed():
        ident, source, call_name, args, kwargs = channel.receive()
        co = compile(source+'\n', '', 'exec')
        loc = {}
        exec co in loc  # Python 2 exec-statement form
        try:
            res = loc[call_name](*args, **kwargs)
        except BaseException:
            # Ship the traceback text back instead of the exception object.
            res = traceback.format_exc()
            failed = True
        else:
            failed = False
        try:
            channel.send((failed, ident, res))
        except BaseException:
            # Result not serializable by execnet; report a generic failure.
            channel.send((True, ident, 'unserializable result'))
class RemoteException(Exception):
    """Raised locally when a remotely-executed task failed.

    Carries the remote traceback text, whitespace-stripped.
    """
    def __init__(self, text):
        self.text = text.strip()
    def __str__(self):
        return self.text
class GatewayExecutor(futbase.Executor):
    """concurrent.futures-style Executor that runs tasks on execnet gateways.

    Each gateway gets a persistent _worker channel; submitted tasks queue in
    _pending_tasks and are handed to idle gateways one at a time.
    """
    def __init__(self, group):
        self._group = group
        self._pending_tasks = {}  # ident -> (future, fn, args, kwargs)
        self._running_tasks = {}  # ident -> (future, fn, args, kwargs)
        self._busy_gateways = {}  # ident -> gateway running that task
        self._idle_gateways = set()
        self._channels = {}  # gateway -> its _worker channel
        for gateway in self._group:
            chan = gateway.remote_exec(_worker)
            chan.setcallback(self._message)
            self._channels[gateway] = chan
            self._idle_gateways.add(gateway)
    def _message(self, msg):
        """Channel callback: resolve the finished future, recycle the gateway."""
        # Future finished.
        failed, ident, res = msg
        fut, _, _, _ = self._running_tasks[ident]
        if failed:
            fut.set_exception(RemoteException(res))
        else:
            fut.set_result(res)
        # Gateway no longer busy.
        gw = self._busy_gateways.pop(ident)
        self._idle_gateways.add(gw)
        self._advance()
    def _advance(self):
        """Run a new task if possible (a pending task and idle gateway
        are both available).
        """
        if self._idle_gateways and self._pending_tasks:
            ident = self._pending_tasks.iterkeys().next()  # py2 dict iteration
            fut, fn, args, kwargs = self._pending_tasks.pop(ident)
            if not fut.set_running_or_notify_cancel():
                return
            self._running_tasks[ident] = (fut, fn, args, kwargs)
            gw = self._idle_gateways.pop()
            self._busy_gateways[ident] = gw
            call_name = fn.__name__
            # FIXME need checks here like _source_of_function
            source = inspect.getsource(fn.func_code)
            source = textwrap.dedent(source)
            self._channels[gw].send((ident, source, call_name, args, kwargs))
    def submit(self, fn, *args, **kwargs):
        """Queue fn(*args, **kwargs) for remote execution; return its Future."""
        fut = futbase.Future()
        # Next unused ident (py2 keys() returns lists, so + concatenates).
        idents = self._pending_tasks.keys() + self._running_tasks.keys()
        if idents:
            ident = max(idents) + 1
        else:
            ident = 0
        self._pending_tasks[ident] = (fut, fn, args, kwargs)
        self._advance()
        return fut
    def shutdown(self, wait=True):
        """Close all worker channels and terminate the gateway group."""
        for chan in self._channels.itervalues():
            chan.close()
        self._group.terminate()
        # FIXME wait=False?
|
{
"content_hash": "5ad2d8529c2116cf80774beacc903960",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 29.046728971962615,
"alnum_prop": 0.5514800514800515,
"repo_name": "sampsyo/execnet-futures",
"id": "1388b19a037a4911ab2f871f403526c41aec6449",
"size": "3108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5230"
}
],
"symlink_target": ""
}
|
"""Creates a GCE instance template for Forseti Security."""
def GenerateConfig(context):
  """Generate configuration.

  Builds the deployment-manager resource list for the Forseti explain VM:
  one compute.v1.instance whose startup script installs dependencies, the
  Cloud SQL proxy, a default rules.yaml, protoc and Forseti itself, then
  starts the API server under upstart.
  """
  # Source Forseti either from a git branch clone or a release tarball.
  if context.properties.get('branch-name'):
    DOWNLOAD_FORSETI = """
      git clone {}.git --branch {} --single-branch forseti-security
      cd forseti-security
      """.format(
          context.properties['src-path'],
          context.properties['branch-name'])
  else:
    DOWNLOAD_FORSETI = """
      wget -qO- {}/archive/v{}.tar.gz | tar xvz
      cd forseti-security-{}
      """.format(
          context.properties['src-path'],
          context.properties['release-version'],
          context.properties['release-version'])
  # project:region:instance string consumed by cloud_sql_proxy -instances=.
  CLOUDSQL_CONN_STRING = '{}:{}:{}'.format(
      context.env['project'],
      '$(ref.cloudsql-instance.region)',
      '$(ref.cloudsql-instance.name)')
  SCANNER_BUCKET = context.properties['scanner-bucket']
  DATABASE_NAME = context.properties['database-name']
  SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes']
  resources = []
  resources.append({
      'name': '{}-explain-vm'.format(context.env['deployment']),
      'type': 'compute.v1.instance',
      'properties': {
          'zone': context.properties['zone'],
          'machineType': (
              'https://www.googleapis.com/compute/v1/projects/{}'
              '/zones/{}/machineTypes/{}'.format(
                  context.env['project'], context.properties['zone'],
                  context.properties['instance-type'])),
          'disks': [{
              'deviceName': 'boot',
              'type': 'PERSISTENT',
              'boot': True,
              'autoDelete': True,
              'initializeParams': {
                  'sourceImage': (
                      'https://www.googleapis.com/compute/v1'
                      '/projects/{}/global/images/family/{}'.format(
                          context.properties['image-project'],
                          context.properties['image-family']
                      )
                  )
              }
          }],
          'networkInterfaces': [{
              'network': (
                  'https://www.googleapis.com/compute/v1/'
                  'projects/{}/global/networks/default'.format(
                      context.env['project'])),
              'accessConfigs': [{
                  'name': 'External NAT',
                  'type': 'ONE_TO_ONE_NAT'
              }]
          }],
          'serviceAccounts': [{
              'email': context.properties['service-account'],
              'scopes': SERVICE_ACCOUNT_SCOPES,
          }],
          'metadata': {
              'items': [{
                  'key': 'startup-script',
                  'value': """#!/bin/bash
# Forseti setup
sudo apt-get install -y git unzip
# Forseti dependencies
sudo apt-get install -y libmysqlclient-dev python-pip python-dev
USER_HOME=/home/ubuntu
FORSETI_PROTOC_URL=https://raw.githubusercontent.com/GoogleCloudPlatform/forseti-security/master/data/protoc_url.txt
# Install fluentd if necessary
FLUENTD=$(ls /usr/sbin/google-fluentd)
if [ -z "$FLUENTD" ]; then
      cd $USER_HOME
      curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
      bash install-logging-agent.sh
fi
# Check whether Cloud SQL proxy is installed
CLOUD_SQL_PROXY=$(ls $USER_HOME/cloud_sql_proxy)
if [ -z "$CLOUD_SQL_PROXY" ]; then
      cd $USER_HOME
      wget https://dl.google.com/cloudsql/cloud_sql_proxy.{}
      mv cloud_sql_proxy.{} cloud_sql_proxy
      chmod +x cloud_sql_proxy
fi
$USER_HOME/cloud_sql_proxy -instances={}=tcp:{} &
# Check if rules.yaml exists
RULES_FILE=$(gsutil ls gs://{}/rules/rules.yaml)
if [ $? -eq 1 ]; then
      cd $USER_HOME
      read -d '' RULES_YAML << EOF
rules:
  - name: sample whitelist
    mode: whitelist
    resource:
      - type: organization
        applies_to: self_and_children
        resource_ids:
          - {}
    inherit_from_parents: true
    bindings:
      - role: roles/*
        members:
          - serviceAccount:*@*.gserviceaccount.com
EOF
      echo "$RULES_YAML" > $USER_HOME/rules.yaml
      gsutil cp $USER_HOME/rules.yaml gs://{}/rules/rules.yaml
fi
# Check whether protoc is installed
PROTOC_PATH=$(which protoc)
if [ -z "$PROTOC_PATH" ]; then
      cd $USER_HOME
      PROTOC_DOWNLOAD_URL=$(curl -s $FORSETI_PROTOC_URL)
      if [ -z "$PROTOC_DOWNLOAD_URL" ]; then
            echo "No PROTOC_DOWNLOAD_URL set: $PROTOC_DOWNLOAD_URL"
            exit 1
      else
            wget $PROTOC_DOWNLOAD_URL
            unzip -o $(basename $PROTOC_DOWNLOAD_URL)
            sudo cp bin/protoc /usr/local/bin
      fi
fi
# Install Forseti Security
cd $USER_HOME
rm -rf forseti-*
pip install --upgrade pip
pip install --upgrade setuptools
pip install google-apputils grpcio grpcio-tools protobuf
cd $USER_HOME
# Download Forseti src; see DOWNLOAD_FORSETI
{}
python setup.py install
# Create upstart script for API server
read -d '' API_SERVER << EOF
description "Explain API Server"
author "Felix Matenaar <fmatenaar@google.com>"
start on runlevel [234]
stop on runlevel[0156]
chdir $USER_HOME
export PYTHONPATH=.
respawn
exec /usr/local/bin/forseti_api
EOF
echo "$API_SERVER" > /etc/init/forseti_api.conf
initctl reload-configuration
start forseti_api
""".format(
    # cloud_sql_proxy
    context.properties['cloudsqlproxy-os-arch'],
    context.properties['cloudsqlproxy-os-arch'],
    CLOUDSQL_CONN_STRING,
    context.properties['db-port'],
    # rules.yaml
    SCANNER_BUCKET,
    context.properties['organization-id'],
    SCANNER_BUCKET,
    # install forseti
    DOWNLOAD_FORSETI,
)
              }]
          }
      }
  })
  return {'resources': resources}
|
{
"content_hash": "1ba8ebe06d635f2b7fdccbe42d0abe8f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 116,
"avg_line_length": 30.128865979381445,
"alnum_prop": 0.5704020530367836,
"repo_name": "felixbb/forseti-security",
"id": "a5bc3676cab59cb6fa6c5f87a21a134d8e2892c9",
"size": "6420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deployment-templates/py/explain-instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4155"
},
{
"name": "Protocol Buffer",
"bytes": "9190"
},
{
"name": "Python",
"bytes": "1068694"
},
{
"name": "Shell",
"bytes": "6114"
}
],
"symlink_target": ""
}
|
import argparse
import os
import urllib
import xml.etree.ElementTree as ET
import tarfile
import tempfile
import json
import re
from glob import glob
from shutil import copyfile
# Default gThumb catalog location, expanded to an absolute path.
gt_catalog_dir = '~/.local/share/gthumb/catalogs'
gt_catalog_dir = os.path.expanduser(gt_catalog_dir)
def get_cat_name(cat_file):
    """Return the catalog base name ('/a/b/foo.catalog' -> 'foo').

    Falls back to the unmodified argument when it does not look like a
    .catalog path.
    """
    match = re.search('([^/]+)\.catalog$', cat_file)
    if match is None:
        return cat_file
    return match.group(1)
def get_catalogs():
    """Return {1-based index: {'name', 'file'}} for every gThumb catalog.

    The old implementation shadowed the builtin `dict` and kept a manual
    counter; enumerate(start=1) produces the identical mapping.
    """
    catalogs = {}
    for i, cat_file in enumerate(glob(gt_catalog_dir + '/*.catalog'), start=1):
        catalogs[i] = {"name": get_cat_name(cat_file), "file": cat_file}
    return catalogs
def read_catalog(cat_name, cat_file):
    """Return [{'file', 'arcname'}] for each still-existing pic in a catalog.

    *cat_file* is a gThumb XML catalog; each <file uri="file://..."> entry is
    URL-unquoted (Python 2 urllib) and the leading 'file://' (7 characters)
    stripped. Archive names are sequential: <cat_name>/<n><original ext>.
    """
    pics = [];  # NOTE(review): dead initialization, re-assigned below
    root = ET.parse(cat_file).getroot()
    files_tag = root.find('files')
    pics = []
    count = 0
    for f in files_tag.findall('file'):
        count = count + 1
        pic_path = urllib.unquote(f.get('uri'))[7:]
        if os.path.isfile(pic_path): # skip removed pics
            arcname = cat_name + "/" + str(count) + os.path.splitext(pic_path)[1]
            pics.append({ 'file': pic_path, 'arcname': arcname })
    return pics
def create_json(cat_name, pics):
    """Write a slideshow-descriptor JSON for *pics* to a temp file.

    Returns the temp file's path. Currently unused by __main__ (the tar-only
    export path is active). Relies on Python 2 semantics:
    NamedTemporaryFile opens in binary mode and str-is-bytes makes
    f.write(json_str) valid.
    """
    pix = []
    for pic in pics:
        pix.append(pic['arcname'])
    d = { 'name': cat_name,
        'type': 'slideshow catalog',
        'showDuration': 5000,
        'autoForward': "false",
        'animation': "RANDOM",
        'animDuration': 2000,
        'pics': pix }
    json_str = json.dumps(d, indent=4)
    # Turn the quoted placeholders into real JSON boolean literals.
    json_str = json_str.replace('"false"', "false")
    json_str = json_str.replace('"true"', "true")
    f = tempfile.NamedTemporaryFile(delete=False)
    f.write(json_str)
    f.close()
    return f.name
# A variant that also embedded a JSON descriptor used to exist here:
#   create_tar(tar_name, pics, json_file, json_name)
def create_tar(tar_name, pics):
    """Pack every picture into *tar_name* under its archive name."""
    archive = tarfile.open(tar_name, "w")
    for pic in pics:
        archive.add(pic['file'], pic['arcname'])
    archive.close()
if __name__ == "__main__":
    # Interactive entry point: list catalogs, ask which one to export,
    # then write ~/<catalog>.tar. Python 2 (print statements, raw_input).
    parser = argparse.ArgumentParser(description="Export gThumb catalogs. Script asks interactively for required input.")
    args = parser.parse_args()
    catalogs = get_catalogs()
    num_catalogs = len(catalogs)
    if num_catalogs > 0:
        print "Found %d gThumb catalogs:\n" % num_catalogs
        for i in catalogs:
            print "(%d) %s" % (i, catalogs[i]['name'])
        try:
            selected=int(raw_input('\nWhich catalog do you want to export (type number)? '))
            if (selected < 1 or selected > num_catalogs):
                print "\nSorry, no such catalog exists!\n"
            else:
                cat_name = catalogs[selected]['name']
                print "\nThanks, you selected catalog number %d" % selected
                print "Exporting catalog '%s'...\n" % cat_name
                print "The exported catalog will be stored in your home directory!\n"
                pics = read_catalog(cat_name, catalogs[selected]['file'])
                if len(pics) > 0:
                    tar_name = os.path.expanduser('~/' + cat_name + '.tar')
                    # JSON-descriptor export path, currently disabled:
                    #json_name = 'pics.json'
                    #json_file = create_json(cat_name, pics)
                    #create_tar(tar_name, pics, json_file, json_name)
                    create_tar(tar_name, pics)
                    #os.unlink(json_file)
                    print "Exported gThumb catalog to %s\n" % tar_name
                else:
                    print "Sorry, catalog is empty! Nothing to export.\n"
        except ValueError:
            # raw_input produced a non-integer answer.
            print "\nSorry, that's no number!\n"
    else:
        print "Sorry, no catalogs found.\n"
|
{
"content_hash": "4a8083a87c9bb06e5c171fefe5d08124",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 121,
"avg_line_length": 34.570093457943926,
"alnum_prop": 0.5601513922681806,
"repo_name": "janis-lindholm/glide-slideshow",
"id": "c26cdd9c623aa99b0beddfaf740e4a10789ed3a8",
"size": "3723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "converter/gthumb-catalog-to-portret.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7046"
},
{
"name": "JavaScript",
"bytes": "25774"
},
{
"name": "Python",
"bytes": "3723"
}
],
"symlink_target": ""
}
|
from recipe_engine import recipe_api
from . import android
from . import default
"""Chromecast flavor, used for running code on Chromecast"""
class ChromecastFlavor(android.AndroidFlavor):
def __init__(self, m):
    """Point the device directories at Chromecast-safe locations."""
    super(ChromecastFlavor, self).__init__(m)
    self._ever_ran_adb = False
    self._user_ip = ''  # cached 'ip:port' string, read lazily
    # Disk space is extremely tight on the Chromecasts (~100M) There is not
    # enough space on the android_data_dir (/cache/skia) to fit the images,
    # resources, executable and output the dm images. So, we have dm_out be
    # on the tempfs (i.e. RAM) /dev/shm. (which is about 140M)
    data_dir = '/cache/skia/'
    self.device_dirs = default.DeviceDirs(
        bin_dir = '/cache/skia/bin',
        dm_dir = '/dev/shm/skia/dm_out',
        perf_data_dir = data_dir + 'perf',
        resource_dir = data_dir + 'resources',
        images_dir = data_dir + 'images',
        lotties_dir = data_dir + 'lotties',
        skp_dir = data_dir + 'skps',
        svg_dir = data_dir + 'svgs',
        mskp_dir = data_dir + 'mskp',
        tmp_dir = data_dir)
@property
def user_ip_host(self):
    """Return the Chromecast's 'ip:port', read once from ~/chromecast.txt."""
    if not self._user_ip:
        self._user_ip = self.m.run(self.m.python.inline, 'read chromecast ip',
                                   program="""
import os
CHROMECAST_IP_FILE = os.path.expanduser('~/chromecast.txt')
with open(CHROMECAST_IP_FILE, 'r') as f:
  print f.read()
""",
                                   stdout=self.m.raw_io.output(),
                                   infra_step=True).stdout
    return self._user_ip
@property
def user_ip(self):
    """The Chromecast's IP address, without the port."""
    host_with_port = self.user_ip_host
    return host_with_port.partition(':')[0]
def install(self):
    """Run the base install, then ensure the on-device bin dir exists."""
    super(ChromecastFlavor, self).install()
    self._adb('mkdir ' + self.device_dirs.bin_dir,
              'shell', 'mkdir', '-p', self.device_dirs.bin_dir)
def _adb(self, title, *cmd, **kwargs):
if not self._ever_ran_adb:
self._connect_to_remote()
self._ever_ran_adb = True
# The only non-infra adb steps (dm / nanobench) happen to not use _adb().
if 'infra_step' not in kwargs:
kwargs['infra_step'] = True
return self._run(title, 'adb', *cmd, **kwargs)
def _connect_to_remote(self):
self.m.run(self.m.step, 'adb connect %s' % self.user_ip_host, cmd=['adb',
'connect', self.user_ip_host], infra_step=True)
def create_clean_device_dir(self, path):
# Note: Chromecast does not support -rf
self._adb('rm %s' % path, 'shell', 'rm', '-r', path)
self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
def copy_directory_contents_to_device(self, host, device):
# Copy the tree, avoiding hidden directories and resolving symlinks.
# Additionally, due to space restraints, we don't push files > 3 MB
# which cuts down the size of the SKP asset to be around 50 MB as of
# version 41.
self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device),
program="""
import os
import subprocess
import sys
host = sys.argv[1]
device = sys.argv[2]
for d, _, fs in os.walk(host):
p = os.path.relpath(d, host)
if p != '.' and p.startswith('.'):
continue
for f in fs:
print os.path.join(p,f)
hp = os.path.realpath(os.path.join(host, p, f))
if os.stat(hp).st_size > (1.5 * 1024 * 1024):
print "Skipping because it is too big"
else:
subprocess.check_call(['adb', 'push',
hp, os.path.join(device, p, f)])
""", args=[host, device], infra_step=True)
def cleanup_steps(self):
if self._ever_ran_adb:
# To clean up disk space for next time
self._ssh('Delete executables', 'rm', '-r', self.device_dirs.bin_dir,
abort_on_failure=False, infra_step=True)
# Reconnect if was disconnected
self._adb('disconnect', 'disconnect')
self._connect_to_remote()
self.m.run(self.m.python.inline, 'dump log', program="""
import os
import subprocess
import sys
out = sys.argv[1]
log = subprocess.check_output(['adb', 'logcat', '-d'])
for line in log.split('\\n'):
tokens = line.split()
if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
addr, path = tokens[-2:]
local = os.path.join(out, os.path.basename(path))
if os.path.exists(local):
sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
line = line.replace(addr, addr + ' ' + sym.strip())
print line
""",
args=[self.host_dirs.bin_dir],
infra_step=True,
abort_on_failure=False)
self._adb('disconnect', 'disconnect')
self._adb('kill adb server', 'kill-server')
def _ssh(self, title, *cmd, **kwargs):
# Don't use -t -t (Force psuedo-tty allocation) like in the ChromeOS
# version because the pseudo-tty allocation seems to fail
# instantly when talking to a Chromecast.
# This was excacerbated when we migrated to kitchen and was marked by
# the symptoms of all the ssh commands instantly failing (even after
# connecting and authenticating) with exit code -1 (255)
ssh_cmd = ['ssh', '-oConnectTimeout=15', '-oBatchMode=yes',
'-T', 'root@%s' % self.user_ip] + list(cmd)
return self.m.run(self.m.step, title, cmd=ssh_cmd, **kwargs)
def step(self, name, cmd, **kwargs):
app = self.host_dirs.bin_dir.join(cmd[0])
self._adb('push %s' % cmd[0],
'push', app, self.device_dirs.bin_dir)
cmd[0] = '%s/%s' % (self.device_dirs.bin_dir, cmd[0])
self._ssh(str(name), *cmd, infra_step=False)
|
{
"content_hash": "8ebec93f42c1f222a4bba5b746f566d6",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 82,
"avg_line_length": 37.453947368421055,
"alnum_prop": 0.5803618478833655,
"repo_name": "youtube/cobalt_sandbox",
"id": "e0bbfb12d6f19ca0cfe4f674445be11bd83f6c9b",
"size": "5856",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/skia/infra/bots/recipe_modules/flavor/chromecast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import json
import datetime
from indra import get_config
# Before the import, we have to deal with the CLASSPATH to avoid clashes
# with REACH.
def _set_classpath():
    """Rewrite CLASSPATH so Eidos is present and REACH is removed.

    REACH and Eidos cannot coexist on the JVM classpath, so any REACH
    entry is dropped and the Eidos jar is appended if missing.
    """
    classpath = os.environ.get('CLASSPATH')
    eidos_path = get_config('EIDOSPATH')
    reach_path = get_config('REACHPATH')
    entries = classpath.split(':') if classpath else []
    kept = []
    eidos_present = False
    # Walk the existing entries once, filtering and detecting Eidos.
    for entry in entries:
        # Drop any entry that points at REACH
        if not reach_path or os.path.abspath(entry) != reach_path:
            kept.append(entry)
        # Remember whether Eidos is already present
        if eidos_path and os.path.abspath(entry) == eidos_path:
            eidos_present = True
    if eidos_path and not eidos_present:
        kept.append(eidos_path)
    # Publish the rewritten CLASSPATH
    os.environ['CLASSPATH'] = ':'.join(kept)
_set_classpath()
from indra.java_vm import autoclass
eidos_package = 'org.clulab.wm.eidos'
class EidosReader(object):
    """Reader object keeping an instance of the Eidos reader as a singleton.

    This allows the Eidos reader to be initialized only when the first piece
    of text is read; the subsequent readings are done with the same
    instance of the reader and are therefore faster.

    Attributes
    ----------
    eidos_reader : org.clulab.wm.eidos.EidosSystem
        A Scala object, an instance of the Eidos reading system. It is
        instantiated only when first processing text.
    """

    def __init__(self):
        self.eidos_reader = None      # lazily created JVM object
        self.default_ontology = None  # cached ontology YAML string

    def get_default_ontology(self):
        """Return (and cache) the default world ontology as a YAML string."""
        if self.default_ontology is None:
            from indra.ontology.world import world_ontology
            self.default_ontology = world_ontology.dump_yml_str()
        return self.default_ontology

    def initialize_reader(self):
        """Instantiate the Eidos reader attribute of this reader."""
        eidos = autoclass(eidos_package + '.EidosSystem')
        self.eidos_reader = eidos()

    def reground_texts(self, texts, yaml_str=None, topk=10,
                       is_canonicalized=False, filter=True):
        """Reground each text against an ontology.

        Returns a list (one entry per text) of lists of
        (grounding, score) tuples, highest scoring first.
        """
        if self.eidos_reader is None:
            self.initialize_reader()
        if yaml_str is None:
            yaml_str = self.get_default_ontology()
        # The JVM API takes a Scala Seq, not a Python list.
        text_seq = _list_to_seq(texts)
        raw_groundings = \
            self.eidos_reader.components().ontologyHandler().reground(
                'Custom',  # name
                yaml_str,  # ontologyYaml
                text_seq,  # texts
                filter,  # filter
                topk,  # topk
                is_canonicalized  # isAlreadyCanonicalized
            )
        # Process the return values into a proper Python representation
        groundings = [[_get_scored_grounding(entry) for entry in text_grounding]
                      for text_grounding in raw_groundings]
        return groundings

    def process_text(self, text):
        """Return a mentions JSON object given text.

        Parameters
        ----------
        text : str
            Text to be processed.

        Returns
        -------
        json_dict : dict
            A JSON object of mentions extracted from text.
        """
        if self.eidos_reader is None:
            self.initialize_reader()
        # Scala Option arguments are wrapped in Some(...)
        default_arg = lambda x: autoclass('scala.Some')(x)
        today = datetime.date.today().strftime("%Y-%m-%d")
        fname = 'default_file_name'
        annot_doc = self.eidos_reader.extractFromText(
            text,
            False,  # CAG-relevant only
            default_arg(today),  # doc creation time
            default_arg(fname)  # file name
        )
        # We need to get a Scala Seq of annot docs here
        ml = _list_to_seq([annot_doc])
        # We currently do not need to instantiate the adjective grounder;
        # if we want to reinstate it, we would need to do the following
        # ag = EidosAdjectiveGrounder.fromConfig(
        #    EidosSystem.defaultConfig.getConfig("adjectiveGrounder"))
        # We now create a JSON-LD corpus
        jc = autoclass(eidos_package + '.serialization.json.JLDCorpus')
        corpus = jc(ml)
        # Finally, serialize the corpus into JSON string
        mentions_json = corpus.toJsonStr()
        json_dict = json.loads(mentions_json)
        return json_dict
def _list_to_seq(lst):
    """Return a scala.collection.Seq built from the Python list *lst*."""
    # MutableList implements Seq and supports cheap appends.
    seq = autoclass('scala.collection.mutable.MutableList')()
    for item in lst:
        seq.appendElem(item)
    return seq
def _get_scored_grounding(tpl):
ts = tpl.toString()
parts = ts[1:-1].rsplit(',', maxsplit=1)
return parts[0], float(parts[1])
|
{
"content_hash": "f762136ae08bd6a9b9a8e32941afc062",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 34.19565217391305,
"alnum_prop": 0.6094511549057003,
"repo_name": "johnbachman/belpy",
"id": "3c506ab37c27a94d22453c8027232bcd3f0c1d31",
"size": "4719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indra/sources/eidos/reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "318177"
},
{
"name": "Ruby",
"bytes": "433"
},
{
"name": "Shell",
"bytes": "430"
}
],
"symlink_target": ""
}
|
import collections
import logging
from django.conf import settings
from django.forms import ValidationError
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
PROJECT_REQUIRED = api.keystone.VERSIONS.active < 3
class PasswordMixin(forms.SelfHandlingForm):
    """Mixin adding password + confirmation fields with match validation."""
    password = forms.RegexField(
        label=_("Password"),
        widget=forms.PasswordInput(render_value=False),
        regex=validators.password_validator(),
        error_messages={'invalid': validators.password_validator_msg()})
    confirm_password = forms.CharField(
        label=_("Confirm Password"),
        widget=forms.PasswordInput(render_value=False))
    no_autocomplete = True

    def clean(self):
        '''Check to make sure password fields match.'''
        # NOTE(review): super(forms.Form, self) deliberately skips the
        # intermediate classes' clean() in the MRO — confirm before changing.
        data = super(forms.Form, self).clean()
        if 'password' in data and 'confirm_password' in data:
            if data['password'] != data['confirm_password']:
                raise ValidationError(_('Passwords do not match.'))
        return data
class BaseUserForm(forms.SelfHandlingForm):
    """Base for user forms: populates the 'project' field's choices.

    Subclasses must declare a 'project' choice field; on keystone v3 the
    choices come from the user's domain, on v2 from the user's own tenants.
    """

    def __init__(self, request, *args, **kwargs):
        super(BaseUserForm, self).__init__(request, *args, **kwargs)

        # Populate project choices
        project_choices = []

        # If the user is already set (update action), list only projects which
        # the user has access to.
        user_id = kwargs['initial'].get('id', None)
        domain_id = kwargs['initial'].get('domain_id', None)
        default_project_id = kwargs['initial'].get('project', None)
        try:
            if api.keystone.VERSIONS.active >= 3:
                projects, has_more = api.keystone.tenant_list(
                    request, domain=domain_id)
            else:
                projects, has_more = api.keystone.tenant_list(
                    request, user=user_id)

            # Only enabled projects are offered, sorted case-insensitively.
            for project in sorted(projects, key=lambda p: p.name.lower()):
                if project.enabled:
                    project_choices.append((project.id, project.name))
            if not project_choices:
                project_choices.insert(0, ('', _("No available projects")))
            # TODO(david-lyle): if keystoneclient is fixed to allow unsetting
            # the default project, then this condition should be removed.
            elif default_project_id is None:
                project_choices.insert(0, ('', _("Select a project")))
            self.fields['project'].choices = project_choices
        except Exception:
            # Best-effort: a keystone failure leaves the choices empty.
            LOG.debug("User: %s has no projects", user_id)
class AddExtraColumnMixIn(object):
    """Mix-in that appends USER_TABLE_EXTRA_INFO fields to a user form."""

    def add_extra_fields(self, ordering=None):
        """Add one optional CharField per configured extra column.

        When *ordering* is a non-empty list, each new field name is appended
        to it so callers can control the final field order.
        """
        # Extra columns are only supported with keystone v3.
        if api.keystone.VERSIONS.active < 3:
            return
        extra_info = getattr(settings, 'USER_TABLE_EXTRA_INFO', {})
        for field_name, label in extra_info.items():
            self.fields[field_name] = forms.CharField(label=label,
                                                      required=False)
            if ordering:
                ordering.append(field_name)
ADD_PROJECT_URL = "horizon:identity:projects:create"
class CreateUserForm(PasswordMixin, BaseUserForm, AddExtraColumnMixIn):
    """Form for creating a keystone user with password, project and role."""
    # Hide the domain_id and domain_name by default
    domain_id = forms.CharField(label=_("Domain ID"),
                                required=False,
                                widget=forms.HiddenInput())
    domain_name = forms.CharField(label=_("Domain Name"),
                                  required=False,
                                  widget=forms.HiddenInput())
    name = forms.CharField(max_length=255, label=_("User Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(
                                  attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    email = forms.EmailField(
        label=_("Email"),
        required=False)
    project = forms.ThemableDynamicChoiceField(label=_("Primary Project"),
                                               required=PROJECT_REQUIRED,
                                               add_item_link=ADD_PROJECT_URL)
    role_id = forms.ThemableChoiceField(label=_("Role"),
                                        required=PROJECT_REQUIRED)
    enabled = forms.BooleanField(label=_("Enabled"),
                                 required=False,
                                 initial=True)

    def __init__(self, *args, **kwargs):
        """Build the form; 'roles' (a list of role objects) must be passed
        as a keyword argument by the calling view/workflow."""
        roles = kwargs.pop('roles')
        super(CreateUserForm, self).__init__(*args, **kwargs)
        # Reorder form fields from multiple inheritance
        ordering = ["domain_id", "domain_name", "name",
                    "description", "email", "password",
                    "confirm_password", "project", "role_id",
                    "enabled"]
        self.add_extra_fields(ordering)
        self.fields = collections.OrderedDict(
            (key, self.fields[key]) for key in ordering)
        role_choices = [
            (role.id, role.name) for role in
            sorted(roles, key=lambda r: r.name.lower())
        ]
        self.fields['role_id'].choices = role_choices

        # For keystone V3, display the two fields in read-only
        if api.keystone.VERSIONS.active >= 3:
            readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
            self.fields["domain_id"].widget = readonlyInput
            self.fields["domain_name"].widget = readonlyInput
        # For keystone V2.0, hide description field
        else:
            self.fields["description"].widget = forms.HiddenInput()

    # We have to protect the entire "data" dict because it contains the
    # password and confirm_password strings.
    @sensitive_variables('data')
    def handle(self, request, data):
        """Create the user, then grant the selected role on the project.

        Returns the new user object on success; errors are surfaced via
        horizon messages/exceptions and yield a falsy return.
        """
        domain = api.keystone.get_default_domain(self.request, False)
        try:
            LOG.info('Creating user with name "%s"', data['name'])
            desc = data["description"]
            if "email" in data:
                # Normalize empty string to None so keystone stores no email.
                data['email'] = data['email'] or None
            # add extra information
            if api.keystone.VERSIONS.active >= 3:
                EXTRA_INFO = getattr(settings, 'USER_TABLE_EXTRA_INFO', {})
                kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
            else:
                kwargs = {}
            new_user = \
                api.keystone.user_create(request,
                                         name=data['name'],
                                         email=data['email'],
                                         description=desc or None,
                                         password=data['password'],
                                         project=data['project'] or None,
                                         enabled=data['enabled'],
                                         domain=domain.id,
                                         **kwargs)
            messages.success(request,
                             _('User "%s" was successfully created.')
                             % data['name'])
            # Grant the chosen role unless the user already has it.
            if data['project'] and data['role_id']:
                roles = api.keystone.roles_for_user(request,
                                                    new_user.id,
                                                    data['project']) or []
                assigned = [role for role in roles if role.id == str(
                    data['role_id'])]
                if not assigned:
                    try:
                        api.keystone.add_tenant_user_role(request,
                                                          data['project'],
                                                          new_user.id,
                                                          data['role_id'])
                    except Exception:
                        exceptions.handle(request,
                                          _('Unable to add user '
                                            'to primary project.'))
            return new_user
        except exceptions.Conflict:
            msg = _('User name "%s" is already used.') % data['name']
            messages.error(request, msg)
        except Exception:
            exceptions.handle(request, _('Unable to create user.'))
class UpdateUserForm(BaseUserForm, AddExtraColumnMixIn):
    """Form for updating an existing keystone user."""
    # Hide the domain_id and domain_name by default
    domain_id = forms.CharField(label=_("Domain ID"),
                                required=False,
                                widget=forms.HiddenInput())
    domain_name = forms.CharField(label=_("Domain Name"),
                                  required=False,
                                  widget=forms.HiddenInput())
    id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
    name = forms.CharField(max_length=255, label=_("User Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(
                                  attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    email = forms.EmailField(
        label=_("Email"),
        required=False)
    project = forms.ThemableChoiceField(label=_("Primary Project"),
                                        required=PROJECT_REQUIRED)

    def __init__(self, request, *args, **kwargs):
        super(UpdateUserForm, self).__init__(request, *args, **kwargs)
        self.add_extra_fields()
        # Drop fields the backend does not allow editing.
        if api.keystone.keystone_can_edit_user() is False:
            for field in ('name', 'email'):
                self.fields.pop(field)
        # For keystone V3, display the two fields in read-only
        if api.keystone.VERSIONS.active >= 3:
            readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
            self.fields["domain_id"].widget = readonlyInput
            self.fields["domain_name"].widget = readonlyInput
        # For keystone V2.0, hide description field
        else:
            self.fields["description"].widget = forms.HiddenInput()

    def handle(self, request, data):
        """Apply the edited fields to the user.

        Returns an HttpResponse from the exception handler when one is
        produced, True on success, False on a name conflict.
        """
        user = data.pop('id')
        data.pop('domain_id')
        data.pop('domain_name')

        # Only send fields the user actually changed (when optional).
        if not PROJECT_REQUIRED and 'project' not in self.changed_data:
            data.pop('project')

        if 'description' not in self.changed_data:
            data.pop('description')
        try:
            if "email" in data:
                # Normalize an empty email string to None so keystone clears
                # the attribute instead of storing "". (Matches
                # CreateUserForm.handle; this line was previously a no-op.)
                data['email'] = data['email'] or None
            response = api.keystone.user_update(request, user, **data)
            messages.success(request,
                             _('User has been updated successfully.'))
        except exceptions.Conflict:
            msg = _('User name "%s" is already used.') % data['name']
            messages.error(request, msg)
            return False
        except Exception:
            response = exceptions.handle(request, ignore=True)
            messages.error(request, _('Unable to update the user.'))

        if isinstance(response, http.HttpResponse):
            return response
        else:
            return True
class ChangePasswordForm(PasswordMixin, forms.SelfHandlingForm):
    """Form for an admin to change another user's password."""
    id = forms.CharField(widget=forms.HiddenInput)
    name = forms.CharField(
        label=_("User Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False)

    def __init__(self, request, *args, **kwargs):
        super(ChangePasswordForm, self).__init__(request, *args, **kwargs)

        # Optionally require the admin's own password as confirmation.
        if getattr(settings, 'ENFORCE_PASSWORD_CHECK', False):
            self.fields["admin_password"] = forms.CharField(
                label=_("Admin Password"),
                widget=forms.PasswordInput(render_value=False))
            # Reorder form fields from multiple inheritance
            # NOTE(review): 'keyOrder' only exists on pre-1.7 Django forms;
            # on modern Django this assignment has no effect — confirm the
            # intended field ordering still holds.
            self.fields.keyOrder = ["id", "name", "admin_password",
                                    "password", "confirm_password"]

    @sensitive_variables('data', 'password', 'admin_password')
    def handle(self, request, data):
        """Change the target user's password.

        Logs the current session out when the admin changes their own
        password; otherwise returns True on success, False on a failed
        admin-password check, or an HttpResponse from the error handler.
        """
        user_id = data.pop('id')
        password = data.pop('password')
        admin_password = None

        # Throw away the password confirmation, we're done with it.
        data.pop('confirm_password', None)

        # Verify admin password before changing user password
        if getattr(settings, 'ENFORCE_PASSWORD_CHECK', False):
            admin_password = data.pop('admin_password')
            if not api.keystone.user_verify_admin_password(request,
                                                           admin_password):
                self.api_error(_('The admin password is incorrect.'))
                return False
        try:
            response = api.keystone.user_update_password(
                request, user_id, password, admin=False)
            if user_id == request.user.id:
                # Changing your own password invalidates the session.
                return utils.logout_with_message(
                    request,
                    _('Password changed. Please log in to continue.'),
                    redirect=False)
            messages.success(request,
                             _('User password has been updated successfully.'))
        except Exception:
            response = exceptions.handle(request, ignore=True)
            messages.error(request, _('Unable to update the user password.'))

        if isinstance(response, http.HttpResponse):
            return response
        else:
            return True
|
{
"content_hash": "da936bd3d91a3fc5f083dba5549ee868",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 43.28706624605678,
"alnum_prop": 0.5370937181168925,
"repo_name": "noironetworks/horizon",
"id": "84507a645f6b7a25cf438eb98895ad0846718798",
"size": "14486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/identity/users/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "129247"
},
{
"name": "HTML",
"bytes": "581169"
},
{
"name": "JavaScript",
"bytes": "2455930"
},
{
"name": "Python",
"bytes": "5190295"
},
{
"name": "Shell",
"bytes": "7108"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import uuid
from pprint import pprint, pformat
from biokbase.workspace.client import Workspace as workspaceService
#END_HEADER
class test:
    '''
    Module Name:
    test

    Module Description:
    A KBase module: test
    This sample module contains one small method - filter_contigs.
    '''

    ######## WARNING FOR GEVENT USERS #######
    # Since asynchronous IO can lead to methods - even the same method -
    # interrupting each other, you must be *very* careful when using global
    # state. A method could easily clobber the state set by another while
    # the latter method is running.
    #########################################

    #BEGIN_CLASS_HEADER
    # Class variables and functions can be defined in this block
    # URL of the workspace service; set per-instance in the constructor.
    workspaceURL = None
    #END_CLASS_HEADER

    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        #BEGIN_CONSTRUCTOR
        self.workspaceURL = config['workspace-url']
        #END_CONSTRUCTOR
        pass

    def filter_contigs(self, ctx, params):
        """Filter a ContigSet, keeping contigs of at least 'min_length'.

        Expects params with keys 'workspace', 'contigset_id' and
        'min_length'; saves the filtered ContigSet and a hidden report back
        to the workspace and returns a one-element list with a summary dict.
        """
        # ctx is the context object
        # return variables are: returnVal
        #BEGIN filter_contigs

        # Print statements to stdout/stderr are captured and available as the method log
        print('Starting filter contigs method.')

        # Step 1 - Parse/examine the parameters and catch any errors
        # It is important to check that parameters exist and are defined, and that nice error
        # messages are returned to the user
        if 'workspace' not in params:
            raise ValueError('Parameter workspace is not set in input arguments')
        workspace_name = params['workspace']
        if 'contigset_id' not in params:
            raise ValueError('Parameter contigset_id is not set in input arguments')
        contigset_id = params['contigset_id']
        if 'min_length' not in params:
            raise ValueError('Parameter min_length is not set in input arguments')
        min_length_orig = params['min_length']
        min_length = None
        try:
            min_length = int(min_length_orig)
        except ValueError:
            raise ValueError('Cannot parse integer from min_length parameter (' + str(min_length_orig) + ')')
        if min_length < 0:
            raise ValueError('min_length parameter shouldn\'t be negative (' + str(min_length) + ')')

        # Step 2- Download the input data
        # Most data will be based to your method by its workspace name.  Use the workspace to pull that data
        # (or in many cases, subsets of that data).  The user token is used to authenticate with the KBase
        # data stores and other services.  DO NOT PRINT OUT OR OTHERWISE SAVE USER TOKENS
        token = ctx['token']
        wsClient = workspaceService(self.workspaceURL, token=token)
        try:
            # Note that results from the workspace are returned in a list, and the actual data is saved
            # in the 'data' key.  So to get the ContigSet data, we get the first element of the list, and
            # look at the 'data' field.
            contigSet = wsClient.get_objects([{'ref': workspace_name+'/'+contigset_id}])[0]['data']
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            orig_error = ''.join('    ' + line for line in lines)
            raise ValueError('Error loading original ContigSet object from workspace:\n' + orig_error)

        print('Got ContigSet data.')

        # Step 3- Actually perform the filter operation, saving the good contigs to a new list
        good_contigs = []
        n_total = 0;
        n_remaining = 0;
        for contig in contigSet['contigs']:
            n_total += 1
            if len(contig['sequence']) >= min_length:
                good_contigs.append(contig)
                n_remaining += 1
        # replace the contigs in the contigSet object in local memory
        contigSet['contigs'] = good_contigs

        print('Filtered ContigSet to '+str(n_remaining)+' contigs out of '+str(n_total))

        # Step 4- Save the new ContigSet back to the Workspace
        # When objects are saved, it is important to always set the Provenance of that object.  The basic
        # provenance info is given to you as part of the context object.  You can add additional information
        # to the provenance as necessary.  Here we keep a pointer to the input data object.
        provenance = [{}]
        if 'provenance' in ctx:
            provenance = ctx['provenance']
        # add additional info to provenance here, in this case the input data object reference
        provenance[0]['input_ws_objects']=[workspace_name+'/'+contigset_id]
        obj_info_list = None
        try:
            obj_info_list = wsClient.save_objects({
                                'workspace':workspace_name,
                                'objects': [
                                    {
                                        'type':'KBaseGenomes.ContigSet',
                                        'data':contigSet,
                                        'name':contigset_id,
                                        'provenance':provenance
                                    }
                                ]
                            })
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            orig_error = ''.join('    ' + line for line in lines)
            raise ValueError('Error saving filtered ContigSet object to workspace:\n' + orig_error)

        info = obj_info_list[0]
        # Workspace Object Info is a tuple defined as-
        # absolute ref = info[6] + '/' + info[0] + '/' + info[4]
        # 0 - obj_id objid - integer valued ID of the object
        # 1 - obj_name name - the name of the data object
        # 2 - type_string type - the full type of the data object as: [ModuleName].[Type]-v[major_ver].[minor_ver]
        # 3 - timestamp save_date
        # 4 - int version - the object version number
        # 5 - username saved_by
        # 6 - ws_id wsid - the unique integer valued ID of the workspace containing this object
        # 7 - ws_name workspace - the workspace name
        # 8 - string chsum - md5 of the sorted json content
        # 9 - int size - size of the json content
        # 10 - usermeta meta - dictionary of string keys/values of user set or auto generated metadata

        print('saved ContigSet:'+pformat(info))

        # Step 5- Create the Report for this method, and return the results
        # Create a Report of the method
        report = 'New ContigSet saved to: '+str(info[7]) + '/'+str(info[1])+'/'+str(info[4])+'\n'
        report += 'Number of initial contigs:      '+ str(n_total) + '\n'
        report += 'Number of contigs removed:      '+ str(n_total - n_remaining) + '\n'
        report += 'Number of contigs in final set: '+ str(n_remaining) + '\n'

        reportObj = {
            'objects_created':[{
                    'ref':str(info[6]) + '/'+str(info[0])+'/'+str(info[4]),
                    'description':'Filtered Contigs'
                }],
            'text_message':report
        }

        # generate a unique name for the Method report
        reportName = 'filter_contigs_report_'+str(hex(uuid.getnode()))
        report_info_list = None
        try:
            report_info_list = wsClient.save_objects({
                    'id':info[6],
                    'objects':[
                        {
                            'type':'KBaseReport.Report',
                            'data':reportObj,
                            'name':reportName,
                            'meta':{},
                            'hidden':1, # important!  make sure the report is hidden
                            'provenance':provenance
                        }
                    ]
                })
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            orig_error = ''.join('    ' + line for line in lines)
            raise ValueError('Error saving filtered ContigSet object to workspace:\n' + orig_error)
        report_info = report_info_list[0]

        print('saved Report: '+pformat(report_info))

        returnVal = {
                'report_name': reportName,
                'report_ref': str(report_info[6]) + '/' + str(report_info[0]) + '/' + str(report_info[4]),
                'new_contigset_ref': str(info[6]) + '/'+str(info[0])+'/'+str(info[4]),
                'n_initial_contigs':n_total,
                'n_contigs_removed':n_total-n_remaining,
                'n_contigs_remaining':n_remaining
            }
        #END filter_contigs

        # At some point might do deeper type checking...
        if not isinstance(returnVal, dict):
            raise ValueError('Method filter_contigs return value ' +
                             'returnVal is not type dict as required.')
        # return the results
        return [returnVal]
|
{
"content_hash": "d64b7ca390daf9504731db8f92614ac4",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 114,
"avg_line_length": 44.38388625592417,
"alnum_prop": 0.5614522156967432,
"repo_name": "msneddon/test_contig_filter",
"id": "c5ca6c1515560762f7a1fb0ef9400303a76e0a14",
"size": "9440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/test/testImpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "12761"
},
{
"name": "JavaScript",
"bytes": "3674"
},
{
"name": "Makefile",
"bytes": "2727"
},
{
"name": "Perl",
"bytes": "11486"
},
{
"name": "Python",
"bytes": "47972"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
}
|
"""Gets filesize of a first level of directories and sends it to a CouchDB instance.
Not using os.path.getsize nor os.stat.st_size, since both report
inaccurate filesizes:
http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
"""
# TODO: Manage depth of root (how many dir levels): http://stackoverflow.com/questions/229186/os-walk-without-digging-into-directories-below
# TODO: Filter out by .bcl files and/or include other ones
import os
import argparse
import subprocess
import datetime
import couchdb
import re
from scilifelab.utils import config
def get_dirsizes(path="."):
    """Return the size in bytes of *path* as reported by ``du -sb``.

    Shells out to ``du`` because os.path.getsize / os.stat.st_size report
    inaccurate sizes for directories.

    :param path: directory path; if the string contains tabs, only the
        part before the first tab is used.
    :returns: the byte count as a string (callers ``int()`` it).
    :raises subprocess.CalledProcessError: if ``du`` exits non-zero.
    """
    target = path.strip().split('\t')[0]
    out = subprocess.check_output(["du", "-sb", target],
                                  stderr=subprocess.STDOUT)
    # check_output returns bytes on Python 3; decode so splitting on a
    # str separator works (previously this raised TypeError under py3).
    return out.decode().split('\t')[0]
def parse_dirsizes(path, dirsizes=None):
    """Parse directory sizes that have been saved to a file.

    The file may contain a timestamp line ("YYYY-MM-DD HH:MM") and lines
    of the form "<size> <path>".

    :param path: file to parse.
    :param dirsizes: optional dict to update in place; when omitted a
        fresh dict with an "errors" list is created.  (Previously this was
        a mutable default argument, so state leaked between calls.)
    :returns: the updated ``dirsizes`` dict.
    """
    if dirsizes is None:
        dirsizes = {"errors": []}
    date_regexp = r'(?:\d{2})?(?:\d{2}\-?){3}[\sT]\d{2}(?:\:\d{2}){1,2}'
    try:
        with open(path) as fh:
            for line in fh:
                # Parse a timestamp
                m = re.search(date_regexp, line)
                if m:
                    try:
                        timestamp = datetime.datetime.strptime(m.group(0), "%Y-%m-%d %H:%M")
                        dirsizes["time"] = timestamp.isoformat()
                    except ValueError:
                        # Timestamp in an unexpected format; keep any
                        # previously recorded time.
                        pass
                    continue
                # Assume directories are listed as [size] [path] on one line each
                try:
                    splits = line.split()
                    if len(splits) < 2:
                        continue
                    size, path = int(splits[0]), splits[1]
                    dirsizes[path] = size
                except ValueError:
                    continue
    except Exception as e:
        # File-level problems (missing file, permissions) are recorded,
        # not raised, so one bad input does not abort the whole run.
        dirsizes["errors"].append(str(e))
    return dirsizes
def send_db(server, db, credentials, data):
    """Save *data* as a document in database *db* on CouchDB *server*.

    :param credentials: (user, password) pair used for authentication.
    """
    connection = couchdb.Server(server)
    connection.resource.credentials = credentials
    database = connection[db]
    database.save(data)
def main():
    """CLI entry point: compute directory sizes and report them to CouchDB."""
    dirsizes = {"time": datetime.datetime.now().isoformat(),
                "unit": "bytes",
                "errors": []}
    parser = argparse.ArgumentParser(description="Compute directory size(s) and report them to a CouchDB database")
    parser.add_argument('--dir', dest='root', action='append',
                        help="the directory to calculate dirsizes from")
    parser.add_argument("--server", dest='server', action='store', default="localhost:5984",
                        help="CouchDB instance to connect to, defaults to localhost:5984")
    parser.add_argument("--db", dest='db', action='store', default="tests",
                        help="CouchDB database name, defaults to 'tests'")
    parser.add_argument("--dry-run", dest='dry_run', action='store_true', default=False,
                        help="Do not submit the resulting hash to CouchDB")
    args = parser.parse_args()

    #Import DB credentials from pm.conf
    c = config.load_config()
    try:
        user = c.get('db', 'user')
        password = c.get('db', 'password')
        credentials = (user, password)
    except:
        # NOTE(review): the bare except converts any config error into a
        # KeyError; consider narrowing to the specific config exceptions.
        raise KeyError('Please specify DB credentials in your pm.conf file')

    # NOTE(review): args.root is None when no --dir option is given, so the
    # loop below raises TypeError — confirm --dir is meant to be mandatory.
    for r in args.root: # multiple --dir args provided
        if os.path.exists(r) and os.path.isdir(r):
            # Size each first-level subdirectory of r.
            for d in os.listdir(r):
                path = os.path.join(r, d)
                try:
                    dirsizes[path] = int(get_dirsizes(path))
                except subprocess.CalledProcessError as pe:
                    dirsizes['errors'].append(pe.output)
        else:
            # A non-directory argument is treated as a pre-computed sizes file.
            dirsizes = parse_dirsizes(r, dirsizes)

    if args.dry_run:
        print(dirsizes)
    else:
        send_db(args.server, args.db, credentials, dirsizes)
if __name__ == "__main__":
main()
|
{
"content_hash": "2a0a0581719290bbb359e31a29761b74",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 140,
"avg_line_length": 32.90625,
"alnum_prop": 0.5728869895536562,
"repo_name": "jun-wan/scilifelab",
"id": "0ff0b2e402af509f8b59b03f88fc6fa0d2bdea7c",
"size": "4234",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/runsizes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3192"
},
{
"name": "Mako",
"bytes": "13990"
},
{
"name": "Python",
"bytes": "1322133"
},
{
"name": "R",
"bytes": "4392"
},
{
"name": "Shell",
"bytes": "38743"
}
],
"symlink_target": ""
}
|
import requests

# Geocode "Winnetka", biasing results toward the given viewport bounds.
url = ("https://maps.googleapis.com/maps/api/geocode/json"
       "?address=Winnetka"
       "&bounds=34.172684%2C-118.604794%7C34.236144%2C-118.500938"
       "&key=YOUR_API_KEY")

response = requests.request("GET", url, headers={}, data={})

print(response.text)
# [END maps_http_geocode_winnetka_bounds]
|
{
"content_hash": "f067fb2039d18231dc4d52bba652b137",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 148,
"avg_line_length": 27.25,
"alnum_prop": 0.7584097859327217,
"repo_name": "googlemaps/openapi-specification",
"id": "51d36028ff372edc04ba6c27db022ba483388673",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dist/snippets/maps_http_geocode_winnetka_bounds/maps_http_geocode_winnetka_bounds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Starlark",
"bytes": "11394"
},
{
"name": "TypeScript",
"bytes": "71469"
}
],
"symlink_target": ""
}
|
# Autodetecting setup.py script for building the Python extensions.
__version__ = "$Revision$"
# Standard-library helpers used throughout the build script.
import sys, os, imp, re, optparse
from glob import glob
from platform import machine as platform_machine
import sysconfig
# distutils machinery: logging, Setup-file parsing, and the build/install
# command classes this script subclasses below.
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.spawn import find_executable
# True when configure exported a host platform, i.e. we are cross-compiling.
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
def get_platform():
    """Return the platform tag the build should target."""
    # When cross-compiling, honor the host platform chosen by configure.
    host = os.environ.get("_PYTHON_HOST_PLATFORM")
    if host is not None:
        return host
    # Collapse version-specific OSF/1 values (e.g. 'osf1V5') to plain 'osf1'.
    if sys.platform.startswith('osf1'):
        return 'osf1'
    return sys.platform
# Platform tag we are building for; may differ from sys.platform when
# cross-compiling (see get_platform above).
host_platform = get_platform()
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
    """Prepend 'dir' to 'dirlist' if it is an existing directory that
    is not already listed. 'None' and non-directories are ignored."""
    if dir is None:
        return
    if dir in dirlist or not os.path.isdir(dir):
        return
    dirlist.insert(0, dir)
def macosx_sdk_root():
    """
    Return the directory of the current OSX SDK,
    or '/' if no SDK was specified.
    """
    # Guard against CFLAGS being unset in the build config.
    cflags = sysconfig.get_config_var('CFLAGS') or ''
    # Accept both "-isysroot /path" and "-isysroot/path": some
    # compilers/Makefiles emit the flag with no space, which the
    # original '\s+' pattern failed to match (CPython bpo-36231).
    m = re.search(r'-isysroot\s*(\S+)', cflags)
    if m is None:
        return '/'
    return m.group(1)
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    """
    # /usr/... is SDK territory except for the /usr/local subtree.
    if path.startswith('/usr/'):
        return not path.startswith('/usr/local')
    # /System and /Library trees are always part of the SDK layout.
    return path.startswith(('/System/', '/Library/'))
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    sysroot = None
    if host_platform == 'darwin':
        # Honor the MacOSX SDK setting when one was specified.
        # An SDK is a directory with the same structure as a real
        # system, but with only header files and libraries.
        sysroot = macosx_sdk_root()

    def _candidate(directory):
        # Resolve the candidate path, remapping into the SDK tree when
        # the directory is an SDK-shadowed system location.
        if sysroot is not None and is_macosx_sdk_path(directory):
            return os.path.join(sysroot, directory[1:], filename)
        return os.path.join(directory, filename)

    # Found in a standard location: no extra directives needed.
    for directory in std_dirs:
        if os.path.exists(_candidate(directory)):
            return []
    # Found in an additional location: report which one.
    for directory in paths:
        if os.path.exists(_candidate(directory)):
            return [directory]
    # Not found anywhere.
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate 'libname' via the compiler and classify where it was found.

    Returns None if the library is not found, [] if it lives in one of
    'std_dirs' (no -L directive needed), or [p] where p is the entry of
    'paths' containing it.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None
    if host_platform == 'darwin':
        # The found path may actually live inside the OSX SDK tree.
        sysroot = macosx_sdk_root()
    # Check whether the found file is in one of the standard directories
    dirname = os.path.dirname(result)
    for p in std_dirs:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)
        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            # Compare against the SDK-remapped form of the directory.
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ ]
        if p == dirname:
            return [ ]
    # Otherwise, it must have been in one of the additional directories,
    # so we have to figure out which one.
    for p in paths:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)
        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ p ]
        if p == dirname:
            return [p]
    else:
        # for-else: the loop has no break, so this fires whenever the
        # loop finishes without returning, i.e. the path matched nothing.
        assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Return True if the module 'modname' is present in the list
    of extensions 'extlist'."""
    # any() short-circuits on the first match instead of building a
    # filtered list only to take its length; callers use the result in
    # boolean context, so this is backward compatible.
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename"""
    # Renamed local from 'list' to avoid shadowing the builtin.
    dirs = find_file(module, [], dirlist)
    if not dirs:
        return module
    if len(dirs) > 1:
        log.info("WARNING: multiple copies of %s found"%module)
    return os.path.join(dirs[0], module)
class PyBuildExt(build_ext):
    def __init__(self, dist):
        build_ext.__init__(self, dist)
        # Names of extensions that failed to build; reported in the
        # summary printed at the end of build_extensions().
        self.failed = []
    def build_extensions(self):
        """Detect, filter, and build all extension modules, then print a
        summary of modules that were missing or failed to build."""
        # Detect which modules should be compiled
        missing = self.detect_modules()
        # Remove modules that are present on the disabled list
        extensions = [ext for ext in self.extensions
                      if ext.name not in disabled_module_list]
        # move ctypes to the end, it depends on other modules
        ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
        if "_ctypes" in ext_map:
            ctypes = extensions.pop(ext_map["_ctypes"])
            extensions.append(ctypes)
        self.extensions = extensions
        # Fix up the autodetected modules, prefixing all the source files
        # with Modules/ and adding Python's include directory to the path.
        (srcdir,) = sysconfig.get_config_vars('srcdir')
        if not srcdir:
            # Maybe running on Windows but not using CYGWIN?
            raise ValueError("No source directory; cannot proceed.")
        srcdir = os.path.abspath(srcdir)
        moddirlist = [os.path.join(srcdir, 'Modules')]
        # Platform-dependent module source and include directories
        incdirlist = []
        if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
            sysconfig.get_config_var("CONFIG_ARGS")):
            # Mac OS X also includes some mac-specific modules
            macmoddir = os.path.join(srcdir, 'Mac/Modules')
            moddirlist.append(macmoddir)
            incdirlist.append(os.path.join(srcdir, 'Mac/Include'))
        # Fix up the paths for scripts, too
        self.distribution.scripts = [os.path.join(srcdir, filename)
                                     for filename in self.distribution.scripts]
        # Python header files
        headers = [sysconfig.get_config_h_filename()]
        headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
        # Iterate over a copy since entries may be removed from
        # self.extensions inside the loop.
        for ext in self.extensions[:]:
            ext.sources = [ find_module_file(filename, moddirlist)
                            for filename in ext.sources ]
            if ext.depends is not None:
                ext.depends = [find_module_file(filename, moddirlist)
                               for filename in ext.depends]
            else:
                ext.depends = []
            # re-compile extensions if a header file has been changed
            ext.depends.extend(headers)
            # platform specific include directories
            ext.include_dirs.extend(incdirlist)
            # If a module has already been built statically,
            # don't build it here
            if ext.name in sys.builtin_module_names:
                self.extensions.remove(ext)
        # Parse Modules/Setup and Modules/Setup.local to figure out which
        # modules are turned on in the file.
        remove_modules = []
        for filename in ('Modules/Setup', 'Modules/Setup.local'):
            # NOTE: 'input' shadows the builtin; kept as-is here.
            input = text_file.TextFile(filename, join_lines=1)
            while 1:
                line = input.readline()
                if not line: break
                # First token of each Setup line is the module name.
                line = line.split()
                remove_modules.append(line[0])
            input.close()
        for ext in self.extensions[:]:
            if ext.name in remove_modules:
                self.extensions.remove(ext)
        # When you run "make CC=altcc" or something similar, you really want
        # those environment variables passed into the setup.py phase. Here's
        # a small set of useful ones.
        compiler = os.environ.get('CC')
        args = {}
        # unfortunately, distutils doesn't let us provide separate C and C++
        # compilers
        if compiler is not None:
            (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
            args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
        self.compiler.set_executables(**args)
        # Delegate the actual compilation to distutils.
        build_ext.build_extensions(self)
        # Column width for the three-column report below: the longest
        # name among built and failed extensions.
        longest = 0
        if self.extensions:
            longest = max([len(e.name) for e in self.extensions])
        if self.failed:
            longest = max(longest, max([len(name) for name in self.failed]))
        def print_three_column(lst):
            # Print 'lst' sorted case-insensitively, three names per row.
            lst.sort(key=str.lower)
            # guarantee zip() doesn't drop anything
            while len(lst) % 3:
                lst.append("")
            for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
                print "%-*s %-*s %-*s" % (longest, e, longest, f,
                                          longest, g)
        if missing:
            print
            print ("Python build finished, but the necessary bits to build "
                   "these modules were not found:")
            print_three_column(missing)
            print ("To find the necessary bits, look in setup.py in"
                   " detect_modules() for the module's name.")
            print
        if self.failed:
            failed = self.failed[:]
            print
            print "Failed to build these modules:"
            print_three_column(failed)
            print
    def build_extension(self, ext):
        """Build one extension, then sanity-check it by importing it.

        Build or import failures are recorded in self.failed rather than
        aborting the whole build; an unimportable extension is renamed
        with a '_failed' suffix so it cannot be picked up accidentally.
        """
        if ext.name == '_ctypes':
            # ctypes needs its own configure step before it can build.
            if not self.configure_ctypes(ext):
                return
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsError), why:
            self.announce('WARNING: building of extension "%s" failed: %s' %
                          (ext.name, sys.exc_info()[1]))
            self.failed.append(ext.name)
            return
        # Workaround for Mac OS X: The Carbon-based modules cannot be
        # reliably imported into a command-line Python
        if 'Carbon' in ext.extra_link_args:
            self.announce(
                'WARNING: skipping import check for Carbon-based "%s"' %
                ext.name)
            return
        if host_platform == 'darwin' and (
                sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
            # Don't bother doing an import check when an extension was
            # build with an explicit '-arch' flag on OSX. That's currently
            # only used to build 32-bit only extensions in a 4-way
            # universal build and loading 32-bit code into a 64-bit
            # process will fail.
            self.announce(
                'WARNING: skipping import check for "%s"' %
                ext.name)
            return
        # Workaround for Cygwin: Cygwin currently has fork issues when many
        # modules have been imported
        if host_platform == 'cygwin':
            self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                % ext.name)
            return
        ext_filename = os.path.join(
            self.build_lib,
            self.get_ext_filename(self.get_ext_fullname(ext.name)))
        # Don't try to load extensions for cross builds
        if cross_compiling:
            return
        try:
            # Import check: load the freshly built shared object.
            imp.load_dynamic(ext.name, ext_filename)
        except ImportError, why:
            self.failed.append(ext.name)
            self.announce('*** WARNING: renaming "%s" since importing it'
                          ' failed: %s' % (ext.name, why), level=3)
            assert not self.inplace
            basename, tail = os.path.splitext(ext_filename)
            newname = basename + "_failed" + tail
            if os.path.exists(newname):
                os.remove(newname)
            os.rename(ext_filename, newname)
            # XXX -- This relies on a Vile HACK in
            # distutils.command.build_ext.build_extension().  The
            # _built_objects attribute is stored there strictly for
            # use here.
            # If there is a failure, _built_objects may not be there,
            # so catch the AttributeError and move on.
            try:
                # Remove the stale object files so a rebuild starts clean.
                for filename in self._built_objects:
                    os.remove(filename)
            except AttributeError:
                self.announce('unable to remove files (ignored)')
        except:
            # Import raised something other than ImportError; record the
            # failure but keep the built file in place.
            exc_type, why, tb = sys.exc_info()
            self.announce('*** WARNING: importing extension "%s" '
                          'failed with %s: %s' % (ext.name, exc_type, why),
                          level=3)
            self.failed.append(ext.name)
    def add_multiarch_paths(self):
        """Add Debian/Ubuntu multiarch lib/include paths to the compiler.

        First asks the compiler itself via '-print-multiarch'; if that
        yields nothing, falls back to the dpkg-architecture tool.
        """
        # Debian/Ubuntu multiarch support.
        # https://wiki.ubuntu.com/MultiarchSpec
        cc = sysconfig.get_config_var('CC')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
        multiarch_path_component = ''
        try:
            # os.system returns a wait status; '>> 8' extracts the
            # process exit code on POSIX.
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
        finally:
            os.unlink(tmpfile)
        if multiarch_path_component != '':
            add_dir_to_list(self.compiler.library_dirs,
                            '/usr/lib/' + multiarch_path_component)
            add_dir_to_list(self.compiler.include_dirs,
                            '/usr/include/' + multiarch_path_component)
            return
        # Fallback: query dpkg-architecture when available.
        if not find_executable('dpkg-architecture'):
            return
        opt = ''
        if cross_compiling:
            opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
            (opt, tmpfile))
        try:
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
                add_dir_to_list(self.compiler.library_dirs,
                                '/usr/lib/' + multiarch_path_component)
                add_dir_to_list(self.compiler.include_dirs,
                                '/usr/include/' + multiarch_path_component)
        finally:
            os.unlink(tmpfile)
    def add_gcc_paths(self):
        """Parse gcc's verbose preprocessor output to discover the
        compiler's default include and library search directories, and
        add them to the compiler's search paths (used when cross-compiling).
        """
        gcc = sysconfig.get_config_var('CC')
        tmpfile = os.path.join(self.build_temp, 'gccpaths')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # '-E -v' on empty input makes gcc print its search paths to stderr.
        ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
        is_gcc = False
        in_incdirs = False
        inc_dirs = []
        lib_dirs = []
        try:
            # os.system wait status: '>> 8' extracts the exit code.
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    for line in fp.readlines():
                        if line.startswith("gcc version"):
                            is_gcc = True
                        elif line.startswith("#include <...>"):
                            # Start of the system include-dir listing.
                            in_incdirs = True
                        elif line.startswith("End of search list"):
                            in_incdirs = False
                        elif is_gcc and line.startswith("LIBRARY_PATH"):
                            for d in line.strip().split("=")[1].split(":"):
                                d = os.path.normpath(d)
                                # Skip gcc's private internal directories.
                                if '/gcc/' not in d:
                                    add_dir_to_list(self.compiler.library_dirs,
                                                    d)
                        elif is_gcc and in_incdirs and '/gcc/' not in line:
                            add_dir_to_list(self.compiler.include_dirs,
                                            line.strip())
        finally:
            os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
inc_dirs = self.compiler.include_dirs[:]
lib_dirs = self.compiler.library_dirs[:]
if not cross_compiling:
for d in (
'/usr/include',
):
add_dir_to_list(inc_dirs, d)
for d in (
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
):
add_dir_to_list(lib_dirs, d)
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
srcdir = sysconfig.get_config_var('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if host_platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if host_platform in ['darwin', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
#exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# code that will be builtins in the future, but conflict with the
# current builtins
exts.append( Extension('future_builtins', ['future_builtins.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# high-performance collections
exts.append( Extension("_collections", ["_collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# Python 3.1 _io library
exts.append( Extension("_io",
["_io/bufferedio.c", "_io/bytesio.c", "_io/fileio.c",
"_io/iobase.c", "_io/_iomodule.c", "_io/stringio.c", "_io/textio.c"],
depends=["_io/_iomodule.h"], include_dirs=["Modules/_io"]))
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
else:
missing.append('unicodedata')
# access to ISO C locale support
data = open('pyconfig.h').read()
m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if host_platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if host_platform not in ['atheos']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if sys.maxsize != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
else:
missing.extend(['imageop'])
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
fp = os.popen("ldd %s" % do_readline)
ldd_output = fp.readlines()
ret = fp.close()
if ret is None or ret >> 8 == 0:
for ln in ldd_output:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
if 'tinfo' in ln: # termcap interface split out from ncurses
readline_termcap_library = 'tinfo'
break
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if (dep_target and
(tuple(int(n) for n in dep_target.split('.')[0:2])
< (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entiry path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c', 'timemodule.c'],
depends=['socketmodule.h'],
libraries=math_libs) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError, msg:
print "IOError while reading opensshv.h:", msg
pass
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (4, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
# Use this function to filter out known bad configurations.
if (4, 6) == db_ver[:2]:
# BerkeleyDB 4.6.x is not stable on many architectures.
arch = platform_machine()
if arch not in ('i386', 'i486', 'i586', 'i686',
'x86_64', 'ia64'):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 5:
for x in range(max_db_ver[1]+1):
if allow_db_ver((5, x)):
yield x
elif major == 4:
for x in range(9):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print "db.h: no version number version in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
if db_setup_debug:
print "bsddb using BerkeleyDB lib:", db_ver, dblib
print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
depends = ['bsddb.h'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
missing.append('_bsddb')
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Comment this out if you want the sqlite3 module to be able to load extensions.
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
# underlying db library. May BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if host_platform == 'darwin':
if is_macosx_sdk_path(f):
sysroot = macosx_sdk_root()
f = os.path.join(sysroot, f[1:])
if os.path.exists(f) and not db_incs:
data = open(f).read()
m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = host_platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
else:
missing.append('bsddb185')
else:
missing.append('bsddb185')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
print "building dbm using ndbm"
dbmext = Extension('dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print "building dbm using bdb"
dbmext = Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('gdbm')
# Unix-only modules
if host_platform not in ['win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if host_platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
if (host_platform not in ['cygwin', 'atheos', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
curses_incs = None
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
curses_incs = find_file('curses.h', inc_dirs,
[os.path.join(d, 'ncursesw') for d in inc_dirs])
exts.append( Extension('_curses', ['_cursesmodule.c'],
include_dirs = curses_incs,
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs = curses_incs,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
else:
missing.append('_multibytecodec')
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
missing.append('_codecs_%s' % loc)
# Dynamic loading module
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (host_platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
else:
missing.append('dl')
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if host_platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
if (host_platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or host_platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
if host_platform == 'darwin':
# _scproxy
exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")],
extra_link_args= [
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation'
]))
if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if int(os.uname()[2].split('.')[0]) >= 8:
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
# surpress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
def macSrcExists(name1, name2=''):
    """Report whether Mac/Modules/<name1>[/<name2>] exists under srcdir.

    Returns None (falsy) when name1 is empty, so callers can probe
    optional path components without special-casing.
    """
    if not name1:
        return None
    components = (name1, name2) if name2 else (name1,)
    return os.path.exists(os.path.join(srcdir, 'Mac', 'Modules', *components))
def addMacExtension(name, kwds, extra_srcs=[]):
    """Locate the C source for Mac module *name* and register it.

    Probes, in order: NAME.c, NAMEmodule.c, _nnn/NAME.c and
    _nnn/NAMEmodule.c (where _nnn is the lowercased name without the
    leading underscore), then appends an Extension built from the
    match plus *extra_srcs*, passing *kwds* through as keyword args.
    Raises RuntimeError when no source file is found.
    """
    subdir = ''
    if name[0] == '_':
        subdir = name[1:].lower()
    plain_c = name + '.c'
    module_c = name + 'module.c'

    if macSrcExists(plain_c):
        srcs = [plain_c]
    elif macSrcExists(module_c):
        srcs = [module_c]
    elif macSrcExists(subdir, plain_c):
        # XXX(nnorwitz): If all the names ended with module, we
        # wouldn't need this condition.  ibcarbon is the only one.
        srcs = [os.path.join(subdir, plain_c)]
    elif macSrcExists(subdir, module_c):
        srcs = [os.path.join(subdir, module_c)]
    else:
        raise RuntimeError("%s not found" % name)

    # Here's the whole point: add the extension with sources
    exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Workaround for a bug in the version of gcc shipped with Xcode 3.
# The _Win extension should build just like the other Carbon extensions, but
# this actually results in a hard crash of the linker.
#
if '-arch ppc64' in cflags and '-arch ppc' in cflags:
win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'],
'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'],
}
addMacExtension('_Win', win_kwds)
else:
addMacExtension('_Win', carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
    """Build _tkinter from explicit Tcl/Tk locations, if configured.

    This path is taken when both arguments were given to ./configure:

        --with-tcltk-includes="-I/path/to/tclincludes \
                               -I/path/to/tkincludes"
        --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
                           -L/path/to/tklibs -ltkm.n"

    The values can also be specified or overridden via make:
        make TCLTK_INCLUDES="..." TCLTK_LIBS="..."

    Useful for building and testing tkinter against multiple Tcl/Tk
    versions.  A build of Tk depends on a particular build of Tcl, so
    both arguments must be given together.

    Returns 1 when the extension was added, 0 to resume the default
    configuration search.
    """
    # The _TCLTK variables are created in the Makefile sharedmods target.
    include_flags = os.environ.get('_TCLTK_INCLUDES')
    link_flags = os.environ.get('_TCLTK_LIBS')
    if not (include_flags and link_flags):
        # Either variable missing: not handled here, resume the search.
        return 0

    self.extensions.append(Extension(
        '_tkinter', ['_tkinter.c', 'tkappinit.c'],
        define_macros=[('WITH_APPINIT', 1)],
        extra_compile_args=include_flags.split(),
        extra_link_args=link_flags.split(),
    ))
    return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
    """Build _tkinter against the Aqua Tcl/Tk frameworks on OS X.

    Returns 1 if both frameworks were found and the extension was
    appended, 0 to let the normal "unix" Tcl/Tk search resume.
    """
    # The _tkinter module, using frameworks. Since frameworks are quite
    # different the UNIX search logic is not sharable.
    from os.path import join, exists
    # NOTE(review): the third entry joins $HOME with the *absolute* path
    # '/Library/Frameworks'; os.path.join discards $HOME for an absolute
    # second argument, so this entry duplicates the first one.  Probably
    # 'Library/Frameworks' was intended -- confirm before changing.
    framework_dirs = [
        '/Library/Frameworks',
        '/System/Library/Frameworks/',
        join(os.getenv('HOME'), '/Library/Frameworks')
    ]

    sysroot = macosx_sdk_root()

    # Find the directory that contains the Tcl.framework and Tk.framework
    # bundles.
    # XXX distutils should support -F!
    for F in framework_dirs:
        # both Tcl.framework and Tk.framework should be present
        for fw in 'Tcl', 'Tk':
            if is_macosx_sdk_path(F):
                # Paths inside an SDK must be re-rooted at the sysroot.
                if not exists(join(sysroot, F[1:], fw + '.framework')):
                    break
            else:
                if not exists(join(F, fw + '.framework')):
                    break
        else:
            # ok, F is now directory with both frameworks. Continue
            # building
            break
    else:
        # Tk and Tcl frameworks not found. Normal "unix" tkinter search
        # will now resume.
        return 0

    # For 8.4a2, we must add -I options that point inside the Tcl and Tk
    # frameworks. In later release we should hopefully be able to pass
    # the -F option to gcc, which specifies a framework lookup path.
    #
    # Note: 'F' deliberately keeps the value it had when the search loop
    # above broke out -- it is the directory holding both frameworks.
    include_dirs = [
        join(F, fw + '.framework', H)
        for fw in 'Tcl', 'Tk'
        for H in 'Headers', 'Versions/Current/PrivateHeaders'
    ]

    # For 8.4a2, the X11 headers are not included. Rather than include a
    # complicated search, this is a hard-coded path. It could bail out
    # if X11 libs are not found...
    include_dirs.append('/usr/X11R6/include')
    frameworks = ['-framework', 'Tcl', '-framework', 'Tk']

    # All existing framework builds of Tcl/Tk don't support 64-bit
    # architectures.
    cflags = sysconfig.get_config_vars('CFLAGS')[0]
    archs = re.findall('-arch\s+(\w+)', cflags)

    # Ask file(1) which architectures the Tk binary actually contains.
    if is_macosx_sdk_path(F):
        fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(os.path.join(sysroot, F[1:]),))
    else:
        fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(F,))

    detected_archs = []
    for ln in fp:
        a = ln.split()[-1]
        if a in archs:
            detected_archs.append(ln.split()[-1])
    fp.close()

    # Restrict the link to the architectures present in both CFLAGS and
    # the Tk binary; frameworks[2:] skips the Tcl '-framework' pair so
    # only '-framework Tk' plus the -arch flags go to the compile step.
    for a in detected_archs:
        frameworks.append('-arch')
        frameworks.append(a)

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)],
                    include_dirs = include_dirs,
                    libraries = [],
                    extra_compile_args = frameworks[2:],
                    extra_link_args = frameworks,
                    )
    self.extensions.append(ext)
    return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    """Locate Tcl/Tk and append the _tkinter extension when found.

    Tries, in order: explicit --with-tcltk-* settings, the Darwin
    framework (Aqua) build, then a versioned search of lib_dirs for
    tk/tcl libraries and of inc_dirs for their headers.  On failure
    the method simply returns without adding _tkinter.
    """
    # The _tkinter module.

    # Check whether --with-tcltk-includes and --with-tcltk-libs were
    # configured or passed into the make target. If so, use these values
    # to build tkinter and bypass the searches for Tcl and TK in standard
    # locations.
    if self.detect_tkinter_explicitly():
        return

    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    if (host_platform == 'darwin' and
        self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return

    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                    '8.2', '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs,
                                                'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs,
                                                 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break

    # Now check for the header files
    # Note: 'version' deliberately retains the value from the loop above
    # and is reused below for header subdirs and library names.
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in host_platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        # tk.h may live next to tcl.h, so search the tcl subdirs too.
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)

    if (tcllib is None or tklib is None or
        tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return

    # OK... everything seems to be present for Tcl/Tk.

    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    # Deduplicate while preserving the find_file ordering.
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)

    # Check for various platform-specific directories
    if host_platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')

    # If Cygwin, then verify that X is installed before proceeding
    if host_platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            # No X headers: _tkinter cannot be built on this Cygwin.
            return

    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')

    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)

    if host_platform in ['aix3', 'aix4']:
        # AIX needs the dynamic loader library for Tcl/Tk.
        libs.append('ld')

    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if host_platform != "cygwin":
        libs.append('X11')

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)

    # XXX handle these, but how to detect?
    # *** Uncomment and edit for PIL (TkImaging) extension only:
    #       -DWITH_PIL -I../Extensions/Imaging/libImaging  tkImaging.c \
    # *** Uncomment and edit for TOGL extension only:
    #       -DWITH_TOGL togl.c \
    # *** Uncomment these for TOGL extension only:
    #       -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
    """Configure the _ctypes extension on Darwin.

    OS X uses the preconfigured libffi sources shipped in
    Modules/_ctypes/libffi_osx instead of running libffi's configure.
    Extends *ext* in place and always returns True.
    """
    srcdir = sysconfig.get_config_var('srcdir')
    ffi_srcdir = os.path.abspath(
        os.path.join(srcdir, 'Modules', '_ctypes', 'libffi_osx'))
    ffi_files = [
        'ffi.c',
        'x86/darwin64.S',
        'x86/x86-darwin.S',
        'x86/x86-ffi_darwin.c',
        'x86/x86-ffi64.c',
        'powerpc/ppc-darwin.S',
        'powerpc/ppc-darwin_closure.S',
        'powerpc/ppc-ffi_darwin.c',
        'powerpc/ppc64-darwin_closure.S',
    ]
    sources = [os.path.join(ffi_srcdir, name) for name in ffi_files]

    # Teach the compiler to accept .S (preprocessed assembly) sources.
    self.compiler.src_extensions.append('.S')

    ext.include_dirs.extend([os.path.join(ffi_srcdir, 'include'),
                             os.path.join(ffi_srcdir, 'powerpc')])
    ext.sources.extend(sources)
    return True
def configure_ctypes(self, ext):
    """Configure the _ctypes extension.

    Unless a system libffi is requested, run the bundled libffi's
    ./configure in the build tree (re-rooted via the Darwin-specific
    helper on OS X), read the generated fficonfig.py, and extend *ext*
    with the resulting sources, include dirs and compile flags.

    Returns True on success, False if libffi's configure step failed.
    """
    if not self.use_system_libffi:
        if host_platform == 'darwin':
            # Darwin ships preconfigured libffi sources; no configure run.
            return self.configure_ctypes_darwin(ext)

        srcdir = sysconfig.get_config_var('srcdir')
        ffi_builddir = os.path.join(self.build_temp, 'libffi')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                     '_ctypes', 'libffi'))
        ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')

        from distutils.dep_util import newer_group

        # Re-run libffi's configure only when some file in its source
        # tree is newer than the generated fficonfig.py, or on --force.
        config_sources = [os.path.join(ffi_srcdir, fname)
                          for fname in os.listdir(ffi_srcdir)
                          if os.path.isfile(os.path.join(ffi_srcdir, fname))]
        if self.force or newer_group(config_sources,
                                     ffi_configfile):
            from distutils.dir_util import mkpath
            mkpath(ffi_builddir)
            # Forward only --host/--build so cross-compilation settings
            # reach libffi's configure.
            config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
                           if (('--host=' in arg) or ('--build=' in arg))]
            if not self.verbose:
                config_args.append("-q")

            # Pass empty CFLAGS because we'll just append the resulting
            # CFLAGS to Python's; -g or -O2 is to be avoided.
            # NOTE(review): this builds a shell command by string
            # interpolation; directory names containing quotes or shell
            # metacharacters would break it.  The paths come from the
            # build tree itself, so this is tolerated here.
            cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                  % (ffi_builddir, ffi_srcdir, " ".join(config_args))
            res = os.system(cmd)
            if res or not os.path.exists(ffi_configfile):
                print "Failed to configure _ctypes module"
                return False

        fficonfig = {}
        with open(ffi_configfile) as f:
            # fficonfig.py is generated by our own configure run above;
            # executing it populates the fficonfig dict (Python 2 exec
            # statement).
            exec f in fficonfig

        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')

        include_dirs = [os.path.join(ffi_builddir, 'include'),
                        ffi_builddir,
                        os.path.join(ffi_srcdir, 'src')]
        extra_compile_args = fficonfig['ffi_cflags'].split()

        ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
                           fficonfig['ffi_sources'])
        ext.include_dirs.extend(include_dirs)
        ext.extra_compile_args.extend(extra_compile_args)
    return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extension modules.

    Always sets up the build against the bundled libffi first; when
    Python was configured with --with-system-ffi and a system libffi
    (header plus library) can be located, switches to it by setting
    self.use_system_libffi and amending the extension in place.
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']

    if host_platform == 'darwin':
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ##        extra_link_args.extend(['-read_only_relocs', 'warning'])
    elif host_platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')
    elif host_platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])

    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        return

    if host_platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Scan the header for the LIBFFI_H guard to verify it really is
        # libffi's header.  BUGFIX: the file object used to be leaked;
        # close it deterministically.
        fp = open(ffi_h)
        try:
            while 1:
                line = fp.readline()
                if not line:
                    # Guard not found: not a usable ffi.h.
                    ffi_inc = None
                    break
                if line.startswith('#define LIBFFI_H'):
                    break
        finally:
            fp.close()
    ffi_lib = None
    if ffi_inc is not None:
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
class PyBuildInstall(install):
    """install command that skips the lib_dynload location warning.

    While Python itself is being built, extension modules are installed
    into a directory that is not yet on sys.path; the stock install
    command would warn about that, so the warning is disabled here.
    """

    def initialize_options(self):
        install.initialize_options(self)
        self.warn_dir = 0
class PyBuildInstallLib(install_lib):
    # Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files with get
    # mode 644 unless they are a shared library in which case they will get
    # mode 755. All installed directories will get mode 755.

    # Platform-specific extension for shared libraries (e.g. '.so').
    so_ext = sysconfig.get_config_var("SO")

    def install(self):
        # Install as usual, then normalize permissions on everything written.
        outfiles = install_lib.install(self)
        self.set_file_modes(outfiles, 0644, 0755)
        self.set_dir_modes(self.install_dir, 0755)
        return outfiles

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        # chmod each installed regular file; shared libraries get the
        # executable mode.  Symlinks are skipped.
        if not self.is_chmod_supported(): return
        if not files: return

        for filename in files:
            if os.path.islink(filename): continue
            mode = defaultMode
            if filename.endswith(self.so_ext): mode = sharedLibMode
            log.info("changing mode of %s to %o", filename, mode)
            if not self.dry_run: os.chmod(filename, mode)

    def set_dir_modes(self, dirname, mode):
        # Recursively chmod every directory under dirname (Python 2's
        # os.path.walk calls the visitor once per directory).
        if not self.is_chmod_supported(): return
        os.path.walk(dirname, self.set_dir_modes_visitor, mode)

    def set_dir_modes_visitor(self, mode, dirname, names):
        if os.path.islink(dirname): return
        log.info("changing mode of %s to %o", dirname, mode)
        if not self.dry_run: os.chmod(dirname, mode)

    def is_chmod_supported(self):
        # chmod is missing on some platforms (e.g. certain Windows builds).
        return hasattr(os, 'chmod')
# Long description shown for the "Python" distribution (PyPI metadata).
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""

# Trove classifiers for the PyPI metadata, one per line.
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Run distutils setup() to build Python's extension modules."""
    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore",category=DeprecationWarning)
    setup(# PyPI Metadata (PEP 301)
          name = "Python",
          version = sys.version.split()[0],
          url = "http://www.python.org/%s" % sys.version[:3],
          maintainer = "Guido van Rossum and the Python community",
          maintainer_email = "python-dev@python.org",
          description = "A high-level object-oriented programming language",
          long_description = SUMMARY.strip(),
          license = "PSF license",
          classifiers = filter(None, CLASSIFIERS.split("\n")),
          platforms = ["Many"],

          # Build info
          cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
                      'install_lib':PyBuildInstallLib},
          # The struct module is defined here, because build_ext won't be
          # called unless there's at least one extension module defined.
          ext_modules=[Extension('_struct', ['_struct.c'])],

          # Scripts to install
          scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
                     'Tools/scripts/2to3',
                     'Lib/smtpd.py']
          )

# --install-platlib
if __name__ == '__main__':
    main()
|
{
"content_hash": "1b31782638d5912a8d9afb7f0bf7f807",
"timestamp": "",
"source": "github",
"line_count": 2237,
"max_line_length": 112,
"avg_line_length": 43.625838176128745,
"alnum_prop": 0.513233802297343,
"repo_name": "nzavagli/UnrealPy",
"id": "6a6ad239f12780125c1f76795ebb3c5cc462aaca",
"size": "97661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
"""
Data Ingestion Example [REST API]
---------------------------------
An example illustrating the data ingestion in FreeDiscovery
"""

from __future__ import print_function

import requests
import pandas as pd
import json
import os.path

# Compact float rendering; keep wide frames on one line when printed.
pd.options.display.float_format = '{:,.3f}'.format
pd.options.display.expand_frame_repr = False

dataset_name = "treclegal09_2k_subset"     # see list of available datasets
BASE_URL = "http://localhost:5001/api/v0"  # FreeDiscovery server URL

if __name__ == '__main__':

    print(" 0. Load the test dataset")
    url = BASE_URL + '/example-dataset/{}'.format(dataset_name)
    print(" GET", url)
    input_ds = requests.get(url).json()

    # To use a custom dataset, simply specify the following variables
    # create a custom dataset definition for ingestion
    data_dir = input_ds['metadata']['data_dir']
    dataset_definition = [{'document_id': row['document_id'],
                           'file_path': os.path.join(data_dir, row['file_path'])} \
                          for row in input_ds['dataset']]

    # 1. Ingest a dataset specified by folder path
    print("\n1.a Load dataset and initalize feature extraction")
    url = BASE_URL + '/feature-extraction'
    print(" POST", url)
    # Initialize a new feature-extraction session; the server returns its id.
    res = requests.post(url, json={'dataset_definition': dataset_definition,
                                   'use_hashing': True}).json()

    dsid = res['id']
    print(" => received {}".format(list(res.keys())))
    print(" => dsid = {}".format(dsid))

    print("\n1.b Start feature extraction")
    url = BASE_URL+'/feature-extraction/{}'.format(dsid)
    print(" POST", url)
    res = requests.post(url,)

    # 2. Ingest a dataset specified by a path to each file in the dataset
    print("\n2.a Load dataset and initalize feature extraction")
    url = BASE_URL + '/feature-extraction'
    print(" POST", url)
    res = requests.post(url, json={'dataset_definition': dataset_definition,
                                   'use_hashing': True}).json()

    dsid = res['id']
    print(" => received {}".format(list(res.keys())))
    print(" => dsid = {}".format(dsid))

    print("\n2.b Start feature extraction")
    url = BASE_URL+'/feature-extraction/{}'.format(dsid)
    print(" POST", url)
    res = requests.post(url,)

    print("\n2.d. check the parameters of the extracted features")
    url = BASE_URL + '/feature-extraction/{}'.format(dsid)
    print(' GET', url)
    res = requests.get(url).json()

    # Print every parameter except the (long) filename lists.
    print('\n'.join([' - {}: {}'.format(key, val) for key, val in res.items() \
                     if "filenames" not in key]))

    print("\n3. Examine the id mapping\n")
    method = BASE_URL + "/feature-extraction/{}/id-mapping/flat".format(dsid)
    print(' GET', method)
    data = {'internal_id': [row['internal_id'] for row in input_ds['dataset'][:3]]}
    print(' DATA:', json.dumps(data))
    res = requests.post(method, data=data).json()

    print(' Response:')
    print(' ', json.dumps(res))

    method = BASE_URL + "/feature-extraction/{}/id-mapping/nested".format(dsid)
    print('\n GET', method)
    data = {'data': [{'internal_id': row['internal_id']} for row in input_ds['dataset'][:3]]}
    print(' DATA', json.dumps(data))
    res = requests.post(method, json=data).json()

    print(' Response:')
    print(' ', json.dumps(res, indent=4))

    # 4. Cleaning
    print("\n5.a Delete the extracted features")
    url = BASE_URL + '/feature-extraction/{}'.format(dsid)
    print(" DELETE", url)
    requests.delete(url)
|
{
"content_hash": "506f92602e4babdc6392730b0b96ba02",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 93,
"avg_line_length": 33.373831775700936,
"alnum_prop": 0.5950714085690283,
"repo_name": "kcompher/FreeDiscovUI",
"id": "0dc7bf0f9c66ab6bd0dfbdc68df0eb61d613050d",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/REST_data_ingestion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Nginx",
"bytes": "451"
},
{
"name": "Python",
"bytes": "333007"
},
{
"name": "Shell",
"bytes": "3721"
}
],
"symlink_target": ""
}
|
from .. import conf, drivers, ircmsgs, log
from twisted.names import client
from twisted.internet import reactor, error
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import ReconnectingClientFactory
# This hack prevents the standard Twisted resolver from starting any
# threads, which allows for a clean shut-down in Twisted>=2.0
reactor.installResolver(client.createResolver())

# SSL support is optional: when PyOpenSSL is missing, SSL is left as
# None and connectSSL() below refuses to connect.
try:
    from OpenSSL import SSL
    from twisted.internet import ssl
except ImportError:
    drivers.log.debug('PyOpenSSL is not available, '
                      'cannot connect to SSL servers.')
    SSL = None
class TwistedRunnerDriver(drivers.IrcDriver):
    """Driver that pumps the Twisted reactor from supybot's main loop."""

    def name(self):
        return self.__class__.__name__

    def run(self):
        # Run one reactor iteration per supybot poll interval.
        try:
            reactor.iterate(conf.supybot.drivers.poll())
        except Exception:
            # BUGFIX: this was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt and could block shutdown.
            drivers.log.exception('Uncaught exception outside reactor:')
class SupyIrcProtocol(LineReceiver):
    # IRC lines end in \r\n, but splitting on \n lets parseMsg cope
    # with the stray \r.
    delimiter = '\n'
    # Lines longer than this make LineReceiver drop the connection.
    MAX_LENGTH = 1024

    def __init__(self):
        # Poll the Irc object for outgoing messages every 100 ms.
        self.mostRecentCall = reactor.callLater(0.1, self.checkIrcForMsgs)

    def lineReceived(self, line):
        # parseMsg returns None for lines it cannot parse.
        msg = drivers.parseMsg(line)
        if msg is not None:
            self.irc.feedMsg(msg)

    def checkIrcForMsgs(self):
        # Drain all queued outgoing messages, then reschedule ourselves.
        if self.connected:
            msg = self.irc.takeMsg()
            while msg:
                self.transport.write(str(msg))
                msg = self.irc.takeMsg()
        self.mostRecentCall = reactor.callLater(0.1, self.checkIrcForMsgs)

    def connectionLost(self, r):
        # `r` is a twisted Failure describing why the connection dropped.
        self.mostRecentCall.cancel()
        if r.check(error.ConnectionDone):
            drivers.log.disconnect(self.factory.currentServer)
        else:
            drivers.log.disconnect(self.factory.currentServer, errorMsg(r))
        if self.irc.zombie:
            # The Irc object is dead: stop reconnecting and discard any
            # messages still queued for sending.
            self.factory.stopTrying()
            while self.irc.takeMsg():
                continue
        else:
            self.irc.reset()

    def connectionMade(self):
        # Connected: reset the reconnect back-off and attach ourselves
        # as the Irc object's driver.
        self.factory.resetDelay()
        self.irc.driver = self

    def die(self):
        # Permanent shutdown: no reconnect attempts afterwards.
        drivers.log.die(self.irc)
        self.factory.stopTrying()
        self.transport.loseConnection()

    def reconnect(self, wait=None):
        # We ignore wait here, because we handled our own waiting.
        drivers.log.reconnect(self.irc.network)
        self.transport.loseConnection()
def errorMsg(reason):
    """Return the human-readable message carried by a twisted Failure."""
    message = reason.getErrorMessage()
    return message
class SupyReconnectingFactory(ReconnectingClientFactory, drivers.ServersMixin):
    # Upper bound for the exponential reconnect back-off; read from the
    # live configuration on every access.
    maxDelay = property(lambda self: conf.supybot.drivers.maxReconnectWait())
    protocol = SupyIrcProtocol

    def __init__(self, irc):
        # Pick the first configured server and connect immediately,
        # over SSL when the network is configured for it.
        self.irc = irc
        drivers.ServersMixin.__init__(self, irc)
        (server, port) = self._getNextServer()
        vhost = conf.supybot.protocols.irc.vhost()
        if self.networkGroup.get('ssl').value:
            self.connectSSL(server, port, vhost)
        else:
            self.connectTCP(server, port, vhost)

    def connectTCP(self, server, port, vhost):
        """Connect to the server with a standard TCP connection."""
        reactor.connectTCP(server, port, self, bindAddress=(vhost, 0))

    def connectSSL(self, server, port, vhost):
        """Connect to the server using an SSL socket."""
        drivers.log.info('Attempting an SSL connection.')
        # SSL is None when PyOpenSSL could not be imported (see top of file).
        if SSL:
            reactor.connectSSL(server, port, self,
                               ssl.ClientContextFactory(), bindAddress=(vhost, 0))
        else:
            drivers.log.error('PyOpenSSL is not available. Not connecting.')

    def clientConnectionFailed(self, connector, r):
        # Log, rotate to the next configured server, then let the base
        # class schedule the retry with back-off.
        drivers.log.connectError(self.currentServer, errorMsg(r))
        (connector.host, connector.port) = self._getNextServer()
        ReconnectingClientFactory.clientConnectionFailed(self, connector,r)

    def clientConnectionLost(self, connector, r):
        # Rotate to the next server on disconnect as well.
        (connector.host, connector.port) = self._getNextServer()
        ReconnectingClientFactory.clientConnectionLost(self, connector, r)

    def startedConnecting(self, connector):
        drivers.log.connect(self.currentServer)

    def buildProtocol(self, addr):
        # Attach the Irc object to every freshly built protocol instance.
        protocol = ReconnectingClientFactory.buildProtocol(self, addr)
        protocol.irc = self.irc
        return protocol
# Names looked up by supybot's driver loader.
Driver = SupyReconnectingFactory
poller = TwistedRunnerDriver()

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
{
"content_hash": "79a279fb1d6c1290d882b5ca99705201",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 34.51968503937008,
"alnum_prop": 0.6621806569343066,
"repo_name": "frumiousbandersnatch/supybot-code",
"id": "3acabc07a68aded19aa773f0e43601680975d7af",
"size": "6007",
"binary": false,
"copies": "6",
"ref": "refs/heads/sobrieti",
"path": "src/drivers/Twisted.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1960809"
}
],
"symlink_target": ""
}
|
"""Convert-related functions"""
|
{
"content_hash": "6e64af3b5b101dd5ba882a7586cbe651",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.71875,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "bcbf00db67c4e11a172fec273cde07cb14e4673d",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o2a/converter/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
}
|
"""Dataset generation for simulation.
"""
from __future__ import division, absolute_import
import abc
from copy import deepcopy
import numpy as np
from scipy.stats import pearsonr
from bcn.bias import BiasLowRank, BiasUnconstrained
from bcn.redundant_signal import RedundantSignal
from bcn.missing import Missing
def estimate_partial_signal_characterists(mixed, correlation_threshold, true_pairs=None, true_directions=None, true_stds=None, true_correlations=None):
    """Collect per-space signal characteristics, estimating what is not given.

    For both the 'sample' and the 'feature' space, known quantities are
    taken from the corresponding ``true_*`` argument when provided;
    anything missing is estimated from the corrupted matrix itself.

    Parameters
    ----------
    mixed : numpy.ndarray, shape=(n_samples, n_features)
        The bias corrupted low-rank matrix from which the bias is to be recovered.
    correlation_threshold : float
        The threshold to use when estimating pairs from a correlation matrix (the higher the fewer pairs).
    true_pairs : dict, values=('space' : numpy.ndarray, elements=int, shape=(n, 2))
        Sequence of true pairs given as tuples for both spaces in a dict.
    true_directions : dict, values=('space' : numpy.ndarray, elements=int, len=n)
        Sequence of true directions, e.g. -1, +1 for both spaces in a dict.
    true_stds : dict, values=('space' : numpy.ndarray, elements=int, shape=(n, 2))
        Sequence of true standard deviations of each pair for both spaces in a dict.
    true_correlations : dict, values=('space' : numpy.ndarray, shape=(n_samples, n_samples) or shape=(n_features, n_features))
        True correlation matrices for both spaces in a dict.

    Returns
    -------
    estimates : dict
        Dictionary of estimated signal characteristics, keyed by space.
    """
    estimates = {'feature': {'mixed': mixed.T, 'shape': mixed.T.shape},
                 'sample': {'mixed': mixed, 'shape': mixed.shape}}
    for space in ('feature', 'sample'):
        entry = estimates[space]
        if true_correlations is None:
            entry['estimated_correlations'] = estimate_correlations(entry['mixed'])
        else:
            entry['estimated_correlations'] = true_correlations[space]
        if true_pairs is None:
            entry['estimated_pairs'] = estimate_pairs(
                entry['estimated_correlations'], correlation_threshold)
        else:
            entry['estimated_pairs'] = true_pairs[space]
        if true_stds is None:
            entry['estimated_stds'] = estimate_stds(
                entry['mixed'], entry['estimated_pairs'])
        else:
            entry['estimated_stds'] = true_stds[space]
        if true_directions is None:
            entry['estimated_directions'] = estimate_directions(
                entry['estimated_correlations'], entry['estimated_pairs'])
        else:
            entry['estimated_directions'] = true_directions[space]
    return estimates
def transpose_view(X, space):
    """Return *X* oriented for the requested space (a view, no copy).

    Parameters
    ----------
    X : numpy.ndarray, shape=(n_samples, n_features)
        A matrix that may need to be transposed (view only).
    space : str, values=('sample', 'feature')
        The space the matrix should be for (determines if transposed or not).

    Returns
    -------
    X_transpose : numpy.ndarray
        ``X.T`` for 'feature', ``X`` itself for 'sample'.

    Raises
    ------
    ValueError
        If *space* is not 'sample' or 'feature'.  (Previously an invalid
        value surfaced as an opaque UnboundLocalError.)
    """
    if space == 'feature':
        return X.T
    if space == 'sample':
        return X
    raise ValueError("space must be 'sample' or 'feature', got %r" % (space,))
def opposite(space):
    """Return the complementary dimension name.

    Parameters
    ----------
    space : str, values=('feature', 'sample')
        Dimension.

    Returns
    -------
    str, values=('feature', 'sample')
        'sample' for 'feature' and vice versa.

    Raises
    ------
    ValueError
        If *space* is not one of the two dimension names.  (Previously
        an invalid value silently returned None.)
    """
    if space == 'feature':
        return 'sample'
    if space == 'sample':
        return 'feature'
    raise ValueError("space must be 'sample' or 'feature', got %r" % (space,))
def estimate_pairs(correlations, threshold=0.8):
    """Derive strongly correlated index pairs from a correlation matrix.

    NaNs are treated as zero correlation.  Only the strict lower
    triangle is scanned, so each pair appears once as (i, j) with
    i > j.  The pairs are returned in random order.

    Parameters
    ----------
    correlations : numpy.ndarray, shape=(n_samples, n_samples)
        A correlation matrix. Can contain numpy.nan values.
    threshold : float
        Absolute correlations below this value are not considered pairs.

    Returns
    -------
    pairs : numpy.ndarray, shape=(<= n_samples, 2)
        Index pairs of strongly correlated samples.
    """
    cleaned = np.nan_to_num(correlations)
    cleaned[np.absolute(cleaned) < threshold] = 0
    # argwhere == transpose(nonzero): one (i, j) row per surviving entry.
    candidates = np.argwhere(np.tril(cleaned, -1))
    order = np.arange(len(candidates))
    np.random.shuffle(order)
    return candidates[order]
def estimate_correlations(mixed):
    """Pairwise Pearson correlations between the rows of *mixed*.

    Entries for row pairs that share fewer than three finite values are
    left as numpy.nan; the diagonal is set to 1.

    Parameters
    ----------
    mixed : numpy.ndarray, shape=(n_samples, n_features)
        A matrix that requires bias removal. Can contain numpy.nan values.

    Returns
    -------
    correlations : numpy.ndarray, shape=(n_samples, n_samples)
    """
    n_rows = mixed.shape[0]
    correlations = np.full((n_rows, n_rows), np.nan)
    # Precompute finite masks once per row instead of per pair.
    finite_masks = [np.isfinite(row) for row in mixed]
    for i in range(n_rows):
        for j in range(n_rows):
            if i == j:
                correlations[i, j] = 1
                continue
            overlap = np.logical_and(finite_masks[i], finite_masks[j])
            if np.sum(overlap) >= 3:
                correlations[i, j] = pearsonr(
                    mixed[i][overlap], mixed[j][overlap])[0]
    return correlations
def estimate_directions(correlations, pairs):
    """Sign of the correlation for each given pair.

    Parameters
    ----------
    correlations : numpy.ndarray, shape=(n_samples, n_samples)
        A correlation matrix. Can contain nan values.
    pairs : numpy.ndarray, shape=(< n_samples, 2)
        Index pairs of strongly correlated samples.

    Returns
    -------
    directions : numpy.ndarray, shape=(< n_samples)
        -1 or +1 per pair, indicating anti- or normal correlation.
    """
    rows, cols = pairs[:, 0], pairs[:, 1]
    return np.sign(correlations[rows, cols])
def estimate_stds(mixed, pairs):
    """Standard deviation of each pair member's finite values.

    A member with fewer than three finite values gets numpy.nan (though
    pairs produced by estimate_correlations already guarantee at least
    three overlapping finite values).

    Parameters
    ----------
    mixed : numpy.ndarray, shape=(n_samples, n_features)
        A matrix that requires bias removal. Can contain numpy.nan values.
    pairs : numpy.ndarray, shape=(< n_samples, 2)
        Index pairs of strongly correlated samples.

    Returns
    -------
    stds : numpy.ndarray, shape=(< n_samples, 2)
        Estimated standard deviation for both members of each pair.
    """
    def _row_std(index):
        # Std over the finite entries only; nan when too few to trust.
        finite = np.isfinite(mixed[index])
        if np.sum(finite) < 3:
            return np.nan
        return np.std(mixed[index][finite])

    stds = [[_row_std(first), _row_std(second)] for first, second in pairs]
    return np.vstack(stds)
def random_permutation(shape):
    """Draw independent random index permutations for both spaces.

    Parameters
    ----------
    shape : tuple of int
        Shape (n_samples, n_features) of the matrix to be permuted.

    Returns
    -------
    d : dict of dict
        Mapping old index -> new index, keyed by 'sample' and 'feature'.
    inverse : dict of dict
        Mapping new index -> old index, keyed by 'sample' and 'feature'.
    """
    # Sample permutation is drawn first, then the feature permutation.
    sample_perm = np.random.permutation(shape[0])
    feature_perm = np.random.permutation(shape[1])
    forward = {'feature': dict(enumerate(feature_perm)),
               'sample': dict(enumerate(sample_perm))}
    backward = {'feature': {new: old for old, new in enumerate(feature_perm)},
                'sample': {new: old for old, new in enumerate(sample_perm)}}
    return forward, backward
def shuffle_matrix(matrix, d_sample, d_feature=None):
    """Reorder the rows and columns of *matrix* according to index maps.

    Parameters
    ----------
    matrix : numpy.ndarray, shape=(n_samples, n_features) or shape=(n_features, n_samples)
        Matrix to be shuffled.
    d_sample : dict
        Mapping old row index -> new row index.
    d_feature : dict, optional
        Mapping old column index -> new column index; defaults to
        *d_sample* (useful for square matrices shuffled symmetrically).

    Returns
    -------
    new_matrix : numpy.ndarray
        Shuffled copy of *matrix*.
    """
    if d_feature is None:
        d_feature = d_sample
    # BUGFIX: `xrange` is Python 2 only; `range` behaves identically here.
    x_indices = np.asarray([d_sample[i] for i in range(matrix.shape[0])])
    y_indices = np.asarray([d_feature[i] for i in range(matrix.shape[1])])
    new_matrix = matrix[x_indices]
    new_matrix = new_matrix[:, y_indices]
    return new_matrix
def shuffle_pairs(pairs, d):
    """Translate pair indices through the mapping *d*.

    Parameters
    ----------
    pairs : numpy.ndarray, shape=(n, 2)
        Old pairs.
    d : dict
        Mapping old index -> new index for the shuffle.

    Returns
    -------
    new_pairs : numpy.ndarray, shape=(n, 2)
        Pairs expressed in the new index space.
    """
    new_pairs = np.zeros_like(pairs, dtype=int)
    # BUGFIX: `xrange` is Python 2 only; `range` behaves identically here.
    for i in range(pairs.shape[0]):
        for j in range(pairs.shape[1]):
            new_pairs[i, j] = d[pairs[i, j]]
    return new_pairs
class DataSimulated(object):
    # NOTE(review): explicit ``object`` base kept for Python 2 compatibility
    # (the surrounding module also uses ``xrange``).

    def __init__(self, shape, rank, bias_model='gaussian', m_blocks_size=2, noise_amplitude=1.0, correlation_strength=1.0, missing_type='MAR', missing_fraction=0.1, image_source='../../tests/trump.png'):
        """Creates (simulates) and stores all the data of a bias recovery experiment.

        Parameters
        ----------
        shape : tuple of int
            Shape of the mixed, signal, bias and missing matrix in the form of (n_samples, n_features).
        rank : int
            Rank of the low-rank decomposition.
        bias_model : str
            Bias model to be used.
        m_blocks_size : int, default = 2
            Size of each block (e.g. number of pairs). Factor to determine the number of blocks in the correlation matix of features or samples that are varying together (with differences only in degree, direction and scale). Fewer blocks are better for bias recovery.
        noise_amplitude : float, default = 1.0
            Scale/amplitude of the bias (noise).
        correlation_strength : float
            Strength of all correlations in block matrix.
        missing_type : {'MAR', 'NMAR', 'no-missing'}
            The type of missing values, from none to censored.
        missing_fraction : float
            Percentage of missing values in missing matrix.
        image_source : str
            Path to the image used as bias.
        """
        self.shape = shape
        self.rank = rank
        self.bias_model = bias_model
        self.m_blocks_size = m_blocks_size
        self.noise_amplitude = noise_amplitude
        self.correlation_strength = correlation_strength
        self.missing_type = missing_type
        self.missing_fraction = missing_fraction
        self.image_source = image_source
        # Per-space ('sample' / 'feature') views of all generated matrices,
        # filled in by the loop below.
        self.d = {'sample': {}, 'feature': {}}
        # NOTE using the sample space to determine the m_blocks here.
        m_blocks = self.shape[0] // self.m_blocks_size
        # BiasUnconstrained(self.shape, bias_model='gaussian', noise_amplitude=1.0).generate()
        bias_unshuffled = BiasLowRank(self.shape, self.rank, bias_model=self.bias_model,
                                      noise_amplitude=self.noise_amplitude, image_source=self.image_source).generate()
        # The bias gets its own permutation, independent of the signal's.
        self.map_forward_bias, self.map_backward_bias = random_permutation(
            bias_unshuffled['X'].shape)
        bias = shuffle_matrix(
            bias_unshuffled['X'], self.map_forward_bias['sample'], self.map_forward_bias['feature'])
        # Missing-value matrix; presumably NaN (or similar sentinel) at
        # missing entries — TODO confirm against Missing.generate.
        missing = Missing(self.shape, self.missing_type,
                          p_random=self.missing_fraction).generate()['X']
        signal_unshuffled = RedundantSignal(
            self.shape, 'random', m_blocks, self.correlation_strength).generate()
        self.map_forward, self.map_backward = random_permutation(
            signal_unshuffled['X'].shape)
        signal = shuffle_matrix(
            signal_unshuffled['X'], self.map_forward['sample'], self.map_forward['feature'])
        # Observed data = shuffled signal + shuffled bias + missing matrix.
        mixed = signal + bias + missing
        for space in ['sample', 'feature']:
            # transpose_view presumably re-orients each matrix for the given
            # space ('sample' vs. 'feature' major) — TODO confirm.
            self.d[space]['mixed'] = transpose_view(mixed, space)
            self.d[space]['shape'] = self.d[space]['mixed'].shape
            self.d[space]['signal_unshuffled'] = transpose_view(
                signal_unshuffled['X'], space)
            self.d[space]['signal'] = transpose_view(signal, space)
            self.d[space]['true_missing'] = transpose_view(missing, space)
            self.d[space]['true_bias_unshuffled'] = transpose_view(
                bias_unshuffled['X'], space)
            self.d[space]['true_bias'] = transpose_view(bias, space)
            self.d[space]['true_correlations_unshuffled'] = signal_unshuffled[space]['correlation_matrix']
            self.d[space]['true_correlations'] = shuffle_matrix(
                signal_unshuffled[space]['correlation_matrix'], self.map_forward[space])
            self.d[space]['true_pairs_unshuffled'] = signal_unshuffled[space]['pairs']
            # NOTE(review): pairs are mapped with the *backward* map while the
            # matrices use the forward map — preserved as-is; verify intent.
            self.d[space]['true_pairs'] = shuffle_pairs(
                signal_unshuffled[space]['pairs'], self.map_backward[space])
            self.d[space]['true_stds'] = signal_unshuffled[space]['stds'][signal_unshuffled[space]['pairs']]
            self.d[space]['true_directions'] = signal_unshuffled[space]['directions']
|
{
"content_hash": "b7f8eb8e3b978d88104664fc89487f27",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 268,
"avg_line_length": 38.5939226519337,
"alnum_prop": 0.6215732588934221,
"repo_name": "a378ec99/bcn",
"id": "775adb1f3b9459e05511f681ec9ed289e7c1281c",
"size": "13971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcn/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "866054"
},
{
"name": "Python",
"bytes": "119177"
}
],
"symlink_target": ""
}
|
"""
********************************************************************************
* Name: workspace.py
* Author: Nathan Swain & Scott Christensen
* Created On: August 5, 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import os
import sys
import shutil
import logging
from django.utils.functional import wraps
from django.http import HttpRequest
from django.utils.functional import SimpleLazyObject
from tethys_quotas.utilities import passes_quota, _get_storage_units
log = logging.getLogger("tethys." + __name__)
class TethysWorkspace:
    """
    Defines objects that represent file workspaces (directories) for apps and users.

    Attributes:
      path(str): The absolute path to the workspace directory. Cannot be overwritten.
    """

    def __init__(self, path):
        """
        Constructor: ensure the workspace directory exists.
        """
        # Create the path if it doesn't already exist
        if not os.path.exists(path):
            os.makedirs(path)

        self._path = path

    def __repr__(self):
        """
        Rendering
        """
        return '<TethysWorkspace path="{0}">'.format(self._path)

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, value):
        """
        Don't allow overwriting the path property.
        """
        pass

    def _list_contents(self, predicate, full_path):
        """
        List workspace entries for which ``predicate`` holds.

        Args:
            predicate: One-argument callable (e.g. ``os.path.isfile``) applied
                to each entry's absolute path.
            full_path(bool): Return absolute paths when True, bare names otherwise.

        Returns:
            list: Matching entry names or full paths.
        """
        names = [
            name
            for name in os.listdir(self._path)
            if predicate(os.path.join(self._path, name))
        ]
        if full_path:
            return [os.path.join(self._path, name) for name in names]
        return names

    def files(self, full_path=False):
        """
        Return a list of files that are in the workspace.

        Args:
            full_path(bool): Returns list of files with full path names when True. Defaults to False.

        Returns:
            list: A list of files in the workspace.

        **Examples:**

        ::

            # List file names
            workspace.files()

            # List full path file names
            workspace.files(full_path=True)
        """
        return self._list_contents(os.path.isfile, full_path)

    def directories(self, full_path=False):
        """
        Return a list of directories that are in the workspace.

        Args:
            full_path(bool): Returns list of directories with full path names when True. Defaults to False.

        Returns:
            list: A list of directories in the workspace.

        **Examples:**

        ::

            # List directory names
            workspace.directories()

            # List full path directory names
            workspace.directories(full_path=True)
        """
        return self._list_contents(os.path.isdir, full_path)

    def clear(self, exclude=None, exclude_files=False, exclude_directories=False):
        """
        Remove all files and directories in the workspace.

        Args:
            exclude(iterable): A list or tuple of file and directory names to exclude from clearing operation.
            exclude_files(bool): Excludes all files from clearing operation when True. Defaults to False.
            exclude_directories(bool): Excludes all directories from clearing operation when True. Defaults to False.

        **Examples:**

        ::

            # Clear everything
            workspace.clear()

            # Clear directories only
            workspace.clear(exclude_files=True)

            # Clear files only
            workspace.clear(exclude_directories=True)

            # Clear all but specified files and directories
            workspace.clear(exclude=['file1.txt', '/full/path/to/directory1', 'directory2', '/full/path/to/file2.txt'])
        """
        if exclude is None:
            exclude = list()

        # Entries may be excluded either by bare name or by full path.
        if not exclude_files:
            # Avoid shadowing the ``file`` builtin with the loop variable.
            for name in self._list_contents(os.path.isfile, full_path=False):
                fullpath = os.path.join(self._path, name)
                if name not in exclude and fullpath not in exclude:
                    os.remove(fullpath)

        if not exclude_directories:
            for name in self._list_contents(os.path.isdir, full_path=False):
                fullpath = os.path.join(self._path, name)
                if name not in exclude and fullpath not in exclude:
                    shutil.rmtree(fullpath)

    def remove(self, item):
        """
        Remove a file or directory from the workspace.

        Args:
            item(str): Name of the item to remove from the workspace.

        **Examples:**

        ::

            workspace.remove('file.txt')
            workspace.remove('/full/path/to/file.txt')
            workspace.remove('relative/path/to/file.txt')
            workspace.remove('directory')
            workspace.remove('/full/path/to/directory')
            workspace.remove('relative/path/to/directory')

        **Note:** Though you can specify relative paths, the ``remove()`` method will not allow you to back into other directories using "../" or similar notation. Futhermore, absolute paths given must contain the path of the workspace to be valid.
        """  # noqa: E501
        # Sanitize to prevent backing into other directories or entering the home directory
        full_path = (
            item.replace("../", "")
            .replace("./", "")
            .replace("..\\", "")
            .replace(".\\", "")
            .replace("~/", "")
            .replace("~\\", "")
        )

        # NOTE(review): this is a substring containment check — any path that
        # merely *contains* the workspace path would pass. Consider
        # os.path.commonpath for a strict prefix test; kept as-is to preserve
        # existing behavior.
        if self._path not in full_path:
            full_path = os.path.join(self._path, full_path)

        if os.path.isdir(full_path):
            shutil.rmtree(full_path)
        elif os.path.isfile(full_path):
            os.remove(full_path)

    def get_size(self, units="b"):
        """
        Compute the total size of all files in the workspace.

        Args:
            units(str): Storage units label ('b' for bytes, or any unit label
                known to the tethys_quotas storage-unit table). Defaults to 'b'.

        Returns:
            float: Total size of the workspace files in the requested units.
        """
        total_size = 0
        for file_path in self.files(True):
            total_size += os.path.getsize(file_path)

        if units.lower() == "b":
            conversion_factor = 1
        else:
            # Look up the byte multiplier for the requested unit label.
            storage_units = _get_storage_units()
            conversion_factor = [
                item[0] for item in storage_units if units.upper() in item[1]
            ][0]

        return total_size / conversion_factor
def _get_user_workspace(app_class, user_or_request):
    """
    Get the file workspace (directory) for the given User.

    Args:
        app_class(TethysApp): tethys app
        user_or_request(User or HttpRequest): User or request object.

    Returns:
        tethys_apps.base.TethysWorkspace: An object representing the workspace.
    """
    from django.contrib.auth.models import User

    # Resolve the username from whichever object type was supplied.
    if isinstance(user_or_request, (User, SimpleLazyObject)):
        username = user_or_request.username
    elif isinstance(user_or_request, HttpRequest):
        username = user_or_request.user.username
    elif user_or_request is None:
        username = "anonymous_user"
    else:
        raise ValueError(
            "Invalid type for argument 'user': must be either an User or HttpRequest object."
        )

    # The user workspace lives under the app's own package directory.
    app_module = sys.modules[app_class.__module__]
    project_directory = os.path.dirname(app_module.__file__)
    workspace_directory = os.path.join(
        project_directory, "workspaces", "user_workspaces", username
    )
    return TethysWorkspace(workspace_directory)
def get_user_workspace(app_class_or_request, user_or_request) -> TethysWorkspace:
    """
    Get the dedicated user workspace for the given app. If an HttpRequest is given, the workspace of the logged-in user will be returned (i.e. request.user).

    Args:
        app_class_or_request (TethysAppBase or HttpRequest): The Tethys app class that is defined in app.py or HttpRequest to app endpoint.
        user_or_request (User or HttpRequest): Either an HttpRequest with active user session or Django User object.

    Raises:
        ValueError: if app_class_or_request or user_or_request are not correct types.
        AssertionError: if quota for the user workspace has been exceeded.

    Returns:
        TethysWorkspace: workspace object bound to the user's workspace directory.

    ::

        import os
        from tethys_sdk.workspaces import get_user_workspace
        from .app import MyFirstApp as app

        def some_function(user):
            user_workspace = get_user_workspace(app, user)
            ...
    """  # noqa: E501
    from tethys_apps.base.app_base import TethysAppBase
    from tethys_apps.utilities import get_active_app
    from django.contrib.auth.models import User

    # Resolve the app: either an app class/instance was given directly, or it
    # must be derived from the request's active app.
    given_app_class = isinstance(app_class_or_request, TethysAppBase) or (
        isinstance(app_class_or_request, type)
        and issubclass(app_class_or_request, TethysAppBase)
    )
    if given_app_class:
        app = app_class_or_request
    elif isinstance(app_class_or_request, HttpRequest):
        app = get_active_app(app_class_or_request, get_class=True)
    else:
        raise ValueError(
            f'Argument "app_class_or_request" must be of type TethysAppBase or HttpRequest: '
            f'"{type(app_class_or_request)}" given.'
        )

    # Resolve the user from a User object or the request session.
    if isinstance(user_or_request, (User, SimpleLazyObject)):
        user = user_or_request
    elif isinstance(user_or_request, HttpRequest):
        user = user_or_request.user
    else:
        raise ValueError(
            f'Argument "user_or_request" must be of type HttpRequest or User: '
            f'"{type(user_or_request)}" given.'
        )

    # Enforce the user workspace storage quota before handing out the workspace.
    assert passes_quota(user, "user_workspace_quota")

    return _get_user_workspace(app, user)
def user_workspace(controller):
    """
    **Decorator:** Get the file workspace (directory) for the given User. Add an argument named "user_workspace" to your controller. The TethysWorkspace will be passed to via this argument.

    Returns:
        TethysWorkspace: An object representing the workspace.

    **Example:**

    ::

        import os
        from my_first_app.app import MyFirstApp as app
        from tethys_sdk.workspaces import user_workspace

        @user_workspace
        def a_controller(request, user_workspace):
            \"""
            Example controller that uses @user_workspace() decorator.
            \"""
            new_file_path = os.path.join(user_workspace.path, 'new_file.txt')

            with open(new_file_path, 'w') as a_file:
                a_file.write('...')

            context = {}

            return render(request, 'my_first_app/template.html', context)
    """  # noqa:E501

    @wraps(controller)
    def wrapper(*args, **kwargs):
        # Locate the HttpRequest among the positional arguments.
        request = next(
            (arg for arg in args if isinstance(arg, HttpRequest)), None
        )

        if request is None:
            raise ValueError(
                "No request given. The user_workspace decorator only works on controllers."
            )

        workspace = get_user_workspace(request, request.user)
        return controller(*args, user_workspace=workspace, **kwargs)

    return wrapper
def _get_app_workspace(app_class):
    """
    Get the file workspace (directory) for the app.

    Args:
        app_class(TethysAppBase): The Tethys app class that is defined in app.py.

    Returns:
        tethys_apps.base.TethysWorkspace: An object representing the workspace.
    """
    # The app workspace lives under the app's own package directory.
    app_module = sys.modules[app_class.__module__]
    workspace_directory = os.path.join(
        os.path.dirname(app_module.__file__), "workspaces", "app_workspace"
    )
    return TethysWorkspace(workspace_directory)
def get_app_workspace(app_or_request) -> TethysWorkspace:
    """
    Get the app workspace for the active app of the given HttpRequest or the given Tethys App class.

    Args:
        app_or_request (TethysAppBase | HttpRequest): The Tethys App class or an HttpRequest to an app endpoint.

    Raises:
        ValueError: if object of type other than HttpRequest or TethysAppBase given.
        AssertionError: if quota for the app workspace has been exceeded.

    Returns:
        TethysWorkspace: workspace object bound to the app workspace.

    **Example:**

    ::

        import os
        from tethys_sdk.workspaces import get_app_workspace
        from .app import MyFirstApp as app

        def some_function():
            app_workspace = get_app_workspace(app)
            ...
    """
    from tethys_apps.base.app_base import TethysAppBase
    from tethys_apps.utilities import get_active_app

    # Resolve the app: from the request's active app, or use the given
    # app class/instance directly.
    if isinstance(app_or_request, HttpRequest):
        app = get_active_app(app_or_request, get_class=True)
    else:
        given_app_class = isinstance(app_or_request, TethysAppBase) or (
            isinstance(app_or_request, type)
            and issubclass(app_or_request, TethysAppBase)
        )
        if not given_app_class:
            raise ValueError(
                f'Argument "app_or_request" must be of type HttpRequest or TethysAppBase: '
                f'"{type(app_or_request)}" given.'
            )
        app = app_or_request

    # Enforce the app workspace storage quota before handing out the workspace.
    assert passes_quota(app, "app_workspace_quota")

    return _get_app_workspace(app)
def app_workspace(controller):
    """
    **Decorator:** Get the file workspace (directory) for the app. Add an argument named "app_workspace" to your controller. The TethysWorkspace will be passed to via this argument.

    Returns:
        TethysWorkspace: An object representing the workspace.

    **Example:**

    ::

        import os
        from my_first_app.app import MyFirstApp as app
        from tethys_sdk.workspaces import app_workspace

        @app_workspace
        def a_controller(request, app_workspace):
            \"""
            Example controller that uses @app_workspace() decorator.
            \"""
            new_file_path = os.path.join(app_workspace.path, 'new_file.txt')

            with open(new_file_path, 'w') as a_file:
                a_file.write('...')

            context = {}

            return render(request, 'my_first_app/template.html', context)
    """  # noqa:E501

    @wraps(controller)
    def wrapper(*args, **kwargs):
        # Locate the HttpRequest among the positional arguments.
        request = next(
            (arg for arg in args if isinstance(arg, HttpRequest)), None
        )

        if request is None:
            raise ValueError(
                "No request given. The app_workspace decorator only works on controllers."
            )

        workspace = get_app_workspace(request)
        return controller(*args, app_workspace=workspace, **kwargs)

    return wrapper
|
{
"content_hash": "ec96dff8bddb4c6efce31e769f3b3395",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 248,
"avg_line_length": 30.864372469635626,
"alnum_prop": 0.592313241949236,
"repo_name": "tethysplatform/tethys",
"id": "446f53a68b2d4d15ffa0aba72c23a6971720af06",
"size": "15247",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tethys_apps/base/workspace.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "28593"
},
{
"name": "Dockerfile",
"bytes": "6216"
},
{
"name": "HTML",
"bytes": "170934"
},
{
"name": "JavaScript",
"bytes": "504074"
},
{
"name": "Less",
"bytes": "40369"
},
{
"name": "PostScript",
"bytes": "132"
},
{
"name": "Python",
"bytes": "2563716"
},
{
"name": "SCSS",
"bytes": "3903"
},
{
"name": "SaltStack",
"bytes": "11837"
},
{
"name": "Shell",
"bytes": "28642"
}
],
"symlink_target": ""
}
|
import ctypes
import glob
import os
import subprocess
if os.name == 'posix':
    if os.path.isdir('/sys/devices/system/node'):
        # Linux: NUMA nodes appear as /sys/devices/system/node/nodeN;
        # the highest N plus one is the node count.
        node_dirs = sorted(glob.glob('/sys/devices/system/node/node*'))
        highest_index = int(os.path.basename(node_dirs[-1])[4:])
        print(highest_index + 1)
    else:
        # FreeBSD and friends expose the domain count via sysctl.
        subprocess.run(['sysctl', '-n', 'vm.ndomains'], check=False)
elif os.name == 'nt':
    # Windows: ask kernel32 for the highest NUMA node number.
    kernel32 = ctypes.windll.kernel32
    highest_node = ctypes.c_ulong()
    kernel32.GetNumaHighestNodeNumber(ctypes.byref(highest_node))
    print(highest_node.value + 1)
|
{
"content_hash": "43b7aa5f0a4b54b0208516a003ffadcd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 28,
"alnum_prop": 0.6517857142857143,
"repo_name": "john-mcnamara-intel/dpdk",
"id": "1b7787787f71c86e4e817ba95ca2112c5f85074a",
"size": "666",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "buildtools/get-numa-count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1623"
},
{
"name": "C",
"bytes": "39269990"
},
{
"name": "C++",
"bytes": "860345"
},
{
"name": "Makefile",
"bytes": "342834"
},
{
"name": "Meson",
"bytes": "144875"
},
{
"name": "Objective-C",
"bytes": "224248"
},
{
"name": "Python",
"bytes": "115929"
},
{
"name": "Shell",
"bytes": "77250"
},
{
"name": "SmPL",
"bytes": "2074"
}
],
"symlink_target": ""
}
|
# Read the user's mass; ``float`` accepts decimal input (e.g. "70.5") as well
# as whole numbers, which ``int`` would reject with a ValueError.
mass_kg = float(input("What is your mass in kilograms? "))
# 1 kg is roughly 2.2 lb, and 14 lb make one stone.
mass_stone = mass_kg * 2.2 / 14
print("You weigh", round(mass_stone, 2), "stone.")
|
{
"content_hash": "4852557da6bdf2a9e9f4d1c5eda671f6",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 56,
"avg_line_length": 46.666666666666664,
"alnum_prop": 0.65,
"repo_name": "zeroonegit/python",
"id": "53c38e01ab30344925a3454cf7c4373df02ccd95",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python_programming/mass_stone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "301"
},
{
"name": "Python",
"bytes": "67431"
}
],
"symlink_target": ""
}
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Sqrt(function_node.FunctionNode):
    """Elementwise square root function node."""

    @property
    def label(self):
        return 'sqrt'

    def check_type_forward(self, in_types):
        # Exactly one floating-point input is expected.
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f',
        )

    def forward(self, x):
        # Retain the output so backward can reuse sqrt(x) without recomputing.
        self.retain_outputs((0,))
        xp = cuda.get_array_module(*x)
        y = xp.sqrt(x[0], dtype=x[0].dtype)
        return utils.force_array(y),

    def backward(self, indexes, grad_outputs):
        # d/dx sqrt(x) = 1 / (2 * sqrt(x)); the retained output is y = sqrt(x).
        y = self.get_retained_outputs()[0]
        gy = grad_outputs[0]
        return gy / (y * 2.0),
class RsqrtGPU(function_node.FunctionNode):
    """Elementwise reciprocal square root, using CuPy's fused ``rsqrt``."""

    @property
    def label(self):
        return 'rsqrt'

    def check_type_forward(self, in_types):
        # Exactly one floating-point input is expected.
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f',
        )

    def forward_gpu(self, inputs):
        # Retain the output so backward can reuse y = x**-0.5.
        self.retain_outputs((0,))
        x, = inputs
        y = cuda.cupyx.rsqrt(x, dtype=x.dtype)
        return utils.force_array(y),

    def backward(self, indexes, grad_outputs):
        # d/dx x**-0.5 = -0.5 * x**-1.5 = -0.5 * y**3 with y = x**-0.5.
        y, = self.get_retained_outputs()
        gy, = grad_outputs
        return gy * (y ** 3) * -0.5,
def sqrt(x):
    """Elementwise square root function.

    .. math::
       y_i = \\sqrt x_i.

    If the value of :math:`x_i` is negative, it returns ``NaN`` for
    :math:`y_i`, following the underlying numpy and cupy specifications.

    Args:
        x (~chainer.Variable): Input variable.

    Returns:
        ~chainer.Variable: Output variable.
    """
    # FunctionNode.apply returns a tuple of outputs; unwrap the single output.
    return Sqrt().apply((x,))[0]
def rsqrt(x):
    """Computes elementwise reciprocal of square root of input :math:`x_i`.

    .. math::
       y_i = {1 \\over \\sqrt x_i}.

    Args:
        x (~chainer.Variable): Input variable.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :func:`~chainer.functions.sqrt`
    """
    xp = cuda.get_array_module(x)
    if xp is not numpy:
        # CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`.
        return RsqrtGPU().apply((x,))[0]
    return 1.0 / sqrt(x)
|
{
"content_hash": "134b88d2821bd47bd75d7b7b4780f478",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 23.795698924731184,
"alnum_prop": 0.5779484862178039,
"repo_name": "ronekko/chainer",
"id": "3b3f045378fc11bba832b8f7b3544923dcf01b9a",
"size": "2213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/functions/math/sqrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3722585"
}
],
"symlink_target": ""
}
|
"""This module contains functions for handling requests in relation to disk
offerings.
"""
from ec2stack import errors
from ec2stack.providers import cloudstack
def get_disk_offering(disk_name):
    """
    Get the disk offering with the specified name.

    @param disk_name: Name of the disk offering to get.
    @return: Response.
    """
    request_args = {
        'command': 'listDiskOfferings',
        'name': disk_name,
    }
    # Raises via the given error callback when no matching offering exists.
    return cloudstack.describe_item_request(
        request_args, 'diskoffering', errors.invalid_disk_offering_name
    )
|
{
"content_hash": "9ac90792adc8650d9929e303772ba296",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 26.095238095238095,
"alnum_prop": 0.7025547445255474,
"repo_name": "terbolous/cloudstack-ec2stack",
"id": "8c2de44df0f4b53dc150abf866e0c9bb8d29509a",
"size": "1392",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ec2stack/providers/cloudstack/disk_offerings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "239301"
},
{
"name": "Shell",
"bytes": "5226"
}
],
"symlink_target": ""
}
|
import vtk
def ReadPolyData(filename):
    """Read a VTK XML polydata (.vtp) file and return its vtkPolyData."""
    poly_reader = vtk.vtkXMLPolyDataReader()
    poly_reader.SetFileName(filename)
    # Execute the pipeline so the output is populated before returning it.
    poly_reader.Update()
    return poly_reader.GetOutput()
def WritePolyData(input, filename):
    """Write the given vtkPolyData to a VTK XML polydata (.vtp) file."""
    # NOTE(review): the parameter name shadows the ``input`` builtin; kept
    # unchanged for backward compatibility with keyword callers.
    poly_writer = vtk.vtkXMLPolyDataWriter()
    poly_writer.SetFileName(filename)
    poly_writer.SetInputData(input)
    poly_writer.Write()
file_path = "/home/ksansom/caseFiles/mri/VWI_proj/case1/vmtk/case1_VCG.ply"
out_path = "/home/ksansom/caseFiles/mri/VWI_proj/case1/vmtk/case1_VCG_smooth.ply"

# Read the input PLY surface mesh.
ply_reader = vtk.vtkPLYReader()
ply_reader.SetFileName(file_path)
ply_reader.Update()

# Configure Laplacian smoothing; boundary points are held fixed and sharp
# features (by angle) are preserved.
smoother = vtk.vtkSmoothPolyDataFilter()
smoother.SetInputConnection(ply_reader.GetOutputPort())
smoother.SetNumberOfIterations(10)
smoother.BoundarySmoothingOff()
smoother.SetFeatureAngle(120)
smoother.SetEdgeAngle(90)
smoother.SetRelaxationFactor(.05)

# Writing pulls data through the pipeline, executing the smoother.
ply_writer = vtk.vtkPLYWriter()
ply_writer.SetFileName(out_path)
ply_writer.SetInputConnection(smoother.GetOutputPort())
ply_writer.Write()
|
{
"content_hash": "5836f2c5e05769a1b2f4fd20b1fd6939",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 25.52777777777778,
"alnum_prop": 0.7780195865070729,
"repo_name": "kayarre/Tools",
"id": "b91ad69fb937b7c84cdeb706cb84cfbf593af743",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vtk/smooth_polydata.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2306"
},
{
"name": "Mako",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "1125456"
}
],
"symlink_target": ""
}
|
import copy
import inspect
import logging
from django import forms
from django import template
from django.core import urlresolvers
from django.template.defaultfilters import slugify
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.template.defaultfilters import linebreaks, safe
from django.forms.forms import NON_FIELD_ERRORS
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
    """Dict-like workflow context that notifies its workflow on every change."""

    def __init__(self, workflow, *args, **kwargs):
        # dict.__init__ is used for the initial data, so no handlers fire
        # for pre-seeded keys.
        super(WorkflowContext, self).__init__(*args, **kwargs)
        self._workflow = workflow

    def __setitem__(self, key, val):
        super(WorkflowContext, self).__setitem__(key, val)
        # Let the owning workflow react to the changed key.
        return self._workflow._trigger_handlers(key)

    def __delitem__(self, key):
        # "Deletion" stores None instead of removing the key, so handlers
        # still fire for the cleared key.
        return self.__setitem__(key, None)

    def set(self, key, val):
        """Explicit alias for item assignment."""
        return self.__setitem__(key, val)

    def unset(self, key):
        """Explicit alias for item deletion (sets the key to None)."""
        return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
    """Metaclass that consumes an Action's ``Meta`` options onto the class."""

    def __new__(mcs, name, bases, attrs):
        # Remove Meta from the attributes before the form machinery sees it.
        meta = attrs.pop("Meta", None)
        cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
        # Copy the recognized Meta options onto the class, with defaults.
        cls.name = getattr(meta, "name", name)
        cls.slug = getattr(meta, "slug", slugify(name))
        cls.permissions = getattr(meta, "permissions", ())
        cls.progress_message = getattr(meta,
                                       "progress_message",
                                       _("Processing..."))
        cls.help_text = getattr(meta, "help_text", "")
        cls.help_text_template = getattr(meta, "help_text_template", None)
        return cls
class Action(forms.Form):
    """
    An ``Action`` represents an atomic logical interaction you can have with
    the system. This is easier to understand with a conceptual example: in the
    context of a "launch instance" workflow, actions would include "naming
    the instance", "selecting an image", and ultimately "launching the
    instance".

    Because ``Actions`` are always interactive, they always provide form
    controls, and thus inherit from Django's ``Form`` class. However, they
    have some additional intelligence added to them:

    * ``Actions`` are aware of the permissions required to complete them.

    * ``Actions`` have a meta-level concept of "help text" which is meant to be
      displayed in such a way as to give context to the action regardless of
      where the action is presented in a site or workflow.

    * ``Actions`` understand how to handle their inputs and produce outputs,
      much like :class:`~horizon.forms.SelfHandlingForm` does now.

    ``Action`` classes may define the following attributes in a ``Meta``
    class within them:

    .. attribute:: name

       The verbose name for this action. Defaults to the name of the class.

    .. attribute:: slug

       A semi-unique slug for this action. Defaults to the "slugified" name
       of the class.

    .. attribute:: permissions

       A list of permission names which this action requires in order to be
       completed. Defaults to an empty list (``[]``).

    .. attribute:: help_text

       A string of simple help text to be displayed alongside the Action's
       fields.

    .. attribute:: help_text_template

       A path to a template which contains more complex help text to be
       displayed alongside the Action's fields. In conjunction with
       :meth:`~horizon.workflows.Action.get_help_text` method you can
       customize your help text template to display practically anything.
    """
    # Python 2-style metaclass hookup; the metaclass consumes Meta options.
    __metaclass__ = ActionMetaclass

    def __init__(self, request, context, *args, **kwargs):
        # Bind to the submitted POST data on submission; otherwise build an
        # unbound form pre-filled from the workflow context.
        if request.method == "POST":
            super(Action, self).__init__(request.POST)
        else:
            super(Action, self).__init__(initial=context)

        # Subclasses must implement handle(); fail fast at construction time.
        if not hasattr(self, "handle"):
            raise AttributeError("The action %s must define a handle method."
                                 % self.__class__.__name__)
        self.request = request
        self._populate_choices(request, context)

    def __unicode__(self):
        return force_unicode(self.name)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def _populate_choices(self, request, context):
        # For each field, call populate_<field_name>_choices(request, context)
        # on the subclass (if defined) to fill the field's choices dynamically.
        for field_name, bound_field in self.fields.items():
            meth = getattr(self, "populate_%s_choices" % field_name, None)
            if meth is not None and callable(meth):
                bound_field.choices = meth(request, context)

    def get_help_text(self, extra_context=None):
        """ Returns the help text for this step. """
        text = ""
        extra_context = extra_context or {}
        # Prefer the rendered template when one is configured; fall back to
        # the plain help_text string with linebreak formatting.
        if self.help_text_template:
            tmpl = template.loader.get_template(self.help_text_template)
            context = template.RequestContext(self.request, extra_context)
            text += tmpl.render(context)
        else:
            text += linebreaks(force_unicode(self.help_text))
        return safe(text)

    def add_error(self, message):
        """
        Adds an error to the Action's Step based on API issues.
        """
        # Stored under NON_FIELD_ERRORS so it renders as a form-level error.
        self._get_errors()[NON_FIELD_ERRORS] = self.error_class([message])

    def handle(self, request, context):
        """
        Handles any requisite processing for this action. The method should
        return either ``None`` or a dictionary of data to be passed to
        :meth:`~horizon.workflows.Step.contribute`.

        Returns ``None`` by default, effectively making it a no-op.
        """
        return None
class Step(object):
"""
A step is a wrapper around an action which defines it's context in a
workflow. It knows about details such as:
* The workflow's context data (data passed from step to step).
* The data which must be present in the context to begin this step (the
step's dependencies).
* The keys which will be added to the context data upon completion of the
step.
* The connections between this step's fields and changes in the context
data (e.g. if that piece of data changes, what needs to be updated in
this step).
A ``Step`` class has the following attributes:
.. attribute:: action
The :class:`~horizon.workflows.Action` class which this step wraps.
.. attribute:: depends_on
A list of context data keys which this step requires in order to
begin interaction.
.. attribute:: contributes
A list of keys which this step will contribute to the workflow's
context data. Optional keys should still be listed, even if their
values may be set to ``None``.
.. attribute:: connections
A dictionary which maps context data key names to lists of callbacks.
The callbacks may be functions, dotted python paths to functions
which may be imported, or dotted strings beginning with ``"self"``
to indicate methods on the current ``Step`` instance.
.. attribute:: before
Another ``Step`` class. This optional attribute is used to provide
control over workflow ordering when steps are dynamically added to
workflows. The workflow mechanism will attempt to place the current
step before the step specified in the attribute.
.. attribute:: after
Another ``Step`` class. This attribute has the same purpose as
:meth:`~horizon.workflows.Step.before` except that it will instead
attempt to place the current step after the given step.
.. attribute:: help_text
A string of simple help text which will be prepended to the ``Action``
class' help text if desired.
.. attribute:: template_name
A path to a template which will be used to render this step. In
general the default common template should be used. Default:
``"horizon/common/_workflow_step.html"``.
.. attribute:: has_errors
A boolean value which indicates whether or not this step has any
errors on the action within it or in the scope of the workflow. This
attribute will only accurately reflect this status after validation
has occurred.
.. attribute:: slug
Inherited from the ``Action`` class.
.. attribute:: name
Inherited from the ``Action`` class.
.. attribute:: permissions
Inherited from the ``Action`` class.
"""
action_class = None
depends_on = ()
contributes = ()
connections = None
before = None
after = None
help_text = ""
template_name = "horizon/common/_workflow_step.html"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
    def __unicode__(self):
        """Python 2 unicode representation: the step's display name."""
        return force_unicode(self.name)
def __init__(self, workflow):
super(Step, self).__init__()
self.workflow = workflow
cls = self.__class__.__name__
if not (self.action_class and issubclass(self.action_class, Action)):
raise AttributeError("You must specify an action for %s." % cls)
self.slug = self.action_class.slug
self.name = self.action_class.name
self.permissions = self.action_class.permissions
self.has_errors = False
self._handlers = {}
if self.connections is None:
# We want a dict, but don't want to declare a mutable type on the
# class directly.
self.connections = {}
# Gather our connection handlers and make sure they exist.
for key, handlers in self.connections.items():
self._handlers[key] = []
# TODO(gabriel): This is a poor substitute for broader handling
if not isinstance(handlers, (list, tuple)):
raise TypeError("The connection handlers for %s must be a "
"list or tuple." % cls)
for possible_handler in handlers:
if callable(possible_handler):
# If it's callable we know the function exists and is valid
self._handlers[key].append(possible_handler)
continue
elif not isinstance(possible_handler, basestring):
return TypeError("Connection handlers must be either "
"callables or strings.")
bits = possible_handler.split(".")
if bits[0] == "self":
root = self
for bit in bits[1:]:
try:
root = getattr(root, bit)
except AttributeError:
raise AttributeError("The connection handler %s "
"could not be found on %s."
% (possible_handler, cls))
handler = root
elif len(bits) == 1:
# Import by name from local module not supported
raise ValueError("Importing a local function as a string "
"is not supported for the connection "
"handler %s on %s."
% (possible_handler, cls))
else:
# Try a general import
module_name = ".".join(bits[:-1])
try:
mod = import_module(module_name)
handler = getattr(mod, bits[-1])
except ImportError:
raise ImportError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
except AttributeError:
raise AttributeError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
self._handlers[key].append(handler)
    @property
    def action(self):
        """Lazily instantiate and cache this step's Action instance."""
        if not getattr(self, "_action", None):
            try:
                # Hook in the action context customization.
                workflow_context = dict(self.workflow.context)
                context = self.prepare_action_context(self.workflow.request,
                                                      workflow_context)
                self._action = self.action_class(self.workflow.request,
                                                 context)
            except:
                # Bare except is deliberate: log any failure for debugging,
                # then re-raise it unchanged.
                LOG.exception("Problem instantiating action class.")
                raise
        return self._action
def prepare_action_context(self, request, context):
"""
Allows for customization of how the workflow context is passed to the
action; this is the reverse of what "contribute" does to make the
action outputs sane for the workflow. Changes to the context are not
saved globally here. They are localized to the action.
Simply returns the unaltered context by default.
"""
return context
def get_id(self):
""" Returns the ID for this step. Suitable for use in HTML markup. """
return "%s__%s" % (self.workflow.slug, self.slug)
def _verify_contributions(self, context):
for key in self.contributes:
# Make sure we don't skip steps based on weird behavior of
# POST query dicts.
field = self.action.fields.get(key, None)
if field and field.required and not context.get(key):
context.pop(key, None)
failed_to_contribute = set(self.contributes)
failed_to_contribute -= set(context.keys())
if failed_to_contribute:
raise exceptions.WorkflowError("The following expected data was "
"not added to the workflow context "
"by the step %s: %s."
% (self.__class__,
failed_to_contribute))
return True
def contribute(self, data, context):
"""
Adds the data listed in ``contributes`` to the workflow's shared
context. By default, the context is simply updated with all the data
returned by the action.
Note that even if the value of one of the ``contributes`` keys is
not present (e.g. optional) the key should still be added to the
context with a value of ``None``.
"""
if data:
for key in self.contributes:
context[key] = data.get(key, None)
return context
def render(self):
""" Renders the step. """
step_template = template.loader.get_template(self.template_name)
extra_context = {"form": self.action,
"step": self}
context = template.RequestContext(self.workflow.request, extra_context)
return step_template.render(context)
def get_help_text(self):
""" Returns the help text for this step. """
text = linebreaks(force_unicode(self.help_text))
text += self.action.get_help_text()
return safe(text)
    def add_error(self, message):
        """
        Adds an error to the Step based on API issues.
        """
        # Delegate to the wrapped action's (form's) error handling.
        self.action.add_error(message)
class WorkflowMetaclass(type):
    """Metaclass that gives every Workflow class its own step registry."""
    def __new__(mcs, name, bases, attrs):
        # Each workflow class gets a fresh, class-local registry so steps
        # registered on one workflow class do not leak into another.
        attrs["_cls_registry"] = set([])
        # BUG FIX: the original built a throwaway class via super().__new__()
        # (before _cls_registry was even set) and discarded it, then built a
        # second class with type.__new__(). Create the class exactly once.
        return super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
    """A step that allows a user to add/remove members from a group.
    .. attribute:: show_roles
        Set to False to disable the display of the roles dropdown.
    .. attribute:: available_list_title
        The title used for the available list column.
    .. attribute:: members_list_title
        The title used for the members list column.
    .. attribute:: no_available_text
        The placeholder text used when the available list is empty.
    .. attribute:: no_members_text
        The placeholder text used when the members list is empty.
    """
    # Dedicated two-column membership widget template.
    template_name = "horizon/common/_workflow_step_update_members.html"
    # Whether the role dropdown is shown next to each member.
    show_roles = True
    # Column headings and empty-list placeholders (all translatable).
    available_list_title = _("All available")
    members_list_title = _("Members")
    no_available_text = _("None available.")
    no_members_text = _("No members.")
class Workflow(html.HTMLElement):
    """
    A Workflow is a collection of Steps. Its interface is very
    straightforward, but it is responsible for handling some very
    important tasks such as:
    * Handling the injection, removal, and ordering of arbitrary steps.
    * Determining if the workflow can be completed by a given user at runtime
      based on all available information.
    * Dispatching connections between steps to ensure that when context data
      changes all the applicable callback functions are executed.
    * Verifying/validating the overall data integrity and subsequently
      triggering the final method to complete the workflow.
    The ``Workflow`` class has the following attributes:
    .. attribute:: name
        The verbose name for this workflow which will be displayed to the user.
        Defaults to the class name.
    .. attribute:: slug
        The unique slug for this workflow. Required.
    .. attribute:: steps
        Read-only access to the final ordered set of step instances for
        this workflow.
    .. attribute:: default_steps
        A list of :class:`~horizon.workflows.Step` classes which serve as the
        starting point for this workflow's ordered steps. Defaults to an empty
        list (``[]``).
    .. attribute:: finalize_button_name
        The name which will appear on the submit button for the workflow's
        form. Defaults to ``"Save"``.
    .. attribute:: success_message
        A string which will be displayed to the user upon successful completion
        of the workflow. Defaults to
        ``"{{ workflow.name }} completed successfully."``
    .. attribute:: failure_message
        A string which will be displayed to the user upon failure to complete
        the workflow. Defaults to ``"{{ workflow.name }} did not complete."``
    .. attribute:: depends_on
        A roll-up list of all the ``depends_on`` values compiled from the
        workflow's steps.
    .. attribute:: contributions
        A roll-up list of all the ``contributes`` values compiled from the
        workflow's steps.
    .. attribute:: template_name
        Path to the template which should be used to render this workflow.
        In general the default common template should be used. Default:
        ``"horizon/common/_workflow.html"``.
    .. attribute:: entry_point
        The slug of the step which should initially be active when the
        workflow is rendered. This can be passed in upon initialization of
        the workflow, or set anytime after initialization but before calling
        either ``get_entry_point`` or ``render``.
    .. attribute:: redirect_param_name
        The name of a parameter used for tracking the URL to redirect to upon
        completion of the workflow. Defaults to ``"next"``.
    .. attribute:: object
        The object (if any) which this workflow relates to. In the case of
        a workflow which creates a new resource the object would be the created
        resource after the relevant creation steps have been undertaken. In
        the case of a workflow which updates a resource it would be the
        resource being updated after it has been retrieved.
    """
    __metaclass__ = WorkflowMetaclass
    slug = None
    default_steps = ()
    template_name = "horizon/common/_workflow.html"
    finalize_button_name = _("Save")
    success_message = _("%s completed successfully.")
    failure_message = _("%s did not complete.")
    redirect_param_name = "next"
    _registerable_class = Step

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(Workflow, self).__init__(*args, **kwargs)
        if self.slug is None:
            raise AttributeError("The workflow %s must have a slug."
                                 % self.__class__.__name__)
        self.name = getattr(self, "name", self.__class__.__name__)
        self.request = request
        self.depends_on = set([])
        self.contributions = set([])
        self.entry_point = entry_point
        self.object = None
        # Put together our steps in order. Note that we pre-register
        # non-default steps so that we can identify them and subsequently
        # insert them in order correctly.
        self._registry = dict([(step_class, step_class(self)) for step_class
                               in self.__class__._cls_registry
                               if step_class not in self.default_steps])
        self._gather_steps()
        # Determine all the context data we need to end up with.
        for step in self.steps:
            self.depends_on = self.depends_on | set(step.depends_on)
            self.contributions = self.contributions | set(step.contributes)
        # Initialize our context. For ease we can preseed it with a
        # regular dictionary. This should happen after steps have been
        # registered and ordered.
        self.context = WorkflowContext(self)
        context_seed = context_seed or {}
        # Only seed keys the workflow actually knows about.
        clean_seed = dict([(key, val)
                           for key, val in context_seed.items()
                           if key in self.contributions | self.depends_on])
        self.context_seed = clean_seed
        self.context.update(clean_seed)
        if request and request.method == "POST":
            for step in self.steps:
                valid = step.action.is_valid()
                # Be sure to use the CLEANED data if the workflow is valid.
                if valid:
                    data = step.action.cleaned_data
                else:
                    data = request.POST
                self.context = step.contribute(data, self.context)

    @property
    def steps(self):
        # Lazily compute the ordered step instances on first access.
        if getattr(self, "_ordered_steps", None) is None:
            self._gather_steps()
        return self._ordered_steps

    def get_step(self, slug):
        """ Returns the instantiated step matching the given slug. """
        for step in self.steps:
            if step.slug == slug:
                return step

    def _gather_steps(self):
        """Instantiate and order all steps the current user may access."""
        ordered_step_classes = self._order_steps()
        for default_step in self.default_steps:
            self.register(default_step)
            self._registry[default_step] = default_step(self)
        self._ordered_steps = [self._registry[step_class]
                               for step_class in ordered_step_classes
                               if has_permissions(self.request.user,
                                                  self._registry[step_class])]

    def _order_steps(self):
        """Insert dynamically registered steps honoring before/after hints."""
        steps = list(copy.copy(self.default_steps))
        additional = self._registry.keys()
        for step in additional:
            try:
                min_pos = steps.index(step.after)
            except ValueError:
                min_pos = 0
            try:
                max_pos = steps.index(step.before)
            except ValueError:
                max_pos = len(steps)
            if min_pos > max_pos:
                # BUG FIX: the message previously interpolated the entire
                # ``additional`` list as %(new)s instead of the offending
                # step, producing a misleading error.
                raise exceptions.WorkflowError("The step %(new)s can't be "
                                               "placed between the steps "
                                               "%(after)s and %(before)s; the "
                                               "step %(before)s comes before "
                                               "%(after)s."
                                               % {"new": step,
                                                  "after": step.after,
                                                  "before": step.before})
            steps.insert(max_pos, step)
        return steps

    def get_entry_point(self):
        """
        Returns the slug of the step which the workflow should begin on.
        This method takes into account both already-available data and errors
        within the steps.
        """
        # If we have a valid specified entry point, use it.
        if self.entry_point:
            if self.get_step(self.entry_point):
                return self.entry_point
        # Otherwise fall back to calculating the appropriate entry point.
        for step in self.steps:
            if step.has_errors:
                return step.slug
            try:
                step._verify_contributions(self.context)
            except exceptions.WorkflowError:
                return step.slug
        # If nothing else, just return the first step.
        return self.steps[0].slug

    def _trigger_handlers(self, key):
        """Run every step's handlers registered for *key*; collect results."""
        responses = []
        handlers = [(step.slug, f) for step in self.steps
                    for f in step._handlers.get(key, [])]
        for slug, handler in handlers:
            responses.append((slug, handler(self.request, self.context)))
        return responses

    @classmethod
    def register(cls, step_class):
        """ Registers a :class:`~horizon.workflows.Step` with the workflow. """
        if not inspect.isclass(step_class):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(step_class, cls._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % cls._registerable_class.__name__)
        if step_class in cls._cls_registry:
            return False
        else:
            cls._cls_registry.add(step_class)
            return True

    @classmethod
    def unregister(cls, step_class):
        """
        Unregisters a :class:`~horizon.workflows.Step` from the workflow.
        """
        try:
            cls._cls_registry.remove(step_class)
        except KeyError:
            raise base.NotRegistered('%s is not registered' % cls)
        # NOTE(review): ``_unregister`` is not defined anywhere in the
        # visible source -- confirm it exists on a base class.
        return cls._unregister(step_class)

    def validate(self, context):
        """
        Hook for custom context data validation. Should return a boolean
        value or raise :class:`~horizon.exceptions.WorkflowValidationError`.
        """
        return True

    def is_valid(self):
        """
        Verified that all required data is present in the context and
        calls the ``validate`` method to allow for finer-grained checks
        on the context data.
        """
        missing = self.depends_on - set(self.context.keys())
        if missing:
            raise exceptions.WorkflowValidationError(
                "Unable to complete the workflow. The values %s are "
                "required but not present." % ", ".join(missing))
        # Validate each step. Cycle through all of them to catch all errors
        # in one pass before returning.
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)

    def finalize(self):
        """
        Finalizes a workflow by running through all the actions in order
        and calling their ``handle`` methods. Returns ``True`` on full success,
        or ``False`` for a partial success, e.g. there were non-critical
        errors. (If it failed completely the function wouldn't return.)
        """
        partial = False
        for step in self.steps:
            try:
                data = step.action.handle(self.request, self.context)
                if data is True or data is None:
                    continue
                elif data is False:
                    partial = True
                else:
                    self.context = step.contribute(data or {}, self.context)
            except:
                # Report (via horizon's handler) but keep going; any failed
                # step makes the overall result "partial".
                partial = True
                exceptions.handle(self.request)
        if not self.handle(self.request, self.context):
            partial = True
        return not partial

    def handle(self, request, context):
        """
        Handles any final processing for this workflow. Should return a boolean
        value indicating success.
        """
        return True

    def get_success_url(self):
        """
        Returns a URL to redirect the user to upon completion. By default it
        will attempt to parse a ``success_url`` attribute on the workflow,
        which can take the form of a reversible URL pattern name, or a
        standard HTTP URL.
        """
        try:
            return urlresolvers.reverse(self.success_url)
        except urlresolvers.NoReverseMatch:
            return self.success_url

    def format_status_message(self, message):
        """
        Hook to allow customization of the message returned to the user
        upon successful or unsuccessful completion of the workflow.
        By default it simply inserts the workflow's name into the message
        string.
        """
        if "%s" in message:
            return message % self.name
        else:
            return message

    def render(self):
        """ Renders the workflow. """
        workflow_template = template.loader.get_template(self.template_name)
        extra_context = {"workflow": self}
        if self.request.is_ajax():
            extra_context['modal'] = True
        context = template.RequestContext(self.request, extra_context)
        return workflow_template.render(context)

    def get_absolute_url(self):
        """ Returns the canonical URL for this workflow.
        This is used for the POST action attribute on the form element
        wrapping the workflow.
        For convenience it defaults to the value of
        ``request.get_full_path()`` with any query string stripped off,
        e.g. the path at which the workflow was requested.
        """
        return self.request.get_full_path().partition('?')[0]

    def add_error_to_step(self, message, slug):
        """
        Adds an error to the workflow's Step with the
        specifed slug based on API issues. This is useful
        when you wish for API errors to appear as errors on
        the form rather than using the messages framework.
        """
        step = self.get_step(slug)
        if step:
            step.add_error(message)
|
{
"content_hash": "ee6617911c45725abcd00f2fc666b730",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 79,
"avg_line_length": 37.882991556091675,
"alnum_prop": 0.5856392294220666,
"repo_name": "99cloud/keystone_register",
"id": "8ed1334ec5bd6df93e38fd0d1b1e1d8501842f0c",
"size": "32055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/workflows/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "268926"
},
{
"name": "Python",
"bytes": "1535870"
},
{
"name": "Shell",
"bytes": "12674"
}
],
"symlink_target": ""
}
|
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
# REST endpoint for the stored ISAM runtime credential.
uri = "/wga/apiac/credentials"
# Appliance modules and minimum firmware version required by this API.
requires_modules = ["wga"]
requires_version = "9.0.7"
def get(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the stored ISAM credential
    """
    description = "Retrieve the stored ISAM credential"
    return isamAppliance.invoke_get(description, "{0}".format(uri),
                                    requires_modules=requires_modules,
                                    requires_version=requires_version)
def add(isamAppliance, admin_id, admin_pwd, admin_domain="Default", check_mode=False, force=False):
    """
    Store the ISAM administrator credentials
    """
    exist, warnings = _check(isamAppliance)
    # Guard clause: already stored and not forced -> nothing to change.
    if not (force is True or exist is False):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    payload = {
        'admin_id': admin_id,
        'admin_pwd': admin_pwd,
        'admin_domain': admin_domain
    }
    return isamAppliance.invoke_post("Store the ISAM administrator credentials",
                                     "{0}".format(uri), payload,
                                     requires_modules=requires_modules,
                                     requires_version=requires_version)
def delete(isamAppliance, check_mode=False, force=False):
    """
    Delete the stored ISAM administrator credential
    """
    exist, warnings = _check(isamAppliance)
    # Guard clause: nothing stored and not forced -> nothing to delete.
    if not (force is True or exist is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_delete("Delete the stored ISAM administrator credential",
                                       "{0}".format(uri),
                                       requires_modules=requires_modules,
                                       requires_version=requires_version)
def _check(isamAppliance, check_mode=False, force=False):
    """Return (exists, warnings) for the stored ISAM credential.

    ``exists`` is True only when the appliance reports a non-empty
    credential object containing a non-None ``admin_id``.
    """
    ret_obj = get(isamAppliance)
    data = ret_obj['data']
    # ROBUSTNESS FIX: use .get() so a credential object without an
    # 'admin_id' key is treated as "not stored" instead of raising KeyError.
    exists = data != {} and data.get('admin_id') is not None
    return exists, ret_obj['warnings']
|
{
"content_hash": "f5e72f73b6d3aa1a64637c43907dd54f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 116,
"avg_line_length": 37.59701492537314,
"alnum_prop": 0.5716554188169909,
"repo_name": "IBM-Security/ibmsecurity",
"id": "b93e53727df6173b290259b09a8510f54cad5e76",
"size": "2519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibmsecurity/isam/web/api_access_control/utilities/credential.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1501984"
}
],
"symlink_target": ""
}
|
from bluebottle.fsm.state import register, Transition, AllStates
from bluebottle.funding.states import BasePaymentStateMachine, BankAccountStateMachine
from bluebottle.funding_flutterwave.models import FlutterwavePayment, FlutterwaveBankAccount
from django.utils.translation import gettext_lazy as _
@register(FlutterwavePayment)
class FlutterwavePaymentStateMachine(BasePaymentStateMachine):
    """State machine for Flutterwave payments."""
    # Refunds are not supported for Flutterwave, so the refund transitions
    # inherited from BasePaymentStateMachine are disabled.
    request_refund = None
    refund_requested = None
@register(FlutterwaveBankAccount)
class FlutterwaveBankAccountStateMachine(BankAccountStateMachine):
    """State machine for Flutterwave bank accounts."""
    # Manual (non-automatic) transition moving the account from any state to
    # "rejected" as part of a migration to Lipisha.
    migrate_to_lipisha = Transition(
        AllStates(),
        BankAccountStateMachine.rejected,
        name=_("Migrate to Lipisha"),
        description=_("Migrate to Lipisha account"),
        automatic=False
    )
|
{
"content_hash": "f51ff8ed020ed661a39e6cadba592ac7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 33.69565217391305,
"alnum_prop": 0.7858064516129032,
"repo_name": "onepercentclub/bluebottle",
"id": "e1df954de931863aa2bd58b8b739647d08ec03cd",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/funding_flutterwave/states.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
__authors__ = ("Al Korgun <alkorgun@gmail.com>", "John Smith <mrdoctorwho@gmail.com>")
__version__ = "2.3"
__license__ = "MIT"
"""
Implements a single-threaded longpoll client
"""
import select
import socket
import json
import httplib
import threading
import time
import vkapi as api
import utils
from __main__ import *
# Seconds between watchdog sweeps over the open longpoll connections.
SOCKET_CHECK_TIMEOUT = 10
# Poll (re)initialization retry policy: attempts and delay (seconds).
LONGPOLL_RETRY_COUNT = 10
LONGPOLL_RETRY_TIMEOUT = 10
# Timeout for select() over the poll sockets, seconds.
SELECT_WAIT = 25
# Maximum age of a longpoll connection before it is recycled, seconds.
OPENER_LIFETIME = 60
# Return codes used by processPollResult().
CODE_SKIP = -1
CODE_FINE = 0
CODE_ERROR = 1
# VK longpoll event type codes.
TYPE_MSG = 4
TYPE_MSG_EDIT = 5
TYPE_MSG_READ_IN = 6 # we read the message
TYPE_MSG_READ_OUT = 7 # they read the message
TYPE_PRS_IN = 8
TYPE_PRS_OUT = 9
TYPE_TYPING = 61
# VK message flag bits.
FLAG_OUT = 2
FLAG_CHAT = 16
# Peer ids above this value denote group chats.
MIN_CHAT_UID = 2000000000
# TCP keepalive tuning values (seconds); see man(7) tcp.
TCP_KEEPINTVL = 60
TCP_KEEPIDLE = 60
def debug(message, *args):
    """Log *message* at DEBUG level, but only when DEBUG_POLL is enabled."""
    if DEBUG_POLL:
        logger.debug(message, *args)
def read(opener, source):
    """
    Read a socket ignoring errors
    Args:
        opener: a socket to read
        source: the user's jid
    Returns:
        JSON data or an empty string
    """
    data = ""
    try:
        data = opener.read()
    except (httplib.BadStatusLine, socket.error, socket.timeout) as e:
        # Swallow transient transport errors; caller treats "" as no data.
        logger.warning("longpoll: got error `%s` (jid: %s)", e.message, source)
    return data
def processPollResult(user, data):
    """
    Processes a poll result
    Decides whether to send a chat/groupchat message or presence or just pass the iteration
    Args:
        user: the User object
        data: a valid json with poll result
    Returns:
        CODE_SKIP: just skip iteration, not adding the user to poll again
        CODE_FINE: add user for the next iteration
        CODE_ERROR: user should be added to the init buffer
    """
    debug("longpoll: processing result (jid: %s)", user.source)
    retcode = CODE_FINE
    try:
        data = json.loads(data)
    except ValueError:
        # Empty or garbled body: ask the caller to reinitialize the poll.
        logger.error("longpoll: no data. Gonna request again (jid: %s)",
            user.source)
        retcode = CODE_ERROR
        return retcode
    if "failed" in data:
        # The longpoll server invalidated our session/key.
        logger.debug("longpoll: failed. Searching for a new server (jid: %s)", user.source)
        retcode = CODE_ERROR
    else:
        # Remember the new timestamp so the next request continues from here.
        user.vk.pollConfig["ts"] = data["ts"]
        for evt in data.get("updates", ()):
            typ = evt.pop(0)
            debug("longpoll: got updates, processing event %s with arguments %s (jid: %s)",
                typ, str(evt), user.source)
            if typ == TYPE_MSG: # new message
                message = None
                mid, flags, uid, date, body, subject, attachments = evt
                if subject:
                    subject = subject.get("title")
                out = flags & FLAG_OUT
                chat = (uid > MIN_CHAT_UID) # a groupchat always has uid > 2000000000
                # there is no point to request messages if there's only a single emoji attachment
                # we actually only need to request for new messages if there are complex attachments in it (e.g. photos)
                if len(attachments) == 1 and "emoji" in attachments:
                    attachments = None
                if not out:
                    if not attachments and not chat:
                        # Simple incoming private message: build it locally
                        # without an extra API roundtrip.
                        message = [{"out": 0, "from_id": uid, "id": mid, "date": date, "text": body}]
                    # we subtract 1 from msg id b/c VK now has reverse history so we need to ask what happened before this exact message
                    utils.runThread(user.sendMessages, (False, message, mid - 1, uid), "sendMessages-%s" % user.source)
            elif typ == TYPE_MSG_READ_OUT:
                # They read our message: forward a chat marker if we cached it.
                uid, mid, _ = evt
                cache = user.msgCacheByUser.get(uid)
                if cache:
                    xmppMID = cache["xmpp"]
                    cache.clear()
                    sendChatMarker(user.source, vk2xmpp(uid), xmppMID)
            elif typ == TYPE_PRS_IN: # user has joined
                uid = abs(evt[0])
                sendPresence(user.source, vk2xmpp(uid), hash=USER_CAPS_HASH)
            elif typ == TYPE_PRS_OUT: # user has left
                uid = abs(evt[0])
                sendPresence(user.source, vk2xmpp(uid), "unavailable")
            elif typ == TYPE_TYPING: # user is typing
                uid = evt[0]
                # Throttle: only notify once until the timestamp is cleared.
                if uid not in user.typing:
                    sendMessage(user.source, vk2xmpp(uid), typ="composing")
                user.typing[uid] = time.time()
            retcode = CODE_FINE
    return retcode
def configureSocket(sock):
    """Enable and tune TCP keepalive on *sock*; see man(7) tcp.

    Failures are ignored (some platforms lack these socket options).
    """
    debug("setting socket parameters...")
    try:
        # Attribute lookups stay inside the try block: platforms without
        # these constants raise AttributeError, which we swallow.
        for level, optname, value in (
                # enable keepalive probes
                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
                # interval between subsequent keepalive probes,
                # regardless of traffic (overrides tcp_keepalive_intvl)
                (socket.SOL_TCP, socket.TCP_KEEPINTVL, TCP_KEEPINTVL),
                # idle time before the first keepalive probe
                # (overrides tcp_keepalive_time)
                (socket.SOL_TCP, socket.TCP_KEEPIDLE, TCP_KEEPIDLE)):
            sock.setsockopt(level, optname, value)
    except (AttributeError, OSError):
        debug("unable to set socket parameters")
# TODO: make it abstract, to reuse in Steampunk
class Poll(object):
    """
    Class used to handle longpoll
    """
    # Sockets currently being polled: {socket: (user, opener)}.
    __list = {}
    # Users whose poll initialization failed and is being retried.
    __buff = set()
    __lock = threading.Lock()
    clear = staticmethod(__list.clear)
    watchdogRunning = False

    @classmethod
    def init(cls):
        # Toggle the running flag and (re)start the watchdog thread.
        cls.watchdogRunning ^= True
        cls.watchdog()

    @classmethod
    def __add(cls, user):
        """
        Issues a readable socket to use it in select()
        Adds user in buffer if a error occurred
        Adds user in cls.__list if no errors
        """
        if user.source in Users:
            # in case the new instance was created
            user = Users[user.source]
        opener = user.vk.makePoll()
        debug("longpoll: user has been added to poll (jid: %s)", user.source)
        if opener:
            sock = opener.sock
            configureSocket(sock)
            cls.__list[sock] = (user, opener)
            return opener
        logger.warning("longpoll: got null opener! (jid: %s)", user.source)
        cls.__addToBuffer(user)

    @classmethod
    def add(cls, some_user):
        """
        Adds the User class object to poll
        """
        debug("longpoll: adding user to poll (jid: %s)", some_user.source)
        with cls.__lock:
            if some_user in cls.__buff:
                return None
            # check if someone is trying to add an already existing user
            for sock, (user, opener) in cls.__list.iteritems():
                if some_user == user:
                    break
            else:
                try:
                    cls.__add(some_user)
                except api.LongPollError as e:
                    logger.debug("longpoll: failed to make poll: %s (jid: %s)", e.message, some_user.source)
                    cls.__addToBuffer(some_user)
                except Exception:
                    crashLog("poll.add")

    @classmethod
    def __addToBuffer(cls, user):
        """
        Adds user to the list of "bad" users
        The list is mostly contain users whose poll
        request was failed for some reasons
        Args:
            user: the user object
        """
        cls.__buff.add(user)
        logger.debug("longpoll: adding user to the init buffer (jid: %s)", user.source)
        utils.runThread(cls.handleUser, (user,), "handleBuffer-%s" % user.source)

    @classmethod
    def __removeFromBuffer(cls, user):
        """
        Instantly removes a user from the buffer
        Args:
            user: the user object
        """
        if user in cls.__buff:
            cls.__buff.remove(user)

    @classmethod
    def removeFromBuffer(cls, user):
        """
        Removes a user from the buffer
        Args:
            user: the user object
        """
        with cls.__lock:
            cls.__removeFromBuffer(user)

    @classmethod
    def handleUser(cls, user):
        """
        Tries to reinitialize poll for LONGPOLL_RETRY_COUNT every LONGPOLL_RETRY_TIMEOUT seconds
        As soon as poll is initialized the user will be removed from buffer
        Args:
            user: the user object
        """
        for _ in xrange(LONGPOLL_RETRY_COUNT):
            if user.source in Users:
                user = Users[user.source]  # we might have a new instance here
                if user.vk.initPoll():
                    with cls.__lock:
                        logger.debug("longpoll: successfully initialized longpoll (jid: %s)",
                            user.source)
                        cls.__add(user)
                        cls.__removeFromBuffer(user)
                    break
            else:
                logger.debug("longpoll: while we were wasting our time"
                    ", the user has left (jid: %s)", user.source)
                cls.removeFromBuffer(user)
                return None
            time.sleep(LONGPOLL_RETRY_TIMEOUT)
        else:
            cls.removeFromBuffer(user)
            logger.error("longpoll: failed to add user to poll in 10 retries"
                " (jid: %s)", user.source)

    @classmethod
    def process(cls):
        """
        Processes poll sockets by select.select()
        As soon as socket will be ready for reading, user.processPollResult() is called
        Read processPollResult.__doc__ to learn more about status codes
        """
        while ALIVE:
            socks = cls.__list.keys()
            if not socks:
                time.sleep(0.02)
                continue
            try:
                ready, error = select.select(socks, [], socks, SELECT_WAIT)[::2]
            except (select.error, socket.error, socket.timeout) as e:
                logger.error("longpoll: %s", e.message)
                continue
            for sock in error:
                with cls.__lock:
                    # We will just re-add the user to poll
                    # in case if anything weird happen to the socket
                    try:
                        cls.__add(cls.__list.pop(sock)[0])
                    except KeyError:
                        continue
            for sock in ready:
                with cls.__lock:
                    try:
                        user, opener = cls.__list.pop(sock)
                    except KeyError:
                        continue
                # Update the user instance
                user = Users.get(user.source)
                if user:
                    cls.processResult(user, opener)
            with cls.__lock:
                # Drop users who went offline while we were polling.
                for sock, (user, opener) in cls.__list.items():
                    if hasattr(user, "vk") and not user.vk.online:
                        logger.debug("longpoll: user is not online, so removing them from poll"
                            " (jid: %s)", user.source)
                        try:
                            del cls.__list[sock]
                        except KeyError:
                            pass

    @classmethod
    @utils.threaded
    def processResult(cls, user, opener):
        """
        Processes the select result (see above)
        Handles answers from user.processPollResult()
        Decides if need to add user to poll or not
        """
        data = read(opener, user.source)
        result = utils.execute(processPollResult, (user, data,))
        debug("longpoll: result=%s (jid: %s)", result, user.source)
        if result == CODE_SKIP:
            return None
        # if we set user.vk.pollInitialized to False
        # then makePoll() will throw an exception
        # by doing so, we force the user's poll to be reinitialized
        if result == CODE_ERROR:
            user.vk.pollInitialized = False
        cls.add(user)

    @classmethod
    @utils.threaded
    def watchdog(cls):
        """Periodically recycle longpoll connections older than OPENER_LIFETIME."""
        # BUG FIX: this loop previously tested ``cls.watchdog`` -- the bound
        # method itself, which is always truthy -- so toggling
        # ``watchdogRunning`` off via init() could never stop the loop.
        while cls.watchdogRunning:
            for sock, (user, opener) in cls.__list.items():
                if (time.time() - opener.created) > OPENER_LIFETIME:
                    with cls.__lock:
                        try:
                            del cls.__list[sock]
                            cls.processResult(user, opener)
                        except KeyError:
                            pass
            time.sleep(SOCKET_CHECK_TIMEOUT)
|
{
"content_hash": "a2a9e2af25f4580e3405c1c735e666ae",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 122,
"avg_line_length": 27.986301369863014,
"alnum_prop": 0.6714635340186001,
"repo_name": "mrDoctorWho/vk4xmpp",
"id": "74cfcd9b16d5385b3060abd7271018e390d8a47e",
"size": "10264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/longpoll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "941"
},
{
"name": "JavaScript",
"bytes": "3542"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Perl",
"bytes": "3238"
},
{
"name": "Python",
"bytes": "376269"
},
{
"name": "Ruby",
"bytes": "10069"
},
{
"name": "Shell",
"bytes": "3254"
}
],
"symlink_target": ""
}
|
import sincfilter
import numpy as np
def indexOfOutliers(a, threshold_sigma=4, normFreq=.45):
    """Identify outlier points in a timeseries

    Inputs:
    a (1d np array) Input data. a is assumed to represent
    equally spaced data

    Optional Inputs:
    threshold_sigma How many sigma away from mean must a point
    be to be considered an outlier
    normFreq Input arg to sincfilter, defines how aggressively
    to smooth.

    Returns:
    1d array of booleans of length a. Value set to true if
    element is an outlier

    Description
    Data is heavily high pass filtered, with the assumption that
    residuals are mostly gaussian with a few outlying points.
    Those points are flagged and the boolean mask is returned.
    """
    # Filter length is ~5% of the series. BUG FIX: np.floor() returns a
    # float, but a point count must be an int; use integer division instead.
    numPointsInFilter = int(len(a) // 20)
    # Force the filter length to be even (preserves original behaviour).
    if numPointsInFilter % 2 == 1:
        numPointsInFilter += 1
    filt = sincfilter.highPass(np.asarray(a), normFreq, numPointsInFilter)
    rms = np.std(filt)
    # Flag every point whose high-pass residual exceeds the sigma threshold.
    idx = np.abs(filt) > threshold_sigma * rms
    return idx
|
{
"content_hash": "88d2b356e5e32ce52b7fbeed634df87c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 29.783783783783782,
"alnum_prop": 0.6497277676950998,
"repo_name": "barentsen/dave",
"id": "d535194480f60a2908d293a9921083f532833668",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/outliers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28133"
},
{
"name": "C++",
"bytes": "94566"
},
{
"name": "Fortran",
"bytes": "326733"
},
{
"name": "Jupyter Notebook",
"bytes": "826626"
},
{
"name": "M",
"bytes": "239"
},
{
"name": "MATLAB",
"bytes": "696786"
},
{
"name": "Makefile",
"bytes": "257"
},
{
"name": "Python",
"bytes": "966118"
},
{
"name": "TeX",
"bytes": "7449"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db.models import Q
from django.db import connection
from api.api_views import APIView
# noinspection PyProtectedMember
from api.fields import get_boolean_value
from api.status import HTTP_201_CREATED, HTTP_200_OK
from api.signals import user_relationship_changed
from api.exceptions import PermissionDenied
from api.accounts.user.base.serializers import ApiKeysSerializer, UserSerializer, ExtendedUserSerializer
from api.accounts.user.utils import get_user, get_users
from api.accounts.messages import LOG_USER_CREATE, LOG_USER_UPDATE, LOG_USER_DELETE
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from gui.models import User, AdminPermission
from vms.models import Dc
class UserView(APIView):
    """API view for listing, creating, updating and deleting users.

    Operates on gui.models.User via UserSerializer (or
    ExtendedUserSerializer when extended output is requested).
    Not bound to a single datacenter (dc_bound=False).
    """
    serializer = UserSerializer
    dc_bound = False
    # Default ordering and the only field callers may order by.
    order_by_default = order_by_fields = ('username',)
    # Map API ordering field name -> actual DB column.
    order_by_field_map = {'created': 'id'}
    def __init__(self, request, username, data, many=False):
        """Fetch one user (username given) or a filtered user queryset.

        :param request: API request object
        :param username: username of a single user, or falsy for a listing
        :param data: request data (query parameters / payload)
        :param many: whether the response should be a list
        """
        super(UserView, self).__init__(request)
        self.username = username
        self.data = data
        self.many = many
        sr = ('dc_bound', 'default_dc')  # select_related fields
        pr = ()  # prefetch_related fields
        # Group membership cannot be managed when ACLs are disabled.
        if not settings.ACL_ENABLED and self.data:
            self.data.pop('groups', None)
        if self.extended:
            self.serializer = ExtendedUserSerializer
            pr = ('roles__dc_set',)
        if username:
            self.user = get_user(request, username, sr=sr, pr=pr, data=data)
        else:
            # Listing: pick related fetches according to the output detail.
            if self.full or self.extended:
                if self.extended:
                    pr = ('roles', 'roles__dc_set')
                else:
                    pr = ('roles',)
            else:
                sr = ()
                pr = ()
            self.user = get_users(request, data=data, sr=sr, pr=pr, where=Q(is_active=self.active),
                                  order_by=self.order_by)
    def is_active(self, data):
        # Filtering by active status only applies to GET listings.
        return self.request.method == 'GET' and get_boolean_value(data.get('active', True))
    @property
    def active(self):
        # Whether to list active (True) or inactive users; defaults to active.
        if self.data:
            return self.is_active(self.data)
        else:
            return True
    def user_modify(self, update=False, serializer=None):
        """Create (update=False) or update (update=True) a user.

        Validates input with the serializer, saves the user, emits the task
        response/log entry, and invalidates cached admin ID lists when staff
        or group membership changed.
        """
        affected_groups = ()
        if not serializer:
            serializer = self.serializer
        user = self.user
        ser = serializer(self.request, user, data=self.data, partial=update)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, obj=user, dc_bound=False)
        ser.save()
        if update:
            msg = LOG_USER_UPDATE
            status = HTTP_200_OK
        else:
            msg = LOG_USER_CREATE
            status = HTTP_201_CREATED
        res = SuccessTaskResponse(self.request, ser.data, status=status, obj=user, msg=msg, owner=ser.object,
                                  detail_dict=ser.detail_dict(), dc_bound=False)
        task_id = res.data.get('task_id')
        if serializer == UserSerializer:
            # User's is_staff attribute was changed -> Clear the cached list of super admins
            if ser.is_staff_changed:
                User.clear_super_admin_ids()
            # User's groups were changed, which may affect the cached list of DC admins for DCs which are attached
            # to these groups. So we need to clear the list of admins cached for each affected DC
            # noinspection PyProtectedMember
            if user._roles_to_save is not None:
                # noinspection PyProtectedMember
                affected_groups = set(user._roles_to_save)
                affected_groups.update(ser.old_roles)
                affected_dcs = Dc.objects.distinct().filter(roles__in=affected_groups,
                                                            roles__permissions__id=AdminPermission.id)
                for dc in affected_dcs:
                    User.clear_dc_admin_ids(dc)
                # User was removed from some groups and may loose access to DCs which are attached to this group
                # So we better set his current_dc to default_dc
                if ser.old_roles and not user.is_staff:
                    user.reset_current_dc()
        # NOTE(review): connection.on_commit appears to be a project-specific
        # extension (stock django.db.connection has no on_commit); presumably
        # it defers the signal until after the transaction commits — confirm.
        connection.on_commit(lambda: user_relationship_changed.send(task_id, user_name=user.username, # Signal!
                                                                    affected_groups=tuple(
                                                                              group.id for group in affected_groups)))
        return res
    def get(self):
        """Return one user or the user listing (GET)."""
        return self._get(self.user, self.data, self.many, field_name='username')
    def post(self):
        """Create a new user (POST)."""
        if not self.request.user.is_staff and self.data:
            self.data.pop('dc_bound', None) # default DC binding cannot be changed when creating object
        return self.user_modify(update=False)
    def put(self):
        """Update an existing user (PUT)."""
        return self.user_modify(update=True)
    def delete(self):
        """Delete a user (DELETE), unless predefined or still referenced."""
        user = self.user
        # Predefined users can not be deleted
        if user.id in (settings.ADMIN_USER, settings.SYSTEM_USER, self.request.user.id):
            raise PermissionDenied
        relations = user.get_relations()
        if relations:
            message = {
                'detail': _('Cannot delete user, because he has relations to some objects.'),
                'relations': relations
            }
            return FailureTaskResponse(self.request, message, obj=user, dc_bound=False)
        # Remember everything needed after the DB row is gone.
        dd = {'email': user.email, 'date_joined': user.date_joined}
        username = user.username
        was_staff = user.is_staff
        old_roles = list(user.roles.all())
        ser = self.serializer(self.request, user)
        ser.object.delete()
        res = SuccessTaskResponse(self.request, None, obj=user, msg=LOG_USER_DELETE, detail_dict=dd, dc_bound=False)
        task_id = res.data.get('task_id')
        connection.on_commit(lambda: user_relationship_changed.send(task_id, user_name=username, # Signal!
                                                                    affected_groups=tuple(
                                                                              group.id for group in old_roles)))
        # User was removed, which may affect the cached list of DC admins for DCs which are attached to user's groups
        # So we need to clear the list of admins cached for each affected DC
        affected_dcs = Dc.objects.distinct().filter(roles__in=old_roles, roles__permissions__id=AdminPermission.id)
        for dc in affected_dcs:
            User.clear_dc_admin_ids(dc)
        if was_staff:
            User.clear_super_admin_ids()
        return res
    def api_key(self):
        """Show (GET) or regenerate (other methods) the user's API keys."""
        # Allow show and regenerate API keys only for logged in users
        if self.request.auth == 'api_key':
            raise PermissionDenied
        if self.request.method.lower() == 'get':
            return self._get(self.user, self.data, serializer=ApiKeysSerializer)
        else:
            # noinspection PyTypeChecker
            return self.user_modify(update=True, serializer=ApiKeysSerializer)
|
{
"content_hash": "d665305a4523ebff7856306c19b65b07",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 117,
"avg_line_length": 40.6875,
"alnum_prop": 0.5926546571707862,
"repo_name": "erigones/esdc-ce",
"id": "e16bf8a26d8d4429b424c805de6f1267c8453d89",
"size": "7161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/accounts/user/base/api_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
}
|
import django.db.utils
from django.db import transaction
from rest_framework import viewsets, status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from strichliste import settings
from .serializers import TransactionSerializer
from .serializers import TransactionValueZero, TransactionValueError
from .models import User, Transaction
class UserViewSet(viewsets.ViewSet):
    """ViewSet for Users.

    Provides listing, retrieval and creation of Users.
    Currently neither authentication nor authorization are supported.
    """

    @staticmethod
    def create(request) -> Response:
        """Create a user from the request payload.

        :param request: HTTP Request
        :return: Response
        """
        name = request.data.get('name')
        if name is None:
            return Response(data={'msg': "No name provided"}, status=status.HTTP_400_BAD_REQUEST)
        new_user = User(name=name, mail_address=request.data.get('mail_address'))
        try:
            new_user.save()
        except django.db.utils.IntegrityError:
            # A user with this (unique) name already exists.
            return Response(data={'msg': "user {} already exists".format(name)}, status=status.HTTP_409_CONFLICT)
        return Response(data=new_user.to_full_dict(), status=status.HTTP_201_CREATED)

    @staticmethod
    def list(request) -> Response:
        """List active users, paginated by limit/offset.

        :param request: HTTP Request
        :return: Response
        """
        pager = LimitOffsetPagination()
        pager.max_limit = 250
        pager.default_limit = 100
        page = pager.paginate_queryset(User.objects.filter(active=True), request)
        payload = {'entries': [entry.to_dict() for entry in page],
                   'limit': pager.limit,
                   'offset': pager.offset,
                   'overall_count': pager.count}
        return Response(data=payload, status=status.HTTP_200_OK)

    @staticmethod
    def retrieve(request, pk=None) -> Response:
        """Retrieve a single user by primary key.

        :param request: HTTP Request
        :param pk: User primary key
        :return: Response
        """
        try:
            found = User.objects.get(id=pk)
        except User.DoesNotExist:
            return Response(data={'msg': 'user {} not found'.format(pk)}, status=status.HTTP_404_NOT_FOUND)
        return Response(data=found.to_full_dict())
class UserTransactionViewSet(viewsets.ViewSet):
    """ViewSet for Transactions per User.

    The url must provide a primary key for a user.
    This ViewSet allows access to and creation of Transactions for a
    single User. Currently neither authentication nor authorization
    are supported.
    """

    @staticmethod
    def list(request, user_pk=None) -> Response:
        """List transactions for a single user.

        :param request: Request send from the client
        :param user_pk: Primary key to identify a user
        :return: Response
        """
        try:
            user = User.objects.get(id=user_pk)
        except User.DoesNotExist:
            return Response(data={'msg': 'user {} not found'.format(user_pk)}, status=status.HTTP_404_NOT_FOUND)
        paginator = LimitOffsetPagination()
        paginator.max_limit = 250
        paginator.default_limit = 100
        transactions = paginator.paginate_queryset(Transaction.objects.filter(user=user), request)
        return Response(data={'entries': [x.to_dict() for x in transactions], 'limit': paginator.limit,
                              'offset': paginator.offset, 'overall_count': paginator.count},
                        status=status.HTTP_200_OK)

    @staticmethod
    def retrieve(request, pk=None, user_pk=None) -> Response:
        """Retrieve a single transaction for a user.

        :param request: Request send from the client
        :param pk: Primary key to identify a transaction
        :param user_pk: Primary key to identify a user
        :return: Response
        """
        # BUG FIX: an unknown user previously raised User.DoesNotExist
        # (HTTP 500); answer 404 like list() does.
        try:
            user = User.objects.get(id=user_pk)
        except User.DoesNotExist:
            return Response(data={'msg': 'user {} not found'.format(user_pk)}, status=status.HTTP_404_NOT_FOUND)
        transactions = list(Transaction.objects.filter(user=user, id=pk))
        if len(transactions) == 0:
            return Response(data={'msg': 'transaction not found'}, status=status.HTTP_404_NOT_FOUND)
        assert len(transactions) == 1, "Primary key is not unique"
        return Response(data=transactions[0].to_dict())

    @staticmethod
    @transaction.atomic
    def create(request, user_pk=None) -> Response:
        """Create a new transaction for a user and adjust their balance.

        :param request: Request send from the client
        :param user_pk: Primary key to identify a user
        :return: Response
        """
        value = request.data.get('value')
        if value is None:
            return Response(data={'msg': 'Value missing'}, status=status.HTTP_400_BAD_REQUEST)
        try:
            serializer = TransactionSerializer(data={'user': user_pk, 'value': value})
            user = User.objects.get(pk=user_pk)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            # NOTE(review): 'value' comes straight from request.data; this
            # assumes it is numeric after serializer validation — confirm
            # whether serializer.validated_data should be used instead.
            user.balance += value
            user.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        except User.DoesNotExist:
            # BUG FIX: a missing user raised DoesNotExist (HTTP 500); the
            # old "except KeyError" never matched it. Answer 404 instead.
            return Response(data={'msg': 'user {} not found'.format(user_pk)}, status=status.HTTP_404_NOT_FOUND)
        except KeyError as e:
            # BUG FIX: pass a string, not the exception object, so the
            # response body is JSON-serializable.
            return Response(data={'msg': str(e)}, status=status.HTTP_404_NOT_FOUND)
        except TransactionValueZero as e:
            return Response(data={'msg': str(e)}, status=status.HTTP_400_BAD_REQUEST)
        except TransactionValueError as e:
            return Response(data={'msg': str(e)}, status=status.HTTP_403_FORBIDDEN)
class TransactionViewSet(viewsets.ViewSet):
    """ViewSet for Transactions.

    Provides read access to all transactions. Currently neither
    authentication nor authorization are supported.
    """

    @staticmethod
    def list(request):
        """List transactions for all users, paginated by limit/offset.

        :param request: Request send from the client
        :return: Response
        """
        pager = LimitOffsetPagination()
        pager.max_limit = 250
        pager.default_limit = 100
        page = pager.paginate_queryset(Transaction.objects.all(), request)
        entries = [item.to_dict() for item in page]
        return Response(data={'entries': entries, 'limit': pager.limit,
                              'offset': pager.offset, 'overall_count': pager.count},
                        status=status.HTTP_200_OK)

    @staticmethod
    def retrieve(request, pk=None) -> Response:
        """Retrieve a single transaction by primary key.

        :param request: Request send from the client
        :param pk: Primary key to identify a transaction
        :return: Response
        """
        matches = list(Transaction.objects.filter(id=pk))
        if not matches:
            return Response(data={'msg': 'transaction not found'}, status=status.HTTP_404_NOT_FOUND)
        assert len(matches) == 1, "Private key should identify a single transaction"
        return Response(data=matches[0].to_dict())
class DebugViewSet(viewsets.ViewSet):
    """Debug-only endpoints, active only when settings.DEBUG is set."""

    @staticmethod
    def clear():
        """Delete all transactions and users; return a status message."""
        Transaction.objects.all().delete()
        User.objects.all().delete()
        return "All cleared"

    @staticmethod
    def check_balance():
        """Check that every user's stored balance matches the recomputed one."""
        result = [user.balance == user.calc_balance() for user in User.objects.all()]
        return 'Everything matches' if all(result) else 'Differences detected'

    @staticmethod
    def list(request):
        """Report whether debug mode is active."""
        if settings.DEBUG:
            return Response(data={'msg': 'Debug active'})
        else:
            return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    @staticmethod
    def retrieve(request, pk=None) -> Response:
        """Run the debug command named by pk ('clear' or 'check_balance')."""
        if not settings.DEBUG:
            return Response(status=status.HTTP_501_NOT_IMPLEMENTED)
        commands = {'clear': DebugViewSet.clear,
                    'check_balance': DebugViewSet.check_balance}
        command = commands.get(pk)
        # BUG FIX: an unknown command name used to raise KeyError (HTTP 500);
        # answer 404 instead.
        if command is None:
            return Response(data={'msg': 'unknown command {}'.format(pk)}, status=status.HTTP_404_NOT_FOUND)
        return Response(data={'msg': command()})
|
{
"content_hash": "005d69d8c683629fddcefbce002312e2",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 113,
"avg_line_length": 37.97115384615385,
"alnum_prop": 0.6362370220308939,
"repo_name": "Don42/strichliste-django",
"id": "1cc678ca9e6090d62dff6fdf3469642c8726f024",
"size": "7898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "strichliste/strichliste/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12191"
}
],
"symlink_target": ""
}
|
from ConfigParser import NoSectionError
import cPickle
from twisted.plugin import IPlugin, getPlugins
from twisted.python import log
from txzmq import ZmqFactory, ZmqEndpoint, ZmqPubConnection
import zope.interface
import zope.interface.verify
from automatron.controller.router import DEFAULT_SUB_ENDPOINT
from automatron.core.event import EventManager
class IAutomatronPluginFactory(IPlugin):
    # zope-style interface: methods declare the contract only, so there is
    # no ``self`` parameter. Implementations are discovered via getPlugins()
    # and verified with zope.interface.verify in PluginManager.load_plugins().
    def __call__(controller):
        """
        Create a new plugin instance for the given controller.
        """
class PluginManager(EventManager):
    """Loads Automatron plugins and publishes events over ZeroMQ."""

    def __init__(self, controller):
        """Load plugins and connect the ZeroMQ PUB socket.

        :param controller: the Automatron controller owning this manager
        """
        super(PluginManager, self).__init__(controller)
        self.load_plugins()
        try:
            # BUG FIX: ConfigParser.items() returns a list of (name, value)
            # tuples, which has no .get() method; convert to a dict so the
            # config.get() lookup below works.
            config = dict(self.controller.config_file.items('router'))
        except NoSectionError:
            config = {}
        zmq_factory = ZmqFactory()
        sub_endpoint = ZmqEndpoint('connect', config.get('sub-endpoint', DEFAULT_SUB_ENDPOINT))
        self.zmq_pub = ZmqPubConnection(zmq_factory, sub_endpoint)

    def load_plugins(self):
        """Discover plugin factories and register each valid one."""
        plugin_classes = list(getPlugins(IAutomatronPluginFactory))
        for plugin_class in plugin_classes:
            try:
                # Reject factories that do not fully implement the interface.
                zope.interface.verify.verifyObject(IAutomatronPluginFactory, plugin_class)
            except (zope.interface.verify.BrokenImplementation, zope.interface.verify.BrokenMethodImplementation) as e:
                log.err(e, 'Plugin %s is broken' % plugin_class.__name__)
                continue
            self.register_event_handler(plugin_class(self.controller))

    def emit(self, event, *args):
        """Publish an event (pickled args) on the ZeroMQ PUB socket."""
        tag = '%s.%s' % (event.interface.getName(), event.getName())
        self.zmq_pub.publish(cPickle.dumps(args), tag)

    def emit_internal(self, event, *args):
        """Dispatch an event to locally registered handlers only."""
        tag = '%s.%s' % (event.interface.getName(), event.getName())
        self.dispatch_event(tag, *args)
|
{
"content_hash": "ea164040b72f2bb242ba71a8445c0fa7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 119,
"avg_line_length": 37.979166666666664,
"alnum_prop": 0.6708721886999451,
"repo_name": "automatron/automatron",
"id": "ff0dd6f1f1aa11d08454ddb24d91feff913762d3",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automatron/backend/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31259"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the gtfs_universalcalendar table and its unique constraint."""
        # Adding model 'UniversalCalendar'
        db.create_table('gtfs_universalcalendar', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True)),
            ('service', self.gf('django.db.models.fields.related.ForeignKey')(related_name='all_dates', to=orm['gtfs.Service'])),
            ('date', self.gf('django.db.models.fields.DateField')()),
        ))
        db.send_create_signal('gtfs', ['UniversalCalendar'])
        # Adding unique constraint on 'UniversalCalendar', fields ['source', 'service', 'date']
        db.create_unique('gtfs_universalcalendar', ['source_id', 'service_id', 'date'])
    def backwards(self, orm):
        """Reverse of forwards(): drop the unique constraint, then the table."""
        # Removing unique constraint on 'UniversalCalendar', fields ['source', 'service', 'date']
        db.delete_unique('gtfs_universalcalendar', ['source_id', 'service_id', 'date'])
        # Deleting model 'UniversalCalendar'
        db.delete_table('gtfs_universalcalendar')
models = {
'gtfs.agency': {
'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.TextField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'gtfs.block': {
'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'},
'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.calendar': {
'Meta': {'object_name': 'Calendar'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gtfs.calendardate': {
'Meta': {'object_name': 'CalendarDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'exception_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calendar_exceptions'", 'to': "orm['gtfs.Service']"})
},
'gtfs.fare': {
'Meta': {'unique_together': "(('source', 'fare_id'),)", 'object_name': 'Fare'},
'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fare_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.IntegerField', [], {}),
'price': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'transfer_duration': ('django.db.models.fields.IntegerField', [], {}),
'transfers': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'gtfs.farerule': {
'Meta': {'object_name': 'FareRule'},
'contains': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_contains'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_destinations'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'fare': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['gtfs.Fare']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_origins'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rules'", 'null': 'True', 'to': "orm['gtfs.Route']"})
},
'gtfs.frequency': {
'Meta': {'object_name': 'Frequency'},
'end_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'end_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'headway_secs': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'frequencies'", 'to': "orm['gtfs.Trip']"})
},
'gtfs.route': {
'Meta': {'unique_together': "(('agency', 'route_id'),)", 'object_name': 'Route'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'routes'", 'null': 'True', 'to': "orm['gtfs.Agency']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {}),
'route_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'route_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'text_color': ('django.db.models.fields.TextField', [], {'max_length': '6', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'})
},
'gtfs.service': {
'Meta': {'unique_together': "(('source', 'service_id'),)", 'object_name': 'Service'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.shape': {
'Meta': {'unique_together': "(('source', 'shape_id'),)", 'object_name': 'Shape'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True'}),
'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'gtfs.stop': {
'Meta': {'unique_together': "(('source', 'stop_id'),)", 'object_name': 'Stop'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'location_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'parent_station': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_stops'", 'null': 'True', 'to': "orm['gtfs.Stop']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'stop_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stops'", 'null': 'True', 'to': "orm['gtfs.Zone']"})
},
'gtfs.stoptime': {
'Meta': {'ordering': "('trip', 'stop_sequence')", 'object_name': 'StopTime'},
'arrival_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'arrival_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'drop_off_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickup_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shape_dist_travelled': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'times'", 'to': "orm['gtfs.Stop']"}),
'stop_headsign': ('django.db.models.fields.TextField', [], {}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stop_times'", 'to': "orm['gtfs.Trip']"})
},
'gtfs.transfer': {
'Meta': {'object_name': 'Transfer'},
'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfers_from'", 'to': "orm['gtfs.Stop']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_transfer_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfers_to'", 'to': "orm['gtfs.Stop']"}),
'transfer_type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'gtfs.trip': {
'Meta': {'unique_together': "(('service', 'trip_id'), ('route', 'trip_id'))", 'object_name': 'Trip'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trips'", 'null': 'True', 'to': "orm['gtfs.Block']"}),
'direction_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'headsign': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trips'", 'to': "orm['gtfs.Route']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trips'", 'to': "orm['gtfs.Service']"}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trips'", 'null': 'True', 'to': "orm['gtfs.Shape']"}),
'short_name': ('django.db.models.fields.TextField', [], {}),
'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'gtfs.universalcalendar': {
'Meta': {'unique_together': "(('source', 'service', 'date'),)", 'object_name': 'UniversalCalendar'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_dates'", 'to': "orm['gtfs.Service']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.zone': {
'Meta': {'unique_together': "(('source', 'zone_id'),)", 'object_name': 'Zone'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'zone_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'})
}
}
complete_apps = ['gtfs']
|
{
"content_hash": "de757b38d90a0f83997d49c3e750e81a",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 166,
"avg_line_length": 73.98461538461538,
"alnum_prop": 0.53954391072295,
"repo_name": "rcoup/traveldash",
"id": "3abd2545fddd4460667dcd824d12fc7bc65ce97a",
"size": "14445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traveldash/gtfs/migrations/0012_auto__add_universalcalendar__add_unique_universalcalendar_source_servi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8656"
},
{
"name": "JavaScript",
"bytes": "10854"
},
{
"name": "Python",
"bytes": "493266"
},
{
"name": "Ruby",
"bytes": "874"
}
],
"symlink_target": ""
}
|
"""
defines class :class:`mdecl_wrapper_t` that allows to work on set of
declarations, as it was one declaration.
The :class:`class <mdecl_wrapper_t>` allows user to not write "for" loops
within the code.
"""
import os
class call_redirector_t(object):
    """Internal helper that fans a single method call out to many objects."""

    def __init__(self, name, decls):
        """Create a call_redirector_t instance.

        :param name: name of the method to invoke on every object in the
            `decls` list
        :param decls: list of objects
        """
        object.__init__(self)
        self.name = name
        self.decls = decls

    def __call__(self, *arguments, **keywords):
        """Invoke method :attr:`call_redirector_t.name` with the given
        arguments on every object in :attr:`call_redirector_t.decls`."""
        method_name = self.name
        for declaration in self.decls:
            getattr(declaration, method_name)(*arguments, **keywords)
class mdecl_wrapper_t(object):
    """
    multiple declarations class wrapper

    The main purpose of this class is to allow an user to work on many
    declarations, as they were only one single declaration.

    For example, instead of writing a `for` loop like the following

    .. code-block:: python

       for c in global_namespace.classes():
           c.attribute = "xxxx"

    you can write:

    .. code-block:: python

       global_namespace.classes().attribute = "xxxx"

    The same functionality could be applied on "set" methods too.
    """

    def __init__(self, decls):
        """:param decls: list of declarations to operate on.
        :type decls: list of :class:`declaration wrappers <decl_wrapper_t>`
        """
        object.__init__(self)
        # Write through __dict__ to avoid triggering our own __setattr__,
        # which broadcasts assignments to the wrapped declarations.
        self.__dict__['declarations'] = decls

    def __bool__(self):
        # Truthy iff at least one declaration is wrapped.
        return bool(self.declarations)

    def __len__(self):
        """returns the number of declarations"""
        return len(self.declarations)

    def __getitem__(self, index):
        """provides access to declaration"""
        return self.declarations[index]

    def __iter__(self):
        return iter(self.declarations)

    def __ensure_attribute(self, name):
        # Raise if any wrapped declaration lacks the attribute `name`.
        invalid_decls = [d for d in self.declarations if not hasattr(d, name)]
        sep = os.linesep + ' '
        if invalid_decls:
            raise RuntimeError((
                "Next declarations don't have '%s' attribute: %s")
                % (name, sep.join(map(str, invalid_decls))))

    def __setattr__(self, name, value):
        """Updates the value of attribute on all declarations.
        :param name: name of attribute
        :param value: new value of attribute
        """
        self.__ensure_attribute(name)
        for d in self.declarations:
            setattr(d, name, value)

    def __getattr__(self, name):
        """:param name: name of method
        """
        return call_redirector_t(name, self.declarations)

    def __contains__(self, item):
        return item in self.declarations

    def to_list(self):
        """Return the wrapped declarations as a new plain list."""
        # Simplified from a manual append loop into a builtin list() call;
        # same behaviour (a fresh list is returned).
        return list(self.declarations)
|
{
"content_hash": "820bf2488f0a025a22766e88df6f8335",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 28.412844036697248,
"alnum_prop": 0.5970293832741362,
"repo_name": "CIBC-Internal/itk",
"id": "abb096645aa3c5ba67f5bd0e8547af213b9f9714",
"size": "3292",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "306"
},
{
"name": "C",
"bytes": "30093589"
},
{
"name": "C++",
"bytes": "46793708"
},
{
"name": "CMake",
"bytes": "2111149"
},
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "DIGITAL Command Language",
"bytes": "709"
},
{
"name": "Fortran",
"bytes": "2260380"
},
{
"name": "HTML",
"bytes": "208088"
},
{
"name": "Io",
"bytes": "1833"
},
{
"name": "Java",
"bytes": "28598"
},
{
"name": "Lex",
"bytes": "6877"
},
{
"name": "Makefile",
"bytes": "212859"
},
{
"name": "Objective-C",
"bytes": "49279"
},
{
"name": "Objective-C++",
"bytes": "6591"
},
{
"name": "OpenEdge ABL",
"bytes": "85244"
},
{
"name": "Perl",
"bytes": "18552"
},
{
"name": "Python",
"bytes": "886554"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "122388"
},
{
"name": "Tcl",
"bytes": "74786"
},
{
"name": "WebAssembly",
"bytes": "4056"
},
{
"name": "XSLT",
"bytes": "195448"
},
{
"name": "Yacc",
"bytes": "20428"
}
],
"symlink_target": ""
}
|
"""Support for LaMetric time services."""
from __future__ import annotations
from collections.abc import Sequence
from demetriek import (
AlarmSound,
Chart,
LaMetricError,
Model,
Notification,
NotificationIconType,
NotificationPriority,
NotificationSound,
Simple,
Sound,
)
import voluptuous as vol
from homeassistant.const import CONF_DEVICE_ID, CONF_ICON
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_CYCLES,
CONF_DATA,
CONF_ICON_TYPE,
CONF_MESSAGE,
CONF_PRIORITY,
CONF_SOUND,
DOMAIN,
SERVICE_CHART,
SERVICE_MESSAGE,
)
from .coordinator import LaMetricDataUpdateCoordinator
from .helpers import async_get_coordinator_by_device_id
# Fields shared by every LaMetric notification service call.
SERVICE_BASE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_DEVICE_ID): cv.string,
        vol.Optional(CONF_CYCLES, default=1): cv.positive_int,
        vol.Optional(CONF_ICON_TYPE, default=NotificationIconType.NONE): vol.Coerce(
            NotificationIconType
        ),
        vol.Optional(CONF_PRIORITY, default=NotificationPriority.INFO): vol.Coerce(
            NotificationPriority
        ),
        # A sound may be either an alarm sound or a notification sound id.
        vol.Optional(CONF_SOUND): vol.Any(
            vol.Coerce(AlarmSound), vol.Coerce(NotificationSound)
        ),
    }
)
# Schema for the "message" service: message text plus an optional icon.
SERVICE_MESSAGE_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_MESSAGE): cv.string,
        vol.Optional(CONF_ICON): cv.string,
    }
)
# Schema for the "chart" service: a list of integer data points.
SERVICE_CHART_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_DATA): vol.All(cv.ensure_list, [vol.Coerce(int)]),
    }
)
@callback
def async_setup_services(hass: HomeAssistant) -> None:
    """Register the LaMetric chart and message notification services."""

    async def _async_service_chart(call: ServiceCall) -> None:
        """Handle a chart service call by sending a single chart frame."""
        target = async_get_coordinator_by_device_id(
            hass, call.data[CONF_DEVICE_ID]
        )
        frames = [Chart(data=call.data[CONF_DATA])]
        await async_send_notification(target, call, frames)

    async def _async_service_message(call: ServiceCall) -> None:
        """Handle a message service call by sending a simple text frame."""
        target = async_get_coordinator_by_device_id(
            hass, call.data[CONF_DEVICE_ID]
        )
        frame = Simple(
            icon=call.data.get(CONF_ICON),
            text=call.data[CONF_MESSAGE],
        )
        await async_send_notification(target, call, [frame])

    hass.services.async_register(
        DOMAIN,
        SERVICE_CHART,
        _async_service_chart,
        schema=SERVICE_CHART_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_MESSAGE,
        _async_service_message,
        schema=SERVICE_MESSAGE_SCHEMA,
    )
async def async_send_notification(
    coordinator: LaMetricDataUpdateCoordinator,
    call: ServiceCall,
    frames: Sequence[Chart | Simple],
) -> None:
    """Build a notification from the service call data and push it.

    Raises HomeAssistantError when the LaMetric API reports a failure.
    """
    sound = (
        Sound(id=call.data[CONF_SOUND], category=None)
        if CONF_SOUND in call.data
        else None
    )
    model = Model(
        frames=frames,
        cycles=call.data[CONF_CYCLES],
        sound=sound,
    )
    notification = Notification(
        icon_type=NotificationIconType(call.data[CONF_ICON_TYPE]),
        priority=NotificationPriority(call.data.get(CONF_PRIORITY)),
        model=model,
    )
    try:
        await coordinator.lametric.notify(notification=notification)
    except LaMetricError as ex:
        raise HomeAssistantError("Could not send LaMetric notification") from ex
|
{
"content_hash": "3e549d4ae88ff89e17c4690284ee7908",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 84,
"avg_line_length": 27.83823529411765,
"alnum_prop": 0.6378763866877971,
"repo_name": "w1ll1am23/home-assistant",
"id": "2cbdfff6fd8fe882e69a1fe45b098078db88965d",
"size": "3786",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lametric/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import random

# Print one uniformly random digit in the inclusive range [0, 9].
value = random.randint(0, 9)
print(value)
|
{
"content_hash": "d8a034365d0f6c873101b6a17db895c0",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 14,
"alnum_prop": 0.7619047619047619,
"repo_name": "HarendraSingh22/Python-Guide-for-Beginners",
"id": "4dfeb5c8335a224d7fb7e4a978c2fbeb5a4c743a",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/randomNumber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6480"
}
],
"symlink_target": ""
}
|
import time
from flask import request
from flask_restful import abort
from funcy import project
from peewee import IntegrityError
import urllib2
import json
from redash import models
from redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \
require_permission_or_owner, require_admin
from redash.handlers.base import BaseResource, require_fields, get_object_or_404
from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email
def invite_user(org, inviter, user):
    """Email an invitation link to ``user`` on behalf of ``inviter``.

    Returns the invitation URL that was emailed.
    """
    link = invite_link_for_user(user)
    send_invite_email(inviter, user, link, org)
    return link
def requestUserByEmail(email):
    """Look up user info for ``email`` from the internal SLAPI service.

    Returns the decoded JSON payload on success, or a ``{'code': -1, ...}``
    error dict on any failure (network error, bad JSON, ...) so callers can
    branch on ``result['code']`` without handling exceptions.
    """
    try:
        response = urllib2.urlopen(
            'http://slapi.guaguaxiche.com/slapi/redash/getuserbyemail?email=%s'
            % email)
        try:
            content = response.read()
        finally:
            # The original leaked the HTTP connection; always close it.
            response.close()
        return json.loads(content)
    except Exception:
        # Deliberate best-effort: any failure maps to a uniform error dict.
        return {'code': -1, 'message': u'User info query failed.'}
class UserListResource(BaseResource):
    """REST resource for listing users and creating (+ inviting) new ones."""
    @require_permission('list_users')
    def get(self):
        # Every user in the caller's organization, serialized for the API.
        return [u.to_dict() for u in models.User.all(self.current_org)]
    @require_admin
    def post(self):
        """Create a user from the request JSON and email them an invite.

        Requires at least an 'email' field; a bare username is expanded
        with the company domain. The record is enriched from the internal
        SLAPI lookup before being saved.
        """
        req = request.get_json(force=True)
        # require_fields(req, ('name', 'email'))
        require_fields(req, ('email', ))
        # Accept either a full address or a bare username; bare usernames
        # get the company domain appended.
        email = req['email'] if '@' in req['email'] else '%s@guaguaxiche.com' % req['email']
        user = models.User(org=self.current_org,
                           # name=req['name'],
                           email=email,
                           groups=[self.current_org.default_group.id])
        try:
            # Enrich the record from the internal user service; reject the
            # request outright if the lookup failed (code != 0).
            userinfo = requestUserByEmail(email)
            if userinfo['code'] == 0:
                user.active = True
                user.name = userinfo['username']
                user.gg_args = {'cities': userinfo['cities']}
            else:
                abort(400, message=userinfo['message'])
            user.save()
        except IntegrityError as e:
            # NOTE(review): e.message is Python 2 only — confirm runtime.
            if "email" in e.message:
                abort(400, message='Email already taken.')
            abort(500)
        self.record_event({
            'action': 'create',
            'timestamp': int(time.time()),
            'object_id': user.id,
            'object_type': 'user'
        })
        invite_url = invite_user(self.current_org, self.current_user, user)
        d = user.to_dict()
        d['invite_link'] = invite_url
        return d
class UserInviteResource(BaseResource):
    """Admin endpoint that (re-)sends an invitation email to a user."""

    @require_admin
    def post(self, user_id):
        """Send an invite to ``user_id``; return the user dict with the link."""
        target = models.User.get_by_id_and_org(user_id, self.current_org)
        link = invite_user(self.current_org, self.current_user, target)
        payload = target.to_dict()
        payload['invite_link'] = link
        return payload
class UserResetPasswordResource(BaseResource):
    """Admin endpoint that emails a password-reset link to a user."""

    @require_admin
    def post(self, user_id):
        """Send a password reset email to ``user_id``.

        Returns nothing; the reset link travels only via email.
        """
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        # The link is delivered by email only; binding it to an unused
        # local variable (as the original did) served no purpose.
        send_password_reset_email(user)
class UserResource(BaseResource):
    """REST resource for reading and updating a single user."""
    def get(self, user_id):
        """Return the user; includes the API key for admins/the owner."""
        require_permission_or_owner('list_users', user_id)
        user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org)
        return user.to_dict(with_api_key=is_admin_or_owner(user_id))
    def post(self, user_id):
        """Update profile fields; password changes require the old password."""
        require_admin_or_owner(user_id)
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        req = request.get_json(True)
        # Whitelist the keys that may be updated through this endpoint.
        params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))
        if 'password' in params and 'old_password' not in params:
            abort(403, message="Must provide current password to update password.")
        if 'old_password' in params and not user.verify_password(params['old_password']):
            abort(403, message="Incorrect current password.")
        if 'password' in params:
            user.hash_password(params.pop('password'))
            params.pop('old_password')
        # NOTE(review): if 'old_password' is sent without 'password', it stays
        # in params and is passed to update_instance below — confirm intended.
        if 'groups' in params and not self.current_user.has_permission('admin'):
            abort(403, message="Must be admin to change groups membership.")
        try:
            user.update_instance(**params)
        except IntegrityError as e:
            # NOTE(review): e.message is Python 2 only — confirm runtime.
            if "email" in e.message:
                message = "Email already taken."
            else:
                message = "Error updating record"
            abort(400, message=message)
        self.record_event({
            'action': 'edit',
            'timestamp': int(time.time()),
            'object_id': user.id,
            'object_type': 'user',
            'updated_fields': params.keys()
        })
        return user.to_dict(with_api_key=is_admin_or_owner(user_id))
|
{
"content_hash": "fdfc7b5ea1f5275fb7c30a564c88e216",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 108,
"avg_line_length": 32.9375,
"alnum_prop": 0.6015180265654649,
"repo_name": "guaguadev/redash",
"id": "1df3d6ac0d56c8995a4c063469c4e1433c03ccf2",
"size": "4743",
"binary": false,
"copies": "1",
"ref": "refs/heads/guagua",
"path": "redash/handlers/users.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "239783"
},
{
"name": "HTML",
"bytes": "121423"
},
{
"name": "JavaScript",
"bytes": "279730"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Nginx",
"bytes": "577"
},
{
"name": "Python",
"bytes": "501609"
},
{
"name": "Ruby",
"bytes": "709"
},
{
"name": "Shell",
"bytes": "43388"
}
],
"symlink_target": ""
}
|
import collections
import ConfigParser
import io
class Error(Exception):
    """Base class for all wiring-config parsing errors."""
    pass


class InvalidConfigError(Error):
    """Indicates an error parsing a GreenPiThumb wiring config."""
    pass


class IllegalGpioPinNumberError(Error):
    """Indicates an attempt to parse a GPIO pin with an invalid value."""


class IllegalAdcChannelError(Error):
    """Indicates an attempt to parse an ADC channel with an invalid value."""


class DuplicateGpioPinNumberError(Error):
    """Indicates the same GPIO pin was assigned to multiple components."""


class DuplicateAdcChannelError(Error):
    """Indicates the same ADC channel was assigned to multiple components."""
# Represents GreenPiThumb's Raspberry Pi GPIO pin configuration.
# One field per wired component; see parse() for how each is populated.
_GpioPinConfig = collections.namedtuple('_GpioPinConfig', [
    'pump', 'dht11', 'soil_moisture_1', 'soil_moisture_2', 'mcp3008_clk',
    'mcp3008_dout', 'mcp3008_din', 'mcp3008_cs_shdn'
])
def _validate_gpio_pin_config(gpio_config):
    """Validates a GPIO pin configuration.

    Args:
        gpio_config: The GPIO configuration object to validate.

    Raises:
        DuplicateGpioPinNumberError when the same GPIO pin is assigned to
            multiple components.
    """
    # NOTE(review): soil_moisture_1 and soil_moisture_2 are not included in
    # the duplicate check below — confirm whether that is intentional.
    used_pins = set()
    for pin in [
            gpio_config.pump, gpio_config.dht11, gpio_config.mcp3008_clk,
            gpio_config.mcp3008_dout, gpio_config.mcp3008_din,
            gpio_config.mcp3008_cs_shdn
    ]:
        if pin in used_pins:
            raise DuplicateGpioPinNumberError(
                'GPIO pin cannot be assigned to multiple components: %d' % pin)
        used_pins.add(pin)
class _AdcChannelConfig(object):
"""Represents GreenPiThumb's ADC channel configuration."""
def __init__(self, soil_moisture_sensor, light_sensor):
self._soil_moisture_sensor = soil_moisture_sensor
self._light_sensor = light_sensor
@property
def soil_moisture_sensor(self):
return self._soil_moisture_sensor
@property
def light_sensor(self):
return self._light_sensor
def _validate_adc_channel_config(adc_config):
"""Validates an ADC channel configuration.
Args:
adc_config: The ADC channel configuration to validate
Raises:
DuplicateAdcChannelError when the same ADC channel is assigned to
multiple components.
"""
if adc_config.soil_moisture_sensor == adc_config.light_sensor:
raise DuplicateAdcChannelError(
'Soil moisture sensor and light sensor cannot have the same ADC '
'channel: %d' % adc_config.soil_moisture_sensor)
class _WiringConfig(object):
"""Represents GreenPiThumb's wiring configuration."""
def __init__(self, gpio_pin_config, adc_channel_config):
self._gpio_pin_config = gpio_pin_config
self._adc_channel_config = adc_channel_config
@property
def gpio_pins(self):
return self._gpio_pin_config
@property
def adc_channels(self):
return self._adc_channel_config
def _parse_gpio_pin(pin_raw):
"""Parses a GPIO pin value from the configuration file.
Parses a GPIO pin value. Must be a valid Raspberry Pi GPIO pin number. Must
be a value from 2 to 27.
Args:
pin_raw: The raw GPIO pin value from the configuration file.
Returns:
The parsed GPIO pin value as an int.
Raises:
IllegalGpioPinNumberError when the value is invalid.
"""
try:
pin = int(pin_raw)
except ValueError:
raise IllegalGpioPinNumberError(
'Invalid GPIO pin: %s. Pin must be a value from 2 to 27. '
'Be sure to use BCM numbering, not BOARD numbering.' % pin_raw)
if not (2 <= pin <= 27):
raise IllegalGpioPinNumberError(
'Invalid GPIO pin: %s. Pin must be a value from 2 to 27. '
'Be sure to use BCM numbering, not BOARD numbering.' % pin_raw)
return pin
def _parse_adc_channel(channel_raw):
"""Parses an ADC channel value from the configuration file.
Parses an ADC channel value. Must be a value from 0 to 7.
Args:
channel_raw: The raw ADC channel value from the configuration file.
Returns:
The parsed channel value as an int.
Raises:
IllegalAdcChannelError when the value is invalid.
"""
try:
channel = int(channel_raw)
except ValueError:
raise IllegalAdcChannelError(
'Invalid ADC channel: %s. Channel must be a value from 0 to 7.' %
channel_raw)
if not (0 <= channel <= 7):
raise IllegalAdcChannelError(
'Invalid ADC channel: %s. Channel must be a value from 0 to 7.' %
channel_raw)
return channel
def parse(config_data):
    """Parse GreenPiThumb wiring configuration from text.

    Given the contents of a GreenPiThumb wiring configuration file, parses
    the configuration into a wiring config object.

    Args:
        config_data: The contents of a GreenPiThumb configuration file
            (INI-format text with 'gpio_pins' and 'adc_channels' sections).

    Returns:
        A wiring configuration object with the following properties:
            * gpio_pins.pump
            * gpio_pins.dht11
            * gpio_pins.soil_moisture_1
            * gpio_pins.soil_moisture_2
            * gpio_pins.mcp3008_clk
            * gpio_pins.mcp3008_dout
            * gpio_pins.mcp3008_din
            * gpio_pins.mcp3008_cs_shdn
            * adc_channels.soil_moisture_sensor
            * adc_channels.light_sensor

    Raises:
        InvalidConfigError when the INI text is malformed or an expected
            section/option is missing (wraps the ConfigParser error).
    """
    # NOTE(review): ConfigParser.RawConfigParser / readfp / BytesIO-of-str
    # are Python 2 idioms — this function will not run unmodified on Py 3.
    raw_parser = ConfigParser.RawConfigParser()
    try:
        raw_parser.readfp(io.BytesIO(config_data))
        gpio_pin_config = _GpioPinConfig(
            pump=_parse_gpio_pin(raw_parser.get('gpio_pins', 'pump')),
            dht11=_parse_gpio_pin(raw_parser.get('gpio_pins', 'dht11')),
            soil_moisture_1=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'soil_moisture_1')),
            soil_moisture_2=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'soil_moisture_2')),
            mcp3008_clk=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'mcp3008_clk')),
            mcp3008_din=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'mcp3008_din')),
            mcp3008_dout=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'mcp3008_dout')),
            mcp3008_cs_shdn=_parse_gpio_pin(
                raw_parser.get('gpio_pins', 'mcp3008_cs_shdn')))
        # Cross-field validation: no pin may serve two components.
        _validate_gpio_pin_config(gpio_pin_config)
        adc_channel_config = _AdcChannelConfig(
            soil_moisture_sensor=_parse_adc_channel(
                raw_parser.get('adc_channels', 'soil_moisture_sensor')),
            light_sensor=_parse_adc_channel(
                raw_parser.get('adc_channels', 'light_sensor')))
        _validate_adc_channel_config(adc_channel_config)
        return _WiringConfig(gpio_pin_config, adc_channel_config)
    except ConfigParser.Error as ex:
        # Wrap low-level parser errors in the package's own exception type.
        raise InvalidConfigError('Failed to parse wiring config', ex)
|
{
"content_hash": "77be6968cc37019dc3d617523c362ffa",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 79,
"avg_line_length": 32.801886792452834,
"alnum_prop": 0.6371872303710094,
"repo_name": "mtlynch/GreenPiThumb",
"id": "ead257d3839283d7d71dcabd364723ada94aab9b",
"size": "6954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greenpithumb/wiring_config_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81302"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
}
|
import json
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.core.serializers.json import Serializer as JSONSerializer
from django.core.serializers.python import _get_model
from django.utils import six
from djmoney.models.fields import MoneyField
from djmoney.utils import get_currency_field_name
from moneyed import Money
# Plain JSON serialization is sufficient for writing; only reading needs
# special handling of money fields.
Serializer = JSONSerializer
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Wraps Django's Python deserializer: MoneyField amounts are combined with
    their companion currency fields into ``Money`` values and assigned onto
    the deserialized objects afterwards.
    """
    if not isinstance(stream_or_string, (bytes, six.string_types)):
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    try:
        for obj in json.loads(stream_or_string):
            money_fields = {}
            fields = {}
            Model = _get_model(obj["model"])
            for (field_name, field_value) in six.iteritems(obj['fields']):
                field = Model._meta.get_field(field_name)
                if isinstance(field, MoneyField) and field_value is not None:
                    # Pair the raw amount with its sibling currency field.
                    money_fields[field_name] = Money(field_value, obj['fields'][get_currency_field_name(field_name)])
                else:
                    fields[field_name] = field_value
            obj['fields'] = fields
            # NOTE(review): the loop variable ``obj`` is reused here,
            # shadowing the outer JSON dict — confirm before refactoring.
            for obj in PythonDeserializer([obj], **options):
                for field, value in money_fields.items():
                    setattr(obj.object, field, value)
                yield obj
    except GeneratorExit:
        # NOTE(review): re-raising GeneratorExit is a no-op — it would
        # propagate anyway; the try/except appears removable.
        raise
|
{
"content_hash": "49b69a56a0355911832eb184f7bad9cf",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 117,
"avg_line_length": 38.24390243902439,
"alnum_prop": 0.6352040816326531,
"repo_name": "tsouvarev/django-money",
"id": "9b905dc6cbfb3f0b8732cef4f38ffd7affcb12eb",
"size": "1583",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "djmoney/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65353"
},
{
"name": "Shell",
"bytes": "3667"
}
],
"symlink_target": ""
}
|
"""Utilities for handling nodes."""
import itertools as itt
import logging
from itertools import chain
from typing import Set, Tuple, Type, Union
from networkx import relabel_nodes
from ..constants import ANNOTATIONS, CITATION, EVIDENCE, INCREASES, RELATION
from ..dsl import BaseAbundance, BaseEntity, ListAbundance, Reaction
__all__ = [
"flatten_list_abundance",
"list_abundance_cartesian_expansion",
"reaction_cartesian_expansion",
]
logger = logging.getLogger(__name__)
def flatten_list_abundance(node: ListAbundance) -> ListAbundance:
    """Flattens the complex or composite abundance."""
    flattened = []
    for member in node.members:
        if isinstance(member, ListAbundance):
            # Recurse into nested lists and splice their members in.
            flattened.extend(flatten_list_abundance(member).members)
        else:
            flattened.append(member)
    return node.__class__(flattened)
def list_abundance_expansion(graph) -> None:
    """Flatten list abundances."""
    mapping = {}
    for node in graph:
        if isinstance(node, ListAbundance):
            mapping[node] = flatten_list_abundance(node)
    # Relabel in place so edges follow the flattened nodes.
    relabel_nodes(graph, mapping, copy=False)
def list_abundance_cartesian_expansion(graph) -> None:
    """Expand all list abundances to simple subject-predicate-object networks.

    Every qualified edge touching a ListAbundance is replaced by qualified
    edges between the list's members (a cartesian product when both
    endpoints are lists); the ListAbundance nodes themselves are removed
    afterwards.
    """
    for u, v, d in list(graph.edges(data=True)):
        # Unqualified edges (no citation) are left untouched here.
        if CITATION not in d:
            continue
        if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):
            for u_member, v_member in itt.product(u.members, v.members):
                graph.add_qualified_edge(
                    u_member,
                    v_member,
                    relation=d[RELATION],
                    citation=d.get(CITATION),
                    evidence=d.get(EVIDENCE),
                    annotations=d.get(ANNOTATIONS),
                )
        elif isinstance(u, ListAbundance):
            for member in u.members:
                graph.add_qualified_edge(
                    member,
                    v,
                    relation=d[RELATION],
                    citation=d.get(CITATION),
                    evidence=d.get(EVIDENCE),
                    annotations=d.get(ANNOTATIONS),
                )
        elif isinstance(v, ListAbundance):
            for member in v.members:
                graph.add_qualified_edge(
                    u,
                    member,
                    relation=d[RELATION],
                    citation=d.get(CITATION),
                    evidence=d.get(EVIDENCE),
                    annotations=d.get(ANNOTATIONS),
                )
    _remove_list_abundance_nodes(graph)
def _reaction_cartesian_expansion_unqualified_helper(
    graph,
    u: BaseEntity,
    v: BaseEntity,
    d: dict,
) -> None:
    """Help deal with cartesian expansion in unqualified edges.

    Adds unqualified reactant->product edges for the Reaction endpoint(s),
    skipping catalysts (nodes that appear on both sides of a reaction).
    """
    if isinstance(u, Reaction) and isinstance(v, Reaction):
        enzymes = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)
        for reactant, product in chain(itt.product(u.reactants, u.products), itt.product(v.reactants, v.products)):
            if reactant in enzymes or product in enzymes:
                continue
            graph.add_unqualified_edge(reactant, product, INCREASES)
        # NOTE(review): this second pass adds reversed product->reactant
        # edges for u only (not v) and uses the original relation rather
        # than INCREASES — confirm this asymmetry is intended.
        for product, reactant in itt.product(u.products, u.reactants):
            if reactant in enzymes or product in enzymes:
                continue
            graph.add_unqualified_edge(
                product,
                reactant,
                d[RELATION],
            )
    elif isinstance(u, Reaction):
        enzymes = _get_catalysts_in_reaction(u)
        for product in u.products:
            # Skip create increases edges between enzymes
            if product in enzymes:
                continue
            # Only add edge between v and reaction if the node is not part of the reaction
            # In practice skips hasReactant, hasProduct edges
            if v not in u.products and v not in u.reactants:
                graph.add_unqualified_edge(product, v, INCREASES)
            for reactant in u.reactants:
                graph.add_unqualified_edge(reactant, product, INCREASES)
    elif isinstance(v, Reaction):
        enzymes = _get_catalysts_in_reaction(v)
        for reactant in v.reactants:
            # Skip create increases edges between enzymes
            if reactant in enzymes:
                continue
            # Only add edge between v and reaction if the node is not part of the reaction
            # In practice skips hasReactant, hasProduct edges
            if u not in v.products and u not in v.reactants:
                graph.add_unqualified_edge(u, reactant, INCREASES)
            for product in v.products:
                graph.add_unqualified_edge(reactant, product, INCREASES)
def _get_catalysts_in_reaction(reaction: Reaction) -> Set[BaseAbundance]:
    """Return the nodes appearing as both a reactant and a product."""
    # TODO replace with reaction.get_catalysts()
    return set(reaction.reactants) & set(reaction.products)
def reaction_cartesian_expansion(graph, accept_unqualified_edges: bool = True) -> None:
    """Expand all reactions to simple subject-predicate-object networks.

    Qualified edges touching a Reaction are replaced by increases edges
    between the reaction's reactants and products (catalysts skipped);
    unqualified edges are delegated to the helper when
    ``accept_unqualified_edges`` is True. Reaction nodes are removed at
    the end.
    """
    for u, v, d in list(graph.edges(data=True)):
        # Deal with unqualified edges
        if CITATION not in d and accept_unqualified_edges:
            _reaction_cartesian_expansion_unqualified_helper(graph, u, v, d)
            continue
        if isinstance(u, Reaction) and isinstance(v, Reaction):
            catalysts = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)
            for reactant, product in chain(
                itt.product(u.reactants, u.products),
                itt.product(v.reactants, v.products),
            ):
                if reactant in catalysts or product in catalysts:
                    continue
                graph.add_increases(
                    reactant,
                    product,
                    citation=d.get(CITATION),
                    evidence=d.get(EVIDENCE),
                    annotations=d.get(ANNOTATIONS),
                )
            # NOTE(review): this second pass adds reversed product->reactant
            # edges for u only, via add_qualified_edge with the original
            # relation, while every other branch uses add_increases —
            # confirm the asymmetry is intended.
            for product, reactant in itt.product(u.products, u.reactants):
                if reactant in catalysts or product in catalysts:
                    continue
                graph.add_qualified_edge(
                    product,
                    reactant,
                    relation=d[RELATION],
                    citation=d.get(CITATION),
                    evidence=d.get(EVIDENCE),
                    annotations=d.get(ANNOTATIONS),
                )
        elif isinstance(u, Reaction):
            catalysts = _get_catalysts_in_reaction(u)
            for product in u.products:
                # Skip create increases edges between enzymes
                if product in catalysts:
                    continue
                # Only add edge between v and reaction if the node is not part of the reaction
                # In practice skips hasReactant, hasProduct edges
                if v not in u.products and v not in u.reactants:
                    graph.add_increases(
                        product,
                        v,
                        citation=d.get(CITATION),
                        evidence=d.get(EVIDENCE),
                        annotations=d.get(ANNOTATIONS),
                    )
                for reactant in u.reactants:
                    graph.add_increases(
                        reactant,
                        product,
                        citation=d.get(CITATION),
                        evidence=d.get(EVIDENCE),
                        annotations=d.get(ANNOTATIONS),
                    )
        elif isinstance(v, Reaction):
            catalysts = _get_catalysts_in_reaction(v)
            for reactant in v.reactants:
                # Skip create increases edges between enzymes
                if reactant in catalysts:
                    continue
                # Only add edge between v and reaction if the node is not part of the reaction
                # In practice skips hasReactant, hasProduct edges
                if u not in v.products and u not in v.reactants:
                    graph.add_increases(
                        u,
                        reactant,
                        citation=d.get(CITATION),
                        evidence=d.get(EVIDENCE),
                        annotations=d.get(ANNOTATIONS),
                    )
                for product in v.products:
                    graph.add_increases(
                        reactant,
                        product,
                        citation=d.get(CITATION),
                        evidence=d.get(EVIDENCE),
                        annotations=d.get(ANNOTATIONS),
                    )
    _remove_reaction_nodes(graph)
def remove_reified_nodes(graph) -> None:
    """Remove complex nodes (list abundances and reactions) from the graph."""
    _remove_list_abundance_nodes(graph)
    _remove_reaction_nodes(graph)
def _remove_list_abundance_nodes(graph):
    # Drop every ListAbundance node (and, implicitly, its incident edges).
    _remove_typed_nodes(graph, ListAbundance)
def _remove_reaction_nodes(graph):
    # Drop every Reaction node (and, implicitly, its incident edges).
    _remove_typed_nodes(graph, Reaction)
def _remove_typed_nodes(
    graph,
    cls: Union[Type[BaseEntity], Tuple[Type[BaseEntity], ...]],
) -> None:
    """Remove from the graph every node that is an instance of *cls*."""
    matching = [node for node in graph if isinstance(node, cls)]
    graph.remove_nodes_from(matching)
|
{
"content_hash": "1c42f4b0d9b41a210b7d5f361bbe622f",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 115,
"avg_line_length": 36.034749034749034,
"alnum_prop": 0.5549126754526947,
"repo_name": "pybel/pybel",
"id": "58cde2ae6c481749f9af2bff0b055b0e49392fa4",
"size": "9358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybel/struct/node_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "880"
},
{
"name": "JavaScript",
"bytes": "9473"
},
{
"name": "Jupyter Notebook",
"bytes": "52170"
},
{
"name": "Python",
"bytes": "1475429"
}
],
"symlink_target": ""
}
|
import collections.abc
from html.parser import HTMLParser
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from collections import OrderedDict
from queue import Queue
from kovot import Message, Response, Speaker
from logging import Logger
from typing import Iterator, List, Tuple
__all__ = ['Mastodon']
# Per-status character limit; longer texts are rejected in Mastodon.post().
TOOT_LIMIT: int = 500
# Upper bound on raw statuses retained by _TootListener for reply threading.
CACHE_SIZE: int = 16
class Mastodon(collections.abc.Iterable):
    """
    An implementation of Stream for communicating by Mastodon.

    Attributes
    ----------
    logger: logging.logger
        A logger instance dealing with messages sent by this instance.
    api: mastodon.Mastodon
        A Mastodon instance wrapping APIs of the connected Mastodon instance.
    """
    def __init__(
        self,
        logger: Logger,
        client_id: str,
        client_secret: str,
        access_token: str,
        api_base_url: str,
        reply_everyone: bool = False
    ):
        self.logger = logger
        self.api = MastodonAPI(
            client_id,
            client_secret,
            access_token,
            api_base_url
        )
        # The bot's own account record; used to avoid self-mentions in post().
        self.myself = self.api.account_verify_credentials()
        self.reply_everyone = reply_everyone
        # Maps status id -> raw status dict; replaced by __iter__().
        self._cached = dict()
    def __iter__(self) -> Iterator:
        """Start streaming user events and iterate over incoming mentions."""
        listener = _TootListener()
        self.api.stream_user(listener, run_async=True)
        # Share the listener's cache so post() can thread replies.
        self._cached = listener.cache
        return iter(listener)
    def post(self, response: Response) -> bool:
        """Toot ``response.text``; return True on success, False on failure."""
        self.logger.info("Trying to toot: " + response.text)
        # When replying to a cached status, prepend @mentions.
        # NOTE(review): response.text is mutated in place here — confirm
        # callers do not reuse the Response object afterwards.
        if response.message is not None and response.message.id_ in self._cached:
            in_reply_to = self._cached[response.message.id_]
            if self.reply_everyone:
                for user in reversed(in_reply_to['mentions']):
                    if user['id'] != self.myself['id']:
                        response.text = '@%s %s' % (user['acct'], response.text)
            response.text = '@%s %s' % (in_reply_to['account']['acct'], response.text)
        if len(response.text) > TOOT_LIMIT:
            self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
            return False
        try:
            if response.message is None:
                result = self.api.status_post(response.text)
            else:
                result = self.api.status_post(response.text, in_reply_to_id=response.message.id_)
            self.logger.info('Updated: ' + str(result))
        except MastodonError:
            self.logger.error('An API error has occured.')
            return False
        return True
class _CleansingParser(HTMLParser):
"""
This class provides a function which removes HTML tags appearing in toots.
"""
def __init__(self, *args, **kwargs):
super(_CleansingParser, self).__init__(*args, **kwargs)
self.sb: List[str] = []
self._skip: bool = False
def __str__(self) -> str:
return ''.join(self.sb)
def _append(self, text: str) -> None:
if self._skip:
return
self.sb.append(text)
def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]) -> None:
self._strip = False
attr = {name: value for name, value in attrs}
classes = attr['class'].split() if 'class' in attr else []
if tag == 'br':
self._append('\n')
if tag == 'a' and 'mention' in classes:
self._skip = True
def handle_endtag(self, tag: str) -> None:
if self._skip and tag == 'a':
self._skip = False
self._strip = True
def handle_data(self, data: str) -> None:
if self._strip:
data = data.lstrip()
self._strip = False
self._append(data)
class _TootListener(collections.abc.Iterable, StreamListener):
    """
    A listener collecting only toots related to the connected account, that
    is, mentions sent to the account.
    This listener **cannot** generate multiple iterators due to an
    implementation issue with duplicating them.

    Attributes
    ----------
    queue: queue.Queue
        A queue for processing collected toots in order.
    cache: collections.OrderedDict
        This attribute caches recent raw results given by Mastodon API.
    """
    def __init__(self):
        self.queue = Queue()
        self.cache = OrderedDict()
    def __iter__(self) -> Iterator[Message]:
        # Infinite generator: blocks on queue.get() until a mention arrives.
        while True:
            raw = self.queue.get()
            self.cache[raw['id']] = raw
            # Evict oldest entries (FIFO) once the cache exceeds CACHE_SIZE.
            while len(self.cache) > CACHE_SIZE:
                self.cache.popitem(last=False)
            content = self._cleanse_html(raw['content'])
            speaker = Speaker(raw['account']['display_name'])
            yield Message(content, id_=raw['id'], speaker=speaker)
    def on_notification(self, notification: dict) -> None:
        # Stream callback: only mention notifications are enqueued.
        if notification['type'] == 'mention':
            self.queue.put(notification['status'])
    @staticmethod
    def _cleanse_html(html: str) -> str:
        # Strip HTML markup from a status body (see _CleansingParser).
        parser = _CleansingParser(convert_charrefs=True)
        parser.feed(html)
        return str(parser)
|
{
"content_hash": "401ec493501930078dc1fbc8c8bc168b",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 109,
"avg_line_length": 34.06622516556291,
"alnum_prop": 0.5880637636080871,
"repo_name": "kenkov/kovot",
"id": "da5b9bdfca9b1e5f87ef0efdec8877a0e11563e8",
"size": "5191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kovot/stream/mastodon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "131"
},
{
"name": "Python",
"bytes": "25516"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from random import random
import requests
import ray
from ray import serve
# Start a local Ray cluster and the Serve instance that will host the models.
ray.init(num_cpus=8)
serve.start()
# Our pipeline will be structured as follows:
# - Input comes in, the composed model sends it to model_one
# - model_one outputs a random number between 0 and 1, if the value is
#   greater than 0.5, then the data is sent to model_two
# - otherwise, the data is returned to the user.
# Let's define two models that just print out the data they received.
@serve.deployment
def model_one(data):
    """First pipeline stage: log the payload and emit a random score in [0, 1)."""
    print("Model 1 called with data ", data)
    score = random()
    return score


model_one.deploy()
@serve.deployment
def model_two(data):
    """Second pipeline stage: log the payload and echo it back unchanged."""
    print("Model 2 called with data ", data)
    result = data
    return result


model_two.deploy()
# max_concurrent_queries is optional. By default, if you pass in an async
# function, Ray Serve sets the limit to a high number.
@serve.deployment(max_concurrent_queries=10, route_prefix="/composed")
class ComposedModel:
    """Route each request through model_one and, when its score exceeds 0.5,
    additionally through model_two.

    The HTTP response always reports which model handled the request and the
    score produced by model_one.
    """

    def __init__(self):
        self.model_one = model_one.get_handle()
        self.model_two = model_two.get_handle()

    # This method can be called concurrently!
    async def __call__(self, starlette_request):
        data = await starlette_request.body()
        score = await self.model_one.remote(data=data)
        if score > 0.5:
            # model_two is invoked for its side effects only: the original code
            # assigned its result to `result` and immediately overwrote it, so
            # the dead store is removed and we simply await the call.
            await self.model_two.remote(data=data)
            result = {"model_used": 2, "score": score}
        else:
            result = {"model_used": 1, "score": score}
        return result


ComposedModel.deploy()
for _ in range(5):
    # Exercise the composed endpoint; routing to model 1 or 2 is random.
    response = requests.get("http://127.0.0.1:8000/composed", data="hey!")
    print(response.json())
# Output
# {'model_used': 2, 'score': 0.6250189863595503}
# {'model_used': 1, 'score': 0.03146855349621436}
# {'model_used': 2, 'score': 0.6916977560006987}
# {'model_used': 2, 'score': 0.8169693450866928}
# {'model_used': 2, 'score': 0.9540681979573862}
|
{
"content_hash": "c58bf9ff20a379e3b3a48fc6857de2d6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 27.691176470588236,
"alnum_prop": 0.6675517790759427,
"repo_name": "pcmoritz/ray-1",
"id": "0e4a15b459f2a37a5e1733a33aefc4899c1445b1",
"size": "1883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/serve/examples/doc/snippet_model_composition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
import collections
import time
import threading
from ._exceptions import ProtonException, ConnectionException, LinkException, Timeout
from ._delivery import Delivery
from ._endpoints import Endpoint, Link
from ._events import Handler
from ._url import Url
from ._reactor import Container
from ._handlers import MessagingHandler, IncomingMessageHandler
from typing import Callable, Optional, Union, TYPE_CHECKING, List
try:
    from typing import Literal
except ImportError:
    # Fallback shim for Python versions where typing.Literal is unavailable:
    # makes subscripting (e.g. Literal['x']) legal and a no-op.
    class Literal:
        def __class_getitem__(cls, item):
            pass
if TYPE_CHECKING:
from ._transport import SSLDomain
from ._reactor import Backoff, SenderOption, ReceiverOption, Connection
from ._endpoints import Receiver, Sender, Terminus
from ._events import Event
from ._message import Message
class BlockingLink:
    """
    A synchronous wrapper around a link, shared by :class:`BlockingSender`
    and :class:`BlockingReceiver`. Construction blocks until the remote
    peer confirms the link open, and raises :class:`LinkDetached` if the
    peer closed it instead.

    :param connection: The blocking connection hosting the link.
    :param link: The underlying sender or receiver link.
    """
    def __init__(self, connection: 'BlockingConnection', link: Union['Sender', 'Receiver']) -> None:
        self.connection = connection
        self.link = link
        # Block until the remote endpoint leaves the uninitialised state.
        self.connection.wait(lambda: not (self.link.state & Endpoint.REMOTE_UNINIT),
                             msg="Opening link %s" % link.name)
        self._checkClosed()

    def _waitForClose(self, timeout=1):
        """Wait briefly for the peer to close the link; timing out is not an error."""
        try:
            self.connection.wait(lambda: self.link.state & Endpoint.REMOTE_CLOSED,
                                 timeout=timeout,
                                 msg="Opening link %s" % self.link.name)
        except Timeout:
            # Peer did not close within the grace period; caller decides what
            # happens next. (The unused `as e` binding was removed.)
            pass
        self._checkClosed()

    def _checkClosed(self) -> None:
        """Raise :class:`LinkDetached` if the peer closed the link unexpectedly."""
        if self.link.state & Endpoint.REMOTE_CLOSED:
            self.link.close()
            if not self.connection.closing:
                raise LinkDetached(self.link)

    def close(self):
        """
        Close the link and wait for the peer to confirm.
        """
        self.link.close()
        self.connection.wait(lambda: not (self.link.state & Endpoint.REMOTE_ACTIVE),
                             msg="Closing link %s" % self.link.name)

    # Access to other link attributes is delegated to the wrapped link.
    def __getattr__(self, name):
        return getattr(self.link, name)
class SendException(ProtonException):
    """
    Exception used to indicate an exceptional state/condition on a send request.
    :param state: The delivery state which caused the exception
        (e.g. :const:`Delivery.REJECTED` — see :meth:`BlockingSender.send`).
    """
    def __init__(self, state: int) -> None:
        # Remote delivery state that triggered the send failure.
        self.state = state
def _is_settled(delivery: Delivery) -> bool:
    """Return True once *delivery* is settled, or if the link pre-settles sends."""
    if delivery.settled:
        return True
    return delivery.link.snd_settle_mode == Link.SND_SETTLED
class BlockingSender(BlockingLink):
    """
    A synchronous sender wrapper. This is typically created by calling
    :meth:`BlockingConnection.create_sender`.
    """
    def __init__(self, connection: 'BlockingConnection', sender: 'Sender') -> None:
        super(BlockingSender, self).__init__(connection, sender)
        if self.link.target and self.link.target.address and self.link.target.address != self.link.remote_target.address:
            # The peer answered with a different target address: wait briefly
            # for its detach (which may carry an error condition)...
            self._waitForClose()
            # ...then close our own end if the peer has not, and fail the open.
            self.link.close()
            raise LinkException("Failed to open sender %s, target does not match" % self.link.name)

    def send(self, msg, timeout=False, error_states=None):
        """
        Blocking send which returns only once the send is complete and the
        message settled.
        :param timeout: Timeout in seconds. If ``False``, the value of ``timeout`` used in the
            constructor of the :class:`BlockingConnection` object used in the constructor will be used.
            If ``None``, there is no timeout. Any other value is treated as a timeout in seconds.
        :type timeout: ``None``, ``False``, ``float``
        :param error_states: List of delivery flags which when present in Delivery object
            will cause a :class:`SendException` exception to be raised. If ``None``, these
            will default to a list containing :const:`proton.Delivery.REJECTED` and :const:`proton.Delivery.RELEASED`.
        :type error_states: ``list``
        :return: Delivery object for this message.
        :rtype: :class:`proton.Delivery`
        """
        delivery = self.link.send(msg)
        self.connection.wait(
            lambda: _is_settled(delivery),
            msg="Sending on sender %s" % self.link.name,
            timeout=timeout)
        if delivery.link.snd_settle_mode != Link.SND_SETTLED:
            delivery.settle()
        failure_states = error_states
        if failure_states is None:
            failure_states = [Delivery.REJECTED, Delivery.RELEASED]
        if delivery.remote_state in failure_states:
            raise SendException(delivery.remote_state)
        return delivery
class Fetcher(MessagingHandler):
    """
    A message handler for blocking receivers.

    :param connection: The blocking connection the receiver belongs to.
    :type connection: :class:`BlockingConnection`
    :param prefetch: Initial flow credit granted to the receiving link.
    :type prefetch: ``int``
    """
    def __init__(self, connection, prefetch):
        super(Fetcher, self).__init__(prefetch=prefetch, auto_accept=False)
        self.connection = connection
        # FIFO of (message, delivery) pairs not yet handed to the application.
        self.incoming = collections.deque()
        # Deliveries popped but not yet settled, in arrival order.
        self.unsettled = collections.deque()

    def on_message(self, event: 'Event') -> None:
        """Queue an incoming message and its delivery for later retrieval."""
        self.incoming.append((event.message, event.delivery))
        self.connection.container.yield_()  # Wake up the wait() loop to handle the message.

    def on_link_error(self, event: 'Event') -> None:
        """Close our end of a failed link and surface it as :class:`LinkDetached`."""
        if event.link.state & Endpoint.LOCAL_ACTIVE:
            event.link.close()
            if not self.connection.closing:
                raise LinkDetached(event.link)

    def on_connection_error(self, event: 'Event') -> None:
        """Surface an unexpected remote connection close as :class:`ConnectionClosed`."""
        if not self.connection.closing:
            raise ConnectionClosed(event.connection)

    @property
    def has_message(self) -> int:
        """
        The number of messages that have been received and are waiting to be
        retrieved with :meth:`pop`.
        """
        return len(self.incoming)

    def pop(self) -> 'Message':
        """
        Get the next available incoming message. If the message is unsettled, its
        delivery object is moved onto the unsettled queue, and can be settled with
        a call to :meth:`settle`.
        """
        message, delivery = self.incoming.popleft()
        if not delivery.settled:
            self.unsettled.append(delivery)
        return message

    def settle(self, state: Optional[int] = None) -> None:
        """
        Settle the next message previously taken with :meth:`pop`.
        :param state: Optional delivery state to apply before settling
            (e.g. :const:`proton.Delivery.ACCEPTED`).
        """
        delivery = self.unsettled.popleft()
        if state:
            delivery.update(state)
        delivery.settle()
class BlockingReceiver(BlockingLink):
    """
    A synchronous receiver wrapper. This is typically created by calling
    :meth:`BlockingConnection.create_receiver`.

    :param connection: The blocking connection hosting this receiver.
    :param receiver: The underlying receiver link.
    :param fetcher: Handler that queues incoming messages; ``None`` when the
        caller supplied its own handler (then :meth:`receive` is unavailable).
    :param credit: Initial flow credit to grant on the link.
    """
    def __init__(
            self,
            connection: 'BlockingConnection',
            receiver: 'Receiver',
            fetcher: Optional[Fetcher],
            credit: int = 1
    ) -> None:
        super(BlockingReceiver, self).__init__(connection, receiver)
        if self.link.source and self.link.source.address and self.link.source.address != self.link.remote_source.address:
            # this may be followed by a detach, which may contain an error condition, so wait a little...
            self._waitForClose()
            # ...but close ourselves if peer does not
            self.link.close()
            raise LinkException("Failed to open receiver %s, source does not match" % self.link.name)
        if credit:
            receiver.flow(credit)
        self.fetcher = fetcher
        self.container = connection.container
    def __del__(self):
        self.fetcher = None
        # The next line causes a core dump if the Proton-C reactor finalizes
        # first. The self.container reference prevents out of order reactor
        # finalization. It may not be set if exception in BlockingLink.__init__
        if hasattr(self, "container"):
            self.link.handler = None  # implicit call to reactor
    def receive(self, timeout=False):
        """
        Blocking receive call which will return only when a message is received or
        a timeout (if supplied) occurs.
        :param timeout: Timeout in seconds. If ``False``, the value of ``timeout`` used in the
        constructor of the :class:`BlockingConnection` object used in the constructor will be used.
        If ``None``, there is no timeout. Any other value is treated as a timeout in seconds.
        :type timeout: ``None``, ``False``, ``float``
        """
        if not self.fetcher:
            raise Exception("Can't call receive on this receiver as a handler was not provided")
        if not self.link.credit:
            # Grant one credit so the peer is allowed to send us a message.
            self.link.flow(1)
        self.connection.wait(lambda: self.fetcher.has_message, msg="Receiving on receiver %s" % self.link.name,
                             timeout=timeout)
        return self.fetcher.pop()
    def accept(self) -> None:
        """
        Accept and settle the received message. The delivery is set to
        :const:`proton.Delivery.ACCEPTED`.
        """
        self.settle(Delivery.ACCEPTED)
    def reject(self) -> None:
        """
        Reject the received message. The delivery is set to
        :const:`proton.Delivery.REJECTED`.
        """
        self.settle(Delivery.REJECTED)
    def release(self, delivered: bool = True) -> None:
        """
        Release the received message.
        :param delivered: If ``True``, the message delivery is being set to
        :const:`proton.Delivery.MODIFIED`, ie being returned to the sender
        and annotated. If ``False``, the message is returned without
        annotations and the delivery set to :const:`proton.Delivery.RELEASED`.
        """
        if delivered:
            self.settle(Delivery.MODIFIED)
        else:
            self.settle(Delivery.RELEASED)
    def settle(self, state=None):
        """
        Settle any received messages.
        :param state: Update the delivery of all unsettled messages with the
        supplied state, then settle them.
        :type state: ``None`` or a valid delivery state (see
        :class:`proton.Delivery`.
        """
        if not self.fetcher:
            raise Exception("Can't call accept/reject etc on this receiver as a handler was not provided")
        self.fetcher.settle(state)
class LinkDetached(LinkException):
    """
    Raised when the remote peer unexpectedly closes a link in a blocking
    context, or an unexpected link error occurs.

    :param link: The link which closed unexpectedly.
    """
    def __init__(self, link: Link) -> None:
        self.link = link
        if link.is_sender:
            txt = "sender %s to %s closed" % (link.name, link.target.address)
        else:
            txt = "receiver %s from %s closed" % (link.name, link.source.address)
        condition = link.remote_condition
        if condition:
            txt += " due to: %s" % condition
            # Expose the error condition name for programmatic inspection.
            self.condition = condition.name
        else:
            txt += " by peer"
            self.condition = None
        super(LinkDetached, self).__init__(txt)
class ConnectionClosed(ConnectionException):
    """
    Raised when the remote peer unexpectedly closes a connection in a blocking
    context, or an unexpected connection error occurs.

    :param connection: The connection which closed unexpectedly.
    """
    def __init__(self, connection: 'Connection') -> None:
        self.connection = connection
        txt = "Connection %s closed" % connection.hostname
        condition = connection.remote_condition
        if condition:
            txt += " due to: %s" % condition
            # Expose the error condition name for programmatic inspection.
            self.condition = condition.name
        else:
            txt += " by peer"
            self.condition = None
        super(ConnectionClosed, self).__init__(txt)
class BlockingConnection(Handler):
    """
    A synchronous style connection wrapper.
    This object's implementation uses OS resources. To ensure they
    are released when the object is no longer in use, make sure that
    object operations are enclosed in a try block and that close() is
    always executed on exit.
    :param url: The connection URL.
    :type url: ``str``
    :param timeout: Connection timeout in seconds. If ``None``, defaults to 60 seconds.
    :type timeout: ``None`` or float
    :param container: Container to process the events on the connection. If ``None``,
        a new :class:`proton.Container` will be created.
    :param ssl_domain: SSL configuration for the connection, if any.
    :param heartbeat: A value in seconds indicating the desired frequency of
        heartbeats used to test the underlying socket is alive.
    :type heartbeat: ``float``
    :param urls: A list of connection URLs to try to connect to.
    :type urls: ``list``[``str``]
    :param kwargs: Container keyword arguments. See :class:`proton.reactor.Container`
        for a list of the valid kwargs.
    """
    def __init__(self, url=None, timeout=None, container=None, ssl_domain=None, heartbeat=None, urls=None,
                 reconnect=None, **kwargs):
        self.disconnected = False
        self.timeout = timeout or 60
        self.container = container or Container()
        self.container.timeout = self.timeout
        self.container.start()
        self.conn = None
        self.closing = False
        # Preserve previous behaviour if neither reconnect nor urls are supplied
        if url is not None and urls is None and reconnect is None:
            reconnect = False
            url = Url(url).defaults()
        failed = True
        try:
            self.conn = self.container.connect(url=url, handler=self, ssl_domain=ssl_domain, reconnect=reconnect,
                                               heartbeat=heartbeat, urls=urls, **kwargs)
            self.wait(lambda: not (self.conn.state & Endpoint.REMOTE_UNINIT),
                      msg="Opening connection")
            failed = False
        finally:
            # Release resources immediately if the open handshake failed.
            if failed and self.conn:
                self.close()

    def create_sender(
            self,
            address: Optional[str],
            handler: Optional[Handler] = None,
            name: Optional[str] = None,
            options: Optional[Union['SenderOption', List['SenderOption']]] = None
    ) -> BlockingSender:
        """
        Create a blocking sender.
        :param address: Address of target node.
        :type address: ``str``
        :param handler: Event handler for this sender.
        :type handler: Any child class of :class:`proton.Handler`
        :param name: Sender name.
        :type name: ``str``
        :param options: A single option, or a list of sender options
        :type options: :class:`SenderOption` or [SenderOption, SenderOption, ...]
        :return: New blocking sender instance.
        :rtype: :class:`BlockingSender`
        """
        return BlockingSender(self, self.container.create_sender(self.conn, address, name=name, handler=handler,
                                                                 options=options))

    def create_receiver(
            self,
            address: Optional[str] = None,
            credit: Optional[int] = None,
            dynamic: bool = False,
            handler: Optional[Handler] = None,
            name: Optional[str] = None,
            options: Optional[Union['ReceiverOption', List['ReceiverOption']]] = None
    ) -> BlockingReceiver:
        """
        Create a blocking receiver.
        :param address: Address of source node.
        :param credit: Initial link flow credit. If not set, will default to 1.
        :param dynamic: If ``True``, indicates dynamic creation of the receiver.
        :param handler: Event handler for this receiver.
        :param name: Receiver name.
        :param options: A single option, or a list of receiver options
        :return: New blocking receiver instance.
        """
        prefetch = credit
        if handler:
            # A user handler replaces the internal Fetcher; receive()/settle()
            # on the returned receiver are then unavailable.
            fetcher = None
            if prefetch is None:
                prefetch = 1
        else:
            fetcher = Fetcher(self, credit)
        return BlockingReceiver(
            self,
            self.container.create_receiver(self.conn, address, name=name, dynamic=dynamic, handler=handler or fetcher,
                                           options=options), fetcher, credit=prefetch)

    def close(self) -> None:
        """
        Close the connection and release the container. Safe to call more
        than once; subsequent calls are no-ops.
        """
        # TODO: provide stronger interrupt protection on cleanup. See PEP 419
        if self.closing:
            return
        self.closing = True
        self.container.errors = []
        try:
            if self.conn:
                self.conn.close()
                self.wait(lambda: not (self.conn.state & Endpoint.REMOTE_ACTIVE),
                          msg="Closing connection")
                if self.conn.transport:
                    # Close tail to force transport cleanup without waiting/hanging for peer close frame.
                    self.conn.transport.close_tail()
        finally:
            # Guard against self.conn being None (e.g. close() called after a
            # failed construction) — the old code raised AttributeError here.
            if self.conn:
                self.conn.free()
            # Nothing left to block on. Allow reactor to clean up.
            self.run()
            if self.conn:
                self.conn.handler = None  # break cyclical reference
            self.conn = None
            self.container.stop_events()
            self.container = None

    @property
    def url(self) -> str:
        """
        The address for this connection.
        """
        return self.conn and self.conn.connected_address

    def _is_closed(self) -> int:
        """Non-zero when either end of the connection has been closed."""
        return self.conn.state & (Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED)

    def run(self) -> None:
        """
        Hand control over to the event loop (e.g. if waiting indefinitely for incoming messages)
        """
        while self.container.process():
            pass
        self.container.stop()
        self.container.process()

    def wait(self, condition, timeout=False, msg=None):
        """
        Process events until ``condition()`` returns ``True``.
        :param condition: Condition which determines when the wait will end.
        :type condition: Function which returns ``bool``
        :param timeout: Timeout in seconds. If ``False``, the value of ``timeout`` used in the
            constructor of this object will be used. If ``None``, there is no timeout. Any other
            value is treated as a timeout in seconds.
        :type timeout: ``None``, ``False``, ``float``
        :param msg: Context message for :class:`proton.Timeout` exception
        :type msg: ``str``
        """
        if timeout is False:
            timeout = self.timeout
        if timeout is None:
            while not condition() and not self.disconnected:
                self.container.process()
        else:
            # Temporarily shorten the container's timeout so process() cannot
            # block past our deadline, then restore it.
            container_timeout = self.container.timeout
            self.container.timeout = timeout
            try:
                deadline = time.time() + timeout
                while not condition() and not self.disconnected:
                    self.container.process()
                    if deadline < time.time():
                        txt = "Connection %s timed out" % self.url
                        if msg:
                            txt += ": " + msg
                        raise Timeout(txt)
            finally:
                self.container.timeout = container_timeout
        if self.disconnected and not self._is_closed():
            raise ConnectionException(
                "Connection %s disconnected: %s" % (self.url, self.disconnected))

    def on_link_remote_close(self, event: 'Event') -> None:
        """
        Event callback for when the remote terminus closes.
        """
        if event.link.state & Endpoint.LOCAL_ACTIVE:
            event.link.close()
            if not self.closing:
                raise LinkDetached(event.link)

    def on_connection_remote_close(self, event: 'Event') -> None:
        """
        Event callback for when the link peer closes the connection.
        """
        if event.connection.state & Endpoint.LOCAL_ACTIVE:
            event.connection.close()
            if not self.closing:
                raise ConnectionClosed(event.connection)

    def on_transport_tail_closed(self, event: 'Event') -> None:
        """Treat a closed read side of the transport as a disconnect."""
        self.on_transport_closed(event)

    def on_transport_head_closed(self, event: 'Event') -> None:
        """Treat a closed write side of the transport as a disconnect."""
        self.on_transport_closed(event)

    def on_transport_closed(self, event: 'Event') -> None:
        """Record an unexpected transport loss so wait() can raise on it."""
        if not self.closing:
            self.disconnected = event.transport.condition or "unknown"
class AtomicCount:
    """Thread-safe atomic counter.

    Starts at *start* and advances by *step* on each call to :meth:`next`.
    """
    def __init__(self, start: int = 0, step: int = 1) -> None:
        self.count, self.step = start, step
        self.lock = threading.Lock()

    def next(self) -> int:
        """Atomically advance the counter and return the new value."""
        # `with` releases the lock even if the update raises, unlike the
        # previous explicit acquire()/release() pair.
        with self.lock:
            self.count += self.step
            return self.count
class SyncRequestResponse(IncomingMessageHandler):
    """
    Implementation of the synchronous request-response (aka RPC) pattern.
    A single instance can send many requests to the same or different
    addresses.
    :param connection: Connection for requests and responses.
    :param address: Address for all requests. If not specified, each request
        must have the address property set. Successive messages may have
        different addresses.
    """
    # Class-level counter shared by all instances, so correlation ids are
    # unique within the process.
    correlation_id = AtomicCount()
    def __init__(self, connection: BlockingConnection, address: Optional[str] = None) -> None:
        super(SyncRequestResponse, self).__init__()
        self.connection = connection
        self.address = address
        self.sender = self.connection.create_sender(self.address)
        # dynamic=true generates a unique address dynamically for this receiver.
        # credit=1 because we want to receive 1 response message initially.
        self.receiver = self.connection.create_receiver(None, dynamic=True, credit=1, handler=self)
        # Latched response message; set by on_message, consumed by call().
        self.response = None
    def call(self, request: 'Message') -> 'Message':
        """
        Send a request message, wait for and return the response message.
        :param request: Request message. If ``self.address`` is not set the
            request message address must be set and will be used.
        :return: The response message matching this request's correlation id.
        """
        if not self.address and not request.address:
            raise ValueError("Request message has no address: %s" % request)
        request.reply_to = self.reply_to
        request.correlation_id = correlation_id = str(self.correlation_id.next())
        self.sender.send(request)
        def wakeup():
            # Done once the latched response carries our correlation id.
            return self.response and (self.response.correlation_id == correlation_id)
        self.connection.wait(wakeup, msg="Waiting for response")
        response = self.response
        self.response = None  # Ready for next response.
        self.receiver.flow(1)  # Set up credit for the next response.
        return response
    @property
    def reply_to(self) -> str:
        """
        The dynamic address of our receiver.
        """
        return self.receiver.remote_source.address
    def on_message(self, event: 'Event') -> None:
        """
        Called when we receive a message for our receiver.
        :param event: The event which occurs when a message is received.
        """
        self.response = event.message
        self.connection.container.yield_()  # Wake up the wait() loop to handle the message.
|
{
"content_hash": "227b8e14bb11ca1300fd89215a4e90f6",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 121,
"avg_line_length": 38.60749185667753,
"alnum_prop": 0.6076355199325036,
"repo_name": "ChugR/qpid-proton",
"id": "f14db9f109a37dbd8225c335ee798b5b29cbbbc6",
"size": "24495",
"binary": false,
"copies": "1",
"ref": "refs/heads/tls2_wip",
"path": "python/proton/_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1731812"
},
{
"name": "C++",
"bytes": "795366"
},
{
"name": "CMake",
"bytes": "113200"
},
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Go",
"bytes": "305659"
},
{
"name": "Objective-C",
"bytes": "6108"
},
{
"name": "Python",
"bytes": "654425"
},
{
"name": "Ruby",
"bytes": "397997"
},
{
"name": "Shell",
"bytes": "14583"
}
],
"symlink_target": ""
}
|
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class DatabaseAccountsOperations(object):
"""DatabaseAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. The current version is 2015-04-08. Constant value: "2015-04-08".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-04-08"
self.config = config
def get(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the properties of an existing Azure DocumentDB database
account.
:param resource_group_name: Name of an Azure resource group.
:type resource_group_name: str
:param account_name: DocumentDB database account name.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DatabaseAccount
<azure.mgmt.documentdb.models.DatabaseAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def patch(
self, resource_group_name, account_name, tags, custom_headers=None, raw=False, **operation_config):
"""Patches the properties of an existing Azure DocumentDB database
account.
:param resource_group_name: Name of an Azure resource group.
:type resource_group_name: str
:param account_name: DocumentDB database account name.
:type account_name: str
:param tags:
:type tags: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DatabaseAccount
<azure.mgmt.documentdb.models.DatabaseAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
update_parameters = models.DatabaseAccountPatchParameters(tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(update_parameters, 'DatabaseAccountPatchParameters')
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def create_or_update(
self, resource_group_name, account_name, create_update_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an Azure DocumentDB database account.
:param resource_group_name: Name of an Azure resource group.
:type resource_group_name: str
:param account_name: DocumentDB database account name.
:type account_name: str
:param create_update_parameters: The parameters to provide for the
current database account.
:type create_update_parameters:
:class:`DatabaseAccountCreateUpdateParameters
<azure.mgmt.documentdb.models.DatabaseAccountCreateUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DatabaseAccount
<azure.mgmt.documentdb.models.DatabaseAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(create_update_parameters, 'DatabaseAccountCreateUpdateParameters')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Deletes an existing Azure DocumentDB database account.

    This starts a long-running operation; unless ``raw`` is true the call
    returns a poller that completes when the service finishes the delete.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        # Raw string for the validation pattern: '\w', '\.', '\(' are not
        # recognized escapes in a normal string literal and raise
        # DeprecationWarning/SyntaxWarning on modern Python versions.
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id so calls can be traced server-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    def long_running_send():
        # Initial DELETE that kicks off the long-running operation.
        request = self._client.delete(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the status link the service hands back.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 202 (accepted) and 204 (already gone) are the success codes.
        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    if raw:
        # raw=True short-circuits polling: one request, raw response back.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def failover_priority_change(
        self, resource_group_name, account_name, failover_policies=None, custom_headers=None, raw=False, **operation_config):
    """Changes the failover priority for the Azure DocumentDB database
    account. A failover priority of 0 indicates a write region. The maximum
    value for a failover priority = (total number of regions - 1). Failover
    priority values must be unique for each of the regions in which the
    database account exists.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param failover_policies: List of failover policies.
    :type failover_policies: list of :class:`FailoverPolicy
     <azure.mgmt.documentdb.models.FailoverPolicy>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Wrap the bare policy list in the body model the service expects.
    failover_parameters = models.FailoverPolicies(failover_policies=failover_policies)

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/failoverPriorityChange'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(failover_parameters, 'FailoverPolicies')

    # Construct and send request
    def long_running_send():
        # Initial POST that starts the long-running operation.
        request = self._client.post(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the status link returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 202/204 are the only success codes for this operation.
        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    if raw:
        # raw=True skips polling: send once and hand back the raw response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def list(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists all the Azure DocumentDB database accounts available under the
    subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DatabaseAccountPaged
     <azure.mgmt.documentdb.models.DatabaseAccountPaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page is built from the subscription-scoped URL; subsequent
        # pages reuse the service-supplied next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/databaseAccounts'
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response: the paged object drives internal_paging lazily.
    deserialized = models.DatabaseAccountPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # NOTE(review): header_dict is handed to the paged object but never
        # populated here (autogenerated pattern).
        header_dict = {}
        client_raw_response = models.DatabaseAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def list_by_resource_group(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Lists all the Azure DocumentDB database accounts available under the
    given resource group.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DatabaseAccountPaged
     <azure.mgmt.documentdb.models.DatabaseAccountPaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page is built from the resource-group-scoped URL; subsequent
        # pages reuse the service-supplied next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response: the paged object drives internal_paging lazily.
    deserialized = models.DatabaseAccountPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # NOTE(review): header_dict is handed to the paged object but never
        # populated here (autogenerated pattern).
        header_dict = {}
        client_raw_response = models.DatabaseAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def list_keys(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Lists the access keys for the specified Azure DocumentDB database
    account.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DatabaseAccountListKeysResult
     <azure.mgmt.documentdb.models.DatabaseAccountListKeysResult>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Build the request URL from subscription / resource group / account.
    endpoint = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listKeys',
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        accountName=self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3))

    # Query string and headers.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Keys are retrieved via POST (read of sensitive material).
    request = self._client.post(endpoint, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = None
    if response.status_code == 200:
        result = self._deserialize('DatabaseAccountListKeysResult', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
def list_connection_strings(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Lists the connection strings for the specified Azure DocumentDB
    database account.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DatabaseAccountListConnectionStringsResult
     <azure.mgmt.documentdb.models.DatabaseAccountListConnectionStringsResult>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listConnectionStrings'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (POST: returns sensitive connection data).
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('DatabaseAccountListConnectionStringsResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def list_read_only_keys(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Lists the read-only access keys for the specified Azure DocumentDB
    database account.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DatabaseAccountListReadOnlyKeysResult
     <azure.mgmt.documentdb.models.DatabaseAccountListReadOnlyKeysResult>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request.  Unlike list_keys (POST /listKeys), the
    # read-only keys are fetched with a GET against /readonlykeys.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('DatabaseAccountListReadOnlyKeysResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def regenerate_key(
        self, resource_group_name, account_name, key_kind, custom_headers=None, raw=False, **operation_config):
    """Regenerates an access key for the specified Azure DocumentDB database
    account.

    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param key_kind: The access key to regenerate. Possible values
     include: 'primary', 'secondary', 'primaryReadonly',
     'secondaryReadonly'
    :type key_kind: str or :class:`KeyKind
     <azure.mgmt.documentdb.models.KeyKind>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Wrap the key kind in the request-body model the service expects.
    key_to_regenerate = models.DatabaseAccountRegenerateKeyParameters(key_kind=key_kind)

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/regenerateKey'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(key_to_regenerate, 'DatabaseAccountRegenerateKeyParameters')

    # Construct and send request
    def long_running_send():
        # Initial POST that starts the long-running regeneration.
        request = self._client.post(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the status link returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 200 (done) and 202 (accepted) are the success codes here.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    if raw:
        # raw=True skips polling: send once and hand back the raw response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def check_name_exists(
        self, account_name, custom_headers=None, raw=False, **operation_config):
    """Checks that the Azure DocumentDB account name already exists. A valid
    account name may contain only lowercase letters, numbers, and the '-'
    character, and must be between 3 and 50 characters.

    :param account_name: DocumentDB database account name.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: bool
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Tenant-wide name check: no subscription/resource-group in the path.
    endpoint = self._client.format_url(
        '/providers/Microsoft.DocumentDB/databaseAccountNames/{accountName}',
        accountName=self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3))

    # Query string and headers.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # HEAD semantics: 200 means the name is taken, 404 means it is free.
    request = self._client.head(endpoint, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200, 404):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    exists = (response.status_code == 200)

    if raw:
        return ClientRawResponse(exists, response)
    return exists
|
{
"content_hash": "526eebf03fee478bfc8e5b37d156a0ba",
"timestamp": "",
"source": "github",
"line_count": 907,
"max_line_length": 168,
"avg_line_length": 46.99779492833517,
"alnum_prop": 0.6451075609355573,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "2bb39b1f6839c8dfc8fbd165cd4df7840f018d2c",
"size": "43101",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "azure-mgmt-documentdb/azure/mgmt/documentdb/operations/database_accounts_operations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import re
from pathlib import Path
import nox
@nox.session(python=["3.6", "3.7", "3.8", "3.9", "3.10", "3.11"])
def update_python_dependencies(session):
    """Re-pin the per-interpreter requirements lock file with pip-compile."""
    session.install("pip-tools")
    # One hashed lock file per interpreter version in the session matrix.
    lock_file = f"docker/build_scripts/requirements{session.python}.txt"
    compile_args = (
        "pip-compile",
        "--generate-hashes",
        "requirements.in",
        "--allow-unsafe",
        "--upgrade",
        "--output-file",
        lock_file,
    )
    session.run(*compile_args)
@nox.session(python="3.9")
def update_python_tools(session):
    """Re-pin the base-tools lock file and one lock file per tool."""
    session.install("pip-tools")
    # Shared base tools: one combined hashed lock file.
    session.run(
        "pip-compile",
        "--generate-hashes",
        "requirements-base-tools.in",
        "--upgrade",
        "--output-file",
        "docker/build_scripts/requirements-base-tools.txt",
    )
    # Each tool gets its own lock file so it can be upgraded independently.
    for tool in Path("requirements-tools.in").read_text().split("\n"):
        if not tool.strip():
            # Skip blank lines (including the trailing one from split).
            continue
        spec_file = Path(session.create_tmp()) / f"{tool}.in"
        spec_file.write_text(f"{tool}\n")
        session.run(
            "pip-compile",
            "--generate-hashes",
            str(spec_file),
            "--upgrade",
            "--output-file",
            f"docker/build_scripts/requirements-tools/{tool}",
        )
@nox.session(python="3.9", reuse_venv=True)
def update_native_dependencies(session):
    """Run the helper script that bumps pinned native dependencies."""
    # lastversion 1.6.0 and 2.0.0 are excluded — presumably broken releases;
    # confirm before relaxing the pin.
    session.install("lastversion!=1.6.0,!=2.0.0", "packaging", "requests")
    session.run("python", "update_native_dependencies.py", *session.posargs)
|
{
"content_hash": "ff3d9aae8c04e5b746af0543dd1031b9",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.5687885010266941,
"repo_name": "pypa/manylinux",
"id": "1f7f165ec4f909863f5cf00e25c4b3df9e52efa2",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "noxfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "366"
},
{
"name": "CMake",
"bytes": "472"
},
{
"name": "Dockerfile",
"bytes": "7339"
},
{
"name": "Python",
"bytes": "14433"
},
{
"name": "Shell",
"bytes": "46366"
}
],
"symlink_target": ""
}
|
from statistics import mean
from .items import RESONATOR_ENERGY
class Portal(object):
    """An Ingress portal: eight resonator slots and four mod slots.

    ``resonators`` holds resonator levels (ints) or ``None`` for empty
    slots; the ``_installed_*`` lists hold mod strength values.
    """

    def __init__(self):
        self.resonators = [None] * 8   # resonator levels, None = empty slot
        self.mods = [None] * 4         # generic mod slots
        self._installed_multi_hacks = []
        self._installed_heat_sinks = []
        self._installed_link_amps = []

    @property
    def level(self):
        """Portal level: floor(sum of resonator levels / 8), minimum 1.

        Fix: the previous ``sum // 8`` returned 0 for a portal whose
        deployed resonator levels summed to less than 8, contradicting the
        minimum level of 1 that the empty-portal branch already implies.
        """
        resonators = [r for r in self.resonators if r]
        if not resonators:
            return 1
        return max(1, sum(resonators) // 8)

    @property
    def energy(self):
        """Total portal energy, summed over deployed resonators."""
        return sum(RESONATOR_ENERGY[r] for r in self.resonators if r)

    @property
    def linkable_range(self):
        """Link range in metres.

        Base range = 160 m * (average resonator level ** 4).  Installed
        link amps multiply it: the strongest counts at full value, the
        second at 25%, and any further amps at 12.5% each.
        """
        resonators = [r for r in self.resonators if r]
        if not resonators:
            return 0
        base_range = int(160 * (mean(resonators) ** 4))
        if self._installed_link_amps:
            link_amps = sorted(self._installed_link_amps, reverse=True)
            multiplier = link_amps[0]
            for n, link_amp in enumerate(link_amps[1:]):
                if n == 0:
                    multiplier += link_amp * 0.25
                else:
                    multiplier += link_amp * 0.125
            return int(base_range * multiplier)
        return int(base_range)

    def _calc_influence_value_by_mods(self, mods):
        """Diminishing returns: strongest mod at full value, the rest halved."""
        mods = sorted(mods, reverse=True)
        if not mods:
            return []
        return mods[:1] + [mod / 2 for mod in mods[1:]]

    def hacks_before_burnout(self):
        """Number of hacks before burnout: base 4 plus multi-hack bonuses."""
        return 4 + sum(self._calc_influence_value_by_mods(self._installed_multi_hacks))

    def cooldown(self):
        """Seconds between hacks: base 300, reduced by each heat sink (%)."""
        seconds = 300
        for hs in self._calc_influence_value_by_mods(self._installed_heat_sinks):
            seconds *= (100 - hs) / 100
        return int(seconds)
|
{
"content_hash": "abb0f3be52c1a1179d2699738545aa10",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 82,
"avg_line_length": 29.210526315789473,
"alnum_prop": 0.5364864864864864,
"repo_name": "kk6/ingress-portal-calculator",
"id": "3b8df57e776334c25dc16b01635209dbba28fc09",
"size": "2244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipc/portal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5014"
}
],
"symlink_target": ""
}
|
"""The multinic extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import multinic
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
# Module-level logger for this extension.
LOG = logging.getLogger(__name__)

# Extension alias used in URLs and policy rule names.
ALIAS = "os-multinic"

# Policy authorizer for the v3 multinic extension ('compute:v3:os-multinic').
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class MultinicController(wsgi.Controller):
    """Server actions for adding and removing fixed IPs on an instance."""

    def __init__(self, *args, **kwargs):
        super(MultinicController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.action('add_fixed_ip')
    @extensions.expected_errors(404)
    @validation.schema(multinic.add_fixed_ip)
    def _add_fixed_ip(self, req, id, body):
        """Adds an IP on a given network to an instance."""
        context = req.environ['nova.context']
        authorize(context)

        # Raises 404 via common helper if the instance does not exist.
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        network_id = body['add_fixed_ip']['network_id']
        try:
            self.compute_api.add_fixed_ip(context, instance, network_id)
        except exception.NoMoreFixedIps as e:
            # Network out of addresses -> client error, not a 500.
            raise exc.HTTPBadRequest(explanation=e.format_message())
        # 202: operation accepted, completes asynchronously.
        return webob.Response(status_int=202)

    @wsgi.action('remove_fixed_ip')
    @extensions.expected_errors((400, 404))
    @validation.schema(multinic.remove_fixed_ip)
    def _remove_fixed_ip(self, req, id, body):
        """Removes an IP from an instance."""
        context = req.environ['nova.context']
        authorize(context)

        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        address = body['remove_fixed_ip']['address']

        try:
            self.compute_api.remove_fixed_ip(context, instance, address)
        except exception.FixedIpNotFoundForSpecificInstance as e:
            # The address is not attached to this instance -> 400.
            raise exc.HTTPBadRequest(explanation=e.format_message())
        # 202: operation accepted, completes asynchronously.
        return webob.Response(status_int=202)
# Note: The class name is as it has to be for this to be loaded as an
# extension--only first character capitalized.
class Multinic(extensions.V3APIExtensionBase):
    """Multiple network support."""

    name = "Multinic"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Attach MultinicController's actions to the 'servers' resource."""
        return [
            extensions.ControllerExtension(self, 'servers',
                                           MultinicController()),
        ]

    def get_resources(self):
        """No new top-level resources are added by this extension."""
        return []
|
{
"content_hash": "2b94240249b3b009abf428f81c49f400",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 33.77215189873418,
"alnum_prop": 0.6619190404797601,
"repo_name": "srajag/nova",
"id": "b3218624567cb47f13ac7bc91efa537a26406154",
"size": "3304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/plugins/v3/multinic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ViewQuestions.display_order'
db.add_column('website_viewquestions', 'display_order',
self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ViewQuestions.display_order'
db.delete_column('website_viewquestions', 'display_order')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
{
"content_hash": "5f3bf59cd0e5f14562d0cd14cde0bdd9",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 200,
"avg_line_length": 93.37562604340567,
"alnum_prop": 0.5472180504898806,
"repo_name": "solarpermit/solarpermit",
"id": "259f003aa593c75c6f879b88d7959fd48b355dda",
"size": "55956",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "website/migrations/0081_auto__add_field_viewquestions_display_order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126992"
},
{
"name": "JavaScript",
"bytes": "808802"
},
{
"name": "Python",
"bytes": "6625868"
}
],
"symlink_target": ""
}
|
import datetime
import requests
from django.test import TestCase
from wikidata import wikidata
from person.models import Person
from person.util import parse_name_surname_initials, parse_surname_comma_surname_prefix
class TestFindName(TestCase):
    """Tests for looking up Person records by full name or by surname + initials."""

    @classmethod
    def setUpTestData(cls):
        """Create persons with overlapping names to exercise disambiguation."""
        create = Person.objects.create
        cls.p1 = create(forename='Jan Peter', surname='Balkenende', initials='J.P.')
        cls.p2 = create(forename='Jan', surname='Balkenende', initials='J.')
        cls.p3 = create(forename='Jan', surname='Balkenende', surname_prefix='van', initials='J.')
        cls.p4 = create(forename='Jan Peter', surname='Balkenende', surname_prefix='van', initials='J.P.')
        cls.p5 = create(forename='Fatma', surname='Koşer Kaya', surname_prefix='', initials='F.')
        cls.p6 = create(forename='Jan Peter', surname='Balkenende', initials='')
        cls.p7 = create(forename='', surname='van Raak', initials='')
        cls.p8 = create(forename='', surname='Grapperhaus', initials='F.B.J.')
        cls.p9 = create(forename='Ferdinand', surname='Grapperhaus', initials='F.B.J.')

    def test_find_by_fullname(self):
        """A full name resolves to the single matching person, or None."""
        cases = [
            ('Jan Peter Balkenende', self.p1),
            ('Jan Balkenende', self.p2),
            ('Jan van Balkenende', self.p3),
            ('Jan Peter van Balkenende', self.p4),
            ('Jan Jaap van Balkenende', None),
            ('van Raak', self.p7),
        ]
        for fullname, expected in cases:
            self.assertEqual(expected, Person.find_by_fullname(fullname))

    def test_find_by_surname_initials(self):
        """Surname + initials lookup requires both parts to match."""
        cases = [
            (('Balkenende', 'J.P.'), self.p1),
            (('Balkenende', 'J.'), self.p2),
            (('Balkenende', ''), None),
            (('van der Steur', 'J.P.'), None),
            (('Koşer Kaya', 'F.'), self.p5),
        ]
        for (surname, initials), expected in cases:
            self.assertEqual(expected, Person.find_surname_initials(surname, initials))

    def test_find_surname_multiple(self):
        """Two persons share surname and initials; the expected one is returned."""
        self.assertEqual(self.p9, Person.find_surname_initials('Grapperhaus', 'F.B.J.'))
class TestNamePrefix(TestCase):
    """Tests for detecting and parsing Dutch surname prefixes ('van', 'de', ...)."""

    def test_find_name_prefix(self):
        """Person.find_prefix extracts the prefix only when it stands alone."""
        cases = [
            ('Ard van der Steur', 'van der'),
            ('Ard van derSteur', 'van'),
            ('Ard van de Steur', 'van de'),
            ('Ard van Steur', 'van'),
            ('Gerard \'t Hooft', '\'t'),
            ('Jan Peter Balkenende', ''),
            ('Boris van der Ham', 'van der'),
            ('van der Ham', 'van der'),
            ('van derHam', 'van'),
            ('von Martels', 'von'),
        ]
        for name, expected_prefix in cases:
            prefix, _ = Person.find_prefix(name)
            self.assertEqual(expected_prefix, prefix)

    def test_parse_surname_surname_prefix(self):
        """A 'surname, prefix' string splits into its two components."""
        surname, surname_prefix = parse_surname_comma_surname_prefix('Ham, van der')
        self.assertEqual('Ham', surname)
        self.assertEqual('van der', surname_prefix)
class TestCreatePerson(TestCase):
    """Tests creating a Person and enriching it from wikidata.

    NOTE(review): depends on live wikidata/wikimedia HTTP access.
    """

    def test_create_person(self):
        person = Person.objects.create(forename='Mark', surname='Rutte')
        self.assertEqual(1, Person.objects.count())
        self.assertTrue(Person.person_exists('Mark', 'Rutte'))
        # Enrich the record from wikidata and persist the result.
        person.update_info(language='nl')
        person.save()
        self.assertEqual('Q57792', person.wikidata_id)
        self.assertEqual('jpg', person.wikimedia_image_name.split('.')[1])
        response = requests.get(person.wikimedia_image_url, timeout=60)
        self.assertEqual(200, response.status_code)
        self.assertEqual(datetime.date(1967, 2, 14), person.birthdate)
        self.assertEqual('mark-rutte', person.slug)
class TestWikidataNameParts(TestCase):
    """Checks Person.get_name_parts against known wikidata items.

    NOTE(review): depends on live wikidata HTTP access.
    """

    @staticmethod
    def _name_parts(wikidata_id):
        """Fetch the wikidata label for *wikidata_id* and split it into
        (forename, surname, surname_prefix) via Person.get_name_parts."""
        wikidata_item = wikidata.WikidataItem(wikidata_id)
        fullname = wikidata_item.get_label()
        return Person.get_name_parts(fullname, wikidata_item)

    def test_fatma_koser_kaya(self):
        forename, surname, surname_prefix = self._name_parts('Q467610')  # Fatma Koşer Kaya
        self.assertEqual(forename, 'Fatma')
        self.assertEqual(surname, 'Koşer Kaya')
        self.assertEqual(surname_prefix, '')

    def test_jan_peter_balkenende(self):
        forename, surname, surname_prefix = self._name_parts('Q133386')
        self.assertEqual(forename, 'Jan Peter')
        self.assertEqual(surname, 'Balkenende')
        self.assertEqual(surname_prefix, '')

    def test_jan_kees_de_jager(self):
        forename, surname, surname_prefix = self._name_parts('Q1666631')
        self.assertEqual(forename, 'Jan Kees')
        self.assertEqual(surname, 'Jager')
        self.assertEqual(surname_prefix, 'de')

    def test_sjoerd_sjoerdsma(self):
        forename, surname, surname_prefix = self._name_parts('Q516335')
        self.assertEqual(forename, 'Sjoerd')
        self.assertEqual(surname, 'Sjoerdsma')
        self.assertEqual(surname_prefix, '')

    def test_sybrand_van_haersma_buma(self):
        forename, surname, surname_prefix = self._name_parts('Q377266')
        self.assertEqual(forename, 'Sybrand')
        self.assertEqual(surname, 'Haersma Buma')
        self.assertEqual(surname_prefix, 'van')

    def test_chantal_nijkerken_de_haan(self):
        forename, surname, surname_prefix = self._name_parts('Q19830701')
        self.assertEqual(forename, 'Chantal')
        self.assertEqual(surname, 'Nijkerken-de Haan')
        # Previously unasserted: 'de' belongs to the hyphenated surname,
        # so no separate prefix is expected (consistent with sibling tests).
        self.assertEqual(surname_prefix, '')

    def test_leendert_de_lange(self):
        forename, surname, surname_prefix = self._name_parts('Q19839084')
        self.assertEqual(forename, 'Leendert')
        self.assertEqual(surname_prefix, 'de')
        self.assertEqual(surname, 'Lange')

    def test_melanie_schultz_van_hagen(self):
        forename, surname, surname_prefix = self._name_parts('Q435886')
        self.assertEqual(forename, 'Melanie')
        self.assertEqual(surname_prefix, '')
        self.assertEqual(surname, 'Schultz van Haegen')
class TestParseName(TestCase):
    """Tests parsing of names into initials, surname and surname prefix."""

    initials_expected = 'P.A.'
    surname_expected = 'Dijkstra'

    def check_result(self, initials, surname):
        """Assert the parsed initials and surname match the expected values."""
        self.assertEqual(self.initials_expected, initials)
        self.assertEqual(self.surname_expected, surname)

    def _check(self, name, prefix_expected=None):
        """Parse *name* and verify initials/surname (and optionally the prefix)."""
        initials, surname, surname_prefix = parse_name_surname_initials(name)
        if prefix_expected is not None:
            self.assertEqual(prefix_expected, surname_prefix)
        self.check_result(initials, surname)

    def test_initials_surname(self):
        self._check('P.A. Dijkstra')

    def test_initials_surname_forname(self):
        for name in ('P.A. (Pia) Dijkstra', 'P.A. (Pia)Dijkstra'):
            self._check(name)

    def test_surname_initials(self):
        for name in ('Dijkstra, P.A.', 'Dijkstra,P.A.', 'Dijkstra P.A.',
                     'Dijkstra P.A. (Pia)', 'Dijkstra, (Pia) P.A.'):
            self._check(name)

    def test_surname_initials_forname(self):
        for name in ('Dijkstra, P.A.(Pia)', 'Dijkstra, P.A. (Pia)', 'Dijkstra,P.A.(Pia)'):
            self._check(name)

    def test_surname_prefix(self):
        for name in ('van Dijkstra, P.A.(Pia)', 'Dijkstra van, P.A. (Pia)',
                     'van Dijkstra,P.A.(Pia)'):
            self._check(name, prefix_expected='van')

    # TODO BR: fix and enable
    # def test_initials_multicharacter(self):
    #     name = 'A.Th.B. Bijleveld-Schouten'
    #     initials, surname, surname_prefix = parse_name_surname_initials(name)
    #     self.assertEqual(initials, 'A.Th.B.')
|
{
"content_hash": "7974a7817af765c441c6df9d9bc3cc57",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 121,
"avg_line_length": 44.13899613899614,
"alnum_prop": 0.6544786564030791,
"repo_name": "openkamer/openkamer",
"id": "154ad90a03c36b1254d6209b43d70202841798f5",
"size": "11436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "person/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "442"
},
{
"name": "CSS",
"bytes": "11171"
},
{
"name": "HTML",
"bytes": "154052"
},
{
"name": "JavaScript",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "513282"
},
{
"name": "Shell",
"bytes": "157"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from oscar.apps.promotions.models import Image, MultiImage, RawHTML, HandPickedProductList, OrderedProduct, AutomaticProductList, TabbedBlock, \
PagePromotion, KeywordPromotion, SingleProduct
class OrderProductInline(admin.TabularInline):
    """Tabular inline for editing OrderedProduct rows on a parent admin page."""
    model = OrderedProduct
class HandPickedProductListAdmin(admin.ModelAdmin):
    """Admin for hand-picked product lists; products are edited inline."""
    inlines = [OrderProductInline]
class PagePromotionAdmin(admin.ModelAdmin):
    """Admin for page promotions; click count is tracked, not edited."""
    list_display = ['page_url', 'content_object', 'position']
    exclude = ['clicks']

    def get_form(self, request, obj=None, **kwargs):
        """Return the default form, with content_type restricted to
        models belonging to the promotions app."""
        form = super(PagePromotionAdmin, self).get_form(request, obj, **kwargs)
        content_type_field = form.base_fields['content_type']
        content_type_field.queryset = content_type_field.queryset.filter(app_label='promotions')
        return form
class KeywordPromotionAdmin(admin.ModelAdmin):
    """Admin for keyword promotions; the click count is shown read-only."""
    list_display = ['keyword', 'position', 'clicks']
    readonly_fields = ['clicks']
# Register the promotion models; those without an explicit admin class
# fall back to the default ModelAdmin.
admin.site.register(Image)
admin.site.register(MultiImage)
admin.site.register(RawHTML)
admin.site.register(HandPickedProductList, HandPickedProductListAdmin)
admin.site.register(AutomaticProductList)
admin.site.register(TabbedBlock)
admin.site.register(PagePromotion, PagePromotionAdmin)
admin.site.register(KeywordPromotion, KeywordPromotionAdmin)
admin.site.register(SingleProduct)
|
{
"content_hash": "bb020ec841f0cc0207699ee28f886b9d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 144,
"avg_line_length": 37.578947368421055,
"alnum_prop": 0.7514005602240896,
"repo_name": "saadbinakhlaq/django-oscar",
"id": "a1f5674be9d95b29d5a15a26de3b93326d21c33e",
"size": "1428",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "oscar/apps/promotions/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
import os
if __name__ == "__main__":
    # When run as a script, prepend the SDK's bundled library directories
    # (../../libs and ../../external_libs, relative to this script's
    # directory, sys.path[0]) so their packages win over installed ones.
    here = sys.path[0]
    sys.path.insert(0, os.path.join(here, '..', '..','libs'))
    sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))
#============================ define ==========================================
#============================ imports =========================================
# built-in
import argparse
import socket
import time
import pprint
import threading
import json
import traceback
# requirements
import requests
import bottle
from bottle import hook
# SmartMeshSDK
from SmartMeshSDK import sdk_version
from SmartMeshSDK.utils import JsonManager
# DustCli
from dustCli import DustCli
#============================ helpers =========================================
pp = pprint.PrettyPrinter(indent=4)
#============================ classes =========================================
class JsonServer(object):
    """JSON/HTTP front-end to a SmartMesh manager, plus an interactive CLI."""

    def __init__(self, tcpport, autoaddmgr, autodeletemgr, serialport, configfilename):
        """Create the JSON manager, register CLI commands, start the web server.

        :param tcpport:        TCP port the bottle web server listens on.
        :param autoaddmgr:     passed to JsonManager (automatically add managers).
        :param autodeletemgr:  passed to JsonManager (automatically delete managers).
        :param serialport:     manager API serial port to connect to (or None).
        :param configfilename: name of the JSON configuration file.
        """
        # store params
        self.tcpport = tcpport
        self.autoaddmgr = autoaddmgr
        self.autodeletemgr = autodeletemgr
        self.serialport = serialport
        self.configfilename = configfilename
        # local variables
        self.jsonManager = JsonManager.JsonManager(
            autoaddmgr = autoaddmgr,
            autodeletemgr = autodeletemgr,
            serialport = serialport,
            configfilename = configfilename,
            notifCb = self._notif_cb,
        )
        #=== CLI interface
        self.cli = DustCli.DustCli(
            quit_cb = self._clihandle_quit,
            versions = {
                'SmartMesh SDK': sdk_version.VERSION,
            },
        )
        self.cli.registerCommand(
            name = 'status',
            alias = 's',
            description = 'get the current status of the application',
            params = [],
            callback = self._clihandle_status,
        )
        self.cli.registerCommand(
            name = 'serialports',
            alias = 'sp',
            description = 'list the available serialports',
            params = [],
            callback = self._clihandle_serialports,
        )
        self.cli.registerCommand(
            name = 'connectmanager',
            alias = 'cm',
            description = 'connect to a manager\'s API serial port',
            params = ['serialport'],
            callback = self._clihandle_connectmanager,
        )
        self.cli.registerCommand(
            name = 'disconnectmanager',
            alias = 'dm',
            description = 'disconnect from a manager\'s API serial port',
            params = ['serialport'],
            callback = self._clihandle_disconnectmanager,
        )
        #=== web server
        self.websrv = bottle.Bottle()
        #=== root
        self.websrv.route('/', 'GET', self._webhandle_root_GET)
        #=== static
        self.websrv.route('/static/<filename>', 'GET', self._webhandle_static)
        #=== status
        self.websrv.route('/api/v1/status', 'GET', self._webhandle_status_GET)
        #=== raw
        self.websrv.route('/api/v1/raw', 'POST', self._webhandle_raw_POST)
        #=== oap
        # Each OAP resource is reachable both by its symbolic name and by its
        # numeric OAP address (e.g. /main and /1 hit the same handler).
        # /info
        self.websrv.route('/api/v1/oap/<mac>/info', 'GET', self._webhandle_oap_info_GET)
        self.websrv.route('/api/v1/oap/<mac>/0', 'GET', self._webhandle_oap_info_GET)
        # /main
        self.websrv.route('/api/v1/oap/<mac>/main', 'GET', self._webhandle_oap_main_GET)
        self.websrv.route('/api/v1/oap/<mac>/1', 'GET', self._webhandle_oap_main_GET)
        self.websrv.route('/api/v1/oap/<mac>/main', 'PUT', self._webhandle_oap_main_PUT)
        self.websrv.route('/api/v1/oap/<mac>/1', 'PUT', self._webhandle_oap_main_PUT)
        # /digital_in
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D0', 'GET', self._webhandle_oap_digital_in_D0_GET)
        self.websrv.route('/api/v1/oap/<mac>/2/0', 'GET', self._webhandle_oap_digital_in_D0_GET)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D0', 'PUT', self._webhandle_oap_digital_in_D0_PUT)
        self.websrv.route('/api/v1/oap/<mac>/2/0', 'PUT', self._webhandle_oap_digital_in_D0_PUT)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D1', 'GET', self._webhandle_oap_digital_in_D1_GET)
        self.websrv.route('/api/v1/oap/<mac>/2/1', 'GET', self._webhandle_oap_digital_in_D1_GET)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D1', 'PUT', self._webhandle_oap_digital_in_D1_PUT)
        self.websrv.route('/api/v1/oap/<mac>/2/1', 'PUT', self._webhandle_oap_digital_in_D1_PUT)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D2', 'GET', self._webhandle_oap_digital_in_D2_GET)
        self.websrv.route('/api/v1/oap/<mac>/2/2', 'GET', self._webhandle_oap_digital_in_D2_GET)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D2', 'PUT', self._webhandle_oap_digital_in_D2_PUT)
        self.websrv.route('/api/v1/oap/<mac>/2/2', 'PUT', self._webhandle_oap_digital_in_D2_PUT)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D3', 'GET', self._webhandle_oap_digital_in_D3_GET)
        self.websrv.route('/api/v1/oap/<mac>/2/3', 'GET', self._webhandle_oap_digital_in_D3_GET)
        self.websrv.route('/api/v1/oap/<mac>/digital_in/D3', 'PUT', self._webhandle_oap_digital_in_D3_PUT)
        self.websrv.route('/api/v1/oap/<mac>/2/3', 'PUT', self._webhandle_oap_digital_in_D3_PUT)
        # /digital_out
        self.websrv.route('/api/v1/oap/<mac>/digital_out/D4', 'PUT', self._webhandle_oap_digital_out_D4_PUT)
        self.websrv.route('/api/v1/oap/<mac>/3/0', 'PUT', self._webhandle_oap_digital_out_D4_PUT)
        self.websrv.route('/api/v1/oap/<mac>/digital_out/D5', 'PUT', self._webhandle_oap_digital_out_D5_PUT)
        self.websrv.route('/api/v1/oap/<mac>/3/1', 'PUT', self._webhandle_oap_digital_out_D5_PUT)
        self.websrv.route('/api/v1/oap/<mac>/digital_out/INDICATOR_0', 'PUT', self._webhandle_oap_digital_out_INDICATOR_0_PUT)
        self.websrv.route('/api/v1/oap/<mac>/3/2', 'PUT', self._webhandle_oap_digital_out_INDICATOR_0_PUT)
        # /analog
        self.websrv.route('/api/v1/oap/<mac>/analog/A0', 'GET', self._webhandle_oap_analog_A0_GET)
        self.websrv.route('/api/v1/oap/<mac>/4/0', 'GET', self._webhandle_oap_analog_A0_GET)
        self.websrv.route('/api/v1/oap/<mac>/analog/A0', 'PUT', self._webhandle_oap_analog_A0_PUT)
        self.websrv.route('/api/v1/oap/<mac>/4/0', 'PUT', self._webhandle_oap_analog_A0_PUT)
        self.websrv.route('/api/v1/oap/<mac>/analog/A1', 'GET', self._webhandle_oap_analog_A1_GET)
        self.websrv.route('/api/v1/oap/<mac>/4/1', 'GET', self._webhandle_oap_analog_A1_GET)
        self.websrv.route('/api/v1/oap/<mac>/analog/A1', 'PUT', self._webhandle_oap_analog_A1_PUT)
        self.websrv.route('/api/v1/oap/<mac>/4/1', 'PUT', self._webhandle_oap_analog_A1_PUT)
        self.websrv.route('/api/v1/oap/<mac>/analog/A2', 'GET', self._webhandle_oap_analog_A2_GET)
        self.websrv.route('/api/v1/oap/<mac>/4/2', 'GET', self._webhandle_oap_analog_A2_GET)
        self.websrv.route('/api/v1/oap/<mac>/analog/A2', 'PUT', self._webhandle_oap_analog_A2_PUT)
        self.websrv.route('/api/v1/oap/<mac>/4/2', 'PUT', self._webhandle_oap_analog_A2_PUT)
        self.websrv.route('/api/v1/oap/<mac>/analog/A3', 'GET', self._webhandle_oap_analog_A3_GET)
        self.websrv.route('/api/v1/oap/<mac>/4/3', 'GET', self._webhandle_oap_analog_A3_GET)
        self.websrv.route('/api/v1/oap/<mac>/analog/A3', 'PUT', self._webhandle_oap_analog_A3_PUT)
        self.websrv.route('/api/v1/oap/<mac>/4/3', 'PUT', self._webhandle_oap_analog_A3_PUT)
        # /temperature
        self.websrv.route('/api/v1/oap/<mac>/temperature', 'GET', self._webhandle_oap_temperature_GET)
        self.websrv.route('/api/v1/oap/<mac>/5', 'GET', self._webhandle_oap_temperature_GET)
        self.websrv.route('/api/v1/oap/<mac>/temperature', 'PUT', self._webhandle_oap_temperature_PUT)
        self.websrv.route('/api/v1/oap/<mac>/5', 'PUT', self._webhandle_oap_temperature_PUT)
        # /pkgen
        self.websrv.route('/api/v1/oap/<mac>/pkgen/echo', 'GET', self._webhandle_oap_pkgen_echo_GET)
        self.websrv.route('/api/v1/oap/<mac>/254/0', 'GET', self._webhandle_oap_pkgen_echo_GET)
        self.websrv.route('/api/v1/oap/<mac>/pkgen', 'PUT', self._webhandle_oap_pkgen_PUT)
        self.websrv.route('/api/v1/oap/<mac>/254', 'PUT', self._webhandle_oap_pkgen_PUT)
        #=== helpers
        self.websrv.route('/api/v1/helpers/serialports', 'GET', self._webhandle_helpers_serialports_GET)
        self.websrv.route('/api/v1/helpers/motes', 'GET', self._webhandle_helpers_motes_GET)
        self.websrv.route('/api/v1/helpers/oapmotes', 'GET', self._webhandle_helpers_oapmotes_GET)
        self.websrv.route('/api/v1/helpers/snapshot', 'POST', self._webhandle_helpers_snapshot_POST)
        self.websrv.route('/api/v1/helpers/snapshot', 'GET', self._webhandle_helpers_snapshot_GET)
        #=== config
        self.websrv.route('/api/v1/config', 'GET', self._webhandle_config_GET)
        self.websrv.route('/api/v1/config', 'POST', self._webhandle_config_POST)
        #=== managers
        self.websrv.route('/api/v1/config/managers', 'PUT', self._webhandle_managers_PUT)
        self.websrv.route('/api/v1/config/managers', 'DELETE', self._webhandle_managers_DELETE)
        # custom JSON error pages
        self.websrv.error(code=404)(self._webhandler_error_404)
        self.websrv.error(code=500)(self._webhandler_error_500)
        # echo the caller's correlation token on every response
        self.websrv.hook('after_request')(self._add_JsonServer_token_if_requested)
        # run bottle in a daemon thread so the CLI keeps the foreground
        webthread = threading.Thread(
            target = self._bottle_try_running_forever,
            args = (self.websrv.run,),
            kwargs = {
                'host' : '127.0.0.1',
                'port' : self.tcpport,
                'quiet' : True,
                'debug' : False,
            }
        )
        webthread.name = 'WebServer'
        webthread.daemon = True
        webthread.start()
    #======================== admin ===========================================

    def _bottle_try_running_forever(self,*args,**kwargs):
        """Run args[0](**kwargs) (a blocking bottle server) and retry forever.

        Retries every RETRY_PERIOD seconds after any failure.
        """
        RETRY_PERIOD = 3
        while True:
            try:
                args[0](**kwargs) # blocking
            except socket.error as err:
                if err[0]==10013:
                    # Windows socket error 10013 (WSAEACCES): cannot bind port.
                    print 'FATAL: cannot open TCP port {0}.'.format(kwargs['port'])
                    print ' Is another application running on that port?'
                else:
                    # NOTE(review): logError is not defined/imported in this
                    # module -- this branch would raise NameError; confirm.
                    print logError(err)
            except Exception as err:
                # NOTE(review): same undefined logError as above; confirm.
                print logError(err)
            print ' Trying again in {0} seconds'.format(RETRY_PERIOD),
            for _ in range(RETRY_PERIOD):
                time.sleep(1)
                print '.',
            print ''
    #======================== CLI handlers ====================================

    def _clihandle_quit(self):
        """CLI 'quit': close the JSON manager and exit."""
        self.jsonManager.close()
        time.sleep(.3)
        print "bye bye."

    def _clihandle_status(self,params):
        """CLI 'status': pretty-print the application status."""
        pp.pprint(self.jsonManager.status_GET())

    def _clihandle_serialports(self,params):
        """CLI 'serialports': pretty-print the available serial ports."""
        pp.pprint(self.jsonManager.serialports_GET())

    def _clihandle_connectmanager(self,params):
        """CLI 'connectmanager': connect to the serial port in params[0]."""
        self.jsonManager.managers_PUT([params[0],])

    def _clihandle_disconnectmanager(self,params):
        """CLI 'disconnectmanager': disconnect the serial port in params[0]."""
        self.jsonManager.managers_DELETE([params[0],])
    #======================== web handlers ====================================

    def _add_JsonServer_token_if_requested(self):
        """after_request hook: echo the X-Correlation-ID request header, if any."""
        try:
            bottle.response.headers['X-Correlation-ID'] = bottle.request.headers['X-Correlation-ID']
        except KeyError:
            # caller did not send a correlation header; nothing to echo
            pass

    #=== root

    def _webhandle_root_GET(self):
        # serve the landing page from the working directory
        return bottle.static_file('index.html', root='.')

    #=== static

    def _webhandle_static(self,filename):
        # serve static assets from the local 'static/' directory
        return bottle.static_file(filename, root='static/')

    #=== status

    def _webhandle_status_GET(self):
        return self.jsonManager.status_GET()

    #=== raw

    def _webhandle_raw_POST(self):
        """Forward a raw API command to a manager.

        JSON body: 'command' (required), 'subcommand' and 'fields'
        (optional), 'manager' (required).
        """
        commandArray = []
        commandArray += [bottle.request.json['command']]
        try:
            commandArray += [bottle.request.json['subcommand']]
        except KeyError:
            # subcommand is optional
            pass
        try:
            fields = bottle.request.json['fields']
        except KeyError:
            fields = {}
        manager = bottle.request.json['manager']
        return self.jsonManager.raw_POST(commandArray, fields, manager)
    #=== oap
    # Thin wrappers: each forwards to the matching JsonManager OAP call,
    # passing the pin/channel index implied by the route name.

    # /info
    def _webhandle_oap_info_GET(self,mac):
        return self.jsonManager.oap_info_GET(mac)

    # /main
    def _webhandle_oap_main_GET(self,mac):
        return self.jsonManager.oap_main_GET(mac)

    def _webhandle_oap_main_PUT(self,mac):
        return self.jsonManager.oap_main_PUT(mac,body=bottle.request.json)

    # /digital_in
    def _webhandle_oap_digital_in_D0_GET(self,mac):
        return self.jsonManager.oap_digital_in_GET(mac,0)

    def _webhandle_oap_digital_in_D0_PUT(self,mac):
        return self.jsonManager.oap_digital_in_PUT(mac,0,body=bottle.request.json)

    def _webhandle_oap_digital_in_D1_GET(self,mac):
        return self.jsonManager.oap_digital_in_GET(mac,1)

    def _webhandle_oap_digital_in_D1_PUT(self,mac):
        return self.jsonManager.oap_digital_in_PUT(mac,1,body=bottle.request.json)

    def _webhandle_oap_digital_in_D2_GET(self,mac):
        return self.jsonManager.oap_digital_in_GET(mac,2)

    def _webhandle_oap_digital_in_D2_PUT(self,mac):
        return self.jsonManager.oap_digital_in_PUT(mac,2,body=bottle.request.json)

    def _webhandle_oap_digital_in_D3_GET(self,mac):
        return self.jsonManager.oap_digital_in_GET(mac,3)

    def _webhandle_oap_digital_in_D3_PUT(self,mac):
        return self.jsonManager.oap_digital_in_PUT(mac,3,body=bottle.request.json)

    # /digital_out
    def _webhandle_oap_digital_out_D4_PUT(self,mac):
        return self.jsonManager.oap_digital_out_PUT(mac,0,body=bottle.request.json)

    def _webhandle_oap_digital_out_D5_PUT(self,mac):
        return self.jsonManager.oap_digital_out_PUT(mac,1,body=bottle.request.json)

    def _webhandle_oap_digital_out_INDICATOR_0_PUT(self,mac):
        return self.jsonManager.oap_digital_out_PUT(mac,2,body=bottle.request.json)

    # /analog
    def _webhandle_oap_analog_A0_GET(self,mac):
        return self.jsonManager.oap_analog_GET(mac,0)

    def _webhandle_oap_analog_A0_PUT(self,mac):
        return self.jsonManager.oap_analog_PUT(mac,0,body=bottle.request.json)

    def _webhandle_oap_analog_A1_GET(self,mac):
        return self.jsonManager.oap_analog_GET(mac,1)

    def _webhandle_oap_analog_A1_PUT(self,mac):
        return self.jsonManager.oap_analog_PUT(mac,1,body=bottle.request.json)

    def _webhandle_oap_analog_A2_GET(self,mac):
        return self.jsonManager.oap_analog_GET(mac,2)

    def _webhandle_oap_analog_A2_PUT(self,mac):
        return self.jsonManager.oap_analog_PUT(mac,2,body=bottle.request.json)

    def _webhandle_oap_analog_A3_GET(self,mac):
        return self.jsonManager.oap_analog_GET(mac,3)

    def _webhandle_oap_analog_A3_PUT(self,mac):
        return self.jsonManager.oap_analog_PUT(mac,3,body=bottle.request.json)

    # /temperature
    def _webhandle_oap_temperature_GET(self,mac):
        return self.jsonManager.oap_temperature_GET(mac)

    def _webhandle_oap_temperature_PUT(self,mac):
        return self.jsonManager.oap_temperature_PUT(mac,body=bottle.request.json)

    # /pkgen
    def _webhandle_oap_pkgen_echo_GET(self,mac):
        return self.jsonManager.oap_pkgen_echo_GET(mac)

    def _webhandle_oap_pkgen_PUT(self,mac):
        return self.jsonManager.oap_pkgen_PUT(mac,body=bottle.request.json)
    #=== helpers

    def _webhandle_helpers_serialports_GET(self):
        # list serial ports visible on this host
        return self.jsonManager.serialports_GET()

    def _webhandle_helpers_motes_GET(self):
        return self.jsonManager.motes_GET()

    def _webhandle_helpers_oapmotes_GET(self):
        return self.jsonManager.oapmotes_GET()

    def _webhandle_helpers_snapshot_POST(self):
        """Start a snapshot on the manager named in the JSON body;
        'correlationID' is optional."""
        try:
            correlationID = bottle.request.json['correlationID']
        except KeyError:
            correlationID = None
        return self.jsonManager.snapshot_POST(
            manager = bottle.request.json['manager'],
            correlationID = correlationID,
        )

    def _webhandle_helpers_snapshot_GET(self):
        # return the result of the last snapshot
        return self.jsonManager.snapshot_GET()

    #=== config

    def _webhandle_config_GET(self):
        return self.jsonManager.config_GET()

    def _webhandle_config_POST(self):
        return self.jsonManager.config_POST(bottle.request.json)

    def _webhandle_managers_PUT(self):
        # connect the managers listed in the JSON body
        return self.jsonManager.managers_PUT(bottle.request.json['managers'])

    def _webhandle_managers_DELETE(self):
        # disconnect the managers listed in the JSON body
        return self.jsonManager.managers_DELETE(bottle.request.json['managers'])
    #=== errors

    def _webhandler_error_404(self,error):
        """Return a JSON body for unknown routes."""
        error_data = {
            'body': 'There\'s nothing there! https://vine.co/v/OiZOJxjDitQ/embed/simple?audio=1',
        }
        bottle.response.status = 404
        bottle.response.content_type = 'application/json'
        return json.dumps(error_data)

    def _webhandler_error_500(self,error):
        """Map NotImplementedError to 501; anything else to a 500 JSON payload
        carrying the exception text and traceback."""
        if type(error.exception)==NotImplementedError:
            error_data = {
                'body': 'Not implemented, yet :-)',
            }
            bottle.response.status = 501
        else:
            error_data = {
                'body': 'internal server error',
                'exception': str(error.exception),
                'traceback': error.traceback,
            }
        bottle.response.content_type = 'application/json'
        return json.dumps(error_data)
    #=== notifications

    def _notif_cb(self,notifName,notifJson):
        """JsonManager callback: POST the notification JSON to each URL
        configured for *notifName*."""
        # find notification URLs
        urls = self.jsonManager.config_GET()['notification_urls'][notifName]
        # send notifications
        if urls:
            for url in urls:
                # one thread per URL so a slow receiver doesn't block others
                notifthread = threading.Thread(
                    target = self._send_notif_thread,
                    args = (
                        url,
                    ),
                    kwargs = {
                        'data' : json.dumps(notifJson),
                        'headers' : {
                            'Content-type': 'application/json',
                        },
                    }
                )
                notifthread.name = '{0}->{1}'.format(notifName,url)
                notifthread.start()

    def _send_notif_thread(self,*args,**kwargs):
        """Thread body: POST one notification; delivery is best-effort."""
        try:
            requests.post(*args,**kwargs)
        except requests.exceptions.ConnectionError:
            # receiver unreachable; silently drop the notification
            pass
        except Exception as err:
            print err
#============================ main ============================================
def main(args):
    """Entry point: start the JSON server with the parsed CLI arguments.

    :param args: dict of keyword arguments for JsonServer (tcpport, ...).
    """
    # The constructor starts the web server thread and CLI; the instance
    # was previously bound to an unused local, which added nothing.
    JsonServer(**args)
if __name__=="__main__":
    # Parse command-line options and hand them to main() as keyword args.
    parser = argparse.ArgumentParser()
    # type=int: argparse passes CLI values as strings by default, so a
    # user-supplied --tcpport would otherwise reach the web server as a str.
    parser.add_argument('--tcpport', default=8080, type=int)
    parser.add_argument('--autoaddmgr', default=True)
    parser.add_argument('--autodeletemgr', default=True)
    parser.add_argument('--serialport', default=None)
    parser.add_argument('--configfilename', default='JsonServer.config')
    args = vars(parser.parse_args())
    main(args)
|
{
"content_hash": "be466888dc995bb2f0bbc16071542dca",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 132,
"avg_line_length": 45.192546583850934,
"alnum_prop": 0.5260216236027121,
"repo_name": "realms-team/solmanager",
"id": "6b781a36d9d6d2cf3ff51cfaad177f43b1527484",
"size": "21928",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/smartmeshsdk-REL-1.3.0.1/app/JsonServer/JsonServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3408"
},
{
"name": "CSS",
"bytes": "1148"
},
{
"name": "HTML",
"bytes": "1568"
},
{
"name": "JavaScript",
"bytes": "1430296"
},
{
"name": "Makefile",
"bytes": "8195"
},
{
"name": "Python",
"bytes": "3428922"
},
{
"name": "Smarty",
"bytes": "5800"
}
],
"symlink_target": ""
}
|
"""
Mail (SMTP) notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.smtp/
"""
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import email.utils
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_SENDER, CONF_RECIPIENT)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

ATTR_IMAGES = 'images'  # optional embedded image file attachments

# configuration keys
CONF_STARTTLS = 'starttls'
CONF_DEBUG = 'debug'
CONF_SERVER = 'server'

# defaults used when the keys above are absent from the config
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 25
DEFAULT_DEBUG = False
DEFAULT_STARTTLS = False

# Configuration schema for the smtp notify platform; only the recipient
# address is mandatory.
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_RECIPIENT): vol.Email(),
    vol.Optional(CONF_SERVER, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SENDER): vol.Email(),
    vol.Optional(CONF_STARTTLS, default=DEFAULT_STARTTLS): cv.boolean,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
})
def get_service(hass, config):
    """Get the mail notification service.

    Builds the service from the validated platform config and verifies SMTP
    connectivity once; returns None when the server cannot be reached so the
    platform fails fast at setup time.
    """
    mail_service = MailNotificationService(
        config.get(CONF_SERVER),
        config.get(CONF_PORT),
        config.get(CONF_SENDER),
        config.get(CONF_STARTTLS),
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        config.get(CONF_RECIPIENT),
        config.get(CONF_DEBUG))

    # Guard-clause style: the redundant `else: return None` after a return
    # is dropped.
    if mail_service.connection_is_valid():
        return mail_service
    return None
class MailNotificationService(BaseNotificationService):
    """Implement the notification service for E-Mail messages."""

    def __init__(self, server, port, sender, starttls, username,
                 password, recipient, debug):
        """Initialize the service."""
        self._server = server
        self._port = port
        self._sender = sender
        self.starttls = starttls
        self.username = username
        self.password = password
        self.recipient = recipient
        self.debug = debug
        # number of sendmail attempts before giving up (see _send_email)
        self.tries = 2

    def connect(self):
        """Connect/authenticate to SMTP Server.

        Returns a ready-to-use smtplib.SMTP connection; STARTTLS and login
        are only performed when configured.
        """
        mail = smtplib.SMTP(self._server, self._port, timeout=5)
        mail.set_debuglevel(self.debug)
        mail.ehlo_or_helo_if_needed()
        if self.starttls:
            mail.starttls()
            mail.ehlo()
        if self.username and self.password:
            mail.login(self.username, self.password)
        return mail

    def connection_is_valid(self):
        """Check for valid config, verify connectivity."""
        server = None
        try:
            server = self.connect()
        except smtplib.socket.gaierror:
            _LOGGER.exception(
                "SMTP server not found (%s:%s). "
                "Please check the IP address or hostname of your SMTP server",
                self._server, self._port)

            return False
        except (smtplib.SMTPAuthenticationError, ConnectionRefusedError):
            _LOGGER.exception(
                "Login not possible. "
                "Please check your setting and/or your credentials")

            return False
        finally:
            # quit() runs on both the success and failure paths
            if server:
                server.quit()

        return True

    def send_message(self, message="", **kwargs):
        """
        Build and send a message to a user.

        Will send plain text normally, or will build a multipart HTML message
        with inline image attachments if images config is defined.
        """
        subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        data = kwargs.get(ATTR_DATA)

        if data:
            msg = _build_multipart_msg(message, images=data.get(ATTR_IMAGES))
        else:
            msg = _build_text_msg(message)

        msg['Subject'] = subject
        msg['To'] = self.recipient
        msg['From'] = self._sender
        msg['X-Mailer'] = 'HomeAssistant'
        msg['Date'] = email.utils.format_datetime(dt_util.now())
        msg['Message-Id'] = email.utils.make_msgid()

        return self._send_email(msg)

    def _send_email(self, msg):
        """Send the message.

        Retries once with a fresh connection after an SMTPException.
        """
        mail = self.connect()
        for _ in range(self.tries):
            try:
                mail.sendmail(self._sender, self.recipient,
                              msg.as_string())
                break
            except smtplib.SMTPException:
                _LOGGER.warning('SMTPException sending mail: '
                                'retrying connection')
                mail.quit()
                mail = self.connect()
        mail.quit()
def _build_text_msg(message):
    """Wrap *message* in a plain-text MIME part."""
    _LOGGER.debug('Building plain text email')
    part = MIMEText(message)
    return part
def _build_multipart_msg(message, images):
    """Build a multipart/related message embedding *images* inline.

    The HTML alternative references each attached image via a ``cid:`` URL;
    image files that cannot be opened are skipped with a warning.
    """
    _LOGGER.debug('Building multipart email with embedded attachment(s)')
    msg = MIMEMultipart('related')
    msg_alt = MIMEMultipart('alternative')
    msg.attach(msg_alt)
    msg_alt.attach(MIMEText(message))

    html_parts = ['<p>{}</p><br>'.format(message)]
    for index, image_path in enumerate(images):
        cid = 'image{}'.format(index)
        html_parts.append('<img src="cid:{}"><br>'.format(cid))
        try:
            with open(image_path, 'rb') as image_file:
                attachment = MIMEImage(image_file.read())
        except FileNotFoundError:
            _LOGGER.warning('Attachment %s not found. Skipping',
                            image_path)
            continue
        msg.attach(attachment)
        attachment.add_header('Content-ID', '<{}>'.format(cid))

    msg_alt.attach(MIMEText(''.join(html_parts), 'html'))
    return msg
|
{
"content_hash": "d8b55e4533c135147189bb9fe5d9ac5c",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 78,
"avg_line_length": 32.617801047120416,
"alnum_prop": 0.6178170144462279,
"repo_name": "xifle/home-assistant",
"id": "6ef9bc3299000bb6fa099759dea69edd06464741",
"size": "6230",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/smtp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1436909"
},
{
"name": "Python",
"bytes": "4512596"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "4460"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: drops the Facebook-provider-specific models
    # (FacebookApp, FacebookAccessToken, FacebookAccount).

    def forwards(self, orm):
        # Removing unique constraint on 'FacebookAccessToken', fields ['app', 'account']
        # (must go before the table itself is dropped)
        db.delete_unique('facebook_facebookaccesstoken', ['app_id', 'account_id'])

        # Deleting model 'FacebookApp'
        db.delete_table('facebook_facebookapp')

        # Deleting model 'FacebookAccessToken'
        db.delete_table('facebook_facebookaccesstoken')

        # Deleting model 'FacebookAccount'
        db.delete_table('facebook_facebookaccount')

    def backwards(self, orm):
        # Adding model 'FacebookApp'
        db.create_table('facebook_facebookapp', (
            ('application_id', self.gf('django.db.models.fields.CharField')(max_length=80)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
            ('api_key', self.gf('django.db.models.fields.CharField')(max_length=80)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('application_secret', self.gf('django.db.models.fields.CharField')(max_length=80)),
        ))
        db.send_create_signal('facebook', ['FacebookApp'])

        # Adding model 'FacebookAccessToken'
        db.create_table('facebook_facebookaccesstoken', (
            ('access_token', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['facebook.FacebookAccount'])),
            ('app', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['facebook.FacebookApp'])),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('facebook', ['FacebookAccessToken'])

        # Adding unique constraint on 'FacebookAccessToken', fields ['app', 'account']
        db.create_unique('facebook_facebookaccesstoken', ['app_id', 'account_id'])

        # Adding model 'FacebookAccount'
        db.create_table('facebook_facebookaccount', (
            ('socialaccount_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['socialaccount.SocialAccount'], unique=True, primary_key=True)),
            ('social_id', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
            ('link', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('facebook', ['FacebookAccount'])

    # no frozen ORM needed once the models are gone
    models = {

    }

    complete_apps = ['facebook']
|
{
"content_hash": "dbd7da216cc494b15fae77441ff7cf7d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 162,
"avg_line_length": 45.17741935483871,
"alnum_prop": 0.6383434487682971,
"repo_name": "uroslates/django-allauth",
"id": "0032293446f5ce8f5c8af3c7193bad94ac6a5351",
"size": "2819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/facebook/migrations/0004_auto__del_facebookapp__del_facebookaccesstoken__del_unique_facebookacc.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import logging
import sys
import error as sb_err
from twisted.internet import defer, reactor
from twisted.internet.defer import inlineCallbacks
from task import *
from torrent import *
from webclient import *
from worker import *
from log import getLogger, TaskLoggerAdapter
from tasklog import *
class ProccessType:
    """Sickbeard API processing type."""
    AUTOMATIC = "auto"
    MANUAL = "manual"
    # all supported values, in their canonical order
    ALL = [AUTOMATIC, MANUAL]
    DEFAULT = AUTOMATIC
class ProccessMethod:
    """Sickbeard API processing method."""
    # supported API values and their human-readable labels (index-aligned)
    ALL = [ "default" , "copy", "move", "hardlink", "symlink" ]
    TRANSLATION = [ 'Sickbeard Default', 'Copy', 'Move', 'Hardlink', 'Symlink' ]
    SB_DEFAULT, COPY, MOVE, HARDLINK, SYMLINK = ALL
    DEFAULT = SB_DEFAULT

    @staticmethod
    def getTranslations():
        """Return a dict mapping each method value to its display label."""
        # zip pairs each option with its label directly -- clearer and less
        # error-prone than the previous index loop over range(0, len(ALL)).
        return dict(zip(ProccessMethod.ALL, ProccessMethod.TRANSLATION))
class SickbeardWorker(Worker):
    """
    Call Sickbeard API to post-process a torrent.

    Run Sickbeard post-processing in a controlled way making sure that both
    the Deluge daemon as well as Sickbeard are not taking up all system
    resources for post-processing. Especially Deluge daemon's main priority
    should remain downloading torrents.
    """

    # Class attributes below must be set by the plugin before instantiating
    # (enforced in __init__).
    manager = None   # Deluge torrent manager
    config = None    # Sickbeard deluge plugin configuration
    tasklog = None   # Log completed tasks

    logger = getLogger('deluge.sickbeard.SickbeardWorker')
    logger_webclient = getLogger('deluge.sickbeard.SickbeardWorker.WebClient')

    # Sickbeard post-processing endpoint path
    _PATH_POSTPROCESS = "/home/postprocess/processEpisode"

    # Substrings of Sickbeard's response that indicate failure...
    ERRORS = [
        'Processing failed for',
        'Validator',
        'Failed Download Processing failed',
        'Problem(s) during processing',
        'Error:'
    ]

    # ...and success.
    SUCCESS = [
        'Processing succeeded for',
        'Failed Download Processing succeeded',
        'Successfully processed',
        "You're trying to post process a video that's already been processed, skipping"
    ]
    def __init__(self, dqueue):
        """Create a worker bound to *dqueue*.

        Raises TypeError unless the class attributes manager, config and
        tasklog have been set first.
        """
        super(SickbeardWorker, self).__init__(dqueue)
        if SickbeardWorker.manager is None or \
           SickbeardWorker.config is None or \
           SickbeardWorker.tasklog is None:
            raise TypeError("SickbeardWorker requires a valid manager, config and tasklog object.")
        self.task = None
        # plain logger plus task-aware adapters; the task is attached to the
        # adapters again inside work() once a task is being processed
        self.log = SickbeardWorker.logger
        self.tlog = TaskLoggerAdapter(SickbeardWorker.logger)
        self.tlog.set_task(self.task)
        self.wlog = TaskLoggerAdapter(SickbeardWorker.logger_webclient)
        self.wlog.set_task(self.task)
    @inlineCallbacks
    def work(self, task):
        """Post-process *task*'s torrent through Sickbeard.

        Updates task.status, optionally schedules the torrent for removal on
        success, always records the task in the task log, and fires the
        returned Deferred with True on success / False on failure.
        """
        task.status = TaskStatus.PROCESSING
        # NOTE(review): tid appears unused below -- confirm before removing
        tid = task.torrent_info.id
        try:
            # Store pointer to task
            self.task = task
            # Save worker info to task
            self.task.worker_id = self.id
            self.task.worker_seq = self.seq
            # Initialize logger adapter with task being worked on
            self.tlog.set_task(self.task)
            self.wlog.set_task(self.task)
            torrent = task.torrent
            name = TorrentInfo.get_display_name(torrent)
            id = TorrentInfo.get_id(torrent)
            self.tlog.info("post-process torrent(%s)" % name)
            if torrent is None:
                # torrent vanished from the session before we got to it
                self.tlog.error("torrent with id '%s' is no longer with us anymore." % id)
                defer.returnValue(False)
            result = yield self.call_sickbeard(task)
            task.status = TaskStatus.SUCCESS if result else TaskStatus.FAILED
            remove, remove_data = (self.config['remove'], self.config['remove_data'])
            if result and remove:
                self.tlog.info("schedule torrent(%s) for removal" % name)
                self.manager.remove(id, remove_data)
            else:
                self.tlog.info("*skip* schedule torrent(%s) for removal" % name)
            log_level = logging.INFO if result else logging.ERROR
            self.tlog.log(log_level, "post-process status %s" % task.status)
        except Exception as e:
            result = False
            task.status = TaskStatus.FAILED
            self.tlog.error('Exception occurred while processing torrent: ' + str(e))
            for line in sb_err.format_exception():
                self.tlog.error("%s" % line)
        finally:
            # record the task outcome whether we succeeded or not
            self.tasklog.add(task)
        defer.returnValue(result)
def _ensure_saved_path(self, torrent):
dir = TorrentInfo.get_saved_path(torrent)
name = TorrentInfo.get_display_name(torrent)
mode = TorrentInfo.get_mode(torrent)
if mode == TorrentMode.UNKNOWN and not os.path.isdir(dir):
self.tlog.info("Treating torrent with mode UNKNOWN as MULTI_FILE");
self.tlog.info("Creating missing directory %s for correct failed handling with Sickbeard." % dir);
os.makedirs(dir)
elif mode == TorrentMode.MULTI_FILE and not os.path.isdir(dir):
self.tlog.info("Creating missing directory %s for correct failed handling with Sickbeard." % dir);
os.makedirs(dir)
elif mode == TorrentMode.SINGLE_FILE and not os.path.isfile(dir + "/" + name):
self.tlog.info("Creating missing file %s for correct failed handling with Sickbeard." % (dir + "/" + name));
open(dir + "/" + name, 'a').close()
    @inlineCallbacks
    def call_sickbeard(self, task):
        """
        Call Sickbeard over HTTP to post-process the torrent of *task*.

        Sickbeard post-processing API parameters used below:
            dir        : directory to be post-processed by Sickbeard
                         SINGLE-FILE torrent: download-complete-path
                         MULTI-FILE torrent : download-complete-path/name
            nzbname    : name of the torrent to be post-processed
            quiet      : 1 for no HTML output; unset for HTML output
            type       : "manual" (scheduled, processes TV_DOWNLOAD_DIR) or
                         "auto" (script post-processing of a specific directory)
            force      : "on" to re-process already processed dirs/files
            is_priority: "on" to replace the file even if it already exists
                         at higher quality
            failed     : 1 to mark the download as failed, 0 otherwise
            method     : copy, move, hardlink or symlink

        Sickbeard uses "nzbname" to look for a "resource" in the
        post-processing directory: either a single video file or a directory
        of video files (covering SINGLE-FILE and MULTI-FILE torrents). The
        resource is first looked up in Sickbeard's history table to map it to
        a show/season/episode; failing that, Sickbeard scans the directory
        and parses names itself, which also covers manually added torrents.

        Per the BitTorrent metainfo spec, "name" is the advisory filename
        (SINGLE-FILE) or directory name (MULTI-FILE); Deluge follows the
        advisory, which is why dir/name is used as the resource path.

        Returns:
            bool: True when Sickbeard reported success and no errors, False
            otherwise (delivered via defer.returnValue — this is an
            @inlineCallbacks generator, not a plain function).
        """
        try:
            torrent = task.torrent
            # NOTE: 'dir' and 'id' shadow builtins; kept as-is.
            dir = TorrentInfo.get_saved_path(torrent)
            name = TorrentInfo.get_display_name(torrent)
            mode = TorrentInfo.get_mode(torrent)
            id = TorrentInfo.get_id(torrent)
            # Build the query parameters for Sickbeard's post-processing URL.
            # A value of None means "leave the parameter unset".
            params = {}
            params['dir'] = dir.encode('utf-8')
            params['nzbName'] = name.encode('utf-8')
            params['quiet'] = 1 if self.config["quiet"] else None
            params['type'] = ProccessType.AUTOMATIC
            params['process_method'] = self.config["method"] if self.config["method"] != ProccessMethod.SB_DEFAULT else None
            params['force'] = "on" if self.config["force"] or task.force else None
            params['is_priority'] = "on" if self.config["priority"] or task.priority else None
            params['failed'] = 1 if task.failed else 0
            base_url = self.get_base_url()
            self.tlog.info("Contacting Sickbeard for post-processing")
            self.tlog.info("Torrent(nzbname): %s" % name)
            self.tlog.info("Using base URL : %s" % base_url)
            self.tlog.info("Username : %s" % self.config['username'])
            self.tlog.info("Request Type : %s" % params['type'])
            self.tlog.info("Directory : %s" % params['dir'])
            self.tlog.info("Mode : %s" % mode)
            methodDisplay = self.config["method"] if self.config["method"] != ProccessMethod.SB_DEFAULT else "Sickbeard Default"
            self.tlog.info("Method : %s" % methodDisplay)
            self.tlog.info("Priority : %s" % self.config["priority"])
            self.tlog.info("Failed : %s" % params['failed'])
            self.tlog.info("Quiet : %s" % self.config["quiet"])
            # The downloaded content directory/file must exist, even if the
            # download actually failed, for Sickbeard to process the torrent.
            self._ensure_saved_path(torrent);
            client = WebClient(self.wlog)
            # Yield suspends this generator until the HTTP request completes.
            result = yield client.get(base_url, args = params, username = self.config['username'], password = self.config['password'])
        except Exception as e:
            # Any failure (missing path, HTTP error, ...) is treated as a
            # failed post-processing run; the traceback is logged below.
            # NOTE(review): sb_err appears to be a traceback-formatting
            # helper imported elsewhere in this file — confirm.
            result = False
            self.tlog.error('Exception occurred while processing torrent: ' + str(e))
            for line in sb_err.format_exception():
                self.tlog.error("%s" % line)
        # Scan Sickbeard's textual response for known error/success markers.
        errors = 0
        success = 0
        if result:
            self.tlog.info("%s bytes received:" % len(result))
            for line in result.split('\n'):
                self.tlog.info(" %s" % line)
                if line:
                    for pattern in SickbeardWorker.ERRORS:
                        errors += line.count(pattern)
                    for pattern in SickbeardWorker.SUCCESS:
                        success += line.count(pattern)
        # Success requires at least one success marker and no error markers.
        succeeded = True if errors == 0 and success >= 1 else False
        if succeeded:
            self.tlog.info("Sickbeard post-processing torrent(%s) succeeded" % name)
        else:
            self.tlog.info("Sickbeard post-processing torrent(%s) failed" % name)
        defer.returnValue(succeeded)
@staticmethod
def get_base_url(config = None):
if not config:
if SickbeardWorker.manager is None or \
SickbeardWorker.config is None or \
SickbeardWorker.tasklog is None:
raise TypeError("SickbeardWorker requires a valid manager, config and tasklog object.")
config = SickbeardWorker.config
proto = 'https' if config['ssl'] else 'http'
base_url = proto + '://' \
+ config['host' ] + ':' \
+ str(config['port']) \
+ SickbeardWorker._PATH_POSTPROCESS
return bytes(base_url)
class SickbeardWorkerQueue(WorkerQueue):
    """Queue of Sickbeard post-processing tasks.

    Spawns SickbeardWorker instances to perform the actual work, throttling
    concurrency so that neither the Deluge daemon nor Sickbeard monopolise
    system resources; Deluge's main job (downloading torrents) keeps
    priority.
    """
    def __init__(self, num_workers, log_status = False, log_interval = WorkerQueue._LOG_INTERVAL):
        # Dedicated logger for the queue itself; workers log separately.
        self.log = getLogger('deluge.sickbeard.SickbeardWorkerQueue')
        super(SickbeardWorkerQueue, self).__init__(SickbeardWorker, num_workers, log_status, log_interval)
    def put(self, task):
        """Mark *task* as queued and hand it to the underlying worker queue.

        Raises TypeError for anything that is not a Task instance.
        """
        if isinstance(task, Task):
            task.status = TaskStatus.QUEUED
            return super(SickbeardWorkerQueue, self).put(task)
        raise TypeError('Instance not of class Task')
    def __contains__(self, task):
        """True when a task with the same id is currently known to the queue."""
        known_ids = self.get_all()
        return task.id in known_ids
if __name__ == "__main__":
logging.getLogger('WebClient').setLevel(logging.INFO)
from log import getLogger, DispatchingFormatter, TaskFormatter, DefaultFormatter, TaskHandler
# Get custom logger
log = getLogger('deluge.sickbeard')
# Do not propagate to higher level handlers
log.propagate = False
log.setLevel(logging.DEBUG)
# File handler with dispatching formatter
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(DispatchingFormatter([ TaskFormatter(), DefaultFormatter() ]))
log.addHandler(handler)
# Task handler
handler = TaskHandler()
handler.setFormatter(TaskFormatter())
log.addHandler(handler)
log.info('Starting test')
tr = ProccessMethod.getTranslations()
print tr
class Manager:
def remove():
pass
SickbeardWorker.tasklog = Tasklog()
SickbeardWorker.manager = Manager()
SickbeardWorker.config = {
'ssl' : False,
'host' : 'localhost',
'port' : 8081,
'username' : 'admin',
'password' : 'admin',
'method' : ProccessMethod.DEFAULT,
'quiet' : True,
'force' : False,
'priority' : False,
'remove' : False,
'remove_data': False,
'workers' : 4
}
num_workers = SickbeardWorker.config['workers']
wqueue = SickbeardWorkerQueue(num_workers = num_workers, log_status = True, log_interval = 1)
n = 0
#reactor.callLater(0, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(1, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
#reactor.callLater(random.random() * 10, wqueue.put, "some job" + str(n ++ 1))
reactor.callLater(0, wqueue.put, Task(Torrent()))
reactor.run()
|
{
"content_hash": "b0039268bf103adbf29dd2b181e8dbe1",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 135,
"avg_line_length": 40.22704081632653,
"alnum_prop": 0.5921111040649375,
"repo_name": "srluge/DelugeSickbeardPlugin",
"id": "edea38170d7449f38d6866f6d9e04e78a4a842f8",
"size": "16850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sickbeard/sickbeard_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "416662"
},
{
"name": "Python",
"bytes": "116644"
}
],
"symlink_target": ""
}
|
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "PunisherCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
{
"content_hash": "8616daa64c4e516f3b86a16450e119c6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.7112462006079028,
"repo_name": "P666Coin/P666",
"id": "85cddb263f3c939ca4a074aca61a8907d177aaa7",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32873"
},
{
"name": "C++",
"bytes": "2606276"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18284"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "102418"
},
{
"name": "NSIS",
"bytes": "6048"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69734"
},
{
"name": "QMake",
"bytes": "14730"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
}
|
import requests
import base64
import time
import json
# Target server for the API calls below; switch to the commented-out values
# to run against the deployed App Engine instance instead of a local server.
URL = "http://localhost:8080"
num_users = 3
#URL = "http://daranalysis-200000.appspot.com"
#num_users = 10
def create_users():
    """Create num_users accounts via the submitted_user API.

    User index 1 becomes the Admin account; every other index becomes a
    regular User. Returns the list of created user identities.
    """
    url = URL + "/api/v1/submitted_user"
    credentials = base64.b64encode("superuser:password")
    auth_header = {"Authorization": "Basic %s" % credentials}
    created = []
    for idx in range(0, num_users):
        if idx == 1:
            identity = "Admin"
            payload = {"identity": identity, "type": "Admin"}
        else:
            identity = "User-" + "-" + str(idx)
            payload = {"identity": identity, "type": "User"}
        payload["email"] = identity + "@crytocurrency.com"
        payload["password"] = "defaultPassword"
        requests.post(url,
                      data=payload,
                      headers=auth_header)
        created.append(identity)
    return created
def delete_users():
    """Remove all users via the delete_users API, authenticating as superuser.

    The response is intentionally ignored: this is a best-effort cleanup
    helper (the original bound it to an unused local variable).
    """
    url = URL + "/api/v1/delete_users"
    usrPass = "superuser:password"
    b64Val = base64.b64encode(usrPass)
    requests.delete(url,
                    headers={"Authorization": "Basic %s" % b64Val})
def main():
    """Entry point: seed the test users (cleanup helper left disabled)."""
    create_users()
    # delete_users()  # enable to wipe users instead of creating them
# Run the user-seeding script when executed directly.
if __name__ == "__main__":
    main()
|
{
"content_hash": "fa01c184bee9fd04c8fdbc0112fe213a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 27.76595744680851,
"alnum_prop": 0.5547892720306513,
"repo_name": "paddyvishnubhatt/cryptocurrency",
"id": "1779fc96d563cb50044bc2f2f7ddb481744fdf3f",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "17908"
},
{
"name": "C",
"bytes": "6964"
},
{
"name": "C#",
"bytes": "37439"
},
{
"name": "C++",
"bytes": "11198"
},
{
"name": "CSS",
"bytes": "27673"
},
{
"name": "HTML",
"bytes": "50328"
},
{
"name": "Java",
"bytes": "546700"
},
{
"name": "JavaScript",
"bytes": "101799"
},
{
"name": "Objective-C",
"bytes": "572971"
},
{
"name": "Python",
"bytes": "2157013"
},
{
"name": "QML",
"bytes": "2765"
},
{
"name": "Shell",
"bytes": "4132"
}
],
"symlink_target": ""
}
|
"""
Yet Another Django Profiler management command tests
"""
from __future__ import unicode_literals
import platform
import re
import os
import sys
from tempfile import NamedTemporaryFile
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.six.moves import cStringIO as StringIO
from django.utils.text import force_text
import pytest
class ManagementCommandCases(object):
    """Shared test cases for the ``profile`` management command.

    Mixed into the backend-specific TestCase subclasses below; each test
    drives ``_run_command``, which profiles Django's ``diffsettings``
    command and returns the captured output for assertions.
    """
    def test_call_graph(self):
        """Using "profile" without a parameter should yield a PDF call graph"""
        f = NamedTemporaryFile(delete=False)
        f.close()
        output = self._run_command(path=f.name)
        assert 'Wrote call graph to {}'.format(f.name) in output
        assert os.path.getsize(f.name) > 4
        # A PDF file always starts with the magic bytes '%PDF'.
        with open(f.name, 'rb') as pdf:
            assert pdf.read(4) == b'%PDF'
        os.unlink(f.name)
    def test_calls_by_count(self):
        """Using "-s calls" should show a table of function calls sorted by call count"""
        output = self._run_command(sort='calls')
        assert 'Ordered by: call count' in output
    def test_calls_by_cumulative(self):
        """Using "-s cumulative" should show a table of function calls sorted by cumulative time"""
        output = self._run_command(sort='cumulative')
        assert 'Ordered by: cumulative time' in output
    def test_calls_by_file_name(self):
        """Using "-s file" should show a table of function calls sorted by file name"""
        output = self._run_command(sort='file')
        assert 'Ordered by: file name' in output
    def test_calls_by_function_name(self):
        """Using "-s name" should show a table of function calls sorted by function name"""
        output = self._run_command(sort='name')
        assert 'Ordered by: function name' in output
    def test_calls_by_function_name_file_and_line(self):
        """Using "-s nfl" should show a table of function calls sorted by function name, file, and line"""
        output = self._run_command(sort='nfl')
        assert 'Ordered by: name/file/line' in output
    def test_calls_by_line_number(self):
        """Using "-s line" should show a table of function calls sorted by line_number"""
        output = self._run_command(sort='line')
        assert 'Ordered by: line number' in output
    def test_calls_by_module(self):
        """Using "-s module" should show a table of function calls sorted by file name"""
        output = self._run_command(sort='module')
        assert 'Ordered by: file name' in output
    def test_calls_by_primitive_call_count(self):
        """Using "-s pcalls" should show a table of function calls sorted by primitive call count"""
        output = self._run_command(sort='pcalls')
        # Header wording differs across backends, hence the regex.
        assert re.search(r'Ordered by: (primitive )?call count', force_text(output, 'utf-8'))
    def test_calls_by_stdname(self):
        """Using "-s stdname" should show a table of function calls sorted by standard name"""
        output = self._run_command(sort='stdname')
        assert 'Ordered by: standard name' in output
    def test_calls_by_time(self):
        """Using "-s time" should show a table of function calls sorted by internal time"""
        output = self._run_command(sort='time')
        assert 'Ordered by: internal time' in output
    def test_default_fraction(self):
        """By default, the fraction of displayed function calls should be 0.2"""
        output = self._run_command(sort='time')
        assert 'due to restriction <0.2>' in output
    def test_custom_fraction(self):
        """It should be possible to specify the fraction of displayed function calls"""
        output = self._run_command(sort='time', fraction='0.3')
        assert 'due to restriction <0.3>' in output
    def test_max_calls(self):
        """It should be possible to specify the maximum number of displayed function calls"""
        output = self._run_command(sort='time', max_calls='5')
        assert 'to 5 due to restriction <5>' in output
    def test_pattern(self):
        """It should be possible to specify a regular expression filter pattern"""
        output = self._run_command(sort='time', pattern='test')
        # Python 2 reprs the pattern as u'test', Python 3 as 'test'.
        assert re.search(r"due to restriction <u?'test'>", force_text(output, 'utf-8'))
    def _run_command(self, **options):
        """Run the profile command with the given options on the diffsettings command and capture the output"""
        output = StringIO()
        options = options.copy()
        options['backend'] = 'cProfile'
        options['testing'] = True
        # call_command requires every option to be present; default to None.
        for option in ('fraction', 'max_calls', 'path', 'pattern', 'sort'):
            if option not in options:
                options[option] = None
        call_command('profile', 'diffsettings', stdout=output, **options)
        text = output.getvalue()
        # Sanity check that diffsettings itself actually ran.
        assert 'INSTALLED_APPS' in text
        return text
@override_settings(YADP_ENABLED=True)
class CProfileCommandTest(TestCase, ManagementCommandCases):
    """Management command tests using cProfile"""
    def test_backend(self):
        """The cProfile profiling backend should be used"""
        from yet_another_django_profiler.conf import settings as yadp_settings
        backend = yadp_settings.YADP_PROFILER_BACKEND
        assert backend == 'cProfile'
@pytest.mark.skipif(platform.python_implementation() != 'CPython' or sys.version_info[:2] == (3, 2),
                    reason='yappi does not yet work in this Python implementation')
@override_settings(YADP_ENABLED=True, YADP_PROFILER_BACKEND='yappi')
class YappiCommandTest(TestCase, ManagementCommandCases):
    """Management command tests using Yappi instead of cProfile"""
    def test_backend(self):
        """The Yappi profiling backend should be used"""
        from yet_another_django_profiler.conf import settings as yadp_settings
        backend = yadp_settings.YADP_PROFILER_BACKEND
        assert backend == 'yappi'
|
{
"content_hash": "668cc5734b96a7fe99ae2713e2b02c7d",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 111,
"avg_line_length": 42.12230215827338,
"alnum_prop": 0.6618274978650726,
"repo_name": "AlexandreProenca/yet-another-django-profiler",
"id": "a8d4b9c839cf2ff0616b7b7450ad4208e2bdad6d",
"size": "6133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yet_another_django_profiler/tests/test_management_command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "145307"
}
],
"symlink_target": ""
}
|
class GameEvent:
    """A single game event, identified by one of the event-type codes below."""

    # Event type codes.
    GAME_EVENT_DEFAULT = 1
    MOVE_UP = 2
    MOVE_DOWN = 3
    MOVE_LEFT = 4
    MOVE_RIGHT = 5

    def __init__(self, eventType):
        """Create an event of the given type; payload is attached separately."""
        self.eventType = eventType

    def setEventData(self, data):
        """Attach an arbitrary payload to this event."""
        self.eventData = data
|
{
"content_hash": "3efdf83093dc68f9f7533a8cde441941",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 34,
"avg_line_length": 19.307692307692307,
"alnum_prop": 0.5896414342629482,
"repo_name": "eott/procgen",
"id": "f4e50ea767d0a0beba02789730137579cbdc1863",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GameEvent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13892"
}
],
"symlink_target": ""
}
|
"""
IPIF (IP IC) interface is simple bus interface used as a service bus in FPGA designs
* https://www.xilinx.com/support/documentation/ip_documentation/axi_lite_ipif/v3_0/pg155-axi-lite-ipif.pdf
"""
|
{
"content_hash": "322e2ca80a23756f998527e1c858a95b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 106,
"avg_line_length": 40,
"alnum_prop": 0.76,
"repo_name": "Nic30/hwtLib",
"id": "217e5d3a4f396b4f2badbe0083f8f41a2a384738",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/xilinx/ipif/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
}
|
"""
WSGI config for tnp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tnp.settings")
# WSGI entry point used by application servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
|
{
"content_hash": "88603dd92d844b99553bb19211b3a98c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24,
"alnum_prop": 0.765625,
"repo_name": "aakashrana1995/svnit-tnp",
"id": "758906899c6cde02e2c960379acc6cf699ae8500",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tnp/tnp/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45609"
},
{
"name": "HTML",
"bytes": "111453"
},
{
"name": "JavaScript",
"bytes": "68394"
},
{
"name": "Python",
"bytes": "112993"
}
],
"symlink_target": ""
}
|
# Public API of the bowtie2 sub-package: the file/directory formats and the
# semantic type for Bowtie 2 index data.
from ._formats import (Bowtie2IndexFileFormat, Bowtie2IndexDirFmt)
from ._types import Bowtie2Index
__all__ = ['Bowtie2IndexFileFormat', 'Bowtie2IndexDirFmt', 'Bowtie2Index']
|
{
"content_hash": "ec936a9a26426de8b4cd5c315f4da6ad",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 35.4,
"alnum_prop": 0.7853107344632768,
"repo_name": "qiime2/q2-types",
"id": "ffdff72f961946ca59f24019ea0ef357d948ff25",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_types/bowtie2/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "269"
},
{
"name": "Python",
"bytes": "418135"
},
{
"name": "TeX",
"bytes": "1121"
}
],
"symlink_target": ""
}
|
from ..models import Experiment, Component
from toposort import toposort_flatten
from ..ml_models import Classifier
from rest_framework import viewsets
from django.http import HttpResponse
from collections import Counter
# TODO: [refactor] this import statement should specify needed file instead of '*'
from pandas import *
import threading
import json
# Module-level cache of intermediate DataFrames, keyed by experiment id.
# NOTE(review): shared across threads without locking — appears to assume a
# single in-flight request per experiment; confirm before relying on it.
CACHE = {}
class myThread(threading.Thread):
    """Worker thread that executes a workflow up to a target component.

    Walks the experiment's component graph in topological order, applying
    each component's operation to a pandas DataFrame, and fills self.result
    with data, statistics and status once the end component is reached.
    """
    def __init__(self, thread_id, name, experiment, component_id, max_results, cache_results):
        """Remember the experiment/component to run and result limits.

        component_id is the component at which execution stops; max_results
        caps how many data rows are copied into the result payload;
        cache_results controls whether the final DataFrame is kept in CACHE.
        """
        threading.Thread.__init__(self)
        self.threadID = thread_id
        self.name = name
        self.experiment = experiment
        self.comp_id = component_id
        self.result = {}
        self.max_results = max_results
        self.cache_results = cache_results
    def run(self):
        """Execute the workflow graph and populate self.result."""
        print "Run called for thread name", self.name, "End component", self.comp_id
        exp = Experiment.objects.get(pk=self.experiment)
        graph = exp.workflow.graph_data
        graph_data = {}
        print graph
        status = "success"
        err_msg = ""
        # graph_data is serialized as "a:b,c:d,..." where "a:b" means edge
        # a -> b; build a {node: set(dependencies)} mapping for toposort.
        tmp = graph.split(',')
        for elem in tmp:
            node = elem.split(":")
            if len(node) > 1:
                first_node = node[0]
                second_node = node[1]
            else:
                first_node = node[0]
                second_node = ''
            if second_node in graph_data:
                depend_nodes = graph_data[second_node]
                depend_nodes.add(first_node)
            else:
                graph_data[second_node] = set()
                graph_data[second_node].add(first_node)
        topological_graph = toposort_flatten(graph_data)
        print "Graph after topological sort", topological_graph
        # Reuse the cached DataFrame for this experiment when available.
        if self.experiment in CACHE:
            input_data = CACHE[self.experiment]
        else:
            # NOTE(review): this binds the DataFrame *class*, not an
            # instance — presumably a workflow always starts with an Input
            # component that replaces it; confirm.
            input_data = DataFrame
        feature_names = None
        feature_types = None
        output_data = None
        # Apply each component's operation in dependency order.
        for data in topological_graph:
            component_id = int(data)
            comp = Component.objects.get(pk=component_id)
            print "Component_id", component_id, " ", comp.operation_type
            op = comp.operation_type
            # --- Create operations: load tables, append rows, train models.
            if op.function_type == 'Create':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Input':
                        filename = op.function_subtype_arg
                        input_data = read_csv(filename)
                        feature_names = input_data.columns
                # TODO: [refactor] elif?
                if op.function_arg == 'Row':
                    if op.function_subtype == 'Row':
                        row_values = json.loads(op.function_subtype_arg)
                        input_data.loc[len(input_data) + 1] = row_values
                if op.function_arg == 'Model':
                    if op.function_subtype == 'Train-Test':
                        params = json.loads(op.function_subtype_arg)
                        train_data_percentage = int(params["train_data_percentage"])
                        target_column = int(params["target_column"])
                        model_type = op.function_arg_id
                        print model_type, train_data_percentage, target_column
                        target_feature = feature_names[target_column]
                        try:
                            # Train on every column except the target.
                            actual_target_column = input_data.columns.get_loc(target_feature)
                            input_feature_columns = range(len(input_data.columns))
                            input_feature_columns.remove(actual_target_column)
                            input_features = input_data.columns[input_feature_columns]
                            classifier = Classifier(
                                input_data, model_type, train_data_percentage,
                                input_features, target_feature)
                            output_data = classifier.learn()
                        except ValueError as e:
                            status = "failure"
                            err_msg = " Invalid input for the model training"
                        except KeyError as e:
                            status = "failure"
                            err_msg = target_feature + " column is not available for Model Training"
            # TODO: [refactor] elif?
            # --- Update operations: metadata and per-column arithmetic.
            if op.function_type == 'Update':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Metadata':
                        feature_types = json.loads(op.function_subtype_arg)
                        print "Feature Names", feature_names, " Feature_types ", feature_types
                if op.function_arg == 'Column':
                    if op.function_subtype == 'Add':
                        constant_value = float(op.function_subtype_arg)
                        column_id = float(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] += constant_value
                    if op.function_subtype == 'Sub':
                        constant_value = float(op.function_subtype_arg)
                        column_id = float(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] -= constant_value
                    if op.function_subtype == 'Mult':
                        constant_value = float(op.function_subtype_arg)
                        column_id = float(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] *= constant_value
                    if op.function_subtype == 'Div':
                        constant_value = float(op.function_subtype_arg)
                        column_id = float(op.function_arg_id)
                        column_name = feature_names[column_id]
                        if column_name not in input_data:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        elif input_data[column_name].dtype == 'object':
                            #print "Column name ", column_name, " is not integer/float. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = " Invalid input in column "+ column_name+ " for the current operation"
                        else:
                            input_data[column_name] /= constant_value
                    if op.function_subtype == 'Normalize':
                        # Divide the column by its sum.
                        column_id = float(op.function_arg_id)
                        column_name = feature_names[column_id]
                        sum_array = input_data.sum(axis=0)
                        if column_name not in sum_array:
                            #print "Column name ", column_name, " not present. Skipping"
                            #continue # throw error in module status
                            status = "failure"
                            err_msg = column_name + " column is not available for current operation"
                        else:
                            normalization_value = sum_array[column_name]
                            input_data[column_name] = input_data[column_name] / normalization_value
            # TODO: [refactor] elif?
            # --- Filter operations: projection, dedup, missing values.
            if op.function_type == 'Filter':
                if op.function_arg == 'Table':
                    if op.function_subtype == 'Project':
                        # Keep only the listed columns; drop the rest.
                        column_id_list = json.loads(op.function_arg_id)
                        excluded_columns = range(len(feature_names))
                        for elem in column_id_list: # Bug: Calling Projection twice will break indexing logic
                            excluded_columns.remove(elem)
                        excluded_columns = [x for x in excluded_columns if feature_names[x] in input_data]
                        print "Excluded columns ", excluded_columns
                        if excluded_columns:
                            input_data = input_data.drop(feature_names[excluded_columns], axis=1)
                    if op.function_subtype == 'RemoveDup':
                        column_id_list = json.loads(op.function_arg_id)
                        column_name_list = []
                        for elem in column_id_list:
                            column_name = feature_names[elem]
                            if column_name not in input_data:
                                #print "Column name ", column_name, " not present. Skipping"
                                #continue # throw error in module status
                                status = "failure"
                                err_msg = column_name + " column is not available for current operation"
                            else:
                                column_name_list.append(column_name)
                        if column_name_list:
                            input_data = input_data.drop_duplicates(subset=column_name_list)
                    if op.function_subtype == 'RemoveMissing':
                        if op.function_subtype_arg == 'Replace_mean':
                            input_data = input_data.fillna(input_data.mean().round(2))
                        if op.function_subtype_arg == 'Replace_median':
                            input_data = input_data.fillna(input_data.median().round(2))
                        if op.function_subtype_arg == 'Replace_mode':
                            input_data = input_data.fillna(input_data.mode())
                        if op.function_subtype_arg == 'Drop_row':
                            input_data = input_data.dropna(axis=0)
            # --- End component: assemble the result payload and stop.
            if component_id == self.comp_id:
                print "End component reached"
                self.result["feature_names"] = list(input_data.columns)
                if feature_types is not None:
                    self.result["feature_types"] = feature_types
                # self.result["data"] = input_data[:self.max_results].to_json()
                # Copy at most max_results rows, blanking out NaN cells.
                self.result["data"] = []
                result_length = min(len(input_data), self.max_results)
                for i in range(result_length):
                    tmp = []
                    for col in input_data.columns:
                        if json.dumps(input_data[col][i]) == 'NaN':
                            tmp.append('')
                        else:
                            tmp.append(input_data[col][i])
                    self.result["data"].append(tmp)
                # Per column: up to 4 most common non-NaN values and counts.
                self.result["graph_data"] = []
                for name in list(input_data.columns):
                    top_uniques = Counter(list(input_data[name])).most_common(4)
                    col_names = []
                    unique_count = []
                    for val in top_uniques:
                        if json.dumps(val[0]) == 'NaN':
                            continue
                        col_names.append(val[0])
                        unique_count.append(val[1])
                    tmp = [col_names, unique_count]
                    self.result["graph_data"].append(tmp)
                if output_data is not None:
                    self.result["output"] = output_data
                self.result["status"] = status
                self.result["message"] = err_msg
                self.result["missing_values"] = list(input_data.isnull().sum().values)
                # Per-column summary statistics; non-numeric columns get ''.
                mean = input_data.mean().round(2)
                median = input_data.median().round(2)
                self.result["mean"] = []
                self.result["median"] = []
                for elem in input_data.columns:
                    if elem in mean:
                        self.result["mean"].append(mean[elem])
                    else:
                        self.result["mean"].append('')
                    if elem in median:
                        self.result["median"].append(median[elem])
                    else:
                        self.result["median"].append('')
                self.result["unique_values"] = []
                for elem in input_data.columns:
                    self.result["unique_values"].append(input_data[elem].nunique())
                self.result["min"] = []
                self.result["max"] = []
                self.result["std"] = []
                self.result["25_quartile"] = []
                self.result["50_quartile"] = []
                self.result["75_quartile"] = []
                metric_val = input_data.describe()
                for elem in input_data.columns:
                    if elem in metric_val:
                        val = metric_val[elem].round(2)
                        self.result["min"].append(val["min"])
                        self.result["max"].append(val["max"])
                        self.result["std"].append(val["std"])
                        self.result["25_quartile"].append(val["25%"])
                        self.result["50_quartile"].append(val["50%"])
                        self.result["75_quartile"].append(val["75%"])
                    else:
                        self.result["min"].append('')
                        self.result["max"].append('')
                        self.result["std"].append('')
                        self.result["25_quartile"].append('')
                        self.result["50_quartile"].append('')
                        self.result["75_quartile"].append('')
                self.result["total_rows"] = input_data.shape[0]
                self.result["total_columns"] = input_data.shape[1]
                if self.cache_results is True:
                    CACHE[self.experiment] = input_data
                #print self.result
                print self.result["status"]
                print self.result["message"]
                break
class ResultViewSet(viewsets.ViewSet):
def list(self, request):
exp_id = int(request.GET.get('experiment', ''))
component_id = int(request.GET.get('component_id', ''))
print "Experiment ", exp_id
thread = myThread(1, "WorkFlow Thread", exp_id, component_id, 10, False)
thread.start()
thread.join()
return HttpResponse(json.dumps(thread.result), content_type="application/json")
|
{
"content_hash": "664fa35372402f24da6318671376a2db",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 110,
"avg_line_length": 50.877245508982035,
"alnum_prop": 0.47719649267345376,
"repo_name": "CiscoSystems/cognitive",
"id": "dbda4f2a5619248667e2255aa86161414772ff12",
"size": "17569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cognitive/app/api/results_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4648"
},
{
"name": "HTML",
"bytes": "36002"
},
{
"name": "JavaScript",
"bytes": "52668"
},
{
"name": "Python",
"bytes": "78910"
}
],
"symlink_target": ""
}
|
def grade(arg, key):
    """Grade a submission: correct iff the flag token appears in *key*.

    Returns a (passed, message) pair; *arg* is unused but part of the
    grader interface.
    """
    correct = "backwards_text_rox" in key
    if correct:
        return True, "!seod ti seY"
    return False, "Incorrect"
def get_hints():
    """Return (point-cost, hint-text) pairs for this challenge."""
    hints = [
        ("10", "Need a mirror??"),
        ("10", "String Reversal"),
    ]
    return hints
|
{
"content_hash": "a040a9c461ef77844d9c3efc46ad4eaa",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 27.5,
"alnum_prop": 0.5727272727272728,
"repo_name": "mcpa-stlouis/mcpa-ctf",
"id": "b3f6d64f49e937588eebd3abdc487c0a94420883",
"size": "220",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/graders/crypto/sdrawkcab/grader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5507"
},
{
"name": "CoffeeScript",
"bytes": "47157"
},
{
"name": "HTML",
"bytes": "84391"
},
{
"name": "Python",
"bytes": "211367"
},
{
"name": "Shell",
"bytes": "2347"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Evoluciones model/table.

    Django reconstructs historical model state from this file, so the field
    definitions below must not be edited by hand (comments are safe).
    """

    dependencies = [
        ('historias', '0006_auto_20150413_0001'),
    ]

    operations = [
        migrations.CreateModel(
            name='Evoluciones',
            fields=[
                # Explicit surrogate primary key, as emitted by the generator.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Audit timestamps: 'modificado' refreshes on every save,
                # 'creado' is set once at insert time.
                ('modificado', models.DateTimeField(auto_now=True)),
                ('creado', models.DateTimeField(auto_now_add=True)),
                ('fecha', models.DateField()),
                ('descripcion', models.TextField()),
                # Each evolution note belongs to one clinical history record.
                ('historia', models.ForeignKey(to='historias.Historias')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
|
{
"content_hash": "adb7b91ec6eb46b9d22efeeb092f88e8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 114,
"avg_line_length": 31.178571428571427,
"alnum_prop": 0.5372279495990836,
"repo_name": "btenaglia/hpc-historias-clinicas",
"id": "ae8f13fb8b9aa33193047179afe35986cb2b50fd",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpc-historias-clinicas/evoluciones/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "231102"
},
{
"name": "HTML",
"bytes": "148185"
},
{
"name": "JavaScript",
"bytes": "570412"
},
{
"name": "Python",
"bytes": "243694"
}
],
"symlink_target": ""
}
|
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH("root", "example.com", port=33)
status, stdout, stderr = ssh.execute("ps ax")
if status:
raise Exception("Command failed with non-zero status.")
print stdout.splitlines()
Execute command with huge output:
class PseudoFile(object):
def write(chunk):
if "error" in chunk:
email_admin(chunk)
ssh = sshclient.SSH("root", "example.com")
ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH("user", "example.com")
status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
stdin=open("~/myscript.sh", "r"))
Upload file:
ssh = sshclient.SSH("user", "example.com")
ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import os
import six
import time
import select
import socket
import eventlet
import paramiko
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
    """Base exception for all SSH connection and execution failures."""
class SSHTimeout(SSHError):
    """Raised when an SSH operation exceeds its allotted timeout."""
class SSH(object):
    """Represent ssh connection."""

    def __init__(self, user, host, port=22, pkey=None,
                 key_filename=None, password=None):
        """Initialize SSH client.

        :param user: ssh username
        :param host: hostname or ip address of remote ssh server
        :param port: remote ssh port
        :param pkey: RSA or DSS private key string or file object
        :param key_filename: private key filename
        :param password: password
        """
        self.user = user
        self.host = host
        self.port = port
        # Parse the key eagerly so a malformed key fails here rather than
        # at connect time.
        self.pkey = self._get_pkey(pkey) if pkey else None
        self.password = password
        self.key_filename = key_filename
        # _client doubles as a "not connected" flag (False) and, once
        # connected, the cached paramiko.SSHClient instance.
        self._client = False

    def _get_pkey(self, key):
        """Load a private key, trying RSA first and then DSS.

        A plain string is wrapped in a file-like object because paramiko
        reads keys from streams.
        """
        if isinstance(key, six.string_types):
            key = six.moves.StringIO(key)
        errors = []
        for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
            try:
                return key_class.from_private_key(key)
            except paramiko.SSHException as e:
                errors.append(e)
        # NOTE(review): the stream is not rewound between attempts, so the
        # DSS fallback reads from wherever the RSA attempt stopped --
        # confirm DSS keys actually load through this path.
        raise SSHError("Invalid pkey: %s" % (errors))

    def _get_client(self):
        # Return the cached client, or connect and cache a new one.
        if self._client:
            return self._client
        try:
            self._client = paramiko.SSHClient()
            self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self._client.connect(self.host, username=self.user,
                                 port=self.port, pkey=self.pkey,
                                 key_filename=self.key_filename,
                                 password=self.password, timeout=30)
            return self._client
        except Exception as e:
            message = ("Exception %(exception_type)s was raised "
                       "during connect to %(user)s@%(host)s:%(port)s. "
                       "Exception value is: %(exception)r")
            # Reset the flag so a later call retries the connection.
            self._client = False
            raise SSHError(message % {"exception": e,
                                      "user": self.user,
                                      "host": self.host,
                                      "port": self.port,
                                      "exception_type": type(e)})

    def close(self):
        """Close the cached connection, if any, and reset to disconnected."""
        if self._client:
            self._client.close()
            self._client = False

    def run(self, cmd, stdin=None, stdout=None, stderr=None,
            raise_on_error=True, timeout=3600):
        """Execute specified command on the server.

        :param cmd: Command to be executed.
        :param stdin: Open file or string to pass to stdin.
        :param stdout: Open file to connect to stdout.
        :param stderr: Open file to connect to stderr.
        :param raise_on_error: If False then exit code will be return. If True
                               then exception will be raised if non-zero code.
        :param timeout: Timeout in seconds for command execution.
                        Default 1 hour. No timeout if set to 0.
        """
        client = self._get_client()
        # A string stdin is wrapped so _run can stream it like a file.
        if isinstance(stdin, six.string_types):
            stdin = six.moves.StringIO(stdin)
        return self._run(client, cmd, stdin=stdin, stdout=stdout,
                         stderr=stderr, raise_on_error=raise_on_error,
                         timeout=timeout)

    def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
             raise_on_error=True, timeout=3600):
        # Core select() loop: multiplexes stdout/stderr reads and stdin
        # writes over a single paramiko channel until the command exits.
        if isinstance(cmd, (list, tuple)):
            # Quote each element so the remote shell sees one argv entry
            # per list item.
            cmd = " ".join(six.moves.shlex_quote(str(p)) for p in cmd)

        LOG.debug('Running cmd (subprocess): %s', cmd)

        transport = client.get_transport()
        session = transport.open_session()
        session.exec_command(cmd)
        start_time = time.time()

        data_to_send = ""
        stderr_data = None

        # If we have data to be sent to stdin then `select' should also
        # check for stdin availability.
        if stdin and not stdin.closed:
            writes = [session]
        else:
            writes = []

        while True:
            # Block until data can be read/write.
            r, w, e = select.select([session], writes, [session], 1)

            if session.recv_ready():
                data = session.recv(4096)
                LOG.debug("stdout: %r" % data)
                if stdout is not None:
                    stdout.write(data)
                continue

            if session.recv_stderr_ready():
                # Keep the last stderr chunk for the error message below.
                stderr_data = session.recv_stderr(4096)
                LOG.debug("stderr: %r" % stderr_data)
                if stderr is not None:
                    stderr.write(stderr_data)
                continue

            if session.send_ready():
                if stdin is not None and not stdin.closed:
                    if not data_to_send:
                        data_to_send = stdin.read(4096)
                        if not data_to_send:
                            # Local EOF: close our side so the remote
                            # command sees end-of-input.
                            stdin.close()
                            session.shutdown_write()
                            writes = []
                            continue
                    # send() may accept fewer bytes than offered; keep the
                    # remainder for the next iteration.
                    sent_bytes = session.send(data_to_send)
                    LOG.debug("sent: %s" % data_to_send[:sent_bytes])
                    data_to_send = data_to_send[sent_bytes:]

            if session.exit_status_ready():
                break

            # Equivalent to time.time() > start_time + timeout.
            if timeout and (time.time() - timeout) > start_time:
                args = {"cmd": cmd, "host": self.host}
                raise SSHTimeout(("Timeout executing command "
                                  "'%(cmd)s' on host %(host)s") % args)
            if e:
                raise SSHError("Socket error.")

        exit_status = session.recv_exit_status()
        if 0 != exit_status and raise_on_error:
            fmt = ("Command '%(cmd)s' failed with exit_status %(status)d.")
            details = fmt % {"cmd": cmd, "status": exit_status}
            if stderr_data:
                details += (" Last stderr data: '%s'.") % stderr_data
            raise SSHError(details)
        return exit_status

    def execute(self, cmd, stdin=None, timeout=3600):
        """Execute the specified command on the server.

        :param cmd: Command to be executed, can be a list.
        :param stdin: Open file to be sent on process stdin.
        :param timeout: Timeout for execution of the command.

        :returns: tuple (exit_status, stdout, stderr)
        """
        # Capture output in memory and never raise on a non-zero exit --
        # the caller inspects the returned status instead.
        stdout = six.moves.StringIO()
        stderr = six.moves.StringIO()
        exit_status = self.run(cmd, stderr=stderr,
                               stdout=stdout, stdin=stdin,
                               timeout=timeout, raise_on_error=False)
        stdout.seek(0)
        stderr.seek(0)
        return (exit_status, stdout.read(), stderr.read())

    def wait(self, timeout=120, interval=1):
        """Wait for the host will be available via ssh."""
        start_time = time.time()
        while True:
            try:
                # Any successful remote command proves ssh is up.
                return self.execute("uname")
            except (socket.error, SSHError) as e:
                LOG.debug("Ssh is still unavailable: %r" % e)
                eventlet.greenthread.sleep(interval)
            if time.time() > (start_time + timeout):
                raise SSHTimeout(("Timeout waiting for '%s'") % self.host)

    def _put_file_sftp(self, localpath, remotepath, mode=None):
        # Preferred upload path: the SFTP subsystem.
        client = self._get_client()
        sftp = client.open_sftp()
        sftp.put(localpath, remotepath)
        if mode is None:
            # Mirror the local file's permission bits.
            mode = 0o777 & os.stat(localpath).st_mode
        sftp.chmod(remotepath, mode)

    def _put_file_shell(self, localpath, remotepath, mode=None):
        # Fallback upload path for servers without SFTP: stream the file
        # through `cat` and chmod it in the same shell invocation.
        cmd = ["cat > %s" % remotepath]
        if mode is not None:
            cmd.append("chmod 0%o %s" % (mode, remotepath))
        with open(localpath, "rb") as localfile:
            cmd = "; ".join(cmd)
            self.run(cmd, stdin=localfile)

    def put_file(self, localpath, remotepath, mode=None):
        """Copy specified local file to the server.

        :param localpath: Local filename.
        :param remotepath: Remote filename.
        :param mode: Permissions to set after upload
        """
        try:
            self._put_file_sftp(localpath, remotepath, mode=mode)
        except paramiko.SSHException:
            self._put_file_shell(localpath, remotepath, mode=mode)
|
{
"content_hash": "672ac5c112ebec45ed667b773c182c2d",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 34.92170818505338,
"alnum_prop": 0.5403036787934373,
"repo_name": "HybridF5/hybrid-jacket",
"id": "b430e7d0d67fc0d6458ff3e9143551b600c19ac7",
"size": "10444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder_jacket/volume/drivers/jacket/vcloud/sshclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "732194"
},
{
"name": "Shell",
"bytes": "377"
}
],
"symlink_target": ""
}
|
from core.domain import collection_services
from core.domain import exp_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
gae_search_services = models.Registry.import_search_services()
class SearchServicesUnitTests(test_utils.GenericTestBase):
    """Test the search services module."""

    EXP_ID = 'An_exploration_id'
    COLLECTION_ID = 'A_collection_id'

    def setUp(self):
        """Create the owner/editor/viewer/admin users shared by all tests."""
        super(SearchServicesUnitTests, self).setUp()
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
        user_services.create_new_user(self.editor_id, self.EDITOR_EMAIL)
        user_services.create_new_user(self.viewer_id, self.VIEWER_EMAIL)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        self.set_admins([self.ADMIN_USERNAME])
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)

    def test_get_search_rank(self):
        """Rank starts at the base value and moves with ratings."""
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)

        base_search_rank = 20

        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)

        # Publishing alone does not change the rank.
        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)

        # A 5-star rating raises the rank by 10.
        rating_services.assign_rating_to_exploration(
            self.owner_id, self.EXP_ID, 5)
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 10)

        # An additional 2-star rating lowers the net boost to +8.
        rating_services.assign_rating_to_exploration(
            self.user_id_admin, self.EXP_ID, 2)
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 8)

    def test_search_ranks_cannot_be_negative(self):
        """Down-rating heavily clamps the rank at zero, never below."""
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)

        base_search_rank = 20

        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)

        # A user can (down-)rate an exploration at most once.
        for i in xrange(50):
            rating_services.assign_rating_to_exploration(
                'user_id_1', self.EXP_ID, 1)
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank - 5)

        # 50 distinct users each give a 1-star rating.
        for i in xrange(50):
            rating_services.assign_rating_to_exploration(
                'user_id_%s' % i, self.EXP_ID, 1)

        # The rank will be at least 0.
        exp_summary = exp_services.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(search_services.get_search_rank_from_exp_summary(
            exp_summary), 0)

    def test_search_explorations(self):
        """search_explorations forwards its arguments to the search backend."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']

        def mock_search(query_string, index, cursor=None, limit=20, sort='',
                        ids_only=False, retries=3):
            # Verify the exact arguments the service passed through.
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)

            return doc_ids, expected_result_cursor

        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_explorations(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )

        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)

    def test_patch_exploration_search_document(self):
        """Patching merges the patch dict into the stored search document."""
        def mock_get_doc(doc_id, index):
            self.assertEqual(doc_id, self.EXP_ID)
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
            return {'a': 'b', 'c': 'd'}

        def mock_add_docs(docs, index):
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
            # 'c' is overwritten by the patch and 'f' is added.
            self.assertEqual(docs, [{'a': 'b', 'c': 'e', 'f': 'g'}])

        get_doc_swap = self.swap(
            gae_search_services, 'get_document_from_index', mock_get_doc)
        add_docs_counter = test_utils.CallCounter(mock_add_docs)
        add_docs_swap = self.swap(
            gae_search_services, 'add_documents_to_index', add_docs_counter)

        with get_doc_swap, add_docs_swap:
            patch = {'c': 'e', 'f': 'g'}
            search_services.patch_exploration_search_document(
                self.EXP_ID, patch)

        self.assertEqual(add_docs_counter.times_called, 1)

    def test_search_collections(self):
        """search_collections forwards its arguments to the search backend."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']

        def mock_search(query_string, index, cursor=None, limit=20, sort='',
                        ids_only=False, retries=3):
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(
                index, collection_services.SEARCH_INDEX_COLLECTIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)

            return doc_ids, expected_result_cursor

        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_collections(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )

        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)

    def test_patch_collection_search_document(self):
        """Patching merges the patch dict into the stored search document."""
        def mock_get_doc(doc_id, index):
            self.assertEqual(doc_id, self.COLLECTION_ID)
            self.assertEqual(
                index, search_services.SEARCH_INDEX_COLLECTIONS)
            return {'a': 'b', 'c': 'd'}

        def mock_add_docs(docs, index):
            self.assertEqual(
                index, search_services.SEARCH_INDEX_COLLECTIONS)
            self.assertEqual(docs, [{'a': 'b', 'c': 'e', 'f': 'g'}])

        get_doc_swap = self.swap(
            gae_search_services, 'get_document_from_index', mock_get_doc)
        add_docs_counter = test_utils.CallCounter(mock_add_docs)
        add_docs_swap = self.swap(
            gae_search_services, 'add_documents_to_index', add_docs_counter)

        with get_doc_swap, add_docs_swap:
            patch = {'c': 'e', 'f': 'g'}
            search_services.patch_collection_search_document(
                self.COLLECTION_ID, patch)

        self.assertEqual(add_docs_counter.times_called, 1)

    def test_update_private_collection_status_in_search(self):
        """Private collections are removed from the search index."""
        def mock_delete_docs(ids, index):
            self.assertEqual(ids, [self.COLLECTION_ID])
            self.assertEqual(
                index, search_services.SEARCH_INDEX_COLLECTIONS)

        def mock_get_rights(unused_collection_id):
            return rights_manager.ActivityRights(
                self.COLLECTION_ID,
                [self.owner_id], [self.editor_id], [self.viewer_id],
                status=rights_manager.ACTIVITY_STATUS_PRIVATE
            )

        delete_docs_counter = test_utils.CallCounter(mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        get_rights_swap = self.swap(
            rights_manager, 'get_collection_rights', mock_get_rights)

        with get_rights_swap, delete_docs_swap:
            search_services.update_collection_status_in_search(
                self.COLLECTION_ID)

        self.assertEqual(delete_docs_counter.times_called, 1)

    def test_demo_collections_are_added_to_search_index(self):
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, [])

        collection_services.load_demo('0')
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, ['0'])

    def test_demo_explorations_are_added_to_search_index(self):
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, [])

        exp_services.load_demo('0')
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, ['0'])

    def test_update_private_exploration_status_in_search(self):
        """Private explorations are removed from the search index."""
        def mock_delete_docs(ids, index):
            self.assertEqual(ids, [self.EXP_ID])
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)

        def mock_get_rights(unused_exp_id):
            return rights_manager.ActivityRights(
                self.EXP_ID,
                [self.owner_id], [self.editor_id], [self.viewer_id],
                status=rights_manager.ACTIVITY_STATUS_PRIVATE
            )

        delete_docs_counter = test_utils.CallCounter(mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        get_rights_swap = self.swap(
            rights_manager, 'get_exploration_rights', mock_get_rights)

        with get_rights_swap, delete_docs_swap:
            search_services.update_exploration_status_in_search(self.EXP_ID)

        self.assertEqual(delete_docs_counter.times_called, 1)
|
{
"content_hash": "4c8242a7bbc23199bbc4bab5b1debc4c",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 78,
"avg_line_length": 39.56115107913669,
"alnum_prop": 0.6222040370976542,
"repo_name": "himanshu-dixit/oppia",
"id": "34c5f88da09a8f3a19e6a701200a8d0d5a530359",
"size": "11621",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "core/domain/search_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "101439"
},
{
"name": "HTML",
"bytes": "899603"
},
{
"name": "JavaScript",
"bytes": "2950299"
},
{
"name": "Python",
"bytes": "3818679"
},
{
"name": "Shell",
"bytes": "47818"
}
],
"symlink_target": ""
}
|
"""
This is MIDAS' lightweight ORM
"""
import sqlite3
from helpers.utilities import to_ascii
class TyORM():
    """
    This is MIDAS' lightweight ORM class.

    Rows are returned as dicts carrying both the current column values and
    the originally-loaded values (prefixed with ``_``), plus ``_table`` and
    ``_id`` bookkeeping keys that ``update`` and ``delete`` rely on.
    """

    def __init__(self, filename):
        # One shared connection/cursor per ORM instance.
        self.conn = sqlite3.connect(filename)
        self.cursor = self.conn.cursor()

    def __del__(self):
        self.conn.close()

    def commit(self):
        """commit is a simple wrapper around self.conn.commit()"""
        self.conn.commit()

    def raw_sql(self, sql, params=None):
        """Execute raw SQL (optionally parameterized), commit, and return
        the fetched rows."""
        if params:
            self.cursor.execute(sql, params)
        else:
            self.cursor.execute(sql)
        fetchall = self.cursor.fetchall()
        self.commit()
        return fetchall

    ###########################################################################
    # Create / alter table methods
    ###########################################################################

    def parse_attr(self, attr):
        """Render a single ``{name: spec}`` attribute dict as one SQL column
        clause (type, DEFAULT, NOT NULL, extra attrs, PRIMARY KEY)."""
        # list(...) so this also works on Python 3 dict views
        # (the original `attr.keys()[0]` is Python 2 only).
        name = list(attr.keys())[0]
        spec = attr[name]
        sql_col = "\"%s\" %s" % (name, spec['type'])
        try:
            if spec["default"]:
                sql_col += " DEFAULT %s" % spec["default"]
        except KeyError:
            pass
        try:
            if not spec["nullable"]:
                sql_col += " NOT NULL"
        except KeyError:
            pass
        try:
            if spec["attrs"]:
                sql_col += " %s" % spec["attrs"]
        except KeyError:
            pass
        try:
            if spec["primary_key"]:
                sql_col += " PRIMARY KEY"
        except KeyError:
            pass
        return sql_col

    def create_table(self, table_name, attrs):
        """Create the table if it does not exist; an implicit integer ``id``
        primary key column is always added."""
        sql = "CREATE TABLE IF NOT EXISTS \"%s\"(\n\t" % table_name
        sql += "\"id\" integer PRIMARY KEY,\n\t"
        for attr in attrs:
            sql += self.parse_attr({attr: attrs[attr]})
            sql += ",\n\t"
        sql = sql.strip(",\n\t")
        sql += "\n);"
        self.raw_sql(sql)

    def alter_table(self, table_name, attrs):
        """Add any columns in ``attrs`` that the existing table lacks.

        Bug fixes vs. the original:
        * no-op instead of IndexError when there is nothing to add (the
          original always called parse_attr, even on an empty dict, so
          initialize_table crashed on every up-to-date table);
        * each missing column gets its own ALTER TABLE statement, since
          sqlite's ALTER TABLE adds exactly one column at a time (the
          original silently dropped all but the first missing column).
        """
        table_info = self.raw_sql("PRAGMA table_info(\"%s\")" % table_name)
        db_cols = [row[1] for row in table_info]
        alter_cols = set(attrs.keys()) - set(db_cols)
        for col in alter_cols:
            alter_sql = "ALTER TABLE \"%s\" ADD COLUMN %s;" % (
                table_name,
                self.parse_attr({col: attrs[col]})
            )
            self.raw_sql(alter_sql)

    def create_index(self, indexes):
        """create_index creates each supplied index if it does not exist"""
        for index in indexes:
            sql = "CREATE INDEX IF NOT EXISTS %s;" % index
            self.raw_sql(sql)

    def initialize_table(self, table_name, attrs, indexes=None):
        """initialize_table creates the table if it doesn't exist, alters the
        table if its definition is different and creates any indexes that it
        needs to if they don't already exist"""
        self.create_table(table_name, attrs)
        self.alter_table(table_name, attrs)
        if indexes:
            self.create_index(indexes)

    ###########################################################################
    # Create methods
    ###########################################################################

    def insert(self, table_name, data):
        """insert is your basic insertion method"""
        data = to_ascii(data)
        if data is None:
            return None
        # Snapshot the keys once so column names and parameters are built
        # from the same ordering; a plain list is also required by
        # sqlite3's parameter binding on Python 3.
        keys = list(data.keys())
        params = [data[k] for k in keys]
        sql = "INSERT INTO %s" % table_name
        sql += "(id, %s) VALUES" % ', '.join(keys)
        sql += "(NULL, "
        sql += ', '.join(['?'] * len(params))
        sql = "%s);" % sql
        self.raw_sql(sql, params)

    ###########################################################################
    # Read methods
    ###########################################################################

    def __parse_columns(self, table_name, columns):
        """Normalize a column spec (None/"*", list/tuple, or comma string)
        into (column names, SELECT clause, '_'-prefixed original names)."""
        select_columns = []
        if columns is None or columns == "*":
            # No explicit selection: pull every column from the schema.
            results = self.raw_sql("PRAGMA table_info(\"%s\");" % table_name)
            columns = [result[1] for result in results]
        if isinstance(columns, (list, tuple)):
            select_columns = columns
        elif isinstance(columns, str):
            # Bug fix: the original tested the undefined name `column`
            # (NameError), so string specs could never be parsed.
            columns = columns.replace(" ", "").split(",")
            select_columns = columns
        if isinstance(select_columns, (list, tuple)) and select_columns:
            select_columns = ', '.join(select_columns)
        original_columns = ["_%s" % name for name in columns]
        return columns, select_columns, original_columns

    def select(self, table_name, columns=None, where=None, limit=None,
               order_by=None):
        """Select rows and return a list of annotated dicts, or None when
        nothing matched.

        :param where: either a raw SQL condition string or a
                      ``(clause, params)`` tuple/list for parameterized use.
        """
        columns, select_columns, original_columns = self.__parse_columns(
            table_name,
            columns
        )
        sql = "SELECT %s FROM \"%s\"" % (select_columns, table_name)
        parameterized_attrs = None
        if where is not None:
            if not isinstance(where, (tuple, list)):
                sql += " WHERE %s" % where
            else:
                # Bug fix: the original omitted the leading space here,
                # producing e.g. '... FROM "t"WHERE ...' (invalid SQL).
                sql += " WHERE %s" % where[0]
                parameterized_attrs = where[1]
        if limit is not None:
            sql += " LIMIT %s" % limit
        if order_by is not None:
            sql += " ORDER BY %s" % order_by
        sql += ";"
        if not parameterized_attrs:
            results = self.raw_sql(sql)
        else:
            results = self.raw_sql(sql, parameterized_attrs)
        return_values = []
        for row in results:
            data = dict(zip(columns, row))
            if 'id' in data:
                del data['id']
            # Merge current values, '_'-prefixed original values, and the
            # table marker. (dict.update replaces the py2-only
            # `dict_items + dict_items` concatenation.)
            final_data = dict(data)
            final_data.update(zip(original_columns, row))
            final_data["_table"] = table_name
            return_values.append(final_data)
        if not return_values:
            return None
        return return_values

    ###########################################################################
    # Update methods
    ###########################################################################

    def update(self, data):
        """update is your basic update method"""
        data = to_ascii(data)
        if data is None:
            return None
        original_data = {}
        updated_data = {}
        # Split the '_'-prefixed originally-loaded values from the current
        # ones; '_table' and '_id' are bookkeeping, not columns.
        for key in data:
            if key.startswith("_") and key != "_table" and key != "_id":
                original_data[key] = data[key]
            else:
                updated_data[key] = data[key]
        # Only write back columns whose value actually changed.
        to_change = {}
        for key in updated_data:
            if key != "_table" and key != "_id":
                if updated_data[key] != original_data["_%s" % key]:
                    to_change[key] = updated_data[key]
        if not to_change:
            return None
        sql = "UPDATE \"%s\" SET" % data["_table"]
        for key in to_change:
            sql += " %s=?," % key
        sql = sql.strip(",")
        sql += " WHERE id = ?;"
        # list(...) so append() works on Python 3 (values() is a view there).
        params = list(to_change.values())
        params.append(data["_id"])
        self.raw_sql(sql, params)

    ###########################################################################
    # Delete methods
    ###########################################################################

    def delete(self, data):
        """delete is your basic deletion method"""
        data = to_ascii(data)
        if data is None:
            return None
        if isinstance(data, dict):
            sql = "DELETE FROM \"%s\" WHERE id = ?;" % data["_table"]
            self.raw_sql(sql, [data["_id"]])
            return
        elif isinstance(data, (list, tuple)):
            # Group row ids per table so each table gets a single
            # DELETE ... WHERE id IN (...) statement.
            tables_and_ids = {}
            for item in data:
                table = item["_table"]
                if table not in tables_and_ids:
                    tables_and_ids[table] = []
                tables_and_ids[table].append(item["_id"])
            # .items() instead of the py2-only .iteritems().
            for table, ids in tables_and_ids.items():
                sql = "DELETE FROM \"%s\" WHERE id IN (%s);" % (
                    table,
                    ', '.join(['?'] * len(ids))
                )
                self.raw_sql(sql, ids)
|
{
"content_hash": "accdeda9ede50219e4ffcffdb6a96c79",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 32.8929889298893,
"alnum_prop": 0.4544536683868073,
"repo_name": "jaimeblasco/MIDAS",
"id": "f313adf915594537117a3abbbd8d339e39827922",
"size": "8936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midas/modules/lib/ty_orm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105826"
},
{
"name": "Shell",
"bytes": "202"
}
],
"symlink_target": ""
}
|
from .base import (NullBrowser, # noqa: F401
certificate_domain_list,
get_timeout_multiplier, # noqa: F401
maybe_add_args)
from .webkit import WebKitBrowser # noqa: F401
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.base import WdspecExecutor # noqa: F401
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor, # noqa: F401
WebDriverCrashtestExecutor) # noqa: F401
# wptrunner product registry: maps hook names to the callables/classes in
# this module that wptrunner uses for the "epiphany" product. wdspec tests
# get a NullBrowser because the webdriver binary is launched separately.
__wptrunner__ = {"product": "epiphany",
                 "check_args": "check_args",
                 "browser": {None: "WebKitBrowser",
                             "wdspec": "NullBrowser"},
                 "browser_kwargs": "browser_kwargs",
                 "executor": {"testharness": "WebDriverTestharnessExecutor",
                              "reftest": "WebDriverRefTestExecutor",
                              "wdspec": "WdspecExecutor",
                              "crashtest": "WebDriverCrashtestExecutor"},
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options",
                 "run_info_extras": "run_info_extras",
                 "timeout_multiplier": "get_timeout_multiplier"}
def check_args(**kwargs):
    """No product-specific command-line validation is needed for Epiphany."""
    return None
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
    """Assemble the constructor arguments for WebKitBrowser."""
    # Workaround for https://gitlab.gnome.org/GNOME/libsoup/issues/172
    required_args = ["--host=127.0.0.1"]
    merged_args = maybe_add_args(required_args, kwargs.get("webdriver_args"))
    return {
        "binary": kwargs["binary"],
        "webdriver_binary": kwargs["webdriver_binary"],
        "webdriver_args": merged_args,
    }
def capabilities(server_config, **kwargs):
    """Build the WebDriver capabilities dict for Epiphany."""
    args = kwargs.get("binary_args", [])
    # Epiphany only honours WebDriver sessions when started in automation
    # mode, so make sure the flag is present exactly once.
    if "--automation-mode" not in args:
        args.append("--automation-mode")
    browser_options = {
        "binary": kwargs["binary"],
        "args": args,
        "certificates": certificate_domain_list(
            server_config.domains_set, kwargs["host_cert_path"]),
    }
    return {
        "browserName": "Epiphany",
        "browserVersion": "3.31.4",  # First version to support automation
        "platformName": "ANY",
        "webkitgtk:browserOptions": browser_options,
    }
def executor_kwargs(logger, test_type, test_environment, run_info_data,
                    **kwargs):
    """Build keyword arguments for the WebDriver-based executors."""
    result = base_executor_kwargs(
        test_type, test_environment, run_info_data, **kwargs)
    result["close_after_done"] = True
    result["capabilities"] = capabilities(test_environment.config, **kwargs)
    return result
def env_extras(**kwargs):
    """No extra environment pieces are needed for this product."""
    return list()
def env_options():
    """No overrides to the default test-environment options."""
    return dict()
def run_info_extras(**kwargs):
    """Record that this product runs on the GTK port of WebKit."""
    return dict(webkit_port="gtk")
|
{
"content_hash": "3f80d5f83301d6a6685dcc5d20df0d90",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 106,
"avg_line_length": 39.821917808219176,
"alnum_prop": 0.585827313381493,
"repo_name": "chromium/chromium",
"id": "912173a52e46ee141452cc98d1557c1c87a9adb4",
"size": "2935",
"binary": false,
"copies": "15",
"ref": "refs/heads/main",
"path": "third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/browsers/epiphany.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Copyright 2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# noinspection PyUnresolvedReferences
from .base import * # noqa
# Development-only override: never deploy with DEBUG enabled.
DEBUG = True

# Hosts allowed to serve the site while developing locally.
ALLOWED_HOSTS = [
    'localhost',
]
|
{
"content_hash": "7e8e6fab3e7767351e002e2b65bec875",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 28.25,
"alnum_prop": 0.7669616519174042,
"repo_name": "urda/mr.butler",
"id": "b7cfc03aa2261be8e6928eba4b1d1cf3c1c2961a",
"size": "678",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/core/settings/dev.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1486"
},
{
"name": "Python",
"bytes": "73961"
},
{
"name": "Shell",
"bytes": "1431"
}
],
"symlink_target": ""
}
|
import testtools
from quantumclient.common import exceptions
from quantumclient.quantum import v2_0 as quantumV20
class CLITestArgs(testtools.TestCase):
    """Unit tests for quantumV20.parse_args_to_dict CLI argument parsing."""

    def test_empty(self):
        _mydict = quantumV20.parse_args_to_dict([])
        self.assertEqual({}, _mydict)

    def test_default_bool(self):
        # A bare flag with no explicit value parses as boolean True.
        _specs = ['--my_bool', '--arg1', 'value1']
        _mydict = quantumV20.parse_args_to_dict(_specs)
        self.assertTrue(_mydict['my_bool'])

    def test_bool_true(self):
        # Dashes in option names are normalized to underscores in the result.
        _specs = ['--my-bool', 'type=bool', 'true', '--arg1', 'value1']
        _mydict = quantumV20.parse_args_to_dict(_specs)
        self.assertTrue(_mydict['my_bool'])

    def test_bool_false(self):
        _specs = ['--my_bool', 'type=bool', 'false', '--arg1', 'value1']
        _mydict = quantumV20.parse_args_to_dict(_specs)
        self.assertFalse(_mydict['my_bool'])

    def test_nargs(self):
        # Multiple positional values after a flag collect into one list.
        _specs = ['--tag', 'x', 'y', '--arg1', 'value1']
        _mydict = quantumV20.parse_args_to_dict(_specs)
        self.assertTrue('x' in _mydict['tag'])
        self.assertTrue('y' in _mydict['tag'])

    def test_badarg(self):
        # '--tag=t' cannot also take trailing positional values.
        _specs = ['--tag=t', 'x', 'y', '--arg1', 'value1']
        self.assertRaises(exceptions.CommandError,
                          quantumV20.parse_args_to_dict, _specs)

    def test_badarg_with_minus(self):
        # A bare '-D' is rejected as an unknown short option.
        _specs = ['--arg1', 'value1', '-D']
        self.assertRaises(exceptions.CommandError,
                          quantumV20.parse_args_to_dict, _specs)

    def test_goodarg_with_minus_number(self):
        # Negative numbers are values, not options.
        _specs = ['--arg1', 'value1', '-1', '-1.0']
        _mydict = quantumV20.parse_args_to_dict(_specs)
        self.assertEqual(['value1', '-1', '-1.0'],
                         _mydict['arg1'])

    def test_badarg_duplicate(self):
        _specs = ['--tag=t', '--arg1', 'value1', '--arg1', 'value1']
        self.assertRaises(exceptions.CommandError,
                          quantumV20.parse_args_to_dict, _specs)

    def test_badarg_early_type_specification(self):
        # A 'type=' directive must follow an option, not start the list.
        _specs = ['type=dict', 'key=value']
        self.assertRaises(exceptions.CommandError,
                          quantumV20.parse_args_to_dict, _specs)

    def test_arg(self):
        _specs = ['--tag=t', '--arg1', 'value1']
        self.assertEqual('value1',
                         quantumV20.parse_args_to_dict(_specs)['arg1'])

    def test_dict_arg(self):
        # 'type=dict' turns 'k=v,...' values into a dict.
        _specs = ['--tag=t', '--arg1', 'type=dict', 'key1=value1,key2=value2']
        arg1 = quantumV20.parse_args_to_dict(_specs)['arg1']
        self.assertEqual('value1', arg1['key1'])
        self.assertEqual('value2', arg1['key2'])

    def test_dict_arg_with_attribute_named_type(self):
        # A dict key literally named 'type' must not be eaten by the parser.
        _specs = ['--tag=t', '--arg1', 'type=dict', 'type=value1,key2=value2']
        arg1 = quantumV20.parse_args_to_dict(_specs)['arg1']
        self.assertEqual('value1', arg1['type'])
        self.assertEqual('value2', arg1['key2'])

    def test_list_of_dict_arg(self):
        # 'list=true' wraps the parsed dict in a one-element list.
        _specs = ['--tag=t', '--arg1', 'type=dict',
                  'list=true', 'key1=value1,key2=value2']
        arg1 = quantumV20.parse_args_to_dict(_specs)['arg1']
        self.assertEqual('value1', arg1[0]['key1'])
        self.assertEqual('value2', arg1[0]['key2'])
|
{
"content_hash": "7693b12b32d77cceac8cca61921f5b72",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 39.548780487804876,
"alnum_prop": 0.5670675300647549,
"repo_name": "wallnerryan/quantum_migrate",
"id": "fcfff9331442dfb36e7a3e9e68abcf00cb28f3b1",
"size": "3918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantumclient/tests/unit/test_casual_args.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""\
=============================
Wrapper for pygame components
=============================
A wrapper for two dimensional pygame components that allows to display
them on a Plane in 3D using OpenGL.
This component is a subclass of OpenGLComponent and therefore uses the
OpenGL display service.
Example Usage
-------------
The following example shows a wrapped Ticker and MagnaDoodle component::
# override pygame display service
ogl_display = OpenGLDisplay.getDisplayService()
PygameDisplay.setDisplayService(ogl_display[0])
TICKER = Ticker(size = (150, 150)).activate()
TICKER_WRAPPER = PygameWrapper(wrap=TICKER, position=(4, 1,-10), rotation=(-20,15,3)).activate()
MAGNADOODLE = MagnaDoodle(size=(200,200)).activate()
MAGNADOODLEWRAPPER = PygameWrapper(wrap=MAGNADOODLE, position=(-2, -2,-10), rotation=(20,10,0)).activate()
READER = ConsoleReader().activate()
READER.link( (READER,"outbox"), (TICKER, "inbox") )
Axon.Scheduler.scheduler.run.runThreads()
How does it work?
-----------------
This component is a subclass of OpenGLComponent. It overrides
__init__(), setup(), draw(), handleEvents() and frame().
In setup() first the needed additional mailboxes are created. These are
the "eventrequest" and "wrapcallback" inboxes and the "wrapped_events"
outbox:
- "eventrequest" is used for the reception of ADDLISTENEVENT and REMOVELISTENEVENT requests of the wrapped component.
- "wrapcallback" is used to receive the response from the display service.
- "wrapped_events" is where the input events get sent to.
Additionally, a WRAPPERREQUEST is sent to the OpenGL display service. It
contains the objectid of the wrapped component as well as the comms for
callback and eventrequests.
In frame(), it is waited for the response on the WRAPPERREQUEST. The
response should contain the OpenGL texture name, the texture size and
the size of the wrapped component. The wanted events are stored and the
"wrapped_events" outbox is linked to the wrapped components "events"
inbox. If the size of the wrapper is not set, it is calculated using the
wrapped component pixel size multiplied by the pixelscaling factor.
To handle event requests by the wrapped component, the method
handleEventRequests() gets called.
In handleEvents() received mouse events get translated into the 2d space
of the wrapped component and sent to it if requested. This is done by
using ray/polygon intersection to determine the point of intersection in
3d. The 2d coordinates are then calculated by using the dot product
between the point of intersection relative to the top left corner and
the edge vectors.
In draw() a cuboid gets drawn with the texture of the pygame component
on its front plane. If the z component of the size is set to zero, only
the front plane is drawn.
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from Vector import Vector
from Transform import Transform
from OpenGLComponent import *
from Intersect import *
import copy
class PygameWrapper(OpenGLComponent):
    """\
    PygameWrapper(...) -> A new PygameWrapper component.
    A wrapper for two dimensional pygame components that allows to display
    them on a Plane in 3D using OpenGL.
    Keyword arguments:
    - wrap -- Pygame component to wrap
    - pixelscaling -- Factor to convert pixels to units in 3d, ignored if size is specified (default=100)
    - sidecolour -- Colour of side and back planes (default=(200,200,244))
    - thickness -- Thickness of wrapper, ignored if size is specified (default=0.3)
    """
    def __init__(self, **argd):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(PygameWrapper, self).__init__(**argd)
        self.pixelscaling = argd.get("pixelscaling", 100.0)
        self.sideColour = argd.get("sidecolour", (200,200,244))
        self.wrapped_comp = argd.get("wrap")
        self.thickness = argd.get("thickness", 0.3)
        # Texture name/size and wrapped-component size are filled in by the
        # display service's response to the WRAPPERREQUEST (see frame()).
        self.texname = 0
        self.texsize = (0,0)
        self.wrappedsize = (0,0)
        # event.type -> bool; which wrapped-component events to forward
        self.eventswanted = {}
        # front-plane corner vertices, used for ray intersection tests
        self.vertices = []
    def setup(self):
        """Create extra mailboxes and request wrapping from the display service."""
        # used to receive event requests from the wrapped components
        self.addInbox("eventrequests")
        # for response to wrapperrequest
        self.addInbox("wrapcallback")
        self.addOutbox("wrapped_events")
        # send wrapper request
        wraprequest = { "WRAPPERREQUEST" : True,
                        "wrapcallback" : (self, "wrapcallback"),
                        "eventrequests" : (self, "eventrequests"),
                        "wrap_objectid": id(self.wrapped_comp) }
        self.send( wraprequest, "display_signal")
    def draw(self):
        """ Draw cuboid."""
        # half-size vector; vertices are placed symmetrically around origin
        hs = self.size/2.0
        if hs.z != 0:
            # draw faces
            # side/back faces are drawn untextured in the configured side colour
            glBegin(GL_QUADS)
            glColor4f(self.sideColour[0]/256.0, self.sideColour[1]/256.0, self.sideColour[2]/256.0, 0.5)
            glVertex3f(hs.x,hs.y,hs.z)
            glVertex3f(hs.x,-hs.y,hs.z)
            glVertex3f(hs.x,-hs.y,-hs.z)
            glVertex3f(hs.x,hs.y,-hs.z)
            glVertex3f(-hs.x,hs.y,hs.z)
            glVertex3f(-hs.x,-hs.y,hs.z)
            glVertex3f(-hs.x,-hs.y,-hs.z)
            glVertex3f(-hs.x,hs.y,-hs.z)
            glVertex3f(hs.x,hs.y,hs.z)
            glVertex3f(-hs.x,hs.y,hs.z)
            glVertex3f(-hs.x,hs.y,-hs.z)
            glVertex3f(hs.x,hs.y,-hs.z)
            glVertex3f(hs.x,-hs.y,hs.z)
            glVertex3f(-hs.x,-hs.y,hs.z)
            glVertex3f(-hs.x,-hs.y,-hs.z)
            glVertex3f(hs.x,-hs.y,-hs.z)
            glVertex3f(hs.x,hs.y,-hs.z)
            glVertex3f(-hs.x,hs.y,-hs.z)
            glVertex3f(-hs.x,-hs.y,-hs.z)
            glVertex3f(hs.x,-hs.y,-hs.z)
            glEnd()
        # front plane carries the wrapped component's texture
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texname)
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
        glBegin(GL_QUADS)
        # front plane
        glTexCoord2f(0.0, 1.0-self.texsize[1])
        glVertex3f(-hs.x,-hs.y,hs.z)
        glTexCoord2f(self.texsize[0], 1.0-self.texsize[1])
        glVertex3f(hs.x,-hs.y,hs.z)
        glTexCoord2f(self.texsize[0], 1.0)
        glVertex3f(hs.x,hs.y,hs.z)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-hs.x,hs.y,hs.z)
        glEnd()
        glDisable(GL_TEXTURE_2D)
    def handleEvents(self):
        """Translate 3D mouse hits into 2D coordinates and forward wanted events."""
        while self.dataReady("events"):
            event = copy.copy(self.recv("events"))
            try:
                if self.eventswanted[event.type] and self.identifier in event.hitobjects:
                    # transform vertices for intersection test
                    self.transformedVertices = [self.transform.transformVector(v) for v in self.vertices]
                    # calculate distance of intersection
                    t = Intersect.ray_Polygon(Vector(0,0,0), event.direction, self.transformedVertices);
                    # point of intersection
                    p = event.direction*t
                    Ap = p-self.transformedVertices[0]
                    # vectors of edges
                    AB = self.transformedVertices[1]-self.transformedVertices[0]
                    AD = self.transformedVertices[3]-self.transformedVertices[0]
                    # calc position on plane
                    # dot products project the hit point onto the two edges,
                    # yielding normalised (0..1) plane coordinates
                    x = Ap.dot(AB)/(AB.length()**2)
                    y = Ap.dot(AD)/(AD.length()**2)
                    event.pos = (x*self.wrappedsize[0],y*self.wrappedsize[1])
                    self.send([event], "wrapped_events")
            except KeyError: pass # event not wanted
            except AttributeError:
                if not hasattr(event, "hitobjects"):
                    pass # Means it's probably a keyboard press - or similar - instead (no objects hit)
                else:
                    print "FAIL, Here's why:", event
                    raise # rethrow if it's not that
    def frame(self):
        """Process the display service's wrapper response, then event requests."""
        if self.dataReady("wrapcallback"):
            response = self.recv("wrapcallback")
            self.texname = response["texname"]
            self.texsize = response["texsize"]
            self.wrappedsize = response["size"]
            if response["eventswanted"] is not None:
                self.eventswanted = response["eventswanted"]
                wantedevents = []
                for (event, wanted) in self.eventswanted.items():
                    if wanted: wantedevents.append(event)
                self.addListenEvents( wantedevents )
            if response["eventservice"] is not None:
                self.link((self, "wrapped_events"), response["eventservice"])
            # calc size if not set
            if self.size == Vector(0,0,0):
                w = self.wrappedsize[0]/self.pixelscaling
                h = self.wrappedsize[1]/self.pixelscaling
                self.size = Vector(w, h, self.thickness)
            #prepare vertices for intersection test
            hs = self.size/2.0
            self.vertices = [ Vector(-hs.x, hs.y, hs.z),
                              Vector(hs.x, hs.y, hs.z),
                              Vector(hs.x, -hs.y, hs.z),
                              Vector(-hs.x, -hs.y, hs.z)
                            ]
            self.redraw()
        self.handleEventRequests()
    def handleEventRequests(self):
        """Honour ADDLISTENEVENT / REMOVELISTENEVENT requests from the wrapped component."""
        while self.dataReady("eventrequests"):
            message = self.recv("eventrequests")
            if message.get("ADDLISTENEVENT", None) is not None:
                self.eventswanted[message["ADDLISTENEVENT"]] = True
                self.addListenEvents([message["ADDLISTENEVENT"]])
            elif message.get("REMOVELISTENEVENT", None) is not None:
                self.eventswanted[message["REMOVELISTENEVENT"]] = False
                self.removeListenEvents([message["REMOVELISTENEVENT"]])
# public Kamaelia component(s) exported by this module
__kamaelia_components__ = (PygameWrapper,)
if __name__=='__main__':
    # Demo: wrap a Ticker and a MagnaDoodle in 3D planes.
    from Kamaelia.Util.Console import ConsoleReader
    from Kamaelia.UI.PygameDisplay import PygameDisplay
    from Kamaelia.UI.Pygame.Ticker import Ticker
    import sys;
    sys.path.append("../Pygame/")
    from MagnaDoodle import *
    # override pygame display service
    ogl_display = OpenGLDisplay.getDisplayService()
    PygameDisplay.setDisplayService(ogl_display[0])
    TICKER = Ticker(size = (150, 150)).activate()
    TICKER_WRAPPER = PygameWrapper(wrap=TICKER, position=(4, 1,-10), rotation=(-20,15,3)).activate()
    MAGNADOODLE = MagnaDoodle(size=(200,200)).activate()
    MAGNADOODLEWRAPPER = PygameWrapper(wrap=MAGNADOODLE, position=(-2, -2,-10), rotation=(20,10,0)).activate()
    READER = ConsoleReader().activate()
    # console input feeds the ticker text
    READER.link( (READER,"outbox"), (TICKER, "inbox") )
    Axon.Scheduler.scheduler.run.runThreads()
# Licensed to the BBC under a Contributor Agreement: THF
|
{
"content_hash": "14f3dc03c711bbf41dd1843aa7efc3ca",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 117,
"avg_line_length": 38.33797909407666,
"alnum_prop": 0.606561846769063,
"repo_name": "bbc/kamaelia",
"id": "1976a9eaa27f3409ecc91da1cde0a59feebc5572",
"size": "11909",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/UI/OpenGL/PygameWrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
"""
Design a data structure that supports the following two operations:
void addWord(word)
bool search(word)
search(word) can search a literal word or a regular expression string containing only letters a-z or the character '.'. A '.' means it can represent any one letter.
For example:
addWord("bad")
addWord("dad")
addWord("mad")
search("pad") -> false
search("bad") -> true
search(".ad") -> true
search("b..") -> true
"""
class TrieNode:
    """A single trie node: a character-to-child mapping plus an end-of-word flag."""

    def __init__(self):
        # child nodes keyed by character
        # (attribute name kept as-is for compatibility with Trie)
        self.childern = {}
        # True when this node terminates a stored word
        self.word_end = False

    def add(self, char):
        """Attach a fresh, empty child node reachable via *char*."""
        self.childern[char] = TrieNode()
class Trie:
    """Prefix tree supporting insertion and '.'-wildcard lookup."""

    def __init__(self):
        self.root = TrieNode()

    # @param {string} word
    # @return {void}
    # Inserts a word into the trie.
    def insert(self, word):
        node = self.root
        for char in word:
            if char not in node.childern:
                node.add(char)
            node = node.childern[char]
        # mark the final node as a word ending
        node.word_end = True

    # @param {string} word
    # @return {boolean}
    # Returns if the word is in the trie ('.' matches any one letter).
    def search(self, word):
        return self.search_helper(self.root, word, 0)

    def search_helper(self, root, word, index):
        """Recursively match word[index:] against the subtree rooted at *root*."""
        # base case: whole pattern consumed -- match only at a word ending
        if index == len(word):
            return root.word_end
        char = word[index]
        if char == ".":
            # wildcard: succeed if any child branch matches the rest
            return any(self.search_helper(child, word, index + 1)
                       for child in root.childern.values())
        if char in root.childern:
            # literal character: follow the single matching branch
            return self.search_helper(root.childern[char], word, index + 1)
        # no branch for this character -> match failed
        return False
class WordDictionary:
    """Word store with addWord() and '.'-wildcard search(), backed by a Trie."""

    def __init__(self):
        # the trie holding every added word
        self.dict = Trie()

    # @param {string} word
    # @return {void}
    # Adds a word into the data structure.
    def addWord(self, word):
        self.dict.insert(word)

    # @param {string} word
    # @return {boolean}
    # Returns if the word is in the data structure. A word could
    # contain the dot character '.' to represent any one letter.
    def search(self, word):
        return self.dict.search(word)
# Your WordDictionary object will be instantiated and called as such:
wordDictionary = WordDictionary()
wordDictionary.addWord("word")
# Python 2 print statement; '..r.' matches "word" -> prints True
print wordDictionary.search("..r.")
|
{
"content_hash": "76d2c37f1e1d5e5f449909a09cd2fd5d",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 146,
"avg_line_length": 22.15,
"alnum_prop": 0.6772009029345373,
"repo_name": "Ahmed--Mohsen/leetcode",
"id": "ba21ae5058d1fbf1de28cc7a1e9762b7beeb3b86",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "add_and_search_word.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317482"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
# settings must be imported after DJANGO_SETTINGS_MODULE is set
from django.conf import settings # noqa
# the Celery application instance for the test project
app = Celery('tests')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# discover tasks in every installed Django app
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    # bind=True exposes the task instance as `self`, giving access to
    # the current request context for debugging
    print('Request: {0!r}'.format(self.request))
|
{
"content_hash": "a3d2fde94b14d72f196e1447d2c78380",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 25.863636363636363,
"alnum_prop": 0.7557117750439367,
"repo_name": "jlmadurga/permabots",
"id": "248f7c127e9821f4266384cda84caf066275845d",
"size": "569",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/celery.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1236"
},
{
"name": "Python",
"bytes": "496583"
}
],
"symlink_target": ""
}
|
"""
Module for different connections.
:author: Stefan Lehmann
:email: stefan.st.lehmann@gmail.com
"""
import blinker
import logging
import threading
from serial import Serial
import socket
logger = logging.getLogger(__name__)
class Connection:
    """Marker base class for the concrete connection types below."""
    pass
class SerialConnection(Connection):
    """Connection backed by a pyserial ``Serial`` port.

    All constructor arguments are forwarded unchanged to ``serial.Serial``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.serial = Serial(*args, **kwargs)

    def open(self):
        """Open the underlying serial port."""
        self.serial.open()

    def close(self):
        """Close the underlying serial port."""
        self.serial.close()

    def send(self, data):
        """Write *data* to the port; return the number of bytes written."""
        return self.serial.write(data)

    def receive(self, size=1):
        """Read up to *size* bytes from the port."""
        return self.serial.read(size)

    @property
    def connected(self):
        # Bug fix: pyserial's ``is_open`` is a property, not a method --
        # the original ``self.serial.is_open()`` raised
        # "TypeError: 'bool' object is not callable".
        return self.serial.is_open
class SocketConnection(Connection):
    """Connection over a TCP socket.

    :param address: optional ``(host, port)`` tuple; when given, the
        connection is opened immediately.
    """

    def __init__(self, address=None):
        super().__init__()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # socket objects expose no "connected" flag, so track it manually
        self._connected = False
        if address:
            self.open(address[0], address[1])

    def open(self, host=None, port=None):
        """Connect to *host*:*port* and mark the connection open."""
        self.socket.connect((host, port))
        self._connected = True

    def close(self):
        """Close the socket and mark the connection closed."""
        self.socket.close()
        self._connected = False

    def send(self, data: bytes):
        """Send *data*; return the number of bytes actually sent.

        Bug fix: the original passed the builtin ``bytes`` *type* to
        ``socket.send`` instead of the ``data`` argument, which raised
        a TypeError on every call.
        """
        return self.socket.send(data)

    def receive(self):
        """Receive up to 4096 bytes from the socket."""
        return self.socket.recv(4096)

    @property
    def connected(self):
        return self._connected
class ConnectionThread(threading.Thread):
    """Background thread reading newline-delimited messages from a
    Connection, publishing them through the blinker 'new_messages' signal.
    """

    def __init__(self, connection, *args, **kwargs):
        self.connection = connection
        self._run = True
        # buffer for incoming bytes that do not yet form a full line
        self._buffer = ''
        # complete messages awaiting pickup via get_messages()
        self._messages = []
        # flag: clear the message list on the next loop iteration
        self._clear_messages = False
        # blinker signal fired when new complete messages arrive
        self.new_messages = blinker.signal('new_messages')
        super(ConnectionThread, self).__init__(*args, **kwargs)

    def run(self):
        while self._run:
            # clear messages already handed out by get_messages()
            if self._clear_messages:
                self._messages.clear()
                self._clear_messages = False
            new_data = self.connection.receive()
            self._buffer += new_data.decode()
            parts = self._buffer.split('\n')
            # Bug fix: the original never trimmed self._buffer after
            # splitting, so every complete message was re-parsed and
            # re-appended on each iteration while the buffer grew without
            # bound. Keep only the trailing incomplete fragment.
            self._buffer = parts.pop()
            if parts:
                self._messages.extend(parts)
                # notify listeners only when something new actually arrived
                self.new_messages.send(self)

    def get_messages(self):
        """Return a copy of the collected messages and schedule the
        internal list to be cleared on the next run() iteration."""
        self._clear_messages = True
        return self._messages[:]

    def stop(self):
        """Ask the run loop to terminate after the current receive()."""
        self._run = False
def new_messages(sender):
    """Signal handler: print every message currently queued on *sender*."""
    for msg in sender.get_messages():
        print('incoming message: {0}'.format(msg))
if __name__ == '__main__':
    # demo: connect to a local server and print incoming messages
    conn = SocketConnection(('localhost', 5000))
    thread = ConnectionThread(conn)
    thread.new_messages.connect(new_messages)
    # NOTE(review): run() executes the receive loop in the *current*
    # thread; thread.start() is presumably intended here -- confirm.
    thread.run()
|
{
"content_hash": "44726533f2dc904cedb1b2598000f77f",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 71,
"avg_line_length": 23.67479674796748,
"alnum_prop": 0.5944368131868132,
"repo_name": "MrLeeh/jsonwatch",
"id": "bf875ae13090bc7ea5158ccb48c0369b5a4ab31f",
"size": "2936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonwatch/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111432"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from p2ptracker import bencode
import hashlib
import binascii
import urllib
import logging
import os
import redis
import socket
import shutil
from mocker import Mocker
__author__ = 'ramon'
log = logging.getLogger("hyves.%s" % __name__)
def write_redis_config(port=6379, host='127.0.0.1', dbfilename="redis.db", redisdir='/tmp/redis-test'):
    """Write a reasonable redis config file for use in testing.

    Creates *redisdir* if needed and writes 'redis-test.conf' next to
    this module. Returns the path of the generated config file.
    """
    if not os.path.exists(redisdir):
        os.makedirs(redisdir)
    params = dict(port=port, host=host, dbfilename=dbfilename,redisdir=redisdir)
    params['pidfile'] = "%s/pid" % redisdir
    params['logfile'] = "%s/redis.log" % redisdir
    # resolve the host name so redis binds to a concrete IP address
    params['host'] = socket.gethostbyname(params['host'])
    log.debug(params)
    config = """
daemonize yes\n
pidfile %(pidfile)s\n
port %(port)s\n
bind %(host)s\n
timeout 30\n
loglevel notice\n
logfile %(logfile)s\n
databases 16\n
save 900 1\n\n
save 300 10\n
save 60 10000\n
rdbcompression yes\n
dbfilename %(dbfilename)s\n
dir %(redisdir)s\n
""" % params
    filename = os.path.join(os.path.dirname(__file__), 'redis-test.conf')
    # 'with' guarantees the handle is closed even if write() fails
    # (the original used bare open/close and shadowed the builtin 'file')
    with open(filename, 'w') as conf_file:
        conf_file.write(config)
    return filename
def start_redis_server(**kwords):
    '''Start a local instance of the redis server with data in tmp'''
    # generate the test config first; kwords are forwarded unchanged
    REDISCONF=write_redis_config(**kwords)
    try:
        # locate a redis binary on the PATH (prefers 'redis-server2')
        REDISBIN=os.popen("source /etc/profile; which redis-server2 2>/dev/null").read().strip()
        if not REDISBIN:
            REDISBIN=os.popen("source /etc/profile; which redis-server").read().strip()
        log.debug("Found redis binary: %s" % REDISBIN)
    except:
        # NOTE(review): bare except, but it logs and re-raises so no
        # error is swallowed; narrowing it would be safer
        log.warning("Found no redis server, cannot proceed")
        raise
    try:
        # launch the daemonized server (config sets 'daemonize yes')
        os.system("%s %s" % (REDISBIN, REDISCONF))
    except:
        log.warning("Failed to start the redis server")
        raise
def stop_redis_server(port=6379, host='127.0.0.1', dbfilename="redis.db", redisdir='/tmp/redis-test'):
    """Flush and shut down the test redis server, then remove its files."""
    r = redis.Redis(host, port)
    # wipe the test database before shutdown so no state leaks between runs
    r.flushdb()
    r.shutdown()
    # remove the generated config file and the redis working directory
    if os.path.exists(os.path.join(os.path.dirname(__file__), 'redis-test.conf')):
        os.remove(os.path.join(os.path.dirname(__file__), 'redis-test.conf'))
    if os.path.exists(redisdir):
        shutil.rmtree(redisdir)
def get_infohash_from_file(file):
    '''Return a string hash value for a torrentfile, raise exception otherwise'''
    # rewind in case the caller already read from the handle
    file.seek(0)
    metadata = bencode.bdecode(file.read())
    # the info hash is the SHA1 of the re-bencoded 'info' dict
    info_bencoded = bencode.bencode(metadata['info'])
    return hashlib.sha1(info_bencoded).hexdigest()
def get_size_from_torrentfile(file):
    '''Return an integer describing the size of the torrent payload'''
    # rewind in case the caller already read from the handle
    file.seek(0)
    info = bencode.bdecode(file.read())['info']
    if 'files' in info:
        # multi-file torrent: payload size is the sum of all entries
        return sum(entry['length'] for entry in info['files'])
    # single-file torrent
    return info['length']
def get_ihash_from_filename(filename):
    """Return the hex info-hash of the torrent file at *filename*."""
    # Bug fix: the original used try/finally around open(); if open()
    # itself failed, the finally block raised NameError on the unbound
    # handle. 'with' closes the file correctly in every case.
    with open(filename, 'r') as torrent_file:
        return get_infohash_from_file(torrent_file)
def get_size_from_filename(filename):
    """Return the payload size of the torrent file at *filename*."""
    # Bug fix: the original used try/finally around open(); if open()
    # itself failed, the finally block raised NameError on the unbound
    # handle. 'with' closes the file correctly in every case.
    with open(filename, 'r') as torrent_file:
        return get_size_from_torrentfile(torrent_file)
def urlquote_ihash(ihash):
    """URL-quote the binary form of a hex-encoded info hash (Python 2 urllib)."""
    return urllib.quote(binascii.unhexlify(ihash))
def mock_smdb_get_rack(ipaddress, rackname):
    """Patch p2ptracker.smdbapi.get_rack so it returns *rackname* for *ipaddress*."""
    mocker = Mocker()
    get_rack_mock = mocker.replace('p2ptracker.smdbapi.get_rack')
    # record the expected call, then the canned result
    get_rack_mock(ipaddress)
    mocker.result(rackname)
    mocker.replay()
def add_client(app, ihash, ipaddress, rackname, left,
               event=None, peer_id="test_client", port=10004,
               compact=1, uploaded=0, downloaded=0, key='test_key', mock_smdb=True):
    '''Call app with a suitable announce url to add a seeder'''
    if mock_smdb:
        # stub out the rack lookup so no real smdb call happens
        mock_smdb_get_rack(ipaddress, rackname)
    params = {'ihash': urlquote_ihash(ihash), 'ipaddress': ipaddress,
              'left': left, 'peer_id': peer_id, 'port': port, 'key': key,
              'compact': compact, 'uploaded': uploaded,
              'downloaded': downloaded}
    # assemble the announce URL piece by piece, mirroring a real client
    pieces = ['/announce/?info_hash=%(ihash)s',
              '&peer_id=%(peer_id)s&key=%(key)s',
              '&compact=%(compact)s&ip=%(ipaddress)s&port=%(port)s&uploaded=%(uploaded)s',
              '&downloaded=%(downloaded)s&left=%(left)s']
    if event:
        pieces.append('&event=%(event)s')
        params['event'] = event
    url = ''.join(pieces)
    with app.test_client() as c:
        return c.get(url % params)
def post_torrent(client, filename):
    """POST the torrent file at *filename* to the tracker under its basename."""
    # Bug fix: the original used try/finally around open(); if open()
    # itself failed, the finally block raised NameError on the unbound
    # handle. 'with' closes the file correctly in every case.
    with open(filename, 'r') as torrent_file:
        return client.post('torrents/%s' % os.path.basename(filename),
                           data={filename: [torrent_file]})
def delete_torrent(client, filename):
    """DELETE the torrent at *filename* from the tracker under its basename."""
    # Bug fix: the original used try/finally around open(); if open()
    # itself failed, the finally block raised NameError on the unbound
    # handle. 'with' closes the file correctly in every case.
    with open(filename, 'r') as torrent_file:
        return client.delete('torrents/%s' % os.path.basename(filename),
                             data={filename: [torrent_file]})
def get_torrentfile(client, filename):
    """GET the torrent identified by the basename of *filename*."""
    basename = os.path.basename(filename)
    return client.get('torrents/%s' % basename)
def post_torrentfile(client, filename, file):
    """POST the already-open *file* to the tracker under the basename of *filename*."""
    payload = {filename: [file]}
    return client.post('torrents/%s' % os.path.basename(filename), data=payload)
|
{
"content_hash": "caa8e7c61adfe7fdd76956e279846ad2",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 104,
"avg_line_length": 32.10344827586207,
"alnum_prop": 0.6210168277837451,
"repo_name": "TMG-nl/p2ptracker",
"id": "b7b7a65f37e80d35fe91fe7adc1748349f61ba41",
"size": "5586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2ptracker/tests/helpers/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89858"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
# load each installed app's admin module so its models register
admin.autodiscover()
# NOTE(review): patterns() is the pre-Django-1.8 URLconf style used here
urlpatterns = patterns('', # noqa
    # static template-rendered pages
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin (Comment the next line to disable the admin)
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("theses.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{
"content_hash": "3facf41e4645c65a062f332bd9e3447d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 91,
"avg_line_length": 34.73076923076923,
"alnum_prop": 0.7187153931339978,
"repo_name": "aadu/theses",
"id": "dfb54fe5bc7a49b077c67e5545192235b262025c",
"size": "927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "19627"
},
{
"name": "JavaScript",
"bytes": "2375"
},
{
"name": "Python",
"bytes": "27076"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
"""Set up the environment for doctests
This file is automatically evaluated by py.test. It ensures that we can write
doctests without importing anything. The entire content for qnet, as well as
numpy and sympy will be available in all doctests.
"""
import numpy
import sympy
import qnet
from collections import OrderedDict
# noinspection PyPackageRequirements
import pytest
# use unicode rendering for repr output in all doctests
qnet.init_printing(repr_format='unicode')
@pytest.fixture(autouse=True)
def set_doctest_env(doctest_namespace):
    """Inject numpy, sympy, OrderedDict and every public qnet name into
    the doctest namespace so doctests need no explicit imports."""
    doctest_namespace['numpy'] = numpy
    doctest_namespace['sympy'] = sympy
    doctest_namespace['OrderedDict'] = OrderedDict
    doctest_namespace.update(
        (name, getattr(qnet, name)) for name in qnet.__all__)
|
{
"content_hash": "fdd6fb86ea3e2ae3c2afbb659a908d46",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 28.2,
"alnum_prop": 0.7602836879432624,
"repo_name": "mabuchilab/QNET",
"id": "23c3944e1512d1754a3bcb4647847e86d1dc2d10",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "docs/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3915"
},
{
"name": "Python",
"bytes": "1100786"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.